"""
微密圈下载模块 - 专门处理微密圈图片下载
"""

import requests
import urllib3
import re
import json
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from PySide2.QtCore import QObject, Signal, QThread

# 禁用SSL警告
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class WeimiquanDownloader(QObject):
    """Downloader for Weimiquan (微密圈) image galleries.

    Fetches a gallery page, scrapes it for CloudFront-hosted image URLs and
    downloads them concurrently through a shared ``requests.Session``.
    Progress, status and errors are reported via Qt signals so the object can
    be driven from a GUI thread without blocking it.
    """

    # Qt signals
    progress_updated = Signal(int, int)  # (completed count, total count)
    status_updated = Signal(str)  # human-readable status line
    download_completed = Signal(int, int, float)  # (succeeded, failed, total size in MB)
    error_occurred = Signal(str)  # error description

    def __init__(self):
        super().__init__()
        # Browser-like request headers (Chrome on Windows) so the site serves
        # the same markup a real browser would receive.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9,ja;q=0.8",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\""
        }

        # Default cookies; normally replaced at runtime through the GUI via
        # set_cookies()/update_cookies().
        self.cookies = {
            "__51uvsct__KisFAjMmWonEmmaa": "1",
            "__51vcke__KisFAjMmWonEmmaa": "124af7b9-a489-54e1-9e9f-2aad28de65e5",
            "__51vuft__KisFAjMmWonEmmaa": "1747203033573",
            "__51uvsct__KpRFHkppObUqVbFN": "1",
            "__51vcke__KpRFHkppObUqVbFN": "af7f4762-a1c2-542c-b776-2e8c1f96f3bb",
            "__51vuft__KpRFHkppObUqVbFN": "1753076527969",
            "deviceId": "zREhXJja4epsynnSXydhrJ2M",
            "userStore": "%7B%22info%22%3A%7B%22userId%22%3A%221947170262007906304%22%2C%22merchantAcct%22%3A%22sf555%22%2C%22masterAcct%22%3A%22sf555_m%22%2C%22agentAcct%22%3A%22sf555_m_no_agent%22%2C%22userAcct%22%3A%22QCS13PF1%22%2C%22acctType%22%3A3%2C%22referCode%22%3Anull%2C%22shareCode%22%3A%22QCS13PF1%22%2C%22isPartner%22%3A0%2C%22phoneNumber%22%3Anull%2C%22background%22%3Anull%2C%22headUrl%22%3A%22%2Fadmin%2Fuser%2Fhead%2Fb1f9ce42dc8341c587a9f9ff5c28e887%22%2C%22nickName%22%3A%22QCS13PF1%22%2C%22signature%22%3Anull%2C%22loginType%22%3Anull%2C%22coinBalance%22%3A0%2C%22balance%22%3A0%2C%22exp%22%3A0%2C%22expLevel%22%3A0%2C%22iconFree%22%3Anull%2C%22vipBegin%22%3Anull%2C%22vipEnd%22%3Anull%2C%22vipFlag%22%3Afalse%2C%22vipTitle%22%3Anull%2C%22vipPackageId%22%3Anull%2C%22userStatus%22%3A0%2C%22followers%22%3Anull%2C%22followed%22%3Anull%2C%22lastLoginDate%22%3Anull%2C%22currentLoginDate%22%3Anull%2C%22city%22%3A%22%E9%A6%99%E6%B8%AF%22%2C%22gender%22%3Anull%2C%22videoFreeBegin%22%3Anull%2C%22videoFreeEnd%22%3Anull%2C%22actorFreeBegin%22%3Anull%2C%22actorFreeEnd%22%3Anull%2C%22expand%22%3Anull%7D%2C%22searchList%22%3A%5B%5D%2C%22scrollLeft%22%3A0%2C%22dialogTime%22%3A1753076530941%2C%22dialog4Time%22%3A0%2C%22dialog9Time%22%3A1753076530941%2C%22dialog16Time%22%3A0%2C%22agentCode%22%3A%22TDV6MC4J%22%2C%22inviteCode%22%3A%22%22%7D",
            "token": "45d27c4c99304d5fb41b2c2a63c4f350.2ROyMfQufDRX3KZY0rC5btihxlqNlZF585WuWf67FaUgWc%2BeEUUs%2FtUKd7XUku0FayrOvXG7gd4P2RGu7qZCqtXVFdAyJiqTHR1KGah%2FkYa6DccDX7eY1ddZ4bcWY6frF1T37kdU1bPq8YX5Jh3QLI0CDOgO1wRN.ab27735a0c4ae07381d4801495896644",
            "__vtins__KpRFHkppObUqVbFN": "%7B%22sid%22%3A%20%22a2055a9b-b372-522e-bd50-0495a37c8a44%22%2C%20%22vd%22%3A%2021%2C%20%22stt%22%3A%201331792%2C%20%22dr%22%3A%20274380%2C%20%22expires%22%3A%201753079659758%2C%20%22ct%22%3A%201753077859758%7D"
        }

        # Proxy settings — defaults to a local Clash proxy.
        self.proxies = {
            'http': 'http://127.0.0.1:7890',
            'https': 'http://127.0.0.1:7890'
        }

        # Cancellation flag; cleared by stop_download()/cleanup_resources().
        self.is_downloading = False

        # One shared Session for connection reuse (keep-alive) — avoids
        # re-establishing TCP/TLS for every image.
        self.session = requests.Session()
        self.session.headers.update(self.headers)

        # Connection-pool tuning so concurrent downloads share connections.
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=10,  # number of pooled hosts
            pool_maxsize=20,      # max connections per pool
            max_retries=3         # transparent retries on connection errors
        )
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

    def update_cookies(self, cookies_dict):
        """Merge *cookies_dict* into the current cookies (existing keys win for new values)."""
        self.cookies.update(cookies_dict)

    def set_cookies(self, cookies):
        """Replace the cookies.

        Accepts either a dict, a JSON-object string, or a browser-style
        ``"key=value; key2=value2"`` cookie string.
        """
        if isinstance(cookies, dict):
            self.cookies = cookies
        elif isinstance(cookies, str):
            # First try JSON; fall back to cookie-string parsing.
            # (json was previously re-imported locally — the module-level
            # import is sufficient.)
            try:
                parsed = json.loads(cookies)
            except ValueError:  # includes json.JSONDecodeError
                parsed = None
            if isinstance(parsed, dict):
                self.cookies = parsed
            else:
                # Not a JSON object (or not JSON at all): parse as a
                # semicolon-separated "key=value" list. A JSON scalar such
                # as "123" also lands here instead of becoming a non-dict
                # cookie jar.
                cookie_dict = {}
                for item in cookies.split(';'):
                    if '=' in item:
                        key, value = item.strip().split('=', 1)
                        cookie_dict[key] = value
                self.cookies = cookie_dict

    def set_proxy(self, proxy_host="127.0.0.1", proxy_port=7890):
        """Set the proxy (thin alias for update_proxy, kept for the GUI API)."""
        self.update_proxy(proxy_host, proxy_port)

    def set_thread_count(self, thread_count):
        """Set the thread count (no-op: Weimiquan downloads use a fixed pool size)."""
        pass  # fixed at 5 workers for this site

    def set_settings(self, settings):
        """Apply a settings dict (keys: proxy_host/proxy_port, cookies, thread_count).

        Missing keys are ignored; a falsy *settings* is a no-op.
        """
        if not settings:
            return

        # Proxy requires both host and port.
        if 'proxy_host' in settings and 'proxy_port' in settings:
            self.set_proxy(settings['proxy_host'], settings['proxy_port'])

        if 'cookies' in settings:
            self.set_cookies(settings['cookies'])

        # Thread count is accepted but ignored (fixed pool size).
        if 'thread_count' in settings:
            self.set_thread_count(settings['thread_count'])

    def update_proxy(self, proxy_host="127.0.0.1", proxy_port=7890):
        """Point both HTTP and HTTPS traffic at ``http://host:port``."""
        proxy_url = f"http://{proxy_host}:{proxy_port}"
        self.proxies = {
            'http': proxy_url,
            'https': proxy_url
        }

    def get_proxy_settings(self):
        """Return the proxies to use: environment variables win over defaults.

        Checks HTTP(S)_PROXY / http(s)_proxy; if neither is set, falls back
        to ``self.proxies``.
        """
        proxies = {}

        http_proxy = os.environ.get('HTTP_PROXY') or os.environ.get('http_proxy')
        https_proxy = os.environ.get('HTTPS_PROXY') or os.environ.get('https_proxy')

        if http_proxy:
            proxies['http'] = http_proxy
        if https_proxy:
            proxies['https'] = https_proxy

        # No environment overrides: use the configured defaults.
        if not proxies:
            proxies = self.proxies

        return proxies

    def search_cloudfront_images(self, content):
        """Extract all CloudFront image URLs from *content*.

        Returns a sorted, de-duplicated list of URLs beginning with the
        site's CloudFront image prefix.
        """
        self.status_updated.emit("正在搜索CloudFront图片URL...")

        # Match the full URL starting at the known CloudFront image prefix,
        # stopping at whitespace, quotes, angle brackets or backslashes.
        pattern = r'https://d1y6809ipmcbzp\.cloudfront\.net/image[^\s"\'<>\\]*'
        found_urls = re.findall(pattern, content)

        # Strip trailing escape/punctuation artifacts left by JSON-embedded
        # URLs (e.g. a trailing backslash before an escaped quote).
        cleaned_urls = []
        for url in found_urls:
            cleaned_url = url.rstrip('\\"\',;')
            if cleaned_url and cleaned_url not in cleaned_urls:
                cleaned_urls.append(cleaned_url)

        # De-duplicate and sort for a stable download order.
        unique_urls = sorted(set(cleaned_urls))

        self.status_updated.emit(f"找到 {len(unique_urls)} 个CloudFront图片URL")
        return unique_urls

    def get_page_content(self, url):
        """Fetch *url* and return its text, or None on any failure.

        Failures are reported through the ``error_occurred`` signal rather
        than raised, so the GUI thread never sees an exception.
        """
        try:
            self.status_updated.emit(f"正在访问页面: {url}")

            proxies = self.get_proxy_settings()

            # Session reuse; SSL verification disabled because the site is
            # accessed through a local proxy.
            response = self.session.get(url, cookies=self.cookies,
                                      verify=False, timeout=30, proxies=proxies)
            response.raise_for_status()

            self.status_updated.emit(f"页面访问成功，状态码: {response.status_code}")
            return response.text

        except Exception as e:
            error_msg = f"访问页面失败: {str(e)}"
            self.error_occurred.emit(error_msg)
            return None

    def extract_page_info(self, content):
        """Extract (title, author) from page HTML for use as folder names.

        Both values are sanitized for filesystem safety; defaults are the
        literal strings "未知标题" / "未知作者" when nothing matches.
        """
        title = "未知标题"
        author = "未知作者"

        try:
            # Page <title> is the best generic title source.
            title_match = re.search(r'<title>(.*?)</title>', content, re.IGNORECASE)
            if title_match:
                title = title_match.group(1).strip()
                # Replace characters that are invalid in Windows folder names.
                title = re.sub(r'[<>:"/\\|?*]', '_', title)

            # Try several author markers, in order of specificity.
            author_patterns = [
                r'作者[：:]\s*([^<>\n]+)',
                r'author[：:]\s*([^<>\n]+)',
                r'by\s+([^<>\n]+)',
                r'发布者[：:]\s*([^<>\n]+)',
                r'用户名[：:]\s*([^<>\n]+)',
                r'"author"[：:]\s*"([^"]+)"',
                r'class="author"[^>]*>([^<]+)',
            ]

            for pattern in author_patterns:
                author_match = re.search(pattern, content, re.IGNORECASE)
                if author_match:
                    author = author_match.group(1).strip()
                    author = re.sub(r'[<>:"/\\|?*]', '_', author)
                    break

        except Exception as e:
            self.status_updated.emit(f"提取页面信息时出错: {str(e)}")

        return title, author

    def download_single_image(self, url, download_dir, index, total):
        """Download one image.

        Returns ``(success, filename, size_in_bytes)``; never raises —
        failures return ``(False, filename, 0)``.
        """
        try:
            # Strip JSON-escape artifacts from the URL.
            clean_url = url.rstrip('\\"\',;')

            # Derive the local filename from the last URL path segment.
            url_parts = clean_url.split('/')
            filename = url_parts[-1] if url_parts else f"image_{index}"

            # Ensure an image extension so viewers recognize the file.
            if not any(filename.lower().endswith(ext) for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp']):
                filename += '.jpg'

            filepath = os.path.join(download_dir, filename)

            proxies = self.get_proxy_settings()

            # Image requests need image-type Accept headers and a Referer,
            # otherwise CloudFront may reject the hotlink.
            download_headers = self.headers.copy()
            download_headers.update({
                "Referer": "https://wmqxz.net/",
                "Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
                "Sec-Fetch-Dest": "image",
                "Sec-Fetch-Mode": "no-cors",
                "Sec-Fetch-Site": "cross-site"
            })

            response = self.session.get(clean_url, headers=download_headers, cookies=self.cookies,
                                       verify=False, timeout=30, stream=True, proxies=proxies)

            # `stream=True` holds the pooled connection until the body is
            # consumed or the response is closed — the context manager
            # guarantees release on every path (previously leaked on non-200).
            with response:
                if response.status_code != 200:
                    return False, filename, 0

                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)

            file_size = os.path.getsize(filepath)
            return True, filename, file_size

        except Exception:
            filename = url.split('/')[-1] if '/' in url else f"image_{index}"
            return False, filename, 0

    def download_images(self, image_urls, download_dir, max_workers=5):
        """Download *image_urls* into *download_dir* with a thread pool.

        Emits per-file status, overall progress and a final
        ``download_completed`` summary. Setting ``is_downloading`` to False
        (via stop_download) aborts processing of remaining results.
        """
        if not image_urls:
            self.error_occurred.emit("没有找到可下载的图片URL")
            return

        self.is_downloading = True
        self.status_updated.emit(f"开始下载 {len(image_urls)} 张图片...")

        # exist_ok avoids a race between the existence check and creation.
        os.makedirs(download_dir, exist_ok=True)

        successful_downloads = 0
        failed_downloads = 0
        total_size = 0
        completed = 0

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit every image as its own task.
            future_to_url = {
                executor.submit(self.download_single_image, url, download_dir, i, len(image_urls)): url
                for i, url in enumerate(image_urls, 1)
            }

            # Report results as they finish, in completion order.
            for future in as_completed(future_to_url):
                if not self.is_downloading:  # cancelled by stop_download()
                    break

                success, filename, file_size = future.result()
                completed += 1

                if success:
                    successful_downloads += 1
                    total_size += file_size
                    # Include the actual filename (was a literal "(unknown)"
                    # placeholder left in the f-string).
                    self.status_updated.emit(f"✅ {filename} 下载完成")
                else:
                    failed_downloads += 1
                    self.status_updated.emit(f"❌ {filename} 下载失败")

                self.progress_updated.emit(completed, len(image_urls))

        # Final summary in megabytes.
        total_size_mb = total_size / 1024 / 1024
        self.download_completed.emit(successful_downloads, failed_downloads, total_size_mb)
        self.is_downloading = False

    def download_images_silent(self, image_urls, download_dir, max_workers=3):
        """Download images without emitting signals (used by batch mode).

        Returns ``(successful_count, failed_count)``. Uses a smaller pool
        than download_images: batch mode runs 2 URL-level workers × 3
        image-level workers = 6 threads total.
        """
        if not image_urls:
            return 0, 0

        os.makedirs(download_dir, exist_ok=True)

        successful_downloads = 0
        failed_downloads = 0

        try:
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_url = {
                    executor.submit(self.download_single_image, url, download_dir, i, len(image_urls)): url
                    for i, url in enumerate(image_urls, 1)
                }

                for future in as_completed(future_to_url):
                    if not self.is_downloading:  # cancelled
                        break

                    try:
                        success, filename, file_size = future.result()

                        if success:
                            successful_downloads += 1
                        else:
                            failed_downloads += 1
                    except Exception:
                        # One failed image must not abort the batch.
                        failed_downloads += 1

        except Exception:
            # Pool-level failure: deliberately best-effort, return what we have.
            pass
        finally:
            # Encourage prompt release of response buffers in batch runs.
            import gc
            gc.collect()

        return successful_downloads, failed_downloads

    def stop_download(self):
        """Request cancellation of any in-progress download loop."""
        self.is_downloading = False
        self.status_updated.emit("下载已停止")

    def cleanup_resources(self):
        """Best-effort teardown: stop downloads, close the session, collect garbage."""
        try:
            self.is_downloading = False

            # Close pooled connections; guard against repeated cleanup.
            if hasattr(self, 'session') and self.session:
                self.session.close()
                self.session = None

            import gc
            gc.collect()

        except Exception:
            # Cleanup is best-effort by design; never propagate from here.
            pass


class WeimiquanDownloadThread(QThread):
    """Worker thread that drives a WeimiquanDownloader for one gallery URL."""

    def __init__(self, downloader, url, download_dir):
        super().__init__()
        self.downloader = downloader
        self.url = url
        self.download_dir = download_dir

    def run(self):
        """Fetch the page, locate its CloudFront images, and download them.

        All outcomes (including failures) are reported through the
        downloader's signals; this method never raises.
        """
        try:
            page_content = self.downloader.get_page_content(self.url)
            if not page_content:
                self.downloader.error_occurred.emit("无法获取页面内容")
                return

            # Pull title/author for the folder name, then scan for images.
            title, author = self.downloader.extract_page_info(page_content)
            found_urls = self.downloader.search_cloudfront_images(page_content)
            if not found_urls:
                self.downloader.error_occurred.emit("未找到任何CloudFront图片URL")
                return

            # Prefer the author's name for the subfolder; fall back to the
            # page title when the author could not be determined.
            folder_name = title if author == "未知作者" else author
            target_dir = os.path.join(self.download_dir, folder_name)

            self.downloader.download_images(found_urls, target_dir)

        except Exception as e:
            self.downloader.error_occurred.emit(f"下载过程中发生错误: {str(e)}")