import os
import random
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from queue import Empty, Queue
from urllib.parse import urlparse

import requests

import image_pro.config as config

class DownloadManager:
    """Coordinates multi-threaded image downloads for database-backed tasks.

    Pending download rows are fetched from ``db`` in batches, placed on an
    internal queue, and consumed by a pool of worker threads.  Tasks are
    identified by ``task_uid`` and can be started, paused, resumed, and
    stopped; task/download states are mirrored into the database using the
    ``STATUS_*`` constants from ``config``.
    """

    def __init__(self, db, max_workers=config.MAX_DOWNLOAD_THREADS):
        """Create a manager bound to *db* using at most *max_workers* threads."""
        self.db = db
        self.max_workers = max_workers
        self.executor = None            # lazily created ThreadPoolExecutor
        self.active_tasks = {}          # {task_uid: is_active}
        self.download_queue = Queue()   # items are (download_row, task_uid)
        self.lock = threading.Lock()    # guards active_tasks and executor

    def start_task(self, task_uid):
        """Start (or restart) downloading for *task_uid*."""
        # Mark the task as downloading in the database.
        self.db.update_task_state(task_uid, config.STATUS_DOWNLOADING)

        with self.lock:
            # Flag the task as active so workers will process its items.
            self.active_tasks[task_uid] = True

            # Lazily (re)create the executor and its worker loops.
            # NOTE(review): `_shutdown` is a private CPython attribute; there
            # is no public "is shut down" query on ThreadPoolExecutor.
            if self.executor is None or self.executor._shutdown:
                self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
                for _ in range(self.max_workers):
                    self.executor.submit(self._worker)

            # Queue this task's pending download items.
            self._load_downloads(task_uid)

    def pause_task(self, task_uid):
        """Pause *task_uid*; workers skip queued items of inactive tasks."""
        with self.lock:
            if task_uid in self.active_tasks:
                self.active_tasks[task_uid] = False
                self.db.update_task_state(task_uid, config.STATUS_PAUSED)

    def resume_task(self, task_uid):
        """Resume a previously paused *task_uid* and refill the queue."""
        with self.lock:
            if task_uid in self.active_tasks:
                self.active_tasks[task_uid] = True
                self.db.update_task_state(task_uid, config.STATUS_DOWNLOADING)
                self._load_downloads(task_uid)

    def stop_all(self):
        """Pause every task, drain the queue, and shut the executor down."""
        with self.lock:
            for task_uid in list(self.active_tasks.keys()):
                self.active_tasks[task_uid] = False
                self.db.update_task_state(task_uid, config.STATUS_PAUSED)

            # Drain queued items so idle workers can exit.  get_nowait()
            # only raises queue.Empty, which signals the queue is drained
            # (the original bare `except: pass` could mask real errors).
            while True:
                try:
                    self.download_queue.get_nowait()
                except Empty:
                    break
                self.download_queue.task_done()

            # Shut down the executor; workers finish their current item.
            if self.executor:
                self.executor.shutdown(wait=False)
                self.executor = None

    def _load_downloads(self, task_uid):
        """Queue up to 100 pending download rows for an active *task_uid*."""
        pending_downloads = self.db.get_pending_downloads(task_uid, limit=100)
        for download in pending_downloads:
            # Only enqueue while the task is still active.
            if self.active_tasks.get(task_uid, False):
                self.download_queue.put((download, task_uid))

    def _worker(self):
        """Worker loop: consume queued downloads until all tasks are inactive.

        Runs inside the executor.  Exits when the queue stays empty for one
        second and no task is marked active (also true when no tasks exist,
        since ``all()`` of an empty iterable is True).
        """
        while True:
            try:
                download, task_uid = self.download_queue.get(timeout=1)
            except Empty:
                # BUG FIX: Queue.get(timeout=...) raises queue.Empty, not
                # TimeoutError, so the original shutdown check never ran and
                # workers spun forever printing "Worker error: ".
                if all(not active for active in self.active_tasks.values()):
                    break
                continue

            try:
                # Skip items belonging to paused/stopped tasks.
                if not self.active_tasks.get(task_uid, False):
                    continue

                # Perform the download and persist its final state.
                success = self._download_image(download)
                state = config.STATUS_COMPLETED if success else config.STATUS_FAILED
                self.db.update_download_state(download[0], state)

                # Update the owning task's progress counters.
                if success:
                    task = self.db.get_task_by_uid(task_uid)
                    if task:
                        new_downloaded = task[10] + 1  # "downloaded" column index
                        self.db.update_task_progress(task_uid, new_downloaded)

                        # Mark the whole task complete once every image is in.
                        if new_downloaded >= task[9]:  # "total_images" column index
                            self.db.update_task_state(task_uid, config.STATUS_COMPLETED)
                            self.active_tasks[task_uid] = False

                # Keep the queue topped up so workers never starve.
                if self.download_queue.qsize() < self.max_workers * 2:
                    self._load_downloads(task_uid)
            except Exception as e:
                # Log and keep the worker alive; one bad item must not kill it.
                print(f"Worker error: {e}")
            finally:
                # Balance every successful get() exactly once, even on error
                # (the original skipped task_done() whenever processing raised,
                # breaking Queue.join() accounting).
                self.download_queue.task_done()

    def _download_image(self, download):
        """Download the single image described by a *download* DB row.

        Returns True on success (or when the file already exists on disk),
        False after exhausting retries or on any unrecoverable error.
        """
        download_id, task_uid, url, path, filename, state, create_time, update_time = download

        # The owning task row supplies the Referer to send with requests.
        task = self.db.get_task_by_uid(task_uid)
        if not task:
            return False

        # Browser User-Agents rotated per attempt to look less like a bot.
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/120.0.0.0 Safari/537.36'
        ]

        # Browser-like request headers to reduce the chance of being blocked.
        headers = {
            'User-Agent': random.choice(user_agents),
            'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'image',
            'Sec-Fetch-Mode': 'no-cors',
            'Sec-Fetch-Site': 'same-origin',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'DNT': '1',  # Do Not Track
            'Upgrade-Insecure-Requests': '1'
        }

        # Use the task's referer when present; otherwise fall back to the
        # image host's own root URL.
        referer = task[2]  # "refer" column index
        if referer:
            headers['Referer'] = referer
        else:
            parsed_url = urlparse(url)
            headers['Referer'] = f"{parsed_url.scheme}://{parsed_url.netloc}/"

        try:
            # Ensure the destination directory exists.
            os.makedirs(os.path.dirname(path), exist_ok=True)

            # Already downloaded earlier: treat as success.
            if os.path.exists(path):
                return True

            max_retries = 3
            retry_count = 0

            # Close the session (and its pooled connections) on every exit
            # path — the original leaked it.
            with requests.Session() as session:
                while retry_count < max_retries:
                    try:
                        # Random delay to avoid hammering the server.
                        time.sleep(random.uniform(0.5, 2.0))

                        # Fresh User-Agent for every attempt.
                        headers['User-Agent'] = random.choice(user_agents)

                        # Plausible cookies; some sites reject cookieless clients.
                        cookies = {
                            'visited': '1',
                            'session_id': f"{random.randint(10000, 99999)}",
                            'screen_res': f"{random.choice(['1920x1080', '1366x768', '2560x1440'])}"
                        }

                        response = session.get(url, headers=headers, cookies=cookies, stream=True, timeout=30)
                        response.raise_for_status()

                        # Reject non-image payloads (e.g. HTML error pages).
                        content_type = response.headers.get('Content-Type', '')
                        if not content_type.startswith(('image/', 'application/octet-stream')):
                            print(f"非图片内容类型: {content_type} for {url}")
                            retry_count += 1
                            continue

                        # Fill in a missing file extension from the Content-Type.
                        from image_pro.image_parser import get_extension_from_content_type
                        file_ext = os.path.splitext(path)[1].lower()
                        if not file_ext or file_ext == '.':
                            correct_ext = get_extension_from_content_type(content_type)
                            path = os.path.splitext(path)[0] + correct_ext

                        # Stream the body to disk in 8 KiB chunks.
                        with open(path, 'wb') as f:
                            for chunk in response.iter_content(chunk_size=8192):
                                if chunk:
                                    f.write(chunk)

                        # Files under 1 KiB are likely error pages, not images.
                        if os.path.getsize(path) < 1024:
                            print(f"文件过小，可能是错误页面: {url}")
                            os.remove(path)
                            retry_count += 1
                            continue

                        return True
                    except requests.exceptions.RequestException as e:
                        retry_count += 1
                        error_msg = str(e)
                        print(f"下载重试 ({retry_count}/{max_retries}) {url}: {error_msg}")

                        # Prefer the real HTTP status code (available on
                        # HTTPError via e.response) over fragile substring
                        # matching, which stays as a fallback only.
                        status = getattr(getattr(e, 'response', None), 'status_code', None)
                        if status == 403 or (status is None and '403' in error_msg):
                            # Forbidden: likely anti-scraping, back off longer.
                            print(f"遇到403错误，可能是反爬虫机制，等待时间更长")
                            time.sleep(5 + random.uniform(1, 5))  # wait 6-10 s
                        elif status == 429 or (status is None and '429' in error_msg):
                            # Too Many Requests: back off even longer.
                            print(f"遇到429错误，请求过于频繁，等待更长时间")
                            time.sleep(10 + random.uniform(5, 15))  # wait 15-25 s
                        elif (status is not None and 500 <= status < 600) or \
                                (status is None and '5' in error_msg[:3]):
                            # 5xx server error: moderate back-off.
                            time.sleep(3 + random.uniform(1, 3))  # wait 4-6 s
                        else:
                            time.sleep(2 + random.uniform(0, 2))  # wait 2-4 s

            print(f"下载失败，已达到最大重试次数: {url}")
            return False
        except Exception as e:
            print(f"Error downloading {url}: {e}")
            return False