"""
下载器模块

负责下载网页内容，处理HTTP请求与响应
"""

import time
import random
import logging
import requests
import threading
import concurrent.futures
from urllib.parse import urlparse
# from fake_useragent import UserAgent

logger = logging.getLogger('downloader')

class Downloader:
    """Web-page downloader.

    Wraps a ``requests.Session`` with:
      * per-domain request throttling (``request_delay`` seconds),
      * retry with randomized linear backoff (``retry_times`` attempts),
      * optional proxy rotation loaded from a file,
      * rotating ``User-Agent`` headers loaded from a file with a
        built-in fallback list.
    """

    # Fallback User-Agent strings used when the configured UA file is
    # missing, unreadable, or empty.  Guarantees the pool is never empty
    # (an empty pool would make _get_random_user_agent() return None and
    # requests would then send the request without a User-Agent header).
    DEFAULT_USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    ]

    def __init__(self, config):
        """Initialize the downloader.

        Args:
            config: configparser-style object; read with ``get`` /
                ``getboolean`` using section/option/fallback.
        """
        self.config = config

        # Read tunables from config (with safe fallbacks).
        self.max_workers = int(config.get('DEFAULT', 'max_workers', fallback=8))
        self.retry_times = int(config.get('DEFAULT', 'retry_times', fallback=3))
        self.request_delay = float(config.get('DEFAULT', 'request_delay', fallback=2))
        self.timeout = int(config.get('DEFAULT', 'timeout', fallback=30))

        # Proxy settings: only load the proxy list when enabled.
        self.proxy_enabled = config.getboolean('DEFAULT', 'proxy_enabled', fallback=False)
        self.proxies = []
        if self.proxy_enabled:
            self._load_proxies()

        # User-Agent pool (file-based, with built-in defaults).
        self.user_agents = []
        self._load_user_agents()

        # Thread pool for concurrent downloads (shut down in close()).
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers)

        # Shared session: connection pooling / keep-alive across requests.
        self.session = requests.Session()

        # Lock protecting the per-domain last-request-time table.
        self.lock = threading.Lock()

        # domain -> unix timestamp of the most recent request.
        self.last_request_time = {}

        logger.info(f"下载器初始化完成，最大并发数: {self.max_workers}, 重试次数: {self.retry_times}")

    def _load_user_agents(self):
        """Load the User-Agent pool from the configured file.

        Falls back to ``DEFAULT_USER_AGENTS`` when the file is missing,
        unreadable, or empty, so the pool is never left empty.  (The
        previous implementation was entirely commented out, which left
        ``self.user_agents`` empty and silently disabled UA rotation.)
        """
        try:
            ua_file = self.config.get('DEFAULT', 'user_agents', fallback='./config/user_agents.txt')
            with open(ua_file, 'r', encoding='utf-8') as f:
                self.user_agents = [line.strip() for line in f if line.strip()]
        except Exception as e:
            logger.error(f"加载User-Agent失败: {e}，将使用默认值")
            self.user_agents = []

        if not self.user_agents:
            # File absent or empty: use the built-in defaults.
            self.user_agents = list(self.DEFAULT_USER_AGENTS)

        logger.info(f"加载了 {len(self.user_agents)} 个User-Agent")

    def _load_proxies(self):
        """Load the proxy list (one proxy URL per line) from the configured file."""
        try:
            proxy_file = self.config.get('PROXY', 'proxy_file', fallback='./config/proxies.txt')
            with open(proxy_file, 'r', encoding='utf-8') as f:
                self.proxies = [line.strip() for line in f if line.strip()]

            logger.info(f"加载了 {len(self.proxies)} 个代理")
        except Exception as e:
            # Best-effort: a missing proxy file just means no proxies.
            logger.error(f"加载代理失败: {e}")

    def _get_random_user_agent(self):
        """Return a random User-Agent string, or None if the pool is empty."""
        return random.choice(self.user_agents) if self.user_agents else None

    def _get_random_proxy(self):
        """Return a random proxy as a requests-style dict, or None if no proxies."""
        if not self.proxies:
            return None

        proxy = random.choice(self.proxies)
        # requests expects a scheme -> proxy-URL mapping.
        return {
            'http': proxy,
            'https': proxy
        }

    def _respect_robots_txt(self, url):
        """Check robots.txt rules for *url*.

        Args:
            url: URL to check.

        Returns:
            bool: True if crawling is allowed.

        TODO: implement actual robots.txt fetching/parsing
        (e.g. via urllib.robotparser); currently always allows.
        """
        return True

    def _enforce_delay(self, domain):
        """Sleep as needed so consecutive requests to *domain* are spaced
        at least ``request_delay`` seconds apart, then record the new
        request timestamp.

        NOTE(review): the lock is held while sleeping, so a delay for one
        domain also blocks throttling bookkeeping for all other domains —
        confirm whether cross-domain serialization is intended.

        Args:
            domain: network location (host[:port]) of the target URL.
        """
        with self.lock:
            current_time = time.time()
            if domain in self.last_request_time:
                elapsed = current_time - self.last_request_time[domain]
                if elapsed < self.request_delay:
                    sleep_time = self.request_delay - elapsed
                    logger.debug(f"延迟请求 {domain} {sleep_time:.2f} 秒")
                    time.sleep(sleep_time)

            self.last_request_time[domain] = time.time()

    def download(self, url):
        """Download *url* and return its HTML content.

        Applies robots.txt check, per-domain throttling, random UA and
        (optionally) proxy, and retries with randomized backoff on
        request failures.

        Args:
            url: URL to download.

        Returns:
            str HTML content, or None on failure / non-HTML content /
            robots.txt disallow.
        """
        # Honor robots.txt (currently a permissive stub).
        if not self._respect_robots_txt(url):
            logger.warning(f"根据robots.txt规则，不允许爬取: {url}")
            return None

        # Throttle per domain.
        domain = urlparse(url).netloc
        self._enforce_delay(domain)

        # Browser-like request headers with a rotating User-Agent.
        headers = {
            'User-Agent': self._get_random_user_agent(),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        }

        proxies = self._get_random_proxy() if self.proxy_enabled else None

        # Retry loop: retry_times retries on top of the first attempt.
        for attempt in range(self.retry_times + 1):
            try:
                logger.info(f"下载 {url} (尝试 {attempt + 1}/{self.retry_times + 1})")

                response = self.session.get(
                    url,
                    headers=headers,
                    proxies=proxies,
                    timeout=self.timeout,
                    verify=True
                )

                # Raise on 4xx/5xx so the retry logic below handles them.
                response.raise_for_status()

                # Skip non-HTML payloads (images, PDFs, JSON, ...).
                content_type = response.headers.get('Content-Type', '')
                if 'text/html' not in content_type and 'application/xhtml+xml' not in content_type:
                    logger.warning(f"非HTML内容: {url}, Content-Type: {content_type}")
                    return None

                # ISO-8859-1 is requests' default guess when the server
                # omits charset; use the content-sniffed encoding instead.
                if response.encoding == 'ISO-8859-1':
                    response.encoding = response.apparent_encoding

                # Decode body text, guaranteeing a str return type.
                try:
                    html_content = response.text
                    if not isinstance(html_content, str):
                        html_content = str(html_content)

                    logger.info(f"下载成功: {url}, 大小: {len(html_content)} 字符")
                    return html_content
                except Exception as e:
                    logger.error(f"解析HTML内容失败: {e}")
                    return None

            except requests.exceptions.RequestException as e:
                logger.warning(f"下载失败: {url}, 错误: {e}, 尝试: {attempt + 1}/{self.retry_times + 1}")

                if attempt < self.retry_times:
                    # Randomized, linearly growing backoff before retry.
                    retry_delay = self.request_delay * (attempt + 1) * (0.5 + random.random())
                    logger.debug(f"等待 {retry_delay:.2f} 秒后重试")
                    time.sleep(retry_delay)

                    # Rotate identity (UA/proxy) before retrying.
                    headers['User-Agent'] = self._get_random_user_agent()
                    if self.proxy_enabled:
                        proxies = self._get_random_proxy()
                else:
                    logger.error(f"下载失败，已达到最大重试次数: {url}")
                    return None

        return None

    def close(self):
        """Shut down the thread pool and close the HTTP session."""
        self.executor.shutdown(wait=True)
        self.session.close()
        logger.info("下载器已关闭")
