# -*- coding: utf-8 -*-
"""
基础爬虫类
"""

import time
import random
import requests
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional, List
from urllib.parse import urljoin, urlparse
from fake_useragent import UserAgent
from utils.logger import get_logger, log_crawler_info, log_crawler_error
from config.settings import CRAWLER_CONFIG, PROXY_CONFIG

class BaseCrawler(ABC):
    """Abstract base class for HTTP crawlers.

    Wraps a persistent ``requests.Session`` and provides:

    - retry with linear back-off on failed requests,
    - configurable (optionally randomized) inter-request delay,
    - round-robin proxy rotation with optional basic auth,
    - random User-Agent rotation via ``fake_useragent``.

    Subclasses must implement :meth:`parse_page`. Instances may be used
    as context managers; the session is closed on exit.
    """

    def __init__(self, name: str = "BaseCrawler"):
        """Initialize session, config and proxies.

        Args:
            name: Crawler name used for logging.
        """
        self.name = name
        self.logger = get_logger(name)
        self.session = requests.Session()
        self.ua = UserAgent()
        # Round-robin cursor into self.proxies.
        self.proxy_index = 0

        # Pull tunables from the shared crawler config.
        self.timeout = CRAWLER_CONFIG["timeout"]
        self.retry_times = CRAWLER_CONFIG["retry_times"]
        self.retry_delay = CRAWLER_CONFIG["retry_delay"]
        self.request_delay = CRAWLER_CONFIG["request_delay"]
        self.random_delay = CRAWLER_CONFIG["random_delay"]

        # Default headers (User-Agent is re-randomized per request).
        self.session.headers.update(CRAWLER_CONFIG["default_headers"])

        # Build the proxy pool (may be empty).
        self._init_proxy()

        log_crawler_info(f"初始化爬虫: {name}")

    def _init_proxy(self):
        """Populate ``self.proxies`` from PROXY_CONFIG (empty list if disabled)."""
        if PROXY_CONFIG["enabled"] and PROXY_CONFIG["proxy_list"]:
            self.proxies = PROXY_CONFIG["proxy_list"]
            self.logger.info(f"启用代理，共 {len(self.proxies)} 个代理")
        else:
            self.proxies = []
            self.logger.info("未启用代理")

    def _get_random_user_agent(self) -> str:
        """Return a random User-Agent, falling back to the configured default.

        ``fake_useragent`` can raise (e.g. on a stale/unreachable data file),
        so any failure falls back to the static default header.
        """
        try:
            return self.ua.random
        except Exception:  # narrow from bare except: don't swallow SystemExit/KeyboardInterrupt
            return CRAWLER_CONFIG["default_headers"]["User-Agent"]

    def _get_proxy(self) -> Optional[Dict[str, str]]:
        """Return the next proxy mapping for requests, or None if no proxies.

        Rotates round-robin through ``self.proxies``; embeds basic-auth
        credentials in the proxy URL when a username is configured.
        """
        if not self.proxies:
            return None

        proxy = self.proxies[self.proxy_index % len(self.proxies)]
        self.proxy_index += 1

        if PROXY_CONFIG["proxy_auth"]["username"]:
            auth = f"{PROXY_CONFIG['proxy_auth']['username']}:{PROXY_CONFIG['proxy_auth']['password']}"
            # HTTP-scheme proxy URL is used for both http and https traffic.
            return {
                "http": f"http://{auth}@{proxy}",
                "https": f"http://{auth}@{proxy}"
            }

        return {"http": proxy, "https": proxy}

    def _delay(self):
        """Sleep between requests; optionally jitter to 50%-150% of the base delay."""
        if self.request_delay > 0:
            delay = self.request_delay
            if self.random_delay:
                delay = random.uniform(self.request_delay * 0.5, self.request_delay * 1.5)
            time.sleep(delay)

    def _make_request(self, url: str, method: str = "GET", **kwargs) -> Optional[requests.Response]:
        """Send an HTTP request with retry and linear back-off.

        Args:
            url: Target URL.
            method: HTTP method (e.g. "GET", "POST").
            **kwargs: Extra arguments forwarded to ``Session.request``.

        Returns:
            The ``Response`` on success (2xx/3xx), or ``None`` after all
            retries are exhausted. Non-2xx statuses raise internally via
            ``raise_for_status`` and count as failures.
        """
        # Rotate User-Agent on every request.
        self.session.headers["User-Agent"] = self._get_random_user_agent()

        # Attach the next proxy from the pool, if any.
        proxy = self._get_proxy()
        if proxy:
            kwargs["proxies"] = proxy

        # Caller-supplied timeout wins over the configured default.
        kwargs.setdefault("timeout", self.timeout)

        for attempt in range(self.retry_times + 1):
            try:
                log_crawler_info(f"请求 {method} {url} (尝试 {attempt + 1}/{self.retry_times + 1})")

                response = self.session.request(method, url, **kwargs)
                response.raise_for_status()

                log_crawler_info(f"请求成功: {url} - 状态码: {response.status_code}")
                return response

            except requests.exceptions.RequestException as e:
                log_crawler_error(f"请求失败: {url} - 错误: {str(e)}")

                if attempt < self.retry_times:
                    # Linear back-off: delay grows with the attempt number.
                    time.sleep(self.retry_delay * (attempt + 1))
                    continue
                else:
                    log_crawler_error(f"请求最终失败: {url}")
                    return None

        return None

    def get(self, url: str, **kwargs) -> Optional[requests.Response]:
        """Send a rate-limited GET request. Returns Response or None."""
        self._delay()
        return self._make_request(url, "GET", **kwargs)

    def post(self, url: str, **kwargs) -> Optional[requests.Response]:
        """Send a rate-limited POST request. Returns Response or None."""
        self._delay()
        return self._make_request(url, "POST", **kwargs)

    def get_html(self, url: str, encoding: Optional[str] = None) -> Optional[str]:
        """Fetch a page and return its body as text.

        Args:
            url: Target URL.
            encoding: Optional explicit character encoding; when given it
                overrides the encoding detected by ``requests``.

        Returns:
            The decoded HTML string, or ``None`` if the request failed.
        """
        response = self.get(url)
        if not response:
            return None

        if encoding:
            response.encoding = encoding

        return response.text

    def download_file(self, url: str, filepath: str) -> bool:
        """Stream-download a file to disk.

        Args:
            url: File URL.
            filepath: Destination path on disk.

        Returns:
            True on success, False on request or write failure.
        """
        response = self.get(url, stream=True)
        if not response:
            return False

        try:
            with open(filepath, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:  # skip keep-alive chunks
                        f.write(chunk)

            log_crawler_info(f"文件下载成功: {url} -> {filepath}")
            return True

        except Exception as e:
            log_crawler_error(f"文件下载失败: {url} - 错误: {str(e)}")
            return False
        finally:
            # Streamed responses hold the connection open until closed.
            response.close()

    def is_valid_url(self, url: str) -> bool:
        """Return True if ``url`` has both a scheme and a network location."""
        try:
            result = urlparse(url)
            return all([result.scheme, result.netloc])
        except Exception:  # narrow from bare except: don't swallow SystemExit/KeyboardInterrupt
            return False

    def normalize_url(self, url: str, base_url: str) -> str:
        """Resolve a possibly-relative URL against ``base_url``."""
        if not self.is_valid_url(url):
            return urljoin(base_url, url)
        return url

    @abstractmethod
    def parse_page(self, html: str, url: str) -> Dict[str, Any]:
        """Parse page content (abstract).

        Args:
            html: Raw HTML content.
            url: The page's URL.

        Returns:
            A dict of parsed results.
        """
        pass

    def crawl_page(self, url: str, encoding: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """Fetch a page and run :meth:`parse_page` on it.

        Args:
            url: Page URL.
            encoding: Optional explicit character encoding.

        Returns:
            The parse result dict, or ``None`` if fetching or parsing failed.
        """
        html = self.get_html(url, encoding)
        if not html:
            return None

        try:
            result = self.parse_page(html, url)
            log_crawler_info(f"页面解析成功: {url}")
            return result
        except Exception as e:
            log_crawler_error(f"页面解析失败: {url} - 错误: {str(e)}")
            return None

    def close(self):
        """Close the underlying HTTP session."""
        self.session.close()
        log_crawler_info(f"爬虫已关闭: {self.name}")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()