"""
基础爬虫类
提供通用的爬虫功能：请求、重试、错误处理等
"""
import time
import logging
import requests
import urllib3
import ssl
import urllib.request
import urllib.error
from typing import Optional, Dict, Any
from urllib.parse import urlparse, urljoin, urlencode
from urllib3.exceptions import InsecureRequestWarning

from spider.config.settings import (
    REQUEST_TIMEOUT,
    MAX_RETRIES,
    RETRY_DELAY,
    RETRY_BACKOFF,
    VERIFY_SSL,
)
from spider.config.user_agents import get_random_user_agent
from spider.core.rate_limiter import RateLimiter
from spider.core.proxy_manager import ProxyManager

# Suppress SSL warnings (when SSL verification is disabled)
if not VERIFY_SSL:
    urllib3.disable_warnings(InsecureRequestWarning)

logger = logging.getLogger(__name__)


class BaseSpider:
    """Base spider class.

    Provides the generic crawling machinery: rate-limited HTTP fetching
    (GET/POST via urllib.request, used to work around SSL errors during
    redirects), retry with exponential backoff, and a crawl/parse template
    that subclasses complete by implementing parse_html().
    """

    def __init__(
        self,
        rate_limiter: Optional[RateLimiter] = None,
        proxy_manager: Optional[ProxyManager] = None,
        storage_manager=None,  # lazy import to avoid a circular dependency
    ):
        # Import StorageManager lazily so Django models are not imported
        # while the Django application registry is still initializing.
        if storage_manager is None:
            from spider.data.storage import StorageManager
            storage_manager = StorageManager()

        self.rate_limiter = rate_limiter or RateLimiter()
        self.proxy_manager = proxy_manager or ProxyManager()
        self.storage_manager = storage_manager

        # The requests.Session is only used as a container for the default
        # request headers; the actual transport goes through urllib (see fetch()).
        self.session = requests.Session()
        self._setup_session()

        # Build a urllib opener whose HTTPS handler skips certificate
        # verification (used directly to work around SSL errors on redirects).
        # NOTE(review): verification is disabled here unconditionally,
        # ignoring the VERIFY_SSL setting — confirm that is intended.
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        # Lower OpenSSL's security level to tolerate legacy server ciphers/keys.
        ssl_context.set_ciphers('DEFAULT:@SECLEVEL=1')
        https_handler = urllib.request.HTTPSHandler(context=ssl_context)
        self.urllib_opener = urllib.request.build_opener(
            https_handler, urllib.request.HTTPHandler()
        )

    def _setup_session(self):
        """Configure the session (only used to manage default request headers)."""
        self.session.headers.update({
            'User-Agent': get_random_user_agent(),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        })

    def _build_response(
        self,
        status_code: int,
        headers,
        url: str,
        content: bytes,
    ) -> requests.Response:
        """Convert raw urllib response parts into a requests.Response.

        Shared by the success path and the POST-error path of fetch()
        (the original duplicated this conversion in both places).
        """
        req_response = requests.Response()
        req_response.status_code = status_code
        # Use a case-insensitive mapping, matching what requests itself
        # produces, so callers can look headers up regardless of case.
        req_response.headers = requests.structures.CaseInsensitiveDict(dict(headers))
        req_response.url = url
        req_response._content = content
        charset = (
            headers.get_content_charset()
            if hasattr(headers, 'get_content_charset')
            else None
        )
        req_response.encoding = charset or 'utf-8'
        return req_response

    def fetch(
        self,
        url: str,
        method: str = 'GET',
        params: Optional[Dict] = None,
        data: Optional[Dict] = None,
        headers: Optional[Dict] = None,
        use_proxy: bool = True,
    ) -> Optional[requests.Response]:
        """
        Send an HTTP request (via urllib.request, working around SSL
        errors on redirects).

        Args:
            url: Target URL.
            method: HTTP method (only GET and POST are supported).
            params: URL query parameters (GET requests only).
            data: POST form data as a dict (POST requests only).
            headers: Extra request headers; when given they are merged over
                the session defaults and the User-Agent is NOT re-randomized.
            use_proxy: Whether to use a proxy (currently not supported).

        Returns:
            A requests.Response object, or None on failure.
        """
        # Rate limiting.
        self.rate_limiter.wait()

        method_upper = method.upper()
        if method_upper not in ('GET', 'POST'):
            logger.warning(f"Unsupported HTTP method: {method}, only GET and POST are supported")
            return None

        try:
            # Prepare request headers.
            request_headers = self.session.headers.copy()
            if headers:
                request_headers.update(headers)
            else:
                # Rotate the User-Agent per request when the caller did
                # not supply custom headers.
                request_headers['User-Agent'] = get_random_user_agent()

            # Append query parameters (GET requests only).
            current_url = url
            if params and method_upper == 'GET':
                separator = '&' if '?' in current_url else '?'
                current_url = f"{current_url}{separator}{urlencode(params)}"

            # Encode the POST body as application/x-www-form-urlencoded.
            post_data = None
            if method_upper == 'POST' and data:
                post_data = urlencode(data).encode('utf-8')
                # Ensure Content-Type is set.
                if 'Content-Type' not in request_headers:
                    request_headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'

            # Follow redirects manually (at most 10 for GET; POST requests
            # are typically not redirected).
            max_redirects = 10 if method_upper == 'GET' else 1
            redirect_count = 0

            while redirect_count < max_redirects:
                try:
                    # Request(method=...) is the supported way to set the
                    # HTTP verb (replaces the get_method monkey-patch).
                    req = urllib.request.Request(
                        current_url,
                        data=post_data,
                        headers=request_headers,
                        method=method_upper,
                    )

                    # Send the request via urllib.
                    try:
                        response = self.urllib_opener.open(req, timeout=REQUEST_TIMEOUT)
                    except urllib.error.HTTPError as e:
                        # An HTTP error may still carry a redirect target
                        # or a useful response body.
                        if e.code in (301, 302, 303, 307, 308) and method_upper == 'GET':
                            redirect_url = e.headers.get('Location')
                            if redirect_url:
                                current_url = urljoin(current_url, redirect_url)
                                redirect_count += 1
                                logger.debug(f"Following redirect {redirect_count}: {redirect_url}")
                                continue
                        # POST responses can be meaningful even with an
                        # error status code — return the body to the caller.
                        if method_upper == 'POST':
                            try:
                                error_content = e.read()
                            except Exception:
                                # Body unreadable; fall through to the
                                # generic error handling below.
                                pass
                            else:
                                req_response = self._build_response(
                                    e.code, e.headers, current_url, error_content
                                )
                                logger.warning(f"HTTP {e.code} for POST request: {current_url}")
                                return req_response
                        logger.error(f"HTTP error: {current_url} - {e.code}")
                        return None

                    # Read the body, then close exactly once (the original
                    # closed the response twice on every path).
                    try:
                        content = response.read()
                        status_code = response.getcode()
                        response_headers = response.headers
                    finally:
                        response.close()

                    # Reject error status codes.
                    if status_code >= 400:
                        logger.error(f"HTTP error {status_code}: {current_url}")
                        return None

                    req_response = self._build_response(
                        status_code, response_headers, current_url, content
                    )
                    logger.info(f"Successfully fetched: {current_url} (Status: {status_code}, Method: {method_upper})")
                    return req_response

                except urllib.error.URLError as e:
                    # Distinguish timeouts from other URL errors.
                    error_msg = str(e).lower()
                    if 'timeout' in error_msg or 'timed out' in error_msg:
                        logger.warning(f"Timeout error for {current_url}: {e}")
                    else:
                        logger.error(f"URL error: {current_url} - {e}")
                    return None
                except Exception as e:
                    error_msg = str(e).lower()
                    if 'timeout' in error_msg or 'timed out' in error_msg:
                        logger.warning(f"Timeout error for {current_url}: {e}")
                    else:
                        logger.error(f"Error fetching {current_url}: {e}")
                    return None

            logger.error(f"Too many redirects for {url[:50]}")
            return None

        except Exception as e:
            logger.error(f"Unexpected error fetching {url[:50]}: {e}")
            return None

    def fetch_with_retry(
        self,
        url: str,
        max_retries: int = MAX_RETRIES,
        **kwargs
    ) -> Optional[requests.Response]:
        """
        Fetch with retries and exponential backoff.

        Args:
            url: Target URL.
            max_retries: Maximum number of retries.
            **kwargs: Extra arguments forwarded to fetch().

        Returns:
            A requests.Response object, or None on failure.
        """
        for attempt in range(max_retries + 1):
            response = self.fetch(url, **kwargs)

            if response is not None:
                return response

            if attempt < max_retries:
                # Exponential backoff: RETRY_DELAY * RETRY_BACKOFF ** attempt.
                wait_time = RETRY_DELAY * (RETRY_BACKOFF ** attempt)
                logger.warning(
                    f"Retry {attempt + 1}/{max_retries} for {url[:50]} "
                    f"after {wait_time:.2f} seconds"
                )
                time.sleep(wait_time)
            else:
                logger.error(f"Failed to fetch {url[:50]} after {max_retries} retries")

        return None

    def parse_html(self, html: str) -> Optional[Any]:
        """
        Parse HTML content (must be implemented by subclasses).

        Args:
            html: HTML content.

        Returns:
            The parsed data.
        """
        raise NotImplementedError("Subclasses must implement parse_html method")

    def crawl(self, url: str) -> Optional[Dict[str, Any]]:
        """
        Fetch a URL and parse the result.

        Args:
            url: Target URL.

        Returns:
            The parsed data dict, or None on failure.
        """
        # Fetch the HTML.
        response = self.fetch_with_retry(url)
        if response is None:
            return None

        # Parse the HTML.
        try:
            data = self.parse_html(response.text)
            return data
        except Exception as e:
            logger.error(f"Failed to parse HTML from {url[:50]}: {e}")
            return None

    def close(self):
        """Shut down the spider and release resources."""
        self.session.close()
        logger.info("Spider closed")

