"""
基础爬虫模块

定义爬虫的通用接口和基础功能。
"""

import time
import requests
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
from datetime import datetime
import traceback

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException

from ..models.data_model import ScrapedData, ScrapingResult, WebsiteConfig
from ..utils.logger import get_logger
from ..utils.text_processor import text_processor
from config.settings import SCRAPER_CONFIG, Constants


class BaseScraper(ABC):
    """Abstract base class for website scrapers.

    Provides the shared plumbing concrete scrapers need: a pooled
    ``requests.Session``, a lazily created headless Chrome driver,
    retry-with-backoff, selector-based extraction helpers, and basic
    data validation. Subclasses implement ``_scrape_data`` only.

    Instances are context managers; ``cleanup`` releases the driver
    and the HTTP session.
    """

    def __init__(self, config: WebsiteConfig):
        """
        Initialize the scraper.

        Args:
            config: Website configuration (name, url, delay, timeout,
                max_retries).
        """
        self.config = config
        self.logger = get_logger(f"scraper.{config.name}")
        self.text_processor = text_processor
        self.text_processor.set_logger(self.logger)

        # Shared HTTP session (connection pooling + common headers).
        self.session = self._create_session()

        # Selenium driver is expensive; create it on first use only.
        self._driver: Optional[webdriver.Chrome] = None

        self.logger.info(f"初始化爬虫: {config.name}")

    def _create_session(self) -> requests.Session:
        """Create a ``requests.Session`` with the configured headers."""
        session = requests.Session()
        session.headers.update(SCRAPER_CONFIG['HEADERS'])
        session.headers['User-Agent'] = SCRAPER_CONFIG['USER_AGENT']
        return session

    def _get_driver(self) -> webdriver.Chrome:
        """Return the Selenium driver, creating it on first access."""
        if self._driver is None:
            self._driver = self._create_driver()
        return self._driver

    def _create_driver(self) -> webdriver.Chrome:
        """Create and configure a Chrome WebDriver.

        Returns:
            A ready-to-use ``webdriver.Chrome`` instance.

        Raises:
            Exception: Propagated if the driver cannot be created
                (e.g. ChromeDriver download or launch failure).
        """
        options = Options()

        # Honour the headless setting from configuration.
        if SCRAPER_CONFIG['SELENIUM']['HEADLESS']:
            options.add_argument('--headless')

        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')
        options.add_argument('--disable-gpu')
        options.add_argument('--window-size=1920,1080')
        options.add_argument('--user-agent=' + SCRAPER_CONFIG['USER_AGENT'])

        # Disable image loading and notifications to speed up page loads.
        prefs = {
            "profile.managed_default_content_settings.images": 2,
            "profile.default_content_setting_values.notifications": 2
        }
        options.add_experimental_option("prefs", prefs)

        try:
            # webdriver-manager downloads/caches a matching ChromeDriver.
            service = Service(ChromeDriverManager().install())
            driver = webdriver.Chrome(service=service, options=options)
            driver.implicitly_wait(SCRAPER_CONFIG['SELENIUM']['IMPLICIT_WAIT'])
            driver.set_page_load_timeout(SCRAPER_CONFIG['SELENIUM']['PAGE_LOAD_TIMEOUT'])
            return driver
        except Exception as e:
            self.logger.error(f"创建Chrome驱动失败: {e}")
            raise

    def scrape(self) -> ScrapingResult:
        """
        Run the scrape and wrap the outcome in a ``ScrapingResult``.

        All exceptions from ``_scrape_data`` are caught and reported via
        the result object rather than propagated; ``processing_time`` is
        always populated.

        Returns:
            The scraping result (success flag, data, error, timing).
        """
        start_time = time.time()
        result = ScrapingResult(url=self.config.url, success=False)

        try:
            self.logger.info(f"开始爬取: {self.config.url}")

            # Delegate to the subclass-specific scraping logic.
            data_list = self._scrape_data()

            for data in data_list:
                result.add_data(data)

            result.success = True
            self.logger.info(f"爬取完成: 获取 {len(data_list)} 条数据")

        except Exception as e:
            result.success = False
            result.error_message = str(e)
            self.logger.error(f"爬取失败: {e}")
            self.logger.debug(f"错误详情: {traceback.format_exc()}")

        finally:
            # Set exactly once, on both success and failure paths.
            result.processing_time = time.time() - start_time

        return result

    @abstractmethod
    def _scrape_data(self) -> List[ScrapedData]:
        """
        Site-specific scraping logic (must be implemented by subclasses).

        Returns:
            The list of scraped data items.
        """
        pass

    def _make_request(self, url: str, method: str = 'GET',
                     **kwargs) -> Optional[requests.Response]:
        """
        Send an HTTP request through the shared session.

        Applies the configured inter-request delay and timeout. Errors
        are logged and swallowed (best-effort semantics).

        Args:
            url: Request URL.
            method: HTTP method (default ``'GET'``).
            **kwargs: Extra arguments forwarded to ``Session.request``.

        Returns:
            The response on a success status code, otherwise ``None``.
        """
        try:
            # Rate limiting: pause before every request if configured.
            if self.config.delay > 0:
                time.sleep(self.config.delay)

            response = self.session.request(
                method=method,
                url=url,
                timeout=self.config.timeout,
                **kwargs
            )

            # Only statuses listed in Constants count as success.
            if response.status_code in Constants.SUCCESS_STATUS_CODES:
                return response
            else:
                self.logger.warning(f"HTTP {response.status_code}: {url}")
                return None

        except requests.exceptions.Timeout:
            self.logger.error(f"请求超时: {url}")
            return None
        except requests.exceptions.ConnectionError:
            self.logger.error(f"连接错误: {url}")
            return None
        except Exception as e:
            self.logger.error(f"请求失败: {url}, 错误: {e}")
            return None

    def _get_page_with_selenium(self, url: str, wait_time: int = 5) -> Optional[str]:
        """
        Fetch a page with Selenium (for JavaScript-rendered content).

        Args:
            url: Page URL.
            wait_time: Fixed sleep (seconds) allowed for JS rendering
                before checking for the ``<body>`` element.

        Returns:
            The page HTML, a partial page on timeout, or ``None`` on
            driver errors.
        """
        driver: Optional[webdriver.Chrome] = None
        try:
            driver = self._get_driver()
            driver.get(url)

            # Fixed grace period for asynchronous rendering.
            time.sleep(wait_time)

            # Then confirm the document body is present.
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )

            return driver.page_source

        except TimeoutException:
            # Return whatever rendered so far; better than nothing.
            self.logger.warning(f"页面加载超时: {url}")
            return driver.page_source if driver else None
        except WebDriverException as e:
            self.logger.error(f"Selenium错误: {url}, 错误: {e}")
            return None
        except Exception as e:
            self.logger.error(f"获取页面失败: {url}, 错误: {e}")
            return None

    def _extract_text_by_selector(self, element, selector: str) -> str:
        """
        Extract cleaned text from the first element matching a selector.

        Args:
            element: Parent WebElement to search within.
            selector: CSS selector.

        Returns:
            The cleaned text, or ``""`` if the selector matches nothing.
        """
        try:
            found_element = element.find_element(By.CSS_SELECTOR, selector)
            text = found_element.text.strip()
            return self.text_processor.clean_text(text)
        except Exception:
            # Missing elements are expected; treat as empty.
            return ""

    def _extract_attribute_by_selector(self, element, selector: str,
                                     attribute: str) -> str:
        """
        Extract an attribute value from the first element matching a selector.

        Args:
            element: Parent WebElement to search within.
            selector: CSS selector.
            attribute: Attribute name (e.g. ``"href"``).

        Returns:
            The attribute value, or ``""`` if absent or not matched.
        """
        try:
            found_element = element.find_element(By.CSS_SELECTOR, selector)
            return found_element.get_attribute(attribute) or ""
        except Exception:
            # Missing elements are expected; treat as empty.
            return ""

    def _validate_data(self, data: ScrapedData) -> bool:
        """
        Validate a scraped record.

        Required fields (``title``, ``source_url``) must be non-empty;
        over-length fields are logged as warnings but do NOT cause
        rejection.

        Args:
            data: The record to validate.

        Returns:
            ``True`` if the required fields are present, else ``False``.
        """
        # Required fields.
        if not data.title or not data.source_url:
            self.logger.warning(f"数据缺少必填字段: {data}")
            return False

        # Soft length limits per field (warn only).
        validation_config = {
            'title': 500,
            'registration_time': 50,
            'reply_unit': 200,
            'reply_time': 50,
        }

        for field, max_length in validation_config.items():
            # Guard against None attribute values before len().
            value = getattr(data, field, "") or ""
            if len(value) > max_length:
                self.logger.warning(f"字段 {field} 长度超限: {len(value)}/{max_length}")

        return True

    def _retry_with_backoff(self, func, *args, max_retries: Optional[int] = None,
                           **kwargs) -> Any:
        """
        Call ``func`` with exponential backoff on failure.

        Waits ``2 ** attempt`` seconds between attempts. The last
        exception is re-raised once retries are exhausted.

        Args:
            func: Callable to invoke.
            *args: Positional arguments for ``func``.
            max_retries: Maximum retry count; defaults to
                ``config.max_retries`` when ``None``.
            **kwargs: Keyword arguments for ``func``.

        Returns:
            Whatever ``func`` returns on the first successful attempt.

        Raises:
            Exception: The last exception raised by ``func``.
        """
        if max_retries is None:
            max_retries = self.config.max_retries

        for attempt in range(max_retries + 1):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if attempt == max_retries:
                    raise e

                # Exponential backoff: 1s, 2s, 4s, ...
                wait_time = 2 ** attempt
                self.logger.warning(f"重试 {attempt + 1}/{max_retries + 1}: 等待 {wait_time}秒")
                time.sleep(wait_time)

    def cleanup(self) -> None:
        """Release the Selenium driver and close the HTTP session."""
        if self._driver:
            try:
                self._driver.quit()
            except Exception as e:
                # Best effort: a failed quit must not block cleanup.
                self.logger.warning(f"关闭驱动时出错: {e}")
            finally:
                self._driver = None

        if self.session:
            self.session.close()

        self.logger.info("资源清理完成")

    def __enter__(self):
        """Context manager entry: return self."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit: always release resources."""
        self.cleanup()
