"""
浙江政务服务网爬虫

专门用于爬取浙江政务服务网的数据。
"""

import time
from typing import List, Optional
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from bs4 import BeautifulSoup

from .base_scraper import BaseScraper
from ..models.data_model import ScrapedData
from ..utils.logger import get_logger
from ..utils.font_decoder import FontAntiCrawlHandler


class ZJZWFWScraper(BaseScraper):
    """Scraper for the Zhejiang government services portal (zjzwfw).

    Drives a Selenium browser to load the listing page, defeats the site's
    custom-font obfuscation via ``FontAntiCrawlHandler``, and extracts table
    rows into :class:`ScrapedData` records, with optional scroll-loading and
    pagination support driven by ``config.selectors['special_config']``.
    """

    def __init__(self, config):
        """Initialize the scraper from a site config object.

        Args:
            config: Site configuration; must expose ``url`` and a
                ``selectors`` dict (optionally containing ``special_config``
                with ``page_load_wait``, ``scroll_load`` and ``pagination``).
        """
        super().__init__(config)
        self.logger = get_logger("scraper.zjzwfw")

        # Seconds to wait after the initial Selenium page load.
        self.page_load_wait = config.selectors.get('special_config', {}).get('page_load_wait', 5)

        # Infinite-scroll / "load more" configuration.
        self.scroll_config = config.selectors.get('special_config', {}).get('scroll_load', {})

        # Pagination configuration (enabled, max_pages, delays).
        self.pagination_config = config.selectors.get('special_config', {}).get('pagination', {})

        # Anti-anti-crawler helper: decodes the site's obfuscated web font.
        self.anti_crawler = FontAntiCrawlHandler()
        # Glyph -> real character mapping, filled by _handle_anti_crawler().
        self.font_mapping_table = {}

    def _scrape_data(self) -> List[ScrapedData]:
        """Scrape the portal, dispatching to single-page or paginated mode.

        Returns:
            List of scraped records (empty if the page could not be loaded).
        """
        self.logger.info("开始爬取浙江政务服务网数据")

        # Load the page with Selenium (the site is JS-rendered).
        page_source = self._get_page_with_selenium(
            self.config.url,
            wait_time=self.page_load_wait
        )

        if not page_source:
            self.logger.error("无法获取页面内容")
            return []

        # Decode/neutralize the font obfuscation before reading any text.
        self._handle_anti_crawler(page_source)

        # Trigger lazy-loaded content if the config asks for it.
        if self.scroll_config.get('enabled', False):
            self._handle_scroll_loading()

        all_data = []

        # Log the pagination settings so misconfiguration is easy to spot.
        self.logger.info(f"翻页配置: {self.pagination_config}")
        pagination_enabled = self.pagination_config.get('enabled', False)
        max_pages = self.pagination_config.get('max_pages', 1)
        self.logger.info(f"翻页启用状态: {pagination_enabled}, 最大页数: {max_pages}")

        if pagination_enabled and max_pages > 1:
            # Walk through multiple result pages.
            self.logger.info("使用翻页模式爬取数据")
            all_data = self._scrape_with_pagination()
        else:
            # Single page only.
            self.logger.info("使用单页模式爬取数据")
            data_list = self._parse_page_data()
            all_data.extend(data_list)

        self.logger.info(f"成功解析 {len(all_data)} 条数据")
        return all_data

    def _handle_scroll_loading(self) -> None:
        """Scroll to the page bottom repeatedly to trigger lazy loading.

        Stops when a "load more" button is missing/disabled, when the page
        height stops growing, or after ``max_scroll_attempts`` iterations.
        """
        driver = self._get_driver()
        max_attempts = self.scroll_config.get('max_scroll_attempts', 10)
        pause_time = self.scroll_config.get('scroll_pause_time', 2)

        self.logger.info("开始处理滚动加载")

        # Track page height in a local so no state leaks between invocations
        # (previously stored on self, which made a second scrape on the same
        # instance stop after its very first scroll).
        last_height = None

        for attempt in range(max_attempts):
            try:
                # Jump to the bottom of the document.
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(pause_time)

                # Click an explicit "load more" button when one is configured.
                load_more_selector = self.config.selectors.get('load_more')
                if load_more_selector:
                    try:
                        load_more_btn = driver.find_element(By.CSS_SELECTOR, load_more_selector)
                        if load_more_btn.is_displayed() and load_more_btn.is_enabled():
                            load_more_btn.click()
                            time.sleep(pause_time)
                            self.logger.debug(f"点击加载更多按钮 (尝试 {attempt + 1})")
                        else:
                            self.logger.debug("没有更多内容可加载")
                            break
                    except NoSuchElementException:
                        self.logger.debug("未找到加载更多按钮")
                        break

                # If the document height is unchanged, nothing new loaded.
                current_height = driver.execute_script("return document.body.scrollHeight")
                if last_height == current_height:
                    self.logger.debug("页面高度未变化，停止滚动")
                    break

                last_height = current_height

            except Exception as e:
                self.logger.warning(f"滚动加载出错 (尝试 {attempt + 1}): {e}")
                break

        self.logger.info("滚动加载处理完成")

    def _scrape_with_pagination(self) -> List[ScrapedData]:
        """Scrape up to ``max_pages`` result pages, clicking "next" between them.

        Returns:
            Accumulated records from every page that parsed successfully.
        """
        all_data = []
        max_pages = self.pagination_config.get('max_pages', 5)
        page_delay = self.pagination_config.get('page_delay', 3)
        wait_for_load = self.pagination_config.get('wait_for_load', 5)

        driver = self._get_driver()

        self.logger.info(f"开始翻页爬取，最大页数: {max_pages}")

        for page_num in range(1, max_pages + 1):
            try:
                self.logger.info(f"正在爬取第 {page_num} 页")

                # Parse whatever is currently rendered.
                page_data = self._parse_page_data()
                if page_data:
                    all_data.extend(page_data)
                    self.logger.info(f"第 {page_num} 页获取到 {len(page_data)} 条数据")
                else:
                    self.logger.warning(f"第 {page_num} 页没有获取到数据")

                # Advance to the next page unless we just did the last one.
                if page_num < max_pages:
                    if not self._go_to_next_page():
                        self.logger.info("没有下一页或翻页失败，停止爬取")
                        break

                    # Give the new page time to render.
                    time.sleep(wait_for_load)

                    # Re-run font de-obfuscation: each page may ship a
                    # freshly shuffled font file.
                    try:
                        page_source = driver.page_source
                        self._handle_anti_crawler(page_source)
                    except Exception as e:
                        self.logger.warning(f"重新处理字体混淆失败: {e}")

                    # Polite delay between pages.
                    time.sleep(page_delay)

            except Exception as e:
                self.logger.error(f"爬取第 {page_num} 页时出错: {e}")
                # Keep going: one bad page should not abort the whole run.
                continue

        self.logger.info(f"翻页爬取完成，总共获取 {len(all_data)} 条数据")
        return all_data

    def _go_to_next_page(self) -> bool:
        """Click the "next page" control.

        Returns:
            True if the button was found, enabled, and clicked; False when
            the last page is reached or the click fails.
        """
        driver = self._get_driver()

        try:
            # Locate the next-page button (Ant Design pagination by default).
            next_page_selector = self.config.selectors.get('next_page', '.ant-pagination-next')
            next_btn = driver.find_element(By.CSS_SELECTOR, next_page_selector)

            # Ant Design marks the final page's button with a disabled class.
            if not next_btn.is_enabled() or 'ant-pagination-disabled' in next_btn.get_attribute('class'):
                self.logger.info("下一页按钮已禁用，已到最后一页")
                return False

            # Bring the button into the viewport before clicking.
            driver.execute_script("arguments[0].scrollIntoView(true);", next_btn)
            time.sleep(1)

            next_btn.click()
            self.logger.debug("成功点击下一页按钮")

            return True

        except NoSuchElementException:
            self.logger.warning("未找到下一页按钮")
            return False
        except Exception as e:
            self.logger.error(f"点击下一页按钮失败: {e}")
            return False

    def _get_current_page_number(self) -> int:
        """Read the active page number from the pagination widget.

        Returns:
            The current page number, or 1 when it cannot be determined.
        """
        driver = self._get_driver()

        try:
            current_page_selector = self.config.selectors.get('current_page', '.ant-pagination-item-active')
            current_page_element = driver.find_element(By.CSS_SELECTOR, current_page_selector)
            page_text = current_page_element.text.strip()

            return int(page_text)

        except (NoSuchElementException, ValueError) as e:
            self.logger.warning(f"获取当前页码失败: {e}")
            return 1

    def _parse_page_data(self) -> List[ScrapedData]:
        """Parse the currently rendered page into data records.

        Tries several table selectors until one matches, then extracts each
        row via :meth:`_extract_table_row_data`.

        Returns:
            List of validated records found on this page.
        """
        driver = self._get_driver()
        data_list = []

        # Defined up-front so the code below never hits a NameError even if
        # the discovery block raises before assigning them.
        rows = []
        working_selector = None

        try:
            # Neutralize the obfuscating web font before reading cell text.
            self._disable_custom_fonts()

            try:
                # Give the JS-rendered table time to settle.
                time.sleep(5)

                # The portal's markup varies; probe likely table selectors.
                possible_selectors = [
                    '.ant-table-tbody tr',
                    '.ant-table tbody tr',
                    'table tbody tr',
                    '.el-table__body-wrapper tbody tr',
                    '.ant-table-row',
                    '[class*="table"] [class*="row"]'
                ]

                for selector in possible_selectors:
                    try:
                        temp_rows = driver.find_elements(By.CSS_SELECTOR, selector)
                        self.logger.debug(f"选择器 '{selector}' 找到 {len(temp_rows)} 行")
                        if len(temp_rows) > 0:
                            rows = temp_rows
                            working_selector = selector
                            break
                    except Exception as e:
                        self.logger.debug(f"选择器 '{selector}' 失败: {e}")
                        continue

                if working_selector:
                    self.logger.info(f"使用选择器 '{working_selector}' 找到 {len(rows)} 行数据")
                else:
                    self.logger.warning("所有表格选择器都未找到数据")

                    # No table matched: dump a little body text for debugging.
                    page_content = driver.find_element(By.TAG_NAME, "body").text
                    self.logger.debug(f"页面内容长度: {len(page_content)} 字符")
                    if len(page_content) > 100:
                        self.logger.debug(f"页面内容开头: {page_content[:200]}...")

            except Exception as e:
                self.logger.error(f"调试页面结构失败: {e}")
                rows = []

            self.logger.info(f"最终找到 {len(rows)} 行数据")

            for row in rows:
                try:
                    data = self._extract_table_row_data(row)
                    if data and self._validate_data(data):
                        data_list.append(data)
                        self.logger.debug(f"提取数据: {data.title}")
                except Exception as e:
                    self.logger.warning(f"提取行数据失败: {e}")
                    continue

        except NoSuchElementException as e:
            self.logger.error(f"未找到表格数据: {e}")
        except Exception as e:
            self.logger.error(f"解析页面数据失败: {e}")

        return data_list

    def _extract_item_data(self, item_element) -> Optional[ScrapedData]:
        """Extract one record from a list-style item element.

        Args:
            item_element: WebElement containing one listing item.

        Returns:
            A populated :class:`ScrapedData`, or None on extraction failure.
        """
        try:
            # Title text.
            title = self._extract_text_by_selector(
                item_element,
                self.config.selectors.get('title', '.item-title')
            )

            # Registration (submission) time.
            registration_time = self._extract_text_by_selector(
                item_element,
                self.config.selectors.get('registration_time', '.registration-time')
            )

            # Replying department/unit.
            reply_unit = self._extract_text_by_selector(
                item_element,
                self.config.selectors.get('reply_unit', '.reply-unit')
            )

            # Reply time.
            reply_time = self._extract_text_by_selector(
                item_element,
                self.config.selectors.get('reply_time', '.reply-time')
            )

            # Body content (optional).
            content = self._extract_text_by_selector(
                item_element,
                self.config.selectors.get('content', '.item-content')
            )

            # Detail link, if the title element carries an href.
            link = self._extract_attribute_by_selector(
                item_element,
                self.config.selectors.get('title', '.item-title'),
                'href'
            )

            # Resolve relative links against the configured URL.
            if link:
                if link.startswith('http'):
                    full_url = link
                else:
                    base_url = self.config.url.split('#')[0]  # drop hash fragment
                    full_url = f"{base_url.rstrip('/')}/{link.lstrip('/')}"
            else:
                full_url = self.config.url

            data = ScrapedData(
                title=title,
                registration_time=self.text_processor.standardize_time(registration_time),
                reply_unit=reply_unit,
                reply_time=self.text_processor.standardize_time(reply_time),
                source_url=full_url,
                content=content if content else None
            )

            return data

        except Exception as e:
            self.logger.warning(f"提取项目数据时出错: {e}")
            return None

    def _wait_for_element(self, selector: str, timeout: int = 10):
        """Wait until an element matching ``selector`` is present in the DOM.

        Args:
            selector: CSS selector to wait for.
            timeout: Maximum wait in seconds.

        Returns:
            The located WebElement, or None on timeout.
        """
        driver = self._get_driver()
        try:
            element = WebDriverWait(driver, timeout).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, selector))
            )
            return element
        except TimeoutException:
            self.logger.warning(f"等待元素超时: {selector}")
            return None

    def _get_page_info(self) -> dict:
        """Collect diagnostic info about the current page.

        Returns:
            Dict with ``title``, ``url``, ``page_source_length`` and
            ``item_count`` (0 when the item selector matches nothing).
        """
        driver = self._get_driver()
        info = {
            'title': driver.title,
            'url': driver.current_url,
            'page_source_length': len(driver.page_source)
        }

        # Best-effort count of list items on the page.
        try:
            item_selector = self.config.selectors.get('item_container', '.list-item')
            items = driver.find_elements(By.CSS_SELECTOR, item_selector)
            info['item_count'] = len(items)
        except Exception:
            info['item_count'] = 0

        return info

    def _handle_anti_crawler(self, page_source: str) -> None:
        """Run the font-obfuscation countermeasures and cache the glyph map."""
        driver = self._get_driver()

        try:
            self.logger.info("开始处理反爬虫机制...")

            # Delegate to the combined anti-anti-crawler pipeline; it returns
            # the glyph -> character mapping table used later for decoding.
            self.font_mapping_table = self.anti_crawler.handle_font_obfuscation(
                driver, page_source, self.config.url
            )

            # Result summary kept in the legacy dict shape.
            results = {
                'font_replaced': True,
                'mapping_table': self.font_mapping_table,
                'ocr_used': False
            }

            # Fix: the original logged results.get('ocr_available'), a key
            # that is never set, so the log always showed None.
            self.logger.info(f"反爬虫处理完成: 字体替换={results.get('font_replaced')}, "
                           f"OCR可用={results.get('ocr_used')}, "
                           f"字体映射数量={len(self.font_mapping_table)}")

        except Exception as e:
            self.logger.error(f"处理反爬虫机制失败: {e}")

    def _disable_custom_fonts(self) -> None:
        """Inject CSS forcing a standard font, defeating glyph obfuscation."""
        driver = self._get_driver()

        try:
            # Override every font-family with Arial via an injected <style>.
            css_script = """
            var style = document.createElement('style');
            style.type = 'text/css';
            style.innerHTML = `
                * {
                    font-family: Arial, sans-serif !important;
                }
                .ant-table-tbody tr td div {
                    font-family: Arial, sans-serif !important;
                }
            `;
            document.head.appendChild(style);
            """
            driver.execute_script(css_script)

            # Brief pause so the style takes effect before text is read.
            time.sleep(2)

            self.logger.info("已禁用自定义字体")

        except Exception as e:
            self.logger.warning(f"禁用自定义字体失败: {e}")

    def _extract_table_row_data(self, row_element) -> Optional[ScrapedData]:
        """Extract one record from a table row.

        Assumes column order: title, registration time, reply unit, reply
        time — TODO confirm this matches the live table layout.

        Args:
            row_element: Table row (``tr``) WebElement.

        Returns:
            A populated :class:`ScrapedData`, or None when the row has fewer
            than four cells or extraction fails.
        """
        try:
            cells = row_element.find_elements(By.CSS_SELECTOR, 'td')

            if len(cells) < 4:
                self.logger.warning(f"表格行单元格数量不足: {len(cells)}")
                return None

            # Column mapping (see docstring).
            title = self._get_cell_text(cells[0])
            registration_time = self._get_cell_text(cells[1])
            reply_unit = self._get_cell_text(cells[2])
            reply_time = self._get_cell_text(cells[3])

            # Detail link lives (if anywhere) in the title cell.
            detail_link = self._get_detail_link(cells[0])

            data = ScrapedData(
                title=title,
                registration_time=self.text_processor.standardize_time(registration_time),
                reply_unit=reply_unit,
                reply_time=self.text_processor.standardize_time(reply_time),
                source_url=detail_link if detail_link else self.config.url,
                content=None
            )

            return data

        except Exception as e:
            self.logger.warning(f"提取表格行数据时出错: {e}")
            return None

    def _get_cell_text(self, cell_element) -> str:
        """Read a table cell's text, de-obfuscating it when it looks garbled.

        Decoding cascade: font mapping table -> raw textContent/innerText ->
        OCR (when available). Returns "" on any failure.
        """
        try:
            # Cell text is nested inside a div.
            div_element = cell_element.find_element(By.CSS_SELECTOR, 'div')
            text = div_element.text.strip()

            if self._is_garbled_text(text):
                # First try the glyph mapping extracted from the web font.
                if self.font_mapping_table:
                    decoded_text = self.anti_crawler.font_decoder.decode_text(text, self.font_mapping_table)
                    if decoded_text != text:
                        self.logger.debug(f"字体解码成功: {text} -> {decoded_text}")
                        return decoded_text

                # Fall back to the raw DOM text properties.
                text = div_element.get_attribute('textContent') or div_element.get_attribute('innerText') or text

                # Last resort: OCR the rendered element.
                if self._is_garbled_text(text) and self.anti_crawler.ocr_decoder.ocr_available:
                    try:
                        driver = self._get_driver()
                        # OCR needs a selector to screenshot the element.
                        unique_selector = self._generate_unique_selector(div_element)
                        if unique_selector:
                            ocr_text = self.anti_crawler.ocr_decoder.decode_text_with_ocr(driver, unique_selector)
                            if ocr_text:
                                self.logger.debug(f"OCR识别成功: {text} -> {ocr_text}")
                                return ocr_text
                    except Exception as ocr_error:
                        self.logger.debug(f"OCR识别失败: {ocr_error}")

            return text
        except Exception as e:
            self.logger.warning(f"获取单元格文本失败: {e}")
            return ""

    def _get_detail_link(self, title_cell) -> str:
        """Resolve the detail-page link for a row.

        The portal uses onclick handlers rather than hrefs; parsing them is
        not implemented yet, so this currently always returns the list URL.
        """
        try:
            link_element = title_cell.find_element(By.CSS_SELECTOR, 'div')
            if link_element:
                onclick = link_element.get_attribute('onclick')
                if onclick:
                    # TODO: parse the onclick payload into a real detail URL.
                    return self.config.url
        except Exception:
            pass

        return self.config.url

    def _generate_unique_selector(self, element) -> Optional[str]:
        """Build a CSS selector for ``element`` (id > first class > tag name).

        Note: the class/tag fallbacks are not guaranteed unique on the page.

        Returns:
            A selector string, or None when attribute access fails.
        """
        try:
            # Prefer the id — the only genuinely unique handle.
            element_id = element.get_attribute('id')
            if element_id:
                return f"#{element_id}"

            # Otherwise the first CSS class.
            element_class = element.get_attribute('class')
            if element_class:
                first_class = element_class.split()[0]
                return f".{first_class}"

            # Last resort: the bare tag name.
            tag_name = element.tag_name
            return tag_name

        except Exception as e:
            self.logger.debug(f"生成选择器失败: {e}")
            return None

    def _is_garbled_text(self, text: str) -> bool:
        """Heuristic: is ``text`` likely font-obfuscated?

        Empty text counts as garbled; otherwise text that is more than half
        non-ASCII does. NOTE(review): ordinary Chinese text also exceeds the
        non-ASCII threshold, so every CJK cell goes through the decoding
        cascade in _get_cell_text — presumably intentional (decoding is a
        no-op for clean text), but confirm before tightening this check.
        """
        if not text:
            return True

        # Count characters outside the ASCII range.
        special_chars = sum(1 for c in text if ord(c) > 127)
        if special_chars > len(text) * 0.5:
            return True

        return False