import logging
import re
import time

from selenium.common import (
    NoSuchElementException,
    StaleElementReferenceException,
    TimeoutException,
)

from config import (
    RESULT_TABLE_LIST_SELECTOR,
    PAGE_NEXT_SELECTOR,
    TOTAL_ARTICLE_NUMBER_SELECTOR,
    TITLE_SELECTOR,
    PAGE_DOWN_TIMES,
    REFERENCE_TOTAL_NUMBER_SELECTOR,
    CITATION_FLAG_SELECTOR,
    TYPES,
    REFERENCE_LIST_SELECTOR,
)
from custom_exception import NoMorePagesException


# Logging configuration: append timestamped INFO-level records to log.log.
logging.basicConfig(
    filename="log.log",
    level=logging.INFO,
    format="%(asctime)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
)


def log_execution_time(start_time, title):
    """Log how many whole seconds have elapsed since *start_time*.

    Args:
        start_time (float): epoch timestamp captured before the work began.
        title (str): label identifying the work item in the log line.
    """
    elapsed_seconds = int(time.time() - start_time)
    logging.info("%s :  %d 秒", title, elapsed_seconds)


def get_main_handle(driver):
    """Return the handle of the window the driver is currently focused on."""
    handle = driver.current_window_handle
    return handle


class ScrapyHandler:
    """Scrape article details and reference lists from search results.

    Walks every result page, opens each article in a new browser window,
    extracts metadata via injected JavaScript, collects the paginated
    reference lists for each reference type, and persists everything
    through ``file_handler``.
    """

    def __init__(self, webdriver_handler, file_handler):
        """Initialize the scraper.

        Args:
            webdriver_handler (WebDriverHandler): browser automation helper.
            file_handler (FileHandler): persistence / dedup helper.
        """
        # Remaining-article counter used for progress logging; populated by
        # get_total_article_number() and decremented as articles complete.
        self.total_article_number = 0
        self.page_count = 1
        self.webdriver_handler = webdriver_handler
        self.file_handler = file_handler
        # Scratch buffer holding the references of the article in progress;
        # cleared before each article in handle_search_results().
        self.all_references = []

    def start_scraping(self, search_query):
        """Run the full pipeline: open browser, search, process results.

        Args:
            search_query (str): the query string to search for.
        """
        if not self._init_browser():
            return

        # Run the search for the given query.
        self._execute_search(search_query)

        # Walk and scrape the result pages.
        self._process_results()

    def _init_browser(self):
        """Open the search page; returns a truthy value on success."""
        return self.webdriver_handler.open_search_page()

    def _execute_search(self, search_query):
        """Submit the search query on the already-open search page."""
        return self.webdriver_handler.execute_search(search_query)

    def _process_results(self):
        """Process the search results, logging and re-raising any failure."""
        try:
            logging.info("开始处理检索结果...")
            if self.webdriver_handler.driver is None:
                logging.error("WebDriver未正确初始化")
                return
            self.handle_search_results(self.webdriver_handler.driver)
        except Exception as e:
            logging.error("处理检索结果时出错: %s", str(e))
            raise

    def handle_search_results(self, driver):
        """Iterate over every result row on every result page.

        Args:
            driver (WebDriver): active WebDriver instance.

        Returns:
            None
        """
        main_handle = get_main_handle(driver)
        # Total number of articles to process (drives progress logging).
        self.get_total_article_number()

        while True:
            result_table_list = self.webdriver_handler.wait_for_elements(
                RESULT_TABLE_LIST_SELECTOR
            )
            # Guard: wait_for_elements may yield None when nothing matched.
            for item in result_table_list or []:
                # Reset the scratch buffer for each article.
                self.all_references.clear()
                try:
                    time.sleep(1)
                    if item is not None:
                        item.click()
                        time.sleep(0.1)
                        new_window_handle = self.webdriver_handler.switch_to_new_window(
                            driver.current_window_handle, driver
                        )
                        if new_window_handle is not None:
                            self.handle_single_result(
                                driver, main_handle, self.all_references
                            )
                except (NoSuchElementException, StaleElementReferenceException) as e:
                    logging.error("当前遇到错误: %s，开始重启程序", e)
                    continue

            # Pagination; a missing "next" button ends the crawl.
            # NoMorePagesException is raised by go_to_next_page() and is now
            # caught here so the crawl ends cleanly instead of propagating.
            try:
                self.go_to_next_page()
            except (
                NoMorePagesException,
                NoSuchElementException,
                StaleElementReferenceException,
            ):
                logging.warning("已到达最后一页结束")
                break

    def go_to_next_page(self):
        """Click the "next page" button on the result list.

        Raises:
            NoMorePagesException: when no next-page button exists.
        """
        next_page_button = self.webdriver_handler.wait_for_element(PAGE_NEXT_SELECTOR)

        # Bug fix: the original only raised when total_article_number == 0
        # and otherwise fell through to click a None button (AttributeError).
        # A missing button always means there is no next page.
        if next_page_button is None:
            raise NoMorePagesException("没有下一页了，程序结束")
        next_page_button.click()
        time.sleep(1)  # Wait for the next result page to load.

    def get_total_article_number(self):
        """Read the total article count from the result page header."""

        # Locate the total-count element (page needs a moment to render it).
        time.sleep(1)
        total_article_element = self.webdriver_handler.wait_for_element(
            TOTAL_ARTICLE_NUMBER_SELECTOR
        )

        # Normalize e.g. "1,234" -> 1234; leave the counter at 0 if absent.
        if total_article_element is not None:
            total_article_number = total_article_element.text.replace(",", "")
            self.total_article_number = int(total_article_number)

    def handle_single_result(self, driver, main_handle, all_references):
        """Process one article: dedup check, detail extraction, references.

        Args:
            driver (WebDriver): active WebDriver instance.
            main_handle (str): handle of the main (result list) window.
            all_references (list): accumulator for the article's references.

        Returns:
            None
        """

        start_time = time.time()
        # Skip articles that were already scraped (returns False in that
        # case, or when the detail page has no title element at all).
        title = self.check_duplicate_articles(driver, main_handle)
        if not title:
            return

        self.webdriver_handler.scroll_page(PAGE_DOWN_TIMES)
        article_info = self._extract_article_info()

        if self.process_references(driver, title, all_references, main_handle):
            # Store the article details together with its references.
            self.file_handler.store_article_info(title, article_info, all_references)
        log_execution_time(start_time, title)

    def _extract_article_info(self):
        """Extract detailed information of an article.

        Returns:
            dict: journal_name, authors, affiliations, abstract, keywords,
            funding, doi and album fields (best effort; empty on failure).
        """
        article_info = {}
        article_info["journal_name"] = self._extract_journal_name()
        article_info["authors"] = self._extract_authors()
        article_info["affiliations"] = self._extract_affiliations()
        article_info["abstract"] = self._extract_abstract()
        article_info["keywords"] = self._extract_keywords()
        article_info["funding"] = self._extract_funding()
        article_info["doi"], article_info["album"] = self._extract_doi_and_album()

        return article_info

    def _extract_journal_name(self):
        """Extract the journal name (empty string on failure)."""
        try:
            journal_element = self.webdriver_handler.driver.execute_script(
                "return document.querySelector('body > div.wrapper > div.main > div.container > div > div.doc-top > div > div.top-tip > span > a:nth-child(1)').textContent.trim();"
            )
            journal_name = journal_element.strip(" .")
            logging.info("Successfully extracted journal name: %s", journal_name)
            return journal_name
        except Exception as e:
            logging.error("Failed to extract journal name: %s", str(e))
            return ""

    def _extract_authors(self):
        """Extract the author names (empty list on failure)."""
        try:
            authors = self.webdriver_handler.driver.execute_script(
                "return Array.from(document.querySelectorAll('#authorpart > span > a')).map(a => a.textContent.trim().replace(/\\d|,/g, '').trim());"
            )
            logging.info("Successfully extracted authors: %s", authors)
            return authors
        except Exception as e:
            logging.error("Failed to extract authors: %s", str(e))
            return []

    def _extract_affiliations(self):
        """Extract the author affiliations (empty list on failure)."""
        try:
            affiliations = self.webdriver_handler.driver.execute_script(
                "return Array.from(document.querySelectorAll('body > div.wrapper > div.main > div.container > div > div:nth-child(3) > div.brief > div > h3:nth-child(3) > span > a')).map(a => a.textContent.trim());"
            )
            logging.info("Successfully extracted affiliations: %s", affiliations)
            return affiliations
        except Exception as e:
            logging.error("Failed to extract affiliations: %s", str(e))
            return []

    def _extract_abstract(self):
        """Extract the abstract text (empty string on failure)."""
        try:
            abstract_element = self.webdriver_handler.driver.execute_script(
                "return document.querySelector('#ChDivSummary') ? document.querySelector('#ChDivSummary').textContent.trim() : '';"
            )
            if abstract_element:
                logging.info(
                    "Successfully extracted abstract, length: %d", len(abstract_element)
                )
                return abstract_element
            else:
                logging.error("Abstract element not found")
                return ""
        except Exception as e:
            logging.error("Failed to extract abstract: %s", str(e))
            return ""

    def _extract_keywords(self):
        """Extract the keywords (empty list on failure)."""
        try:
            keywords = self.webdriver_handler.driver.execute_script(
                "return Array.from(document.querySelectorAll('body > div.wrapper > div.main > div.container > div > div:nth-child(3) > div:nth-child(4) > p')).map(p => p.textContent.trim()).join(';').split(';').map(kw => kw.trim()).filter(kw => kw);"
            )
            logging.info("Successfully extracted keywords: %s", keywords)
            return keywords
        except Exception as e:
            logging.error("Failed to extract keywords: %s", str(e))
            return []

    def _extract_funding(self):
        """Extract the funding information (empty list when absent)."""
        try:
            funding = self.webdriver_handler.driver.execute_script(
                "return Array.from(document.querySelectorAll('body > div.wrapper > div.main > div.container > div > div:nth-child(3) > div:nth-child(5) > p')).map(p => p.textContent.trim()).join('').split('；').map(f => f.trim()).filter(f => f);"
            )
            logging.info("Successfully extracted funding: %s", funding)
            return funding
        except Exception:
            logging.warning("No funding information found")
            return []

    def _extract_doi_and_album(self):
        """Extract the DOI and album information.

        Returns:
            tuple[str, str]: (doi, album); empty strings when not found.
        """
        try:
            elements = self.webdriver_handler.driver.execute_script(
                "return Array.from(document.querySelectorAll('body > div.wrapper > div.main > div.container > div > div:nth-child(3) > div:nth-child(6) > ul > li')).map(li => li.textContent.trim());"
            )
            doi = ""
            album = ""
            for element in elements:
                if "DOI" in element:
                    doi = element.replace("DOI： ", "").strip()
                    logging.info("Successfully extracted DOI: %s", doi)
                if "专辑" in element:
                    album = element.replace("专辑： ", "").strip()
                    logging.info("Successfully extracted album: %s", album)
            return doi, album
        except Exception:
            logging.error("No DOI or album information found")
            return "", ""

    def check_duplicate_articles(self, driver, main_handle):
        """Check, by title, whether the article was already scraped.

        Args:
            driver (WebDriver): active WebDriver instance.
            main_handle (str): handle of the main (result list) window.

        Returns:
            str | bool: the cleaned title, or False when the article was
            already scraped (or its title element is missing).
        """

        title_element = self.webdriver_handler.wait_for_element(TITLE_SELECTOR)
        # Robustness fix: the detail page may lack a title element; the
        # original dereferenced None here. Close the window and skip.
        if title_element is None:
            driver.close()
            driver.switch_to.window(main_handle)
            return False
        title = self.file_handler.clean_invalid_chars(title_element.text)
        if self.file_handler.check_if_exists(title):
            data = self.file_handler.load_data()
            if title in data and len(data[title]) > 0:
                driver.close()
                driver.switch_to.window(main_handle)
                self.total_article_number -= 1
                logging.info(
                    "======  No.%d: %s 已被爬取, 跳过, 还有 %d 篇待处理======",
                    self.total_article_number + 1,
                    title,
                    self.total_article_number,
                )
                return False

        return title

    def process_references(self, driver, title, all_references, main_handle):
        """Collect and store all references for the current article.

        Args:
            driver (WebDriver): active WebDriver instance.
            title (str): article title (storage key).
            all_references (list): accumulator, mutated in place.
            main_handle (str): handle of the main (result list) window.

        Returns:
            bool: False when the article has no references, True otherwise.
        """
        if self._no_references(driver):
            self._handle_no_references(driver, title, main_handle)
            return False

        for type_name, type_id in TYPES.items():
            if self._is_type_present(driver, type_id):
                self._extract_references_by_type(
                    driver, type_name, type_id, title, all_references
                )

        # Bug fix: dedupe IN PLACE (order-preserving) so the caller's list
        # is also cleaned; the original rebound a local and the duplicated
        # list was later passed to store_article_info().
        all_references[:] = list(dict.fromkeys(all_references))
        self.file_handler.store_references(title, all_references)
        self.total_article_number -= 1
        logging.info(
            ">> No.%d: %s 已处理完毕.剩余待处理: %d篇======",
            self.total_article_number + 1,
            title,
            self.total_article_number,
        )

        driver.close()
        driver.switch_to.window(main_handle)
        return True

    def _no_references(self, driver):
        """Return True when the article has no usable reference section."""
        references_number_element = self.webdriver_handler.wait_for_element(
            REFERENCE_TOTAL_NUMBER_SELECTOR
        )
        citation_network_element = self.webdriver_handler.wait_for_element(
            CITATION_FLAG_SELECTOR
        )
        # Robustness fix: either element may be missing; the original
        # dereferenced .text on a possibly-None counter element.
        if references_number_element is None or citation_network_element is None:
            return True
        return references_number_element.text == "(0)"

    def _handle_no_references(self, driver, title, main_handle):
        """Close the detail window and log the skip when no references exist."""
        driver.close()
        driver.switch_to.window(main_handle)
        self.total_article_number -= 1
        logging.info(
            "====== No.%d: %s 没有检测到参考文献, 跳过, 剩余待处理文献: %d篇======",
            self.total_article_number + 1,
            title,
            self.total_article_number,
        )

    def _is_type_present(self, driver, type_id):
        """Check whether the reference block for *type_id* is visible."""
        element = self.webdriver_handler.wait_for_element(type_id)
        # get_attribute may return None; treat that as "no inline style".
        return element is not None and "display: none" not in (
            element.get_attribute("style") or ""
        )

    def _extract_references_by_type(
        self, driver, type_name, type_id, title, all_references
    ):
        """Extract all references of one type, following its pagination."""
        article_refs = self.webdriver_handler.wait_for_elements(type_id)
        if not article_refs:
            return

        logging.info("开始提取 %s -- %s 类型数据", title, type_name)
        for _ in article_refs:
            article_ref_lists = self.webdriver_handler.wait_for_elements(
                f"{type_id} > ul > li"
            )
            for article in article_ref_lists or []:
                ref_list = article.text.split("\n")
                for ref in ref_list:
                    # Strip the leading "[n]" numbering from each entry.
                    reference = re.sub(r"\[\d+]", "", ref).strip()
                    all_references.append(reference)

            time.sleep(1)
            self._go_to_next_page_refs(
                all_references, driver, title, type_id, type_name
            )

    def _go_to_next_page_refs(self, all_references, driver, title, type_id, type_name):
        """Advance to the next reference page and continue extraction."""
        next_page_button = self.webdriver_handler.wait_for_elements(f"{type_id} a.next")
        if next_page_button:
            driver.execute_script("arguments[0].click();", next_page_button[0])
            time.sleep(1)
            # Mutual recursion with _extract_references_by_type walks every
            # page of this reference type.
            self._extract_references_by_type(
                driver, type_name, type_id, title, all_references
            )
        else:
            logging.info("%s -- %s 已完成处理.", title, type_name)

    def article_refs(self, driver, type_name, type_id, title, all_references):
        """Legacy public wrapper around :meth:`_extract_references_by_type`.

        Kept for backward compatibility; the original body duplicated the
        private implementation line for line.

        Args:
            driver (WebDriver): active WebDriver instance.
            type_name (str): reference type label.
            type_id (str): CSS selector / id of the reference type block.
            title (str): article title.
            all_references (list): accumulator, mutated in place.

        Returns:
            bool: False when the type block is absent, True otherwise.
        """
        if not self.webdriver_handler.wait_for_elements(type_id):
            return False

        self._extract_references_by_type(
            driver, type_name, type_id, title, all_references
        )
        return True

    def go_to_next_page_refs(self, all_references, driver, title, type_id, type_name):
        """Legacy public wrapper around :meth:`_go_to_next_page_refs`.

        Args:
            all_references (list): accumulator, mutated in place.
            driver (WebDriver): active WebDriver instance.
            title (str): article title.
            type_id (str): CSS selector / id of the reference type block.
            type_name (str): reference type label.

        Returns:
            None
        """
        self._go_to_next_page_refs(all_references, driver, title, type_id, type_name)
