from selenium.common import StaleElementReferenceException, ElementClickInterceptedException
from selenium.webdriver.chrome.options import Options
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from spiders.config import driver_path
from spiders.novelCrawler.chapter_scraper import scrape_chapter_content


class NovelScraper:
    def __init__(self, start_url, chapter_selector, next_page_selector, task):
        """
        Initialize the scraper and launch a Chrome browser.

        :param start_url: URL of the novel's chapter-list start page
        :param chapter_selector: CSS selector matching the chapter links
        :param next_page_selector: CSS selector matching the "next page" button
        :param task: task context forwarded unchanged to scrape_chapter_content
        """
        # Configure Chrome options. Headless mode is intentionally disabled;
        # uncomment to run without a visible browser window:
        # chrome_options.add_argument("--headless")
        chrome_options = Options()
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--window-size=1920,1080')

        # driver_path comes from spiders.config; an empty value lets
        # Selenium fall back to its own driver resolution.
        service = Service(driver_path)

        # Start the browser.
        self.driver = webdriver.Chrome(service=service, options=chrome_options)

        self.start_url = start_url
        self.chapter_selector = chapter_selector
        self.next_page_selector = next_page_selector
        self.task = task
        self.driver.implicitly_wait(10)  # implicit wait for all element lookups

    def start_scraping(self):
        """
        Crawl chapter pages, following "next page" links until none remain.

        The browser is always closed, even if scraping raises unexpectedly.
        """
        # BUG FIX: wrap the crawl in try/finally so driver.quit() runs even
        # when an unexpected exception escapes — otherwise the Chrome
        # process is leaked.
        try:
            self.driver.get(self.start_url)
            while True:
                # Scrape every chapter on the current listing page.
                self.get_chapter_contents()

                # Advance to the next page; stop when there is none.
                try:
                    self.click_next_page()
                except Exception as e:
                    print(f"Finished scraping. {e}")
                    break
        finally:
            self.driver.quit()

    def get_chapter_contents(self):
        """
        Scrape every chapter listed on the current page.

        For each chapter element: if an 'html_content' attribute yields a
        value it is parsed directly; otherwise the link is clicked, the
        loaded page source is parsed, and the browser navigates back to
        the chapter list.
        """
        try:
            # Locate the chapter links with the configured selector.
            chapters = self.driver.find_elements(By.CSS_SELECTOR, self.chapter_selector)
            for i in range(len(chapters)):
                # Re-fetch by index each iteration: navigating away and back
                # invalidates previously fetched element references.
                chapter = self.driver.find_elements(By.CSS_SELECTOR, self.chapter_selector)[i]

                html_content = ''
                try:
                    # NOTE(review): 'html_content' is not a standard DOM
                    # attribute, so this usually returns None and we fall
                    # through to the click path — confirm whether
                    # 'outerHTML' was intended.
                    html_content = chapter.get_attribute('html_content')
                except StaleElementReferenceException:
                    print("Encountered a stale element reference exception. Trying to re-fetch the element.")
                try:
                    if html_content:
                        scrape_chapter_content(i, html_content, self.task)
                    else:
                        try:
                            chapter.click()
                        except ElementClickInterceptedException:
                            print("点击失败： Element is not clickable at point. Trying to scroll to it.")
                            # Fall back to a JS click, which is not blocked
                            # by overlapping elements.
                            self.driver.execute_script("arguments[0].click();", chapter)

                        # BUG FIX: a bare WebDriverWait(driver, 3) without
                        # .until(...) waits for nothing; sleep to let the
                        # chapter page load instead.
                        time.sleep(3)

                        # Parse the chapter page that was just opened.
                        html_content = self.driver.page_source
                        if html_content:
                            scrape_chapter_content(i, html_content, self.task)
                        else:
                            print(f"这个章节没有url chapter: {chapter.text}")

                        # Return to the chapter list and wait until the
                        # chapter links are present again (BUG FIX: the
                        # original WebDriverWait had no .until condition).
                        self.driver.back()
                        WebDriverWait(self.driver, 10).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, self.chapter_selector))
                        )

                except StaleElementReferenceException:
                    print("Encountered a stale element reference exception. Trying to re-fetch the element.")
        except Exception as e:
            print(f"Error while fetching chapter URLs: {e}")

    def click_next_page(self):
        """
        Click the "next page" button.

        :raises Exception: re-raised when the button is absent or not
            clickable, signalling the caller to stop paginating.
        """
        try:
            # Wait for the button to become clickable, then click it.
            next_button = WebDriverWait(self.driver, 10).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, self.next_page_selector))
            )
            next_button.click()
            time.sleep(2)  # crude wait for the next listing page to load
        except Exception as e:
            print(f"Error while clicking next page: {e}")
            # Bare raise preserves the original traceback (vs. `raise e`).
            raise


# Usage example
if __name__ == "__main__":
    # Replace with the actual chapter-list URL and selectors.
    start_url = 'https://zbwq.wmg.weimeigu.net/app/index.php?i=16414&c=entry&tid=25699&do=mulu&m=iweite_xiaoshuo'
    chapter_selector = 'ul#html_box li'  # selector for the chapter links
    next_page_selector = '#next_page a'  # selector for the "next page" button

    # BUG FIX: NovelScraper requires a `task` argument — the original call
    # omitted it and raised TypeError on startup. It is forwarded unchanged
    # to scrape_chapter_content, so None is a safe placeholder here.
    scraper = NovelScraper(start_url, chapter_selector, next_page_selector, task=None)
    scraper.start_scraping()