import re
from urllib.parse import quote_plus, urljoin
from bs4 import BeautifulSoup
import requests
from scrapers.base_scraper import BaseScraper
from scrapers.common import is_same_chapter, merge_relative_url
from utils import fetch_page_with_playwright, fetch_real_url_playwright
from scrapers.constant import SearchSource, headers


class XS520Scraper(BaseScraper):
    """Scraper for the XS520 novel site (https://www.xs520.com).

    Provides keyword search, chapter-index extraction (handles paginated
    indexes), and chapter-content extraction (handles in-chapter pagination).
    """

    baseUrl = "https://www.xs520.com"

    @property
    def base_url(self) -> str:
        """Base URL of the site."""
        return self.baseUrl

    @property
    def site_name(self) -> str:
        """Human-readable identifier for this source."""
        return "XS520"

    def search(self, keyword: str) -> list:
        """Search the site for *keyword*.

        Returns a list of result dicts with keys ``title``, ``author``,
        ``url`` (chapter-index URL), ``source``, ``intro``, ``cover``.
        Returns an empty list on any failure (network or parse).
        """
        url = f"{self.baseUrl}/search.php?q={quote_plus(keyword)}"
        print(f"[{self.baseUrl}] 搜索 URL: {url}")
        try:
            # Search page is JS-rendered, so fetch it through Playwright.
            _, html = fetch_page_with_playwright(url)
            soup = BeautifulSoup(html, "html.parser")

            results = []
            for item in soup.select(".row dl"):
                title_node = item.select_one("dd h3 a")
                author_node = item.select_one(".book_other span")
                cover_node = item.select_one("dt img")

                if not (title_node and author_node):
                    continue
                href = title_node.get("href")
                if not href:
                    continue

                cover = cover_node.get("src") if cover_node else ""
                results.append(
                    {
                        "title": title_node.get_text(strip=True),
                        "author": author_node.get_text(strip=True),
                        # The chapter index lives at .../index_1.html under
                        # the book's directory-style href.
                        "url": urljoin(self.baseUrl, href) + "index_1.html",
                        "source": SearchSource.XS_520.value,
                        "intro": "",
                        # urljoin handles both relative and already-absolute
                        # cover paths (plain concatenation would break the
                        # latter).
                        "cover": urljoin(self.baseUrl, cover) if cover else "",
                    }
                )
            return results
        except Exception as e:
            # Best-effort: a failed search degrades to "no results".
            print(f"[{self.baseUrl}] 搜索失败: {e}")
            return []

    def fetch_chapter(self, url: str) -> dict:
        """Collect the full chapter list starting from index page *url*.

        Follows pagination links until no unvisited page remains.

        Returns ``{"title": <book title>, "chapters": [{"name", "url"}, ...]}``.
        Raises ValueError if no chapters were found (layout change likely).
        """
        visited_urls = set()
        title = None
        chapter_links = []

        while url and url not in visited_urls:
            visited_urls.add(url)
            # Resolve any JS redirect to the real index URL first.
            real_url = fetch_real_url_playwright(url)
            response = requests.get(real_url, headers=headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Book title appears on every index page; capture it once.
            if not title:
                title_node = soup.select_one(".book_info .info h1")
                title = title_node.get_text(strip=True) if title_node else ""

            for a in soup.select(".book_list2 a"):
                name = a.get_text(strip=True)
                href = a.get("href")
                if href and name:
                    chapter_links.append(
                        {"name": name, "url": urljoin(self.baseUrl, href)}
                    )

            # Advance to the first pagination link we haven't visited yet;
            # if none exists, `url` stays visited and the loop terminates.
            for next_link in soup.select(".page-item .page-link"):
                href = next_link.get("href")
                if not href:
                    # Guard: urljoin(base, None) raises TypeError.
                    continue
                next_url = urljoin(self.baseUrl, href)
                if next_url not in visited_urls:
                    url = next_url
                    break

        if not chapter_links:
            raise ValueError("未找到章节内容，页面结构可能已变")
        return {"title": title, "chapters": chapter_links}

    def fetch_novel_content(self, url: str) -> dict:
        """Fetch one chapter's text, stitching together its sub-pages.

        Returns ``{"title": <chapter title>, "content": <plain text>}``.
        Title/content are empty strings if the page structure is unexpected.
        """
        content_parts = []
        title = None
        visited_urls = set()
        base = url
        # Site boilerplate / pagination markers stripped from the text.
        # (Loop-invariant, so built once up front.)
        filter_keywords = [
            "请收藏：",
            "温馨提示",
            "第(1/3)页",
            "第(2/3)页",
            "第(3/3)页",
        ]

        while url and url not in visited_urls:
            visited_urls.add(url)
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Chapter title appears on every sub-page; capture it once.
            if not title:
                title_node = soup.select_one(".container h1")
                if title_node:
                    title = title_node.get_text(strip=True)

            content_node = soup.select_one("article")
            if not content_node:
                break

            # Preserve paragraph breaks that the site encodes as <br>.
            for br in content_node.find_all("br"):
                br.replace_with("\n")

            raw_text = content_node.get_text("\n", strip=True)
            content_lines = raw_text.splitlines()
            if not content_lines:
                break

            content_lines = [
                line
                for line in content_lines
                if not any(k in line for k in filter_keywords)
            ]
            content_parts.append("\n".join(content_lines).strip())

            # Follow the in-chapter "next page" link, but never cross into
            # the next chapter or revisit a page.
            next_link = soup.select_one("#next")
            if not next_link:
                break
            next_href = next_link.get("href")
            if not next_href or not self.xs_520_next_valid_url(next_href):
                break
            next_url = urljoin(base, next_href)
            if next_url in visited_urls or not is_same_chapter(base, next_url):
                break
            url = next_url

        full_content = "\n".join(content_parts).strip()
        return {"title": title or "", "content": full_content}

    def xs_520_next_valid_url(self, url: str) -> bool:
        """True if *url* is an in-chapter continuation page (``..._N.html``, N >= 2).

        Matches any page number >= 2; the previous single-digit pattern
        ``_([2-9])\\.html`` stopped pagination at page 9.
        """
        match = re.search(r"_(\d+)\.html$", url)
        return match is not None and int(match.group(1)) >= 2
