import re
from urllib.parse import quote, urljoin

import requests
from bs4 import BeautifulSoup

from scrapers.base_scraper import BaseScraper
from scrapers.common import is_same_chapter, merge_relative_url
from scrapers.constant import SearchSource, headers
from utils import fetch_real_url_playwright


class Bqg128Scraper(BaseScraper):
    """Scraper for the bqg128.com ("笔趣阁128") web-novel site.

    Implements search, chapter-list fetching, and paginated chapter-content
    fetching on top of the project's BaseScraper interface.
    """

    baseUrl = "https://www.bqg128.com"

    @property
    def base_url(self) -> str:
        """Root URL of the site."""
        return self.baseUrl

    @property
    def site_name(self) -> str:
        """Human-readable site name."""
        return "笔趣阁128"

    def search(self, keyword: str) -> list:
        """Search the site for ``keyword``.

        Returns a list of dicts with keys ``title``, ``author``, ``url``,
        ``source``, ``cover``, ``intro``; an empty list on any failure.
        """
        # Percent-encode the keyword so spaces, '&', '#', '+' etc. cannot
        # corrupt the query string before it reaches Playwright.
        url = f"{self.baseUrl}/user/search.html?q={quote(keyword)}"
        try:
            # Resolve the real (possibly redirected) URL via Playwright.
            real_url = fetch_real_url_playwright(url)
            # NOTE(review): verify=False skips TLS validation here but not in
            # the other fetchers — presumably the search host has a bad cert.
            response = requests.get(real_url, headers=headers, verify=False, timeout=20)
            response.raise_for_status()
            data = response.json()
            if isinstance(data, int):
                # The endpoint returns a bare int when the keyword is
                # unsupported rather than a result list.
                print(f"[{self.baseUrl}] 搜索失败，返回: {data}")
                return []

            results = []
            for item in data:
                title = item.get("articlename")
                author = item.get("author")
                href = item.get("url_list")
                img = item.get("url_img")
                intro = item.get("intro")
                if title and href:
                    # The API returns root-relative paths; absolutize them.
                    full_url = f"{self.baseUrl}{href}" if href.startswith("/") else href
                    results.append(
                        {
                            "title": title,
                            "author": author,
                            # The chapter index lives at <book-url>list.html.
                            "url": full_url + "list.html",
                            "source": SearchSource.BQG_128.value,
                            "cover": img if img else None,
                            "intro": intro if intro else None,
                        }
                    )
            return results
        except Exception as e:
            # Best-effort: a failed search degrades to "no results".
            print(f"[{self.baseUrl}] 搜索失败: {e}")
            return []

    def fetch_chapter(self, url: str) -> dict:
        """Fetch a book's chapter-index page.

        Returns ``{"title": <book title>, "chapters": [{"name", "url"}, ...]}``.
        Raises ValueError when no chapter links are found, which usually
        means the page layout changed.
        """
        real_url = fetch_real_url_playwright(url)
        response = requests.get(real_url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        # Book title.
        title_node = soup.select_one(".title")
        title = title_node.get_text(strip=True) if title_node else ""

        # Collect chapter links; inline-styled anchors are header/footer
        # chrome, not chapters, so skip them.
        chapter_links = []
        for a in soup.select(".book_last a"):
            if a.has_attr("style"):
                continue
            name = a.get_text(strip=True)
            href = a.get("href")
            if href and name:
                chapter_links.append({"name": name, "url": urljoin(real_url, href)})

        if not chapter_links:
            raise ValueError("未找到章节内容，页面结构可能已变")

        return {"title": title, "chapters": chapter_links}

    def fetch_novel_content(self, url: str) -> dict:
        """Fetch one chapter's full text, following in-chapter pagination.

        Returns ``{"title": <chapter title>, "content": <joined text>}``.
        """
        content_parts = []
        title = None
        visited_urls = set()  # guards against pagination loops

        base = url  # first page; next-page links must stay in this chapter

        while url and url not in visited_urls:
            visited_urls.add(url)
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            if not title:
                title_node = soup.select_one(".title")
                if title_node:
                    # Strip a leading "<n>）" page counter from the title.
                    title = re.sub(r"^\d+）", "", title_node.get_text(strip=True))

            content_node = soup.select_one("#chaptercontent")
            if not content_node:
                break

            # Turn <br> tags into newlines before extracting text.
            for br in content_node.find_all("br"):
                br.replace_with("\n")

            raw_text = content_node.get_text("\n", strip=True)
            content_lines = raw_text.splitlines()

            if not content_lines:
                break

            # Drop site boilerplate ("please bookmark" / "friendly reminder").
            filter_keywords = ["请收藏：", "温馨提示"]
            content_lines = [
                line
                for line in content_lines
                if not any(k in line for k in filter_keywords)
            ]

            content = "\n".join(content_lines).strip()
            if content:
                # Skip pages whose text was entirely boilerplate, so the
                # final join has no blank gaps.
                content_parts.append(content)

            # Follow the "next page" link within the same chapter, if any.
            next_link = soup.select_one("#pb_next")
            if not next_link:
                break
            next_href = next_link.get("href")
            if not next_href or next_href == "#" or not next_href.endswith(".html"):
                break
            next_url = merge_relative_url(base, next_href)
            if next_url in visited_urls or not is_same_chapter(base, next_url):
                break
            url = next_url

        full_content = "\n".join(content_parts).strip()
        return {"title": title or "", "content": full_content}
