"""
BBQD 小说网站爬虫
"""
import re
from typing import List, Dict, Any, Optional
from urllib.parse import quote_plus, urljoin
from bs4 import BeautifulSoup, Tag
import requests

from scrapers.base_scraper import BaseScraper
from scrapers.common import is_same_chapter, merge_relative_url
from utils import fetch_page_content, fetch_real_url
from scrapers.constant import SearchSource
from config import config
from logger import logger


class BBqdScraper(BaseScraper):
    """Scraper for the 笔趣读 (m.bbiqudu.com) mobile novel site.

    Supports searching for novels, walking the paginated chapter
    catalogue, and stitching together paginated chapter bodies.
    Network/parsing failures are logged and degrade to empty results
    rather than raising to the caller.
    """

    # This site hides navigation targets in inline
    # ``onclick="location.href='...'"`` handlers instead of plain hrefs.
    _ONCLICK_URL_RE = re.compile(r"location\.href='(.*?)'")

    # Continuation pages of a chapter end in ``_<page>.html`` with
    # page >= 2 (the first page carries no suffix).  Matches 2-9 as well
    # as 10 and above so chapters longer than nine pages are not
    # truncated; page "1" is deliberately excluded to avoid looping back
    # to the first page.
    _CONTENT_PAGE_RE = re.compile(r"_([2-9]|[1-9]\d+)\.html$")

    @property
    def base_url(self) -> str:
        """Root URL of the mobile site."""
        return "https://m.bbiqudu.com"

    @property
    def site_name(self) -> str:
        """Human-readable site name, used as the logger tag."""
        return "笔趣读"

    def search(self, keyword: str) -> List[Dict[str, Any]]:
        """Search the site for novels matching *keyword*.

        Args:
            keyword: Search term; URL-quoted before being sent.

        Returns:
            A list of result dicts with keys ``title``, ``author``,
            ``url``, ``source``, ``intro`` and ``cover``.  An empty
            list on any failure (the error is logged, not raised).
        """
        url = f"{self.base_url}/search/?searchkey={quote_plus(keyword)}"
        logger.info(f"搜索关键词: {keyword}", self.site_name)

        try:
            _, html = fetch_page_content(url)
            soup = BeautifulSoup(html, "html.parser")

            results: List[Dict[str, Any]] = []
            for item in soup.select(".book-view-box"):
                title_node = item.select_one(".book-info h4 a")
                author_node = item.select_one(".book-info .author")
                cover_node = item.select_one(".book-img a img")

                if not (title_node and author_node):
                    continue
                href = title_node.get("href")
                if not href:
                    continue

                cover = cover_node.get("src") if cover_node else ""
                results.append({
                    "title": title_node.get_text(strip=True),
                    "author": author_node.get_text(strip=True).removeprefix("作者："),
                    # The chapter catalogue lives at the book URL with a
                    # trailing "1" (first catalogue page).
                    "url": self.base_url + href + "1",
                    "source": SearchSource.BBQD.value,
                    "intro": "",
                    "cover": self.base_url + cover if cover else "",
                })

            logger.info(f"搜索完成，找到 {len(results)} 个结果", self.site_name)
            return results

        except Exception as e:
            logger.error(f"搜索失败: {e}", self.site_name)
            return []

    def fetch_chapter(self, url: str) -> Dict[str, Any]:
        """Fetch the full chapter list of a novel.

        Follows "next page" links through the paginated catalogue; a
        visited-set guards against navigation cycles.

        Args:
            url: URL of the novel's catalogue page.

        Returns:
            Dict with ``title`` (novel title, "" if nothing could be
            fetched) and ``chapters`` (list of ``{"name", "url"}``).
        """
        visited_urls: set = set()
        current_url: Optional[str] = url
        title: Optional[str] = None
        chapter_links: List[Dict[str, str]] = []

        logger.info(f"开始获取章节列表: {url}", self.site_name)

        while current_url and current_url not in visited_urls:
            visited_urls.add(current_url)
            # The catalogue URL may redirect; resolve it first.
            real_url = fetch_real_url(current_url)

            try:
                response = requests.get(
                    real_url,
                    headers=config.DEFAULT_HEADERS,
                    timeout=config.REQUEST_TIMEOUT,
                )
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "html.parser")

                # The novel title only needs to come from the first page.
                if not title:
                    title_node = soup.select_one(".bookname h1 a")
                    title = title_node.get_text(strip=True) if title_node else "未知小说"

                # Chapter links are carried in onclick handlers, not hrefs.
                for a in soup.select(".chapter-list a"):
                    name = a.get_text(strip=True)
                    href = self._extract_onclick_url(a)
                    if href and name:
                        chapter_links.append({
                            "name": name,
                            "url": urljoin(self.base_url, href),
                        })

                current_url = self._find_next_page_url(soup, visited_urls)

            except requests.RequestException as e:
                logger.error(f"获取页面失败: {e}", self.site_name)
                break

        if not chapter_links:
            logger.warning("未找到章节内容，页面结构可能已变", self.site_name)
            return {"title": title or "", "chapters": []}

        logger.info(f"获取到 {len(chapter_links)} 个章节", self.site_name)
        return {"title": title, "chapters": chapter_links}

    def fetch_novel_content(self, url: str) -> Dict[str, Any]:
        """Fetch one chapter's text, joining its continuation pages.

        Args:
            url: URL of the chapter's first page.

        Returns:
            Dict with ``title`` (chapter title, "" if nothing could be
            fetched) and ``content`` (newline-joined cleaned text).
        """
        content_parts: List[str] = []
        title: Optional[str] = None
        visited_urls: set = set()
        # Keep the first-page URL: continuation links are resolved
        # against it and must stay within the same chapter.
        base_url = url
        current_url: Optional[str] = url

        logger.info(f"开始获取章节内容: {url}", self.site_name)

        while current_url and current_url not in visited_urls:
            visited_urls.add(current_url)

            try:
                response = requests.get(
                    current_url,
                    headers=config.DEFAULT_HEADERS,
                    timeout=config.REQUEST_TIMEOUT,
                )
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "html.parser")

                # The chapter title only needs to come from the first page.
                if not title:
                    title_node = soup.select_one("#chaptername")
                    title = title_node.get_text(strip=True) if title_node else "未知章节"

                content = self._extract_content(soup)
                if content:
                    content_parts.append(content)

                current_url = self._find_next_content_page(soup, base_url, visited_urls)

            except requests.RequestException as e:
                logger.error(f"获取章节内容失败: {e}", self.site_name)
                break

        full_content = "\n".join(content_parts).strip()
        logger.info(f"章节内容获取完成，长度: {len(full_content)}", self.site_name)

        return {"title": title or "", "content": full_content}

    def _extract_onclick_url(self, node: Tag) -> Optional[str]:
        """Extract the target URL from a node's onclick handler.

        Args:
            node: An HTML element, typically an ``<a>`` tag.

        Returns:
            The URL embedded in ``location.href='...'``, or None when
            the attribute is absent or does not match.
        """
        onclick = node.get("onclick", "")
        match = self._ONCLICK_URL_RE.search(onclick)
        return match.group(1) if match else None

    def _find_next_page_url(self, soup: BeautifulSoup, visited_urls: set) -> Optional[str]:
        """Find the next catalogue page's URL, skipping visited ones.

        Args:
            soup: Parsed catalogue page.
            visited_urls: Absolute URLs already crawled.

        Returns:
            Absolute URL of the next unvisited page, or None.
        """
        for next_link in soup.select(".listpage .right a"):
            href = self._extract_onclick_url(next_link)
            if href:
                next_url = urljoin(self.base_url, href)
                if next_url not in visited_urls:
                    return next_url
        return None

    def _find_next_content_page(self, soup: BeautifulSoup, base_url: str, visited_urls: set) -> Optional[str]:
        """Find the next continuation page of the current chapter.

        Args:
            soup: Parsed chapter page.
            base_url: First-page URL the link is resolved against.
            visited_urls: Absolute URLs already crawled.

        Returns:
            Absolute URL of the next page, or None when there is no
            unvisited continuation page belonging to the same chapter.
        """
        next_link = soup.select_one("#next_url")
        if next_link:
            next_href = self._extract_onclick_url(next_link)
            if next_href and self._is_valid_content_page_url(next_href):
                next_url = urljoin(base_url, next_href)
                # is_same_chapter prevents following the "next page"
                # button when it actually points at the next chapter.
                if next_url not in visited_urls and is_same_chapter(base_url, next_url):
                    return next_url
        return None

    def _is_valid_content_page_url(self, url: str) -> bool:
        """Return True if *url* looks like a chapter continuation page.

        Matches ``_<page>.html`` with page >= 2 (two-digit-and-up pages
        included, so chapters longer than nine pages are followed).
        """
        return bool(self._CONTENT_PAGE_RE.search(url))

    def _extract_content(self, soup: BeautifulSoup) -> str:
        """Extract and clean the chapter text from a parsed page.

        Args:
            soup: Parsed chapter page.

        Returns:
            Newline-joined paragraph text with site boilerplate and
            filtered phrases removed; "" when the content node is absent.
        """
        content_node = soup.select_one("#chaptercontent")
        if not content_node:
            return ""

        # Turn <br> tags into newlines so line breaks survive get_text.
        for br in content_node.find_all("br"):
            br.replace_with("\n")

        # Keep paragraphs that carry text and no site-ad keywords.
        content_lines = []
        for p in content_node.find_all("p"):
            text = p.get_text(strip=True)
            if text and not any(keyword in text for keyword in config.SITE_EXCLUDE_KEYWORDS):
                content_lines.append(text)

        # Second pass: drop lines matching the generic content filters.
        filtered_lines = [
            line for line in content_lines
            if not any(keyword in line for keyword in config.CONTENT_FILTER_KEYWORDS)
        ]

        return "\n".join(filtered_lines).strip()
