"""
应用核心功能模块
"""
from typing import List, Dict, Any
from urllib.parse import urlparse
from scrapers import BaseScraper, Bqg128Scraper, BqgCCScraper, XS520Scraper, BBqdScraper
from scrapers.constant import SearchSource
from logger import logger


def search_results(keyword: str) -> List[Dict[str, Any]]:
    """Search every supported site for a novel.

    Args:
        keyword: The search term to submit to each scraper.

    Returns:
        Search-result dicts in discovery order, de-duplicated so each
        "url" appears at most once.
    """
    logger.info(f"开始跨站搜索: {keyword}")

    # One instance per supported backend.
    sources: List[BaseScraper] = [
        Bqg128Scraper(),
        BqgCCScraper(),
        XS520Scraper(),
        BBqdScraper(),
    ]

    deduped: List[Dict[str, Any]] = []
    visited_urls = set()

    # Query each site in turn (kept sequential for simplicity); a failure
    # on one site is logged and must not abort the remaining sites.
    for source in sources:
        try:
            hits = source.search(keyword)
            for hit in hits:
                url = hit["url"]
                # First occurrence of a URL wins; later duplicates are dropped.
                if url not in visited_urls:
                    visited_urls.add(url)
                    deduped.append(hit)

            logger.info(f"{source.site_name} 搜索完成，找到 {len(hits)} 个结果")

        except Exception as e:
            logger.error(f"{source.site_name} 搜索失败: {e}")
            continue

    logger.info(f"搜索完成，总计找到 {len(deduped)} 个不重复结果")
    return deduped


def fetch_chapter_list(url: str) -> Dict[str, Any]:
    """Fetch a novel's chapter index from its table-of-contents page.

    Args:
        url: URL of the novel's catalogue page.

    Returns:
        A dict with "title" and "chapters"; both are empty when the site
        is unsupported or fetching fails.
    """
    host = urlparse(url).netloc.lower()
    logger.info(f"获取章节列表: {host}")

    # Route the domain to its scraper; unknown domains get an empty result.
    site_scraper = SearchSource.get_scraper(host)
    if not site_scraper:
        logger.warning(f"不支持的网站: {host}")
        return {"title": "", "chapters": []}

    try:
        data = site_scraper.fetch_chapter(url)
        logger.info(f"获取到 {len(data.get('chapters', []))} 个章节")
        return data
    except Exception as e:
        # Best-effort: log and hand back the empty shape rather than raise.
        logger.error(f"获取章节列表失败: {e}")
        return {"title": "", "chapters": []}


def fetch_novel_content(url: str) -> Dict[str, Any]:
    """Fetch the text of a single novel chapter.

    Args:
        url: URL of the chapter page.

    Returns:
        A dict with "title" and "content"; both are empty strings when the
        site is unsupported or fetching fails.
    """
    host = urlparse(url).netloc.lower()
    logger.info(f"获取章节内容: {host}")

    # Route the domain to its scraper; unknown domains get an empty result.
    site_scraper = SearchSource.get_scraper(host)
    if not site_scraper:
        logger.warning(f"不支持的网站: {host}")
        return {"title": "", "content": ""}

    try:
        data = site_scraper.fetch_novel_content(url)
        logger.info(f"内容获取完成，长度: {len(data.get('content', ''))}")
        return data
    except Exception as e:
        # Best-effort: log and hand back the empty shape rather than raise.
        logger.error(f"获取章节内容失败: {e}")
        return {"title": "", "content": ""}


# https://m.bbiqudu.com/search/?searchkey=%E8%AF%A1%E7%A7%98
