# -*- coding: utf-8 -*-
"""
小说爬虫示例
演示如何爬取小说数据
"""

import time
import asyncio
from typing import List, Dict, Any, Optional
from crawlers.base_crawler import BaseCrawler
from parsers.novel_parser import NovelParser
from storage.file_storage import FileStorage
from utils.logger import get_logger, log_crawler_info, log_crawler_error
from config.settings import NOVEL_SITES

class NovelCrawler(BaseCrawler):
    """Crawler for novel websites.

    Fetches novel listings, detail pages, chapter lists and chapter
    contents.  HTML parsing is delegated to ``NovelParser`` and
    persistence to ``FileStorage``; site-specific behaviour (encoding,
    URL rules) is driven by the ``site`` key looked up in ``NOVEL_SITES``.
    """

    def __init__(self, site: str = "generic", page_delay: float = 2.0,
                 chapter_delay: float = 1.0):
        """
        Args:
            site: Site key selecting parser rules and per-site config.
            page_delay: Seconds to sleep between list-page requests
                (previously hard-coded to 2).
            chapter_delay: Seconds to sleep between chapter requests
                (previously hard-coded to 1).
        """
        super().__init__(f"NovelCrawler_{site}")
        self.site = site
        self.parser = NovelParser()
        self.storage = FileStorage()
        self.site_config = NOVEL_SITES.get(site, {})
        # Every request uses the same site encoding; look it up once
        # instead of on every fetch.
        self.encoding = self.site_config.get("encoding", "utf-8")
        self.page_delay = page_delay
        self.chapter_delay = chapter_delay

    def parse_page(self, html: str, url: str) -> Dict[str, Any]:
        """Dispatch to the chapter or detail parser based on the URL.

        URLs containing "chapter" or "read" are treated as chapter pages;
        everything else is parsed as a novel detail page.  Always returns
        a dict (empty on parse failure).
        """
        if "chapter" in url or "read" in url:
            return self.parser.parse_chapter_content(html, self.site, url) or {}
        return self.parser.parse_novel_detail(html, self.site, url) or {}

    def crawl_novel_list(self, search_url: str, max_pages: int = 1) -> List[Dict[str, Any]]:
        """Crawl a paginated novel listing.

        Args:
            search_url: URL of the first search/listing page.
            max_pages: Maximum number of pages to fetch.

        Returns:
            All novels parsed across the crawled pages (may be empty).
        """
        log_crawler_info(f"开始爬取小说列表: {search_url}")

        all_novels: List[Dict[str, Any]] = []

        for page in range(1, max_pages + 1):
            try:
                url = self._build_page_url(search_url, page)
                log_crawler_info(f"爬取第 {page} 页: {url}")

                html = self.get_html(url, self.encoding)
                if not html:
                    # Skip a failed page but keep trying later ones.
                    log_crawler_error(f"获取页面失败: {url}")
                    continue

                novels = self.parser.parse_novel_list(html, self.site)
                if novels:
                    all_novels.extend(novels)
                    log_crawler_info(f"第 {page} 页解析成功，获取 {len(novels)} 本小说")
                else:
                    # An empty page means we ran past the last results page.
                    log_crawler_info(f"第 {page} 页没有找到小说")
                    break

                # Be polite: pause between page requests.
                time.sleep(self.page_delay)

            except Exception as e:
                log_crawler_error(f"爬取第 {page} 页失败: {str(e)}")
                break

        log_crawler_info(f"小说列表爬取完成，共获取 {len(all_novels)} 本小说")
        return all_novels

    @staticmethod
    def _build_page_url(search_url: str, page: int) -> str:
        """Return the URL for *page*, appending a ``page`` query param after page 1."""
        if page == 1:
            return search_url
        separator = "&" if "?" in search_url else "?"
        return f"{search_url}{separator}page={page}"

    def crawl_novel_detail(self, novel_url: str) -> Optional[Dict[str, Any]]:
        """Crawl a novel's detail page.

        Args:
            novel_url: URL of the novel detail page.

        Returns:
            Parsed novel info, or ``None`` on fetch/parse failure.
        """
        log_crawler_info(f"爬取小说详情: {novel_url}")

        try:
            html = self.get_html(novel_url, self.encoding)
            if not html:
                log_crawler_error(f"获取小说详情页失败: {novel_url}")
                return None

            novel = self.parser.parse_novel_detail(html, self.site, novel_url)
            if novel:
                log_crawler_info(f"小说详情解析成功: {novel.get('title', 'Unknown')}")
                return novel

            log_crawler_error(f"小说详情解析失败: {novel_url}")
            return None

        except Exception as e:
            log_crawler_error(f"爬取小说详情失败: {str(e)}")
            return None

    def crawl_chapter_list(self, novel_url: str) -> List[Dict[str, Any]]:
        """Crawl a novel's chapter list.

        Args:
            novel_url: URL of the novel (detail page).

        Returns:
            Parsed chapter entries; empty list on any failure.
        """
        log_crawler_info(f"爬取章节列表: {novel_url}")

        try:
            # Some sites host the table of contents at a different URL.
            chapter_list_url = self._build_chapter_list_url(novel_url)

            html = self.get_html(chapter_list_url, self.encoding)
            if not html:
                log_crawler_error(f"获取章节列表页失败: {chapter_list_url}")
                return []

            chapters = self.parser.parse_chapter_list(html, self.site, novel_url)
            log_crawler_info(f"章节列表解析成功，共 {len(chapters)} 章")
            return chapters

        except Exception as e:
            log_crawler_error(f"爬取章节列表失败: {str(e)}")
            return []

    def crawl_chapter_content(self, chapter_url: str) -> Optional[Dict[str, Any]]:
        """Crawl a single chapter's content.

        Args:
            chapter_url: URL of the chapter page.

        Returns:
            Parsed chapter info, or ``None`` on fetch/parse failure.
        """
        log_crawler_info(f"爬取章节内容: {chapter_url}")

        try:
            html = self.get_html(chapter_url, self.encoding)
            if not html:
                log_crawler_error(f"获取章节页面失败: {chapter_url}")
                return None

            chapter = self.parser.parse_chapter_content(html, self.site, chapter_url)
            if chapter:
                log_crawler_info(f"章节内容解析成功: {chapter.get('title', 'Unknown')}")
                return chapter

            log_crawler_error(f"章节内容解析失败: {chapter_url}")
            return None

        except Exception as e:
            log_crawler_error(f"爬取章节内容失败: {str(e)}")
            return None

    def crawl_complete_novel(self, novel_url: str, max_chapters: int = 10) -> Dict[str, Any]:
        """Crawl a complete novel: detail info plus chapter contents.

        Args:
            novel_url: URL of the novel detail page.
            max_chapters: Maximum number of chapters to fetch.

        Returns:
            Detail dict enriched with ``chapters``, ``total_chapters`` and
            ``crawled_at``; the bare detail dict if no chapters were found;
            an empty dict if even the detail page failed.
        """
        log_crawler_info(f"开始爬取完整小说: {novel_url}")

        novel_detail = self.crawl_novel_detail(novel_url)
        if not novel_detail:
            return {}

        chapters = self.crawl_chapter_list(novel_url)
        if not chapters:
            log_crawler_info("没有找到章节列表")
            return novel_detail

        # Cap the amount of work per novel.
        chapters = chapters[:max_chapters]

        chapter_contents = []
        for i, chapter in enumerate(chapters, 1):
            log_crawler_info(f"爬取章节 {i}/{len(chapters)}: {chapter.get('title', 'Unknown')}")

            chapter_url = chapter.get('url')
            if not chapter_url:
                # Malformed entry; nothing to fetch.
                continue

            chapter_content = self.crawl_chapter_content(chapter_url)
            if chapter_content:
                chapter_contents.append(chapter_content)

            # Throttle to avoid hammering the site.
            time.sleep(self.chapter_delay)

        complete_novel = {
            **novel_detail,
            "chapters": chapter_contents,
            "total_chapters": len(chapter_contents),
            "crawled_at": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        log_crawler_info(f"完整小说爬取完成: {novel_detail.get('title', 'Unknown')} - {len(chapter_contents)} 章")
        return complete_novel

    def _build_chapter_list_url(self, novel_url: str) -> str:
        """Map a novel URL to its table-of-contents URL for known sites.

        Falls back to the novel URL itself for unknown sites (the detail
        page then doubles as the chapter list).
        """
        if self.site == "qidian":
            novel_id = self.parser.cleaner.extract_novel_id(novel_url)
            if novel_id:
                return f"https://book.qidian.com/info/{novel_id}/#Catalog"
        elif self.site == "zongheng":
            return novel_url.replace("/book/", "/chapter/")
        elif self.site == "17k":
            return novel_url.replace("/book/", "/list/")

        return novel_url

    def save_novel_data(self, novel_data: Dict[str, Any], filename: Optional[str] = None) -> bool:
        """Persist crawled novel data as JSON (and TXT when chapters exist).

        Args:
            novel_data: Complete novel dict as produced by
                ``crawl_complete_novel``.
            filename: Target JSON filename; auto-generated from the novel
                id and a timestamp when omitted.

        Returns:
            ``True`` if the JSON save succeeded, ``False`` otherwise
            (the TXT export does not affect the result, as before).
        """
        try:
            if not filename:
                novel_id = novel_data.get('id', novel_data.get('novel_id', 'unknown'))
                timestamp = time.strftime("%Y%m%d_%H%M%S")
                filename = f"novel_{novel_id}_{timestamp}.json"

            success = self.storage.save_novel_json(novel_data, filename)

            # Mirror the data as plain text when chapter bodies are present.
            if novel_data.get('chapters'):
                txt_filename = filename.replace('.json', '.txt')
                self.storage.save_novel_txt(novel_data, novel_data['chapters'], txt_filename)

            return success

        except Exception as e:
            log_crawler_error(f"保存小说数据失败: {str(e)}")
            return False

def main():
    """Entry point: crawl one example novel end-to-end and print a summary."""
    log_crawler_info("启动小说爬虫示例")

    def show_summary(novel_data):
        # Compact, human-readable summary of the crawled novel.
        print("\n小说信息:")
        print(f"标题: {novel_data.get('title', 'N/A')}")
        print(f"作者: {novel_data.get('author', 'N/A')}")
        print(f"简介: {novel_data.get('summary', 'N/A')[:100]}...")
        print(f"章节数: {novel_data.get('total_chapters', 0)}")

    # NOTE: placeholder URL — swap in a real novel-site URL before
    # running against live data.
    example_urls = ["https://example.com/novel/123"]

    with NovelCrawler("generic") as crawler:
        for url in example_urls:
            try:
                novel_data = crawler.crawl_complete_novel(url, max_chapters=3)
                if not novel_data:
                    print(f"爬取失败: {url}")
                    continue
                crawler.save_novel_data(novel_data)
                show_summary(novel_data)
            except Exception as e:
                log_crawler_error(f"处理URL失败 {url}: {str(e)}")

    log_crawler_info("小说爬虫示例完成")

if __name__ == "__main__":
    main() 