# -*- coding: utf-8 -*-
"""
基础爬虫示例
演示如何使用爬虫框架的基本功能
"""

import time
from crawlers.base_crawler import BaseCrawler
from parsers.novel_parser import NovelParser
from storage.file_storage import FileStorage
from utils.logger import get_logger, log_crawler_info

class BasicNovelCrawler(BaseCrawler):
    """Basic novel-crawler example demonstrating the crawler framework.

    Wires together a parser (NovelParser) and a storage backend
    (FileStorage) on top of BaseCrawler, and shows page crawling,
    URL-based parse dispatch, and file download.
    """

    # Default URLs crawled when crawl_example_site() gets no argument
    # (httpbin endpoints used as harmless test targets).
    DEFAULT_EXAMPLE_URLS = (
        "https://httpbin.org/html",     # HTML test page
        "https://httpbin.org/json",     # JSON test
        "https://httpbin.org/headers",  # request-header echo
    )

    def __init__(self):
        super().__init__("BasicNovelCrawler")
        self.parser = NovelParser()
        self.storage = FileStorage()

    def parse_page(self, html: str, url: str) -> dict:
        """
        Parse page content (implements the abstract method).

        Dispatches on URL keywords: URLs containing "novel" or "book"
        go to the novel-detail parser, URLs containing "chapter" to the
        chapter parser; anything else gets a minimal summary dict.

        Args:
            html: raw HTML content of the page
            url: page URL (used only for dispatch)

        Returns:
            The parser's result dict, or
            {"url", "type": "unknown", "content_length"} for
            unrecognized pages.
        """
        if "novel" in url or "book" in url:
            return self.parser.parse_novel_detail(html, "generic", url)
        if "chapter" in url:
            return self.parser.parse_chapter_content(html, "generic", url)
        return {"url": url, "type": "unknown", "content_length": len(html)}

    def crawl_example_site(self, urls=None, delay: float = 2.0):
        """
        Crawl a list of URLs and persist the successful results.

        Args:
            urls: iterable of URLs to crawl; defaults to
                DEFAULT_EXAMPLE_URLS when None.
            delay: seconds to sleep between consecutive requests
                (politeness delay; not applied after the last URL).

        Returns:
            List of result dicts for the pages that crawled successfully.
        """
        log_crawler_info("开始爬取示例网站")

        urls = list(self.DEFAULT_EXAMPLE_URLS if urls is None else urls)
        results = []

        for index, url in enumerate(urls):
            log_crawler_info(f"正在爬取: {url}")

            result = self.crawl_page(url)

            if result:
                results.append(result)
                log_crawler_info(f"爬取成功: {url}")
            else:
                log_crawler_info(f"爬取失败: {url}")

            # Throttle between requests only — the original slept even
            # after the final URL, wasting `delay` seconds per run.
            if index < len(urls) - 1:
                time.sleep(delay)

        # Persist only when at least one page succeeded.
        if results:
            self.storage.save_novels_json(results, "example_crawl_results.json")
            log_crawler_info(f"爬取完成，共获取 {len(results)} 个结果")

        return results

    def test_download_file(self):
        """Exercise the file-download helper with a sample PNG.

        Returns:
            True if the download succeeded, False otherwise.
        """
        log_crawler_info("测试文件下载功能")

        # Download a sample image from httpbin.
        image_url = "https://httpbin.org/image/png"
        filepath = "downloaded_image.png"

        success = self.download_file(image_url, filepath)

        if success:
            log_crawler_info(f"文件下载成功: {filepath}")
        else:
            log_crawler_info("文件下载失败")

        return success

def main():
    """Entry point: run the basic crawler demo end to end.

    Crawls the example site, tests the download helper, and prints a
    short summary of every crawl result.
    """
    log_crawler_info("启动基础爬虫示例")

    # Context manager ensures the crawler's resources are released.
    with BasicNovelCrawler() as crawler:
        crawl_results = crawler.crawl_example_site()
        crawler.test_download_file()

        # Print a one-block summary per result.
        for index, item in enumerate(crawl_results, start=1):
            print(f"\n结果 {index}:")
            print(f"URL: {item.get('url', 'N/A')}")
            print(f"类型: {item.get('type', 'N/A')}")
            print(f"内容长度: {item.get('content_length', 'N/A')}")

    log_crawler_info("基础爬虫示例完成")

# Script entry point: run the demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main() 