#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: base_engine.py
Author: StellaJL
Date: 2025-09-16 14:56:21
Version: 1.0.0
Description: Base crawler engine class
"""

import asyncio
import inspect
import os
from collections.abc import Callable

import aiohttp
import yaml

from contentDownloader import ContentDownloader
from logger import setup_logging
from urlPool import URLPool

# Load the configuration file
def load_config(config_path='config.yaml'):
    """Read the YAML configuration file at *config_path* and return its parsed contents.

    Args:
        config_path: Path of the YAML configuration file.

    Returns:
        The object produced by ``yaml.safe_load`` (typically a dict).

    Raises:
        FileNotFoundError: If no file exists at ``config_path``.
    """
    if not os.path.exists(config_path):
        raise FileNotFoundError(f"配置文件 {config_path} 未找到。")
    with open(config_path, 'r', encoding='utf-8') as config_file:
        config = yaml.safe_load(config_file)
    return config

class BaseEngine:
    """
    基础爬虫引擎类
    """
    
    def __init__(self, start_urls: list[str], parse_func: callable, save_func: callable, config_path: str = 'config.yaml'):
        """
        初始化基础引擎
        
        Args:
            start_urls: 初始URL列表
            parse_func: 解析函数，接收 (content, url) 参数，返回 (parsed_data, new_urls) 或 None
                       - parsed_data: list[dict] 解析后的数据列表，可以为空列表
                       - new_urls: list[str] 新发现的URL列表，可以为空列表
                       - 返回 None 表示解析失败
            save_func: 保存函数，接收 (parsed_data, url) 参数
            config_path: 配置文件路径
        """
        self.start_urls = start_urls
        self.parse_func = parse_func
        self.save_func = save_func
        self.config_path = config_path
        
        # 加载配置
        self.config = load_config(config_path)
        
        # 初始化日志
        self.logger = setup_logging(
            log_level=self.config.get('logging', {}).get('level', 'INFO'),
            log_dir=self.config.get('logging', {}).get('dir', 'logs')
        )
        
        # 初始化组件
        self.url_pool = URLPool()
        self.downloader = ContentDownloader(timeout=self.config.get('crawler', {}).get('timeout', 10), config_path=config_path)
        
        # 从配置获取爬虫设置
        crawler_config = self.config.get('crawler', {})
        self.concurrency = crawler_config.get('concurrency', 5)
        self.worker_count = crawler_config.get('workers', 3)
        self.timeout = crawler_config.get('timeout', 10)
        
        # 创建信号量限制并发数
        self.semaphore = asyncio.Semaphore(self.concurrency)
        
        self.logger.info("基础引擎初始化完成")
        self.logger.info(f"配置信息: {self.config}")
        self.logger.info(f"设置并发限制: {self.concurrency}")
        self.logger.info(f"设置超时时间: {self.timeout}秒")

    async def start(self):
        """
        启动爬虫引擎
        """
        self.logger.info("基础引擎启动")
        
        # 使用ContentDownloader的上下文管理器
        async with self.downloader:
            # 添加初始URL
            for url in self.start_urls:
                await self.url_pool.add_url(url)
                self.logger.info(f"添加初始URL: {url}")

            # 启动多个worker协程
            self.logger.info(f"启动 {self.worker_count} 个Worker协程")
            tasks = [asyncio.create_task(self._worker(i)) for i in range(self.worker_count)]
            await asyncio.gather(*tasks)
            self.logger.info("所有任务完成。")

    async def _worker(self, worker_id: int):
        """
        工作协程
        
        Args:
            worker_id: 工作协程ID
        """
        self.logger.info(f"Worker {worker_id} 启动")
        while True:
            url = await self.url_pool.get_url()
            if url is None:  # URL池为空且无新URL时退出
                self.logger.info(f"Worker {worker_id} 结束，URL池为空")
                break
            
            async with self.semaphore:  # 控制并发
                self.logger.info(f"Worker {worker_id} 开始下载: {url}")
                try:
                    # 下载内容
                    content = await self._download(worker_id, url)
                    if not content:
                        continue
                    
                    # 解析数据
                    parsed_data, new_urls = await self._parse(worker_id, content, url)
                    if parsed_data is None:
                        continue
                    
                    # 保存数据
                    await self._save(worker_id, parsed_data, url)
                    
                    # 处理新URL
                    await self._add_new_urls(worker_id, new_urls)
                    
                except asyncio.CancelledError:
                    self.logger.info(f"Worker {worker_id} 被取消")
                    raise
                except Exception as e:
                    self.logger.error(f"Worker {worker_id} 处理 {url} 时发生错误: {e}")

    async def _download(self, worker_id: int, url: str) -> str | None:
        """
        下载内容
        
        Args:
            worker_id: 工作协程ID
            url: 要下载的URL
            
        Returns:
            下载的内容，失败时返回None
        """
        try:
            content = await self.downloader.download(url)
            if not content:
                self.logger.warning(f"Worker {worker_id} 从 {url} 下载内容失败")
                return None
            return content
        except Exception as e:
            self.logger.error(f"Worker {worker_id} 下载 {url} 时发生错误: {e}")
            return None

    async def _parse(self, worker_id: int, content: str, url: str) -> tuple[list[dict] | None, list[str] | None]:
        """
        解析内容
        
        Args:
            worker_id: 工作协程ID
            content: 要解析的内容
            url: 内容来源URL
            
        Returns:
            (parsed_data, new_urls) 元组，解析失败时返回 (None, None)
        """
        try:
            parse_result = self.parse_func(content, url)
            if parse_result is None:
                self.logger.warning(f"Worker {worker_id} 解析函数返回 None")
                return None, None
            
            # 检查解析结果格式
            if not isinstance(parse_result, (list, tuple)) or len(parse_result) != 2:
                self.logger.warning(f"Worker {worker_id} 解析结果格式错误，期望 (parsed_data, new_urls) 元组")
                return None, None
            
            parsed_data, new_urls = parse_result
            
            # 规范化 parsed_data
            if not isinstance(parsed_data, list):
                self.logger.warning(f"Worker {worker_id} parsed_data 不是列表类型，已转换为空列表")
                parsed_data = []
            
            # 规范化 new_urls
            if not isinstance(new_urls, list):
                self.logger.warning(f"Worker {worker_id} new_urls 不是列表类型，已转换为空列表")
                new_urls = []
            
            return parsed_data, new_urls
            
        except Exception as e:
            self.logger.error(f"Worker {worker_id} 解析 {url} 时发生错误: {e}")
            return None, None

    async def _save(self, worker_id: int, parsed_data: list[dict], url: str):
        """
        保存数据
        
        Args:
            worker_id: 工作协程ID
            parsed_data: 要保存的数据列表
            url: 数据来源URL
        """
        try:
            if not parsed_data or len(parsed_data) == 0:
                self.logger.info(f"Worker {worker_id} 解析数据为空，跳过保存")
                return
            
            await self.save_func(parsed_data, url)
            self.logger.info(f"Worker {worker_id} 成功保存 {len(parsed_data)} 条数据")
            
        except Exception as e:
            self.logger.error(f"Worker {worker_id} 保存数据时发生错误: {e}")

    async def _add_new_urls(self, worker_id: int, new_urls: list[str]):
        """
        添加新发现的URL
        
        Args:
            worker_id: 工作协程ID
            new_urls: 新发现的URL列表
        """
        try:
            if not new_urls or len(new_urls) == 0:
                self.logger.info(f"Worker {worker_id} 未发现新URL")
                return
            
            added_count = 0
            for new_url in new_urls:
                if isinstance(new_url, str) and new_url.strip():
                    await self.url_pool.add_url(new_url)
                    self.logger.info(f"Worker {worker_id} 添加新URL: {new_url}")
                    added_count += 1
                else:
                    self.logger.warning(f"Worker {worker_id} 发现无效URL: {new_url}")
            
            if added_count > 0:
                self.logger.info(f"Worker {worker_id} 共添加了 {added_count} 个新URL")
                
        except Exception as e:
            self.logger.error(f"Worker {worker_id} 添加新URL时发生错误: {e}")





