from PerformanceForecastParser import PerformanceForecastParser
from AssetRestructureParser import AssetRestructureParser
from AnnualReportParser import AnnualReportParser
from loguru import logger
import time
import random

class NewsCrawlerScheduler:
    """Dispatches news crawling to tag-specific parsers and merges the results.

    Holds one long-lived parser instance per news category and routes each
    requested tag to the appropriate parser via an internal tag mapping.
    """

    def __init__(self):
        # One parser instance per category, created once and reused for
        # every batch (parsers are assumed stateless between calls).
        self.parsers = {
            'performance_forecast': PerformanceForecastParser(),
            'asset_restructure': AssetRestructureParser(),
            'annual_report': AnnualReportParser()
        }

    def crawl_news_batch(self, tags=None, max_pages=3):
        """Crawl news for each tag and return one merged, de-duplicated list.

        Args:
            tags: Iterable of tag strings to crawl. Defaults to the standard
                annual-report tag set. Tags with no mapped parser are skipped
                with a warning.
            max_pages: Maximum number of result pages to fetch per tag.

        Returns:
            list[dict]: All fetched news items, de-duplicated by title
            (first occurrence wins; untitled items are dropped).
        """
        if tags is None:
            tags = ['年报预期', '业绩预告', 'A股', '股市', '财经']

        all_news = []

        for tag in tags:
            logger.info(f"开始爬取标签 '{tag}' 的年报新闻")

            parser = self._select_parser(tag)
            if parser is None:
                # No parser is mapped for this tag (e.g. 'A股', '股市',
                # '财经'). Skip it — and skip the politeness delay too,
                # since no request was made.
                logger.warning(f"标签 '{tag}' 没有对应的解析器，已跳过")
                continue

            all_news.extend(parser.get_news_list(tag=tag, max_pages=max_pages))

            # Random delay between real requests to avoid hammering the site.
            time.sleep(random.uniform(3, 5))

        return self._deduplicate_news(all_news)

    def _select_parser(self, tag):
        """Return the parser instance responsible for *tag*, or None if unmapped."""
        tag_mapping = {
            '业绩预告': 'performance_forecast',
            '资产重组': 'asset_restructure',
            '年报预期': 'annual_report',
            '年报': 'annual_report'
        }
        return self.parsers.get(tag_mapping.get(tag))

    def _deduplicate_news(self, news_list):
        """De-duplicate news items by title, preserving first-seen order.

        NOTE: items with a missing or empty 'title' are discarded entirely,
        since there is no key to de-duplicate them on.
        """
        seen_titles = set()
        unique_news = []

        for news in news_list:
            title = news.get('title', '')
            if title and title not in seen_titles:
                seen_titles.add(title)
                unique_news.append(news)

        return unique_news