"""搜索编排服务 - 整合搜索、解析和存储"""
import logging
import asyncio
from typing import List, Dict, Optional, Any
from datetime import datetime

from app.services.search.web_search import WebSearchService
from app.services.search.question_parser import QuestionParser
from app.services.search.storage_service import StorageService

logger = logging.getLogger(__name__)


class SearchOrchestrator:
    """Orchestrates the full pipeline: web search -> page fetch -> parse -> persist."""

    def __init__(self):
        # Collaborating services; each owns one pipeline stage.
        self.web_search = WebSearchService()
        self.parser = QuestionParser()
        self.storage = StorageService()

    async def search_and_save_questions(
        self,
        years: Optional[List[int]] = None,
        subjects: Optional[List[str]] = None,
        search_record_id: Optional[int] = None,
        max_results: int = 50
    ) -> Dict[str, Any]:
        """
        Search for exam questions, parse the resulting pages, and save them.

        Args:
            years: Exam years to search for (service default when None).
            subjects: Subjects to search for (service default when None).
            search_record_id: ID of a pre-created search record to update
                with progress and final status; no record updates when None.
            max_results: Maximum number of search results to fetch and
                parse (bounds the amount of network work per run).

        Returns:
            On success, a dict with status 'completed' and the counters
            total_searched / total_parsed / total_saved / total_skipped /
            total_failed; on failure, a dict with status 'failed' and the
            error message under 'error'.
        """
        try:
            # 1. Run the web search for candidate pages.
            logger.info("开始搜索法考真题...")
            search_results = self.web_search.search_exam_questions(years=years, subjects=subjects)

            if search_record_id:
                self.storage.update_search_record(
                    search_record_id,
                    status='running',
                    total_found=len(search_results)
                )

            # 2. Fetch page contents and parse questions out of them.
            logger.info(f"开始解析 {len(search_results)} 个搜索结果...")
            selected = search_results[:max_results]  # bound the work per run

            # Fetch all pages concurrently. return_exceptions=True captures
            # per-page failures so one bad fetch does not abort the batch.
            # (Awaiting bare coroutines one at a time would serialize the
            # downloads and defeat the purpose of the async fetcher.)
            contents = await asyncio.gather(
                *(self.web_search.fetch_page_content_async(r['url']) for r in selected),
                return_exceptions=True
            )

            all_questions = []
            for result, content in zip(selected, contents):
                if isinstance(content, BaseException):
                    logger.error(f"处理搜索结果失败 {result.get('url')}: {str(content)}")
                    continue
                if not content:
                    # Empty/None page body: nothing to parse, silently skip.
                    continue
                try:
                    questions = self.parser.parse_web_content(content)
                except Exception as e:
                    # A parse failure for one page must not kill the batch.
                    logger.error(f"处理搜索结果失败 {result.get('url')}: {str(e)}")
                    continue
                # Tag every parsed question with the site it came from.
                for q in questions:
                    q['source_site'] = result.get('source', '')
                all_questions.extend(questions)

            # 3. Persist parsed questions in one batch.
            logger.info(f"开始保存 {len(all_questions)} 个题目...")
            stats = self.storage.save_questions_batch(all_questions)

            # 4. Mark the search record as finished with final counts.
            if search_record_id:
                self.storage.update_search_record(
                    search_record_id,
                    status='completed',
                    total_found=len(search_results),
                    total_saved=stats['saved']
                )

            return {
                'status': 'completed',
                'total_searched': len(search_results),
                'total_parsed': len(all_questions),
                'total_saved': stats['saved'],
                'total_skipped': stats['skipped'],
                'total_failed': stats['failed']
            }

        except Exception as e:
            # Top-level boundary: log with traceback, record the failure,
            # and return a structured error instead of propagating.
            logger.error(f"搜索和保存过程失败: {str(e)}", exc_info=True)

            if search_record_id:
                self.storage.update_search_record(
                    search_record_id,
                    status='failed',
                    error_message=str(e)
                )

            return {
                'status': 'failed',
                'error': str(e)
            }

