#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
结果评估器模块

评估查询结果并推荐下一查询类型
"""

import logging
from typing import List, Dict, Any, Optional
from llama_index.core import PromptTemplate
from .data_structures import AnalysisContext, QueryResult, DataQualityMetrics
from .enums import QueryType
from biz.core.ai.prompts import RESULT_EVALUATION_PROMPT

logger = logging.getLogger(__name__)


class ResultEvaluator:
    """Result evaluator - judges analysis results and recommends the next query.

    Feeds the accumulated query history to an LLM and parses its JSON verdict:
    whether more analysis rounds are needed, which information gaps remain, and
    which query type should run next. Falls back to a conservative default
    evaluation whenever the LLM call or parsing fails.
    """

    def __init__(self, llm):
        # LLM client; must expose an async ``acomplete(prompt)`` method.
        self.llm = llm

        # Pre-built prompt template used by evaluate_results().
        self.result_evaluation_prompt = PromptTemplate(RESULT_EVALUATION_PROMPT)

    async def evaluate_results(self, analysis_context: "AnalysisContext") -> Dict[str, Any]:
        """Evaluate analysis results and recommend the next query.

        Args:
            analysis_context: Accumulated analysis state (user input, problem
                type, device ids, round counters, previous query results).

        Returns:
            A dict with the original field layout: ``need_continuation``,
            ``completeness_score``, ``information_gaps``,
            ``next_recommended_query`` and ``reasoning``. On any error the
            default evaluation from ``_get_default_evaluation`` is returned.
        """
        try:
            # Build a human-readable history of previous queries for the prompt.
            query_history_details = self._generate_query_history_details(analysis_context.previous_results)

            prompt = self.result_evaluation_prompt.format(
                user_input=analysis_context.user_input,
                problem_type=analysis_context.problem_type.value if analysis_context.problem_type else "未知",
                device_ids=", ".join(analysis_context.device_ids) if analysis_context.device_ids else "未指定",
                current_round=analysis_context.current_round,
                max_rounds=analysis_context.max_rounds,
                query_history_details=query_history_details
            )

            response = await self.llm.acomplete(prompt)
            evaluation_result = self._parse_evaluation_response(str(response))

            # Hard stop once the round budget is exhausted, regardless of the
            # LLM's recommendation.
            if analysis_context.current_round >= analysis_context.max_rounds:
                evaluation_result["need_continuation"] = False
                # ``reasoning`` may be missing or null in the LLM output;
                # ``or ""`` prevents ``None + str`` raising TypeError.
                evaluation_result["reasoning"] = ((evaluation_result.get("reasoning") or "") + " (已达到最大分析轮次)").strip()

            # Log the recommended follow-up query, if any. Guard against the
            # LLM emitting a non-dict value for this field.
            next_query = evaluation_result.get("next_recommended_query") or {}
            if isinstance(next_query, dict) and next_query.get("query_type"):
                logger.info(f"推荐下一个查询: {next_query['query_type']}, 理由: {next_query.get('reasoning', '未提供')}")

            logger.info(f"结果评估完成，需要继续: {evaluation_result.get('need_continuation', False)}")
            return evaluation_result

        except Exception as e:
            logger.error(f"结果评估失败: {str(e)}")
            # Fall back to a conservative default evaluation.
            return self._get_default_evaluation(analysis_context)

    def _generate_query_history_details(self, results: List["QueryResult"]) -> str:
        """Render the query history as a text block for the evaluation prompt.

        Each round lists the query type, question, outcome (row count or error
        message), execution time and — when present — data-quality metrics.
        """
        if not results:
            return "暂无查询历史"

        history_parts = []
        for i, result in enumerate(results, 1):
            history_parts.append("------------------------")
            history_parts.append(f"第{i}轮查询:")
            history_parts.append(f"查询类型: {result.query_type.value}")
            history_parts.append(f"查询问题: {result.query_text}")

            if result.success:
                # Guard against a "successful" result whose response payload is
                # unexpectedly missing (would otherwise raise AttributeError).
                content = result.response.content if result.response else None
                data_count = len(content) if content else 0
                history_parts.append(f"查询结果: 成功获取{data_count}条数据")

                # Include a truncated sample of the returned data.
                if content and data_count > 0:
                    history_parts.append(f"数据示例: {str(content)[:1000]}...")
            else:
                history_parts.append(f"查询结果: 失败 - {result.error_message or '未知错误'}")

            history_parts.append(f"执行时间: {result.execution_time:.2f}秒")

            # Data-quality metrics are optional on QueryResult.
            if hasattr(result, 'data_quality') and result.data_quality:
                history_parts.append(f"数据质量评分: {result.data_quality.overall_score:.2f}")
                if result.data_quality.issues:
                    issues_summary = "; ".join(result.data_quality.issues[:2])  # show first 2 issues only
                    history_parts.append(f"质量问题: {issues_summary}")

        history_parts.append("------------------------")
        return "\n".join(history_parts)

    def _parse_evaluation_response(self, response_text: str) -> Dict[str, Any]:
        """Parse the LLM's evaluation response into a dict.

        Falls back to the default evaluation when the response contains no
        JSON, or when the parsed JSON is not an object — callers rely on
        ``.get(...)``, so a list or scalar would raise AttributeError.
        """
        from .json_utils import parse_json_from_text

        result = parse_json_from_text(
            text=response_text,
            default_value=None
        )

        # parse_json_from_text may yield a list/scalar for malformed output;
        # only a dict is usable downstream.
        if not isinstance(result, dict):
            logger.warning("解析评估响应失败，使用默认评估")
            return self._get_default_evaluation()
        return result

    def _get_default_evaluation(self, analysis_context: Optional["AnalysisContext"] = None) -> Dict[str, Any]:
        """Build a fallback evaluation used when the LLM result is unavailable.

        With a context, continuation is suggested only while the round budget
        allows it and either nothing has succeeded yet or this is the first
        round; without one, a conservative "continue" verdict is returned.
        """
        if analysis_context:
            has_successful_queries = any(result.success for result in analysis_context.previous_results)
            need_continuation = (
                analysis_context.current_round < analysis_context.max_rounds and
                (not has_successful_queries or analysis_context.current_round == 1)
            )
            completeness_score = 0.5
            reasoning = "默认评估策略，建议继续收集数据"
        else:
            # No context available: be conservative and keep collecting data.
            need_continuation = True
            completeness_score = 0.4
            reasoning = "使用默认评估策略，建议继续收集基础数据"

        return {
            "need_continuation": need_continuation,
            "completeness_score": completeness_score,
            "information_gaps": [
                {
                    "gap_type": "数据不足",
                    "description": "需要更多数据支撑分析",
                    "impact": "影响问题定位准确性"
                }
            ],
            "next_recommended_query": {
                "query_type": "fault_records",
                "reasoning": "故障记录是问题分析的基础数据",
                "expected_insights": "获取故障发生的详细信息"
            },
            "reasoning": reasoning
        }