#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
主工作流模块

包含增强版多轮思考智能诊断工作流的实现
"""

import asyncio
import logging
import time
from datetime import datetime
from typing import Dict, List, Any, Optional, Union

# LlamaIndex相关导入
from llama_index.core.workflow import (
    Context, Workflow, StartEvent, StopEvent, step
)
from llama_index.core import PromptTemplate

from .data_structures import AnalysisContext, QueryResult, DiagnosisResult, DataQualityMetrics
from .events import ProblemClassifiedEvent, MultiRoundAnalysisEvent, QueryExecutionEvent, NextQueryRecommendationEvent, QueryExecutionPlanEvent
from .enums import ProblemType, DiagnosisStrategy, QueryType
from .sql_client import SQLService
from .query_planner import IntelligentQueryPlanner
from .query_generator import EnhancedQueryGenerator
from .result_evaluator import ResultEvaluator
from biz.core.ai.prompts import PROBLEM_CLASSIFICATION_PROMPT, ANALYSIS_PROMPT

logger = logging.getLogger(__name__)

TEMPLATE_PROBLEM_CLASSIFICATION_PROMPT = PromptTemplate(PROBLEM_CLASSIFICATION_PROMPT)
TEMPLATE_ANALYSIS_PROMPT = PromptTemplate(ANALYSIS_PROMPT)


class MultiRoundDiagnosisWorkflow(Workflow):
    """Multi-round "thinking" intelligent diagnosis workflow.

    Event-driven pipeline (one LlamaIndex ``@step`` per stage):

    1. ``classify_problem``       - classify the user's problem via the LLM.
    2. ``bootstrap_recommendation`` - plan the first query to run.
    3. ``plan_single_query``      - wrap the recommendation in an execution plan.
    4. ``execute_single_query``   - run the planned query against the SQL service.
    5. ``evaluate_single_result`` - judge results; loop back (new recommendation)
                                    or fall through to the final report.
    6. ``generate_final_diagnosis`` - produce the final ``DiagnosisResult``.

    A single mutable ``AnalysisContext`` is threaded through every event and
    accumulates query results and insights across rounds.
    """
    
    def __init__(self, llm: Any, sql_client: SQLService, max_rounds: int = 3, timeout: float = 300.0):
        """Initialize the workflow.

        Args:
            llm: LLM client exposing an async ``acomplete(prompt)`` method.
            sql_client: Service used to execute generated queries
                (``query_devices``).
            max_rounds: Upper bound on analysis rounds before forcing the
                final diagnosis.
            timeout: Overall workflow timeout in seconds, passed to the
                LlamaIndex ``Workflow`` base class.
        """
        super().__init__(timeout=timeout)
        self.llm = llm
        self.sql_client = sql_client
        self.max_rounds = max_rounds
        
        # Collaborating modules; each wraps the same LLM instance.
        self.query_planner = IntelligentQueryPlanner(llm)
        self.query_generator = EnhancedQueryGenerator(llm)
        self.result_evaluator = ResultEvaluator(llm)

    @step
    async def classify_problem(self, ctx: Context, ev: StartEvent) -> ProblemClassifiedEvent:
        """Step 1: classify the user's input and pick a diagnosis strategy.

        On any failure (LLM error, unparseable response, invalid enum value)
        this falls back to an UNKNOWN / MULTI_ROUND_THINKING default context
        so the workflow always proceeds.
        """
        user_input = ev.user_input
        logger.info(f"开始问题分类: {user_input[:100]}...")
        try:
            # Classify using the enhanced prompt template.
            classification_prompt = TEMPLATE_PROBLEM_CLASSIFICATION_PROMPT.format(
                user_input=user_input
            )
            response = await self.llm.acomplete(classification_prompt)
            result = self._parse_classification_response(str(response))
            
            # Build the shared analysis context carried through all later steps.
            # NOTE: ProblemType(...) / DiagnosisStrategy(...) raise ValueError on
            # unexpected LLM output; that is caught by the except below.
            analysis_context = AnalysisContext(
                user_input=user_input,
                problem_type=ProblemType(result.get("problem_type", "unknown")),
                diagnosis_strategy=DiagnosisStrategy(result.get("diagnosis_strategy", "multi_round_thinking")),
                device_ids=result.get("device_ids", []),
                start_time=datetime.now(),
                max_rounds=self.max_rounds,
                current_round=1,
                previous_results=[],
                insights=[]
            )
            
            logger.info(f"问题分类完成: {analysis_context.problem_type.value}, 策略: {analysis_context.diagnosis_strategy.value}")
            return ProblemClassifiedEvent(analysis_context=analysis_context)
        except Exception as e:
            logger.error(f"问题分类失败: {e}")
            # Fall back to a default context so diagnosis can still run.
            analysis_context = AnalysisContext(
                user_input=user_input,
                problem_type=ProblemType.UNKNOWN,
                diagnosis_strategy=DiagnosisStrategy.MULTI_ROUND_THINKING,
                device_ids=[],
                start_time=datetime.now(),
                max_rounds=self.max_rounds,
                current_round=1,
                previous_results=[],
                insights=[]
            )
            return ProblemClassifiedEvent(analysis_context=analysis_context)

    @step
    async def bootstrap_recommendation(self, ctx: Context, ev: ProblemClassifiedEvent) -> NextQueryRecommendationEvent:
        """Step 2: produce the first-round query recommendation.

        Falls back to ``QueryType.DEVICE_BASIC_INFO`` when the planner returns
        nothing or raises.
        """
        analysis_context = ev.analysis_context
        logger.info(f"引导首轮查询推荐，当前轮次: {analysis_context.current_round}")
        try:
            # Ask the intelligent planner for the initial query.
            # Assumes plan_next_query returns a QueryType or a falsy value
            # -- TODO confirm against IntelligentQueryPlanner.
            recommended_query = await self.query_planner.plan_next_query(analysis_context)
            if recommended_query:
                logger.info(f"首轮推荐查询类型: {recommended_query.value}")
                return NextQueryRecommendationEvent(
                    analysis_context=analysis_context,
                    recommended_query_type=recommended_query
                )
            else:
                logger.warning("规划器未返回有效查询，使用默认查询")
                return NextQueryRecommendationEvent(
                    analysis_context=analysis_context,
                    recommended_query_type=QueryType.DEVICE_BASIC_INFO
                )
        except Exception as e:
            logger.error(f"首轮推荐生成失败: {e}")
            # Fall back to the basic device-info query.
            return NextQueryRecommendationEvent(
                analysis_context=analysis_context,
                recommended_query_type=QueryType.DEVICE_BASIC_INFO
            )

    @step
    async def plan_single_query(self, ctx: Context, ev: NextQueryRecommendationEvent) -> QueryExecutionPlanEvent:
        """Step 3: wrap the recommended query into an execution plan.

        An empty ``planned_queries`` list acts as a "force finish" signal that
        ``execute_single_query`` interprets as: skip execution and jump to the
        final analysis.
        """
        analysis_context = ev.analysis_context
        if not ev.recommended_query_type:
            logger.info("未提供有效的推荐查询，跳过执行计划")
            return QueryExecutionPlanEvent(analysis_context=analysis_context, planned_queries=[])
        logger.info(f"生成执行计划，查询: {ev.recommended_query_type.value}")
        return QueryExecutionPlanEvent(analysis_context=analysis_context, planned_queries=[ev.recommended_query_type])

    @step
    async def execute_single_query(self, ctx: Context, ev: QueryExecutionPlanEvent) -> Union[QueryExecutionEvent, MultiRoundAnalysisEvent]:
        """Step 4: execute the planned queries and collect their results.

        Each planned query is generated, executed against the SQL service,
        timed, and scored for data quality. Per-query failures are captured
        as failed ``QueryResult`` entries rather than aborting the round.
        All results are appended to ``analysis_context.previous_results``.
        """
        analysis_context = ev.analysis_context
        
        # An empty plan is the force-finish signal: skip straight to analysis.
        if not ev.planned_queries:
            logger.info("收到强制结束信号，跳转到最终分析")
            return MultiRoundAnalysisEvent(
                analysis_context=analysis_context,
                round_results=analysis_context.previous_results
            )
        
        query_results = []
        
        for planned_query in ev.planned_queries:
            logger.info(f"执行查询: {planned_query.value}")
            
            try:
                # Generate the query text (argument order follows the original
                # generator signature).
                query_text = await self.query_generator.generate_query(planned_query, analysis_context)
                
                # Time only the query execution, not the text generation.
                start_time = time.time()
                
                # Execute against the SQL service. Assumes the response exposes
                # .success, .content and .error_message -- TODO confirm against
                # SQLService.query_devices.
                sql_response = await self.sql_client.query_devices(query_text)
                
                # Wall-clock execution time in seconds.
                execution_time = time.time() - start_time
                
                # Score the returned data's quality.
                data_quality = self._calculate_quality_metrics(sql_response)
                
                # Package everything into a QueryResult record.
                query_result = QueryResult(
                    query_type=planned_query,
                    query_text=query_text,
                    response=sql_response,
                    data_quality=data_quality,
                    execution_time=execution_time,
                    round_number=analysis_context.current_round,
                    success=sql_response.success,
                    error_message=sql_response.error_message if not sql_response.success else None
                )
                
                query_results.append(query_result)
                
                # Extract insights from successful, non-empty responses.
                if sql_response.success and sql_response.content:
                    insights = self._extract_insights_from_results([query_result])
                    analysis_context.insights.extend(insights)
                
                logger.info(f"查询执行完成: {planned_query.value}, 成功: {sql_response.success}, 质量: {data_quality.overall_score:.2f}")
                
            except Exception as e:
                logger.error(f"查询执行失败 {planned_query.value}: {e}")
                
                # Record the failure so the evaluator still sees this round.
                query_result = QueryResult(
                    query_type=planned_query,
                    query_text="",
                    response=None,
                    data_quality=DataQualityMetrics(overall_score=0.0, issues=[f"执行失败: {str(e)}"]),
                    execution_time=0.0,
                    round_number=analysis_context.current_round,
                    success=False,
                    error_message=str(e)
                )
                
                query_results.append(query_result)
        
        # Accumulate this round's results into the shared context.
        analysis_context.previous_results.extend(query_results)
        
        return QueryExecutionEvent(
            analysis_context=analysis_context,
            query_results=query_results
        )

    @step
    async def evaluate_single_result(self, ctx: Context, ev: QueryExecutionEvent) -> Union[NextQueryRecommendationEvent, MultiRoundAnalysisEvent]:
        """Step 5: evaluate this round's results and decide whether to continue.

        Continues (emits another ``NextQueryRecommendationEvent``) only when
        the evaluator asks for continuation, the round budget is not exhausted,
        and a parseable next query type was recommended; otherwise falls
        through to final analysis. Evaluation errors also end the loop.
        """
        analysis_context = ev.analysis_context
        
        logger.info(f"开始评估第{analysis_context.current_round}轮单查询结果...")
        
        try:
            current_insights = self._extract_insights_from_results(ev.query_results)
            analysis_context.insights.extend(current_insights)
            
            # Delegate the judgment to the LLM-backed result evaluator.
            # Assumes the returned dict may contain "need_continuation",
            # "expected_insights" and "next_recommended_query" -- TODO confirm
            # against ResultEvaluator.evaluate_results.
            evaluation_result = await self.result_evaluator.evaluate_results(analysis_context)
            
            analysis_context.needs_continuation = evaluation_result.get("need_continuation", False)
            analysis_context.expected_insights = evaluation_result.get("expected_insights")
            next_query_info = evaluation_result.get("next_recommended_query", {}) or {}
            logger.info("单查询结果评估完成")
            
            # Continue only if all three conditions hold.
            if (
                analysis_context.needs_continuation and 
                analysis_context.current_round < analysis_context.max_rounds and
                next_query_info.get("query_type")
            ):
                # Parse the recommended query type; an unknown value ends the loop.
                recommended_query_type_str = next_query_info.get("query_type")
                try:
                    recommended_query_type = QueryType(recommended_query_type_str)
                except ValueError:
                    logger.warning(f"无法解析推荐的查询类型: {recommended_query_type_str}")
                    return MultiRoundAnalysisEvent(
                        analysis_context=analysis_context,
                        round_results=analysis_context.previous_results
                    )
                analysis_context.current_round += 1
                logger.info(f"基于评估结果，开始第{analysis_context.current_round}轮分析，推荐查询: {recommended_query_type.value}")
                
                return NextQueryRecommendationEvent(
                    analysis_context=analysis_context,
                    recommended_query_type=recommended_query_type
                )
            else:
                completion_reason = (
                    "信息已足够" if not analysis_context.needs_continuation else
                    ("达到最大轮次" if analysis_context.current_round >= analysis_context.max_rounds else "信息已足够")
                )
                logger.info(f"单查询分析完成（{completion_reason}），开始生成最终诊断...")
                return MultiRoundAnalysisEvent(
                    analysis_context=analysis_context,
                    round_results=analysis_context.previous_results
                )
            
        except Exception as e:
            logger.error(f"结果评估失败: {e}")
            return MultiRoundAnalysisEvent(
                analysis_context=analysis_context,
                round_results=analysis_context.previous_results
            )

    @step
    async def generate_final_diagnosis(self, ctx: Context, ev: MultiRoundAnalysisEvent) -> StopEvent:
        """Step 6: generate the final diagnosis report.

        Summarizes all accumulated query results, asks the LLM for the final
        analysis, parses it, and wraps everything into a ``DiagnosisResult``.
        On failure a ``DiagnosisResult`` with status "failed" is returned
        instead of raising, so the workflow always terminates cleanly.
        """
        analysis_context = ev.analysis_context
        
        logger.info("开始生成最终诊断报告...")
        
        try:
            # Assemble the inputs for the final-analysis prompt.
            results_summary = self._prepare_results_summary(analysis_context.previous_results)
            quality_summary_text = self._prepare_quality_summary(analysis_context.previous_results)
            quality_summary_dict = self._calculate_quality_summary(analysis_context.previous_results)
            
            final_analysis_prompt = TEMPLATE_ANALYSIS_PROMPT.format(
                user_input=analysis_context.user_input,
                analysis_rounds=analysis_context.current_round,
                query_results_summary=results_summary,
                data_quality_summary=quality_summary_text,
                current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            )
            
            response = await self.llm.acomplete(final_analysis_prompt)
            final_analysis = str(response)
            parsed_analysis = self._parse_final_analysis(final_analysis)
            
            diagnosis_result = DiagnosisResult(
                status="completed",
                analysis_rounds=analysis_context.current_round,
                total_queries=len(analysis_context.previous_results),
                diagnosis_path=[result.query_type.value for result in analysis_context.previous_results],
                findings=parsed_analysis,
                recommendations=parsed_analysis.get("solution_recommendations", []),
                data_quality_summary=quality_summary_dict,
                execution_time=(datetime.now() - analysis_context.start_time).total_seconds(),
                llm_conclusion=final_analysis
            )
            
            logger.info(f"诊断完成: {analysis_context.current_round}轮, {len(analysis_context.previous_results)}次查询")
            return StopEvent(result=diagnosis_result)
            
        except Exception as e:
            logger.error(f"最终诊断生成失败: {e}")
            # NOTE(review): on failure diagnosis_path carries insights instead of
            # query-type values as in the success path -- confirm this asymmetry
            # is intentional for downstream consumers.
            diagnosis_result = DiagnosisResult(
                status="failed",
                analysis_rounds=analysis_context.current_round,
                total_queries=len(analysis_context.previous_results),
                diagnosis_path=analysis_context.insights,
                findings={"error": str(e)},
                recommendations=["诊断过程出现错误，请检查日志"],
                data_quality_summary={},
                execution_time=(datetime.now() - analysis_context.start_time).total_seconds(),
                llm_conclusion=f"诊断失败: {str(e)}"
            )
            return StopEvent(result=diagnosis_result)

    def _parse_classification_response(self, response_text: str) -> dict:
        """Parse the LLM's problem-classification response.

        Extracts JSON from the raw text via ``parse_json_from_text``; falls
        back to a default classification when required fields are missing or
        the text is unparseable.
        """
        from .json_utils import parse_json_from_text
        
        default_classification = {
            "problem_type": "unknown",
            "diagnosis_strategy": "multi_round_thinking",
            "device_ids": [],
            "urgency_level": 3,
            "reasoning": "默认分类：无法解析响应",
            "key_indicators": []
        }
        
        result = parse_json_from_text(
            text=response_text,
            required_fields=["problem_type", "diagnosis_strategy"],
            default_value=default_classification
        )
        
        # Equality with the default dict indicates parsing fell back.
        if result == default_classification:
            logger.warning("无法解析分类响应，使用默认分类")
        
        return result
    
    def _extract_insights_from_results(self, results: List[QueryResult]) -> List[str]:
        """Derive one human-readable insight string per query result.

        Insights report how many rows a successful list-shaped result
        contained, that non-list data was obtained, or that the query failed.
        """
        insights = []
        
        for result in results:
            if result.success and result.response and result.response.content:
                content = result.response.content
                if isinstance(content, list):
                    count = len(content)
                    if count > 0:
                        insights.append(f"{result.query_type.value}查询发现{count}条相关数据")
                    else:
                        insights.append(f"{result.query_type.value}查询未发现相关数据")
                else:
                    insights.append(f"{result.query_type.value}查询获取到数据")
            else:
                insights.append(f"{result.query_type.value}查询执行失败")
        
        return insights
    
    def _calculate_quality_metrics(self, sql_response: Any) -> DataQualityMetrics:
        """Score a SQL response's data quality on a 0.0-1.0 scale.

        Heuristic scoring: failed query -> 0.0; empty list/dict -> 0.2;
        short list (<3 rows) -> 0.6; list of 3+ rows -> 0.8; non-empty
        dict -> 0.7; any other content type -> 0.3.
        """
        if not sql_response.success:
            return DataQualityMetrics(
                overall_score=0.0,
                issues=[f"查询失败: {sql_response.error_message}"]
            )
        
        content = sql_response.content
        score = 0.5  # Baseline before shape-based adjustments below.
        issues = []
        
        if isinstance(content, list):
            if len(content) == 0:
                issues.append("结果为空")
                score = 0.2
            elif len(content) < 3:
                issues.append("结果数量较少")
                score = 0.6
            else:
                score = 0.8
        elif isinstance(content, dict):
            if not content:
                issues.append("结果为空")
                score = 0.2
            else:
                score = 0.7
        else:
            issues.append("结果格式未知")
            score = 0.3
        
        return DataQualityMetrics(
            overall_score=score,
            issues=issues
        )
    
    def _prepare_results_summary(self, results: List[QueryResult]) -> str:
        """Build a newline-joined, per-result summary for the final prompt.

        Successful results include the row count and raw content; failed
        results include the error message.
        """
        summary_parts = []
        
        for result in results:
            if result.success:
                data_count = len(result.response.content) if result.response and result.response.content else 0
                data_info = f"获得{data_count}条数据，数据内容: {result.response.content}" if data_count > 0 else "空数据"
                summary_parts.append(f"第{result.round_number}轮 {result.query_text}: {data_info}")
            else:
                summary_parts.append(f"第{result.round_number}轮 {result.query_text}: 查询失败 - {result.error_message}")
        
        return "\n".join(summary_parts)
    
    def _prepare_quality_summary(self, results: List[QueryResult]) -> str:
        """Build a one-line textual quality summary across all results.

        Reports success/empty/failure counts and the total number of
        data-quality issues collected.
        """
        if not results:
            return "无数据质量信息"
        
        successful_queries = sum(1 for result in results if result.success)
        total_queries = len(results)
        empty_queries = sum(1 for result in results if result.success and (not result.response or not result.response.content or len(result.response.content) == 0))
        failed_queries = sum(1 for result in results if not result.success)
        
        all_issues = []
        for result in results:
            all_issues.extend(result.data_quality.issues)
        
        return f"成功: {successful_queries}/{total_queries}, 空数据: {empty_queries}, 失败: {failed_queries}, 发现问题: {len(all_issues)}个"
    
    def _prepare_diagnosis_path(self, analysis_context: AnalysisContext) -> str:
        """Render the accumulated insights as a numbered step-by-step path.

        NOTE(review): not referenced by any step in this file -- confirm it is
        used elsewhere before removing.
        """
        path_parts = []
        
        for i, insight in enumerate(analysis_context.insights, 1):
            path_parts.append(f"步骤{i}: {insight}")
        
        return "\n".join(path_parts) if path_parts else "诊断路径: 无"
    
    def _parse_final_analysis(self, analysis_text: str) -> Dict[str, Any]:
        """Parse the LLM's final-analysis text into a structured dict.

        Falls back to a conservative default analysis (unknown root cause,
        generic recommendation) when no JSON can be extracted.
        """
        from .json_utils import parse_json_from_text
        
        default_analysis = {
            "root_cause": "无法确定根本原因",
            "impact_assessment": {
                "severity": "medium",
                "affected_devices": [],
                "business_impact": "影响程度待评估"
            },
            "correlation_insights": ["需要更多数据进行关联分析"],
            "solution_recommendations": [
                {
                    "action": "检查设备状态",
                    "priority": "high",
                    "estimated_time": "30分钟",
                    "resources_needed": "技术人员"
                }
            ],
            "prevention_measures": ["定期检查设备状态"],
            "analysis_summary": "基于有限数据的初步分析"
        }
        
        return parse_json_from_text(
            text=analysis_text,
            default_value=default_analysis
        )

    def _calculate_quality_summary(self, results: List[QueryResult]) -> Dict[str, float]:
        """Compute numeric quality aggregates over all query results.

        Returns a dict of floats: mean quality score ("overall"), success /
        empty / failure rates, total issue count, and total query count.
        All values are 0.0 when ``results`` is empty.
        """
        if not results:
            return {
                "overall": 0.0,
                "success_rate": 0.0,
                "empty_rate": 0.0,
                "failure_rate": 0.0,
                "issues_count": 0.0,
                "total_queries": 0.0,
            }
        
        overall_scores = [r.data_quality.overall_score for r in results]
        overall = sum(overall_scores) / len(overall_scores) if overall_scores else 0.0
        
        total_queries = len(results)
        successful_queries = sum(1 for r in results if r.success)
        empty_queries = sum(1 for r in results if r.success and (not r.response or not r.response.content or len(r.response.content) == 0))
        failed_queries = sum(1 for r in results if not r.success)
        issues_count = float(sum(len(r.data_quality.issues) for r in results))
        
        return {
            "overall": float(overall),
            "success_rate": successful_queries / total_queries if total_queries else 0.0,
            "empty_rate": empty_queries / total_queries if total_queries else 0.0,
            "failure_rate": failed_queries / total_queries if total_queries else 0.0,
            "issues_count": issues_count,
            "total_queries": float(total_queries),
        }