"""
测算结果智能评价
"""
from typing import Dict, Any
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from .model_factory import ModelFactory
from .prompt_templates import EVALUATION_PROMPT


class Evaluator:
    """
    Intelligent evaluator for calculation results.

    Feeds scene data and result data through an LLMChain built from
    ``EVALUATION_PROMPT`` and post-processes the model's free-text answer
    into structured metrics and recommendations.
    """
    def __init__(self, model_name: str = "chatgpt"):
        """
        Args:
            model_name: Model key understood by ModelFactory (default "chatgpt").
        """
        self.model = ModelFactory().get_model(model_name)
        self.chain = self._create_evaluation_chain()

    def _create_evaluation_chain(self) -> LLMChain:
        """
        Build the evaluation chain: EVALUATION_PROMPT rendered with the two
        inputs "scene_data" and "result_data", sent to the configured model.
        """
        prompt = PromptTemplate(
            input_variables=["scene_data", "result_data"],
            template=EVALUATION_PROMPT
        )
        return LLMChain(llm=self.model, prompt=prompt)

    def evaluate(self, scene_data: Dict[str, Any], result_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Evaluate calculation results.

        Args:
            scene_data: Scene data interpolated into the prompt.
            result_data: Calculation result data interpolated into the prompt.

        Returns:
            Dict with the raw "evaluation" text plus heuristically parsed
            "key_metrics" (Dict[str, str]) and "recommendations" (list of str).
        """
        response = self.chain.run(
            scene_data=scene_data,
            result_data=result_data
        )

        return {
            "evaluation": response,
            "key_metrics": self._extract_key_metrics(response),
            "recommendations": self._extract_recommendations(response)
        }

    def _extract_key_metrics(self, evaluation: str) -> Dict[str, str]:
        """
        Extract "name: value" metric pairs from the evaluation text.

        Heuristic parser (LLM output format is free text, so this is best
        effort): a line — optionally bulleted — containing an ASCII ":" or
        full-width "：" separator whose value part contains at least one
        digit is treated as a metric. Non-matching or purely narrative
        responses yield an empty dict, preserving the previous stub behavior.
        """
        metrics: Dict[str, str] = {}
        for raw_line in evaluation.splitlines():
            line = raw_line.strip().lstrip("-*•").strip()
            # Accept both ASCII and full-width (CJK) colon separators.
            for sep in (":", "："):
                if sep in line:
                    key, _, value = line.partition(sep)
                    key, value = key.strip(), value.strip()
                    # Require a digit in the value so section headings like
                    # "Conclusion:" are not misread as metrics.
                    if key and value and any(ch.isdigit() for ch in value):
                        metrics[key] = value
                    break
        return metrics

    def _extract_recommendations(self, evaluation: str) -> list:
        """
        Extract bulleted or numbered recommendation lines.

        Heuristic parser: lines starting with a list marker ("-", "*", "•",
        or "1." / "1)" / "1、" style numbering) are collected in order.
        Responses without list markers yield an empty list, preserving the
        previous stub behavior.
        """
        recommendations: list = []
        for raw_line in evaluation.splitlines():
            line = raw_line.strip()
            if not line:
                continue
            if line[0] in "-*•":
                text = line[1:].strip()
            elif line[0].isdigit():
                # Numbered item: digits followed by ".", ")" or "、".
                i = 0
                while i < len(line) and line[i].isdigit():
                    i += 1
                if i < len(line) and line[i] in ".)、":
                    text = line[i + 1:].strip()
                else:
                    continue  # bare number at line start, not a list item
            else:
                continue
            if text:
                recommendations.append(text)
        return recommendations