import json
from dataclasses import dataclass
from typing import Dict, List, Optional

import openai

from model_config.config import CONFIG
from utils.data_processing import JsonExtractor
from utils.llm_client import LLMClient, llm_chat
from utils.log_recording import Logging

@dataclass
class QuestionAnalysisResult:
    """Result of analyzing one question for complexity."""

    # Whether the question was judged complex enough to split into sub-questions.
    needs_decomposition: bool
    # Sub-question dicts produced by decomposition; None when no decomposition
    # was performed. (Annotation fixed: the default None requires Optional.)
    sub_questions: Optional[List[Dict]] = None

class QuestionAnalyzer(Logging):
    """Question-analysis agent: decides whether a question is complex and, if
    so, asks an LLM to decompose it into ordered sub-questions."""

    def __init__(self, model_name: str):
        """Configure the agent; *model_name* is used for both the complexity
        check and the decomposition call."""
        super().__init__()
        self.model_analyze_complexity = model_name
        self.model_decompose_question = model_name
        self.json_extractor = JsonExtractor()

    def analyze_question(self, question: str) -> QuestionAnalysisResult:
        """Analyze *question* and decompose it only when needed.

        Returns:
            QuestionAnalysisResult whose ``sub_questions`` is None when the
            complexity check decides no decomposition is required.
        """
        needs_decomposition = self._analyze_complexity(question)
        sub_questions = self._decompose_question(question) if needs_decomposition else None
        return QuestionAnalysisResult(needs_decomposition, sub_questions)

    def _analyze_complexity(self, question: str) -> bool:
        """Ask the LLM whether *question* needs to be decomposed.

        Returns:
            True only when the model's stripped reply is exactly "需要"
            ("needed"); any other reply — including "不需要" — yields False.

        Raises:
            ValueError: the model call raised a ValueError.
            RuntimeError: any other unexpected failure (details are logged).
        """
        prompt = f"""
        你是一个二分类模型（类别为：“需要”和“不需要”），请分析以下问题的复杂度，判断是否需要分解成子问题:
        {question}

        你只需返回 "需要" 或 "不需要"！
        """
        try:
            result = llm_chat(
                messages=[{"role": "user", "content": prompt}],
                model_name=self.model_analyze_complexity,
                temperature=0,
            )
            print("模型返回结果:", result)
            # Exact equality is deliberate: "不需要" contains "需要" as a
            # substring, so a containment test would misread the negative answer.
            return result.strip() == "需要"
        except ValueError as e:
            self.log_error(f"调用模型时发生值错误: {str(e)}")
            raise ValueError("模型调用过程中发生错误，可能是输入或输出不符合预期。") from e
        except Exception as e:
            # Log message corrected: this is the complexity check, not decomposition.
            self.log_error(f"问题复杂度分析失败，未预期的错误: {str(e)}")
            raise RuntimeError("问题复杂度分析过程发生未预期的错误，请检查日志详情。") from e

    def _decompose_question(self, question: str) -> List[Dict]:
        """Decompose a complex *question* into a list of sub-question dicts.

        The prompt requests id/question/order/dependencies/combine_method keys
        for each sub-question; the actual shape depends on the model's output
        as parsed by JsonExtractor — TODO confirm against JsonExtractor.

        Raises:
            ValueError: the model reply could not be parsed as JSON, or the
                model call raised a ValueError.
            RuntimeError: any other unexpected failure (details are logged).
        """
        prompt = f"""
                请将以下复杂问题分解为多个简单的子问题：
                {question}

                对于每个子问题，请提供：
                1. 子问题描述
                2. 子问题的执行顺序
                3. 子问题之间的依赖关系
                4. 如何组合子问题的结果

                请确保内容严格遵循JSON格式（格式如下），不要添加任何其他文本：
                [
                    {{
                        "id": "sub_q1",
                        "question": "子问题描述",
                        "order": 1,
                        "dependencies": [],
                        "combine_method": "描述如何组合结果"
                    }},
                    ...
                ]
                """

        try:
            result = llm_chat(
                messages=[{"role": "user", "content": prompt}],
                model_name=self.model_decompose_question,
                temperature=0.2,
            )
            print("模型返回结果:", result)
            # Fallback re-prompt handed to JsonExtractor so it can ask the LLM
            # to re-emit strict JSON when the raw reply is not directly parseable.
            prompt_extract_json = '''"[
                                {
                                    "id": "sub_q1",
                                    "question": "子问题描述",
                                    "order": 1,
                                    "dependencies": [],
                                    "combine_method": "描述如何组合结果"
                                },
                                ...
                            ]"
                            请从以下文本中提取JSON内容，并确保内容严格遵循JSON格式（格式如上述双引号中的内容所示），不使用代码块格式，不要添加任何其他文本：\n''' + result

            extracted_data = self.json_extractor.extract_json_content(result, prompt_extract_json)
            return extracted_data
        except json.JSONDecodeError as e:
            self.log_error("无法解析子问题分解结果，请检查LLM的响应格式。")
            raise ValueError("LLM返回的结果格式错误，无法解析为JSON") from e
        except ValueError as e:
            self.log_error(f"调用模型时发生值错误: {str(e)}")
            raise ValueError("模型调用过程中发生错误，可能是输入或输出不符合预期。") from e
        except Exception as e:
            self.log_error(f"分解问题失败，未预期的错误: {str(e)}")
            raise RuntimeError("问题分解过程发生未预期的错误，请检查日志详情。") from e

if __name__ == "__main__":
    # Demo run: analyze a multi-part financial comparison question.
    analyzer = QuestionAnalyzer(model_name="GLM_4_FLASH")
    demo_question = "比较永泰能源在2019年和2020年的业务变更次数和信用事件次数，计算2020年相对于2019年的变化率。哪个指标的变化更大？"
    result = analyzer.analyze_question(demo_question)
    print("需要分解的问题：", result.needs_decomposition)
    if result.needs_decomposition:
        print("子问题列表：", result.sub_questions)