import os
import json
import yaml
from typing import List, Dict, Any, Optional
from llm.model_factory import ModelFactory
from config import MODEL_NAME
from utils.tools import _parse_llm_response


class IntentRecognitionService:
    """Recognize user intent by prompting an LLM with configured templates.

    Templates are loaded from ``config/prompts.yaml``; the model client is
    obtained from :class:`ModelFactory`.
    """

    def __init__(self, model_name: Optional[str] = None):
        # Model client selection is delegated to the factory.
        self.model_client = ModelFactory.create_model()
        self.prompts = self._load_prompts()
        # Bug fix: the original ignored the model_name argument and always
        # used MODEL_NAME. Honor an explicit override, falling back to the
        # configured default when none is given (backward-compatible).
        self.model_name = model_name or MODEL_NAME

    def _call_llm(self, system_prompt: str, user_prompt: str) -> str:
        """Send a system/user prompt pair to the model and return its reply text."""
        return self.model_client.chat(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            model=self.model_name,
        )

    def _load_prompts(self) -> Dict[str, Dict[str, str]]:
        """Load the prompt template configuration from ``config/prompts.yaml``.

        Note: the path is relative to the current working directory —
        presumably the service is launched from the project root; verify
        against the deployment setup.
        """
        config_path = os.path.join('config', 'prompts.yaml')
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def recognize_intent(self, user_input: str) -> dict:
        """Classify ``user_input`` via the LLM and return the parsed result.

        The model is expected to answer with JSON; when its output cannot be
        parsed, a fallback ``{"Unknown": ...}`` dict is returned instead.
        """
        system_prompt = self.prompts['intent_recognition']['system']
        user_prompt = self.prompts['intent_recognition']['user'].format(
            user_input=user_input)

        response = self._call_llm(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
        )
        # Attempt to parse the model output as JSON, with a safe default.
        return _parse_llm_response(response, default_value={"Unknown": "模型输出解析失败"})
