# -*- coding: utf-8 -*-
"""
信访分类模块，处理信访分类的判断
"""

import sys
import os

# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from llm import quick_chat
from prompts import get_system_prompt
from .config import LLM_CONFIG, USE_SILICONFLOW
from .dict_manager import get_dict_name_cached
from .clean_llm_result import clean_llm_result


async def classify_xflb(case_content: str) -> tuple[str, str]:
    """Classify a petition (信访) case via the configured LLM.

    Args:
        case_content: Raw petition case text to be classified.

    Returns:
        A ``(raw, converted)`` tuple: the cleaned raw label emitted by the
        model, and its dictionary-converted canonical name. Returns
        ``("", "")`` if any step fails.
    """
    # NOTE(review): declared async but nothing here is awaited; quick_chat
    # appears to be synchronous (its result is used directly) — confirm.
    try:
        # Select endpoint and model from the global deployment flag.
        if USE_SILICONFLOW:
            base_url = LLM_CONFIG["BASE_URL_dev"]
            model = LLM_CONFIG["MODEL_dev"]
            print(f"使用硅基流动配置: {base_url}, 模型: {model}")
        else:
            base_url = LLM_CONFIG["BASE_URL_1238"]
            model = LLM_CONFIG["MODEL_14B"]
            print(f"使用原有配置: {base_url}, 模型: {model}")

        # SiliconFlow needs an explicit API key; the in-house endpoint does
        # not. Pass it as a conditional kwarg instead of duplicating the call.
        extra_kwargs = {"api_key": LLM_CONFIG["API_KEY"]} if USE_SILICONFLOW else {}
        result = quick_chat(
            base_url=base_url,
            model=model,
            system_prompt=get_system_prompt("xflb"),
            user_prompt=case_content,
            temperature=0.5,
            **extra_kwargs,
        )
        raw_xflb = result.text.strip()
        # Strip <think> tags and Markdown code-fence markers from the output.
        cleaned_raw_xflb = clean_llm_result(raw_xflb)
        # Map the raw label to its canonical dictionary name.
        converted_xflb = get_dict_name_cached("信访性质", cleaned_raw_xflb)
        print(f"信访分类结果: {cleaned_raw_xflb} -> {converted_xflb}")
        return cleaned_raw_xflb, converted_xflb
    except Exception as e:
        # Best-effort contract: log the failure and return empty strings
        # rather than propagate, so callers can proceed with blank fields.
        print(f"信访分类失败: {str(e)}")
        return "", ""
