# -*- coding: utf-8 -*-
"""
摘要和分类提取模块，处理问题总结和问题分类的提取
"""

import sys
import os

# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from llm import quick_chat
from prompts import get_system_prompt
from common.config import LLM_CONFIG, USE_SILICONFLOW
from common.clean_llm_result import clean_llm_result
from .dict_manager import get_dict_name_cached


async def extract_wtzy(case_content: str, xflb_type: str) -> str:
    """Extract the problem summary for a petition case via the LLM.

    Args:
        case_content: Raw case text, sent to the model as the user prompt.
        xflb_type: Petition classification label; selects the system prompt
            ("检举控告" uses the jjkg summarizer, the other known categories
            use the non-jjkg summarizer).

    Returns:
        The cleaned summary string, or "" when the classification type is
        unknown or the LLM call fails (errors are logged, never raised).
    """
    try:
        # Choose the system prompt from the petition-classification type.
        if "检举控告" in xflb_type:
            system_prompt = get_system_prompt("jjkg_summarizer")
        elif any(
            keyword in xflb_type for keyword in ["业务范围外", "意见建议", "申诉"]
        ):
            system_prompt = get_system_prompt("no_jjkg_summarizer")
        else:
            print(f"未知的信访分类类型: {xflb_type}")
            return ""

        # Select endpoint/model and (for SiliconFlow only) the API key in a
        # single branch — previously USE_SILICONFLOW was tested twice, with
        # the whole quick_chat call duplicated just to add/omit api_key.
        if USE_SILICONFLOW:
            base_url = LLM_CONFIG["BASE_URL_dev"]
            model = LLM_CONFIG["MODEL_dev"]
            extra_kwargs = {"api_key": LLM_CONFIG["API_KEY"]}
            print(f"使用硅基流动配置: {base_url}, 模型: {model}")
        else:
            base_url = LLM_CONFIG["BASE_URL_1244"]
            model = LLM_CONFIG["MODEL_72B"]
            extra_kwargs = {}
            print(f"使用原有配置: {base_url}, 模型: {model}")

        result = quick_chat(
            base_url=base_url,
            model=model,
            system_prompt=system_prompt,
            user_prompt=case_content,
            temperature=0.5,
            **extra_kwargs,
        )

        # Strip <think> tags and Markdown code-fence markers from the output.
        cleaned_wtzy_result = clean_llm_result(result.text.strip())
        print(f"问题总结提取结果: {cleaned_wtzy_result}")
        return cleaned_wtzy_result
    except Exception as e:
        # Best-effort contract: log the failure and return empty rather
        # than propagate, so one failed extraction doesn't abort the batch.
        print(f"提取问题总结失败: {str(e)}")
        return ""


async def extract_fl(case_content: str, xflb_type: str) -> list:
    """Extract the problem classification list for a petition case via the LLM.

    Args:
        case_content: Raw case text, sent to the model as the user prompt.
        xflb_type: Petition classification label; selects the system prompt
            ("检举控告" vs. "业务范围外" classification prompts).

    Returns:
        A list of classification strings split on ";" from the model output
        (whitespace-stripped, empty items dropped), or [] when the type is
        unknown or the LLM call fails (errors are logged, never raised).
    """
    try:
        # Choose the system prompt from the petition-classification type.
        if "检举控告" in xflb_type:
            system_prompt = get_system_prompt("jjkg_problem_classification")
        elif "业务范围外" in xflb_type:
            system_prompt = get_system_prompt("yww_problem_classification")
        else:
            print(f"未知的信访分类类型: {xflb_type}")
            return []

        # Select endpoint/model and (for SiliconFlow only) the API key in a
        # single branch — previously USE_SILICONFLOW was tested twice, with
        # the whole quick_chat call duplicated just to add/omit api_key.
        if USE_SILICONFLOW:
            base_url = LLM_CONFIG["BASE_URL_dev"]
            model = LLM_CONFIG["MODEL_dev"]
            extra_kwargs = {"api_key": LLM_CONFIG["API_KEY"]}
            print(f"使用硅基流动配置: {base_url}, 模型: {model}")
        else:
            base_url = LLM_CONFIG["BASE_URL_1244"]
            model = LLM_CONFIG["MODEL_72B"]
            extra_kwargs = {}
            print(f"使用原有配置: {base_url}, 模型: {model}")

        result = quick_chat(
            base_url=base_url,
            model=model,
            system_prompt=system_prompt,
            user_prompt=case_content,
            temperature=0.5,
            **extra_kwargs,
        )

        # Strip <think> tags and Markdown code-fence markers, then split the
        # semicolon-separated classifications into a raw list for downstream
        # processing.
        cleaned_fl_text = clean_llm_result(result.text.strip())
        if cleaned_fl_text:
            raw_fl_list = [
                item.strip() for item in cleaned_fl_text.split(";") if item.strip()
            ]
            print(f"问题分类原始结果: {raw_fl_list}")
            return raw_fl_list
        return []
    except Exception as e:
        # Best-effort contract: log the failure and return empty rather
        # than propagate, so one failed extraction doesn't abort the batch.
        print(f"提取问题分类失败: {str(e)}")
        return []
