# -*- coding: utf-8 -*-
"""
扬言提取模块，处理扬言信息的提取
"""

import sys
import os

# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from llm import quick_chat
from prompts import get_system_prompt
from .config import LLM_CONFIG, USE_SILICONFLOW
from .clean_llm_result import clean_llm_result


async def extract_yy(case_content: str) -> str:
    """Extract "yangyan" (threat/declaration statements) from case text via an LLM.

    Selects the LLM endpoint/model based on the global ``USE_SILICONFLOW``
    toggle, sends the case text with the "yy" system prompt, and cleans the
    raw model output (think-tags, Markdown fences) before returning it.

    Args:
        case_content: Raw case text to analyze.

    Returns:
        The cleaned extraction result, or "" if the LLM call fails.
    """
    # NOTE(review): declared async but never awaits; quick_chat appears to be
    # synchronous (result.text is read immediately) — confirm with llm module.
    try:
        # Pick endpoint/model per the global toggle. SiliconFlow additionally
        # requires an explicit API key; the legacy endpoint does not.
        extra_kwargs = {}
        if USE_SILICONFLOW:
            base_url = LLM_CONFIG["BASE_URL_dev"]
            model = LLM_CONFIG["MODEL_dev"]
            extra_kwargs["api_key"] = LLM_CONFIG["API_KEY"]
            print(f"使用硅基流动配置: {base_url}, 模型: {model}")
        else:
            base_url = LLM_CONFIG["BASE_URL_1244"]
            model = LLM_CONFIG["MODEL_72B"]
            print(f"使用原有配置: {base_url}, 模型: {model}")

        # Single call site: only the optional api_key differs between configs.
        result = quick_chat(
            base_url=base_url,
            model=model,
            system_prompt=get_system_prompt("yy"),
            user_prompt=case_content,
            temperature=0.5,
            **extra_kwargs,
        )
        yy_result = result.text.strip()
        # Strip <think> tags and Markdown code-fence markers from model output.
        cleaned_yy_result = clean_llm_result(yy_result)
        print(f"扬言提取结果: {cleaned_yy_result}")
        return cleaned_yy_result
    except Exception as e:
        # Best-effort contract: log the failure and return "" rather than raise,
        # so downstream pipeline stages keep running.
        print(f"提取扬言失败: {str(e)}")
        return ""
