#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：financial-large-model 
@File    ：custom_siliconflow_llm.py
@IDE     ：PyCharm 
@Author  ：Simon Zhang
@Date    ：2025/6/11 23:07 
'''

from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from typing import Optional, List, Mapping, Any, Dict
import requests
import time
import logging
from langchain import LLMChain, PromptTemplate

from config.config import settings

class SiliconFlowLLM(LLM):
    """
    Custom LangChain LLM wrapper for the SiliconFlow chat-completions API.

    Supports querying the account's available models and selecting which
    model subsequent completions should use. Only ``_call`` is implemented;
    the base ``LLM`` class derives batch generation (``_generate`` returning
    an ``LLMResult``) from it, so no override is needed here.
    """

    # Credentials / endpoints; defaults come from project configuration.
    api_key: str
    api_url: str = settings.openaiConfig.base_url
    models_url: str = settings.openaiConfig.models_url
    model_id: Optional[str] = None  # must be set (e.g. via set_model) before calling
    max_tokens: int = 100
    temperature: float = 0.7
    top_p: float = 1.0

    def _auth_headers(self) -> Dict[str, str]:
        """Build the auth headers shared by every SiliconFlow request."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def get_available_models(self) -> List[Dict[str, Any]]:
        """
        Query the list of models available from SiliconFlow.

        Returns:
            A list of model-info dicts (the API's ``data`` field), or an
            empty list on any HTTP or network failure (errors are logged,
            never raised — callers treat an empty list as "unavailable").
        """
        try:
            response = requests.get(self.models_url, headers=self._auth_headers(), timeout=10)
            if response.status_code == 200:
                return response.json().get("data", [])
            logging.error(f"SiliconFlow API 获取模型列表失败，状态码: {response.status_code}, 响应: {response.text}")
        except requests.exceptions.RequestException as e:
            logging.error(f"请求异常: {e}")
        return []

    def set_model(self, model_id: str) -> None:
        """
        Select the model ID to use for completions.

        Raises:
            ValueError: if ``model_id`` is not in the account's model list.
        """
        available_models = self.get_available_models()
        model_ids = [model["id"] for model in available_models]
        if model_id in model_ids:
            self.model_id = model_id
            logging.info(f"已设置使用模型 ID: {model_id}")
        else:
            raise ValueError(f"模型 ID '{model_id}' 不在可用模型列表中。")

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None
    ) -> str:
        """
        Send one chat-completion request, retrying up to 3 times on failure.

        Args:
            prompt: user message to send.
            stop: optional stop sequences forwarded to the API.
            run_manager: LangChain callback manager (unused here).

        Returns:
            The assistant message content, or "" if the payload lacks one.

        Raises:
            ValueError: if no model has been selected, or if all attempts fail.
        """
        # Fail fast instead of burning three retries on a request that can
        # never succeed without a model.
        if not self.model_id:
            raise ValueError("模型 ID 未设置，请先调用 set_model() 选择模型。")

        data: Dict[str, Any] = {
            "model": self.model_id,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p
        }
        if stop:
            data["stop"] = stop

        attempts = 3
        for attempt in range(attempts):
            try:
                response = requests.post(self.api_url, json=data, headers=self._auth_headers(), timeout=10)
                if response.status_code == 200:
                    result = response.json()
                    # Default {} keeps a malformed payload from raising
                    # AttributeError on the inner .get.
                    return result['choices'][0].get('message', {}).get('content', '')
                logging.error(f"SiliconFlow API 请求失败，状态码: {response.status_code}, 响应: {response.text}")
            except requests.exceptions.RequestException as e:
                logging.error(f"请求异常: {e}")

            if attempt < attempts - 1:
                time.sleep(2)  # back off only when another attempt remains

        raise ValueError("SiliconFlow API 请求失败，多次尝试后仍未成功。")

    @property
    def _llm_type(self) -> str:
        return "SiliconFlowLLM"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        # NOTE: api_key deliberately excluded — identifying params flow into
        # logs and caches and must never carry secrets.
        return {
            "api_url": self.api_url,
            "models_url": self.models_url,
            "model_id": self.model_id,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p
        }

    @property
    def _default_params(self) -> Dict[str, Any]:
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p
        }

def main() -> None:
    """Interactive demo: list available models, let the user pick one, then
    answer a single question through an LLMChain."""
    # Build the custom LLM from project configuration.
    llm = SiliconFlowLLM(
        api_key=settings.openaiConfig.api_key,
        api_url=settings.openaiConfig.base_url,
        models_url=settings.openaiConfig.models_url,
        max_tokens=150,
        temperature=0.6,
        top_p=0.9
    )

    # Empty list means credentials/network failed (errors already logged).
    available_models = llm.get_available_models()
    if not available_models:
        print("无法获取可用的模型列表。请检查 API 凭证和网络连接。")
        return

    print("可用的模型列表：")
    for idx, model in enumerate(available_models, start=1):
        print(f"{idx}. 模型 ID: {model.get('id')}, 名称: {model.get('name')}, 描述: {model.get('description')}")

    # The try guards only the int() parse; a ValueError from set_model would
    # otherwise be misreported as "not a number".
    try:
        selection = int(input("请输入要使用的模型编号: "))
    except ValueError:
        print("请输入有效的数字。")
        return
    if not 1 <= selection <= len(available_models):
        print("无效的选择。")
        return

    selected_model = available_models[selection - 1]["id"]
    llm.set_model(selected_model)
    print(f"已选择模型 ID: {selected_model}")

    # Wire the prompt template and LLM into a chain.
    prompt = PromptTemplate(
        input_variables=["question"],
        template="根据以下问题生成详细回答：\n\n{question}"
    )
    chain = LLMChain(prompt=prompt, llm=llm)

    question = input("请输入您的问题: ")
    response = chain.run({"question": question})

    print("\n生成的回答：")
    print(response)

if __name__ == '__main__':
    main()
