'''欢迎来到LangChain实战课
https://time.geekbang.org/column/intro/100617601
作者 黄佳'''
from llama_cpp import Llama
from typing import Optional, List, Mapping, Any, ClassVar
from langchain.llms.base import LLM

# Model configuration: local quantized TinyLlama chat model in GGUF format.
MODEL_NAME = 'tinyllama-1.1b-chat-v1.0.Q4_K_S.gguf'
# NOTE: the trailing slash matters — the full path is built as MODEL_PATH + MODEL_NAME.
MODEL_PATH = 'D:/ideaSpace/MyPython/models/TinyLlama-1.1B-Chat-v1.0-GGUF/'

class CustomLLM(LLM):
    """Custom LangChain LLM backed by a local llama.cpp GGUF model.

    Wraps ``llama_cpp.Llama`` so it can be used anywhere LangChain expects
    an ``LLM``. The underlying model is loaded lazily, once per process.
    """

    # Model name exposed via _identifying_params (class-level constant).
    model_name: ClassVar[str] = MODEL_NAME
    # Lazily-initialized shared Llama instance, so the (expensive) weight
    # load happens once per process instead of on every _call invocation.
    _llama: ClassVar[Optional[Llama]] = None

    @classmethod
    def _get_llama(cls) -> Llama:
        """Return the cached Llama instance, loading it on first use."""
        if cls._llama is None:
            cls._llama = Llama(
                model_path=MODEL_PATH + MODEL_NAME,
                n_threads=4,
                n_ctx=512,
                verbose=False,
            )
        return cls._llama

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Generate a chat completion for ``prompt``.

        Args:
            prompt: The user message text.
            stop: Optional extra stop sequences from the caller; merged with
                the default ChatML end-of-turn token. (Previously ignored.)

        Returns:
            The assistant reply text, or a "生成失败: ..." error string if
            generation fails (original best-effort behavior preserved).
        """
        # Honor caller-supplied stop sequences in addition to the default.
        stop_sequences = ["<|im_end|>"] + list(stop or [])
        try:
            llm = self._get_llama()
            # Bug fix: the system prompt used to be built into an unused
            # f-string and silently dropped. Pass it through the messages
            # API so create_chat_completion applies the model's chat
            # template with the intended system role.
            response = llm.create_chat_completion(
                messages=[
                    {"role": "system", "content": "你是一个专业的客服助手"},
                    {"role": "user", "content": prompt},
                ],
                max_tokens=256,
                stop=stop_sequences,
                temperature=0.3,
            )
            return response['choices'][0]['message']['content']
        except Exception as e:
            # NOTE(review): broad catch kept to preserve the original
            # best-effort contract; consider logging and re-raising instead.
            return f"生成失败: {str(e)}"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters that identify this LLM for LangChain caching/serialization."""
        return {"name_of_model": self.model_name}

    @property
    def _llm_type(self) -> str:
        """LLM type tag used by LangChain internals."""
        return "custom"

# Smoke test: ask the customer-service assistant a sample question
# and print its reply when the file is run as a script.
if __name__ == "__main__":
    model = CustomLLM()
    answer = model("昨天有一个客户抱怨他买了花给女朋友之后，两天花就枯了，你说作为客服我应该怎么解释？")
    print(answer)