import os
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'

from transformers import AutoTokenizer, AutoModel,AutoModelForCausalLM
from langchain_core.language_models.llms import LLM
from typing import Any, List, Optional
from pydantic import Field, BaseModel
import torch

class ChatGLM(LLM, BaseModel):
    """LangChain-compatible wrapper around a locally stored ChatGLM checkpoint.

    Loads the tokenizer and model once at construction time and exposes the
    standard LangChain ``LLM`` interface through ``_call``.
    """

    model_path: str = Field(..., description="模型路径")
    model: Any = Field(None, description="模型")        # transformers model, set in __init__
    tokenizer: Any = Field(None, description="分词器")  # transformers tokenizer, set in __init__

    def __init__(self, model_path: str, **kwargs: Any):
        """Load tokenizer and model from *model_path* (local files only).

        Args:
            model_path: Directory containing the ChatGLM checkpoint.
            **kwargs: Extra fields forwarded to the pydantic/LLM base classes.
        """
        # Pydantic's __init__ already assigns model_path; no manual re-set needed.
        super().__init__(model_path=model_path, **kwargs)
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path,
            trust_remote_code=True,  # ChatGLM ships custom tokenizer code
            local_files_only=True
        )
        print(f"分词器加载完成: {self.tokenizer}")
        # AutoModel (not AutoModelForCausalLM) because ChatGLM registers its
        # architecture via trust_remote_code rather than the causal-LM mapping.
        self.model = AutoModel.from_pretrained(
            self.model_path,
            device_map="auto",
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            torch_dtype=torch.float16,
            local_files_only=True
        ).eval()  # inference mode: disables dropout etc.
        print(f"模型加载完成: {self.model}")

    @property
    def _identifying_params(self) -> dict:
        """Parameters that uniquely identify this LLM instance."""
        return {"model_path": self.model_path}

    @property
    def _llm_type(self) -> str:
        """LLM type tag used by LangChain for serialization/telemetry."""
        return "chatglm"

    def answer_question(self, question: str) -> Optional[str]:
        """Run *question* through the model, print the reply, and return it.

        Returns:
            The model's reply, or None if generation failed.
        """
        try:
            print(f"问题: {question}")
            response = self._call(question)
            print(f"模型回复: {response}\n")
            return response
        except Exception as e:
            # Best-effort demo helper: report the failure instead of crashing.
            print(f"运行出错: {e}")
            return None

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Generate a completion for *prompt* (LangChain LLM hook).

        Args:
            prompt: Input text.
            stop: Optional stop strings; the reply is truncated at the first
                occurrence of any of them (the original ignored this argument).

        Returns:
            The generated continuation only — the echoed prompt is stripped
            (previously the full prompt+completion was returned).
        """
        inputs = self.tokenizer(prompt, return_tensors="pt")
        input_ids = inputs.input_ids.to(self.model.device)
        attention_mask = inputs.attention_mask.to(self.model.device)

        # ChatGLM tokenizers may not define a pad token; fall back to EOS so
        # generate() does not warn or fail on pad_token_id=None.
        pad_id = self.tokenizer.pad_token_id
        if pad_id is None:
            pad_id = self.tokenizer.eos_token_id

        with torch.no_grad():  # inference only: skip autograd bookkeeping
            output_ids = self.model.generate(
                input_ids,
                attention_mask=attention_mask,
                max_new_tokens=128,
                num_beams=1,
                do_sample=False,  # greedy decoding for reproducibility
                pad_token_id=pad_id,
                eos_token_id=self.tokenizer.eos_token_id
            )

        # generate() returns prompt + continuation; decode only the new tokens.
        new_tokens = output_ids[0][input_ids.shape[1]:]
        text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)

        # Honor LangChain stop sequences by truncating at the first match.
        if stop:
            for seq in stop:
                idx = text.find(seq)
                if idx != -1:
                    text = text[:idx]
        return text

if __name__ == "__main__":
    # Runtime prerequisites for the int4 checkpoint:
    #   pip install cpm_kernels accelerate sentencepiece
    #   plus a working GCC toolchain for the quantized kernels
    llm = ChatGLM(model_path="D:/demo/gitee/python/models/chatglm2-6b-int4")
    llm.answer_question("你好，请介绍一下自己")