import os
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'

from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain_core.language_models.llms import LLM
from typing import Any, List, Optional
from pydantic import Field, BaseModel
import time
import torch

# Inherits from LangChain's LLM and pydantic's BaseModel
class QwenChat(LLM, BaseModel):
    """LangChain-compatible LLM wrapper around a locally stored Qwen causal LM.

    Optionally augments each prompt with context retrieved from a vector-store
    knowledge base (RAG-style) before generation.
    """

    model_path: str = Field(..., description="模型路径")
    model: Any = Field(None, description="模型")
    tokenizer: Any = Field(None, description="分词器")
    vector_store: Any = Field(None, description="知识库")

    def __init__(self, model_path: str, vector_store: Any = None, **kwargs):
        """Load the tokenizer and model from ``model_path``.

        Args:
            model_path: Local directory containing the model weights.
            vector_store: Optional vector store used for retrieval; when None,
                prompts are sent to the model unmodified.
        """
        super().__init__(model_path=model_path, **kwargs)
        # NOTE: super().__init__ already assigned self.model_path via pydantic,
        # so only vector_store needs explicit assignment here.
        self.vector_store = vector_store
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path,
            trust_remote_code=True,
            local_files_only=True
        )
        print(f"分词器加载完成: {self.tokenizer}")
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            device_map="cuda",  # place the model on GPU
            offload_folder="/tmp",  # spill folder used when weights are offloaded from GPU
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            torch_dtype='auto',
            local_files_only=True,
            attn_implementation="eager"
        ).eval()
        print(f"模型加载完成: {self.model}")
        # torch.compile gave no measurable speedup here, so it stays disabled.
        # self.model = torch.compile(self.model)

    @property
    def _identifying_params(self):
        """Parameters identifying this LLM instance to LangChain."""
        return {"model_path": self.model_path}

    @property
    def _llm_type(self) -> str:
        """Type tag reported to LangChain."""
        return "qwen_chat"

    def answer_question(self, question: str):
        """Answer ``question`` via ``_call``, echoing question and answer to stdout.

        Raises:
            Exception: any error from ``_call`` is logged and re-raised.
        """
        try:
            print(f"问题: {question}")
            response = self._call(question)
            print(f"模型回复: {response}\n")
            return response
        except Exception as e:
            print(f"运行出错: {e}")
            # Bare raise preserves the original traceback (``raise e`` resets it).
            raise

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Generate a reply for ``prompt``.

        When a vector store is configured, the prompt is first word-segmented
        with jieba, the top-k matching documents are retrieved, and the prompt
        is rewritten with the retrieved knowledge prepended.

        Args:
            prompt: The user question (plain text).
            stop: Ignored; accepted for LangChain interface compatibility.

        Returns:
            The generated answer with the prompt portion removed.
        """
        t0 = time.time()

        # Retrieve supporting context from the knowledge base, if configured.
        if self.vector_store:
            from jieba import cut  # local import: jieba only needed on the RAG path
            # Word-segment the query so the retriever matches on tokens.
            segmented = " ".join(cut(prompt))
            print(f"分词: {segmented}")
            # Return only the top-2 hits (k bounds the result count).
            docs = self.vector_store.similarity_search_with_score(segmented, k=2)
            if not docs:
                print("未从知识库中检索到有效的信息。")
                knowledge = ""
            else:
                # Each hit is a (document, score) tuple; keep non-empty contents.
                knowledge = " ".join(
                    doc.page_content for doc, _score in docs if doc.page_content
                )
                print(f"从知识库中检索到的信息: {knowledge}")
            # Structured prompt template combining knowledge base and question.
            prompt_template = """根据系统知识库已知信息，回答用户的问题。
系统知识库已知信息：
{knowledge}

当用户提问：
{question}

回答："""
            prompt = prompt_template.format(knowledge=knowledge, question=prompt)
            print(f"提示词: {prompt}")

        # Tokenize on CPU, then move tensors to the model's device.
        inputs = self.tokenizer(prompt, return_tensors="pt")
        t1 = time.time()
        input_ids = inputs.input_ids.to(self.model.device)
        # Pass the attention mask explicitly: avoids the HF warning and keeps
        # generation correct if padding is ever involved.
        attention_mask = inputs.attention_mask.to(self.model.device)
        # Confirm the inputs landed on CUDA.
        print(f"输入的设备: {input_ids.device}")
        # Qwen tokenizers may define no pad token; fall back to EOS.
        pad_id = self.tokenizer.pad_token_id
        if pad_id is None:
            pad_id = self.tokenizer.eos_token_id
        response = self.model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_new_tokens=128,
            num_beams=1,       # single beam (greedy) for speed
            do_sample=False,   # deterministic decoding; top_p irrelevant but pinned
            top_p=1,
            pad_token_id=pad_id,
            eos_token_id=self.tokenizer.eos_token_id
        )
        t2 = time.time()
        # BUGFIX: strip the prompt at the *token* level before decoding.
        # The previous code sliced the decoded string by len(prompt), which
        # breaks whenever the tokenizer round-trip does not reproduce the
        # prompt character-for-character (normalization, special tokens,
        # whitespace differences).
        generated_ids = response[0][input_ids.shape[1]:]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
        t3 = time.time()

        print(f"Tokenization time: {t1-t0:.3f}s")
        print(f"Generation time: {t2-t1:.3f}s")
        print(f"Decoding time: {t3-t2:.3f}s")
        return result
