# -*- coding:UTF-8 -*-
import torch
from langchain_core.language_models.llms import LLM
from langchain_core.prompts import PromptTemplate
from typing import Any, List, Mapping, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation.utils import GenerationConfig

class BaiChuan_LLM(LLM):
    """LangChain LLM wrapper around a locally-loaded Baichuan chat model.

    Loads the HuggingFace tokenizer/model (revision "v2.0") from a local or
    hub path and routes generation through the model's own ``chat`` API.
    """

    # Populated by __init__; declared as fields so the pydantic-based
    # LangChain LLM base class accepts the attribute assignments.
    model: AutoModelForCausalLM = None
    tokenizer: AutoTokenizer = None

    def __init__(self, model_path: str):
        """Load tokenizer, model and generation config from ``model_path``.

        Args:
            model_path: Local directory or hub id of the Baichuan checkpoint.
        """
        super().__init__()
        print("=========================init baichuan model=========================")
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path, use_fast=False, revision="v2.0", trust_remote_code=True
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            revision="v2.0",
            device_map="auto",
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
        )
        self.model.generation_config = GenerationConfig.from_pretrained(model_path, revision="v2.0")
        print("=========================success init model=========================")

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM implementation."""
        return "baichuan"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        temperature: float = 0.95,
        top_p: float = 0.7,
        max_tokens: int = 512,
        chat_historys: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> str:
        """Generate a completion for ``prompt``, optionally with chat history.

        Args:
            prompt: The current user message.
            stop: Stop sequences; the response is truncated at the first
                occurrence of any of them (LangChain ``_call`` contract).
            run_manager: LangChain callback manager (unused here).
            temperature, top_p, max_tokens: Accepted for interface
                compatibility.  NOTE(review): they are NOT forwarded to the
                model — generation is driven entirely by the
                ``GenerationConfig`` loaded in ``__init__``; confirm whether
                they should be wired into ``model.chat``.
            chat_historys: Prior turns as dicts with "input"/"output" keys.
                Defaults to no history (was a mutable ``[]`` default — fixed).

        Returns:
            The model's reply as a string.
        """
        messages = self.history2input(chat_historys=chat_historys or [], message=prompt)
        response = self.model.chat(self.tokenizer, messages)
        # Honor the `stop` contract: cut the reply at the earliest stop token.
        if stop:
            for seq in stop:
                idx = response.find(seq)
                if idx != -1:
                    response = response[:idx]
        return response

    def tool_chat(self, prompt: str, system_message: Optional[str] = None) -> str:
        """Single-turn chat with an optional system message prepended.

        Args:
            prompt: User message.
            system_message: Optional system-role instruction.

        Returns:
            The model's reply as a string.
        """
        messages = []
        if system_message:
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": prompt})
        response = self.model.chat(self.tokenizer, messages)
        return response

    def calculate_num_tokens(self, text: str) -> int:
        """Return the number of tokens the tokenizer produces for ``text``.

        Returns 0 for ``None`` or empty input.
        """
        if not text:
            return 0
        # Bug fix: `len(self.tokenizer(text))` counted the BatchEncoding's
        # *fields* (input_ids, attention_mask, ...) — always 2-3 regardless
        # of input — instead of the actual token count.
        return len(self.tokenizer(text)["input_ids"])

    def history2input(self, chat_historys: List[dict], message: str) -> List[dict]:
        """Public wrapper around :meth:`_history2input`."""
        return self._history2input(chat_historys, message=message)

    def _history2input(self, chat_historys: List[dict], message: str) -> List[dict]:
        """Convert history dicts plus the new message into chat messages.

        Each history entry is expected to carry "input" (user turn) and
        "output" (assistant turn) keys; the new ``message`` is appended as
        the final user turn.
        """
        messages = []
        for chat_history in chat_historys:
            messages.append({"role": "user", "content": chat_history["input"]})
            messages.append({"role": "assistant", "content": chat_history["output"]})
        messages.append({"role": "user", "content": message})
        return messages

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model": self.model.__class__.__name__, "tokenizer": self.tokenizer.__class__.__name__}