# -*- coding:UTF-8 -*-
from langchain_core.language_models.llms import LLM
from langchain_core.prompts import PromptTemplate
from typing import Any, List, Mapping, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

class OpenBA_3B_LLM(LLM):
    """LangChain ``LLM`` wrapper around the OpenBA-3B seq2seq chat model.

    The tokenizer and model are loaded in ``__init__`` from a local
    checkpoint path and run in half precision on CUDA.  Inputs are rendered
    with the OpenBA chat template::

        <S> Human: {q} </s> Assistant: {a} </s> ... Human: {q} </s> Assistant:  <extra_id_0>

    and kept within a ~1000-token encoder budget by dropping the oldest
    history turns and, as a last resort, truncating the middle of the prefix.
    """

    # Declared as (pydantic) fields so that assignment in ``__init__`` works;
    # both are populated there and never None afterwards.
    model: AutoModelForSeq2SeqLM = None
    tokenizer: AutoTokenizer = None

    def __init__(self, model_path: str):
        """Load tokenizer and model from ``model_path`` (fp16, on GPU).

        :param model_path: local path (or hub id) of the OpenBA checkpoint.
        """
        super().__init__()
        print("=========================init model=========================")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path, trust_remote_code=True).half().cuda()
        # eval() disables dropout etc.; generation itself still samples.
        self.model = self.model.eval()
        print("=========================success init model=========================")

    @property
    def _llm_type(self) -> str:
        """LangChain identifier for this LLM implementation."""
        return "openba-3b"

    def _run_generation(
        self,
        input_text: str,
        temperature: float,
        top_p: float,
        max_tokens: int,
        log_input_len: bool = False,
    ) -> str:
        """Tokenize ``input_text``, sample a completion and decode it.

        Shared backend for :meth:`_call` and :meth:`chat` (the original code
        duplicated this pipeline in both).

        :param log_input_len: when True, print the tokenized input length
            (only ``_call`` did this originally, so it is opt-in).
        """
        inputs = self.tokenizer(input_text, return_tensors='pt')
        if log_input_len:
            print("【当前输入token长度：】", len(inputs.input_ids[0]))
        # Move every tensor in the BatchEncoding onto the GPU next to the model.
        for k in inputs:
            inputs[k] = inputs[k].cuda()
        outputs = self.model.generate(
            **inputs,
            do_sample=True,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        prefix: str = "",
        temperature: float = 0.95,
        top_p: float = 0.7,
        max_tokens: int = 512,
        chat_historys: Optional[List[Mapping[str, str]]] = None,
        **kwargs: Any,
    ) -> str:
        """LangChain entry point: answer ``prompt`` given prefix and history.

        :param prompt: current user message.
        :param stop: accepted for API compatibility; not applied here.
        :param prefix: text prepended to the message (e.g. retrieved context).
        :param chat_historys: prior turns as dicts with ``input``/``output``
            keys; oldest turns are dropped if the token budget is exceeded.
        :return: the decoded model completion, or a Chinese notice when the
            input cannot be shrunk to fit.
        """
        # Bug fix: the original used a mutable default ``chat_historys=[]``;
        # use None and materialize a fresh list per call.
        if chat_historys is None:
            chat_historys = []
        input_text = self.all2input(chat_historys=chat_historys, prefix=prefix, message=prompt)
        if input_text is None:
            # NOTE(review): the message says 1024 but the budget actually
            # enforced in all2input is 1000 tokens — kept as-is (user-facing).
            return "当前token数量大于1024"
        print("当前参数：【temperature：{}，top_p：{},max_tokens：{}】".format(temperature, top_p, max_tokens))
        print("【当前输入信息为：】", input_text)
        return self._run_generation(input_text, temperature, top_p, max_tokens, log_input_len=True)

    def chat(self, prompt: str, temperature: float = 0.95, top_p: float = 0.7, max_tokens: int = 512) -> str:
        """Single-turn chat helper: no history, no prefix, no length guard."""
        input_text = "<S> " + f"Human: {prompt} </s> Assistant: " + " <extra_id_0>"
        return self._run_generation(input_text, temperature, top_p, max_tokens)

    def calculate_num_tokens(self, text: str) -> int:
        """Return the tokenizer's token count for ``text`` (0 for empty/None)."""
        if text is None or len(text) <= 0:
            return 0
        return len(self.tokenizer(text, return_tensors='pt').input_ids[0])

    def all2input(self, chat_historys: List[Mapping[str, str]], prefix: str, message: str) -> Optional[str]:
        """Build a model input that fits the ~1000-token budget.

        Strategy, in order:
          1. Drop the oldest history turns one at a time.
          2. With no history left, truncate the *middle* of the prefix,
             keeping its head and its last 10 tokens.
        :return: the templated input text, or None if even the bare message
            leaves no room for a prefix.
        """
        history_len = len(chat_historys)
        while history_len >= 0:
            num_token, input_text = self._history2input(chat_historys[:history_len], prefix + message)
            if num_token <= 1000:
                return input_text
            history_len -= 1

        # Even with no history the input is too long: shrink the prefix.
        prefix_tokens = self.tokenizer(prefix, return_tensors='pt').input_ids[0]
        message_tokens = self.tokenizer(message, return_tensors='pt').input_ids[0]

        # Head budget for the prefix: total budget minus message minus the
        # 10 tail tokens we always keep.
        keep_head = 1000 - len(message_tokens) - 10
        # Bug fix: the original only returned None when the message was BOTH
        # >= the prefix length AND >= 1000 tokens.  When ``keep_head`` went
        # negative with a long prefix, ``prefix_tokens[:keep_head]`` kept
        # almost the whole prefix instead of truncating it.  Reject whenever
        # there is no head budget left.
        if keep_head <= 0:
            return None

        before = self.tokenizer.decode(prefix_tokens[:keep_head], skip_special_tokens=True)
        after = self.tokenizer.decode(prefix_tokens[-10:], skip_special_tokens=True)

        prefix = before + "\n" + after
        return self._history2input([], prefix + message)[1]

    def _history2input(self, chat_historys: List[Mapping[str, str]], message: str):
        """Render history + message with the OpenBA chat template.

        :return: ``(token_count, templated_text)``.
        """
        input_text = ""
        for chat_history in chat_historys:
            input_text += f"Human: {chat_history['input']} </s> Assistant: {chat_history['output']} </s> "
        text = "<S> " + input_text + f"Human: {message} </s> Assistant: " + " <extra_id_0>"
        num_token = self.calculate_num_tokens(text=text)
        return (num_token, text)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model": self.model.__class__.__name__, "tokenizer": self.tokenizer.__class__.__name__}