import json
import os.path
import time

import torch
from fastchat.conversation import get_conv_template
from fastchat.conversation import SeparatorStyle
from fastchat.serve.inference import generate_stream
from peft import PeftModel
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

import utils.contants
from utils.CommonUtils import is_model_empty

class Parameters:
    """Generation settings plus prompt/knowledge/history state for one request.

    Fixes over the previous version: the shared mutable class attribute
    ``history = []`` and the class-level defaults are gone (``__init__``
    requires every value anyway), ``stop_str`` is now always initialized on
    the instance, and ``toJson`` emits real JSON.
    """

    def __init__(self, temperature, max_token_length, top_p, history, prompt, knowledge, knowledge_template):
        self.temperature = temperature  # sampling temperature
        self.max_token_length = max_token_length  # max new tokens to generate
        self.top_p = top_p  # nucleus-sampling cutoff
        self.history = history  # list of (user, bot) turns
        self.prompt = prompt  # user question, later rewritten to the full rendered prompt
        self.knowledge = knowledge  # retrieved docs (objects with .page_content) or None
        self.knowledge_template = knowledge_template  # template with {context}/{question} slots
        # Decoder stop string; filled in later by history_process / conv_prompt.
        self.stop_str = None

    def toJson(self):
        """Serialize the current state as a JSON string.

        Uses json.dumps so quotes and None are encoded correctly (the old
        ``str(self.__dict__).replace("'", '"')`` produced invalid JSON for
        None values and any text containing quote characters).  Values that
        are not JSON-serializable (e.g. knowledge documents) fall back to
        their str() representation.
        """
        return json.dumps(self.__dict__, ensure_ascii=False, default=str)


def torch_gc():
    """Release cached CUDA memory; a no-op when CUDA is unavailable."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()








class LLaMABase():
    """Base backend for FastChat-style causal language models.

    ``chatIO`` is an externally supplied state holder: load_model reads
    ``chatIO.model_name_or_path`` / ``chatIO.llm_device`` and fills in
    ``chatIO.tokenizer`` and ``chatIO.model``.  Subclasses override
    prompt_template_name / reps_callback / conv_prompt (and optionally
    load_model / generate_stream) to adapt to a specific model family.
    """
    # Shared state/IO holder (model, tokenizer, device, paths).
    chatIO: object = None

    def __init__(self, chatIO: object):
        self.chatIO = chatIO

    def load_model(self, onload_callback):
        """Load the tokenizer and fp16 causal-LM weights onto self.chatIO.

        onload_callback, when not None, is invoked as (progress, message) so
        the caller's UI can display loading status.
        """
        print("加载模型tokenizer")
        kwargs = {"torch_dtype": torch.float16}
        chatIO = self.chatIO
        chatIO.tokenizer = AutoTokenizer.from_pretrained(chatIO.model_name_or_path, use_fast=False)
        print("加载模型", os.path.basename(chatIO.model_name_or_path), "args=", kwargs, "device=", chatIO.llm_device)
        if onload_callback is not None:
            onload_callback(0.3, "正在加载大语言模型:" + os.path.basename(chatIO.model_name_or_path))
        # Pass the kwargs that were just printed instead of duplicating torch_dtype.
        chatIO.model = AutoModelForCausalLM.from_pretrained(
            chatIO.model_name_or_path, low_cpu_mem_usage=True, **kwargs)

    def prompt_template_name(self):
        """FastChat conversation-template name; None in the base class."""
        return None

    def reps_callback(self, text):
        """Post-process one streamed chunk; identity in the base class."""
        return text

    def knowledge_process(self, parameters: "Parameters") -> "Parameters":
        """Fold retrieved knowledge documents into the prompt via the template.

        Replaces {context} with the joined document texts and {question} with
        the user prompt.  No-op when knowledge is empty/None or there is no
        template.  Mutates and returns the same Parameters object.
        """
        if parameters.knowledge is not None and len(
                parameters.knowledge) > 0 and parameters.knowledge_template is not None:
            template = parameters.knowledge_template
            docs = [doc.page_content for doc in parameters.knowledge]
            template = template.replace("{context}", "\n".join(docs))
            template = template.replace("{question}", parameters.prompt)
            parameters.prompt = template
        return parameters

    def history_process(self, parameter: "Parameters") -> "Parameters":
        """Render chat history plus the current prompt through the template.

        Also derives the stop string the decoder should halt on from the
        template's separator style.
        """
        conv = get_conv_template(self.prompt_template_name())
        for user, bot in parameter.history:
            conv.append_message(conv.roles[0], user)
            conv.append_message(conv.roles[1], bot)
        conv.append_message(conv.roles[0], parameter.prompt)
        conv.append_message(conv.roles[1], None)  # empty slot the model will fill
        parameter.prompt = conv.get_prompt()
        parameter.stop_str = conv.sep if conv.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE else conv.sep2
        return parameter

    def conv_prompt(self, parameter: "Parameters") -> "Parameters":
        """Prompt pre-processing hook; returns the parameter unchanged here.

        Bug fix: this previously had only a docstring and implicitly returned
        None, so generate() handed None to generate_stream() whenever a
        subclass did not override this hook.
        """
        return parameter

    def generate_stream(self, parameter: "Parameters"):
        """Generator: yield post-processed text chunks streamed from the model."""
        prompt = parameter.prompt
        print("llma prompt=", prompt)
        chatIO = self.chatIO
        params = {
            "model": chatIO.model,
            "prompt": prompt,
            "temperature": parameter.temperature,
            "max_new_tokens": parameter.max_token_length,
            "top_p": parameter.top_p,
            "stop": parameter.stop_str,
        }
        output_stream = generate_stream(chatIO.model, chatIO.tokenizer, params, chatIO.llm_device)
        for output in output_stream:
            yield self.reps_callback(output['text'])
        # Free cached GPU memory once the stream is exhausted.
        torch_gc()

    def generate(self, parameter: "Parameters"):
        """Run prompt pre-processing, then return the streaming generator."""
        parameter = self.conv_prompt(parameter)
        return self.generate_stream(parameter)

class Vicuna(LLaMABase):
    """Vicuna v1.1 backend using FastChat's conversation template."""

    def prompt_template_name(self):
        # FastChat template id for Vicuna v1.1.
        return "vicuna_v1.1"

    def reps_callback(self, output):
        """Return only the text after the last "ASSISTANT" marker.

        Fix: the original tested ``startswith(":")`` twice with the same
        string; check both the ASCII and the full-width colon instead, which
        is what the duplicated condition was presumably meant to do.
        """
        split = "ASSISTANT"
        if split in output:
            output = output[output.rindex(split) + len(split):].strip()
            if output.startswith((":", "：")):
                output = output[1:].strip()
                return output
        return output

    def conv_prompt(self, parameter: "Parameters") -> "Parameters":
        # Inject retrieved knowledge first, then wrap with conversation history.
        parameter = super().knowledge_process(parameter)
        parameter = super().history_process(parameter)
        return parameter
class ChatGML(LLaMABase):
    """Backend for ChatGLM-style models loaded via AutoModel with remote code.

    Overrides load_model (half precision on CUDA, float32 on CPU, optional
    LoRA adapter via PEFT) and generate_stream (delegates to the model's own
    stream_chat instead of FastChat's generate_stream).
    """
    def load_model(self,onload_callback):
        """Load tokenizer, base model and (optionally) a LoRA adapter onto chatIO.

        onload_callback, when not None, is invoked as (progress, message).
        """
        # NOTE(review): kwargs (fp16 dtype) is only applied on the CUDA path;
        # the CPU path deliberately loads in float32 via .float() below.
        kwargs = {"torch_dtype": torch.float16}
        chatIO = self.chatIO
        print("正在加载大语言模型:" + os.path.basename(chatIO.model_name_or_path))
        if onload_callback is not None:
            onload_callback(0.3, "正在加载大语言模型:" + os.path.basename(chatIO.model_name_or_path))
        # trust_remote_code is required because ChatGLM ships its modeling code
        # inside the checkpoint repository.
        chatIO.tokenizer = AutoTokenizer.from_pretrained(chatIO.model_name_or_path,
                                                       trust_remote_code=True)
        if torch.cuda.is_available() and chatIO.llm_device.lower().startswith("cuda"):
            # GPU: fp16 weights moved to CUDA.
            chatIO.model = AutoModel.from_pretrained(chatIO.model_name_or_path, trust_remote_code=True,
                                                   **kwargs).half().cuda()
        else:
            # CPU (or non-CUDA device): keep full float32 precision.
            chatIO.model = AutoModel.from_pretrained(chatIO.model_name_or_path, trust_remote_code=True).float().to(
                chatIO.llm_device)
        if not is_model_empty(chatIO.lora_model_name_or_path):
            # Wrap the base model with the LoRA adapter; .half() afterwards so
            # the merged model stays fp16.
            print("正在加载Lora模型:" + os.path.basename(chatIO.lora_model_name_or_path))
            if onload_callback is not None:
                onload_callback(0.3, "正在加载Lora模型:" + os.path.basename(chatIO.lora_model_name_or_path))
            chatIO.model = PeftModel.from_pretrained(chatIO.model, chatIO.lora_model_name_or_path).half()
    def conv_prompt(self,parameter:Parameters) -> Parameters:
        # ChatGLM only needs the knowledge-base injection here; conversation
        # history is passed to stream_chat directly in generate_stream.
        parameter = super().knowledge_process(parameter)
        return parameter
    def generate_stream(self,parameter:Parameters) -> str:
        """Generator: yield cumulative responses from the model's stream_chat.

        stream_chat yields (response, history) pairs; only the response text
        is forwarded to the caller.
        """
        print("prompt=", parameter.prompt, "history=", parameter.history)
        chatIO = self.chatIO
        for response, history in chatIO.model.stream_chat(chatIO.tokenizer, parameter.prompt, parameter.history,
                                                        max_length=parameter.max_token_length,
                                                        top_p=parameter.top_p,
                                                        temperature=parameter.temperature):
            yield response
        # Free cached GPU memory once streaming finishes.
        torch_gc()

class DevLLM(LLaMABase):
    """Debug backend: loads no model and streams canned mock responses."""

    def load_model(self, onload_callback):
        # Debug mode: report progress but skip model loading entirely.
        if onload_callback is not None:
            onload_callback(0.3, "当前处于debug模式.不加载大模型")
        time.sleep(1)

    def conv_prompt(self, parameter: "Parameters") -> "Parameters":
        # No prompt pre-processing needed in debug mode.
        return parameter

    def generate_stream(self, parameter: "Parameters"):
        """Generator: yield a fixed sequence of mock outputs.

        Fix: the local variable previously shadowed the builtin ``str``;
        renamed to ``prefix``.  The sleeps simulate token-streaming latency.
        """
        prefix = "当前是debug模式,模拟大模型的返回:"
        yield prefix
        for item in ["1", "12", "123"]:
            time.sleep(1)
            yield prefix + item

class Guanaco(LLaMABase):
    """Guanaco backend using an Alpaca-style Instruction/Input/Response prompt."""

    def reps_callback(self, output):
        # Keep only the text after the last "Response:" marker, if present.
        marker = "Response:"
        if marker in output:
            output = output.rpartition(marker)[2].strip()
        return output

    def conv_prompt(self, parameters: "Parameters") -> "Parameters":
        """Assemble the Alpaca-style prompt from history, knowledge and question.

        Mutates parameters in place: rewrites .prompt, sets .stop_str so
        generation halts at the next section marker, and records the marker
        used by reps_callback.
        """
        pieces = []
        if len(parameters.history) > 0:
            pieces.append("### Instruction:")
            for user_turn, bot_turn in parameters.history:
                pieces.append("\nUser: " + user_turn)
                pieces.append("\nAssistant: " + bot_turn)
        pieces.append("\n### Input:")
        knowledge = parameters.knowledge
        if knowledge is not None and len(knowledge) > 0:
            # Retrieved documents become an indented System context section.
            context = "\n\t".join(doc.page_content for doc in knowledge)
            pieces.append("\nSystem: \n\t" + context)
        pieces.append("\nUser: " + parameters.prompt)
        pieces.append("\n### Response:")
        parameters.prompt = "".join(pieces)
        parameters.stop_str = "###"
        parameters.split = "Response"
        return parameters
