
from transformers import  AutoModelForCausalLM, AutoTokenizer
import torch
from utils.conversation import get_conv_template,register_conv_template,SeparatorStyle,Conversation
from fastchat.serve.inference import  generate_stream
from chatllm.chatllm import Parameters,Guanaco





import copy
def torch_gc():
    """Release cached CUDA memory held by PyTorch.

    NOTE(review): this definition is shadowed by an identical-purpose
    ``torch_gc`` defined later in the file. The later one guards on CUDA
    availability; the guard is added here too so either definition is safe
    on a CPU-only host (the unguarded calls raise on builds without CUDA).
    """
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()


# from  transformers.models.llama.modeling_llama import LlamaForCausalLM
# llm=LlamaForCausalLM()
# llm.generation_config

# Device the model is moved to; assumes a CUDA-capable GPU is present.
llm_device="cuda"
# Local filesystem path of the Guanaco checkpoint loaded by init_model().
model_name_or_path = "/root/autodl-tmp/ChatGML-ALL/models/Guanaco"
# Populated lazily by init_model(); both stay None until it is called.
tokenizer = None
model = None

def init_model():
    """Load the tokenizer and model from ``model_name_or_path`` onto ``llm_device``.

    Populates the module-level ``tokenizer`` and ``model`` globals, moves the
    model to the configured device, and switches it to eval mode. Must be
    called before ``guanaco_stream``/``predict``.

    Note: the original declared ``global llm_device`` as well, but that
    variable is only read here, so the declaration was redundant and has
    been removed.
    """
    global tokenizer, model
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False)
    # fp16 weights + low_cpu_mem_usage keep host-RAM pressure down while loading.
    model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    model.to(llm_device)
    model.eval()
    print(type(model))


def torch_gc():
    """Free cached CUDA memory; a no-op on machines without a GPU."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

# ### Instruction:
# User: History User Input
# Assistant: History Assistant Answer
# ### Input:
# System: Knowledge
# User: New User Input
# ### Response:
# New Assistant Answer




def guanaco_stream(prompt):
    """Start token-by-token generation for *prompt* via fastchat.

    Returns the stream produced by fastchat's ``generate_stream``; the
    sampling settings (temperature 0.7, up to 512 new tokens) are fixed here.
    Requires ``init_model()`` to have populated the module globals first.
    """
    generation_params = {
        "model": model,
        "prompt": prompt,
        "temperature": 0.7,
        "max_new_tokens": 512,
    }
    return generate_stream(model, tokenizer, generation_params, llm_device)





def predict(message,type):
    """Build a conversation prompt for *message* and stream the reply to stdout.

    NOTE(review): the second parameter is accepted for interface
    compatibility but is never used inside the function.
    """
    params = Parameters(
        temperature=0.7,
        max_token_length=512,
        top_p="0.9",
        prompt=message,
        knowledge_template=None,
        history=[],
        knowledge=None,
    )
    # Let the Guanaco conversation wrapper render the final prompt text.
    params = Guanaco(None).conv_prompt(params)
    print(params.prompt)
    for reps in guanaco_stream(params.prompt):
        print("vicuna输出: ", reps)
    print("end...")

class Document:
    """Minimal stand-in for a knowledge document.

    Callers are expected to assign ``page_content`` after construction
    (see the commented-out usage under ``__main__``); the annotation alone
    does not create the attribute on instances.
    """

    # Text body of the document; no default is provided on purpose.
    page_content: str

if __name__ == '__main__':


    # doc =Document()
    # doc.page_content="Question: 大象装进冰箱需要几步   Answer: 3步:1. 打开冰箱 2. 把大象装进冰箱 3. 关上冰箱门"
    # doc2 =Document()
    # doc2.page_content="Question: 冰变成水最快的方法是什么？  Answer: 去掉冰字哪二点"
    #
    # parameters :Parameters=Parameters(temperature=0.7,max_token_length=512,top_p="0.9",history=[("prompt1","answer1"),("prompt2","answer2")],knowledge=[doc,doc2],prompt="你好",knowledge_template=None)
    # guanaco = Guanaco(None)
    # parameters = guanaco.conv_prompt(parameters)
    #
    # print(parameters.prompt)
    init_model()
    # Simple interactive REPL: read one user message per iteration and
    # stream the model's answer to stdout. Exits only on Ctrl-C / EOF.
    while True:
        message = input("请输入:")
        predict(message,None)

