# -*- coding: utf-8 -*-
# @Time    : 2023/7/17 4:46 下午
# @Author  : Wu WanJie


def bloom_predict():
    """Load the local BLOOM-7B1 checkpoint and print completions for two demo prompts.

    Side effects: reads model weights from local disk, places the model on
    cuda:0, and prints each prompt/completion pair to stdout. Requires a CUDA
    device and the checkpoint directory to exist.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer

    checkpoint = "/data/transformers/bloom-7b1"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    # torch_dtype="auto" uses the dtype stored in the checkpoint config.
    model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", device_map="cuda:0")

    def _run(prompt, max_new_tokens):
        # Encode on CPU, then move the input ids to the model's device.
        inputs = tokenizer.encode(prompt, return_tensors="pt").to("cuda:0")
        outputs = model.generate(inputs, max_new_tokens=max_new_tokens)
        print(f"Input: {prompt}\nOutput: {tokenizer.decode(outputs[0])}")

    _run("你好", 64)
    _run("晚上睡不着怎么办?", 128)


def llama_predict():
    """Load the local LLaMA-7B checkpoint and print completions for two demo prompts.

    NOTE(review): a second function named ``llama_predict`` is defined later in
    this file and shadows this one, so this definition is unreachable via the
    module-level name — consider renaming one of them.

    Side effects: loads model weights from local disk onto cuda:0 and prints
    the decoded generations to stdout.
    """
    from transformers import AutoTokenizer, LlamaForCausalLM

    checkpoint = "/data/transformers/llama-7b"
    model = LlamaForCausalLM.from_pretrained(checkpoint, device_map="cuda:0")
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    def _run(prompt):
        # BUG FIX: the original second prompt used ``tokenizer.encode(...)``,
        # which returns a bare tensor, and then accessed ``.input_ids`` on it —
        # an AttributeError at runtime. Use ``tokenizer(...)`` (a BatchEncoding,
        # which does have ``.input_ids``) for both prompts, as the first prompt
        # already did.
        inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
        generate_ids = model.generate(inputs.input_ids, max_length=30)
        result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        print(result)

    _run("你好")
    _run("晚上睡不着怎么办?")


def glm_predict():
    """Chat with the local ChatGLM-6B model and print answers for two demo prompts.

    Side effects: loads the model in half precision onto the default CUDA
    device and prints each prompt/response pair to stdout.
    """
    from transformers import AutoTokenizer, AutoModel

    checkpoint = "/data/transformers/chatglm-6b"
    # trust_remote_code is required: ChatGLM ships custom modeling code.
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
    model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).half().cuda()

    for prompt in ("你好", "晚上睡不着怎么办?"):
        # Each prompt starts a fresh conversation (empty history).
        response, _history = model.chat(tokenizer, prompt, history=[])
        print(f"Input: {prompt}\nOutput: {response}")


def glm2_predict():
    """Chat with the local ChatGLM2-6B model and print answers for two demo prompts.

    Side effects: loads the model in half precision onto the default CUDA
    device and prints each prompt/response pair to stdout.
    """
    from transformers import AutoTokenizer, AutoModel

    checkpoint = "/data/transformers/chatglm2-6b"
    # trust_remote_code is required: ChatGLM2 ships custom modeling code.
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
    model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).half().cuda()

    for prompt in ("你好", "晚上睡不着怎么办?"):
        # Each prompt starts a fresh conversation (empty history).
        response, _history = model.chat(tokenizer, prompt, history=[])
        print(f"Input: {prompt}\nOutput: {response}")


def llama_predict():
    """Load the local Alpaca-7B checkpoint and print a completion for one demo prompt.

    NOTE(review): this redefines ``llama_predict`` and silently shadows the
    earlier definition of the same name (which targets llama-7b); module-level
    calls resolve here. Consider renaming this one to e.g. ``alpaca_predict``
    and updating callers.

    Side effects: loads model weights from local disk onto cuda:0 and prints
    the decoded generation to stdout.
    """
    from transformers import AutoTokenizer, AutoModelForCausalLM

    checkpoint = "/data/transformers/alpaca_7b"
    # vicuna — presumably an alternative checkpoint candidate; path says alpaca_7b, confirm.
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", device_map="cuda:0")

    prompt = "你好"
    encoded = tokenizer(prompt, return_tensors="pt").to("cuda:0")
    generated = model.generate(encoded.input_ids, max_length=30)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    print(decoded[0])


if __name__ == "__main__":
    # bloom_predict()
    # llama_predict()
    # glm_predict()
    # glm2_predict()
    # NOTE(review): two functions named llama_predict are defined in this file;
    # this call resolves to the LAST definition (the alpaca_7b one), because it
    # shadows the earlier llama-7b version.
    llama_predict()
# from transformers import AutoTokenizer, AutoModel
# tokenizer = AutoTokenizer.from_pretrained("/data/transformers/chatglm2-6b", trust_remote_code=True)
# model = AutoModel.from_pretrained("/data/transformers/chatglm2-6b", trust_remote_code=True, device="cuda:0")
# prompt = """
# 已知信息：
# 信息科技费低于30万由组长审批，低于50万由部长审批，低于200万由总经理审批。
#
# 根据上述已知信息，简洁和专业的来回答用户的问题。如果无法从中得到答案，请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”，不允许在答案中添加编造成分，答案请使用中文。 问题是：信息科技费80万应该由谁审批？"""
# model = model.eval()
# response, history = model.chat(tokenizer, prompt, history=[])
# print(response)
