# -*- coding: utf-8 -*-


import sys
import torch
import transformers


# Raw command-line arguments; argv[1] is only used by the commented-out
# prompt call further down.
arg = sys.argv

# Load tokenizer and causal-LM weights from a local fine-tuned BLOOM-560m
# checkpoint. NOTE(review): checkpoint path and GPU index "cuda:2" are
# hard-coded — confirm they match the deployment host.
tokenizer = transformers.AutoTokenizer.from_pretrained("/mydata/internal_server_model/bloom-560m-nlu-0511")
model = transformers.AutoModelForCausalLM.from_pretrained("/mydata/internal_server_model/bloom-560m-nlu-0511")

model = model.to("cuda:2")

def generate_prompt(instruction, input=None):
    """Format an instruction (and optional input text) into the prompt
    template the fine-tuned model was trained on.

    The returned prompt always ends with "### Response:" so the model's
    continuation can be split off after generation. Note: any falsy
    *input* (None or empty string) selects the no-input template.
    """
    return (
        f"""Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"""
        if input
        else f"""Instruction:\n{instruction}\n\n### Response:"""
    )

# Task instruction (Chinese): extract intent / attitude / entities from a
# short insurance-sales dialogue; output empty for any missing field.
instruction = '下面是段销售员和用户的一次对话。销售人员正在向用户展示短期保险的优势，并希望用户能够尝试办理。需要对用户的话术进行信息提取，信息包括意图，态度，实体，任何一种信息没有就输出空。'
# Example dialogue turn used as the "### Input" section of the prompt.
input_sentence = '销售员:这个您还记得嘛？\n用户:都是买过保险。\n'
prompt = generate_prompt(instruction, input_sentence)
# Alternative: take the instruction from the command line instead.
# prompt = generate_prompt("{}".format(arg[1]))


# Tokenize the prompt and move the ids to the same device as the model.
# NOTE(review): device "cuda:2" is duplicated from the model .to() call
# above — keep the two in sync.
inputs = tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].to("cuda:2")

# Run generation without building autograd graphs (inference only).
with torch.no_grad():
    generation_output = model.generate(
        input_ids=input_ids,
        temperature=0.8,
        top_p=0.9,
        # NOTE(review): top_k=1 with do_sample=True restricts sampling to the
        # single most likely token, which effectively makes decoding greedy
        # and leaves temperature/top_p without effect — confirm this is
        # intentional.
        top_k=1,
        do_sample=True,
        num_beams=1,
        max_new_tokens=100,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
        # Return a ModelOutput object (with .sequences) rather than a bare
        # tensor, and include per-step scores.
        return_dict_in_generate=True,
        output_scores=True
    )

    # First (and only) generated sequence; includes the prompt tokens.
    s = generation_output.sequences[0]
    # NOTE(review): decode keeps special tokens (skip_special_tokens not
    # set), so the EOS marker may appear in the printed result — verify.
    output = tokenizer.decode(s)

    # The prompt ends with "### Response:", so everything after the last
    # marker is the model's answer. split(...)[1] assumes the model did not
    # emit the marker again inside its response.
    res = output.split("### Response:")[1].strip()

    print(res)
