import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Path to a locally downloaded checkpoint (a CodeGen-family model, judging by
# the infill prompt format used below) — TODO confirm the directory contents.
model_path="/home/ubuntu/model_test/THUDM/codegen"
# trust_remote_code=True lets the tokenizer execute custom code shipped with
# the checkpoint (the CodeGen2/2.5 tokenizers need this; see the commented
# Salesforce/codegen25 lines further down).
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(model_path)


def format_prompt(prefix, suffix, mask_token="<mask_1>"):
    """Build a CodeGen2/CodeGen2.5-style infill prompt.

    The model is asked to generate the code that replaces *mask_token*
    between *prefix* and *suffix*; the layout is
    ``<prefix><mask><suffix><|endoftext|><sep><mask>``.

    Args:
        prefix: Code appearing before the span to infill.
        suffix: Code appearing after the span to infill.
        mask_token: Sentinel marking the infill location. Defaults to
            ``"<mask_1>"``; use ``"<mask_N>"`` for multi-span infilling.

    Returns:
        The formatted prompt string.
    """
    # The trailing mask repeats the sentinel so generation starts at the span.
    return prefix + mask_token + suffix + "<|endoftext|>" + "<sep>" + mask_token


# Example infill prompt. NOTE(review): `text` is built but never fed to the
# model below — presumably kept from an earlier infilling experiment.
prefix = "def hello_world():\n    "
suffix = "    return name"
text = format_prompt(prefix, suffix)

# Live prompt: a left-truncated snippet the model should complete
# (an attribute access on `file_util`).
prompt = """
from top.starp.util import file_util

file_util.re"""

# Tokenize, generate up to 128 tokens total, and print the raw decoded
# output (special tokens included).
inputs = tokenizer(prompt, return_tensors="pt")
sample = model.generate(**inputs, max_length=128)
print(tokenizer.decode(sample[0]))

