import torch
from transformers import BloomTokenizerFast, BloomForCausalLM

model_name_or_path = "bloom-820m-chat"

# Load tokenizer and model; keep the model on CPU and switch to eval mode
# (disables dropout etc.) for deterministic-ish inference behavior.
tokenizer = BloomTokenizerFast.from_pretrained(model_name_or_path)
model = BloomForCausalLM.from_pretrained(model_name_or_path).cpu()
model = model.eval()

# Chat format: the user query is terminated by the EOS token.
input_pattern = "{}</s>"
text = "python 数组切片和遍历的代码示例"
input_ids = tokenizer(input_pattern.format(text), return_tensors="pt").input_ids
input_ids = input_ids.cpu()

# Run generation without autograd: no gradient graph is needed for sampling,
# and building one wastes memory on a 820M-parameter model.
with torch.no_grad():
    outputs = model.generate(input_ids, do_sample=True, max_new_tokens=1024, top_p=0.85,
                             temperature=0.3, repetition_penalty=1.2,
                             eos_token_id=tokenizer.eos_token_id)

# Isolate the newly generated continuation by slicing off the prompt tokens.
# Decoding the full sequence and then calling output.replace(text, "") is
# fragile: it would also delete any occurrence of the query text that the
# model legitimately repeats inside its answer. skip_special_tokens removes
# </s> (and any other special tokens) without string surgery.
generated_tokens = outputs[0][input_ids.shape[1]:]
response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
print(response)