from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Path to a local snapshot of the UER GPT-2 Chinese model (cluecorpussmall).
model_dir = r"D:\pythonWork\PythonProject3\model\uer\gpt2-chinese-cluecorpussmall\models--uer--gpt2-chinese-cluecorpussmall\snapshots\c2c0249d8a2731f269414cc3b22dff021f8e07a3"

# Load the model and tokenizer once. The original code discarded the result of
# from_pretrained() and then passed the *path* to pipeline(), which loaded the
# same model from disk a second time — wasteful in both time and memory.
model = AutoModelForCausalLM.from_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Build a text-generation pipeline around the already-loaded model, pinned to CPU.
generate = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device="cpu",
)

# Generate one continuation of the Chinese prompt, capped at 100 tokens total.
# truncation=True silences the tokenizer warning about over-length inputs.
output = generate(
    "你好，我是语言模型",
    max_length=100,
    num_return_sequences=1,
    truncation=True,
)
print(output)