from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Local snapshot path of the cached model.
# NOTE(review): this snapshot is `bert-base-chinese`, a *masked* language model
# (encoder-only BERT). Loading it with AutoModelForCausalLM for the
# "text-generation" task will produce poor/garbled output and a warning from
# transformers — confirm whether a decoder-style checkpoint (e.g. a Chinese
# GPT-2) was intended.
cache_dir = "/Users/wupeng/pythonProjects/huggingface/models/bert-base-chinese/models--bert-base-chinese/snapshots/c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f"

# Load the model weights from the local snapshot (no network access needed).
model = AutoModelForCausalLM.from_pretrained(cache_dir)
# Load the matching tokenizer from the same snapshot.
tokenizer = AutoTokenizer.from_pretrained(cache_dir)

# Build a text-generation pipeline pinned to CPU.
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device="cpu")

# Generate one continuation of the prompt.
prompt = "你好，我是一名程序员，"
generated_text = text_generator(prompt,
                                max_length=50,              # total length cap (prompt tokens included)
                                num_return_sequences=1,
                                truncation=True,            # truncate the prompt if it exceeds max_length
                                do_sample=True,             # FIX: without this the pipeline decodes greedily
                                                            # and temperature/top_p/top_k are silently ignored
                                temperature=0.7,
                                top_p=0.8,
                                top_k=50,
                                clean_up_tokenization_spaces=True)
print(generated_text)