# Chinese classical poetry generation
import torch
from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline

cache_dir = "../../my_model_cache/gpt2-chinese"
# Local huggingface_hub snapshot of "uer/gpt2-chinese-poem"; the hash is the
# snapshot revision directory. Kept in one place so the tokenizer and model
# are guaranteed to load from the same checkpoint.
snapshot_dir = (
    cache_dir
    + "/models--uer--gpt2-chinese-poem/snapshots/"
    + "6335c88ef6a3362dcdf2e988577b7bafeda6052b"
)

# This checkpoint ships a BERT-style vocabulary, hence BertTokenizer paired
# with a GPT-2 language-model head.
tokenizer = BertTokenizer.from_pretrained(snapshot_dir)
model = GPT2LMHeadModel.from_pretrained(snapshot_dir)

# device=0 runs inference on the first GPU; -1 falls back to CPU so the
# script also works on machines without CUDA.
device = 0 if torch.cuda.is_available() else -1
text_generator = TextGenerationPipeline(model, tokenizer, device=device)

# "[CLS]" is the begin-of-sequence marker this checkpoint expects; sample up
# to 50 tokens continuing the given opening line.
out = text_generator("[CLS]梅山如积翠，", max_length=50, do_sample=True)
print(out)
