# Classical-Chinese (文言文) text generation with uer/gpt2-chinese-ancient.
from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline

import torch

# Single source of truth for the local model snapshot — the same path was
# previously hard-coded twice (tokenizer and model), which is easy to desync.
CACHE_DIR = "../../my_model_cache/gpt2-chinese"
SNAPSHOT_DIR = (
    CACHE_DIR
    + "/models--uer--gpt2-chinese-ancient/snapshots/"
      "3b264872995b09b5d9873e458f3d03a221c00669"
)
# To (re-)download from the Hub instead of using the local snapshot, load with:
#   from_pretrained("uer/gpt2-chinese-ancient", cache_dir=CACHE_DIR)

tokenizer = BertTokenizer.from_pretrained(SNAPSHOT_DIR)
model = GPT2LMHeadModel.from_pretrained(SNAPSHOT_DIR)

# device=0 selects the first GPU; fall back to CPU (-1) automatically so the
# script no longer crashes on machines without a CUDA device.
device = 0 if torch.cuda.is_available() else -1
text_generator = TextGenerationPipeline(model, tokenizer, device=device)

# do_sample=True: stochastic sampling, so output differs between runs.
out = text_generator("当是时", max_length=100, do_sample=True)
print(out)