---
license: mit
---

## Usage

### Initialization

```python
!pip install transformers[torch]
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = GPT2Tokenizer.from_pretrained('Hollway/gpt2_finetune')
model = GPT2LMHeadModel.from_pretrained('Hollway/gpt2_finetune').to(device)
```

### Inference

```python
def generate(text):
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        tokens = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            repetition_penalty=1.05,
            pad_token_id=tokenizer.pad_token_id,
        )
    return tokenizer.decode(tokens[0], skip_special_tokens=True)

generate("只因你")
```