"""Scary-story generation demo: GPT-2-large fine-tune served through a Gradio UI."""
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

# Tokenizer comes from the base model; weights from the fine-tuned checkpoint.
tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
model = AutoModelForCausalLM.from_pretrained("Umarpreet/scaryGPT2-large")


def text_generation(input_text, seed, max_tokens):
    """Generate a story continuation of *input_text*.

    Args:
        input_text: Prompt text to continue.
        seed: RNG seed for reproducible sampling (gr.Number passes a float,
            so it is coerced to int; torch.manual_seed rejects floats).
            Max value: 18446744073709551615.
        max_tokens: Upper bound on total sequence length (prompt + generated);
            also coerced to int, since `max_length` must be integral.

    Returns:
        The generated story as a single string.
    """
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    torch.manual_seed(int(seed))
    outputs = model.generate(
        input_ids,
        do_sample=True,
        min_length=30,
        max_length=int(max_tokens),
    )
    # batch_decode returns a list (one entry per batch item); the UI shows
    # one Textbox, so return the single decoded string rather than the list.
    generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return generated_text[0]


title = "Scary Story Generation"
description = "Scary Story Generation using GPT2 by umarpreet"

# NOTE(review): gr.inputs / gr.outputs are the legacy Gradio <4 API; kept as-is
# for compatibility with the installed version, but gr.Textbox / gr.Number are
# the modern equivalents if Gradio is upgraded.
gr.Interface(
    text_generation,
    [
        gr.inputs.Textbox(lines=2, label="Enter starting text of Story"),
        gr.inputs.Number(default=10, label="Enter seed number"),
        gr.inputs.Number(default=50, label="Enter max tokens"),
    ],
    [gr.outputs.Textbox(type="text", label="Story Generated")],
    title=title,
    description=description,
    theme="huggingface",
).launch()