"""Gradio demo: sampled text generation with GPT-Neo 125M.

The user enters a prompt and an integer seed; the model samples a
continuation of 50-200 tokens.  Seeding torch's RNG makes the
sampled output reproducible for a given (prompt, seed) pair.
"""
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

MODEL_NAME = "EleutherAI/gpt-neo-125m"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)


def text_generation(input_text, seed):
    """Generate a sampled continuation of *input_text*.

    Parameters
    ----------
    input_text : str
        Prompt to continue.
    seed : int | float
        RNG seed.  Gradio's Number widget delivers a float, so it is
        cast to int before seeding.  Max value: 18446744073709551615.

    Returns
    -------
    str
        The decoded generated text (prompt included).
    """
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    # Seed torch's RNG so do_sample=True is reproducible per seed;
    # torch.manual_seed requires an int, but the widget sends a float.
    torch.manual_seed(int(seed))
    outputs = model.generate(
        input_ids, do_sample=True, min_length=50, max_length=200
    )
    # Decode the single returned sequence.  batch_decode would hand
    # Gradio a one-element list, which renders as "['...']" in the UI.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


title = "Text Generator Demo GPT-Neo"
description = "Text Generator Application"

examples = [
    ["Once upon a time", 123],
    ["In a galaxy far, far away", 42],
    ["The owners were also directed", 23],
]

# NOTE: the gr.inputs / gr.outputs namespaces were removed in Gradio 3.x;
# the top-level components (gr.Textbox, gr.Number) are the supported API,
# and the string theme "huggingface" is no longer accepted.
iface = gr.Interface(
    fn=text_generation,
    inputs=[
        gr.Textbox(lines=2, label="Enter input text"),
        gr.Number(value=10, label="Enter seed number"),
    ],
    outputs=gr.Textbox(label="Text Generated"),
    title=title,
    description=description,
    examples=examples,
)

if __name__ == "__main__":
    iface.launch()