from transformers import pipeline, set_seed
import gradio as gr

# Cache loaded pipelines so switching models in the UI does not rebuild
# them on every request.
pipelines = {}

def load_model(model_name):
    """Return a cached text-generation pipeline, loading it on first use."""
    if model_name not in pipelines:
        pipelines[model_name] = pipeline('text-generation', model=model_name, trust_remote_code=True)
    return pipelines[model_name]

def generate_text(model, prompt, temperature, max_length, top_p):
    # A temperature of 0 means greedy decoding; otherwise sample with the
    # requested temperature and top-p. Passing sampling parameters only when
    # sampling avoids spurious transformers warnings.
    do_sample = temperature > 0
    generator = load_model(model)
    generation_kwargs = {"max_length": max_length, "do_sample": do_sample}
    if do_sample:
        generation_kwargs["temperature"] = temperature
        generation_kwargs["top_p"] = top_p
    return generator(prompt, **generation_kwargs)[0]["generated_text"]
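# A minimal sketch for quick manual testing without the UI, left commented
# out so it does not run on import. set_seed (from transformers) pins the
# sampling RNG so repeated runs match; the seed 42 and the example arguments
# are arbitrary illustrative choices.
# set_seed(42)
# print(generate_text('gpt2', 'Write a poem about spring', 0.7, 32, 0.9))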
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.components.Dropdown(label="Choose a Model", choices=['gpt2', 'as-cle-bert/tinyllama-essay-scorer'], value='gpt2', info="Select the model for generating text."),
        gr.components.Dropdown(label="Prompt", choices=['Write a tagline for an ice cream shop', 'Write a poem about spring', 'Write an introduction to the University of Zurich'], value='Write a tagline for an ice cream shop'),
        gr.components.Slider(minimum=0, maximum=2, step=0.01, value=1, label="Temperature", info="(For τ = 1 the distribution is unchanged; for τ > 1 it becomes more uniform; for τ < 1 it becomes more peaked. τ = 0 uses greedy decoding.)"),
        gr.components.Slider(minimum=1, maximum=256, step=1, value=16, label="Max Length", info="(Maximum number of tokens in the generated text, including the prompt.)"),
        gr.components.Slider(minimum=0, maximum=1, step=0.01, value=1, label="Top-p", info="(Top-p sampling keeps only the smallest set of tokens whose cumulative probability exceeds p.)")
    ],
    outputs=[gr.Textbox(label="Output", lines=3, placeholder="Hello, World!")],
    title="Text Generation Control Panel",
    description="Adjust the settings to control the text generation parameters."
)
# share=True requests a temporary public gradio.live URL in addition to the
# local server; omit it to serve only on localhost.
interface.launch(share=True)
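# An optional sketch, not part of the original script: if several users hit
# the app at once, Gradio's built-in request queue can serialize the
# generation calls. Enabling it would look like:
# interface.queue().launch(share=True)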