# Duplicated from Celestinian/Prompt-Generator
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

# Run on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("Celestinian/PromptGPT")
model = AutoModelForCausalLM.from_pretrained("Celestinian/PromptGPT").to(device)
def generate_text(prompt, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p):
    # Start the prompt on a fresh line and make sure it contains at least
    # one comma, matching the comma-separated format the model expects.
    formatted_prompt = "\n" + prompt
    if ',' not in prompt:
        formatted_prompt += ','
    inputs = tokenizer(formatted_prompt, return_tensors='pt')
    inputs = {key: value.to(device) for key, value in inputs.items()}
    # Gradio sliders return floats; generate() expects ints for these arguments.
    out = model.generate(**inputs, max_length=int(max_length), do_sample=do_sample,
                         temperature=temperature, no_repeat_ngram_size=int(no_repeat_ngram_size),
                         top_k=int(top_k), top_p=top_p)
    output = tokenizer.decode(out[0], skip_special_tokens=True)
    clean_output = output.strip()  # drop the leading newline added above
    print(clean_output)
    return clean_output
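# Quick smoke test without launching the UI (a minimal sketch; the prompt
# string below is only an illustrative input, not from the original Space):
# generate_text("a cat sitting on a windowsill", max_length=30, do_sample=True,
#               temperature=0.4, no_repeat_ngram_size=1, top_k=50, top_p=0.2)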
# Build the Gradio interface. The legacy gr.inputs/gr.outputs namespaces were
# removed in Gradio 3+, so the components are used directly and `default=`
# becomes `value=`.
input_text = gr.Textbox(lines=5, label="Input Text")
max_length = gr.Slider(minimum=10, maximum=100, value=30, step=1, label="Max Length")
do_sample = gr.Checkbox(value=True, label="Do Sample")
temperature = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.4, label="Temperature")
no_repeat_ngram_size = gr.Slider(minimum=1, maximum=10, value=1, step=1, label="No Repeat N-Gram Size")
top_k = gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top K")
top_p = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.2, label="Top P")
output_text = gr.Textbox(label="Generated Text")

gr.Interface(generate_text,
             inputs=[input_text, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p],
             outputs=output_text).launch()
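# To run locally (assuming transformers, torch, and gradio are installed):
#   python app.py
# then open the local URL Gradio prints, e.g. http://127.0.0.1:7860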