import torch
import gradio as gr
import json
from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline
# the promptgen model pairs with the distilgpt2 tokenizer; add a pad token for batching
tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
model = GPT2LMHeadModel.from_pretrained('FredZhang7/anime-anything-promptgen-v2')
# prompt = r'1girl, genshin'
# generate text using fine-tuned model
nlp = pipeline('text-generation', model=model, tokenizer=tokenizer)
def generate(prompt):
    # generate 3 samples with top-k sampling
    outs = nlp(prompt, max_length=76, num_return_sequences=3, do_sample=True,
               repetition_penalty=1.2, temperature=0.7, top_k=3, early_stopping=True)
    jsonStr = json.dumps(outs)
    print(prompt)
    print(jsonStr)
    return jsonStr
# Optional cleanup (not used above): collapse double spaces and strip trailing commas from each sample
# for i in range(len(outs)):
#     outs[i] = str(outs[i]['generated_text']).replace('  ', ' ').rstrip(',')
#     # print(str(outs[i]['generated_text']))
# print('\033[92m' + '\n\n'.join(outs) + '\033[0m\n')
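# A minimal working sketch of that cleanup as a helper (hypothetical; generate() above
# still returns the raw pipeline output as JSON and does not call this):
def clean_samples(outs):
    # keep only the generated text, collapse double spaces, drop trailing commas
    return [str(o['generated_text']).replace('  ', ' ').rstrip(',') for o in outs]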
input_component = gr.Textbox(label="Input a prompt", value="1girl, genshin")
output_component = gr.Textbox(label="Detailed prompt")
examples = []
description = ""
gr.Interface(generate, inputs=input_component, outputs=output_component, examples=examples, title="Anything prompt", description=description).launch()