|
import torch |
|
import gradio as gr |
|
import json |
|
from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline |
|
|
|
# Model/tokenizer setup for the anime prompt-generation demo.
# NOTE(review): the tokenizer comes from the base 'distilgpt2' checkpoint while
# the weights come from the fine-tuned promptgen checkpoint — presumably the
# fine-tune kept the distilgpt2 vocabulary; verify against the model card.
tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')

# GPT-2 ships without a pad token; one is required for batched generation.
tokenizer.add_special_tokens({'pad_token': '[PAD]'})

model = GPT2LMHeadModel.from_pretrained('FredZhang7/anime-anything-promptgen-v2')

# '[PAD]' above is a NEW token, so the embedding matrix must grow to cover its
# id — otherwise any use of the pad id would index past the embedding table.
model.resize_token_embeddings(len(tokenizer))

# Text-generation pipeline combining the fine-tuned weights with the tokenizer.
nlp = pipeline('text-generation', model=model, tokenizer=tokenizer)
|
|
|
def generate(prompt):
    """Expand a short tag prompt into 3 sampled detailed prompts.

    Args:
        prompt: Seed text (e.g. "1girl, genshin") passed to the
            text-generation pipeline.

    Returns:
        A JSON string (pretty-printed, non-ASCII preserved) of the raw
        pipeline output: a list of {"generated_text": ...} dicts.
    """
    # early_stopping was dropped: it only applies to beam search, and with
    # do_sample=True it has no effect beyond emitting a warning.
    outs = nlp(
        prompt,
        max_length=76,
        num_return_sequences=3,
        do_sample=True,
        repetition_penalty=1.2,
        temperature=0.7,
        top_k=3,
    )
    # ensure_ascii=False keeps any non-ASCII tags readable in the textbox;
    # indent=2 makes the three sequences easy to scan.
    json_str = json.dumps(outs, ensure_ascii=False, indent=2)
    # Lightweight console logging for the demo server.
    print(prompt)
    print(json_str)
    return json_str
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Gradio UI wiring: one input textbox -> generate() -> one output textbox.
input_component = gr.Textbox(label="Input a prompt", value="1girl, genshin")

# Label fixed from the ungrammatical "detail Prompt".
output_component = gr.Textbox(label="Detailed prompt")

examples = []

description = ""

gr.Interface(
    generate,
    inputs=input_component,
    outputs=output_component,
    examples=examples,
    title="anything prompt",
    description=description,
).launch()
|
|
|
|