import httpcore

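# Workaround: googletrans 3.x references httpcore.SyncHTTPTransport, which newer
# httpcore releases no longer define. Creating a placeholder attribute before the
# googletrans import below avoids an AttributeError.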
setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')

import gradio as gr
from googletrans import Translator
from huggingface_hub import InferenceClient
from PIL import Image
import time
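# Image models offered in the dropdown.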
models = [
    "Yntec/NostalgicLife",
    "Yntec/Genuine",
    "Yntec/Abased",
    "Yntec/CuteFurry",
    "Yntec/incha_re_zoro",
    "Yntec/InsaneM3U",
    "digiplay/2K-VAE",
    "digiplay/ya3_VAE",
    "digiplay/ya3p_VAE",
    "digiplay/pan04",
    "digiplay/AM-mix1",
    "digiplay/MRMD_0505",
]
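# googletrans client; prompts are translated to English before image generation.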
translator = Translator()


def translate_to_english(prompt):
    """Translate the prompt to English, auto-detecting the source language."""
    try:
        translated_prompt = translator.translate(prompt, src='auto', dest='en').text
        return translated_prompt
    except Exception as e:
        # If translation fails, fall back to returning the error message.
        return str(e)


def respond_with_timestamp(prompt, model_name):
    """Translate the prompt and generate three images with the selected model."""
    client = InferenceClient(model=model_name)

    translated_prompt = translate_to_english(prompt)

    # Append a slightly different timestamp to each copy of the prompt so the three generations differ.
    prompt_with_timestamps = [f"{translated_prompt} {time.time() + i}" for i in range(3)]

    images = [client.text_to_image(text) for text in prompt_with_timestamps]

    return images


demo = gr.Interface(
    fn=respond_with_timestamp,
    inputs=[
        gr.Textbox(label="請輸入提示語 Please input a prompt"),
        gr.Dropdown(label="選擇模型 Choose a model", choices=models),
    ],
    outputs=[gr.Image(type="pil", label=f"img-{i+1}", show_share_button=False) for i in range(3)],
    title="Text-to-Image with Google Translation",
)

if __name__ == "__main__":
    demo.launch()