# 2024ss / app.py
import torch
from diffusers import AutoPipelineForText2Image, DDPMScheduler
from transformers import pipeline
import gradio as gr


def translate(text):
    # Translate Korean text to English with the Helsinki-NLP Opus-MT ko-en model.
    model_checkpoint = "Helsinki-NLP/opus-mt-ko-en"
    translator = pipeline("translation", model=model_checkpoint)
    translated = translator(text)
    return translated[0]["translation_text"]
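
# Hedged usage sketch for translate(): the Korean phrase and the English rendering below
# are illustrative only, and the exact wording depends on the Opus-MT checkpoint.
#   translate("푸른 바다 위의 요트")  # -> roughly "A yacht on the blue sea"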
# Load SDXL base in fp16, switch to a DDPM scheduler, and fuse the 2024SS LoRA adapter.
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
adapter_id = "ksyint/teu_lora"
pipe = AutoPipelineForText2Image.from_pretrained(
    model_id, torch_dtype=torch.float16, variant="fp16", low_cpu_mem_usage=False
)
pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()
def main(English, Korean, Negative_English, Negative_Korean):
    # Build the prompt from exactly one language; Korean input is translated to English first.
    english = English.strip()
    korean = Korean.strip()
    prompt2 = "2024SS "
    if english and not korean:
        prompt2 += english
        negative = Negative_English
    elif korean and not english:
        prompt2 += translate(korean)
        negative = translate(Negative_Korean) if Negative_Korean else ""
    else:
        raise Exception("Enter a prompt in exactly one language: English or Korean.")
    steps = 60
    image = pipe(
        prompt=prompt2,
        negative_prompt=f"worst quality, multiple people, {negative}",
        num_inference_steps=steps,
        guidance_scale=5.0,
    ).images[0]
    return image
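
# Hedged example of calling main() directly (the prompt text here is made up; in the app
# the Gradio interface below supplies these four fields). It would return a PIL image
# generated from the prompt "2024SS floral summer dress, studio lighting":
#   main(English="floral summer dress, studio lighting", Korean="",
#        Negative_English="blurry", Negative_Korean="")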
iface = gr.Interface(fn=main, inputs=["text", "text", "text", "text"], outputs="image",
                     title="Generate 2024SS style from your favorites",
                     description="Enter a prompt in one language, English or Korean. Do not include 2024SS; it is added automatically.")
iface.launch()