import gradio as gr
from glyffuser_utils import GlyffuserPipeline

pipeline = GlyffuserPipeline.from_pretrained("yuewu/glyffuser")
def infer(text):
    generated_images = pipeline(
        [text],  # the pipeline takes a list of text prompts; wrap the single prompt
        batch_size=1,  # generate one image at a time for each request
        # generator=torch.Generator(device='cuda').manual_seed(config.seed),  # generator can be on GPU here
        num_inference_steps=50,
    ).images
    return generated_images[0]

demo = gr.Interface(fn=infer, inputs="text", outputs="image")
demo.launch()
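Once the app is running, it can also be queried programmatically. Below is a minimal sketch using `gradio_client`; the local URL (Gradio's default `http://127.0.0.1:7860`) and the example prompt are assumptions for illustration.

```python
from gradio_client import Client

# Assumes demo.launch() is serving on the default local address.
client = Client("http://127.0.0.1:7860")

# For a simple gr.Interface, the endpoint is exposed as "/predict".
# The result is a path to the generated image file. "moon" is just an example prompt.
result = client.predict("moon", api_name="/predict")
print(result)
```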