# pip install transformers gradio scipy ftfy "ipywidgets>=7,<8" datasets diffusers

import gradio as gr
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline

model_id = "hakurei/waifu-diffusion"
device = "cpu"

# pipe = StableDiffusionPipeline.from_pretrained(model_id,
#                                                resume_download=True,  # resume interrupted model downloads
#                                                torch_dtype=torch.float16,
#                                                revision='fp16')
# pipe = pipe.to(device)

block = gr.Blocks(css=".container { max-width: 800px; margin: auto; }")

num_samples = 2


def infer(prompt, slider_value=None):
    # Placeholder while the pipeline above is commented out: print the inputs and
    # echo the prompt back. Once the pipeline is enabled, return a list of PIL
    # images so the Gallery output can display them.
    print(prompt, slider_value)
    return prompt
    # with autocast("cuda"):
    #     images = pipe([prompt] * num_samples,
    #                   height=512,  # was "hight=111"; Stable Diffusion sizes must be multiples of 8
    #                   width=512,
    #                   guidance_scale=7.5)["sample"]
    # return images


with block as demo:
    gr.Markdown("<h1><center>Waifu Diffusion</center></h1>")
    gr.Markdown(
        "waifu-diffusion is a latent text-to-image diffusion model that has been conditioned on high-quality anime images through fine-tuning."
    )
    with gr.Group():
        with gr.Box():
            with gr.Column().style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt", show_label=False, max_lines=1
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                slider = gr.Slider(0, 1000, 10)
                btn = gr.Button("Run").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
        gallery = gr.Gallery(
            label="Generated images", show_label=False
        ).style(grid=[2], height="auto")

    text.submit(infer, inputs=[text, slider], outputs=gallery)
    btn.click(infer, inputs=[text], outputs=gallery)
    gr.Markdown(
        """___

        Created by https://huggingface.co/hakurei
        """
    )

demo.launch(debug=True)