#!/usr/bin/env python

from __future__ import annotations

import os
import random

import gradio as gr
import numpy as np
import PIL.Image
import requests
import spaces
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

DESCRIPTION = "# AI 作画"
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
ENABLE_REFINER = os.getenv("ENABLE_REFINER", "1") == "1"

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    # Load the SDXL base pipeline (and optionally the refiner) once at startup.
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        vae=vae,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    )
    if ENABLE_REFINER:
        refiner = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-refiner-1.0",
            vae=vae,
            torch_dtype=torch.float16,
            use_safetensors=True,
            variant="fp16",
        )

    if ENABLE_CPU_OFFLOAD:
        pipe.enable_model_cpu_offload()
        if ENABLE_REFINER:
            refiner.enable_model_cpu_offload()
    else:
        pipe.to(device)
        if ENABLE_REFINER:
            refiner.to(device)

    if USE_TORCH_COMPILE:
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        if ENABLE_REFINER:
            refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


def translateEN(zh):
    # Translate Chinese input to English via the DeepL API; returns None for empty input.
    # NOTE: the auth key is hardcoded here; reading it from an environment variable would be safer.
    if zh:
        result = requests.post(
            "https://api-free.deepl.com/v2/translate",
            params={
                "auth_key": "e8b4d428-ada5-3f8d-f965-bad01e8a06c1:fx",
                "target_lang": "EN-US",
                "text": zh,
            },
        )
        return result.json()["translations"][0]["text"]


def process_text(prompt):
    # Translate a prompt and log both versions; empty prompts pass through as None.
    if prompt:
        print("中文提示词: \n", prompt)
        prompt_trans = translateEN(prompt)
        print("prompt: \n", prompt_trans)
        return prompt_trans

@spaces.GPU
def generate(
    prompt: str,
    # size_option: str = "竖版",
    negative_prompt: str = "",
    prompt_2: str = "",
    negative_prompt_2: str = "",
    use_negative_prompt: bool = False,
    use_prompt_2: bool = False,
    use_negative_prompt_2: bool = False,
    seed: int = 0,
    width: int = 736,
    height: int = 1024,
    guidance_scale_base: float = 5.0,
    guidance_scale_refiner: float = 5.0,
    num_inference_steps_base: int = 25,
    num_inference_steps_refiner: int = 25,
    apply_refiner: bool = False,
) -> PIL.Image.Image:
    generator = torch.Generator().manual_seed(seed)

    if not use_negative_prompt:
        negative_prompt = None  # type: ignore
    if not use_prompt_2:
        prompt_2 = None  # type: ignore
    if not use_negative_prompt_2:
        negative_prompt_2 = None  # type: ignore

    # if size_option == "横版":
    #     width, height = 1024, 736
    # elif size_option == "竖版":
    #     width, height = 736, 1024
    # elif size_option == "方形":
    #     width, height = 736, 736
    # else:
    #     width, height = 736, 1024  # could define a default value here
    # process_text("里面做一个测试")
    # print("prompt是:", prompt)
    # print("negative_prompt是:", negative_prompt)
    # print("prompt_2是:", prompt_2)
    # print("negative_prompt_2是:", negative_prompt_2)

    if not apply_refiner:
        # Single-stage generation: the base pipeline decodes straight to a PIL image.
        return pipe(
            prompt=process_text(prompt),
            negative_prompt=process_text(negative_prompt),
            prompt_2=process_text(prompt_2),
            negative_prompt_2=process_text(negative_prompt_2),
            width=width,
            height=height,
            guidance_scale=guidance_scale_base,
            num_inference_steps=num_inference_steps_base,
            generator=generator,
            output_type="pil",
        ).images[0]
    else:
        # Two-stage generation: the base pipeline emits latents that the refiner finishes.
        # Note that each prompt field is translated again for the refiner call, so this
        # path hits the DeepL API twice per field.
        latents = pipe(
            prompt=process_text(prompt),
            negative_prompt=process_text(negative_prompt),
            prompt_2=process_text(prompt_2),
            negative_prompt_2=process_text(negative_prompt_2),
            width=width,
            height=height,
            guidance_scale=guidance_scale_base,
            num_inference_steps=num_inference_steps_base,
            generator=generator,
            output_type="latent",
        ).images
        image = refiner(
            prompt=process_text(prompt),
            negative_prompt=process_text(negative_prompt),
            prompt_2=process_text(prompt_2),
            negative_prompt_2=process_text(negative_prompt_2),
            guidance_scale=guidance_scale_refiner,
            num_inference_steps=num_inference_steps_refiner,
            image=latents,
            generator=generator,
        ).images[0]
        return image


examples = [
    "宇航员在丛林中,冷色调,柔和的色彩,细节,8k",
    "一只熊猫戴着草帽,在湖面上划船,电影风格,4K",
]

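# For quick local testing, generate() can also be called directly. A minimal sketch,
# assuming a CUDA device (pipe and refiner are only created when
# torch.cuda.is_available()), ENABLE_REFINER left at its default of "1", and a valid
# DeepL auth key above:
#
#   image = generate(prompt=examples[0], seed=42, apply_refiner=True)
#   image.save("output.png")
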
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="提示词",
                show_label=False,
                max_lines=1,
                placeholder="输入要生成的画面内容",
                container=False,
            )
            run_button = gr.Button("生成", scale=0)
        result = gr.Image(label="生成结果", show_label=False)
    # # Use a Radio component in place of the two size Sliders
    # size_option = gr.Radio(choices=["横版", "竖版", "方形"], label="选择尺寸", value="竖版")
    with gr.Accordion("高级选项", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="使用反向提示词", value=False)
            use_prompt_2 = gr.Checkbox(label="使用提示词 2", value=False)
            use_negative_prompt_2 = gr.Checkbox(label="使用反向提示词 2", value=False)
        negative_prompt = gr.Text(
            label="反向提示词",
            max_lines=1,
            placeholder="输入不想在画面中出现的内容,比如:“胡子”,“人群”",
            visible=False,
        )
        prompt_2 = gr.Text(
            label="提示词 2",
            max_lines=1,
            placeholder="输入你的提示词",
            visible=False,
        )
        negative_prompt_2 = gr.Text(
            label="反向提示词 2",
            max_lines=1,
            placeholder="输入你的反向提示词",
            visible=False,
        )
        seed = gr.Slider(
            label="种子数",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="随机种子数", value=True)
        with gr.Row():
            width = gr.Slider(
                label="宽度",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=736,
            )
            height = gr.Slider(
                label="高度",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=1024,
            )
        apply_refiner = gr.Checkbox(label="增加精炼模型(refiner)", value=False, visible=ENABLE_REFINER)
        with gr.Row():
            guidance_scale_base = gr.Slider(
                label="提示词相关性",
                minimum=1,
                maximum=20,
                step=0.1,
                value=7.5,
            )
            num_inference_steps_base = gr.Slider(
                label="模型迭代步数",
                minimum=10,
                maximum=100,
                step=1,
                value=25,
            )
        with gr.Row(visible=False) as refiner_params:
            guidance_scale_refiner = gr.Slider(
                label="提示词相关性(refiner)",
                minimum=1,
                maximum=20,
                step=0.1,
                value=7.5,
            )
            num_inference_steps_refiner = gr.Slider(
                label="模型迭代步数(refiner)",
                minimum=10,
                maximum=100,
                step=1,
                value=25,
            )

    gr.Examples(
        label="例子",
        examples=examples,
        inputs=prompt,
        outputs=result,
        fn=generate,
        cache_examples=CACHE_EXAMPLES,
    )

    # Toggle visibility of the optional prompt fields and the refiner sliders.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        queue=False,
        api_name=False,
    )
    use_prompt_2.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_prompt_2,
        outputs=prompt_2,
        queue=False,
        api_name=False,
    )
    use_negative_prompt_2.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt_2,
        outputs=negative_prompt_2,
        queue=False,
        api_name=False,
    )
    apply_refiner.change(
        fn=lambda x: gr.update(visible=x),
        inputs=apply_refiner,
        outputs=refiner_params,
        queue=False,
        api_name=False,
    )

    # On submit or button click: first (maybe) randomize the seed, then generate.
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            prompt_2.submit,
            negative_prompt_2.submit,
            run_button.click,
        ],
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=[
            prompt,
            # size_option,
            negative_prompt,
            prompt_2,
            negative_prompt_2,
            use_negative_prompt,
            use_prompt_2,
            use_negative_prompt_2,
            seed,
            width,
            height,
            guidance_scale_base,
            guidance_scale_refiner,
            num_inference_steps_base,
            num_inference_steps_refiner,
            apply_refiner,
        ],
        outputs=result,
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=30).launch(max_threads=2)
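
# The .then(...) chain above exposes generate under api_name="run", so the app can
# also be driven programmatically. A minimal client sketch, assuming the app is
# running locally on Gradio's default port and that the installed gradio_client
# version matches the server's Gradio version; the positional arguments must follow
# the `inputs` list wired to gr.on() above:
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict(
#       "宇航员在丛林中,冷色调,柔和的色彩,细节,8k",  # prompt
#       "",     # negative_prompt
#       "",     # prompt_2
#       "",     # negative_prompt_2
#       False,  # use_negative_prompt
#       False,  # use_prompt_2
#       False,  # use_negative_prompt_2
#       0,      # seed
#       736,    # width
#       1024,   # height
#       7.5,    # guidance_scale_base
#       7.5,    # guidance_scale_refiner
#       25,     # num_inference_steps_base
#       25,     # num_inference_steps_refiner
#       False,  # apply_refiner
#       api_name="/run",
#   )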