import gradio as gr
import numpy as np
import random
import torch
from diffusers import KolorsPipeline, EDMDPMSolverMultistepScheduler
import spaces
import os

# Generous timeout (seconds) for downloading model weights from the HF Hub.
os.environ['HF_HUB_DOWNLOAD_TIMEOUT'] = '120'

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16  # fp16 weights to halve GPU memory use
repo = "hf-models/Kolors-diffusers"

# NOTE(review): workarounds added for Tiangai-100 (Iluvatar) hardware;
# currently disabled — confirm before re-enabling on other GPUs.
# os.environ['PT_SDPA_ENABLE_IXDNN'] = '1'
# torch.backends.cuda.enable_mem_efficient_sdp(False)
# torch.backends.cuda.enable_flash_sdp(False)
# torch.backends.cuda.enable_math_sdp(True)


# Load the Kolors text-to-image pipeline at import time.
# device_map="balanced" lets diffusers spread sub-modules across devices.
pipe = KolorsPipeline.from_pretrained(
    repo, torch_dtype=dtype, variant="fp16", device_map="balanced")
MAX_SEED = np.iinfo(np.int32).max  # upper bound for the seed slider
MAX_IMAGE_SIZE = 1024              # max width/height exposed in the UI

# Alternative scheduler; left disabled.
#pipe.scheduler = EDMDPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

def infer(prompt, negative_prompt="残缺的文字, 残缺的手指，畸形的手指，畸形的手，残肢，低质量、色情", seed=1, randomize_seed=True, width=1024, height=1024, guidance_scale=7, num_inference_steps=32, progress=gr.Progress(track_tqdm=True)):
    """Generate one image with the Kolors pipeline.

    Args:
        prompt: positive text prompt.
        negative_prompt: things to steer away from.
        seed: RNG seed; ignored when randomize_seed is True.
        randomize_seed: pick a fresh random seed in [0, MAX_SEED].
        width / height: output resolution in pixels.
        guidance_scale: classifier-free guidance strength.
        num_inference_steps: denoising steps.
        progress: Gradio progress tracker (mirrors the pipeline's tqdm bar).

    Returns:
        (image, seed): the generated PIL image and the seed actually used,
        so the UI can display which seed produced the result.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    print(prompt)  # log the prompt for debugging

    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        # BUG FIX: previously `height=width`, which silently ignored the
        # height argument (and the UI's height slider).
        height=height,
        generator=generator,
    ).images[0]

    # Free cached GPU memory between requests; guard so CPU-only hosts
    # never touch the CUDA API.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    return image, seed


# Prompt examples shown in the UI (Chinese prompts; Kolors is a
# Chinese-English bilingual model, so these are exercised as-is).
examples = [
    '一张瓢虫的照片，微距，变焦，高质量，电影，瓢虫拿着一个木牌，上面写着“我爱 Gitee” 的文字',
    "美丽的白色樱花盛开，在晴朗的天空背景下，树枝上精致而新鲜的花朵，柔和对焦的摄影，日式风格，微距镜头，自然光，春天的气息，色调清新",
    "云销雨霁，彩彻区明，落霞与孤鹜齐飞，秋水共长天一色",
]
# Center the main column and cap its width.
css = """
#col-container {
    margin: 0 auto;
    max-width: 580px;
}
"""
# Build the Gradio UI: prompt row, result image, an advanced-settings
# accordion, examples, and event wiring. Widget creation order defines
# the on-screen layout, so statements here are order-sensitive.
with gr.Blocks(css=css) as demo:

    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Gitee AI Kolors-diffusers
        """)

        with gr.Row():

            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                lines=3,
                placeholder="输入你的提示词",
                container=False,
            )

            run_button = gr.Button("运行", scale=0)

        result = gr.Image(label="Result", show_label=False)
        # Advanced settings, collapsed by default.
        # NOTE(review): these widget defaults (negative prompt text,
        # guidance 7.5, steps 25) differ from infer()'s signature
        # defaults — confirm which set is intended as canonical.
        with gr.Accordion("环境配置", open=False):

            negative_prompt = gr.Text(
                label="否定提示词",
                value="低质量，模糊，残缺的文字, 残缺的手指，畸形的手指，畸形的手，残肢，色情",
                max_lines=1,
                placeholder="输入否定提示词",
            )

            seed = gr.Slider(
                label="种子",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="使用随机种子", value=True)

            with gr.Row():

                # NOTE(review): minimum == maximum == 1024 pins both
                # sliders to a single value — widen minimum if other
                # resolutions should be selectable.
                width = gr.Slider(
                    label="宽度",
                    minimum=1024,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=1024,
                )

                height = gr.Slider(
                    label="高度",
                    minimum=1024,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=1024,
                )

            with gr.Row():

                guidance_scale = gr.Slider(
                    label="引导比例",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=7.5,
                )

                num_inference_steps = gr.Slider(
                    label="推理步骤数",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=25,
                )

        # NOTE(review): cache_examples=True runs infer() for every example
        # at startup to pre-render outputs — expensive on first launch.
        gr.Examples(
            label="例子",
            examples=examples,
            cache_examples=True,
            outputs=[result, seed],
            fn=infer,
            inputs=[prompt],
        )
    # Run inference on button click or Enter in either text box; the
    # returned seed is written back into the seed slider.
    gr.on(
        triggers=[run_button.click, prompt.submit, negative_prompt.submit],
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed,
                width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed]
    )

# Queueing left disabled; enable to bound concurrent generations.
#demo.queue(default_concurrency_limit=20,  max_size=40)
demo.launch(show_api=True)
