import gradio as gradio
import random
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler, StableDiffusionImg2ImgPipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Checkpoints selectable from the UI model dropdown.
sd_models = ["stabilityai/stable-diffusion-2-1-base", "runwayml/stable-diffusion-v1-5",
             "hakurei/waifu-diffusion"]

# Default number of images generated per request (UI exposes four output slots).
num_samples = 4
strength = 0.75  # img2img strength, range 0-1: how much the text prompt alters the source image
guidance_scale = 7.5  # classifier-free guidance weight
num_inference_steps = 50  # number of denoising steps
# Sample prompts; prompt3 is the default value shown in the prompt textarea.
prompt1 = "a photo of an astronaut riding a horse on mars"
prompt2 = "masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, watercolor, night, turtleneck"
prompt3 = "masterpiece, best quality, 1girl,"


def trans_device(device):
    """Normalize a UI device choice ("auto"/"gpu"/"cpu") to a device string.

    Returns "cuda" or "cpu". Always returning a plain string (the original
    returned a torch.device object on the "auto" path) keeps the downstream
    `device == "cuda"` checks — which gate float16 loading — reliable, since
    comparing a torch.device to a str is False on older torch releases.
    Strings are accepted by `pipe.to(...)` and `torch.Generator(...)` alike.
    """
    if device == "auto":
        # Prefer the GPU when one is visible to torch.
        return "cuda" if torch.cuda.is_available() else "cpu"
    if device == "gpu":
        return "cuda"
    # "cpu" (or any explicit value) passes through unchanged.
    return device


def gen_text_image(model, device, prompt, negative_prompt, num_samples, seed, width, height, guidance_scale,
                   num_inference_steps):
    """Generate up to four images from a text prompt with a Stable Diffusion pipeline.

    Parameters arrive as raw gradio widget values (numbers may be floats or
    strings), hence the explicit int()/float() casts. Returns a 4-tuple of
    PIL images, padded with None when fewer than four were produced, to match
    the four output Image components.
    """
    device = trans_device(device)

    if model == "stabilityai/stable-diffusion-2-1-base":
        # Per the 2-1-base model card, pair the checkpoint with the Euler scheduler.
        scheduler = EulerDiscreteScheduler.from_pretrained(model, subfolder="scheduler")
        pipe = StableDiffusionPipeline.from_pretrained(model, scheduler=scheduler,
                                                       torch_dtype=torch.float16 if device == "cuda" else None,
                                                       safety_checker=None, requires_safety_checker=False)
    else:
        pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16 if device == "cuda" else None,
                                                       safety_checker=None,
                                                       requires_safety_checker=False)
    pipe = pipe.to(device)

    # The original duplicated the whole pipeline call just to add/omit the
    # generator; build the optional kwargs once instead.
    extra_kwargs = {}
    if seed is not None and seed != '':
        extra_kwargs["generator"] = torch.Generator(device).manual_seed(int(seed))

    n = int(num_samples)
    images = pipe(prompt=[prompt] * n, negative_prompt=[negative_prompt] * n,
                  guidance_scale=float(guidance_scale),
                  num_inference_steps=int(num_inference_steps),
                  width=int(width),
                  height=int(height),
                  **extra_kwargs).images

    # Pad (or truncate) to exactly four slots for the four output widgets.
    padded = list(images[:4]) + [None] * max(0, 4 - len(images))
    return padded[0], padded[1], padded[2], padded[3]


def gen_image_image(model, device, prompt, negative_prompt, num_samples, image, strength, guidance_scale,
                    num_inference_steps):
    """Run img2img: redraw `image` (a PIL image) guided by the text prompt.

    Returns a 4-tuple of PIL images padded with None, matching the four
    output Image components.
    """
    device = trans_device(device)

    # Fix: the original compared against "stabilityai/stable-diffusion-2-base",
    # which is not offered in sd_models, so the Euler-scheduler branch was
    # unreachable. Match the 2-1-base entry, consistent with gen_text_image.
    if model == "stabilityai/stable-diffusion-2-1-base":
        scheduler = EulerDiscreteScheduler.from_pretrained(model, subfolder="scheduler")
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model, scheduler=scheduler,
                                                              torch_dtype=torch.float16 if device == "cuda" else None,
                                                              safety_checker=None, requires_safety_checker=False)
    else:
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model,
                                                              torch_dtype=torch.float16 if device == "cuda" else None,
                                                              safety_checker=None,
                                                              requires_safety_checker=False)
    pipe = pipe.to(device)

    n = int(num_samples)
    images = pipe(prompt=[prompt] * n, negative_prompt=[negative_prompt] * n, image=image,
                  guidance_scale=float(guidance_scale), strength=float(strength),
                  num_inference_steps=int(num_inference_steps)).images

    # Pad (or truncate) to exactly four slots for the four output widgets.
    padded = list(images[:4]) + [None] * max(0, 4 - len(images))
    return padded[0], padded[1], padded[2], padded[3]


def gen_text(device, prompt):
    """Extend `prompt` with the MagicPrompt GPT-2 model and return the result.

    Loads the tokenizer and model on every call, generates one continuation
    whose target length is the prompt length plus a random 60-90 characters,
    and strips newlines from the generated text.
    """
    device = trans_device(device)
    magic_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
    magic_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
    # Place the pipeline on GPU 0 only when CUDA was selected; None lets
    # transformers default to CPU.
    pipe_device = torch.device("cuda:0") if device == "cuda" else None
    generator = pipeline("text-generation", model=magic_model, tokenizer=magic_tokenizer,
                         device=pipe_device)
    target_length = len(prompt) + random.randint(60, 90)
    outputs = generator(prompt, max_length=target_length, num_return_sequences=1)
    extended = outputs[0]['generated_text']
    return extended.replace("\n", "")


# Build the web UI: a settings column on the left (30%) and four output
# image slots split across two columns on the right (35% each).
app = gradio.Blocks()
with app:
    with gradio.Row():
        with gradio.Column(scale=30):
            # Shared generation settings (labels are in Chinese: model, device,
            # prompt, "extend" button, negative prompt, sample count, steps, guidance).
            sd_model = gradio.Dropdown(label="模型", choices=sd_models, value=sd_models[0])
            sd_device = gradio.Dropdown(label="设备", choices=["auto", "gpu", "cpu"], value="auto")
            sd_prompt = gradio.TextArea(label="提示词", value=prompt3)
            sd_text_gen_submit = gradio.Button("扩展", variant="primary")
            sd_negative_prompt = gradio.TextArea(label="反向提示词")
            sd_num_samples = gradio.Number(label="生成数量", value=num_samples)
            sd_num_inference_steps = gradio.Number(label="推理步数", value=num_inference_steps)
            sd_guidance_scale = gradio.Number(label="权重", value=guidance_scale)

            # text2image-specific inputs: optional seed plus output size.
            with gradio.Tab("text2Image"):
                sd_seed = gradio.Text(
                    label="所需的种子。值必须在 [-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff] 范围内。")
                sd_width = gradio.Number(label="width", value=512)
                sd_height = gradio.Number(label="height", value=512)
                sd_text_2_image_submit = gradio.Button("生成", variant="primary")
            # image2image-specific inputs: reference image and strength.
            with gradio.Tab("image2Image"):
                sd_image = gradio.Image(label="参考图片", type="pil")
                sd_strength = gradio.Number(label="调整强度 - 取值范围 0～1", value=strength)
                sd_image_2_image_submit = gradio.Button("生成", variant="primary")
        # Both generation modes write into the same four output slots.
        with gradio.Column(scale=35):
            sd_out_image1 = gradio.Image(label="Output Image")
            sd_out_image2 = gradio.Image(label="Output Image")
        with gradio.Column(scale=35):
            sd_out_image3 = gradio.Image(label="Output Image")
            sd_out_image4 = gradio.Image(label="Output Image")

        # "扩展" rewrites the prompt textarea in place via the GPT-2 extender.
        sd_text_gen_submit.click(gen_text, [sd_device, sd_prompt], [sd_prompt])
        sd_text_2_image_submit.click(gen_text_image,
                                     [sd_model, sd_device, sd_prompt, sd_negative_prompt, sd_num_samples, sd_seed,
                                      sd_width,
                                      sd_height, sd_guidance_scale,
                                      sd_num_inference_steps],
                                     [sd_out_image1, sd_out_image2, sd_out_image3, sd_out_image4])
        sd_image_2_image_submit.click(gen_image_image,
                                      [sd_model, sd_device, sd_prompt, sd_negative_prompt, sd_num_samples, sd_image,
                                       sd_strength,
                                       sd_guidance_scale,
                                       sd_num_inference_steps],
                                      [sd_out_image1, sd_out_image2, sd_out_image3, sd_out_image4])

    # share=True exposes a public gradio link; the app listens on port 5000.
    app.launch(share=True, server_port=5000)
