# Copyright (c) 2024-present AI-Labs

from fastapi import APIRouter, Request, HTTPException, Response, Body, Form, status
from fastapi.responses import FileResponse, JSONResponse

from diffusers import AutoPipelineForText2Image, StableDiffusionXLPipeline, DPMSolverSinglestepScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image
from .stable_diffusion import *

from datetime import datetime

import torch
import ipex_llm
import os
import numpy as np
import random
import uuid
import json


from configs import config

# Router definition: all endpoints below are mounted under this prefix.
router = APIRouter(
    prefix='/image/stable_diffusion_xl',
    tags = ['图片生成']
)

# All pipelines run on CPU in float32 (no GPU assumed in this deployment).
device = "cpu"
dtype = torch.float32

# Upper bound for user-supplied / randomized RNG seeds.
MAX_SEED = np.iinfo(np.int32).max
# Maximum edge length (pixels) for generated images.
MAX_IMAGE_SIZE = 1024

# Alternative pipeline setups kept for reference:
# 1) GiteeAI platform deployment (accelerated hosting):
# text2imgPipe = StableDiffusionXLPipeline.from_pretrained("hf-models/sdxl-flash", torch_dtype=dtype).to(device)

# 2) Model downloaded to a local directory:
# text2imgPipe = StableDiffusionXLPipeline.from_pretrained("models/sd-community/sdxl-flash", torch_dtype=dtype).to(device)

# 3) Model path taken from the config file (current approach, done below per feature flag):
# text2imgPipe = StableDiffusionXLPipeline.from_pretrained(config.service.stable_diffusion_xl.model_path, torch_dtype=dtype, use_safetensors=True).to(device)
# text2imgPipe.scheduler = DPMSolverSinglestepScheduler.from_config(text2imgPipe.scheduler.config, timestep_spacing="trailing")

"""
生成图像数据的响应对象，支持返回Base64编码字符串、URL地址、二进制文件内容
"""
def response_output(images, body):
    if body.output_format == "url":
        # 如果前端指定url，就按URL地址返回响应
        localdir = f"image/{datetime.now().strftime('%Y-%m-%d')}"
        os.makedirs(f"{config.setting.statics.path}/{localdir}", exist_ok=True)

        urls = []
        for image in images:
            localfile = f"{localdir}/{uuid.uuid4()}.png"
            image.save(f"{config.setting.statics.path}/{localfile}")
            urls.append({"url": f"{config.setting.statics.urls}/{localfile}"})

        return ImageResponse(status="success", format=body.output_format, data=urls, meta={})
    elif body.output_format == "base64":
        # 如果前端指定base64，就按Base64编码字符串返回响应
        base64s = []
        for image in images:
            base64s.append({"b64_json": f"data:image;base64,{image_to_base64(image)}"})

        return ImageResponse(status="success", format=body.output_format, data=base64s, meta={})
    elif body.output_format == "file":
        # 如果前端指定文件，就按文件二进制数据返回响应
        localdir = f"image/{datetime.now().strftime('%Y-%m-%d')}"
        os.makedirs(f"{config.setting.statics.path}/{localdir}", exist_ok=True)

        localfiles = []
        for image in images:
            localfile = f"{localdir}/{uuid.uuid4()}.png"
            image.save(f"{config.setting.statics.path}/{localfile}")
            localfiles.append(localfile)

        return FileResponse(f"{config.setting.statics.path}/{localfiles[0]}", media_type="image/jpeg")

    return images


if config.service.stable_diffusion_xl.enable_text2img:
    # Text-to-image is enabled: instantiate the pipeline once at import time.
    text2imgPipe = AutoPipelineForText2Image.from_pretrained(
        config.service.stable_diffusion_xl.model_path,
        torch_dtype=dtype,
        use_safetensors=True,
    ).to("cpu")

    def text2img_generate(body: Text2ImageBody):
        """Run the text-to-image pipeline and return a list of PIL images."""
        real_prompt = generate_prompt(body.prompt)
        print(real_prompt)

        result = text2imgPipe(
            prompt=real_prompt,
            num_inference_steps=body.num_inference_steps,
            output_type="pil",
        )
        return result.images

    @router.post("/text2img")
    def text2img(body: Text2ImageBody):
        """Public text-to-image endpoint."""
        return response_output(text2img_generate(body), body)

    @router.post("/images/generations")
    def images_generations(body: Text2ImageBody):
        """OpenAI-style alias for the text-to-image endpoint."""
        return response_output(text2img_generate(body), body)


if config.service.stable_diffusion_xl.enable_img2img:
    # Image-to-image is enabled: instantiate the pipeline once at import time.
    img2imgPipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(config.service.stable_diffusion_xl.model_path, torch_dtype=dtype).to(device)

    def img2img_generate(body: Img2ImgBody):
        """Run the image-to-image pipeline and return a list of PIL images.

        ``body.init_image`` may be either a data-URI Base64 string
        (``data:...``) or a URL/path loadable by diffusers' ``load_image``.
        """
        real_prompt = generate_prompt(body.prompt)

        if body.randomize_seed:
            body.seed = random.randint(0, MAX_SEED)
        generator = torch.Generator().manual_seed(body.seed)

        # Accept both inline Base64 and URL/path source images.
        if body.init_image.startswith("data:"):
            init_image = base64_to_image(body.init_image.split(',')[1], "init_image")
        else:
            init_image = load_image(body.init_image)

        images = img2imgPipe(
            prompt=real_prompt,
            image=init_image.convert("RGB"),
            negative_prompt=body.negative_prompt,
            guidance_scale=body.guidance_scale,
            num_inference_steps=body.num_inference_steps,
            width=body.width,
            # BUG FIX: was height=body.width (copy-paste), which forced square
            # output regardless of the requested height.
            # NOTE(review): assumes Img2ImgBody declares a `height` field
            # alongside `width` — confirm in stable_diffusion models.
            height=body.height,
            num_images_per_prompt=body.samples,
            generator=generator,
            output_type="pil"
        ).images

        return images

    @router.post("/img2img")
    def img2img(body: Img2ImgBody):
        """Public image-to-image endpoint."""
        images = img2img_generate(body)
        return response_output(images, body)

    @router.post("/images/edits")
    def images_edits(body: Img2ImgBody):
        """OpenAI-style alias for the image-to-image endpoint."""
        images = img2img_generate(body)
        return response_output(images, body)


if config.service.stable_diffusion_xl.enable_inpainting:
    # Inpainting is enabled: instantiate the pipeline once at import time.
    inpaintingPipe = StableDiffusionXLInpaintPipeline.from_pretrained(config.service.stable_diffusion_xl.inpainting_model_path, torch_dtype=dtype, variant="fp16").to(device)

    def inpainting_generate(body: InpaintingBody):
        """Run the inpainting pipeline and return a list of PIL images.

        Both ``body.init_image`` and ``body.mask_image`` may be either a
        data-URI Base64 string (``data:...``) or a URL/path loadable by
        diffusers' ``load_image``.
        """
        real_prompt = generate_prompt(body.prompt)

        if body.randomize_seed:
            body.seed = random.randint(0, MAX_SEED)
        generator = torch.Generator().manual_seed(body.seed)

        # Accept both inline Base64 and URL/path for source and mask images.
        if body.init_image.startswith("data:"):
            init_image = base64_to_image(body.init_image.split(',')[1], "init_image")
        else:
            init_image = load_image(body.init_image)
        if body.mask_image.startswith("data:"):
            mask_image = base64_to_image(body.mask_image.split(',')[1], "mask_image")
        else:
            mask_image = load_image(body.mask_image)

        images = inpaintingPipe(
            prompt=real_prompt,
            image=init_image.convert("RGB"),
            mask_image=mask_image.convert("RGB"),
            negative_prompt=body.negative_prompt,
            guidance_scale=body.guidance_scale,
            num_inference_steps=body.num_inference_steps,
            width=body.width,
            # BUG FIX: was height=body.width (copy-paste), which forced square
            # output regardless of the requested height.
            # NOTE(review): assumes InpaintingBody declares a `height` field
            # alongside `width` — confirm in stable_diffusion models.
            height=body.height,
            num_images_per_prompt=body.samples,
            generator=generator,
            output_type="pil"
        ).images

        return images

    @router.post("/inpainting")
    def inpainting(body: InpaintingBody):
        """Public inpainting endpoint."""
        images = inpainting_generate(body)
        return response_output(images, body)

    @router.post("/images/inpainting")
    def images_inpainting(body: InpaintingBody):
        """Alias route for the inpainting endpoint."""
        images = inpainting_generate(body)
        return response_output(images, body)
