import gradio as gr
import numpy as np
import random
import torch
from diffusers import KolorsPipeline,LMSDiscreteScheduler

import os
from fastapi import FastAPI
from pydantic import BaseModel
from io import BytesIO
from fastapi.responses import StreamingResponse
import uvicorn
import asyncio
import concurrent.futures

# Allow up to 150 s for Hugging Face Hub downloads before timing out.
os.environ['HF_HUB_DOWNLOAD_TIMEOUT'] = '150'

# Prefer GPU when available; fp16 weights halve memory versus fp32.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16
repo = "hf-models/Kolors-diffusers"

MAX_SEED = np.iinfo(np.int32).max  # upper bound for randomly drawn seeds
MAX_IMAGE_SIZE = 1024  # NOTE(review): unused in this file — confirm before removing

app = FastAPI()

# Thread pool for running blocking GPU inference off the asyncio event loop.
executor = concurrent.futures.ThreadPoolExecutor()


class Req(BaseModel):
    """Request body for POST /generate."""
    # Defaults mirror the `or` fallbacks applied in the endpoint, so a client
    # may omit either field instead of receiving a 422 validation error.
    prompt: str = "哈喽 Gitee AI"
    num_inference_steps: int = 25

def create_new_pipeline():
    """Load a fresh fp16 Kolors pipeline, sharded across available devices."""
    pipeline = KolorsPipeline.from_pretrained(
        repo,
        torch_dtype=dtype,
        variant="fp16",
        device_map="balanced",
    )
    return pipeline


# Single shared pipeline instance used by every request.
pipe = create_new_pipeline()

def generate_image_blocking(prompt: str, num_inference_steps: int, seed: int):
    """Run one synchronous Kolors inference pass and return the first image.

    Meant to be executed in the thread pool: the diffusion call blocks for
    the whole generation.
    """
    # Release cached CUDA memory left over from earlier generations.
    torch.cuda.empty_cache()
    rng = torch.Generator().manual_seed(seed)
    # Thread-safety note (diffusers issues #3672 / #5749): concurrent runs
    # that mutate pipe.scheduler corrupt each other. A per-call scheduler
    # copy was tried and reverted because it caused errors:
    # pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
    result = pipe(
        prompt=prompt,
        negative_prompt="残缺的文字, 残缺的手指，畸形的手指，畸形的手，残肢，低质量、色情",
        guidance_scale=7,
        num_inference_steps=num_inference_steps,
        width=1024,
        height=1024,
        generator=rng,
    )
    return result.images[0]


@app.post("/generate")
async def generate_image(req: Req):
    """Generate a 1024x1024 image from ``req.prompt`` and stream it as WEBP.

    The blocking diffusion call is dispatched to the thread pool so the
    asyncio event loop stays responsive while the GPU works.
    """
    seed = random.randint(0, MAX_SEED)
    # Fall back to defaults when the client sends empty/zero values.
    prompt = req.prompt or "哈喽 Gitee AI"
    num_inference_steps = req.num_inference_steps or 25
    print(prompt)

    # Run inference via run_in_executor instead of inline: previously the
    # pipeline was invoked directly in this coroutine, blocking the event
    # loop (and all other requests) for the duration of every generation,
    # and duplicating generate_image_blocking verbatim.
    loop = asyncio.get_running_loop()
    image = await loop.run_in_executor(
        executor, generate_image_blocking, prompt, num_inference_steps, seed
    )

    img_bytes = BytesIO()
    image.save(img_bytes, format="WEBP")
    img_bytes.seek(0)
    return StreamingResponse(img_bytes, media_type="image/webp")


if __name__ == "__main__":
    # Serve the FastAPI app directly when this file is run as a script.
    uvicorn.run("app:app", host="0.0.0.0", port=7860, workers=1)
