# from fastapi import FastAPI, Request
#from sse_starlette.sse import ServerSentEvent, EventSourceResponse
#from fastapi.middleware.cors import CORSMiddleware
# import uvicorn
import torch
import argparse
import logging
import os
import json
import sys
# import requests
from PIL import Image
# import pandas as pd
import base64
import time
import numpy as np
import cv2
import random
# from dataclasses import dataclass
# import pandas as pd
from pathlib import Path
import requests

from tqdm import tqdm
import os
import os.path as osp
import random
# from flask import Flask,Response,request
# from basic import *
from loguru import logger
# from mllm.dialoggen_demo import DialogGen

from hydit.config import get_args
from hydit.inference_controlnet import End2End
# import jsonlines
from tqdm import tqdm
import os
import os.path as osp
import random
from PIL import Image
from torchvision import transforms as T
import cv2

import torch_npu
from torch_npu.contrib import transfer_to_npu

# Converts a PIL image to a float32 tensor in [0, 1] with layout (C, H, W).
norm_transform = T.Compose([T.ToTensor()])


def inferencer():
    """Parse CLI args and load the generation models.

    Returns:
        (args, gen, enhancer): parsed arguments, the End2End generator, and
        the DialogGen prompt enhancer (None unless ``--enhance`` is set).

    Raises:
        ValueError: if ``args.model_root`` does not exist.
    """
    args = get_args()
    models_root_path = Path(args.model_root)
    if not models_root_path.exists():
        raise ValueError(f"`models_root` not exists: {models_root_path}")

    # Load models
    gen = End2End(args, models_root_path)

    # Try to enhance prompt
    if args.enhance:
        # Imported lazily: the module-level import (see top of file) is
        # commented out, so without this line the --enhance path raised
        # NameError on DialogGen.
        from mllm.dialoggen_demo import DialogGen
        logger.info("Loading DialogGen model (for prompt enhancement)...")
        enhancer = DialogGen(str(models_root_path / "dialoggen"), args.load_4bit)
        logger.info("DialogGen model loaded.")
    else:
        enhancer = None

    return args, gen, enhancer


def getLogger(name, file_name, use_formatter=True):
    """Create (or fetch) a named INFO-level logger writing to stdout.

    Args:
        name: logger name passed to ``logging.getLogger``.
        file_name: optional path for an additional UTF-8 file handler;
            falsy to skip file logging.
        use_formatter: when True, the file handler gets a timestamped
            '%(asctime)s - %(name)s - %(message)s' formatter.

    Returns:
        The configured ``logging.Logger``.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # logging.getLogger returns the same object for the same name; the
    # original code re-added handlers on every call, which duplicated every
    # log line. Only configure the logger once.
    if not logger.handlers:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(logging.Formatter('%(asctime)s    %(message)s'))
        console_handler.setLevel(logging.INFO)
        logger.addHandler(console_handler)
        if file_name:
            handler = logging.FileHandler(file_name, encoding='utf8')
            handler.setLevel(logging.INFO)
            if use_formatter:
                handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(message)s'))
            logger.addHandler(handler)
    return logger


def im2Str(im, ext='.jpg'):
    """Encode an image array to a base64 string of its `ext`-format bytes."""
    encoded = cv2.imencode(ext, im)[1]
    raw_bytes = encoded.tobytes()
    return base64.b64encode(raw_bytes).decode()

def process(img, maskimg, target_size):
    """Letterbox-fit `img` and `maskimg` into canvases of `target_size`.

    The image is scaled to the largest size that fits entirely inside the
    target while preserving aspect ratio, then pasted centred on a black
    canvas.  `maskimg` receives the identical resize/paste on a white
    canvas, so in the returned mask the padded border is white (1 after
    /255 — the region to be generated) and the pasted area carries
    `maskimg`'s values.

    Args:
        img: H x W x 3 uint8 BGR image.  (NOTE(review): a 4-channel input
            would fail the paste below — confirm inputs are 3-channel.)
        maskimg: mask image with the same H x W as `img`.
        target_size: (t_H, t_W) output canvas size.

    Returns:
        (maskedimg, mask, pad_H, pad_W): padded image, padded mask, and the
        top/left padding offsets inside the target canvas.
    """
    H, W = img.shape[:2]
    t_H, t_W = target_size
    # Black canvas for the image, white canvas for the mask.
    maskedimg = np.zeros((t_H, t_W, 3), dtype=np.uint8)
    mask = np.full((t_H, t_W, 3), 255, dtype=np.uint8)

    # Fit-inside scaling.  Branch order replicates the original code exactly
    # (including int truncation): landscape sources try width-fit first and
    # fall back to height-fit; portrait sources the reverse.
    if W >= H:
        W_R, H_R = t_W, int(H * (t_W / W))
        if H_R > t_H:
            H_R, W_R = t_H, int(W * (t_H / H))
    else:
        H_R, W_R = t_H, int(W * (t_H / H))
        if W_R > t_W:
            W_R, H_R = t_W, int(H * (t_W / W))

    img = cv2.resize(img, (W_R, H_R))
    maskimg = cv2.resize(maskimg, (W_R, H_R))
    pad_H = (t_H - H_R) // 2
    pad_W = (t_W - W_R) // 2
    maskedimg[pad_H:pad_H + H_R, pad_W:pad_W + W_R, :] = img
    mask[pad_H:pad_H + H_R, pad_W:pad_W + W_R, :] = maskimg
    # mask: padded (to-fill) region is 255, pasted region holds maskimg.
    return maskedimg, mask, pad_H, pad_W

def blur_mask(mask):
    """Shrink and re-binarise a 0/1 float mask.

    Inverts the mask to 0..255, erodes with a 60x60 kernel (growing the
    black region), feathers the boundary with a 31x31 Gaussian blur,
    inverts back, quantises through uint8 and finally thresholds so the
    result is strictly 0.0/1.0 again.
    """
    inverted = ((1 - mask) * 255).clip(0, 255)
    eroded = cv2.erode(inverted, np.ones((60, 60), np.uint8), iterations=1)
    blur_size = tuple(2 * k + 1 for k in (15, 15))  # -> (31, 31)
    blurred = cv2.GaussianBlur(eroded, blur_size, 0)
    out = 1 - blurred / 255.
    # Keep the uint8 round-trip before thresholding: it quantises values,
    # so the effective cut is at 128/255, not exactly 0.5.
    out = (out * 255).astype(np.uint8)
    out = out / 255.
    out[out < 0.5] = 0
    out[out >= 0.5] = 1
    return out


def process_inpaint(img, maskimg, target_size):
    """Letterbox-fit `img` and `maskimg` into `target_size` for inpainting.

    Same geometry as `process`, but with inverted canvas colours: the image
    is pasted centred on a white canvas and the mask on a black canvas, so
    the padded border is black in the returned mask and the pasted area
    carries `maskimg`'s values (white where content is to be erased).

    Args:
        img: H x W x 3 uint8 BGR image.  (NOTE(review): a 4-channel input
            would fail the paste below — confirm inputs are 3-channel.)
        maskimg: mask image with the same H x W as `img`.
        target_size: (t_H, t_W) output canvas size.

    Returns:
        (maskedimg, mask, pad_H, pad_W, H_R, W_R): padded image/mask, the
        top/left padding offsets, and the resized content size — callers
        use these to crop the generated result back out.
    """
    H, W = img.shape[:2]
    t_H, t_W = target_size
    # White canvas for the image, black canvas for the mask.
    maskedimg = np.full((t_H, t_W, 3), 255, dtype=np.uint8)
    mask = np.zeros((t_H, t_W, 3), dtype=np.uint8)

    # Fit-inside scaling; branch order matches the original code exactly
    # (landscape: width-fit first; portrait: height-fit first), preserving
    # int truncation behaviour.
    if W >= H:
        W_R, H_R = t_W, int(H * (t_W / W))
        if H_R > t_H:
            H_R, W_R = t_H, int(W * (t_H / H))
    else:
        H_R, W_R = t_H, int(W * (t_H / H))
        if W_R > t_W:
            W_R, H_R = t_W, int(H * (t_W / W))

    img = cv2.resize(img, (W_R, H_R))
    maskimg = cv2.resize(maskimg, (W_R, H_R))
    pad_H = (t_H - H_R) // 2
    pad_W = (t_W - W_R) // 2
    maskedimg[pad_H:pad_H + H_R, pad_W:pad_W + W_R, :] = img
    mask[pad_H:pad_H + H_R, pad_W:pad_W + W_R, :] = maskimg
    return maskedimg, mask, pad_H, pad_W, H_R, W_R



logger = getLogger('hunyuan_logs', 'hunyuandit_controlnet.log')
logger.info("Start initialize model...")

# Model initialisation: CLI args, End2End generator, optional prompt enhancer.
args, gen, enhancer = inferencer()
if enhancer:
    logger.info("Prompt Enhancement...")
    success, enhanced_prompt = enhancer(args.prompt)
    if not success:
        logger.info("Sorry, the prompt is not compliant, refuse to draw.")
        exit()
    logger.info(f"Enhanced prompt: {enhanced_prompt}")
else:
    enhanced_prompt = None

height, width = args.image_size


imgdir = 'data/328test/img'
maskdir = 'data/328test/mask'
# imgdir = '/llmcapagroup1/test-bucket/liuxin/shiyout2i/maskgenerate/mask_generate/0325train_fg/valsample/img'
# maskdir = '/llmcapagroup1/test-bucket/liuxin/shiyout2i/maskgenerate/mask_generate/0325train_fg/valsample/mask'
savedir = args.savedir
os.makedirs(savedir, exist_ok=True)

# savemaskeddir = 'masked'
# os.makedirs(savemaskeddir,exist_ok=True)
for f in tqdm(sorted(os.listdir(imgdir))):
    imgpath = osp.join(imgdir, f)
    maskpath = osp.join(maskdir, f)
    # NOTE(review): request_data never gets a 'ratio' key here, so the
    # outpaint branch below is unreachable in this script — confirm intended.
    request_data = {'image':imgpath, 'mask_image':maskpath}
    image = request_data['image']
    is_inpaint = False
    is_outpaint = False

    image = cv2.imdecode(np.fromfile(image, dtype=np.uint8), cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED: may decode as a 4-channel image
    h_ori, w_ori = image.shape[:2]

    if 'ratio' in request_data: # outpainting (canvas extension)
        ratio = int(request_data['ratio'])
        is_outpaint = True
        # w:h  4:3, 3:4, 16:9, 9:16, 1:1, 9:4, 9:5, 9:6
        ratio_dict = {'0':(960,1280), '1':(1280, 960), '2':(768, 1280), '3':(1280, 768), '4':(1024, 1024), '5':(560, 1280), '6':(704, 1280), '7':(848, 1280)} # h,w
        map_dict = {'0':(844, 1128), '1':(1000, 750), '2':(633, 1125), '3':(1333,750), '4':(1000, 1000), '5':(569, 1280), '6':(711, 1280), '7':(853, 1280)}
        logger.info(f"outpaint, ratio: {ratio}")
        maskimg = np.zeros_like(image) # mask is an all-black image (the whole input is kept)
        targetsize = ratio_dict.get(str(ratio))
        print('targetsize', targetsize)
        maskedimg, maskimg, pad_H, pad_W = process(image, maskimg, targetsize)
    else: # erase (inpainting)
        maskpath = request_data['mask_image']
        is_inpaint = True
        maskimg = cv2.imdecode(np.fromfile(maskpath, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        print(f"inpaint......")
        # maskedimg, maskimg = process(image, maskimg, targetsize)
        # Black out the masked (to-be-erased) region of the input image.
        maskedimg = (1-maskimg/255)*(image/255)
        maskedimg = (maskedimg*255).astype(np.uint8)

    h1, w1 = maskedimg.shape[:2]
    if is_inpaint: # for inpainting, resize first, then pad to a supported resolution
        targetdictv1 = {'768x1280':0.6, '960x1280':0.75, '1024x1024':1, '560x1280':0.4375, '704x1280':0.55, '848x1280':0.6625}  # 'hxw': h/w, landscape
        targetdictv2 = {'1280x960':1.3333, '1280x768':1.6667, '1280x832':1.5385, '1280x800':1.6, '1280x576':2.2222} # portrait
        # NOTE(review): `ratio` is reused here as the aspect ratio h/w,
        # shadowing the outpaint ratio index above; the two branches are
        # mutually exclusive per iteration, but rename if both go live.
        ratio = int(h1) / int(w1)
        if h1 <= w1:
            # Pick the supported resolution with the closest aspect ratio.
            res_key, res_val = min(targetdictv1.items(), key=lambda x: abs(ratio - x[1]))
        else:
            res_key, res_val = min(targetdictv2.items(), key=lambda x: abs(ratio - x[1]))
        h_t, w_t = int(res_key.split('x')[0]), int(res_key.split('x')[-1])
        print('h_t, w_t"', h_t, w_t)
        maskedimg, maskimg, padding_h_top, padding_w_left, h, w = process_inpaint(maskedimg, maskimg, (h_t, w_t))
        new_mask = maskimg.copy()
        new_image = maskedimg.copy()

    if is_outpaint:
        new_mask = maskimg.copy()
        new_image = maskedimg.copy()
        
    # Binarise the mask to {0, 1}.
    new_mask = new_mask /255.
    new_mask[new_mask < 0.5] = 0
    new_mask[new_mask >= 0.5] = 1
    
    new_mask = blur_mask(new_mask)

    mask_np = new_mask.copy()
    new_mask = new_mask.transpose(2, 0, 1) # c,h,w
    new_mask = torch.from_numpy(new_mask)  # tensor (3, h, w), values in [0, 1]
    new_image_pil = Image.fromarray(np.uint8(new_image[:,:,::-1]))  # BGR -> RGB
    condition = norm_transform(new_image_pil)
    # Zero out the masked region, then map [0, 1] -> [-1, 1] for the controlnet.
    condition = (1-new_mask)*condition
    condition = condition * 2.0 - 1.0
    condition = condition.unsqueeze(0).to("npu")  # Ascend NPU device (torch_npu)

    h1, w1 = new_image.shape[:2]
    # Adapt both patch embedders to the current resolution.
    gen.pipeline.controlnet.x_embedder.update_image_size((h1, w1))
    gen.pipeline.unet.x_embedder.update_image_size((h1, w1))

    prompt = '艺术海报, 高质量'
    # prompt = ""

    results = gen.predict(prompt,
                        height=new_image.shape[0],
                        width=new_image.shape[1],
                        image=condition,
                        seed=None,
                        enhanced_prompt=enhanced_prompt,
                        negative_prompt=args.negative,
                        infer_steps=args.infer_steps,
                        guidance_scale=args.cfg_scale,
                        batch_size=args.batch_size,
                        use_style_cond=args.use_style_cond,
                        src_size_cond=(new_image.shape[1], new_image.shape[0]),
                        )['images'][0] # use_style_cond=args.use_style_cond,src_size_cond=(new_image.shape[1], new_image.shape[0]),

    # Composite: generated pixels inside the mask, original pixels outside.
    output = (np.array(results)/255)[:, :, ::-1]*mask_np + (new_image/255)*(1-mask_np)
    output = (output*255).astype(np.uint8) # back to uint8, padded resolution

    # output = results

    if is_inpaint:
        output = output[padding_h_top:padding_h_top + h, padding_w_left:padding_w_left + w, :] # crop away the padding added by process_inpaint
        output = cv2.resize(output, (w1, h1))

    if is_outpaint:
        # NOTE(review): map_dict/ratio come from the (currently dead)
        # outpaint branch above — verify before re-enabling outpainting.
        f_h, f_w = map_dict.get(str(ratio))
        output = cv2.resize(output, (f_w, f_h))

    output = Image.fromarray(output[:,:,::-1])
    savename = osp.splitext(f)[0] + '.jpg'
    output.save(osp.join(savedir,savename))
