from PIL import Image
import torch
import numpy as np
from .preprocess.openpose.run_openpose import OpenPose
from .preprocess.humanparsing.run_parsing import Parsing
from .ootd.inference_ootd_hd import OOTDiffusionHD
from .ootd.inference_ootd_dc import OOTDiffusionDC
from .run.utils_ootd import get_mask_location

# Garment category names as passed to the OOTDiffusion pipeline (`category=` kwarg).
category_dict = ['upperbody', 'lowerbody', 'dress']
# Parallel list of category names in the spelling expected by get_mask_location;
# both lists are indexed by the same category integer (0/1/2).
category_dict_utils = ['upper_body', 'lower_body', 'dresses']

# Tensor to PIL
def tensor2pil(image):
    """Convert a float image tensor (values in [0, 1]) to a PIL Image.

    The tensor is moved to CPU, squeezed of singleton dims, scaled to
    0-255 and clipped before the uint8 conversion.
    """
    arr = image.cpu().numpy().squeeze()
    arr = np.clip(arr * 255.0, 0, 255).astype(np.uint8)
    return Image.fromarray(arr)

# Convert PIL to Tensor
def pil2tensor(image):
    """Convert a PIL Image (or array-like) to a float32 tensor in [0, 1].

    A leading batch dimension is prepended so the result fits ComfyUI's
    (N, H, W, C) IMAGE convention.
    """
    arr = np.array(image).astype(np.float32) / 255.0
    return torch.from_numpy(arr).unsqueeze(0)

class OOTD_DC:
    """ComfyUI node wrapping the OOTDiffusion 'dc' (dress-code) try-on pipeline.

    Supports upper-body, lower-body and full-body (dress) garment try-on.
    The heavy models (OpenPose, human parsing, diffusion) are lazily
    instantiated on first use and cached on the node instance.
    """

    def __init__(self):
        # Lazily-created model handles; populated on the first process_dc call.
        self.openpose_model_dc = None
        self.parsing_model_dc = None
        self.ootd_model_dc = None

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "person_img": ("IMAGE",),
                "clothes_img": ("IMAGE",),
                # UI choices (runtime strings): upper body / lower body / full body.
                "tasktype": (['上半身', '下半身', '全身'],),
                "n_samples": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}),
                "n_steps": ("INT", {"default": 20, "min": 20, "max": 40, "step": 1}),
                "image_scale": ("FLOAT", {"default": 2.0, "min": 1.0, "max": 5.0, "step": 0.1}),
                "seed": ("INT", {"default": -1, "min": -1, "step": 1}),  # -1: let the pipeline pick a seed
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "ootd_dc_try_on"
    CATEGORY = "image"

    def process_dc(self, vton_img, garm_img, tasktype, n_samples=1, n_steps=20, image_scale=2.0, seed=-1):
        """Run the DC try-on pipeline on PIL images.

        vton_img: PIL image of the person; garm_img: PIL image of the garment.
        tasktype selects the garment category (上半身 / 下半身 / anything else
        falls through to full-body). Returns whatever the diffusion model
        produces (a sequence of generated images).
        """
        model_type = 'dc'
        if tasktype == '上半身':    # upper body
            category = 0
        elif tasktype == '下半身':  # lower body
            category = 1
        else:                       # full body / dress
            category = 2

        # Lazy-load the heavy models once; all are placed on GPU 0.
        if self.openpose_model_dc is None:
            self.openpose_model_dc = OpenPose(0)
        if self.parsing_model_dc is None:
            self.parsing_model_dc = Parsing(0)
        if self.ootd_model_dc is None:
            self.ootd_model_dc = OOTDiffusionDC(0)

        with torch.no_grad():
            garm_img = garm_img.resize((768, 1024))
            vton_img = vton_img.resize((768, 1024))
            # Pose and parsing models run on half-resolution (384x512) input.
            keypoints = self.openpose_model_dc(vton_img.resize((384, 512)))
            model_parse, _ = self.parsing_model_dc(vton_img.resize((384, 512)))

            mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
            mask = mask.resize((768, 1024), Image.NEAREST)
            mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)

            # Grey out the region the diffusion model will re-synthesize.
            masked_vton_img = Image.composite(mask_gray, vton_img, mask)

            images = self.ootd_model_dc(
                model_type=model_type,
                category=category_dict[category],
                image_garm=garm_img,
                image_vton=masked_vton_img,
                mask=mask,
                image_ori=vton_img,
                num_samples=n_samples,
                num_steps=n_steps,
                image_scale=image_scale,
                seed=seed,
            )

        return images

    def ootd_dc_try_on(self, person_img, clothes_img, tasktype, n_samples, n_steps, image_scale, seed):
        """ComfyUI entry point: IMAGE tensors in, IMAGE tensor 1-tuple out."""
        vton_img = tensor2pil(person_img)
        garm_img = tensor2pil(clothes_img)
        tryon_image = self.process_dc(vton_img, garm_img, tasktype, n_samples, n_steps, image_scale, seed)
        # BUGFIX: the original `return (pil2tensor(np.array(tryon_image),))` had
        # the comma inside the call, so the outer parens were mere grouping and a
        # bare tensor was returned instead of the 1-tuple that ComfyUI requires
        # for RETURN_TYPES = ("IMAGE",).
        # NOTE(review): tryon_image looks like a sequence of PIL images —
        # np.array stacks them to (n, H, W, 3) and pil2tensor prepends a batch
        # dim; confirm the resulting shape against downstream consumers.
        return (pil2tensor(np.array(tryon_image)),)

class OOTD_HD:
    """ComfyUI node wrapping the OOTDiffusion 'hd' try-on pipeline.

    The HD variant handles upper-body garments only (category fixed to 0).
    The heavy models (OpenPose, human parsing, diffusion) are lazily
    instantiated on first use and cached on the node instance.
    """

    def __init__(self):
        # Lazily-created model handles; populated on the first process_hd call.
        self.openpose_model_hd = None
        self.parsing_model_hd = None
        self.ootd_model_hd = None

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "person_img": ("IMAGE",),
                "clothes_img": ("IMAGE",),
                "n_samples": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}),
                "n_steps": ("INT", {"default": 20, "min": 20, "max": 40, "step": 1}),
                "image_scale": ("FLOAT", {"default": 2.0, "min": 1.0, "max": 5.0, "step": 0.1}),
                "seed": ("INT", {"default": -1, "min": -1, "step": 1}),  # -1: let the pipeline pick a seed
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "ootd_hd_try_on"
    CATEGORY = "image"

    def process_hd(self, vton_img, garm_img, n_samples=1, n_steps=20, image_scale=2.0, seed=-1):
        """Run the HD try-on pipeline on PIL images.

        vton_img: PIL image of the person; garm_img: PIL image of the garment.
        Returns whatever the diffusion model produces (a sequence of
        generated images).
        """
        model_type = 'hd'
        category = 0  # HD model supports upperbody only (0:upperbody; 1:lowerbody; 2:dress)

        # Lazy-load the heavy models once; all are placed on GPU 0.
        if self.openpose_model_hd is None:
            self.openpose_model_hd = OpenPose(0)
        if self.parsing_model_hd is None:
            self.parsing_model_hd = Parsing(0)
        if self.ootd_model_hd is None:
            self.ootd_model_hd = OOTDiffusionHD(0)

        with torch.no_grad():
            garm_img = garm_img.resize((768, 1024))
            vton_img = vton_img.resize((768, 1024))
            # Pose and parsing models run on half-resolution (384x512) input.
            keypoints = self.openpose_model_hd(vton_img.resize((384, 512)))
            model_parse, _ = self.parsing_model_hd(vton_img.resize((384, 512)))

            mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
            mask = mask.resize((768, 1024), Image.NEAREST)
            mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)

            # Grey out the region the diffusion model will re-synthesize.
            masked_vton_img = Image.composite(mask_gray, vton_img, mask)

            images = self.ootd_model_hd(
                model_type=model_type,
                category=category_dict[category],
                image_garm=garm_img,
                image_vton=masked_vton_img,
                mask=mask,
                image_ori=vton_img,
                num_samples=n_samples,
                num_steps=n_steps,
                image_scale=image_scale,
                seed=seed,
            )

        return images

    def ootd_hd_try_on(self, person_img, clothes_img, n_samples, n_steps, image_scale, seed):
        """ComfyUI entry point: IMAGE tensors in, IMAGE tensor 1-tuple out."""
        vton_img = tensor2pil(person_img)
        garm_img = tensor2pil(clothes_img)
        tryon_image = self.process_hd(vton_img, garm_img, n_samples, n_steps, image_scale, seed)
        # BUGFIX: the original `return (pil2tensor(np.array(tryon_image),))` had
        # the comma inside the call, so the outer parens were mere grouping and a
        # bare tensor was returned instead of the 1-tuple that ComfyUI requires
        # for RETURN_TYPES = ("IMAGE",).
        # NOTE(review): tryon_image looks like a sequence of PIL images —
        # np.array stacks them to (n, H, W, 3) and pil2tensor prepends a batch
        # dim; confirm the resulting shape against downstream consumers.
        return (pil2tensor(np.array(tryon_image)),)

# A dictionary that contains all nodes you want to export with their names
# NOTE: names should be globally unique
# The keys are the registered node identifiers shown in the ComfyUI menu; the
# Chinese labels are part of those runtime names, so changing them would break
# any saved workflow that references these nodes.
NODE_CLASS_MAPPINGS = {
    "OOTD_HD上半身试穿": OOTD_HD,
    "OOTD_DC上半身/下半身/全身试穿": OOTD_DC,
}