import torch
import numpy as np
import gc
import cv2
import os
from PIL import Image
from  comfy import model_management as mm
    
class Prepare_Img_For_ControlNet_Inpaint:
    """ComfyUI node: blacks out the masked region of an image so it can be fed
    to a ControlNet inpaint model as its conditioning image."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { 
            "image": ("IMAGE",),
            "mask": ("MASK", ),
            "mode": ("BOOLEAN", {"default": True, "label_on": "CV2", "label_off": "PIL", "tooltip": "Method for process."}),
            },
            }
    
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process"
    CATEGORY = "UL Group/Image Process"
    TITLE = "Prepare ControlNet Inpaint"
    DESCRIPTION = "给ControlNet Inpaint准备图像输入，遮罩部分涂黑。"
        
    def process(self, image, mask, mode):
        """Return `image` with the masked area filled with black.

        Args:
            image: IMAGE tensor, (B, H, W, C) floats in [0, 1]; only the first
                batch item is processed in CV2 mode.
            mask: MASK tensor; white (1.0) marks the region to blacken.
            mode: True -> OpenCV bitwise masking, False -> PIL compositing.
        """
        if mode:
            image = tensor2numpy_cv2(image)
            mask = tensor2numpy_cv2(mask)
            mask = cv2.bitwise_not(mask) # invert mask: area to keep becomes white (255)
            
            rows, cols = mask.shape  # mask dimensions
            roi = image[0:rows, 0:cols]  # region of interest matching the mask size
            image = cv2.bitwise_and(roi, roi, mask=mask)  # zero out (blacken) the masked region of the ROI
            image = numpy_cv2tensor(image)
        else:
            # Convert image tensor to PIL
            image_pil = tensor2pil(image.squeeze(0)).convert('RGB')
            
            # Convert mask to PIL
            # mask_pil = tensor2pil(mask).convert('L')
            mask_pil = mask_to_pil(mask).convert('L')
            
            # Create a black image of the same size
            black_image = Image.new('RGB', image_pil.size, (0, 0, 0))
            
            # Apply the mask: use the original image where mask is black, and black image where mask is white
            blackened_image = Image.composite(black_image, image_pil, mask_pil)
            
            # Convert the result back to tensor
            image = pil2tensor(blackened_image)
        
        return (image, )    

class Prepare_Img_For_ControlNet_Outpaint:
    """ComfyUI node: place an image onto a larger canvas and build the mask and
    conditioning image needed for ControlNet outpainting.

    Returns (bg_image, mask, outpaint_cnet_image): the padded background, a
    mask that is white over the area to generate, and the background with the
    masked area blanked out for the ControlNet input.
    """
    # Where the original image sits inside the outpainted canvas.
    _alignment_options = ["Middle", "Left", "Right", "Top", "Bottom"]
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "width": ("INT", {"default": 1024, "min": 256, "max": 2160, "tooltip": "The width used for the image."}),
                "height": ("INT", {"default": 576, "min": 256, "max": 2160, "tooltip": "The height used for the image."}),
                "alignment": (s._alignment_options, {"tooltip": "Where the original image should be in the outpainted one"}),
                "mode": ("BOOLEAN", {"default": True, "label_on": "PIL", "label_off": "Torch", "tooltip": "PIL method from original repo. Torch method from node author. Below options just for PIL method."}),
                "bg_color_name": (Pillow_Color_Names, {"default": "black"}),
                # "overlap_percentage": ("INT", {"default": 10, "min": 0, "max": 100, "tooltip": ""}),
                # "overlap_pixels": ("INT", {"default": 10, "min": 0, "max": 2160, "tooltip": ""}),
                "custom_resize_percentage": ("INT", {"default": 100, "min": 25, "max": 100, "tooltip": ""}),
                "overlap_type": ("BOOLEAN", {"default": True, "label_on": "percentage", "label_off": "pixels", "tooltip": "PIL method from original repo. Torch method from node author. Below options just for PIL method."}),
                "overlap_left": ("INT", {"default": 0, "min": 0, "max": 3840, "tooltip": "Percentage or pixels."}),
                "overlap_right": ("INT", {"default": 0, "min": 0, "max": 3840, "tooltip": "Percentage or pixels."}),
                "overlap_top": ("INT", {"default": 0, "min": 0, "max": 3840, "tooltip": "Percentage or pixels."}),
                "overlap_bottom": ("INT", {"default": 0, "min": 0, "max": 3840, "tooltip": "Percentage or pixels."}),
            },
        }
    
    RETURN_TYPES = ("IMAGE", "MASK", "IMAGE")
    RETURN_NAMES = ("bg_image", "mask", "outpaint_cnet_image")
    FUNCTION = "process"
    CATEGORY = "UL Group/Image Process"
    TITLE = "Prepare ControlNet Outpaint"
    
    # NOTE(review): the boolean defaults on the overlap_* parameters do not match
    # their INT declarations in INPUT_TYPES, and `overlap_percentage` is not
    # declared there at all; ComfyUI always passes declared inputs by keyword,
    # so these defaults are presumably never used — confirm before relying on them.
    def process(self, image, width, height, alignment="Middle", mode=True, overlap_percentage=10, custom_resize_percentage=100, overlap_left=True, overlap_right=True, overlap_top=True, overlap_bottom=True, bg_color_name='black', overlap_type=True):
        """Build the outpaint background, mask and ControlNet conditioning image.

        Args:
            image: IMAGE tensor, (B, H, W, C) floats in [0, 1].
            width/height: target canvas size in pixels.
            alignment: one of _alignment_options; where the source is placed.
            mode: True -> PIL method (original repo), False -> Torch method.
            overlap_*: per-edge overlap, percentage or pixels per overlap_type.
            bg_color_name: Pillow color name for the padded background.

        Returns:
            (bg_image, mask, outpaint_cnet_image) as tensors.
        """
        
        # Resize Image
        def can_expand(source_width, source_height, target_width, target_height, alignment):
            """Checks if the image can be expanded based on the alignment."""
            if alignment in ("Left", "Right") and source_width >= target_width:
                return False
            if alignment in ("Top", "Bottom") and source_height >= target_height:
                return False
            return True
        
        if not mode:
            # --- Torch method ---
            im=tensor2pil(image)
            source=im.convert('RGB')
            target_size = (width, height)
            if source.width == width and source.height == height:
                raise ValueError(f'Input image size is the same as target size, resize input image or change target size.')

            # Upscale if source is smaller than target in both dimensions
            if source.width < target_size[0] and source.height < target_size[1]:
                scale_factor = min(target_size[0] / source.width, target_size[1] / source.height)
                new_width = int(source.width * scale_factor)
                new_height = int(source.height * scale_factor)
                source = source.resize((new_width, new_height), Image.LANCZOS)

            # Downscale if source exceeds the target in either dimension
            if source.width > target_size[0] or source.height > target_size[1]:
                scale_factor = min(target_size[0] / source.width, target_size[1] / source.height)
                new_width = int(source.width * scale_factor)
                new_height = int(source.height * scale_factor)
                source = source.resize((new_width, new_height), Image.LANCZOS)

            # NOTE(review): if the source already equals the target in one
            # dimension and is not larger in the other, neither resize branch
            # runs and new_width/new_height stay unbound, causing a NameError
            # in the mask-coordinate step below — confirm intended input range.
            if not can_expand(source.width, source.height, target_size[0], target_size[1], alignment):
                alignment = "Middle"
            # Calculate margins based on alignment
            if alignment == "Middle":
                margin_x = (target_size[0] - source.width) // 2
                margin_y = (target_size[1] - source.height) // 2
            elif alignment == "Left":
                margin_x = 0
                margin_y = (target_size[1] - source.height) // 2
            elif alignment == "Right":
                margin_x = target_size[0] - source.width
                margin_y = (target_size[1] - source.height) // 2
            elif alignment == "Top":
                margin_x = (target_size[0] - source.width) // 2
                margin_y = 0
            elif alignment == "Bottom":
                margin_x = (target_size[0] - source.width) // 2
                margin_y = target_size[1] - source.height

            # Paste the resized source onto a solid-color canvas
            background = Image.new('RGB', target_size, color=bg_color_name)
            background.paste(source, (margin_x, margin_y))

            image=pil2tensor(background)
            #----------------------------------------------------
            d1, d2, d3, d4 = image.size()
            # All padding amounts are zero, so new_image below is effectively a
            # copy of `image` (the 0.5-gray border never appears).
            left, top, bottom, right = 0, 0, 0, 0
            # Image
            new_image = torch.ones(
                (d1, d2 + top + bottom, d3 + left + right, d4),
                dtype=torch.float32,
            ) * 0.5
            new_image[:, top:top + d2, left:left + d3, :] = image
            #----------------------------------------------------
            # Mask coordinates
            if alignment == "Middle":
                margin_x = (width - new_width) // 2
                margin_y = (height - new_height) // 2
            elif alignment == "Left":
                margin_x = 0
                margin_y = (height - new_height) // 2
            elif alignment == "Right":
                margin_x = width - new_width
                margin_y = (height - new_height) // 2
            elif alignment == "Top":
                margin_x = (width - new_width) // 2
                margin_y = 0
            elif alignment == "Bottom":
                margin_x = (width - new_width) // 2
                margin_y = height - new_height
            
            # Create mask as big as new img
            mask = torch.ones(
                (height, width),
                dtype=torch.float32,
            )
            # Create hole in mask
            t = torch.zeros(
                (new_height, new_width),
                dtype=torch.float32
            )
            # Create holed mask
            mask[margin_y:margin_y + new_height, 
                margin_x:margin_x + new_width
            ] = t
            #----------------------------------------------------
            # Prepare "cn_image" for diffusers outpaint
            im=tensor2pil(new_image)
            pil_new_image=im.convert('RGB')
            
            pil_mask=tensor2pil(mask)
            
            cnet_image = pil_new_image.copy() # copy background as cnet_image
            cnet_image.paste(0, (0, 0), pil_mask) # paste mask over cnet_image, cropping it a bit
            
            tensor_cnet_image=pil2tensor(cnet_image)
        else:
            # --- PIL method (from the original repo) ---
            image = tensor2pil(image)
            target_size = (width, height)
            # Calculate the scaling factor to fit the image within the target size
            scale_factor = min(target_size[0] / image.width, target_size[1] / image.height)
            new_width = int(image.width * scale_factor)
            new_height = int(image.height * scale_factor)
            
            # Resize the source image to fit within target size
            source = image.resize((new_width, new_height), Image.LANCZOS)
            
            # Apply resize option using percentages
            resize_percentage = custom_resize_percentage
            
            # Calculate new dimensions based on percentage
            resize_factor = resize_percentage / 100
            new_width = int(source.width * resize_factor)
            new_height = int(source.height * resize_factor)
            
            # Ensure minimum size of 64 pixels
            new_width = max(new_width, 64)
            new_height = max(new_height, 64)
            
            # Resize the image
            source = source.resize((new_width, new_height), Image.LANCZOS)
            
            # Calculate the overlap in pixels based on the percentage
            # overlap_x = int(new_width * (overlap_percentage / 100))
            # overlap_y = int(new_height * (overlap_percentage / 100))
            # overlap_x = overlap_pixels
            # overlap_y = overlap_pixels
            if overlap_type:
                # overlap_* values are percentages of the resized source size
                if overlap_left > 100 or overlap_right > 100 or overlap_top > 100 or overlap_bottom > 100:
                    raise ValueError("Overlap percentage can't be bigger than 100.")
                overlap_x_left = int(new_width * (overlap_left / 100))
                overlap_x_right = int(new_width * (overlap_right / 100))
                overlap_y_top = int(new_height * (overlap_top / 100))
                overlap_y_bottom = int(new_height * (overlap_bottom / 100))
            else:
                # overlap_* values are absolute pixel counts
                if overlap_left > max(new_width, new_height) or overlap_right > max(new_width, new_height) or overlap_top > max(new_width, new_height) or overlap_bottom > max(new_width, new_height):
                    raise ValueError("Overlap pixels can't be bigger than image size.")
                overlap_x_left = overlap_left
                overlap_x_right = overlap_right
                overlap_y_top = overlap_top
                overlap_y_bottom = overlap_bottom
            
            
            
            # Ensure minimum overlap of 1 pixel
            # overlap_x = max(overlap_x, 1)
            # overlap_y = max(overlap_y, 1)
            overlap_x_left = max(overlap_x_left, 1)
            overlap_y_top = max(overlap_y_top, 1)
            overlap_x_right = max(overlap_x_right, 1)
            overlap_y_bottom = max(overlap_y_bottom, 1)
            
            # Calculate margins based on alignment
            if alignment == "Middle":
                margin_x = (target_size[0] - new_width) // 2
                margin_y = (target_size[1] - new_height) // 2
            elif alignment == "Left":
                margin_x = 0
                margin_y = (target_size[1] - new_height) // 2
            elif alignment == "Right":
                margin_x = target_size[0] - new_width
                margin_y = (target_size[1] - new_height) // 2
            elif alignment == "Top":
                margin_x = (target_size[0] - new_width) // 2
                margin_y = 0
            elif alignment == "Bottom":
                margin_x = (target_size[0] - new_width) // 2
                margin_y = target_size[1] - new_height
            
            # Adjust margins to eliminate gaps
            margin_x = max(0, min(margin_x, target_size[0] - new_width))
            margin_y = max(0, min(margin_y, target_size[1] - new_height))

            # Create a new background image and paste the resized source image
            background = Image.new('RGB', target_size, color=bg_color_name)
            background.paste(source, (margin_x, margin_y))

            # Create the mask (white = area to outpaint)
            mask = Image.new('L', target_size, 255)
            from PIL import ImageDraw
            mask_draw = ImageDraw.Draw(mask)

            # Calculate overlap areas
            # white_gaps_patch = 2

            # left_overlap = margin_x + overlap_x if overlap_left else margin_x + white_gaps_patch
            # right_overlap = margin_x + new_width - overlap_x if overlap_right else margin_x + new_width - white_gaps_patch
            # top_overlap = margin_y + overlap_y if overlap_top else margin_y + white_gaps_patch
            # bottom_overlap = margin_y + new_height - overlap_y if overlap_bottom else margin_y + new_height - white_gaps_patch
            left_overlap = margin_x + overlap_x_left# if overlap_left else margin_x + white_gaps_patch
            right_overlap = margin_x + new_width - overlap_x_right# if overlap_right else margin_x + new_width - white_gaps_patch
            top_overlap = margin_y + overlap_y_top# if overlap_top else margin_y + white_gaps_patch
            bottom_overlap = margin_y + new_height - overlap_y_bottom# if overlap_bottom else margin_y + new_height - white_gaps_patch

            # Edges flush with the canvas border keep no overlap there
            if alignment == "Left":
                left_overlap = margin_x + overlap_x_left if overlap_left else margin_x
            elif alignment == "Right":
                right_overlap = margin_x + new_width - overlap_x_right if overlap_right else margin_x + new_width
            elif alignment == "Top":
                top_overlap = margin_y + overlap_y_top if overlap_top else margin_y
            elif alignment == "Bottom":
                bottom_overlap = margin_y + new_height - overlap_y_bottom if overlap_bottom else margin_y + new_height

            # Draw the mask
            # NOTE(review): the -2 on the top edge presumably avoids a visible
            # seam (see commented-out white_gaps_patch above) — confirm.
            mask_draw.rectangle([
                (left_overlap, top_overlap - 2),
                # (left_overlap - 2, top_overlap - 2),
                (right_overlap, bottom_overlap)
            ], fill=0)
            
            if not can_expand(background.width, background.height, width, height, alignment):
                alignment = "Middle"
            
            # ControlNet input: background with the masked region blanked out
            cnet_image = background.copy()
            cnet_image.paste(0, (0, 0), mask)
            
            new_image = pil2tensor(background)
            mask = pil2tensor(mask)
            tensor_cnet_image = pil2tensor(cnet_image)

        return (new_image, mask, tensor_cnet_image,)
    
# ComfyUI registration: node identifier -> implementing class.
NODE_CLASS_MAPPINGS = {
    "UL_Image_Process_Prepare_Img_for_Inpaint": Prepare_Img_For_ControlNet_Inpaint,
    "UL_Image_Process_Prepare_Img_for_Outpaint": Prepare_Img_For_ControlNet_Outpaint,
}

def tensor2numpy_cv2(tensor_img):
    """Convert the first item of a (B, H, W, C) float tensor in [0, 1] to a uint8 array.

    Args:
        tensor_img: torch tensor, batch-first, float values in [0, 1].

    Returns:
        np.ndarray of dtype uint8 (H, W, C) — BGR/RGB order is whatever the
        tensor carried; no channel swap is performed.
    """
    # .cpu() so GPU tensors convert instead of raising; clip to [0, 255] so
    # out-of-range floats saturate rather than wrap around in uint8
    # (consistent with tensor2pil below).
    arr_img = np.clip(tensor_img.cpu().numpy()[0] * 255, 0, 255)
    return arr_img.astype(np.uint8)

def numpy_cv2tensor(img):
    """Convert a uint8 numpy image to a float32 tensor in [0, 1] with a leading batch dim."""
    scaled = img.astype(np.float32) / 255.0
    return torch.from_numpy(scaled).unsqueeze(0)

def get_device_by_name(device, debug: bool=False):
    """Resolve a device selection string to a torch device.

    Args:
        device: one of "auto", "cuda", "cpu", "mps", "xpu", "meta", "directml".
            "auto" asks ComfyUI's model management for the best device; any
            other value is returned unchanged.
        debug: print the chosen device when True.

    Returns:
        The torch device from ComfyUI (for "auto") or the input string.

    Raises:
        AttributeError: when "auto" detection fails.
    """
    if device == 'auto':
        try:
            # Delegate detection to ComfyUI instead of probing backends by hand.
            device = mm.get_torch_device()
        except Exception as e:
            raise AttributeError("What's your device(到底用什么设备跑的)？") from e
    if debug:
        print("\033[93mUse Device(使用设备):", device, "\033[0m")
    return device

def get_dtype_by_name(dtype, debug: bool=False):
    """Resolve a precision selection string to a torch dtype.

    Args:
        dtype: one of "auto", "fp16", "bf16", "fp32", "fp8_e4m3fn",
            "fp8_e4m3fnuz", "fp8_e5m2", "fp8_e5m2fnuz". "auto" asks ComfyUI's
            model management; an unrecognized value is returned unchanged.
        debug: print the chosen dtype when True.

    Returns:
        The matching torch dtype, or the input value if it is not a known name.

    Raises:
        AttributeError: when "auto" detection fails (old ComfyUI) or the torch
            build lacks the requested fp8 dtype.
    """
    # Name -> torch attribute; resolved lazily via getattr so torch builds
    # without fp8 support only fail when an fp8 dtype is actually requested.
    named = {
        "fp16": "float16",
        "bf16": "bfloat16",
        "fp32": "float32",
        "fp8_e4m3fn": "float8_e4m3fn",
        "fp8_e4m3fnuz": "float8_e4m3fnuz",
        "fp8_e5m2": "float8_e5m2",
        "fp8_e5m2fnuz": "float8_e5m2fnuz",
    }
    if dtype == 'auto':
        try:
            if mm.should_use_fp16():
                dtype = torch.float16
            elif mm.should_use_bf16():
                dtype = torch.bfloat16
            else:
                dtype = torch.float32
        except Exception as e:
            raise AttributeError("ComfyUI version too old, can't autodetect properly. Set your dtypes manually.") from e
    elif dtype in named:
        dtype = getattr(torch, named[dtype])
    if debug:
        print("\033[93mModel Precision(模型精度):", dtype, "\033[0m")
    return dtype
        
def clean_up(debug=False):
    """Run Python GC and release cached accelerator memory (CUDA/MPS/XPU).

    Args:
        debug: when True and no accelerator backend is available, print a note.
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    # hasattr guards: mps/xpu modules are absent on older torch builds, and
    # touching them directly would raise AttributeError.
    elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        torch.mps.empty_cache()
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        torch.xpu.empty_cache()
    elif debug:
        print('\033[93m', 'Not needed', '\033[0m')

def mask_to_pil(mask) -> Image:
    """Convert a mask (torch tensor or numpy array, values in [0, 1]) to an 8-bit PIL image.

    Raises:
        TypeError: for any other mask type.
    """
    if isinstance(mask, np.ndarray):
        arr = mask
    elif isinstance(mask, torch.Tensor):
        arr = mask.squeeze().cpu().numpy()
    else:
        raise TypeError("Unsupported mask type")
    return Image.fromarray((arr * 255).astype(np.uint8))

def tensor2pil(image):
    """Convert a float tensor in [0, 1] to a PIL image, squeezing singleton dims."""
    arr = image.cpu().numpy().squeeze() * 255.
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))
    
def pil2tensor(image):
    """Convert a PIL image to a float32 tensor in [0, 1] with a leading batch dim."""
    arr = np.array(image).astype(np.float32) / 255.0
    return torch.from_numpy(arr).unsqueeze(0)
    
def get_filelist_and_folderlist(root_path, file_extension=None, debug=False):
    """Recursively collect relative file and folder paths under root_path.

    Args:
        root_path: directory to walk.
        file_extension: optional iterable of suffixes (e.g. ['.jpeg', '.jpg']);
            when given, only files ending with one of them are listed.
            Defaults to None (all files).
        debug: print the collected lists when True. Defaults to False.

    Returns:
        tuple: (file_list, dir_list, ) of paths relative to root_path.
    """
    file_list = []
    dir_list = []
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    suffixes = None if file_extension is None else tuple(file_extension)
    for root, dirs, files in os.walk(root_path):
        for folder in dirs:
            dir_list.append(os.path.relpath(os.path.join(root, folder), root_path))
        for filename in files:
            if suffixes is None or filename.endswith(suffixes):
                file_list.append(os.path.relpath(os.path.join(root, filename), root_path))
    if debug:
        print('\033[93m', file_list, '\033[0m')
        print('\033[93m', dir_list, '\033[0m')
    return (file_list, dir_list, )

# Named colors understood by Pillow (ImageColor); offered as the background
# color choices for the outpaint node's `bg_color_name` input.
Pillow_Color_Names = [
    "red",
    "green",
    "blue",
    "white",
    "black",
    "yellow",
    "pink",
    "gold",
    "purple",
    "brown",
    "orange",
    "tomato",
    "violet",
    "wheat",
    "snow",
    "yellowgreen",
    "gray",
    "grey",
    "aliceblue",
    "antiquewhite",
    "aqua",
    "aquamarine",
    "azure",
    "beige",
    "bisque",
    "blanchedalmond",
    "blueviolet",
    "burlywood",
    "cadetblue",
    "chartreuse",
    "chocolate",
    "coral",
    "cornflowerblue",
    "cornsilk",
    "crimson",
    "cyan",
    "darkblue",
    "darkcyan",
    "darkgoldenrod",
    "darkgray",
    "darkgrey",
    "darkgreen",
    "darkkhaki",
    "darkmagenta",
    "darkolivegreen",
    "darkorange",
    "darkorchid",
    "darkred",
    "darksalmon",
    "darkseagreen",
    "darkslateblue",
    "darkslategray",
    "darkslategrey",
    "darkturquoise",
    "darkviolet",
    "deeppink",
    "deepskyblue",
    "dimgray",
    "dimgrey",
    "dodgerblue",
    "firebrick",
    "floralwhite",
    "forestgreen",
    "fuchsia",
    "gainsboro",
    "ghostwhite",
    "goldenrod",
    "greenyellow",
    "honeydew",
    "hotpink",
    "indianred",
    "indigo",
    "ivory",
    "khaki",
    "lavender",
    "lavenderblush",
    "lawngreen",
    "lemonchiffon",
    "lightblue",
    "lightcoral",
    "lightcyan",
    "lightgoldenrodyellow",
    "lightgreen",
    "lightgray",
    "lightgrey",
    "lightpink",
    "lightsalmon",
    "lightseagreen",
    "lightskyblue",
    "lightslategray",
    "lightslategrey",
    "lightsteelblue",
    "lightyellow",
    "lime",
    "limegreen",
    "linen",
    "magenta",
    "maroon",
    "mediumaquamarine",
    "mediumblue",
    "mediumorchid",
    "mediumpurple",
    "mediumseagreen",
    "mediumslateblue",
    "mediumspringgreen",
    "mediumturquoise",
    "mediumvioletred",
    "midnightblue",
    "mintcream",
    "mistyrose",
    "moccasin",
    "navajowhite",
    "navy",
    "oldlace",
    "olive",
    "olivedrab",
    "orangered",
    "orchid",
    "palegoldenrod",
    "palegreen",
    "paleturquoise",
    "palevioletred",
    "papayawhip",
    "peachpuff",
    "peru",
    "plum",
    "powderblue",
    "rebeccapurple",
    "rosybrown",
    "royalblue",
    "saddlebrown",
    "salmon",
    "sandybrown",
    "seagreen",
    "seashell",
    "sienna",
    "silver",
    "skyblue",
    "slateblue",
    "slategray",
    "slategrey",
    "springgreen",
    "steelblue",
    "tan",
    "teal",
    "thistle",
    "turquoise",
    "whitesmoke",
    ]