import numpy as np
import cv2
from .utils import *

import torch
import os
import folder_paths
from nodes import PreviewImage

class ImageComparer(PreviewImage):
    """Output node that saves two image batches side by side for UI comparison.

    Inherits ``save_images`` from ComfyUI's ``PreviewImage`` and merges the
    resulting UI image entries of both batches into a single preview payload.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image_a": ("IMAGE",),
                "image_b": ("IMAGE",),
            },
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO"
            },
        }

    RETURN_TYPES = ()
    FUNCTION = "compare"
    CATEGORY = "🌟FJDH/Image tool"
    OUTPUT_NODE = True

    def compare(self, image_a, image_b, prompt=None, extra_pnginfo=None):
        # Save each batch under its own prefix and collect whatever UI image
        # entries PreviewImage.save_images reports back.
        merged = []
        for prefix, batch in (("compare_a_", image_a), ("compare_b_", image_b)):
            saved = self.save_images(
                batch,
                filename_prefix=prefix,
                prompt=prompt,
                extra_pnginfo=extra_pnginfo,
            )
            merged.extend(saved.get("ui", {}).get("images", []))

        return {"ui": {"images": merged}}


class ImageAligner:
    """Perspective-warps an image (and its mask) onto a quadrilateral.

    The full source image is mapped onto the four destination ``points``
    inside a ``width`` x ``height`` output canvas.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "mask": ("MASK",),
                "points": ("Point",),
                "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "align_flag"
    CATEGORY = "🌟FJDH/Image tool"

    def align_flag(self, image, mask, points, width, height):
        img_np = tensor2np(image)[0]
        msk_np = tensor2np(mask)[0]

        # A 64x64 mask is treated as ComfyUI's placeholder "no mask" value
        # (presumably the default empty mask — TODO confirm); replace it with
        # a fully opaque mask matching the image size.
        if msk_np.shape[0] == 64 and msk_np.shape[1] == 64:
            msk_np = np.full((img_np.shape[0], img_np.shape[1]), 255, dtype=np.uint8)

        src_h, src_w = img_np.shape[0], img_np.shape[1]
        # Source corners in clockwise order: TL, TR, BR, BL.
        src_quad = np.array(
            [[0, 0], [src_w, 0], [src_w, src_h], [0, src_h]],
            dtype=np.float32,
        )
        dst_quad = np.array(points, dtype=np.float32)

        # Map the full image rectangle onto the destination quadrilateral.
        matrix = cv2.getPerspectiveTransform(src_quad, dst_quad)
        warped_img = cv2.warpPerspective(img_np, matrix, (width, height))
        warped_msk = cv2.warpPerspective(msk_np, matrix, (width, height))

        return (np2tensor(warped_img), np2tensor(warped_msk))

class ImageWarper:
    """Displaces an image and mask using the R/G channels of a normal map.

    The normal map's first two channels are decoded from [0, 255] to
    [-1, 1] and used as per-pixel x/y offsets, scaled by
    ``distortion_factor``, for a ``cv2.remap`` lookup.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "normal_map": ("IMAGE",),
                "image_to_warp": ("IMAGE",),
                "mask_to_warp": ("MASK",),
                "distortion_factor": ("FLOAT", {"default": 10.0, "min": 0.0, "max": 100.0, "step": 0.1}),
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "warp_flag"
    CATEGORY = "🌟FJDH/Image tool"

    def warp_flag(self, normal_map, image_to_warp, mask_to_warp, distortion_factor):
        """Warp ``image_to_warp`` / ``mask_to_warp`` by the normal map offsets.

        Returns a (IMAGE, MASK) tuple of the remapped tensors.
        """
        normal_map_np = tensor2np(normal_map)[0]
        image_to_warp_np = tensor2np(image_to_warp)[0]
        mask_to_warp_np = tensor2np(mask_to_warp)[0]

        height, width = image_to_warp_np.shape[:2]

        # Fix: the normal map is not guaranteed to match the target image's
        # resolution; without this resize the displacement addition below
        # raises a NumPy broadcasting error. Behavior is unchanged when the
        # sizes already agree.
        if normal_map_np.shape[:2] != (height, width):
            normal_map_np = cv2.resize(
                normal_map_np, (width, height), interpolation=cv2.INTER_LINEAR
            )

        # Decode channel values [0, 255] -> [-1, 1]; channel 0 drives x
        # displacement, channel 1 drives y displacement.
        normals_x = normal_map_np[:, :, 0].astype(np.float32) / 255.0 * 2 - 1
        normals_y = normal_map_np[:, :, 1].astype(np.float32) / 255.0 * 2 - 1

        grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))

        # remap requires float32 maps; sample each output pixel from the
        # displaced source coordinate.
        map_x = grid_x.astype(np.float32) + normals_x * distortion_factor
        map_y = grid_y.astype(np.float32) + normals_y * distortion_factor

        warped_image = cv2.remap(image_to_warp_np, map_x, map_y, interpolation=cv2.INTER_LINEAR)
        warped_mask = cv2.remap(mask_to_warp_np, map_x, map_y, interpolation=cv2.INTER_LINEAR)

        return (np2tensor(warped_image), np2tensor(warped_mask))


# Registration table read by ComfyUI: internal node ID -> node class.
NODE_CLASS_MAPPINGS = {
    "ImageAligner": ImageAligner,
    "ImageWarper": ImageWarper,
    "ImageComparer": ImageComparer,
}

# Human-readable labels shown in the ComfyUI node picker, keyed by node ID.
NODE_DISPLAY_NAME_MAPPINGS = {
    "ImageAligner": "Image Aligner",
    "ImageWarper": "Image Warper",
    "ImageComparer": "Image Comparer"
}