import torch.nn.functional as F

__VERSION__ = "v1.0.0"

class SimpleImageResizerNode:
    """ComfyUI custom node that resizes a batch of images to a target size.

    Images follow the ComfyUI convention of float tensors shaped
    [batch, height, width, channels]. Optionally preserves the original
    aspect ratio by fitting the image inside the requested box.
    """

    CATEGORY = "AmorForge"
    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("resized_image",)
    FUNCTION = "resize"
    NODE_CLASS_MAPPINGS = {
        "SimpleImageResizerNode": "Simple Image Resizer Node",
    }

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's inputs for the ComfyUI graph editor."""
        return {
            "required": {
                "image": ("IMAGE",),
                "width": ("INT", {
                    "default": 512,
                    "min": 1,
                    "max": 8192,
                    "step": 1,
                    "display": "number"
                }),
                "height": ("INT", {
                    "default": 512,
                    "min": 1,
                    "max": 8192,
                    "step": 1,
                    "display": "number"
                }),
                "interpolation_mode": (["nearest", "bilinear", "bicubic"], {"default": "bilinear"}),
                "keep_proportion": (["true", "false"], {"default": "false"}),
            },
        }

    def _calculate_new_dimensions(self, original_width, original_height, target_width, target_height, keep_proportion):
        """Return (new_width, new_height) for the resize.

        When keep_proportion is the string "true" (ComfyUI passes combo
        values as strings), the image is scaled to fit inside the
        target_width x target_height box while preserving its aspect
        ratio; otherwise the target dimensions are returned unchanged.

        Each computed dimension is clamped to a minimum of 1 so that
        extreme aspect ratios (e.g. a 1000x1 image fit into a 1x1 box)
        cannot truncate to 0, which would make F.interpolate fail.
        """
        if keep_proportion != "true":
            # Stretch freely to the requested size.
            return target_width, target_height

        original_aspect = original_width / original_height
        target_aspect = target_width / target_height

        if original_aspect > target_aspect:
            # Original is relatively wider than the target box:
            # width is the limiting dimension.
            new_width = target_width
            new_height = int(target_width / original_aspect)
        else:
            # Original is relatively taller: height is the limiting dimension.
            new_height = target_height
            new_width = int(target_height * original_aspect)

        # Guard against int() truncating a dimension down to 0.
        return max(1, new_width), max(1, new_height)

    def resize(self, image, width, height, interpolation_mode="bilinear", keep_proportion="false"):
        """Resize `image` to the requested size and return it as a 1-tuple.

        Args:
            image: float tensor shaped [batch, height, width, channels]
                (ComfyUI IMAGE convention).
            width, height: target dimensions in pixels.
            interpolation_mode: one of "nearest", "bilinear", "bicubic".
            keep_proportion: "true" to fit inside the target box while
                preserving aspect ratio, "false" to stretch.

        Returns:
            A 1-tuple containing the resized [B, H, W, C] tensor, as
            required by RETURN_TYPES.
        """
        batch_size, original_height, original_width, channels = image.shape

        new_width, new_height = self._calculate_new_dimensions(
            original_width, original_height, width, height, keep_proportion)

        # F.interpolate expects channels-first [B, C, H, W].
        image = image.permute(0, 3, 1, 2)
        # align_corners is only valid for bilinear/bicubic; "nearest"
        # requires it to be None.
        resized_image = F.interpolate(
            image,
            size=(new_height, new_width),
            mode=interpolation_mode,
            align_corners=False if interpolation_mode != "nearest" else None,
        )
        # Back to the ComfyUI channels-last layout.
        resized_image = resized_image.permute(0, 2, 3, 1)

        return (resized_image,)