import torch
import numpy as np
from PIL import Image
import io

class Charly_FitToAspectNode:
    """ComfyUI node that downscales an image (never upscales) so it fits
    inside a target aspect-ratio canvas and reports the padding required
    on each side to fill the remaining space.

    The node does NOT apply the padding itself: it returns the (possibly
    resized) image plus the top/bottom/left/right pad amounts so a
    downstream pad/outpaint node can use them.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "aspect_ratio": ("STRING", {"default": "16:9", "multiline": False}),
                "expansion_direction": ("STRING", {"default": "center", "multiline": False}),
            }
        }

    RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT")
    RETURN_NAMES = ("image", "pad_top", "pad_bottom", "pad_left", "pad_right")
    FUNCTION = "fit_to_aspect"
    CATEGORY = "image/resize"

    # Target pixel resolutions for each predefined aspect-ratio key.
    _ASPECT_RESOLUTIONS = {
        "1:1": (1024, 1024),
        "16:9": (1392, 752),
        "21:9": (1568, 672),
        "3:2": (1248, 832),
        "2:3": (832, 1248),
        "4:5": (944, 1104),
        "5:4": (1104, 944),
        "3:4": (880, 1184),
        "4:3": (1184, 880),
        "9:16": (752, 1392),
        "9:21": (672, 1568),
    }

    def _resolve_target_size(self, aspect_ratio):
        """Map an aspect-ratio string to a (width, height) pixel target.

        Accepts a predefined key (e.g. "16:9"), or a custom "W:H" / "WxH"
        pair whose numbers are taken directly as pixel dimensions. Any
        unparseable input falls back to the 16:9 preset.
        """
        aspect_ratio = aspect_ratio.strip()
        if aspect_ratio in self._ASPECT_RESOLUTIONS:
            return self._ASPECT_RESOLUTIONS[aspect_ratio]
        try:
            if ":" in aspect_ratio:
                width_str, height_str = aspect_ratio.split(":")[:2]
                return int(width_str), int(height_str)
            if "x" in aspect_ratio.lower():
                width_str, height_str = aspect_ratio.lower().split("x")[:2]
                return int(width_str), int(height_str)
        except ValueError:
            # Non-numeric parts or a lone token; fall through to the default.
            pass
        return self._ASPECT_RESOLUTIONS["16:9"]

    @staticmethod
    def _compute_padding(total_pad_width, total_pad_height, direction):
        """Distribute the total padding according to the expansion direction.

        Returns (pad_top, pad_bottom, pad_left, pad_right). An unrecognized
        direction falls back to "center" — previously the pad variables were
        left unassigned and the node raised NameError.
        """
        half_w = total_pad_width // 2
        half_h = total_pad_height // 2
        # (top, bottom, left, right) per direction; the image sits opposite
        # the side(s) being expanded.
        layouts = {
            "center": (half_h, total_pad_height - half_h, half_w, total_pad_width - half_w),
            "top": (total_pad_height, 0, half_w, total_pad_width - half_w),
            "bottom": (0, total_pad_height, half_w, total_pad_width - half_w),
            "left": (half_h, total_pad_height - half_h, total_pad_width, 0),
            "right": (half_h, total_pad_height - half_h, 0, total_pad_width),
            "top_left": (total_pad_height, 0, total_pad_width, 0),
            "top_right": (total_pad_height, 0, 0, total_pad_width),
            "bottom_left": (0, total_pad_height, total_pad_width, 0),
            "bottom_right": (0, total_pad_height, 0, total_pad_width),
        }
        return layouts.get(direction, layouts["center"])

    def fit_to_aspect(self, image, aspect_ratio, expansion_direction):
        """Fit *image* inside the target aspect-ratio canvas.

        Args:
            image: ComfyUI IMAGE tensor, (batch, height, width, channels)
                float in [0, 1] (or a single unbatched HWC tensor).
            aspect_ratio: preset key ("16:9", ...) or custom "W:H" / "WxH".
            expansion_direction: where the padding should go ("center",
                "top", "bottom", "left", "right", "top_left", "top_right",
                "bottom_left", "bottom_right"); unknown values act as
                "center".

        Returns:
            (image, pad_top, pad_bottom, pad_left, pad_right) — the image is
            downscaled if needed but NOT padded; the pad amounts describe
            how to reach the target canvas.
        """
        target_width, target_height = self._resolve_target_size(aspect_ratio)

        # NOTE(review): only the first batch item is processed, matching the
        # original behavior — confirm whether multi-image batches can occur.
        if len(image.shape) == 4:
            img_tensor = image[0]
        else:
            img_tensor = image

        current_height, current_width = img_tensor.shape[:2]

        # Scale so the image fits fully inside the target canvas while
        # keeping proportions; never upscale (clamp at 1.0).
        scale_factor = min(
            target_width / current_width,
            target_height / current_height,
            1.0,
        )
        new_width = int(current_width * scale_factor)
        new_height = int(current_height * scale_factor)

        # Round-trip through PIL for the high-quality LANCZOS resample.
        # .cpu() guards against tensors living on an accelerator device.
        img_array = (img_tensor.cpu().numpy() * 255).astype(np.uint8)
        pil_image = Image.fromarray(img_array)
        if scale_factor != 1:
            pil_image = pil_image.resize((new_width, new_height), Image.Resampling.LANCZOS)

        pad_top, pad_bottom, pad_left, pad_right = self._compute_padding(
            target_width - new_width,
            target_height - new_height,
            expansion_direction.strip().lower(),
        )

        # Convert back to a float tensor, restoring the batch dim if the
        # input had one.
        result_array = np.array(pil_image).astype(np.float32) / 255.0
        result_tensor = torch.from_numpy(result_array)
        if len(image.shape) == 4:
            result_tensor = result_tensor.unsqueeze(0)

        return (result_tensor, pad_top, pad_bottom, pad_left, pad_right)

# Registration table read by ComfyUI at load time: maps the node's internal
# identifier to its implementing class.
NODE_CLASS_MAPPINGS = {
    "Charly FitToAspectNode": Charly_FitToAspectNode,
}

# Human-readable label shown for the node in the ComfyUI UI.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Charly FitToAspectNode": "Fit To Aspect Node",
}