"""
Image Utilities Module

This module provides utilities for converting between ComfyUI image tensors
and base64 encoded images required by the Reve API, with support for different
formats, size validation, and quality preservation.
"""

import io
import base64
import logging
from typing import List, Tuple, Optional, Union
from PIL import Image, ImageOps
import numpy as np

try:
    import torch
except ImportError:
    raise ImportError("PyTorch is required. This should be available in ComfyUI environment.")

# Configure logging
logger = logging.getLogger(__name__)


class ImageConversionError(Exception):
    """Exception raised for image conversion errors.

    Raised by the conversion helpers in this module when a tensor or image
    cannot be processed: invalid tensor rank, unsupported channel count,
    encode/decode failure, or a payload too large for the API.
    """
    pass


def tensor_to_pil(tensor: torch.Tensor) -> Image.Image:
    """
    Convert ComfyUI image tensor to PIL Image.
    
    Args:
        tensor: ComfyUI image tensor (B, H, W, C) with values in [0, 1]
        
    Returns:
        PIL Image object
        
    Raises:
        ImageConversionError: If tensor format is invalid
    """
    try:
        # Handle batch dimension - take first image if batch
        if tensor.dim() == 4:
            tensor = tensor[0]
        elif tensor.dim() != 3:
            raise ImageConversionError(f"Expected 3D or 4D tensor, got {tensor.dim()}D")
        
        # Ensure tensor is in correct format (H, W, C)
        if tensor.shape[-1] not in [1, 3, 4]:
            raise ImageConversionError(f"Expected 1, 3, or 4 channels, got {tensor.shape[-1]}")
        
        # Convert to numpy and scale to [0, 255]
        numpy_image = tensor.cpu().numpy()
        numpy_image = np.clip(numpy_image * 255.0, 0, 255).astype(np.uint8)
        
        # Convert to PIL Image
        if numpy_image.shape[-1] == 1:
            # Grayscale
            pil_image = Image.fromarray(numpy_image.squeeze(-1), mode='L')
        elif numpy_image.shape[-1] == 3:
            # RGB
            pil_image = Image.fromarray(numpy_image, mode='RGB')
        elif numpy_image.shape[-1] == 4:
            # RGBA
            pil_image = Image.fromarray(numpy_image, mode='RGBA')
        else:
            raise ImageConversionError(f"Unsupported channel count: {numpy_image.shape[-1]}")
        
        return pil_image
        
    except Exception as e:
        raise ImageConversionError(f"Failed to convert tensor to PIL: {str(e)}")


def pil_to_tensor(pil_image: Image.Image) -> torch.Tensor:
    """
    Convert PIL Image to ComfyUI image tensor.
    
    Args:
        pil_image: PIL Image object
        
    Returns:
        ComfyUI image tensor (1, H, W, C) with values in [0, 1]
        
    Raises:
        ImageConversionError: If image format is invalid
    """
    try:
        # Convert to RGB if necessary
        if pil_image.mode not in ['RGB', 'RGBA', 'L']:
            pil_image = pil_image.convert('RGB')
        
        # Convert to numpy array
        numpy_image = np.array(pil_image)
        
        # Handle grayscale
        if len(numpy_image.shape) == 2:
            numpy_image = numpy_image[:, :, np.newaxis]
        
        # Normalize to [0, 1] and convert to float32
        numpy_image = numpy_image.astype(np.float32) / 255.0
        
        # Convert to tensor and add batch dimension
        tensor = torch.from_numpy(numpy_image).unsqueeze(0)
        
        return tensor
        
    except Exception as e:
        raise ImageConversionError(f"Failed to convert PIL to tensor: {str(e)}")


def tensor_to_base64(tensor: torch.Tensor, format: str = "PNG", quality: int = 95) -> str:
    """
    Convert ComfyUI image tensor to base64 encoded string.

    Args:
        tensor: ComfyUI image tensor
        format: Output format (PNG, JPEG, WEBP)
        quality: JPEG/WEBP quality (1-100); ignored for PNG

    Returns:
        Base64 encoded image string

    Raises:
        ImageConversionError: If conversion fails
    """
    try:
        # Convert tensor to PIL
        pil_image = tensor_to_pil(tensor)
        fmt = format.upper()

        # JPEG has no alpha channel: composite transparent images onto white
        if fmt == "JPEG" and pil_image.mode in ('RGBA', 'LA'):
            background = Image.new('RGB', pil_image.size, (255, 255, 255))
            # Use the alpha band as the paste mask for BOTH modes; the old
            # code only masked RGBA and silently discarded LA transparency
            background.paste(pil_image, mask=pil_image.split()[-1])
            pil_image = background

        # Save to an in-memory buffer
        buffer = io.BytesIO()
        save_kwargs = {"format": fmt}
        if fmt in ("JPEG", "WEBP"):
            save_kwargs["quality"] = quality
            save_kwargs["optimize"] = True

        pil_image.save(buffer, **save_kwargs)

        # Encode the full buffer contents to base64
        return base64.b64encode(buffer.getvalue()).decode('utf-8')

    except ImageConversionError:
        # Keep the specific message from tensor_to_pil instead of re-wrapping
        raise
    except Exception as e:
        raise ImageConversionError(f"Failed to convert tensor to base64: {str(e)}") from e


def base64_to_tensor(base64_string: str) -> torch.Tensor:
    """
    Convert base64 encoded image string to ComfyUI image tensor.

    Args:
        base64_string: Base64 encoded image; a leading data-URI header
            (e.g. "data:image/png;base64,") is tolerated and stripped

    Returns:
        ComfyUI image tensor (1, H, W, C) with values in [0, 1]

    Raises:
        ImageConversionError: If conversion fails
    """
    try:
        # Tolerate full data URIs by stripping everything up to the comma
        if base64_string.startswith("data:"):
            base64_string = base64_string.split(",", 1)[-1]

        # Decode base64
        image_bytes = base64.b64decode(base64_string)

        # Load as PIL Image; Image.open keeps a reference to the buffer,
        # so lazy pixel loading still works inside pil_to_tensor
        pil_image = Image.open(io.BytesIO(image_bytes))

        # Convert to tensor
        return pil_to_tensor(pil_image)

    except ImageConversionError:
        # Keep the specific message from pil_to_tensor instead of re-wrapping
        raise
    except Exception as e:
        raise ImageConversionError(f"Failed to convert base64 to tensor: {str(e)}") from e


def validate_image_size(tensor: torch.Tensor, max_size_mb: float = 10.0,
                        format: str = "PNG") -> bool:
    """
    Validate image tensor size for API upload.

    Args:
        tensor: ComfyUI image tensor
        max_size_mb: Maximum size in megabytes
        format: Encoding used for the size estimate (PNG, JPEG, WEBP);
            defaults to PNG for backward compatibility

    Returns:
        True if the base64-encoded payload fits in max_size_mb, False
        otherwise (including when encoding fails)
    """
    try:
        # Measure the actual base64 payload, since that is what gets uploaded
        base64_string = tensor_to_base64(tensor, format=format)
        size_bytes = len(base64_string.encode('utf-8'))
        size_mb = size_bytes / (1024 * 1024)

        return size_mb <= max_size_mb

    except Exception:
        # Deliberate best-effort: any failure counts as "not acceptable"
        return False


def resize_image_if_needed(tensor: torch.Tensor, max_dimension: int = 2048) -> torch.Tensor:
    """
    Resize image tensor if it exceeds maximum dimensions.

    Every image in a batch is resized (the previous implementation silently
    dropped all but the first batch entry).

    Args:
        tensor: ComfyUI image tensor (B, H, W, C) or (H, W, C)
        max_dimension: Maximum width or height

    Returns:
        Resized tensor (B, H', W', C) if needed, original tensor otherwise;
        on failure the original tensor is returned unchanged
    """
    try:
        # Get dimensions (handle batch dimension)
        if tensor.dim() == 4:
            _, height, width, _ = tensor.shape
        else:
            height, width, _ = tensor.shape

        # Check if resize is needed
        if height <= max_dimension and width <= max_dimension:
            return tensor

        # Calculate new dimensions maintaining aspect ratio; clamp to >= 1
        # so extreme aspect ratios can never round a dimension down to 0
        if height > width:
            new_height = max_dimension
            new_width = max(1, int(width * (max_dimension / height)))
        else:
            new_width = max_dimension
            new_height = max(1, int(height * (max_dimension / width)))

        # Resize each image in the batch via PIL and reassemble the batch
        batch = tensor if tensor.dim() == 4 else tensor.unsqueeze(0)
        resized_parts = []
        for image in batch:
            pil_image = tensor_to_pil(image)
            pil_image = pil_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
            resized_parts.append(pil_to_tensor(pil_image))
        resized_tensor = torch.cat(resized_parts, dim=0)

        logger.info(f"Resized image from {width}x{height} to {new_width}x{new_height}")
        return resized_tensor

    except Exception as e:
        # Best-effort: log and fall back to the original tensor
        logger.warning(f"Failed to resize image: {str(e)}")
        return tensor


def prepare_image_for_api(tensor: torch.Tensor, format: str = "PNG", 
                         max_size_mb: float = 10.0, max_dimension: int = 2048) -> str:
    """
    Prepare ComfyUI image tensor for Reve API upload.

    Handles resizing, format conversion, and size validation.

    Args:
        tensor: ComfyUI image tensor
        format: Output format (PNG, JPEG, WEBP)
        max_size_mb: Maximum file size in MB
        max_dimension: Maximum width/height

    Returns:
        Base64 encoded image string ready for API

    Raises:
        ImageConversionError: If preparation fails or the encoded image
            cannot be brought under max_size_mb
    """
    try:
        # Resize first so the encoded payload starts as small as possible
        processed_tensor = resize_image_if_needed(tensor, max_dimension)

        # Convert to base64 and measure the actual upload size
        base64_string = tensor_to_base64(processed_tensor, format)
        size_bytes = len(base64_string.encode('utf-8'))
        size_mb = size_bytes / (1024 * 1024)

        if size_mb > max_size_mb:
            if format.upper() == "PNG":
                # PNG is lossless; fall back to lossy JPEG to shrink the payload
                logger.warning(f"PNG too large ({size_mb:.1f}MB), trying JPEG")
                base64_string = tensor_to_base64(processed_tensor, "JPEG", quality=85)
                size_bytes = len(base64_string.encode('utf-8'))
                size_mb = size_bytes / (1024 * 1024)

            # Applies to every format now — the old code silently returned
            # oversized JPEG/WEBP payloads without ever raising
            if size_mb > max_size_mb:
                raise ImageConversionError(f"Image too large even after compression: {size_mb:.1f}MB > {max_size_mb}MB")

        logger.debug(f"Prepared image: {size_mb:.1f}MB, format: {format}")
        return base64_string

    except ImageConversionError:
        # Already specific — don't re-wrap and double-prefix the message
        raise
    except Exception as e:
        raise ImageConversionError(f"Failed to prepare image for API: {str(e)}") from e


def prepare_multiple_images_for_api(tensors: List[torch.Tensor], format: str = "PNG",
                                   max_size_mb_each: float = 5.0) -> List[str]:
    """
    Prepare multiple ComfyUI image tensors for the Reve API (Remix operation).

    Args:
        tensors: List of ComfyUI image tensors
        format: Output format
        max_size_mb_each: Maximum size per image in MB

    Returns:
        List of base64 encoded image strings, in input order

    Raises:
        ImageConversionError: If preparation of any image fails
    """
    # The remix operation accepts between one and four reference images
    if not tensors or len(tensors) > 4:
        raise ImageConversionError("Must provide 1-4 images for remix operation")

    encoded: List[str] = []
    for index, image_tensor in enumerate(tensors, start=1):
        try:
            # Cap dimensions lower since several images share one request
            encoded.append(prepare_image_for_api(
                image_tensor, format, max_size_mb_each, max_dimension=1536
            ))
        except Exception as e:
            raise ImageConversionError(f"Failed to prepare image {index}: {str(e)}")

    return encoded
