import torch
import numpy as np
from PIL import Image
import io
import base64
import sys # For fallback print
from typing import Optional, List, Union, Tuple, TypeAlias, TextIO
import logging
import re

# --- Type Aliases ---
PilImageT: TypeAlias = Image.Image

logger = logging.getLogger(__name__)

SUCCESS_HIGHLIGHT = 25
def ensure_utf8_friendly(text_input: str) -> str:
    """
    Return a UTF-8 safe version of *text_input*.

    Non-string inputs are first coerced with ``str()``. The value is then
    round-tripped through UTF-8 with ``errors='replace'`` so that any
    unencodable code points (e.g. lone surrogates) are substituted rather
    than raising.

    Args:
        text_input: The string (or stringable object) to process.

    Returns:
        A UTF-8 friendly version of the string.
    """
    text = text_input if isinstance(text_input, str) else str(text_input)
    try:
        # Round-trip through UTF-8 bytes; 'replace' swaps unencodable
        # code points for replacement characters instead of raising.
        as_bytes = text.encode('utf-8', errors='replace')
        return as_bytes.decode('utf-8')
    except Exception as e:
        logger.error(f"Error during UTF-8 conversion for input '{text[:100]}...': {e}", exc_info=True)
        # Fallback: return original string if conversion fails catastrophically (should be rare with 'replace')
        return text

def sanitize_filename(filename: str, max_length: int = 200) -> str:
    """
    Sanitizes a string to be safe for use as a filename.

    Replaces characters invalid on Windows (plus control characters) with
    underscores, strips leading/trailing spaces and periods, and truncates
    to ``max_length`` characters.

    Args:
        filename: Candidate filename; non-strings are coerced via ``str()``.
        max_length: Maximum length of the returned name.

    Returns:
        The sanitized filename (may be empty if the input consisted entirely
        of stripped characters).
    """
    if not isinstance(filename, str):
        logger.warning(f"sanitize_filename received non-string input: {type(filename)}. Converting to string.")
        filename = str(filename)

    # Replace invalid characters with an underscore
    invalid_chars = r'[<>:"/\\|?*\x00-\x1f]'  # Windows invalid chars + control chars
    sanitized = re.sub(invalid_chars, '_', filename)

    # Remove leading/trailing spaces and periods (invalid on Windows)
    sanitized = sanitized.strip(' .')

    # Truncate if too long
    if len(sanitized) > max_length:
        sanitized = sanitized[:max_length]
        logger.warning(f"Filename truncated to {max_length} characters: {sanitized}...")
        # BUGFIX: truncation can reintroduce a trailing space or period
        # (invalid on Windows), so strip again after cutting.
        sanitized = sanitized.strip(' .')

    return sanitized

def tensor_to_pil(tensor: Optional[torch.Tensor]) -> Optional[PilImageT]:
    """
    Converts a ComfyUI IMAGE tensor (Batch, Height, Width, Channel) to a single PIL Image.

    Takes the first image from the batch (index 0). Assumes input tensor values
    are floats in the range [0.0, 1.0] and converts them to uint8 [0, 255].
    Handles basic dimension and dtype validation.

    Args:
        tensor (Optional[torch.Tensor]): The input torch tensor, expected shape
                                         [B, H, W, C] and dtype float32/64, or None.

    Returns:
        Optional[PilImageT]: A PIL.Image object on success, None if conversion fails
                             or input is None/invalid.
    """
    if tensor is None:
        logger.debug("tensor_to_pil received None input.")
        return None
    if not isinstance(tensor, torch.Tensor):
        logger.error(f"Input is not a torch.Tensor, but {type(tensor)}. Cannot convert to PIL.")
        return None
    # Handle both 4D (B, H, W, C) and 3D (H, W, C) tensors
    if tensor.ndim == 4:
        if tensor.shape[0] == 0:  # Check if batch dimension is empty
            logger.error("Input tensor batch dimension is empty (size 0).")
            return None
        # Get first image from batch; detach from autograd graph and move to CPU
        # so .numpy() below is safe.
        img_tensor_slice = tensor[0].detach().cpu()
        logger.debug(f"Processing 4D tensor slice with shape: {img_tensor_slice.shape} and dtype: {img_tensor_slice.dtype}")
    elif tensor.ndim == 3:
        # Use 3D tensor directly (single image)
        img_tensor_slice = tensor.detach().cpu()
        logger.debug(f"Processing 3D tensor slice with shape: {img_tensor_slice.shape} and dtype: {img_tensor_slice.dtype}")
    else:
        logger.error(f"Input tensor has incorrect dimensions ({tensor.ndim}). Expected 3 (H, W, C) or 4 (B, H, W, C).")
        return None

    try:
        # Convert to numpy array.
        # BUGFIX: the original performed this conversion twice (duplicated
        # copy-paste block); the redundant second call is removed.
        img_np: np.ndarray = img_tensor_slice.numpy()

        # Handle dtype conversion and range scaling (common case: float [0,1] -> uint8 [0,255])
        if img_np.dtype == np.float32 or img_np.dtype == np.float64:
            # Hoist min/max so each full-array scan happens once, not twice.
            np_min = np.min(img_np)
            np_max = np.max(img_np)
            if np_min < 0.0 or np_max > 1.0:
                logger.warning(f"Input float tensor values outside expected [0, 1] range (min: {np_min:.2f}, max: {np_max:.2f}). Clipping.")
            img_np = np.clip(img_np * 255.0, 0, 255).astype(np.uint8)
            logger.debug("Converted float tensor to uint8 [0, 255].")
        elif img_np.dtype == np.uint8:
            logger.debug("Input tensor is already uint8.")
            # Pass through if already uint8
        else:
            logger.warning(f"Unexpected tensor dtype {img_np.dtype}. Attempting direct cast to uint8. Data range might be incorrect.")
            try:
                # Attempt direct cast, but be aware this might not be visually correct
                # if the original range wasn't [0, 255]
                img_np = img_np.astype(np.uint8)
            except ValueError as e:
                logger.error(f"Error converting tensor dtype {img_np.dtype} to uint8: {e}", exc_info=True)
                return None  # Cannot proceed if cast fails

        # Handle channel dimension for PIL (expecting H, W, C or H, W)
        if img_np.ndim == 3 and img_np.shape[2] == 1:  # Grayscale image with channel dim
            img_np = np.squeeze(img_np, axis=2)  # Convert [H, W, 1] to [H, W]
            logger.debug("Squeezed grayscale tensor [H, W, 1] to [H, W] for PIL.")
        elif img_np.ndim == 2:  # Grayscale [H, W]
            logger.debug("Input is 2D grayscale numpy array.")
        elif img_np.ndim == 3 and img_np.shape[2] in [3, 4]:  # RGB or RGBA [H, W, C]
            logger.debug(f"Input is {img_np.shape[2]}-channel numpy array.")
        else:
            logger.error(f"Unsupported numpy array shape for PIL conversion: {img_np.shape}. Expected HWC with C=1,3,4 or HW.")
            return None

        # Convert numpy array to PIL Image
        pil_image: PilImageT = Image.fromarray(img_np)
        logger.debug(f"Successfully converted tensor slice to PIL Image (mode: {pil_image.mode}).")
        return pil_image

    except IndexError:
        # This case should be caught by the initial batch size check, but included for safety
        logger.error("IndexError: Input tensor batch dimension might be empty (should have been caught earlier).")
        return None
    except Exception as e:
        logger.error(f"Unexpected error converting tensor slice to PIL Image: {e}", exc_info=True)
        return None