import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.transforms.functional import affine
from torchvision.transforms import InterpolationMode

# --- ComfyUI upscaler node (optional dependency) ---
# ImageUpscaleWithModel lives in comfy_extras; when running outside ComfyUI
# (or with a stripped install) the import fails and UPSCALER_OK gates its use
# at runtime instead of crashing at import time.
try:
    from comfy_extras.nodes_upscale_model import ImageUpscaleWithModel
    UPSCALER_OK = True
except ImportError:
    print("[Orion4D EnsembleUpscaler] Avertissement : ImageUpscaleWithModel introuvable (comfy_extras).")
    UPSCALER_OK = False


# ==============================
# Utils
# ==============================
def _interp_mode(name: str) -> InterpolationMode:
    """Map a mode name (case-insensitive) to an InterpolationMode.

    Unknown or empty names fall back to BILINEAR; `None`/"" defaults to
    BICUBIC before lookup, matching the node's default choice.
    """
    normalized = (name or "BICUBIC").upper()
    lookup = {
        "NEAREST": InterpolationMode.NEAREST,
        "BICUBIC": InterpolationMode.BICUBIC,
    }
    return lookup.get(normalized, InterpolationMode.BILINEAR)


def _apply_affine_translate_bchw(x_bchw: torch.Tensor, tx: float, ty: float,
                                 interp: InterpolationMode,
                                 bicubic_fallback_flag: dict) -> torch.Tensor:
    """Translate every image of a BCHW batch by (tx, ty) pixels.

    If BICUBIC is requested but unsupported on Tensors, falls back to
    BILINEAR automatically; the warning is printed only once per run via
    the shared `bicubic_fallback_flag` dict.
    """
    def _translate_batch(batch: torch.Tensor, mode: InterpolationMode) -> torch.Tensor:
        shifted = [
            affine(img, angle=0.0, translate=[tx, ty],
                   scale=1.0, shear=[0.0, 0.0], interpolation=mode)
            for img in batch
        ]
        return torch.stack(shifted, dim=0)

    try:
        return _translate_batch(x_bchw, interp)
    except Exception:
        if interp != InterpolationMode.BICUBIC:
            raise
        if not bicubic_fallback_flag.get("warned", False):
            print("[Orion4D EnsembleUpscaler] BICUBIC non supporté sur Tensor — fallback en BILINEAR.")
            bicubic_fallback_flag["warned"] = True
        return _translate_batch(x_bchw, InterpolationMode.BILINEAR)


def _flip_bchw(x_bchw: torch.Tensor, flip_h: bool, flip_v: bool) -> torch.Tensor:
    if flip_h:
        x_bchw = torch.flip(x_bchw, dims=[3])
    if flip_v:
        x_bchw = torch.flip(x_bchw, dims=[2])
    return x_bchw


def _unsharp_mask_bchw(x_bchw: torch.Tensor, amount: float = 0.25) -> torch.Tensor:
    """Accentuation non-clampée pour préserver la dynamique."""
    if amount <= 0:
        return x_bchw
    kernel = torch.ones((1, 1, 3, 3), device=x_bchw.device, dtype=x_bchw.dtype) / 9.0
    c = x_bchw.shape[1]
    kernel = kernel.repeat(c, 1, 1, 1)
    pad = (1, 1, 1, 1)
    blur = F.conv2d(F.pad(x_bchw, pad, mode="reflect"), kernel, groups=c)
    sharpened = x_bchw + amount * (x_bchw - blur)
    # On ne clamp PAS ici pour conserver les informations > 1.0 (hautes lumières)
    return sharpened

# --- Optional utilities for linear colour-space processing ---
# For an advanced workflow, the image can be converted to linear space before
# processing and back to sRGB afterwards:
# def srgb_to_linear(img_tensor):
#     return torch.pow(img_tensor.clamp(0.0, 1.0), 2.2)
# def linear_to_srgb(img_tensor):
#     return torch.pow(img_tensor.clamp(0.0, 1.0), 1.0/2.2)


# ==============================
# Node 1 — EnsembleSuperRes (Pixel-Shift) - AMÉLIORÉ
# ==============================
class EnsembleSuperRes:
    """
    Pixel-Shift Ensemble super-resolution with high-dynamic-range outputs.

    Runs the upscale model several times on jittered (translated/flipped)
    copies of the input, undoes each jitter at output resolution, then fuses
    the passes (mean / median / mix).

      - Works in float32 throughout to preserve precision.
      - Unclamped unsharp masking keeps detail in the extremes.
      - Outputs:
        - image_float : 32-bit float master, unclamped (for HDR formats).
        - image_clamped : standardized (0-1) image for classic workflows.
        - image16 : 16-bit quantized image.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "upscale_model": ("UPSCALE_MODEL",),
                "use_flip": ("BOOLEAN", {"default": True}),
                "use_translate": ("BOOLEAN", {"default": True}),
                "randomize_each_run": ("BOOLEAN", {"default": False}),
                "use_unsharp": ("BOOLEAN", {"default": True}),
                "passes": ("INT", {"default": 8, "min": 2, "max": 32}),
                "translate_pixels": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 8.0, "step": 0.25}),
                "interpolation": (["BICUBIC", "BILINEAR", "NEAREST"],),
                "fusion_mode": (["MIX", "MEAN", "MEDIAN"],),
                "mix_weight": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.05}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 2**31 - 1}),
                "unsharp_amount": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 2.0, "step": 0.05}),
            }
        }

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE")
    RETURN_NAMES = ("image_float", "image_clamped", "image16")
    FUNCTION = "process"
    CATEGORY = "Pixel-Shift"

    def _rng(self, seed: int, randomize: bool) -> torch.Generator:
        """Build a CPU torch.Generator from `seed`.

        When `randomize` is True the seed is XOR-mixed with fresh entropy so
        every run produces a different jitter pattern (note: torch.seed()
        also reseeds the global RNG as a side effect).
        """
        s = seed
        if randomize:
            s = (seed ^ torch.seed()) & 0x7FFFFFFF
        g = torch.Generator()
        g.manual_seed(int(s))
        return g

    def _u(self, g: torch.Generator, lo: float, hi: float, device) -> float:
        """Draw one uniform float in [lo, hi] from generator `g`.

        FIX: the sample tensor is always allocated on CPU — `g` is a CPU
        generator, and driving a CUDA tensor's uniform_() with it raises a
        device-mismatch RuntimeError. `device` is kept for interface
        compatibility but no longer used for sampling.
        """
        return float(torch.empty(()).uniform_(lo, hi, generator=g).item())

    def process(self, image, upscale_model, use_flip, use_translate, randomize_each_run,
                use_unsharp, passes, translate_pixels, interpolation, fusion_mode,
                mix_weight, seed, unsharp_amount):
        """Run `passes` jittered upscales and fuse them.

        Returns (image_float, image_clamped, image16) — all BHWC tensors;
        image_float is unclamped float32, the other two are cast back to the
        input's original dtype.
        """
        if not UPSCALER_OK:
            raise Exception("ImageUpscaleWithModel indisponible (comfy_extras).")

        assert image.dim() == 4 and image.shape[-1] in (3, 4), "Image BHWC attendue (RGB ou RGBA)."
        original_dtype = image.dtype

        upscaler = ImageUpscaleWithModel()
        g = self._rng(seed, randomize_each_run)
        interp = _interp_mode(interpolation)
        bicubic_fallback_flag = {"warned": False}

        # Work in float32 for maximum precision (BCHW layout for the ops below).
        base_bchw = image.permute(0, 3, 1, 2).to(dtype=torch.float32).contiguous()
        passes_out = []

        for _ in range(passes):
            # Random sub-pixel translation and coin-flip mirroring per pass.
            tx = self._u(g, -translate_pixels, translate_pixels, base_bchw.device) if use_translate else 0.0
            ty = self._u(g, -translate_pixels, translate_pixels, base_bchw.device) if use_translate else 0.0
            # Uniform in [0, 2) truncated to int gives a fair 0/1 coin flip.
            flip_h = bool(int(self._u(g, 0.0, 1.9999, base_bchw.device))) if use_flip else False
            flip_v = bool(int(self._u(g, 0.0, 1.9999, base_bchw.device))) if use_flip else False

            varied = base_bchw
            if tx != 0.0 or ty != 0.0:
                varied = _apply_affine_translate_bchw(varied, tx, ty, interp, bicubic_fallback_flag)
            if flip_h or flip_v:
                varied = _flip_bchw(varied, flip_h, flip_v)

            in_h, in_w = varied.shape[2], varied.shape[3]
            # ComfyUI upscaler expects/returns BHWC.
            up_bhwc = upscaler.upscale(upscale_model, varied.permute(0, 2, 3, 1))[0]
            up_bchw = up_bhwc.permute(0, 3, 1, 2).to(dtype=torch.float32).contiguous()

            # Scale factors so the inverse translation is applied in output pixels.
            out_h, out_w = up_bchw.shape[2], up_bchw.shape[3]
            sx = out_w / float(in_w)
            sy = out_h / float(in_h)

            # Undo the jitter in reverse order: flip first, then translation.
            restored = up_bchw
            if flip_h or flip_v:
                restored = _flip_bchw(restored, flip_h, flip_v)
            if tx != 0.0 or ty != 0.0:
                restored = _apply_affine_translate_bchw(restored, -tx * sx, -ty * sy, interp, bicubic_fallback_flag)
            passes_out.append(restored)

        stack = torch.stack(passes_out, dim=0)

        f = fusion_mode.upper()
        if f == "MEAN":
            fused = stack.mean(dim=0)
        elif f == "MEDIAN":
            fused, _ = torch.median(stack, dim=0)
        else:
            # MIX: linear blend of mean (smooth) and median (outlier-robust).
            mean = stack.mean(dim=0)
            median, _ = torch.median(stack, dim=0)
            fused = (1.0 - float(mix_weight)) * mean + float(mix_weight) * median

        if use_unsharp and unsharp_amount > 0:
            fused = _unsharp_mask_bchw(fused, amount=float(unsharp_amount))

        # Output 1: unclamped float32 master for maximum flexibility (HDR).
        out_img_float32 = fused.permute(0, 2, 3, 1).contiguous()

        # Output 2: clamped version for compatibility with standard nodes.
        out_img_clamped = torch.clamp(out_img_float32, 0.0, 1.0).to(dtype=original_dtype)

        # Output 3: 16-bit quantized version (derived from the clamped image).
        q = (out_img_clamped * 65535.0).round() / 65535.0
        out_img16 = q.to(dtype=original_dtype).contiguous()

        return (out_img_float32, out_img_clamped, out_img16)


# ==============================
# Node 2 — SaveImageAdvanced
# ==============================
class SaveImageAdvanced:
    """
    Saves images in standard (16-bit PNG) and HDR (32-bit float TIFF, EXR) formats.

    Handles dtype conversion per requested output format and degrades through
    a chain of backends (imageio -> tifffile -> OpenCV -> 8-bit PIL fallback)
    so work is never lost.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "filename_prefix": ("STRING", {"default": "ComfyUI"}),
                "subfolder": ("STRING", {"default": "AdvancedOutput"}),
                "format": (["PNG", "TIFF", "TIFF_32F", "EXR"], {"default": "PNG"}),
                "compression": ("INT", {"default": 6, "min": 0, "max": 9, "step": 1, "tooltip": "Niveau de compression pour PNG"}),
                "force_opaque_alpha": ("BOOLEAN", {"default": True, "tooltip": "Pour les formats non-float, force l'alpha à 100% pour éviter les aperçus noirs"}),
                "drop_alpha_on_cv2": ("BOOLEAN", {"default": True, "tooltip": "Si OpenCV est utilisé en fallback, supprime l'alpha (recommandé)"}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("paths",)
    FUNCTION = "save"
    OUTPUT_NODE = True
    CATEGORY = "Image/IO/Orion4D"

    def _ensure_outdir(self, subfolder: str) -> str:
        """Return (and create) the output directory, preferring ComfyUI's."""
        try:
            from folder_paths import get_output_directory
            root = get_output_directory()
        except Exception:
            # Outside ComfyUI: fall back to ./output under the current dir.
            root = os.path.join(os.getcwd(), "output")
        outdir = os.path.join(root, subfolder) if subfolder else root
        os.makedirs(outdir, exist_ok=True)
        return outdir

    def _save_with_imageio_16b(self, path: str, arr16: np.ndarray, fmt: str, compression: int):
        """Write a uint16 array with imageio (PNG honours `compression`, TIFF uses LZW)."""
        import imageio.v3 as iio
        if fmt == "PNG":
            iio.imwrite(path, arr16, compress_level=int(compression))
        else:
            iio.imwrite(path, arr16, compression="lzw")

    def _save_with_tifffile(self, path: str, arr: np.ndarray):
        """Write TIFF via tifffile; deflate for float32 data, LZW otherwise."""
        import tifffile
        compression = 'deflate' if arr.dtype == np.float32 else 'lzw'
        tifffile.imwrite(path, arr, compression=compression)

    def _save_with_cv2_png_16b(self, path: str, arr16: np.ndarray, compression: int, drop_alpha: bool):
        """Write a 16-bit PNG via OpenCV (RGB only; alpha must be dropped)."""
        import cv2
        out = arr16
        if out.shape[2] == 4:
            if drop_alpha:
                out = out[:, :, :3]
            else:
                raise RuntimeError("cv2 ne gère pas RGBA 16b. Activez 'drop_alpha_on_cv2'.")
        out_bgr = out[..., ::-1].copy()  # RGB -> BGR for OpenCV
        ok = cv2.imwrite(path, out_bgr, [cv2.IMWRITE_PNG_COMPRESSION, int(compression)])
        if not ok:
            raise RuntimeError("cv2.imwrite a échoué.")

    def _save_with_cv2_exr_32f(self, path: str, arr32f: np.ndarray):
        """Write a float32 EXR via OpenCV, preserving alpha if present."""
        import cv2
        if arr32f.shape[2] == 4:
            # FIX: [..., ::-1] on RGBA would yield ABGR; OpenCV expects BGRA.
            # Reorder only the colour channels (OpenCV EXR supports alpha).
            out_bgr = arr32f[..., [2, 1, 0, 3]].copy()
        else:
            out_bgr = arr32f[..., ::-1].copy()  # RGB -> BGR
        ok = cv2.imwrite(path, out_bgr)
        if not ok:
            raise RuntimeError("cv2.imwrite (EXR) a échoué.")

    def _try_save(self, path: str, arr: np.ndarray, fmt: str, compression: int, force_opaque_alpha: bool, drop_alpha: bool):
        """Save `arr` (float HWC, 0-1 nominal range) at `path` using the best backend available."""
        # --- HDR / FLOAT FORMATS ---
        if fmt in ["TIFF_32F", "EXR"]:
            arr32f = arr.astype(np.float32)
            if fmt == "TIFF_32F":
                try:
                    import tifffile
                    self._save_with_tifffile(path, arr32f)
                    print(f"[SaveAdvanced] Image 32-bit float sauvegardée avec tifffile : {path}")
                    return
                except ImportError:
                    raise RuntimeError("Format TIFF_32F requiert 'tifffile'. Installez-le avec : pip install tifffile")
            if fmt == "EXR":
                try:
                    # FIX: many OpenCV builds ship with EXR I/O disabled; the
                    # flag must be set before cv2 is first imported.
                    os.environ.setdefault("OPENCV_IO_ENABLE_OPENEXR", "1")
                    import cv2
                    self._save_with_cv2_exr_32f(path, arr32f)
                    print(f"[SaveAdvanced] Image EXR sauvegardée avec OpenCV : {path}")
                    return
                except ImportError:
                    raise RuntimeError("Format EXR requiert 'opencv-python'. Installez-le avec : pip install opencv-python-headless")
            return

        # --- LDR / INTEGER FORMATS (16-bit) ---
        # +0.5 before the cast gives round-to-nearest instead of truncation.
        arr16 = np.clip(arr * 65535.0 + 0.5, 0, 65535).astype(np.uint16)
        if force_opaque_alpha and arr16.shape[2] == 4:
            arr16[:, :, 3] = 65535

        # 1. imageio (best option for 16-bit PNG/TIFF)
        try:
            import imageio.v3
            self._save_with_imageio_16b(path, arr16, fmt, compression)
            print(f"[SaveAdvanced] Image 16-bit sauvegardée avec imageio : {path}")
            return
        except Exception:
            pass

        # 2. tifffile (fallback for 16-bit TIFF)
        if fmt == "TIFF":
            try:
                import tifffile
                self._save_with_tifffile(path, arr16)
                print(f"[SaveAdvanced] Image 16-bit sauvegardée avec tifffile : {path}")
                return
            except Exception:
                pass

        # 3. OpenCV (fallback for 16-bit PNG)
        if fmt == "PNG":
            try:
                import cv2
                self._save_with_cv2_png_16b(path, arr16, compression, drop_alpha)
                print(f"[SaveAdvanced] Image 16-bit sauvegardée avec OpenCV : {path}")
                return
            except Exception:
                pass

        # 4. Last-resort 8-bit fallback so the work is never lost.
        try:
            from PIL import Image
            arr8 = (arr16 / 257.0).astype(np.uint8)  # 65535/255 == 257
            mode = "RGBA" if arr8.shape[2] == 4 else "RGB"
            alt_path = os.path.splitext(path)[0] + "_8bit_fallback.png"
            print(f"[SaveAdvanced] ATTENTION: Sauvegarde 16-bit échouée. Fallback 8-bit : {alt_path}")
            print(" -> Pour une sauvegarde 16-bit/HDR, installez 'imageio', 'tifffile', 'opencv-python'")
            Image.fromarray(arr8, mode=mode).save(alt_path)
        except Exception as e:
            raise RuntimeError(f"Échec total de la sauvegarde. Installez 'imageio'. Erreur finale: {e}")

    def save(self, images, filename_prefix, subfolder, format, compression, force_opaque_alpha, drop_alpha_on_cv2):
        """Save each image of the BHWC batch; returns newline-joined file paths."""
        outdir = self._ensure_outdir(subfolder)
        ts = int(time.time())
        ext_map = {"PNG": ".png", "TIFF": ".tiff", "TIFF_32F": ".tiff", "EXR": ".exr"}
        ext = ext_map[format]
        paths = []
        for i in range(images.shape[0]):
            arr = images[i].detach().cpu().numpy()
            path = os.path.join(outdir, f"{filename_prefix}_{ts}_{i:03d}{ext}")
            self._try_save(path, arr, format, int(compression), bool(force_opaque_alpha), bool(drop_alpha_on_cv2))
            paths.append(path)
        return ("\n".join(paths),)


# ==============================
# ComfyUI registration mappings
# ==============================
# Registry consumed by ComfyUI at load time: internal node id -> class.
NODE_CLASS_MAPPINGS = {
    "EnsembleSuperRes_Orion4D": EnsembleSuperRes,
    "SaveImageAdvanced_Orion4D": SaveImageAdvanced,
}

# Human-readable labels shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "EnsembleSuperRes_Orion4D": "Ensemble Super-Resolution (HDR)",
    "SaveImageAdvanced_Orion4D": "Save Image Advanced (16b/32f)",
}