# foley_audio.py
import os
import sys
from typing import List, Tuple
import math
import torch
import numpy as np

# --- repo root so we can import the package -------------------------------
HERE = os.path.dirname(__file__)
HVF_ROOT = os.path.join(HERE, "HunyuanVideo-Foley")  # contains: hunyuanvideo_foley/, configs/, HunyuanVideo-Foley/
if HVF_ROOT not in sys.path:
    sys.path.append(HVF_ROOT)

# HunyuanVideo-Foley internals
from hunyuanvideo_foley.utils.model_utils import denoise_process  # type: ignore
# Import the functions from utils.py located in the same directory as foley_audio.py
from .utils import feature_process_from_images, load_model  # type: ignore

# --------------------------------------------------------------------------
# Small cache so we don't reload weights every node execution
# --------------------------------------------------------------------------
class _FoleyState:
    model_key: Tuple[str, str, str, int] | None = None
    model_dict = None
    cfg = None
    device = None


_STATE = _FoleyState()


def _select_device(device: str = "cuda", gpu_id: int = 0) -> torch.device:
    device = (device or "cuda").lower()
    if device == "cuda":
        if torch.cuda.is_available():
            return torch.device(f"cuda:{int(gpu_id)}")
        # fall back but keep type consistent
        return torch.device("cpu")
    if device == "mps" and getattr(torch.backends, "mps", None) and torch.backends.mps.is_available():
        return torch.device("mps")
    if device == "cpu":
        return torch.device("cpu")
    # "auto" or unknown -> prefer CUDA if present
    return torch.device(f"cuda:{int(gpu_id)}") if torch.cuda.is_available() else torch.device("cpu")


def _ensure_model_loaded(model_path_dir: str, config_path: str, device: str = "cuda", gpu_id: int = 0):
    """
    Load (or reuse) the HunyuanVideo-Foley model bundle on a concrete device.

    Returns (model_dict, cfg). A cache hit requires the same absolute paths,
    resolved device, and gpu_id as the previous load.
    """
    torch_device = _select_device(device, gpu_id)

    global _STATE
    cache_key = (
        os.path.abspath(model_path_dir),
        os.path.abspath(config_path),
        str(torch_device),
        int(gpu_id),
    )
    if _STATE.model_dict is not None and _STATE.model_key == cache_key:
        return _STATE.model_dict, _STATE.cfg

    # load_model places everything on the target device initially.
    model_dict, cfg = load_model(model_path_dir, config_path, torch_device)

    # Immediately park the (large) feature encoders on the CPU; they are moved
    # back to the target device on demand during inference.
    print("Hunyuan Foley: Offloading feature encoders to CPU to save VRAM.")
    for encoder in (model_dict.siglip2_model, model_dict.clap_model, model_dict.syncformer_model):
        encoder.to("cpu")
    # foley_model and dac_model stay on the target device for now.

    _STATE.model_key = cache_key
    _STATE.model_dict = model_dict
    _STATE.cfg = cfg
    _STATE.device = torch_device
    return model_dict, cfg


def _images_tensor_to_uint8_list(images: torch.Tensor) -> List[np.ndarray]:
    """
    Converts a ComfyUI IMAGE tensor to a list of HxWx3 uint8 RGB frames.

    Expect shapes:
      - [N, H, W, C] float32 (0..1)  OR
      - [H, W, C] float32 (0..1)    (single frame)
    """
    if images.ndim == 3:
        images = images.unsqueeze(0)  # [1, H, W, C]
    if images.ndim != 4:
        raise ValueError(f"Expected 4D tensor [N,H,W,C], got {tuple(images.shape)}")

    imgs = (images.clamp(0.0, 1.0) * 255.0).round().to(torch.uint8).cpu().numpy()  # [N,H,W,C]
    out = [imgs[i, :, :, :3] for i in range(imgs.shape[0])]  # drop alpha if present
    return out


class HunyuanFoleyAudio:
    """
    Generate Foley audio from a sequence of frames + an audio prompt.

    Returns an AUDIO object compatible with VHS Video Combine's 'audio' input:
      {'waveform': torch.float32 [1, C, S], 'sample_rate': int}
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI sockets/widgets for this node."""
        model_dir_default = os.path.join(HVF_ROOT, "HunyuanVideo-Foley")
        config_path_default = os.path.join(HVF_ROOT, "configs", "hunyuanvideo-foley-xxl.yaml")

        return {
            "required": {
                "images": ("IMAGE",),
                "frame_rate": ("FLOAT", {"default": 8.0, "min": 1.0, "max": 120.0, "step": 1.0}),
                "audio_prompt": ("STRING", {
                    "multiline": True,
                    "default": "suspenseful piano with rising tension; two-hit sting (ta-dum)"
                }),
                "model_path_dir": ("STRING", {"default": model_dir_default}),
                "config_path": ("STRING", {"default": config_path_default}),
                "num_inference_steps": ("INT", {"default": 10, "min": 1, "max": 200, "step": 1}),
                "guidance_scale": ("FLOAT", {"default": 4.5, "min": 0.0, "max": 20.0, "step": 0.1}),
                "enabled": ("BOOLEAN", {"default": True}),
            },
            "optional": {
                # Give manual control so you can keep it on GPU (default) or swap if needed.
                "device": (["cuda", "cpu", "mps", "auto"], {"default": "cuda"}),
                "gpu_id": ("INT", {"default": 0, "min": 0, "max": 7, "step": 1}),
            },
        }

    RETURN_TYPES = ("AUDIO",)
    RETURN_NAMES = ("audio",)
    FUNCTION = "generate"
    CATEGORY = "Hunyuan Foley"

    # --- mirror infer() -> denoise like the reference CLI -------------------
    def _infer_from_images(
        self,
        images_uint8: List[np.ndarray],
        prompt: str,
        model_dict,
        cfg,
        guidance_scale: float,
        num_inference_steps: int,
        fps_hint: float,
    ):
        """Run feature extraction + denoising on a list of uint8 RGB frames.

        Moves the cached sub-models onto `_STATE.device` one at a time
        (clearing the CUDA cache between moves to avoid a VRAM spike),
        extracts visual/text features, offloads the feature encoders back
        to CPU, then denoises.

        Returns (audio_tensor, sample_rate) for the first batch element.
        """
        global _STATE
        target_device = _STATE.device
        print(f"[Hunyuan Foley] Verifying models are on target device: {target_device}")

        # --- MEMORY-SAFE MODEL MOVEMENT ---
        # Instead of moving all at once, move them one by one, clearing cache
        # in between. This prevents a VRAM spike if multiple models were offloaded.
        def safe_to_device(model):
            model.to(target_device)
            if target_device.type == 'cuda':
                torch.cuda.empty_cache()

        safe_to_device(model_dict.foley_model)
        safe_to_device(model_dict.dac_model)
        safe_to_device(model_dict.siglip2_model)
        safe_to_device(model_dict.clap_model)
        safe_to_device(model_dict.syncformer_model)

        visual_feats, text_feats, audio_len_in_s = feature_process_from_images(
            images_uint8=images_uint8,
            prompt=prompt,
            model_dict=model_dict,
            cfg=cfg,
            fps_hint=fps_hint,
        )

        # Encoders are no longer needed once features exist; free their VRAM
        # before the memory-hungry denoise step.
        print("[Hunyuan Foley] Offloading feature encoders to CPU before denoising.")
        model_dict.siglip2_model.to("cpu")
        model_dict.clap_model.to("cpu")
        model_dict.syncformer_model.to("cpu")

        audio_batch, sample_rate = denoise_process(
            visual_feats,
            text_feats,
            audio_len_in_s,
            model_dict,
            cfg,
            guidance_scale=guidance_scale,
            num_inference_steps=int(num_inference_steps),
        )

        # Single-item batch: return the first (only) waveform.
        return audio_batch[0], sample_rate

    def generate(
        self,
        images: torch.Tensor,
        frame_rate: float,
        audio_prompt: str,
        model_path_dir: str,
        config_path: str,
        num_inference_steps: int,
        guidance_scale: float,
        enabled: bool = True,
        device: str = "cuda",
        gpu_id: int = 0,
    ):
        """Node entry point: produce an AUDIO dict or (None,) on failure/disable.

        Any exception is caught, logged with a full traceback, and converted
        into a (None,) return so the surrounding workflow keeps running
        without audio. Cleanup (model offload + cache clear) is guaranteed
        by the finally block.
        """
        if not enabled:
            return (None,)

        # Import the ComfyUI memory manager lazily; it only exists inside a
        # running ComfyUI process.
        import comfy.model_management as mm
        import traceback

        try:
            # --- AGGRESSIVE VRAM MANAGEMENT ---
            print("Hunyuan Foley: Starting execution, preparing VRAM.")
            mm.unload_all_models()

            if torch.cuda.is_available():
                print("Hunyuan Foley: Clearing CUDA cache before model load.")
                torch.cuda.empty_cache()

                # FIX: report stats for the GPU the user selected rather than
                # always device 0 (guard against an out-of-range gpu_id).
                dev_idx = int(gpu_id) if int(gpu_id) < torch.cuda.device_count() else 0
                total_mem = torch.cuda.get_device_properties(dev_idx).total_memory / (1024**3)
                reserved_mem = torch.cuda.memory_reserved(dev_idx) / (1024**3)
                free_mem = total_mem - reserved_mem
                print(f"Hunyuan Foley: VRAM Stats Post-Cleanup -> Total: {total_mem:.2f}GB, Reserved: {reserved_mem:.2f}GB, Free: {free_mem:.2f}GB")
            # --- END VRAM MANAGEMENT ---

            frames = _images_tensor_to_uint8_list(images)

            # FIX: the empty-batch guard must run before the MIN_FRAMES check,
            # otherwise it is unreachable (an empty batch would raise first).
            # An empty input yields one sample of silence rather than an error.
            if len(frames) == 0:
                silent = torch.zeros(1, 1, 1, dtype=torch.float32)
                return ({"waveform": silent, "sample_rate": 44100},)

            MAX_FRAMES = 64
            MIN_FRAMES = 16
            if len(frames) < MIN_FRAMES:
                raise ValueError(
                    f"Hunyuan Foley requires a minimum of {MIN_FRAMES} frames for the Syncformer model to work, "
                    f"but the input only contained {len(frames)} frames. Please provide a longer image sequence."
                )

            if len(frames) > MAX_FRAMES:
                # Uniformly subsample down to at most MAX_FRAMES frames.
                step = math.ceil(len(frames) / MAX_FRAMES)
                frames = frames[::step]

            model_dict, cfg = _ensure_model_loaded(model_path_dir, config_path, device=device, gpu_id=gpu_id)

            audio_tensor, sample_rate = self._infer_from_images(
                frames, audio_prompt, model_dict, cfg, guidance_scale, num_inference_steps, fps_hint=float(frame_rate)
            )

            # Shape to [1, C, S] as expected by VHS Video Combine.
            waveform = audio_tensor.to(torch.float32)
            if waveform.ndim == 1:
                waveform = waveform.unsqueeze(0)  # mono -> [1, S]
            waveform = waveform.unsqueeze(0)      # add batch dim -> [1, C, S]

            return ({"waveform": waveform, "sample_rate": int(sample_rate)},)

        except Exception:
            # Any failure degrades gracefully: log loudly, return no audio.
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("!!! AN ERROR OCCURRED IN HUNYUAN FOLEY NODE !!!")
            print("!!! The video generation will continue without audio. !!!")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            # Print the full error traceback to the console for debugging later.
            print(traceback.format_exc())
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")

            # Return `(None,)` to gracefully disable audio output, just as if `enabled` was False.
            return (None,)

        finally:
            # --- GUARANTEED CLEANUP ---
            # Runs whether the `try` succeeded or the `except` was triggered.
            print("Hunyuan Foley: Starting guaranteed cleanup.")
            if 'audio_tensor' in locals():
                del audio_tensor
            if 'waveform' in locals():
                del waveform

            # Park the heavy models on CPU between executions.
            if _STATE.model_dict is not None:
                print("Hunyuan Foley: Offloading main models to CPU.")
                if hasattr(_STATE.model_dict, 'foley_model'):
                    _STATE.model_dict.foley_model.to("cpu")
                if hasattr(_STATE.model_dict, 'dac_model'):
                    _STATE.model_dict.dac_model.to("cpu")

            if torch.cuda.is_available():
                print("Hunyuan Foley: Clearing CUDA cache after execution.")
                torch.cuda.empty_cache()
            if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
                torch.mps.empty_cache()

            print("Hunyuan Foley: Cleanup complete.")
            
# --------------------------------------------------------------------------
# ComfyUI registration
# --------------------------------------------------------------------------
# Mapping consumed by ComfyUI at import time to register the node class.
NODE_CLASS_MAPPINGS = {"HunyuanFoleyAudio": HunyuanFoleyAudio}

# Human-readable label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {"HunyuanFoleyAudio": "Hunyuan Foley (Audio)"}
