"""
Safetensors format Sequential Loaders
For standard .safetensors/.ckpt models
Includes Checkpoint, LoRA, CLIP, and VAE loaders
"""

import folder_paths
from nodes import CheckpointLoaderSimple, LoraLoader
from .base import SequentialLoaderBase, ANY_SOCKET_TYPES


class SequentialCheckpointLoader(SequentialLoaderBase):
    """
    Sequential version of Checkpoint Loader

    Features:
    - Sequential control with trigger input
    - Internally reuses standard CheckpointLoaderSimple
    - Outputs MODEL, CLIP, VAE + trigger
    """

    RETURN_TYPES = ("MODEL", "CLIP", "VAE", ANY_SOCKET_TYPES)
    RETURN_NAMES = ("MODEL", "CLIP", "VAE", "Bypass(Relay)")
    FUNCTION = "load_checkpoint"

    @classmethod
    def INPUT_TYPES(cls):
        # Dropdown of every checkpoint file known to ComfyUI, plus the
        # optional trigger socket contributed by the base class.
        required = {
            "ckpt_name": (folder_paths.get_filename_list("checkpoints"),),
        }
        return cls.add_trigger_input({"required": required})

    def load_checkpoint(self, ckpt_name, trigger=None):
        """
        Load a checkpoint and relay the trigger.

        Args:
            ckpt_name: Checkpoint file name
            trigger: Optional trigger for sequence control

        Returns:
            (MODEL, CLIP, VAE, Bypass(Relay))
        """
        self.log_execution("Checkpoint", {"ckpt_name": ckpt_name})

        # Delegate the actual loading to the built-in node implementation.
        inner = CheckpointLoaderSimple()
        model, clip, vae = inner.load_checkpoint(ckpt_name)

        # Memory tracking (optional) — 0.0 is a placeholder; the real size
        # is not measured here.
        self.track_memory(f"Checkpoint_{ckpt_name}", 0.0)

        return self.with_trigger_passthrough((model, clip, vae), trigger)


class SequentialLoRALoader(SequentialLoaderBase):
    """Sequential version of LoRA Loader"""

    RETURN_TYPES = ("MODEL", "CLIP", ANY_SOCKET_TYPES)
    RETURN_NAMES = ("MODEL", "CLIP", "Bypass(Relay)")
    FUNCTION = "load_lora"

    @classmethod
    def INPUT_TYPES(cls):
        # Strength sliders mirror the ranges of the standard LoraLoader node.
        strength_opts = {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}
        required = {
            "model": ("MODEL",),
            "clip": ("CLIP",),
            "lora_name": (folder_paths.get_filename_list("loras"),),
            "strength_model": ("FLOAT", dict(strength_opts)),
            "strength_clip": ("FLOAT", dict(strength_opts)),
        }
        return cls.add_trigger_input({"required": required})

    def load_lora(self, model, clip, lora_name, strength_model,
                  strength_clip, trigger=None):
        """Apply a LoRA to the given model/clip and relay the trigger."""
        self.log_execution("LoRA", {
            "lora_name": lora_name,
            "strength_model": strength_model,
            "strength_clip": strength_clip
        })

        # Reuse the built-in LoraLoader for the actual patching work.
        patched_model, patched_clip = LoraLoader().load_lora(
            model, clip, lora_name, strength_model, strength_clip
        )

        return (patched_model, patched_clip, trigger)


class SequentialCLIPLoaderSimple(SequentialLoaderBase):
    """
    Sequential version of CLIP Loader (Safetensors only)

    Features:
    - Load CLIP from .safetensors files
    - Supports clip folder
    - Compatible with standard CLIP Loader
    - No external dependencies
    """

    @classmethod
    def INPUT_TYPES(cls):
        # Get CLIP file list from clip folder (safetensors only).
        # Narrow except: a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit; best-effort fallback is intentional.
        files = []
        try:
            all_files = folder_paths.get_filename_list("clip")
            files = [f for f in all_files if f.endswith('.safetensors')]
        except Exception:
            pass

        # Mirror the type dropdown of the standard CLIPLoader when
        # available; fall back to a minimal option list otherwise.
        try:
            from nodes import CLIPLoader
            base_types = CLIPLoader.INPUT_TYPES()
            clip_types = base_types["required"]["type"]
        except Exception:
            clip_types = (["stable_diffusion"],)

        input_types = {
            "required": {
                "clip_name": (sorted(files) if files else ["No safetensors CLIP files found"],),
                "type": clip_types,
            }
        }
        return cls.add_trigger_input(input_types)

    RETURN_TYPES = ("CLIP", ANY_SOCKET_TYPES)
    RETURN_NAMES = ("CLIP", "Bypass(Relay)")
    FUNCTION = "load_clip"

    def load_clip(self, clip_name, type="stable_diffusion", trigger=None):
        """
        Load CLIP from safetensors

        Args:
            clip_name: CLIP file name (.safetensors)
            type: CLIP type (stable_diffusion, qwen_image, etc.)
            trigger: Optional trigger for sequence control

        Returns:
            (CLIP, Bypass(Relay))

        Raises:
            FileNotFoundError: If clip_name cannot be resolved in models/clip/
        """
        self.log_execution("CLIP", {"clip_name": clip_name, "type": type})

        clip_path = folder_paths.get_full_path("clip", clip_name)

        if clip_path is None:
            raise FileNotFoundError(
                f"CLIP file not found: {clip_name}\n"
                f"Please place CLIP files in: models/clip/"
            )

        # Load CLIP using standard ComfyUI functionality
        import comfy.sd

        # Convert type string to CLIPType enum; unknown strings fall back
        # to STABLE_DIFFUSION rather than raising.
        clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION)

        clip = comfy.sd.load_clip(
            ckpt_paths=[clip_path],
            embedding_directory=folder_paths.get_folder_paths("embeddings"),
            clip_type=clip_type
        )

        return (clip, trigger)


class SequentialVAELoaderSimple(SequentialLoaderBase):
    """
    Sequential version of VAE Loader (Safetensors and TAESD)

    Features:
    - Load VAE from .safetensors files
    - Supports vae folder
    - Supports TAESD (tiny VAE) models
    - No external dependencies
    """

    # TAESD variants that can be assembled from models/vae_approx/
    _TAESD_VARIANTS = ('taesd', 'taesdxl', 'taesd3', 'taef1')

    @classmethod
    def INPUT_TYPES(cls):
        # Get VAE file list from vae folder (safetensors / .pt only).
        # Narrow except: a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit; best-effort fallback is intentional.
        vaes = []
        try:
            all_files = folder_paths.get_filename_list("vae")
            vaes = [f for f in all_files if f.endswith('.safetensors') or f.endswith('.pt')]
        except Exception:
            pass

        # Add TAESD options only when BOTH encoder and decoder are present.
        try:
            approx_vaes = folder_paths.get_filename_list("vae_approx")
            for variant in cls._TAESD_VARIANTS:
                has_encoder = any(f.startswith(f'{variant}_encoder.') for f in approx_vaes)
                has_decoder = any(f.startswith(f'{variant}_decoder.') for f in approx_vaes)
                if has_encoder and has_decoder:
                    vaes.append(variant)
        except Exception:
            pass

        input_types = {
            "required": {
                "vae_name": (sorted(vaes) if vaes else ["No VAE files found"],),
            }
        }
        return cls.add_trigger_input(input_types)

    RETURN_TYPES = ("VAE", ANY_SOCKET_TYPES)
    RETURN_NAMES = ("VAE", "Bypass(Relay)")
    FUNCTION = "load_vae"

    def load_vae(self, vae_name, trigger=None):
        """
        Load VAE from safetensors or TAESD

        Args:
            vae_name: VAE file name or TAESD variant
            trigger: Optional trigger for sequence control

        Returns:
            (VAE, Bypass(Relay))

        Raises:
            FileNotFoundError: If the VAE (or a TAESD encoder/decoder
                component) cannot be found on disk.
        """
        self.log_execution("VAE", {"vae_name": vae_name})

        import comfy.sd
        import comfy.utils

        # TAESD variants are assembled from separate encoder/decoder files.
        if vae_name in self._TAESD_VARIANTS:
            sd = self._load_taesd(vae_name)
        else:
            # Load standard VAE using ComfyUI standard functionality
            vae_path = folder_paths.get_full_path_or_raise("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)

        vae = comfy.sd.VAE(sd=sd)
        return (vae, trigger)

    def _load_taesd(self, name):
        """
        Load TAESD (Tiny AutoEncoder) VAE state dict.

        Args:
            name: TAESD variant name ('taesd', 'taesdxl', 'taesd3', 'taef1')

        Returns:
            State dict with 'taesd_encoder.*' / 'taesd_decoder.*' keys plus
            'vae_scale' / 'vae_shift' tensors for known variants.

        Raises:
            FileNotFoundError: If an encoder or decoder file is missing
                (e.g. removed after the dropdown was populated).
        """
        import comfy.utils
        import torch

        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sd = {}

        # Encoder and decoder are stored as separate files; load each and
        # re-key its weights under the prefix comfy.sd.VAE expects.
        for role in ("encoder", "decoder"):
            prefix = f'{name}_{role}.'
            filename = next((a for a in approx_vaes if a.startswith(prefix)), None)
            if filename is None:
                # Explicit error instead of an opaque StopIteration from
                # next() on an exhausted filter.
                raise FileNotFoundError(
                    f"TAESD {role} file '{prefix}*' not found in models/vae_approx/"
                )
            weights = comfy.utils.load_torch_file(
                folder_paths.get_full_path_or_raise("vae_approx", filename)
            )
            for k in weights:
                sd[f'taesd_{role}.{k}'] = weights[k]

        # Per-variant latent scale/shift parameters.
        scales = {
            'taesd': (0.18215, 0.0),
            'taesdxl': (0.13025, 0.0),
            'taesd3': (1.5305, 0.0609),
            'taef1': (0.3611, 0.1159),
        }

        if name in scales:
            scale, shift = scales[name]
            sd['vae_scale'] = torch.tensor(scale)
            sd['vae_shift'] = torch.tensor(shift)

        return sd
