"""
Hugging Face vision models backbone implementations.
This module provides a unified interface for various vision models from Hugging Face.
"""

import inspect
from typing import Optional

import torch

# Update imports to use specific submodule paths
from transformers.models.beit import BeitModel, BeitConfig
from transformers.models.clip import CLIPModel, CLIPConfig
from transformers.models.convnext import ConvNextModel, ConvNextConfig
from transformers.models.deit import DeiTModel, DeiTConfig
from transformers.models.segformer import SegformerModel, SegformerConfig
from transformers.models.swin import SwinModel, SwinConfig
from transformers.models.vit import ViTModel, ViTConfig

from ..backbones.base import BaseBackbone
from ..core.output_utils import standardize_backbone_output
from ...registry import BACKBONE_REGISTRY
from continuallearning.interfaces.types import BackboneOutput


class HFVisionModelBase(BaseBackbone):
    """
    Base class for Hugging Face vision models.

    Wraps a ``transformers`` vision model behind the project's backbone
    interface and converts its raw outputs into a standardized
    ``BackboneOutput`` via :func:`standardize_backbone_output`.

    Args:
        model_name (str): Name of the pretrained model on the Hugging Face hub.
        pretrained (bool): Whether to use pretrained weights. Default: True
        output_hidden_states (bool): Whether to output all hidden states. Default: False
        output_attentions (bool): Whether to output attention maps. Default: False
        freeze_backbone (bool): Whether to freeze backbone parameters. Default: False
        model_cls: ``transformers`` model class to instantiate (required).
        config_cls: Matching ``transformers`` config class (required).
        cache_features (bool): Whether to cache features for faster retrieval.
        feature_layer (int): The layer to extract features from. Default: -1 (last layer)

    Raises:
        ValueError: If ``model_cls`` or ``config_cls`` is not provided.
    """

    # Upper bound on cached feature entries; oldest entries are evicted first.
    _MAX_CACHE_ENTRIES = 100

    def __init__(
        self,
        model_name: str,
        pretrained: bool = True,
        output_hidden_states: bool = False,
        output_attentions: bool = False,
        freeze_backbone: bool = False,
        model_cls=None,
        config_cls=None,
        cache_features: bool = False,
        feature_layer: int = -1,
    ):
        super().__init__()
        self.model_name = model_name
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.cache_features = cache_features
        self.feature_layer = feature_layer
        self.feature_cache = {}

        # Validate with explicit exceptions; `assert` is stripped under `-O`.
        if model_cls is None:
            raise ValueError("model_cls must be provided")
        if config_cls is None:
            raise ValueError("config_cls must be provided")

        # Only pass `add_pooling_layer` to model classes whose __init__ accepts
        # it (e.g. ViT/DeiT/BEiT/Swin). Passing it unconditionally raises a
        # TypeError for classes such as ConvNextModel, CLIPModel, SegformerModel.
        extra_kwargs = {}
        if "add_pooling_layer" in inspect.signature(model_cls.__init__).parameters:
            extra_kwargs["add_pooling_layer"] = False

        # Load model from Hugging Face.
        if pretrained:
            self.model = model_cls.from_pretrained(
                model_name,
                output_hidden_states=output_hidden_states,
                output_attentions=output_attentions,
                **extra_kwargs,
            )
        else:
            config = config_cls.from_pretrained(model_name)
            config.output_hidden_states = output_hidden_states
            config.output_attentions = output_attentions
            self.model = model_cls(config, **extra_kwargs)

        # Feature dimension; config layouts differ between model families.
        self.hidden_size = self._resolve_hidden_size(self.model.config)

        # Hooks for intermediate feature extraction (subclasses may register).
        self.hooks = []
        self.intermediate_features = {}
        if output_hidden_states:
            self._register_hooks()

        # Freeze backbone if requested.
        if freeze_backbone:
            for param in self.model.parameters():
                param.requires_grad = False

    @staticmethod
    def _resolve_hidden_size(config):
        """
        Best-effort lookup of the feature dimension across config variants.

        Handles flat configs (``hidden_size``), CLIP-style configs that nest
        the vision tower (``vision_config.hidden_size``), and hierarchical
        configs that expose per-stage sizes (``hidden_sizes``).

        Raises:
            AttributeError: If no known hidden-size attribute is found.
        """
        if hasattr(config, "hidden_size"):
            return config.hidden_size
        vision_config = getattr(config, "vision_config", None)
        if vision_config is not None and hasattr(vision_config, "hidden_size"):
            return vision_config.hidden_size
        hidden_sizes = getattr(config, "hidden_sizes", None)
        if hidden_sizes:
            # Per-stage sizes: the last stage is the final feature dimension.
            return hidden_sizes[-1]
        raise AttributeError(
            f"Could not determine hidden size from config of type {type(config)}"
        )

    def forward(self, x: torch.Tensor, **kwargs) -> BackboneOutput:
        """
        Generic forward pass for Hugging Face vision models.

        This is a template method that standardizes the forward pass for all
        model types. Specific models can customize behavior by overriding the
        hook methods (``_model_forward``, ``_extract_features_from_outputs``,
        ``_get_last_hidden_state``, ``_post_process_outputs``).

        Args:
            x (torch.Tensor): Input tensor, already preprocessed by dataset transforms

        Returns:
            BackboneOutput: Standardized output with features and transformer states
        """
        # Call the model-specific forward implementation directly.
        outputs = self._model_forward(x, **kwargs)

        # Extract features based on model type.
        features = self._extract_features_from_outputs(outputs)

        result = {
            "features": features,
            "last_hidden_state": self._get_last_hidden_state(outputs),
        }

        # Add hidden states if requested and available.
        if (
            self.output_hidden_states
            and getattr(outputs, "hidden_states", None) is not None
        ):
            result["hidden_states"] = outputs.hidden_states

        # Add attentions if requested and available.
        if (
            self.output_attentions
            and getattr(outputs, "attentions", None) is not None
        ):
            result["attentions"] = outputs.attentions

        # Apply any model-specific post-processing.
        result = self._post_process_outputs(result, outputs)

        # Convert to standardized BackboneOutput.
        return standardize_backbone_output(result)

    def _model_forward(self, x: torch.Tensor, **kwargs):
        """
        Hook method for model-specific forward implementation.
        Default implementation calls the model directly.

        Args:
            x (torch.Tensor): Preprocessed input tensor

        Returns:
            Any: Raw model outputs
        """
        return self.model(x, **kwargs)

    def _extract_features_from_outputs(self, outputs):
        """
        Hook method to extract features from model outputs.
        Default implementation tries common output patterns.

        Args:
            outputs: Model outputs

        Returns:
            torch.Tensor: Extracted feature tensor

        Raises:
            ValueError: If no known feature attribute is present on `outputs`.
        """
        if hasattr(outputs, "pooler_output") and outputs.pooler_output is not None:
            return outputs.pooler_output
        elif hasattr(outputs, "last_hidden_state"):
            # For transformer models, use CLS token by default.
            return outputs.last_hidden_state[:, 0]
        elif hasattr(outputs, "image_embeds"):
            # For CLIP-like models.
            return outputs.image_embeds
        else:
            raise ValueError(
                f"Could not extract features from outputs of type {type(outputs)}"
            )

    def _get_last_hidden_state(self, outputs):
        """
        Hook method to get last hidden state from outputs.

        Args:
            outputs: Model outputs

        Returns:
            torch.Tensor or None: Last hidden state, or None when unavailable.
        """
        if hasattr(outputs, "last_hidden_state"):
            return outputs.last_hidden_state
        elif hasattr(outputs, "hidden_states") and outputs.hidden_states:
            return outputs.hidden_states[-1]
        else:
            return None

    def _post_process_outputs(self, result, outputs):
        """
        Hook method for any model-specific post-processing of outputs.

        Args:
            result (dict): Result dictionary being built
            outputs: Raw model outputs

        Returns:
            dict: Updated result dictionary
        """
        # Default implementation returns result unchanged.
        return result

    def _register_hooks(self):
        """Register hooks for extracting intermediate features."""
        # Implementation depends on model architecture, default is no-op.
        pass

    def get_feature_dims(self):
        """
        Get the feature dimensions output by the backbone.

        Returns:
            int: Feature dimensions
        """
        return self.hidden_size

    @staticmethod
    def _cache_key(task_id, x: torch.Tensor) -> str:
        """
        Build a cheap cache key from the task id, tensor shape and object id.

        Avoids expensive hashing of tensor contents; the trade-off is that the
        cache only hits when the exact same tensor object is passed again.
        """
        return f"{task_id}_{tuple(x.shape)}_{id(x)}"

    def extract_features(
        self, x: torch.Tensor, task_id: Optional[int] = None, **kwargs
    ) -> torch.Tensor:
        """
        Extract features from a specific layer.

        Args:
            x (torch.Tensor): Input tensor of shape [B, C, H, W]
            task_id (int, optional): Task ID for feature caching

        Returns:
            torch.Tensor: Features from the specified layer

        Raises:
            RuntimeError: If hidden states were requested but not returned.
        """
        use_cache = self.cache_features and task_id is not None
        cache_key = self._cache_key(task_id, x) if use_cache else None

        # Check cache first if enabled.
        if use_cache and cache_key in self.feature_cache:
            return self.feature_cache[cache_key]

        # Forward pass.
        outputs = self.forward(x, **kwargs)

        # Extract features from the configured layer when hidden states are on.
        if self.output_hidden_states:
            if outputs.hidden_states is None:
                raise RuntimeError(
                    "Hidden states are not available with self.output_hidden_states set to True"
                )
            features = outputs.hidden_states[self.feature_layer]
            if isinstance(features, torch.Tensor):
                # For models that return the full sequence, take the CLS token.
                if features.dim() > 2:
                    features = features[:, 0]
            else:
                features = outputs.features
        else:
            features = outputs.features

        # Cache if enabled, with simple FIFO eviction (insertion order) so the
        # cache cannot grow without bound.
        if use_cache:
            self.feature_cache[cache_key] = features
            if len(self.feature_cache) > self._MAX_CACHE_ENTRIES:
                self.feature_cache.pop(next(iter(self.feature_cache)))

        return features

    def clear_cache(self):
        """Clear the feature cache (safe even when caching is disabled)."""
        self.feature_cache.clear()

@BACKBONE_REGISTRY.register("hf_vit")
class HFVisionTransformer(HFVisionModelBase):
    """Vision Transformer (ViT) backbone backed by Hugging Face's ``ViTModel``."""

    def __init__(
        self,
        model_name="google/vit-base-patch16-224",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
    ):
        """Configure the shared HF base with the ViT model/config classes."""
        shared = dict(
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
        )
        super().__init__(model_cls=ViTModel, config_cls=ViTConfig, **shared)

    def _extract_features_from_outputs(self, outputs):
        """Use the [CLS] token embedding as the image-level feature."""
        tokens = outputs.last_hidden_state
        # The [CLS] token sits at position 0 of the sequence axis.
        return tokens[:, 0]


@BACKBONE_REGISTRY.register("hf_deit")
class HFDeiT(HFVisionModelBase):
    """DeiT backbone built on Hugging Face's ``DeiTModel``."""

    def __init__(
        self,
        model_name="facebook/deit-base-patch16-224",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
    ):
        """Initialize the DeiT backbone on top of the shared HF base class."""
        super().__init__(
            model_cls=DeiTModel,
            config_cls=DeiTConfig,
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
        )

    def _extract_features_from_outputs(self, outputs):
        """Return the class-token embedding from the DeiT encoder output."""
        # Index 0 along the token axis selects the class token.
        return outputs.last_hidden_state[:, 0]


@BACKBONE_REGISTRY.register("hf_swin")
class HFSwin(HFVisionModelBase):
    """Hugging Face Swin Transformer backbone."""

    def __init__(
        self,
        model_name="microsoft/swin-base-patch4-window7-224",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
    ):
        """Initialize the Swin backbone via the shared HF base class."""
        super().__init__(
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
            model_cls=SwinModel,
            config_cls=SwinConfig,
        )

    def _extract_features_from_outputs(self, outputs):
        """
        Return pooled image-level features from Swin outputs.

        The base class constructs the model with ``add_pooling_layer=False``,
        which makes ``pooler_output`` None; in that case fall back to
        mean-pooling over the token (sequence) dimension of
        ``last_hidden_state`` instead of returning None.
        """
        pooled = getattr(outputs, "pooler_output", None)
        if pooled is not None:
            return pooled
        return outputs.last_hidden_state.mean(dim=1)


@BACKBONE_REGISTRY.register("hf_beit")
class HFBeit(HFVisionModelBase):
    """BEiT backbone built on Hugging Face's ``BeitModel``."""

    def __init__(
        self,
        model_name="microsoft/beit-base-patch16-224",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
    ):
        """Wire the shared HF base class to the BEiT model/config classes."""
        base_kwargs = dict(
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
        )
        super().__init__(model_cls=BeitModel, config_cls=BeitConfig, **base_kwargs)

    def _extract_features_from_outputs(self, outputs):
        """Select the [CLS] token as the global image feature."""
        cls_features = outputs.last_hidden_state[:, 0]
        return cls_features


@BACKBONE_REGISTRY.register("hf_convnext")
class HFConvNext(HFVisionModelBase):
    """Hugging Face ConvNeXt backbone."""

    def __init__(
        self,
        # Fix: the previous default "facebook/convnext-base" is not a valid
        # hub id; the published checkpoints carry the input resolution suffix.
        model_name="facebook/convnext-base-224",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
    ):
        """Initialize the ConvNeXt backbone via the shared HF base class."""
        super().__init__(
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
            model_cls=ConvNextModel,
            config_cls=ConvNextConfig,
        )

    def _extract_features_from_outputs(self, outputs):
        """
        Return pooled image-level features from ConvNeXt outputs.

        Prefers the model's own ``pooler_output``; defensively falls back to
        global average pooling of the (B, C, H, W) feature map when the
        pooler output is unavailable.
        """
        pooled = getattr(outputs, "pooler_output", None)
        if pooled is not None:
            return pooled
        return outputs.last_hidden_state.mean(dim=(-2, -1))


@BACKBONE_REGISTRY.register("hf_clip")
class HFCLIP(HFVisionModelBase):
    """
    Hugging Face CLIP model used as a vision feature backbone.

    Args:
        model_name (str): CLIP checkpoint name on the Hugging Face hub.
        pretrained (bool): Whether to load pretrained weights.
        output_hidden_states (bool): Whether to output all hidden states.
        output_attentions (bool): Whether to output attention maps.
        freeze_backbone (bool): Whether to freeze backbone parameters.
        use_vision_model (bool): If True (default), run only CLIP's vision
            tower; otherwise the full CLIP model is called.
    """

    def __init__(
        self,
        model_name="openai/clip-vit-base-patch32",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
        use_vision_model=True,
    ):
        super().__init__(
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
            model_cls=CLIPModel,
            config_cls=CLIPConfig,
        )
        self.use_vision_model = use_vision_model

    def _model_forward(self, x: torch.Tensor, **kwargs):
        """Run CLIP's vision tower (or the full model) on the input images."""
        if self.use_vision_model:
            return self.model.vision_model(
                x,
                output_hidden_states=self.output_hidden_states,
                # Fix: output_attentions was previously not forwarded here, so
                # attention maps were never returned even when requested.
                output_attentions=self.output_attentions,
            )
        # NOTE(review): CLIPModel.forward also expects `input_ids` for the
        # text tower; calling it with images only may fail — confirm this
        # path is exercised, or switch to `self.model.get_image_features`.
        return self.model(pixel_values=x)

    def _extract_features_from_outputs(self, outputs):
        """Extract appropriate features based on CLIP model usage mode."""
        if self.use_vision_model:
            # Vision-tower output: pooled [CLS]-based representation.
            return outputs.pooler_output
        else:
            # Full-model output: projected image embeddings.
            return outputs.image_embeds

    def _get_last_hidden_state(self, outputs):
        """Extract appropriate last hidden state based on CLIP model usage mode."""
        if self.use_vision_model:
            return outputs.last_hidden_state
        else:
            return outputs.vision_model_output.last_hidden_state


@BACKBONE_REGISTRY.register("hf_segformer")
class HFSegformer(HFVisionModelBase):
    """Hugging Face Segformer backbone for feature extraction."""

    def __init__(
        self,
        model_name="nvidia/segformer-b0-finetuned-ade-512-512",
        pretrained=True,
        output_hidden_states=False,
        output_attentions=False,
        freeze_backbone=True,
    ):
        """Initialize the Segformer backbone via the shared HF base class."""
        super().__init__(
            model_name=model_name,
            pretrained=pretrained,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            freeze_backbone=freeze_backbone,
            model_cls=SegformerModel,
            config_cls=SegformerConfig,
        )

    def _extract_features_from_outputs(self, outputs):
        """
        Globally average-pool the Segformer feature map into a feature vector.

        Segformer's ``last_hidden_state`` is a conv-style map of shape
        (batch, channels, height, width), so pooling must reduce the two
        spatial dimensions to yield a (batch, channels) feature vector.
        The previous ``mean(dim=1)`` averaged over the channel dimension
        instead, producing a (batch, H, W) tensor.
        """
        return outputs.last_hidden_state.mean(dim=(-2, -1))
