"""
Example of using backbone models in continual learning.

This example demonstrates how to instantiate and use different backbone models,
including custom CNN backbones and pre-trained Hugging Face models.
"""

import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torchvision.models import ResNet50_Weights

from continuallearning.models.backbones.base import BaseBackbone
from continuallearning.models.backbones.huggingface_models import HFVisionTransformer
from continuallearning.interfaces.types import BackboneOutput


# Example 1: Creating a custom CNN backbone
class SimpleCNNBackbone(BaseBackbone):
    """A simple CNN backbone example.

    A stack of Conv-BN-ReLU-MaxPool blocks followed by global average
    pooling and a linear projection to ``feature_dim``. The per-block
    channel widths are configurable; the defaults reproduce the original
    fixed (64, 128, 256) architecture exactly.
    """

    def __init__(self, in_channels=3, feature_dim=512, channels=(64, 128, 256)):
        """Build the CNN feature extractor.

        Args:
            in_channels: Number of input image channels (3 for RGB).
            feature_dim: Dimensionality of the projected output features.
            channels: Output channel width of each conv block. Defaults to
                (64, 128, 256), matching the previously hard-coded layout.
        """
        super().__init__()

        # Build the conv trunk block-by-block so channel widths generalize.
        layers = []
        prev_channels = in_channels
        for out_channels in channels:
            layers += [
                nn.Conv2d(prev_channels, out_channels, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]
            prev_channels = out_channels

        # Collapse spatial dims to 1x1, then flatten to (batch, prev_channels).
        layers += [nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten()]
        self.features = nn.Sequential(*layers)

        # Feature projection to the requested output size.
        self.projection = nn.Linear(prev_channels, feature_dim)
        self._feature_dim = feature_dim

    def forward(self, x):
        """Extract features from input images.

        Args:
            x: Image batch; presumably (batch, in_channels, H, W) — the conv
                trunk requires a 4-D NCHW tensor.

        Returns:
            BackboneOutput with the projected features and, as
            ``last_hidden_state``, the pre-projection pooled features.
        """
        features = self.features(x)
        projected = self.projection(features)

        # Return standardized output
        return BackboneOutput(features=projected, last_hidden_state=features)

    def get_feature_dims(self):
        """Return the feature dimension of the projected output."""
        return self._feature_dim


# Example 2: Using a pre-trained ResNet as a backbone
class ResNetBackbone(BaseBackbone):
    """ResNet backbone example.

    Wraps torchvision's ResNet-50, strips its classification head, and
    optionally projects the 2048-d pooled features to another size.
    """

    def __init__(self, pretrained=True, feature_dim=2048):
        """Build a ResNet-50 feature extractor.

        Args:
            pretrained: If True, load torchvision's default ImageNet weights.
            feature_dim: Desired output feature size. A linear projection is
                added only when this differs from ResNet-50's native 2048.
        """
        super().__init__()

        # Load ResNet-50, optionally with pretrained weights.
        self.resnet = torchvision.models.resnet50(
            weights=ResNet50_Weights.DEFAULT if pretrained else None
        )

        # Keep every child module except the final fully-connected classifier.
        trunk = list(self.resnet.children())[:-1]
        self.features = nn.Sequential(*trunk)
        self._feature_dim = 2048

        # Project only when the requested size differs from the native one.
        if feature_dim == 2048:
            self.projection = nn.Identity()
        else:
            self.projection = nn.Linear(2048, feature_dim)
            self._feature_dim = feature_dim

    def forward(self, x):
        """Extract features from input images.

        Returns:
            BackboneOutput whose ``features`` are the (possibly projected)
            pooled features and whose ``last_hidden_state`` is the raw
            flattened 2048-d ResNet output.
        """
        pooled = self.features(x)
        flat = torch.flatten(pooled, 1)
        out = self.projection(flat)
        return BackboneOutput(features=out, last_hidden_state=flat)

    def get_feature_dims(self):
        """Return the feature dimension of the output."""
        return self._feature_dim


def main():
    """Example of using backbone models."""
    # A small batch of random 224x224 RGB images shared by all examples.
    batch = torch.randn(2, 3, 224, 224)

    print("=== Example 1: Custom CNN Backbone ===")
    # Build and run the custom CNN backbone.
    cnn = SimpleCNNBackbone(in_channels=3, feature_dim=512)
    print(f"Custom CNN backbone output shape: {cnn(batch).features.shape}")

    print("\n=== Example 2: ResNet Backbone ===")
    # Build and run the ResNet backbone (no pretrained weights needed here).
    resnet = ResNetBackbone(pretrained=False, feature_dim=512)
    print(f"ResNet backbone output shape: {resnet(batch).features.shape}")

    print("\n=== Example 3: HuggingFace Vision Transformer ===")
    print(
        "Note: This example requires the transformers package and would download pretrained weights."
    )
    print("Running with pretrained=False to avoid downloads...")
    try:
        # pretrained=False avoids any network download of weights.
        vit = HFVisionTransformer(
            model_name="google/vit-base-patch16-224", pretrained=False
        )
        print(f"ViT backbone output shape: {vit(batch).features.shape}")
    except Exception as e:
        # transformers may be missing; report and continue with the demo.
        print(f"Error creating/using ViT backbone: {e}")
        print("Make sure you have the transformers package installed.")

    print("\n=== Using backbones for feature extraction ===")
    # Feature extraction does not need gradients.
    with torch.no_grad():
        extracted = cnn.extract_features(batch)
    print(f"Extracted feature shape: {extracted.shape}")


if __name__ == "__main__":
    main()
