import torch
import torch.nn as nn
from typing import List, Optional


class ProjectionHead(nn.Module):
    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        hidden_dims: Optional[List[int]] = None,
        activation: str = 'gelu',
        norm_type: Optional[str] = 'layer',
        is_local: bool = False,
        non_linear: bool = False,
    ):
        """
        Initializes the ProjectionHead.

        Args:
            input_dim (int): Dimension of the input features.
            output_dim (int): Dimension to project the features to.
            hidden_dims (List[int], optional): List of hidden layer dimensions. Used only if non_linear=True.
            activation (str, optional): Activation function to use ('relu' or 'gelu'). Defaults to 'gelu'.
            norm_type (str, optional): Type of normalization to use ('layer', 'batch1d', or None). Defaults to 'layer'.
            is_local (bool, optional): Indicates whether the input features are local (sequence) features
                of shape [batch, time, dim] rather than global [batch, dim]. Defaults to False.
            non_linear (bool, optional): If True, uses multiple layers with activations; otherwise, uses a
                single linear layer. Defaults to False.

        Raises:
            ValueError: If `activation` is not 'relu'/'gelu', or `norm_type` is not
                'layer'/'batch1d'/None.
        """
        super().__init__()

        self.is_local = is_local
        self.non_linear = non_linear
        self.norm_type = norm_type.lower() if norm_type is not None else None

        # Validate norm_type explicitly; previously an unknown string was
        # silently ignored and produced a head with no normalization.
        if self.norm_type not in (None, 'layer', 'batch1d'):
            raise ValueError(f"Unsupported norm_type: {norm_type}. Choose 'layer', 'batch1d', or None.")

        act = activation.lower()
        if act == 'relu':
            activation_fn = nn.ReLU
        elif act == 'gelu':
            activation_fn = nn.GELU
        else:
            raise ValueError(f"Unsupported activation: {activation}. Choose 'relu' or 'gelu'.")

        # The single-linear-layer case is just the MLP case with no hidden dims,
        # so both paths share one construction loop.
        if self.non_linear:
            dims = [input_dim] + (hidden_dims if hidden_dims is not None else []) + [output_dim]
        else:
            dims = [input_dim, output_dim]

        layers: List[nn.Module] = []
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i + 1], bias=True))

            norm_layer = self._make_norm(dims[i + 1])
            if norm_layer is not None:
                layers.append(norm_layer)

            # Activation only between hidden layers, never after the output layer.
            if i < len(dims) - 2:
                layers.append(activation_fn())

        self.projection_head = nn.Sequential(*layers)
        self._initialize_weights()

    def _make_norm(self, dim: int) -> Optional[nn.Module]:
        """Build the configured normalization layer for `dim` features, or None if norm is disabled."""
        if self.norm_type == 'layer':
            return nn.LayerNorm(dim, dtype=torch.float32)
        if self.norm_type == 'batch1d':
            return nn.BatchNorm1d(dim)
        return None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass through the projection head.

        Args:
            x (torch.Tensor): Input tensor.
                - Global features: [batch_size, input_dim]
                - Local features: [batch_size, time_steps, input_dim]

        Returns:
            torch.Tensor: Projected tensor.
                - Global output: [batch_size, output_dim]
                - Local output: [batch_size, time_steps, output_dim]
        """
        if self.is_local:
            batch_size, time_steps, _ = x.size()

            # BatchNorm1d normalizes over the batch dimension, so sequence
            # features are flattened to [batch*time, dim] and restored after.
            if self.norm_type == 'batch1d':
                x = x.view(batch_size * time_steps, -1)

            x = self._apply_layers(x)

            if self.norm_type == 'batch1d':
                x = x.view(batch_size, time_steps, -1)
        else:
            x = self._apply_layers(x)

        return x

    def _apply_layers(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies the layers sequentially to the input tensor.

        BatchNorm1d layers get special handling for 3-D input: the leading two
        dimensions are flattened so normalization runs per-feature, then the
        original shape is restored.

        Args:
            x (torch.Tensor): Input tensor (2-D or 3-D).

        Returns:
            torch.Tensor: Output tensor after applying layers.

        Raises:
            ValueError: If a BatchNorm1d layer receives input that is neither 2-D nor 3-D.
        """
        for layer in self.projection_head:
            if isinstance(layer, nn.BatchNorm1d):
                if x.dim() == 2:
                    x = layer(x)
                elif x.dim() == 3:
                    # `leading_dim` is the batch size; flatten [B, T, F] -> [B*T, F].
                    leading_dim, num_features = x.size(0), x.size(2)
                    x = x.view(-1, num_features)
                    x = layer(x)
                    x = x.view(leading_dim, -1, num_features)
                else:
                    raise ValueError("Unsupported input shape for BatchNorm1d.")
            else:
                x = layer(x)
        return x

    def _initialize_weights(self):
        """
        Initialize weights of linear layers using Xavier uniform initialization and biases to zero.
        Normalization layers are reset to identity (weight=1, bias=0).
        """
        for m in self.projection_head:
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)):
                if m.weight is not None:
                    nn.init.ones_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)


class ProjectionHeadSimple(nn.Module):
    """Minimal projection head: a single linear map, or a two-layer MLP with a ReLU."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers=2):
        super().__init__()

        if num_layers == 1:
            modules = [nn.Linear(input_dim, output_dim)]
        else:
            modules = [
                nn.Linear(input_dim, hidden_dim),
                nn.ReLU(inplace=True),
                nn.Linear(hidden_dim, output_dim),
            ]

        self.projection = nn.Sequential(*modules)
        self._initialize_weights()

    def forward(self, x):
        """
        x: Tensor of shape [N, D] for global features or [N, T, D] for local features
        """
        return self.projection(x)

    def _initialize_weights(self):
        """Kaiming-normal init (ReLU gain) for linear weights; biases set to zero."""
        for module in self.projection.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
            if module.bias is not None:
                nn.init.zeros_(module.bias)
