import torch
import torch.nn as nn

from ecgcmr.multimodal.multimodal_utils.form_ed_ed_patches import ECGHeartBeatRepresentation


class SpatialAttentionPooling(nn.Module):
    """
    Attention pooling over the spatial patches of each time step using a
    learnable query token. This allows the model to learn how to weight
    different spatial patches rather than just averaging them.
    """
    def __init__(self, feature_dim, num_time_patches, num_spatial_patches, num_heads: int = 2):
        super().__init__()
        self.num_time_patches = num_time_patches
        self.num_spatial_patches = num_spatial_patches
        self.feature_dim = feature_dim

        self.mha = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads, batch_first=True)
        # Single learnable query shared across all (batch, time) attention calls.
        self.query_token = nn.Parameter(torch.randn(1, 1, feature_dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: Tensor of shape [batch_size, num_time_patches, num_spatial_patches, feature_dim]
        :return: Tensor of shape [batch_size, num_time_patches, feature_dim]
                 This is the spatially-pooled representation per time step.
        """
        batch_size = x.shape[0]
        # Fold batch and time into one axis so each time step attends over its
        # spatial patches independently. reshape() copies only when needed.
        x = x.reshape(batch_size * self.num_time_patches, self.num_spatial_patches, self.feature_dim)
        # expand() broadcasts the query as a zero-copy view; the original
        # repeat() materialized batch*time copies of the same parameter.
        q = self.query_token.expand(batch_size * self.num_time_patches, 1, self.feature_dim)
        attn_output, _ = self.mha(q, x, x)  # [batch_size * num_time_patches, 1, feature_dim]
        return attn_output.view(batch_size, self.num_time_patches, self.feature_dim)

class AttentionPooling(nn.Module):
    """
    Attention pooling that condenses a token sequence into one global token.

    The query is the mean of the input tokens; keys and values are the
    tokens themselves, so the output is an attention-weighted summary.
    """
    def __init__(self, embed_dim, num_heads) -> None:
        super().__init__()

        self.mha = nn.MultiheadAttention(
            embed_dim=embed_dim,
            num_heads=num_heads,
            batch_first=True,
        )

    def forward(self, x):
        """
        :param x: Tensor of shape [batch, seq_len, embed_dim]
        :return: Tensor of shape [batch, 1, embed_dim]
        """
        mean_query = x.mean(dim=1, keepdim=True)  # [batch, 1, embed_dim]
        pooled, _attn_weights = self.mha(mean_query, x, x)
        return pooled

class EmbedReduction(nn.Module):
    """
    Reduce patch-level encoder embeddings into local (per-time-step) and/or
    global (per-sample) representations for the MRI and ECG modalities.

    Which reduction layers exist is driven by ``cfg.training_mode``:
    ``loss.type`` in {"local", "both"} enables the local path and
    ``loss.type`` in {"global", "both"} enables the global path. The concrete
    reduction method per modality/path comes from
    ``cfg.training_mode.reduction``.
    """

    def __init__(self, cfg, imag_encoder_config, ecg_encoder_config):
        super().__init__()

        self.cfg = cfg
        self.imag_encoder_config = imag_encoder_config
        self.ecg_encoder_config = ecg_encoder_config

        self.init_layer_norms()
        self.init_image_reduction_layers()
        self.init_ecg_reduction_layers()

        if self.cfg.training_mode.loss.type in ["local", "both"]:
            # Heartbeat-level ECG representation aligned to the image time axis.
            self.heartbeat_representation = ECGHeartBeatRepresentation(
                target_length=self.cfg.num_image_time_patches,
                time_patch_size=self.ecg_encoder_config.patch_size[1],
                channel_patch_size=self.ecg_encoder_config.patch_size[0],
                crop_method=cfg.training_mode.reduction.ecg.crop_method,
                n_mri_frames=cfg.used_image_frames,
                channel_reduction=cfg.training_mode.reduction.ecg.all_tokens)

        self.apply(self.init_weights)

    def init_weights(self, m):
        """Initialize Linear/LayerNorm modules, honoring an optional per-module `init_type` attribute."""
        init_gain = 0.02
        init_type = getattr(m, 'init_type', 'normal')
        if isinstance(m, nn.Linear):
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, mean=0, std=init_gain)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight.data, gain=init_gain)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)

        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    def init_layer_norms(self):
        """Initialize layer normalization layers for both image and ECG based on the configuration."""
        # NOTE: local-path layer norms are currently disabled (see the
        # commented-out applications in the _transform_* methods).
        if self.cfg.training_mode.loss.type in ["global", "both"]:
            self.image_layernorm_global = nn.LayerNorm(self.imag_encoder_config.hidden_size, dtype=torch.float32)
            self.ecg_layernorm_global = nn.LayerNorm(self.ecg_encoder_config.hidden_size, dtype=torch.float32)

    def init_image_reduction_layers(self):
        """Initialize reduction layers for image data."""
        if self.cfg.training_mode.loss.type in ["local", "both"]:
            if self.cfg.training_mode.reduction.image.all_tokens == 'attention_pooling':
                self.image_all_tokens_attention_pooling = SpatialAttentionPooling(
                    feature_dim=self.imag_encoder_config.hidden_size,
                    num_time_patches=self.cfg.num_image_time_patches,
                    num_spatial_patches=self.cfg.num_image_spatial_patches,
                    num_heads=self.cfg.training_mode.reduction.image.num_heads
                    )
            elif self.cfg.training_mode.reduction.image.all_tokens == 'linear':
                self.image_all_tokens_linear_layer_reduction_spatial = nn.Linear(self.cfg.num_image_spatial_patches, 1)
                self.image_all_tokens_linear_layer_reduction_spatial.init_type = 'xavier'

        if self.cfg.training_mode.loss.type in ["global", "both"]:
            if self.cfg.training_mode.reduction.image.global_token == 'attention_pooling':
                self.image_global_token_attention_pool = AttentionPooling(
                    embed_dim=self.imag_encoder_config.hidden_size,
                    num_heads=self.cfg.training_mode.reduction.image.num_heads
                    )
            elif self.cfg.training_mode.reduction.image.global_token == 'linear_together':
                self.image_global_token_linear_layer_reduction = nn.Linear(self.cfg.num_image_patches, 1)
                self.image_global_token_linear_layer_reduction.init_type = 'xavier'
            elif self.cfg.training_mode.reduction.image.global_token == 'linear_separate':
                self.init_image_linear_separate()

    def init_image_linear_separate(self):
        """Initialize separate linear layers for image global token reduction."""
        self.image_global_token_linear_layer_reduction_time = nn.Linear(self.cfg.num_image_time_patches, 1)
        self.image_global_token_linear_layer_reduction_spatial = nn.Linear(self.cfg.num_image_spatial_patches, 1)
        self.image_global_token_linear_layer_reduction_time.init_type = 'xavier'
        self.image_global_token_linear_layer_reduction_spatial.init_type = 'xavier'

    def init_ecg_reduction_layers(self):
        """Initialize reduction layers for ECG data."""
        if self.cfg.training_mode.loss.type in ["local", "both"]:
            if self.cfg.training_mode.reduction.ecg.all_tokens == 'linear':
                self.ecg_all_tokens_linear_layer_reduction_channel = nn.Linear(self.cfg.num_ecg_channel_patches, 1)
                self.ecg_all_tokens_linear_layer_reduction_channel.init_type = 'xavier'

        if self.cfg.training_mode.loss.type in ["global", "both"]:
            if self.cfg.training_mode.reduction.ecg.global_token == 'attention_pooling':
                self.ecg_global_token_attention_pool = AttentionPooling(
                    embed_dim=self.ecg_encoder_config.hidden_size,
                    num_heads=self.cfg.training_mode.reduction.ecg.num_heads)
            elif self.cfg.training_mode.reduction.ecg.global_token == 'linear_together':
                self.ecg_global_token_linear_layer_reduction = nn.Linear(self.cfg.num_ecg_patches, 1)
                self.ecg_global_token_linear_layer_reduction.init_type = 'xavier'
            elif self.cfg.training_mode.reduction.ecg.global_token == 'linear_separate':
                self.init_ecg_linear_separate()

    def init_ecg_linear_separate(self):
        """Initialize separate linear layers for ECG global token reduction."""
        self.ecg_global_token_linear_layer_reduction_time = nn.Linear(self.cfg.num_ecg_time_patches, 1)
        # A single channel patch needs no channel reduction layer.
        self.ecg_global_token_linear_layer_reduction_channel = nn.Linear(self.cfg.num_ecg_channel_patches, 1) if self.cfg.num_ecg_channel_patches != 1 else None
        self.ecg_global_token_linear_layer_reduction_time.init_type = 'xavier'
        if self.cfg.num_ecg_channel_patches != 1:
            self.ecg_global_token_linear_layer_reduction_channel.init_type = 'xavier'

    def forward(self, hidden_state, encoder_config, mode='image', rpeaks=None, tpfs=None):
        """
        Dispatch to the modality-specific reduction.

        :param hidden_state: encoder output [batch, num_patches(+cls), hidden_size]
        :param encoder_config: config of the encoder that produced hidden_state
        :param mode: 'image' or 'ecg'
        :param rpeaks, tpfs: ECG-only auxiliary inputs forwarded to the
            heartbeat representation (local path).
        :raises ValueError: on an unknown mode.
        """
        if mode == 'image':
            return self._transform_mri_embeddings(hidden_state=hidden_state, encoder_config=encoder_config)
        elif mode == 'ecg':
            return self._transform_ecg_embeddings(hidden_state=hidden_state, encoder_config=encoder_config, rpeaks=rpeaks, tpfs=tpfs)
        else:
            raise ValueError("Invalid mode specified: choose 'image' or 'ecg'")

    def _transform_mri_embeddings(self, hidden_state, encoder_config):
        """Reduce MRI patch embeddings to local/global tokens per configuration."""
        global_token = None
        all_tokens = None

        batch_size = hidden_state.shape[0]

        if encoder_config.use_cls_token:
            all_tokens, global_token = hidden_state[:, 1:], hidden_state[:, 0]
        else:
            all_tokens = hidden_state

        if self.cfg.training_mode.loss.type in ["global", "both"]:
            method = self.cfg.training_mode.reduction.image.global_token
            if method == 'mean':
                global_token = torch.mean(all_tokens, dim=1)

            elif method == 'cls_token':
                global_token = hidden_state[:, 0]

            elif method == 'attention_pooling':
                # squeeze the singleton query dim so every method yields [b, d]
                global_token = self.image_global_token_attention_pool(all_tokens).squeeze(1)

            elif method == 'linear_together':
                reshaped_tokens = all_tokens.transpose(1, 2).reshape(batch_size * encoder_config.hidden_size, self.cfg.num_image_patches)
                reduced_tokens = self.image_global_token_linear_layer_reduction(reshaped_tokens)
                global_token = reduced_tokens.view(batch_size, encoder_config.hidden_size)

            elif method == 'linear_separate':
                reshaped_tokens = all_tokens.view(batch_size, self.cfg.num_image_time_patches, self.cfg.num_image_spatial_patches, encoder_config.hidden_size)

                # Reduce the time axis: move time last so each row fed to the
                # linear layer is one (spatial patch, feature) time series.
                time_reduced = reshaped_tokens.permute(0, 2, 3, 1).contiguous()
                time_reduced = time_reduced.view(-1, self.cfg.num_image_time_patches)
                time_reduced = self.image_global_token_linear_layer_reduction_time(time_reduced)
                time_reduced = time_reduced.view(batch_size, self.cfg.num_image_spatial_patches, encoder_config.hidden_size)

                # Reduce the spatial axis. BUGFIX: transpose so the spatial
                # dimension is contiguous in the last axis before flattening;
                # the previous view(-1, num_spatial_patches) chunked the flat
                # memory along the hidden-feature axis and mixed channels.
                space_reduced = time_reduced.transpose(1, 2).contiguous().view(-1, self.cfg.num_image_spatial_patches)
                space_reduced = self.image_global_token_linear_layer_reduction_spatial(space_reduced)
                global_token = space_reduced.view(batch_size, encoder_config.hidden_size)
            else:
                raise NotImplementedError("Please choose a valid reduction method for global token for MRI")

            global_token = self.image_layernorm_global(global_token)

        if self.cfg.training_mode.loss.type in ["local", "both"]:
            method = self.cfg.training_mode.reduction.image.all_tokens
            all_tokens = torch.reshape(
                all_tokens,
                (
                    batch_size,
                    self.cfg.num_image_time_patches, # T // tubelet_size
                    self.cfg.num_image_spatial_patches, # H // p * W // p
                    encoder_config.hidden_size # d
                    )
                )

            if method == 'attention_pooling':
                all_tokens = self.image_all_tokens_attention_pooling(all_tokens) # b, T // tubelet_size, d
            elif method == 'mean':
                all_tokens = all_tokens.mean(dim=2) # b, T // tubelet_size, d
            elif method == 'linear':
                all_tokens = all_tokens.permute(0, 1, 3, 2)  # [batch_size, num_time_patches, hidden_size, num_spatial_patches]
                all_tokens = self.image_all_tokens_linear_layer_reduction_spatial(all_tokens)  # [batch_size, num_time_patches, hidden_size, 1]
                all_tokens = all_tokens.squeeze(-1)
            else:
                raise NotImplementedError("Please choose a valid reduction method for all tokens for MRI")

            # all_tokens = self.image_layernorm_local(all_tokens)

        return {
            "img_all_tokens": all_tokens,
            "img_global_token": global_token
            }

    def _transform_ecg_embeddings(self, hidden_state, encoder_config, rpeaks=None, tpfs=None):
        """Reduce ECG patch embeddings to local/global tokens per configuration."""
        global_token = None
        all_tokens = None

        batch_size = hidden_state.shape[0]

        if encoder_config.use_cls_token:
            all_tokens, global_token = hidden_state[:, 1:], hidden_state[:, 0]
        else:
            all_tokens = hidden_state # [b, 12//t * 2500//p, d]

        if self.cfg.training_mode.loss.type in ["global", "both"]:
            method = self.cfg.training_mode.reduction.ecg.global_token
            if method == 'mean':
                global_token = torch.mean(all_tokens, dim=1)

            elif method == 'cls_token':
                global_token = hidden_state[:, 0]

            elif method == 'attention_pooling':
                # squeeze the singleton query dim so every method yields [b, d]
                global_token = self.ecg_global_token_attention_pool(all_tokens).squeeze(1)

            elif method == 'linear_together':
                reshaped_tokens = all_tokens.transpose(1, 2).reshape(batch_size * encoder_config.hidden_size, self.cfg.num_ecg_patches)
                reduced_tokens = self.ecg_global_token_linear_layer_reduction(reshaped_tokens)
                global_token = reduced_tokens.view(batch_size, encoder_config.hidden_size)

            elif method == 'linear_separate':
                reshaped_tokens = all_tokens.view(batch_size,
                                                  self.cfg.num_ecg_time_patches,
                                                  self.cfg.num_ecg_channel_patches,
                                                  encoder_config.hidden_size)

                if self.cfg.num_ecg_channel_patches == 1:
                    channel_reduced = reshaped_tokens.squeeze(2)
                else:
                    # Move the channel axis last so each row is one channel series.
                    channel_reduced = reshaped_tokens.permute(0, 1, 3, 2).contiguous()
                    channel_reduced = channel_reduced.view(-1, self.cfg.num_ecg_channel_patches)
                    channel_reduced = self.ecg_global_token_linear_layer_reduction_channel(channel_reduced)
                    channel_reduced = channel_reduced.view(batch_size, self.cfg.num_ecg_time_patches, encoder_config.hidden_size)

                # Reduce the time axis. BUGFIX: transpose so the time dimension
                # is contiguous in the last axis before flattening; the previous
                # view(-1, num_ecg_time_patches) chunked the flat memory along
                # the hidden-feature axis and mixed channels.
                time_reduced = channel_reduced.transpose(1, 2).contiguous().view(-1, self.cfg.num_ecg_time_patches)
                time_reduced = self.ecg_global_token_linear_layer_reduction_time(time_reduced)
                global_token = time_reduced.view(batch_size, encoder_config.hidden_size)
            else:
                raise NotImplementedError("Please choose a valid reduction method for global token for ECG")

            global_token = self.ecg_layernorm_global(global_token)

        if self.cfg.training_mode.loss.type in ["local", "both"]:
            all_tokens = self.heartbeat_representation(ecg_embeddings=all_tokens, rpeaks=rpeaks, tpfs=tpfs)
            # all_tokens = self.ecg_layernorm_local(all_tokens)

        return {
            "ecg_all_tokens": all_tokens,
            "ecg_global_token": global_token,
        }
