import torch
import numpy as np

from models.utils import calculate_cnn_output_size, init_model_weights, init_transformer_weights

from typing import Optional


class Swapaxes(torch.nn.Module):
    """nn.Module wrapper around axis swapping, so it can sit inside a Sequential."""

    def __init__(self, dim_0: int, dim_1: int):
        super(Swapaxes, self).__init__()
        # Remember the two axes to exchange in forward().
        self.dim_0 = dim_0
        self.dim_1 = dim_1

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        """Return X with axes dim_0 and dim_1 exchanged (transpose is the swapaxes alias)."""
        return X.transpose(self.dim_0, self.dim_1)


class MultimodalEmbeddingWithEarlyFusion(torch.nn.Module):
    """CNN embedding that early-fuses a CWT spectrum, RR features and a timestamp.

    Per row, the forward pass stacks:
      * 1 channel of CWT frequency coefficients,
      * 4 channels of RR features (broadcast along the frequency axis),
      * 1 channel holding the timestamp (broadcast along the frequency axis),
    i.e. ``input_features`` = 6 channels of length ``n_freqs``, which are run
    through a small Conv1d stack and projected to ``d_embedding``.
    """

    def __init__(self, d_embedding, input_features=6, n_freqs=100):
        super(MultimodalEmbeddingWithEarlyFusion, self).__init__()

        # Width of the frequency axis every modality is broadcast to.
        # (Fix: forward() previously hard-coded 100 and ignored n_freqs.)
        self.n_freqs = n_freqs

        channels = [4, 8, 16, 32]
        kernel_sizes = [4, 4, 4, 4]
        paddings = [0, 0, 2, 1]
        strides = [2, 2, 2, 2]

        layers = []
        current_features = input_features
        for c, k, p, s in zip(channels, kernel_sizes, paddings, strides):
            layers.append(torch.nn.Conv1d(current_features, c, kernel_size=k, padding=p, stride=s))
            layers.append(torch.nn.ReLU())
            current_features = c
        # Flattened size of the CNN output, needed to size the dense head.
        output_dimension = calculate_cnn_output_size(n_freqs, channels, paddings, kernel_sizes, strides)
        layers.append(torch.nn.Flatten(start_dim=1))
        layers.append(torch.nn.LayerNorm(output_dimension))
        layers.append(torch.nn.Linear(output_dimension, 512))
        layers.append(torch.nn.ReLU())
        layers.append(torch.nn.Linear(512, d_embedding))

        self.layers = torch.nn.Sequential(*layers)

    def forward(self, X_cwt, rr):
        """Fuse and embed one timestep per row.

        Args:
            X_cwt: (B * timesteps, n_freqs + 1); the last column is the timestamp.
            rr: (B * timesteps, 4) RR-interval features.

        Returns:
            (B * timesteps, d_embedding) embeddings.
        """
        # (B * timesteps, n_freqs + 1) -> (B * timesteps, 1, n_freqs)
        X_cwt_freqs = X_cwt[:, :-1].unsqueeze(1)
        # Broadcast the timestamp over the frequency axis: (B * timesteps, 1, n_freqs)
        X_cwt_timestamps = X_cwt[:, -1:].unsqueeze(1).repeat(1, 1, self.n_freqs)
        # Broadcast each rr feature over the frequency axis: (B * timesteps, 4, n_freqs)
        rr = rr.unsqueeze(-1).repeat(1, 1, self.n_freqs)
        # Stack into (B * timesteps, input_features, n_freqs) for the Conv1d stack
        X = torch.concat([X_cwt_freqs, rr, X_cwt_timestamps], dim=1)

        return self.layers(X)


class LinearEmbedding(torch.nn.Module):
    """Bias-free linear projection of feature vectors into the embedding space."""

    def __init__(self, d_embedding, n_features):
        super(LinearEmbedding, self).__init__()
        # A single projection matrix; no bias term.
        self.layer = torch.nn.Linear(n_features, d_embedding, bias=False)

    def forward(self, X):
        """Project the last dimension of X from n_features to d_embedding."""
        projected = self.layer(X)
        return projected


class PositionalEncoding(torch.nn.Module):
    """Classic sinusoidal positional encoding, added to the input.

    When ``learned`` is True the sinusoidal table is a trainable parameter
    (used as initialisation); otherwise it is a fixed, non-trainable buffer.
    """

    def __init__(self, d_embedding, n_input=1000, learned=False):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(n_input, dtype=torch.float).unsqueeze(1)
        # Geometric frequency scaling: exp(-2i * ln(10000) / d) for even indices i.
        freq_scale = torch.exp(torch.arange(0, d_embedding, 2).float() * (-np.log(10000.0) / d_embedding))
        angles = positions * freq_scale
        table = torch.zeros(n_input, d_embedding)
        table[:, 0::2] = torch.sin(angles)  # even dims: sine
        table[:, 1::2] = torch.cos(angles)  # odd dims: cosine
        table = table.unsqueeze(0)  # prepend batch dim -> (1, n_input, d_embedding)
        if learned:
            self.pe = torch.nn.Parameter(table, requires_grad=True)
        else:
            self.register_buffer('pe', table)

    def forward(self, x):
        """Add the encoding table, truncated to x's sequence length, to x."""
        seq_len = x.size(1)
        return x + self.pe[:, :seq_len, :]


class PositionalEncodingWithTimestamp(torch.nn.Module):
    """Sinusoidal positional encoding whose positions are scaled by each
    sample's actual sampling rate, recovered from its timestamp vector.

    Args:
        d_embedding: embedding dimension (even, for the sin/cos split).
        n_input: sequence (window) length.
        base_sr: reference sampling rate; a sample recorded at base_sr gets
            the standard integer positions 0..n_input-1.
    """

    def __init__(self, d_embedding, n_input=1000, base_sr=300):
        super(PositionalEncodingWithTimestamp, self).__init__()
        self.window_length = n_input
        self.base_sr = base_sr
        self.d_embedding = d_embedding

    def forward(self, X, timestamps):
        """Add the timestamp-aware encoding to X.

        Args:
            X: (batch_size, window_length, d_embedding) embeddings.
            timestamps: (batch_size, window_length) per-timestep timestamps,
                assumed uniformly spaced within each sample.

        Returns:
            X plus the positional encoding, as a new tensor (X is not mutated).
        """
        batch_size = X.size(0)
        device = X.device

        # Per-sample sampling rate: 1 / (time delta between consecutive steps).
        # Fix: the original `1 / timestamps[:, 1] - timestamps[:, 0]` divided
        # before subtracting, which is not a sampling rate.
        sr = 1 / (timestamps[:, 1] - timestamps[:, 0])  # (batch_size,)
        sr_ratio = sr / self.base_sr  # (batch_size,)
        pe = torch.zeros(batch_size, self.window_length, self.d_embedding, dtype=X.dtype, device=device)  # (batch_size, window_length, d_embed)
        # Integer positions scaled by each sample's rate relative to base_sr.
        position = (torch.arange(self.window_length, device=device).unsqueeze(0) * sr_ratio.unsqueeze(1)).unsqueeze(2)  # (batch_size, window_length, 1)
        # NOTE(review): uses base 1E5 here while PositionalEncoding uses 10000.0 — confirm this is intentional.
        div_term = torch.exp(torch.arange(0, self.d_embedding, 2, dtype=X.dtype, device=device) * (-np.log(1E5) / self.d_embedding))  # (d_embed // 2,)
        pe[:, :, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, :, 1::2] = torch.cos(position * div_term)  # odd dims: cosine

        # Out-of-place addition so the caller's tensor is left untouched
        # (the original `X += pe` mutated the input in place).
        return X + pe


class ECGTransformer(torch.nn.Module):
    """Transformer-encoder classifier for windowed ECG data.

    The model embeds per-timestep feature rows (raw ECG samples and/or CWT
    spectra, each with an optional trailing timestamp row), applies one of
    several positional-encoding schemes, runs a TransformerEncoder, mean-pools
    over time and classifies the pooled latent. RR-interval features are fused
    either early (repeated as extra per-timestep features) or late (through a
    small MLP whose output is concatenated to the pooled latent).
    """

    def __init__(self, n_input=1000, input_type='cwt+rr', d_embedding=128, n_layers=2,
                 n_classes=3, d_hidden_classifier=512,
                 transformer_dropout=0.1, positional_encoding='timestamp_concat_before',
                 class_probabilities=None, late_fusion=False):
        """
        Args:
            n_input: window length in timesteps; shorter inputs are padded.
            input_type: any combination of 'cwt', 'ecg', 'rr' (substring match).
            d_embedding: transformer model dimension (one head per 64 dims).
            n_layers: number of encoder layers.
            n_classes: classifier output size.
            d_hidden_classifier: hidden width of the classification MLP.
            transformer_dropout: dropout inside the encoder layers.
            positional_encoding: name of the positional-encoding scheme.
            class_probabilities: optional initial bias for the final linear
                layer (e.g. log class priors).
            late_fusion: fuse RR features after the transformer instead of
                per timestep.
        """
        super(ECGTransformer, self).__init__()

        n_heads = d_embedding // 64      # one attention head per 64 embedding dims
        d_feedforward = d_embedding * 4  # conventional 4x FFN expansion

        self.n_input = n_input
        self.positional_encoding_scheme = positional_encoding
        self.late_fusion = late_fusion

        # Per-timestep input feature count depends on the selected modalities.
        self.n_features = 0
        if 'cwt' in input_type:
            self.n_features += 100  # CWT frequency bins
        if 'ecg' in input_type:
            self.n_features += 1    # raw ECG amplitude
        if 'rr' in input_type and not late_fusion:
            self.n_features += 4    # RR features repeated for every timestep

        # init embedding module
        # If positional encoding is concatenated after the embedding, the embedding dimension is reduced by 1.
        # If positional encoding is already concatenated before the embedding, the positional encoding adds another input feature to the embedding.
        self.embedding = LinearEmbedding(d_embedding if 'concat_after' not in self.positional_encoding_scheme else d_embedding - 1,
                                         self.n_features if 'concat_before' not in self.positional_encoding_scheme else self.n_features + 1)
        self.d_embedding = d_embedding

        if self.positional_encoding_scheme == 'zero_addition_learned':
            # Fully learned additive encoding, initialised from N(0, 1).
            self.positional_encoding = torch.nn.parameter.Parameter(torch.randn((1, n_input, d_embedding)), requires_grad=True)
        elif self.positional_encoding_scheme == 'standard_addition':
            self.positional_encoding = PositionalEncoding(d_embedding, n_input, learned=False)
        elif self.positional_encoding_scheme == 'standard_addition_learned':
            self.positional_encoding = PositionalEncoding(d_embedding, n_input, learned=True)
        elif self.positional_encoding_scheme == 'standard_timestamp_addition':
            self.positional_encoding = PositionalEncodingWithTimestamp(d_embedding, n_input)
        elif 'linspace' in self.positional_encoding_scheme:
            # Linear ramp over [0, 2], written into the inputs' timestamp column in forward().
            self.positional_encoding = torch.nn.Parameter(2 * torch.linspace(0, 1, n_input).unsqueeze(0).unsqueeze(-1), requires_grad='learned' in self.positional_encoding_scheme)

        # init transformer: encoder stack followed by mean-pooling over time
        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=d_embedding, nhead=n_heads, dim_feedforward=d_feedforward,
                                                         batch_first=True, dropout=transformer_dropout,
                                                         norm_first=True)
        self.transformer = torch.nn.Sequential(
            torch.nn.TransformerEncoder(encoder_layer, num_layers=n_layers,
                                        norm=torch.nn.LayerNorm(d_embedding)),
            Swapaxes(1, 2),                           # (B, T, D) -> (B, D, T) for pooling
            torch.nn.AvgPool1d(kernel_size=n_input),  # mean over all timesteps -> (B, D, 1)
            torch.nn.Flatten(),                       # (B, D, 1) -> (B, D)
        )

        # init feature head if late fusion is used
        if self.late_fusion:
            self.feature_head = torch.nn.Sequential(
                torch.nn.Linear(4, 128),
                torch.nn.ReLU(),
                torch.nn.Linear(128, 128),
                torch.nn.ReLU(),
                torch.nn.Linear(128, 64),
                torch.nn.ReLU(),
            )

        # init classification module (input grows by 64 when the late-fusion head is used)
        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(d_embedding + (64 if self.late_fusion else 0), d_hidden_classifier),
            torch.nn.ReLU(),
            torch.nn.Linear(d_hidden_classifier, n_classes),
        )

        if class_probabilities is not None:
            # Initialise the output bias, e.g. with log class priors.
            self.classifier[-1].bias = torch.nn.Parameter(class_probabilities)

        model_parameters = filter(lambda p: p.requires_grad, self.parameters())
        n_params = sum([np.prod(p.size()) for p in model_parameters])
        print(f"Number of parameters: {n_params:,}")

    def apply_embedding(self, X_ecg: Optional[torch.Tensor], X_cwt: Optional[torch.Tensor], rr: Optional[torch.Tensor]) -> torch.Tensor:
        """Embed every timestep of the concatenated input modalities.

        Assumes at least one of X_ecg / X_cwt is given (their sizes define the
        flattened batch layout). Returns (B, n_seq, d_embedding), or
        (B, n_seq, d_embedding - 1) for 'concat_after' schemes.
        """
        if self.late_fusion:
            rr = None  # RR features are handled by the late-fusion feature head instead

        if 'concat_before' not in self.positional_encoding_scheme:
            # Remove last row (positional encoding) if positional encoding is added after the embedding
            if X_ecg is not None:
                X_ecg = X_ecg[:, :, :-1]
            if X_cwt is not None:
                X_cwt = X_cwt[:, :, :-1]
        else:
            if X_ecg is not None and X_cwt is not None:
                # Remove duplicate timestamp row
                X_cwt = X_cwt[:, :, :-1]

        if X_ecg is not None:
            # Reshape so embedding can be applied to every timestep
            batch_size, n_seq, n_features = X_ecg.size()
            X_ecg = X_ecg.view(batch_size * n_seq, n_features)
        if X_cwt is not None:
            # Reshape so embedding can be applied to every timestep
            batch_size, n_seq, n_features = X_cwt.size()
            X_cwt = X_cwt.view(batch_size * n_seq, n_features)
        if rr is not None:
            # Duplicate rr features for every timestep. repeat_interleave keeps
            # rows aligned with the batch-major flattening above (row i belongs
            # to batch i // n_seq); the previous `.repeat(n_seq, 1)` ordered
            # rows (seq, batch) and paired timesteps with the wrong sample's
            # rr features whenever batch_size > 1.
            rr = rr.repeat_interleave(n_seq, dim=0)

        # Concatenate all available inputs feature-wise and embed every timestep at once
        X_emb = self.embedding(torch.cat([X for X in [X_ecg, X_cwt, rr] if X is not None], dim=1))
        X_emb = X_emb.view(batch_size, n_seq,
                           # Positional_encoding information is already part of the embedding dimensions
                           self.d_embedding if 'concat_after' not in self.positional_encoding_scheme else self.d_embedding - 1)
        return X_emb

    def forward(self, X_ecg: Optional[torch.Tensor], X_cwt: Optional[torch.Tensor], rr: Optional[torch.Tensor]) -> torch.Tensor:
        """Classify a batch of ECG windows.

        Args:
            X_ecg: (B, T, 1 + 1) raw ECG plus timestamp row, or None.
            X_cwt: (B, T, 100 + 1) CWT spectra plus timestamp row, or None.
            rr: (B, 4) RR-interval features, or None.

        Returns:
            (B, n_classes) unnormalised class scores.
        """
        # Pad short sequences along the time axis up to n_input with sentinel -1.
        if X_ecg is not None and hasattr(self, 'n_input') and X_ecg.shape[1] != self.n_input:
            X_ecg = torch.nn.functional.pad(X_ecg, (0, 0, 0, self.n_input - X_ecg.shape[1], 0, 0), mode='constant', value=-1)
        if X_cwt is not None and hasattr(self, 'n_input') and X_cwt.shape[1] != self.n_input:
            X_cwt = torch.nn.functional.pad(X_cwt, (0, 0, 0, self.n_input - X_cwt.shape[1], 0, 0), mode='constant', value=-1)

        if 'linspace' in self.positional_encoding_scheme:
            # Overwrite the timestamp column with the linspace ramp.
            # NOTE(review): this mutates the caller's tensors in place.
            if X_cwt is not None:
                batch_size, _, _ = X_cwt.shape
                X_cwt[:, :, -1:] = self.positional_encoding.repeat(batch_size, 1, 1)
            if X_ecg is not None:
                batch_size, _, _ = X_ecg.shape
                X_ecg[:, :, -1:] = self.positional_encoding.repeat(batch_size, 1, 1)

        X_emb = self.apply_embedding(X_ecg, X_cwt, rr)  # (B, timesteps, d_embedding) or (B, timesteps, d_embedding - 1) if positional encoding is concatenated after the embedding
        if self.positional_encoding_scheme == 'zero_addition_learned':
            X_emb += self.positional_encoding
        elif self.positional_encoding_scheme == 'standard_addition' or self.positional_encoding_scheme == 'standard_addition_learned':
            # Positional encoding module adds its table to the embedding and returns the result
            X_emb = self.positional_encoding(X_emb)
        elif self.positional_encoding_scheme == 'standard_timestamp_addition':
            # Positional encoding computed from the timestamp row of the input
            timestamps = X_cwt[:, :, -1] if X_cwt is not None else X_ecg[:, :, -1]
            X_emb = self.positional_encoding(X_emb, timestamps)
        elif 'concat_after' in self.positional_encoding_scheme:
            # Append the raw timestamp row as the final embedding dimension
            positional_information = X_cwt[:, :, -1:] if X_cwt is not None else X_ecg[:, :, -1:]
            X_emb = torch.concat((X_emb, positional_information), dim=-1)

        X_latent = self.transformer(X_emb)  # (B, d_embedding) pooled latent
        if self.late_fusion:
            rr_latent = self.feature_head(rr)
            X_latent = torch.concat([X_latent, rr_latent], dim=1)
        y = self.classifier(X_latent)
        return y
    

if __name__ == '__main__':
    # Smoke test. ECGTransformer.forward takes (X_ecg, X_cwt, rr): the CWT
    # input is (B, T, 100 freq bins + 1 timestamp row) and rr is (B, 4).
    # Fixes: the previous call passed only two of the three required
    # positional arguments, never moved the model to the GPU, and crashed
    # outright on CPU-only machines.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    X_cwt = torch.randn((64, 1000, 101), device=device)
    rr = torch.randn((64, 4), device=device)
    model = ECGTransformer(n_input=1000).to(device)
    y = model(None, X_cwt, rr)
    print(y.size())