import torch
import numpy as np


class CNN2D(torch.nn.Module):
    """Three-branch CNN classifier that fuses up to three modalities:
    a raw 1-D ECG trace, a 2-D CWT scalogram, and a small vector of
    hand-crafted features (presumably RR-interval stats — confirm with caller).

    Each enabled branch encodes its input to a 64-dim embedding; the
    embeddings are concatenated and classified by an MLP.  ``n_modalities``
    must equal the number of non-None inputs passed to :meth:`forward`,
    since the fusion MLP's input width is ``64 * n_modalities``.
    """

    @staticmethod
    def _init_weights(module):
        # Kaiming-normal weights and zero bias for every conv / linear layer.
        # Bug fix: the original check omitted Conv1d, so the ECG branch was
        # silently left at PyTorch's default initialisation.
        if isinstance(module, (torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d)):
            torch.nn.init.kaiming_normal_(module.weight)
            torch.nn.init.constant_(module.bias, val=0.0)

    def __init__(self, n_classes=4, n_modalities=3, lrelu_slope=.2, device='cuda'):
        """
        Args:
            n_classes: number of output logits.
            n_modalities: how many of (ecg, cwt, features) will be supplied.
            lrelu_slope: negative slope for every LeakyReLU.
            device: device the whole model is moved to.
        """
        super(CNN2D, self).__init__()
        self.n_classes = n_classes
        self.lrelu_slope = lrelu_slope
        self.n_modalities = n_modalities

        # 1-D conv encoder for the raw ECG signal -> 64-dim embedding.
        self.ecg_head = torch.nn.Sequential(
            torch.nn.Conv1d(in_channels=1, out_channels=16, kernel_size=7),
            torch.nn.BatchNorm1d(16),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.MaxPool1d(5),

            torch.nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3),
            torch.nn.BatchNorm1d(32),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.MaxPool1d(3),

            torch.nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3),
            torch.nn.BatchNorm1d(64),
            torch.nn.LeakyReLU(self.lrelu_slope),
            # Global max-pool makes the embedding independent of input length.
            torch.nn.AdaptiveMaxPool1d(1),

            torch.nn.Flatten()
        )

        # 2-D conv encoder for the CWT scalogram -> 64-dim embedding.
        self.cwt_head = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=1, out_channels=16, kernel_size=7),
            torch.nn.BatchNorm2d(16),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.MaxPool2d(5),

            torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3),
            torch.nn.BatchNorm2d(32),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.MaxPool2d(3),

            torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
            torch.nn.BatchNorm2d(64),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.AdaptiveMaxPool2d((1, 1)),

            torch.nn.Flatten()
        )

        # MLP encoder for the 4-dim feature vector -> 64-dim embedding.
        self.feature_head = torch.nn.Sequential(
            torch.nn.Linear(4, 128),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.Linear(128, 128),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.Linear(128, 64),
            torch.nn.LeakyReLU(self.lrelu_slope),
        )

        # Fusion classifier over the concatenated embeddings.
        self.linear_blocks = torch.nn.Sequential(
            torch.nn.Linear(64 * self.n_modalities, 256),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.Linear(256, 64),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.Linear(64, 32),
            torch.nn.LeakyReLU(self.lrelu_slope),
            torch.nn.Linear(32, self.n_classes)
        )

        self.apply(CNN2D._init_weights)

        # Bug fix: the original only moved cwt_head / feature_head /
        # linear_blocks, leaving ecg_head on CPU — a mixed-device crash
        # whenever device='cuda'.  Move the whole module instead.
        self.to(device)

        n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        print(f"Number of parameters: {n_params:,}")


    def forward(self, X_ecg, X_cwt, X_rr):
        """Run any subset of modalities; pass None to skip a branch.

        Args:
            X_ecg: (B, T, 2) — component 0 is the signal, the other is
                dropped as a timestamp; or None.
            X_cwt: (B, T, 101) — the last column is dropped as a timestamp;
                or None.
            X_rr:  (B, 4) feature vector; or None.

        Returns:
            (B, n_classes) logits.  The number of non-None inputs must
            equal ``n_modalities``.
        """
        # Strip the timestamp component and add the channel dimension.
        if X_ecg is not None:
            X_ecg = X_ecg[:, :, 0]  # (B, T)
            X_ecg = torch.unsqueeze(X_ecg, 1)  # (B, 1, T)

        if X_cwt is not None:
            X_cwt = X_cwt[:, :, :-1]  # (B, T, 100)
            X_cwt = torch.unsqueeze(X_cwt, 1)  # (B, 1, T, 100)

        embeddings = []
        if X_ecg is not None:
            embeddings.append(self.ecg_head(X_ecg))
        if X_cwt is not None:
            embeddings.append(self.cwt_head(X_cwt))
        if X_rr is not None:
            embeddings.append(self.feature_head(X_rr))

        z = torch.cat(embeddings, dim=1)

        return self.linear_blocks(z)


if __name__ == '__main__':
    # Smoke test on CPU: build the model, report its size, and run one
    # forward pass with all three modalities present.
    model = CNN2D(n_classes=3, n_modalities=3, device='cpu')
    print(model)
    n_total = sum(param.numel() for param in model.parameters())
    print(f"Model parameters: {n_total:,}")
    batch = (torch.randn(64, 1000, 2), torch.randn(64, 1000, 101), torch.randn(64, 4))
    print(model(*batch).shape)