import math
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
from torcheeg.models import ViT, Conformer
from torcheeg.models.transformer.vit import Transformer

class MyModel(nn.Module):
    """Linear embedding + learned positional encoding + Transformer classifier.

    Expects a batch-first input of shape (batch, seq_len, in_chs) and returns
    logits of shape (batch, num_classes).

    Args:
        in_chs: input feature dimension per time step.
        hid_chs: hidden (embedding) dimension fed to the transformer.
        depth: number of transformer layers.
        heads: number of attention heads.
        head_chs: per-head channel width.
        mlp_chs: transformer MLP hidden width.
        num_classes: output class count.
        dropout: dropout probability after adding positional encodings.
        max_len: maximum supported sequence length for the learned positions.
    """

    def __init__(self, 
                 in_chs:int = 100,
                 hid_chs:int = 32, 
                 depth:int = 3,
                 heads:int = 4,
                 head_chs:int = 64,
                 mlp_chs:int = 64,
                 num_classes:int = 3,
                 dropout:float = 0.,
                 max_len:int = 100) -> None:
        super().__init__()
        self.dropout = nn.Dropout(dropout)

        self.linear = nn.Linear(in_features=in_chs, out_features=hid_chs)
        # Learned positional embedding. Kept in its original (max_len, 1,
        # hid_chs) layout for checkpoint compatibility; transposed to
        # (1, seq, hid_chs) inside forward for batch-first broadcasting.
        self.pe = nn.Parameter(torch.empty(max_len, 1, hid_chs))
        nn.init.uniform_(self.pe, -0.02, 0.02)

        self.transformer = Transformer(hid_chs, depth, heads, 
                                       head_chs, mlp_chs, dropout)
        self.mlp_head = nn.Sequential(nn.LayerNorm(hid_chs), 
                                      nn.Linear(hid_chs, num_classes))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear(x)
        # BUG FIX: the torcheeg ViT Transformer and the mean over dim=1 below
        # treat the input as batch-first, so positions must be sliced by the
        # sequence length x.size(1), not the batch size x.size(0). The
        # original indexing broadcast a single embedding per *sample* across
        # every time step instead of one per position.
        x = x + self.pe[:x.size(1)].transpose(0, 1)
        x = self.dropout(x)
        x = self.transformer(x)
        # Mean-pool over the sequence dimension before classification.
        x = x.mean(dim=1)
        x = self.mlp_head(x)
        return x

class MyViT(nn.Module):
    """Thin wrapper around torcheeg's ViT that inserts a channel dimension.

    The grid size (16, 100), temporal patch size 1 and spatial patch size
    (4, 4) are fixed; extra keyword arguments are forwarded to ViT unchanged.
    """

    def __init__(self, chunk_size:int=1, num_classes:int=3, **kwargs):
        super().__init__()
        self.vit = ViT(chunk_size=chunk_size,
                       num_classes=num_classes,
                       grid_size=(16, 100),
                       t_patch_size=1,
                       s_patch_size=(4, 4),
                       **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Add a singleton channel axis before handing off to the ViT backbone.
        return self.vit(x.unsqueeze(1))

class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017).

    Adds position-dependent sin/cos features to a batch-first input of shape
    (batch, seq_len, d_model), for any seq_len up to max_len.

    Args:
        d_model: feature dimension of the encoded input.
        max_len: maximum sequence length to precompute positions for.
    """

    def __init__(self, d_model:int=512, max_len:int=5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).float().unsqueeze(1)
        # Geometric progression of frequencies: wavelengths span 2*pi to
        # 10000*2*pi across the even feature indices.
        div_term = torch.exp(torch.arange(0, d_model, 2).float()
                             * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # (1, max_len, d_model) for batch broadcasting
        # Registered as a buffer (not a Parameter): moves with .to(device) and
        # is saved in state_dict but never trained, so the original explicit
        # requires_grad=False was redundant.
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # BUG FIX: slice the table to the actual sequence length. Adding the
        # full (1, max_len, d_model) buffer fails to broadcast whenever
        # x.size(1) != max_len.
        return x + self.pe[:, :x.size(1)]

class MyDICENet(nn.Module):
    """Skeleton of a DICE-Net-style classifier (forward pass not implemented).

    NOTE(review): the original constructor omitted required arguments to
    nn.Conv2d, nn.BatchNorm1d and nn.Linear, so instantiation raised
    TypeError. Plausible values are filled in below: 19 channels matches
    groups=19 and the positional encoding's d_model; 24 features matches the
    preceding Linear's out_features; a single Sigmoid output suggests a
    binary target. TODO: confirm each against the intended architecture.

    Args:
        dropout: dropout probability used inside the feed-forward head.
    """

    def __init__(self, dropout: float = 0.2):
        super().__init__()
        # Depthwise conv: groups=19 requires in_channels divisible by 19.
        # in/out channel counts are assumed -- TODO confirm.
        self.conv = nn.Conv2d(in_channels=19, out_channels=19,
                              kernel_size=(5, 5), stride=(1, 1), groups=19)
        self.pe = PositionalEncoding(d_model=19, max_len=1)
        # Learnable class token; 26 presumably matches the token count after
        # tokenization -- TODO confirm against the (unwritten) forward pass.
        self.cls = nn.Parameter(torch.randn(1, 26, 1))
        encoder = nn.TransformerEncoderLayer(d_model=2, nhead=2)
        self.transformer = nn.TransformerEncoder(encoder, num_layers=1)
        self.ffn = nn.Sequential(
            nn.LayerNorm(normalized_shape=52),
            nn.Dropout(p=dropout),
            nn.Linear(in_features=52, out_features=24),
            # num_features must equal the preceding Linear's output width.
            nn.BatchNorm1d(num_features=24),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            # Sigmoid after a single unit -> binary probability. TODO confirm.
            nn.Linear(in_features=24, out_features=1),
            nn.Sigmoid()
        )
        self.act = nn.GELU()

    def forward(self, x1, x2):
        # Not implemented in the original source; kept as a placeholder.
        ...
    
class MyConformer(nn.Module):
    """Thin wrapper around torcheeg's Conformer that adds a channel axis."""

    def __init__(self, num_electrodes=16, sampling_rate=100, num_classes:int=3) -> None:
        super().__init__()
        self.conformer = Conformer(
            num_electrodes=num_electrodes,
            sampling_rate=sampling_rate,
            num_classes=num_classes,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Insert the singleton channel dimension expected by Conformer.
        return self.conformer(x.unsqueeze(1))

class MyConformer2(nn.Module):
    """Compact EEG-Conformer-style model: conv tokenizer + Transformer + MLP.

    Input:  (batch, ch, time) raw signal.
    Output: (batch, num_classes) logits.

    Args:
        ch: number of input electrodes/rows consumed by the spatial conv.
        num_classes: output class count.
        d_model: token/embedding width throughout the model.
        nhead: attention heads per encoder layer.
        num_encoder_layers: number of Transformer encoder layers.
    """

    def __init__(self, ch:int=16, num_classes:int=3, d_model:int=128, 
                 nhead:int=8, num_encoder_layers:int=3):
        super().__init__()
        # Temporal then spatial convolutions (EEG-Conformer tokenizer).
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=d_model, kernel_size=(1, 25), stride=(1, 1))
        self.conv2 = nn.Conv2d(in_channels=d_model, out_channels=d_model, kernel_size=(ch, 1), stride=(1, 1))
        self.pool = nn.AvgPool2d(kernel_size=(1, 75), stride=(1, 15))
        # BUG FIX: batch_first=True so the encoder reads (batch, tokens, d).
        # The default (seq, batch, d) layout made attention mix *samples*
        # across the batch instead of tokens within a sample.
        encoder = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead,
                                             batch_first=True)
        self.transformer = nn.TransformerEncoder(encoder, num_layers=num_encoder_layers)
        self.block = nn.Sequential(
            nn.Linear(in_features=d_model, out_features=d_model), 
            nn.ReLU(),
            nn.Linear(in_features=d_model, out_features=num_classes)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x.unsqueeze(1)                 # (b, 1, ch, time)
        x = self.conv1(x)                  # (b, d_model, ch, t1)
        x = self.conv2(x)                  # (b, d_model, 1, t1)
        x = self.pool(x)                   # (b, d_model, 1, m)
        # (b, d_model, 1, m) -> (b, m, d_model): one token per pooled step.
        # (torch-native equivalent of the former einops Rearrange module.)
        x = x.squeeze(2).permute(0, 2, 1)
        x = self.transformer(x)            # (b, m, d_model)
        # BUG FIX: the original flattened (m, d_model) features into a head
        # declared with in_features=d_model -- a shape mismatch whenever the
        # token count m > 1. Mean-pool over tokens instead; this is identical
        # to the flatten when m == 1 and well-defined otherwise.
        x = x.mean(dim=1)
        x = self.block(x)
        return x
    
if __name__ == '__main__':
    import torchinfo

    # Smoke test: summarize PositionalEncoding(d_model=100, max_len=16) on a
    # (batch=2, seq=16, d=100) sample.
    # BUG FIX: fall back to CPU instead of crashing on machines without CUDA.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = PositionalEncoding(100, 16)
    model.to(device)
    sample = torch.randn(size=(2, 16, 100), device=device)
    torchinfo.summary(model, sample.shape)
