import torch
import torch.nn as nn


class LearnablePositionalEncoding(nn.Module):
    """Add a learned per-position embedding to a batch of sequences.

    The embedding table has shape ``(max_len, d_model)``; at call time only
    the first ``seq_len`` rows are used, so any input with
    ``seq_len <= max_len`` is supported (the original implementation added
    the full table and failed for shorter sequences).
    """

    def __init__(self, d_model: int, max_len: int = 5000, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # nn.Parameter is trainable by default; initialize directly with
        # N(0, 0.02) instead of filling with randn and immediately overwriting.
        self.pos_emb = nn.Parameter(torch.empty(max_len, d_model))
        nn.init.normal_(self.pos_emb, mean=0, std=0.02)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return ``x + pos_emb[:seq_len]``.

        Args:
            x: tensor of shape ``(batch, seq_len, d_model)`` with
                ``seq_len <= max_len``.

        Raises:
            ValueError: if the sequence is longer than the embedding table.
        """
        seq_len = x.size(1)
        if seq_len > self.pos_emb.size(0):
            raise ValueError(
                f"sequence length {seq_len} exceeds max_len {self.pos_emb.size(0)}"
            )
        # Slice to the input length so shorter sequences broadcast correctly.
        return x + self.pos_emb[:seq_len]


class TransformerSequenceClassifier(nn.Module):
    """Transformer-encoder classifier over fixed-length feature sequences.

    Input of shape ``(batch, seq_len, input_dim)`` is lifted per timestep to
    ``d_model`` channels, a learned [CLS] token is prepended, and the encoded
    [CLS] representation is mapped to ``num_classes`` logits.
    """

    def __init__(self, input_dim: int = 6, seq_len: int = 128, num_classes: int = 5, d_model: int = 128,
        n_heads: int = 8, n_layers: int = 4, dim_feedforward: int = 128, dropout: float = 0.1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Point-wise (kernel size 1) conv stack: a per-timestep MLP that
        # projects input_dim features up to d_model channels.
        self.input_proj = nn.Sequential(
            nn.Conv1d(input_dim, d_model, 1), nn.GELU(),
            nn.Conv1d(d_model, d_model, 1), nn.GELU(),
            nn.Conv1d(d_model, d_model, 1), nn.GELU(),
            nn.Conv1d(d_model, d_model, 1), nn.GELU(),
        )

        # Learned classification token; nn.Parameter is trainable by default.
        self.cls_token = nn.Parameter(torch.randn(1, 1, d_model))
        # +1 position to account for the prepended [CLS] token.
        self.pos_embed = LearnablePositionalEncoding(d_model, max_len=seq_len + 1)

        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=n_heads, dim_feedforward=dim_feedforward,
            dropout=dropout, batch_first=True)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=n_layers)
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x: torch.Tensor, src_key_padding_mask: "torch.Tensor | None" = None) -> torch.Tensor:
        """Classify a batch of sequences.

        Args:
            x: float tensor of shape ``(batch, seq_len, input_dim)``.
            src_key_padding_mask: optional bool tensor of shape
                ``(batch, seq_len)`` where ``True`` marks padded timesteps
                that attention should ignore. Default ``None`` preserves the
                previous unmasked behavior.

        Returns:
            Logits of shape ``(batch, num_classes)``.
        """
        batch_size = x.size(0)
        # Conv1d expects (batch, channels, seq); project per timestep, then
        # restore (batch, seq, d_model) for the batch_first encoder.
        x = self.input_proj(x.transpose(1, 2)).transpose(1, 2)
        # expand avoids materializing per-sample copies; cat copies anyway.
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = self.pos_embed(x)

        if src_key_padding_mask is not None:
            # The [CLS] position is never padded — prepend a False column.
            cls_pad = torch.zeros(
                batch_size, 1, dtype=torch.bool, device=src_key_padding_mask.device
            )
            src_key_padding_mask = torch.cat((cls_pad, src_key_padding_mask.bool()), dim=1)

        cls_output = self.encoder(x, src_key_padding_mask=src_key_padding_mask)[:, 0, :]
        return self.classifier(cls_output)
