import torch
import torch.nn as nn


class TransformerEncoder(nn.Module):
    """A stack of pre-norm Transformer encoder layers applied in sequence.

    Args:
        embed_dim: model (embedding) dimension of the input/output.
        hidden_dim: inner dimension of each layer's feed-forward block.
        num_layers: number of stacked encoder layers.
        num_heads: attention heads per layer.
        dropout: dropout probability used throughout the layers.
    """

    def __init__(self, embed_dim, hidden_dim, num_layers, num_heads, dropout):
        super().__init__()

        # Stored for introspection by callers; not used in forward().
        self.dim = embed_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.dropout = dropout

        # Identically-configured encoder layers, applied one after another.
        self.layers = nn.ModuleList(
            EncoderLayer(embed_dim, hidden_dim, num_heads, dropout)
            for _ in range(num_layers)
        )

    def forward(self, x):
        """Pass `x` (shape [bs, time, dim] — per the layers' comments) through
        every encoder layer in order and return the final activations."""
        out = x
        for block in self.layers:
            out = block(out)
        return out


class EncoderLayer(nn.Module):
    """One pre-norm Transformer encoder layer.

    Each of the two sublayers (self-attention, feed-forward) is applied as
    LayerNorm -> sublayer -> dropout, then added back to its input
    (residual connection).

    Args:
        embed_dim: model dimension.
        hidden_dim: feed-forward inner dimension.
        num_heads: attention heads.
        dropout: dropout probability for both residual branches.
    """

    def __init__(self, embed_dim, hidden_dim, num_heads, dropout):
        super().__init__()

        self.self_attention = MultiHeadAttention(embed_dim, num_heads, dropout)
        self.feed_forward = FeedForward(embed_dim, hidden_dim, dropout)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply attention and feed-forward sublayers with residuals.

        `x` is [bs, time, dim]; the output has the same shape.
        """
        # Pre-norm residual: normalize first, run the sublayer, add back.
        attn_out = self.self_attention(self.norm1(x))
        x = x + self.dropout(attn_out)

        ff_out = self.feed_forward(self.norm2(x))
        return x + self.dropout(ff_out)


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention with a fused QKV projection.

    Args:
        embed_dim: model dimension; must be divisible by ``num_heads``.
        num_heads: number of attention heads.
        dropout: dropout probability applied to the output projection.

    Raises:
        ValueError: if ``embed_dim`` is not divisible by ``num_heads``
            (previously this surfaced later as an opaque reshape error).
    """

    def __init__(self, embed_dim, num_heads, dropout):
        super().__init__()

        # Validate up front: the per-head split in forward() requires this.
        if embed_dim % num_heads != 0:
            raise ValueError(
                f"embed_dim ({embed_dim}) must be divisible by "
                f"num_heads ({num_heads})"
            )

        self.dim = embed_dim
        self.num_heads = num_heads
        # 1/sqrt(head_dim) keeps the softmax logits in a stable range.
        self.scale = (embed_dim // num_heads) ** -0.5
        # Single linear produces Q, K, V stacked along the output dimension.
        self.qkv = nn.Linear(embed_dim, embed_dim * 3, bias=True)
        self.proj = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, attn_mask=None):
        """Compute self-attention over the time dimension.

        Args:
            x: input of shape [bs, time, dim].
            attn_mask: optional additive mask broadcastable to
                [bs, num_heads, time, time]. Positions to exclude should hold
                a large negative value (e.g. ``-inf``); it is added to the
                attention logits before the softmax. Default ``None`` keeps
                the original unmasked behavior.

        Returns:
            Tensor of shape [bs, time, dim].
        """
        B, T, D = x.shape
        # [bs, time, 3*dim] -> [3, bs, head, time, head_dim]
        qkv = (
            self.qkv(x)
            .reshape(B, T, 3, self.num_heads, D // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )

        # Each: [bs, head, time, head_dim]
        q, k, v = qkv[0], qkv[1], qkv[2]

        # [bs, head, time, time]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if attn_mask is not None:
            attn = attn + attn_mask
        attn = attn.softmax(dim=-1)

        # [bs, head, time, head_dim] -> [bs, time, head, head_dim] -> [bs, time, dim]
        x = (attn @ v).transpose(1, 2).reshape(B, T, D)
        x = self.proj(x)
        x = self.dropout(x)
        # [bs, time, dim]
        return x


class FeedForward(nn.Module):
    """Position-wise two-layer MLP: expand to ``hidden_dim``, apply ReLU,
    project back to ``embed_dim``, then apply dropout.

    Args:
        embed_dim: input/output dimension.
        hidden_dim: inner (expanded) dimension.
        dropout: dropout probability applied to the output.
    """

    def __init__(self, embed_dim, hidden_dim, dropout):
        super().__init__()

        self.linear1 = nn.Linear(embed_dim, hidden_dim)
        self.act = nn.ReLU()
        self.linear2 = nn.Linear(hidden_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply the MLP independently at every position of `x`."""
        hidden = self.act(self.linear1(x))
        return self.dropout(self.linear2(hidden))
