import torch
import torch.nn as nn


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a ``(1, max_len, d_model)`` table of interleaved sin/cos
    values and adds the first ``seq_len`` rows to the input in ``forward``.
    """

    def __init__(self, device, d_model, max_len=512):
        """
        :param device: device on which to build the encoding table
        :param d_model: embedding dimension of the model
        :param max_len: maximum sequence length supported
        """
        super(PositionalEncoding, self).__init__()
        self.device = device
        position = torch.arange(0, max_len, device=device).unsqueeze(1).float()
        # Frequency exponents: one per even column -> divisor 10000^(2i/d_model).
        div_term = torch.arange(0, d_model, 2, device=device).float()
        angles = position / (10000 ** (div_term / d_model))

        encoding = torch.zeros(max_len, d_model, device=device)
        encoding[:, 0::2] = torch.sin(angles)
        # Slice the angles so odd d_model works too: there is one fewer
        # cos column than sin column in that case.
        encoding[:, 1::2] = torch.cos(angles[:, : d_model // 2])
        # Register as a buffer (not a parameter): it is saved in state_dict,
        # follows .to()/.cuda() with the module, and gets no gradients.
        # Leading dim of 1 broadcasts over the batch.
        self.register_buffer("encoding", encoding.unsqueeze(0))

    def forward(self, x):
        """Add positional encodings to the input.

        :param x: tensor of shape (batch, seq_len, d_model)
        :return: ``x`` plus the first ``seq_len`` positional encodings
        """
        # Follow the input's device so the module still works if callers
        # move tensors after construction.
        return x + self.encoding[:, :x.size(1)].to(x.device)


class TransformerEncoderLayer(nn.Module):
    """One post-norm Transformer encoder block: multi-head self-attention
    and a position-wise feed-forward network, each wrapped in a residual
    connection followed by LayerNorm.

    Expects batch-first input of shape (batch, seq_len, d_model).
    """

    def __init__(self, d_model, num_head, dim_feedforward=2048, dropout=0.1):
        """
        :param d_model: embedding dimension
        :param num_head: number of attention heads (must divide d_model)
        :param dim_feedforward: hidden width of the position-wise FFN
        :param dropout: dropout rate for attention and residual branches
        """
        super(TransformerEncoderLayer, self).__init__()
        # BUGFIX: batch_first=True.  The surrounding model is batch-first
        # (positional encoding indexes dim 1 as the sequence; the encoder
        # flattens seq_length * d_model per batch row).  The default
        # batch_first=False would interpret dim 0 as the sequence and make
        # attention mix tokens across *batch* entries.
        self.self_attn = nn.MultiheadAttention(
            d_model, num_head, dropout=dropout, batch_first=True
        )
        self.ffn = nn.Sequential(
            nn.Linear(d_model, dim_feedforward),
            nn.ReLU(),
            nn.Linear(dim_feedforward, d_model)
        )
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply self-attention then FFN, each with residual + LayerNorm.

        :param x: tensor of shape (batch, seq_len, d_model)
        :return: tensor of the same shape
        """
        attn_output, _ = self.self_attn(x, x, x)
        x = x + self.dropout(attn_output)
        x = self.norm1(x)

        ffn_output = self.ffn(x)
        x = x + self.dropout(ffn_output)
        x = self.norm2(x)

        return x


class TransformerEncoder(nn.Module):
    """A stack of identical encoder blocks whose output is flattened into
    one feature vector per batch element."""

    def __init__(self, num_layers, d_model, num_head, dim_feedforward=2048, dropout=0.1):
        """
        :param num_layers: how many encoder blocks to stack
        :param d_model: embedding dimension shared by all blocks
        :param num_head: attention heads per block
        :param dim_feedforward: FFN hidden width per block
        :param dropout: dropout rate per block
        """
        super(TransformerEncoder, self).__init__()
        blocks = [
            TransformerEncoderLayer(d_model, num_head, dim_feedforward, dropout)
            for _ in range(num_layers)
        ]
        self.layers = nn.ModuleList(blocks)
        self.flatten = nn.Flatten()

    def forward(self, x):
        """Pass ``x`` through every block in order, then collapse the
        sequence and feature dims: (batch, seq, d_model) -> (batch, seq*d_model)."""
        hidden = x
        for block in self.layers:
            hidden = block(hidden)
        return self.flatten(hidden)


class FFNLayer(nn.Module):
    """Two-layer feed-forward head: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.1):
        """
        :param input_dim: size of the incoming feature vector
        :param hidden_dim: width of the hidden layer
        :param output_dim: size of the produced vector (e.g. class count)
        :param dropout: dropout rate applied after the activation
        """
        super(FFNLayer, self).__init__()
        stages = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, output_dim),
        ]
        self.ffn = nn.Sequential(*stages)

    def forward(self, x):
        """Map ``x`` from ``input_dim`` to ``output_dim`` features."""
        return self.ffn(x)


class TransformerClassifier(nn.Module):
    """Transformer encoder followed by a feed-forward classification head.

    ``forward`` returns raw logits — ``self.softmax`` is constructed but
    never applied, so the output pairs directly with losses that expect
    logits (e.g. ``nn.CrossEntropyLoss``).
    """

    def __init__(self, num_layers, d_model, seq_length, num_head, dim_feedforward, ff_hidden_dim, output_dim,
                 dropout=0.1, device=torch.device("cpu")):
        """
        :param num_layers: number of encoder blocks
        :param d_model: embedding dimension
        :param seq_length: fixed sequence length (also the positional table size)
        :param num_head: attention heads per block
        :param dim_feedforward: FFN width inside each encoder block
        :param ff_hidden_dim: hidden width of the classification head
        :param output_dim: number of output logits
        :param dropout: dropout rate throughout
        :param device: device the positional table is built on
        """
        super(TransformerClassifier, self).__init__()
        self.positional_encoding = PositionalEncoding(device, d_model, max_len=seq_length)
        self.transformer_encoder = TransformerEncoder(num_layers, d_model, num_head, dim_feedforward, dropout)
        # The encoder flattens (batch, seq, d_model) -> (batch, seq*d_model),
        # hence the head's input size below.
        self.ffn = FFNLayer(d_model * seq_length, ff_hidden_dim, output_dim, dropout)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Classify ``x``; returns logits of shape (batch, output_dim).

        NOTE(review): the bare ``squeeze()`` drops *every* size-1 dim, so a
        batch of one — (1, seq, d_model) — collapses to (seq, d_model) and
        breaks downstream shapes.  Confirm callers never pass batch size 1,
        or squeeze an explicit dim instead.
        """
        hidden = x.squeeze()
        # (An earlier revision omitted the positional encoding here.)
        hidden = self.positional_encoding(hidden)
        hidden = self.transformer_encoder(hidden)
        return self.ffn(hidden)
