import torch
from torch import nn


class PositionEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Adds a fixed sin/cos positional signal to the input embeddings and
    applies dropout to the sum.
    """

    def __init__(self, embedding_dims, max_len, dropout):
        """
        Args:
            embedding_dims: embedding size per position (d_model).
            max_len: maximum sequence length supported by the table.
            dropout: dropout probability applied to (x + pe).
        """
        super().__init__()

        self.dropout = nn.Dropout(dropout)

        # angles[i, j] = i / 10000^(2j / d_model), shape (max_len, ceil(d/2)).
        position = torch.arange(max_len).unsqueeze(-1)
        div = torch.pow(10000, torch.arange(0, embedding_dims, 2) / embedding_dims)
        angles = position / div

        # Fixed (non-trainable) table of shape (1, max_len, embedding_dims).
        # Registered as a buffer so it follows .to(device) and state_dict.
        P = torch.zeros(1, max_len, embedding_dims)
        P[:, :, 0::2] = torch.sin(angles)
        # For odd embedding_dims the cos half has one column fewer than the
        # sin half; slice `angles` to match (the original crashed here).
        P[:, :, 1::2] = torch.cos(angles[:, :embedding_dims // 2])
        self.register_buffer('P', P)

    def forward(self, x):
        """Add positional encoding to x.

        Args:
            x: embedded input of shape (batch_size, seq_len, embedding_dims).

        Returns:
            Tensor of the same shape, dropout(x + pe).
        """
        # Move only the needed slice to x's device; do NOT reassign self.P
        # each call (the original replaced the registered buffer every
        # forward, defeating the point of register_buffer).
        seq_len = x.shape[1]
        pe = self.P[:, :seq_len, :].to(x.device)
        return self.dropout(x + pe)


class SpeechModel(nn.Module):
    """Transformer-encoder acoustic model: mel features -> per-frame logits."""

    def __init__(self, n_mels, embedding_dims, nhead, num_layers, vocab_size):
        """
        Args:
            n_mels: number of mel filterbank channels per input frame.
            embedding_dims: transformer model width (d_model).
            nhead: number of attention heads per encoder layer.
            num_layers: number of stacked encoder layers.
            vocab_size: size of the output vocabulary.
        """
        super().__init__()
        self.embedding_dims = embedding_dims
        self.vocab_size = vocab_size

        # Speech frames are continuous features, so a linear projection
        # replaces the usual token-embedding lookup table.
        self.embedding = nn.Linear(n_mels, embedding_dims)
        self.position_encode = PositionEncoding(embedding_dims, 2000, 0.2)
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=embedding_dims, nhead=nhead, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(
            self.encoder_layer, num_layers=num_layers)

        self.ln = nn.Linear(embedding_dims, vocab_size)

    def forward(self, x):
        """Run the model.

        Args:
            x: mel-spectrogram frames of shape (batch_size, seq_len, n_mels).

        Returns:
            Logits of shape (batch_size, seq_len, vocab_size).
        """
        x = self.embedding(x)
        x = self.position_encode(x)
        x = self.transformer_encoder(x)
        # nn.Linear applies to the last dimension, so the original
        # reshape(-1, d) / reshape(batch, -1, vocab) round-trip was
        # unnecessary and is removed.
        return self.ln(x)
