from torch import nn
import torch.nn.functional as F
import torch
import gxl_attention_module
import data_handler


class AsrModel(nn.Module):
    """CNN + self-attention + GRU acoustic model for ASR.

    Pipeline: Conv1d over the feature axis -> LayerNorm/ReLU with a residual
    add -> multi-head self-attention (project module; applies its own
    residual) -> LayerNorm/ReLU -> 2-layer GRU -> linear projection to the
    vocabulary -> LayerNorm/ReLU -> softmax over the vocabulary dimension.
    """

    def __init__(self, fbank_dim=100, vocab_size=1200, hidden_dim=1024, dropout=0.0897):
        """
        Args:
            fbank_dim: number of filter-bank features per input frame.
            vocab_size: size of the output vocabulary.
            hidden_dim: GRU hidden state size.
            dropout: dropout probability shared by attention, GRU and the
                standalone Dropout layer.
        """
        super().__init__()  # zero-arg super: Python 3 idiom
        # Conv1d consumes (batch, fbank, seq); forward() permutes the
        # (batch, seq, fbank) input accordingly.  kernel_size=3 with
        # padding=1 preserves the sequence length.
        self.cnn_model = nn.Conv1d(in_channels=fbank_dim, out_channels=fbank_dim,
                                   kernel_size=3, padding=1)
        self.self_attention = gxl_attention_module.GxlMultiHeadAttention(
            head_num=2, embedding_dim=fbank_dim,
            representative_dim=fbank_dim, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        # batch_first defaults to False, hence the (seq, batch, fbank)
        # permute in forward() before the GRU.
        self.rnn = nn.GRU(fbank_dim, hidden_dim, num_layers=2, dropout=dropout)
        self.dense = nn.Linear(hidden_dim, vocab_size)
        self.layer_norm1 = nn.LayerNorm(fbank_dim)
        self.layer_norm2 = nn.LayerNorm(fbank_dim)
        self.layer_norm3 = nn.LayerNorm(vocab_size)
        self.act = nn.ReLU()

    def forward(self, X, x_len):
        """Run a batch of feature frames through the model.

        Args:
            X: float tensor of shape (batch, seq, fbank_dim).
            x_len: per-utterance valid lengths, used to build the attention
                padding mask (the -100 fill value's semantics live in
                gxl_attention_module.get_padding_mask — TODO confirm).

        Returns:
            Tensor of shape (batch, seq, vocab_size) with softmax applied
            over the vocabulary dimension.
        """
        X = X.permute(0, 2, 1)                    # (batch, fbank, seq) for Conv1d
        X = self.cnn_model(X).permute(0, 2, 1)    # back to (batch, seq, fbank)
        X = self.act(self.layer_norm1(X)) + X     # residual connection
        padding_self_attention_mask = gxl_attention_module.get_padding_mask(x_len, -100)
        X = self.self_attention(X, X, X, padding_self_attention_mask)  # residual built in
        X = self.act(self.layer_norm2(X))
        X = X.permute(1, 0, 2)                    # (seq, batch, fbank) for the GRU
        X, _ = self.rnn(X)
        output = self.dense(X).permute(1, 0, 2)   # (batch, seq, vocab)
        output = self.act(self.layer_norm3(output))
        # NOTE(review): returning softmax probabilities means the training
        # loss must NOT be nn.CrossEntropyLoss / nn.CTCLoss applied directly
        # (those expect logits / log-probs).  Confirm against the training
        # code; if it uses one of those, this should be log_softmax or raw
        # logits instead.
        output = F.softmax(output, dim=2)
        return output


if __name__ == '__main__':
    # Smoke test: push one random batch through the model and print the
    # output shape (expected: (10, 400, 1200)).
    model = AsrModel()
    # Renamed from `input` to avoid shadowing the builtin.
    features = torch.randn(10, 400, 100)  # (batch, seq, fbank)
    # Lengths drawn from [1, 400): a zero-length utterance would make the
    # attention padding mask degenerate, so the low bound is 1, not 0.
    lens = torch.randint(1, 400, (10,))
    lens[-1] = 400  # guarantee at least one full-length utterance
    print(model(features, lens).shape)
