# NOTE(review): `from d2l import torch` binds the d2l.torch module under the
# name `torch`; the real `import torch` below deliberately supersedes it so
# that `torch.tensor` etc. resolve against the actual PyTorch package.
from d2l import torch
import torch
import torch.nn as nn

import data_handler


class ConvBlock(nn.Module):
    """Convolution wrapped with weight-norm, dropout and a GLU activation.

    The wrapped conv's weights are (re)initialised with Kaiming normal
    before the weight-norm reparametrisation is applied.  Because GLU
    acts over ``dim=1``, the block halves the channel dimension: size
    ``out_channels`` at twice the desired output width.
    """

    def __init__(self, conv, dropout=0.0897):
        """
        Args:
            conv: an ``nn.Conv*`` module to wrap (its weights are re-initialised).
            dropout: dropout probability applied between conv and GLU.
        """
        super(ConvBlock, self).__init__()
        nn.init.kaiming_normal_(conv.weight)
        self.conv = nn.utils.weight_norm(conv)
        self.dropout = nn.Dropout(dropout)
        self.act = nn.GLU(dim=1)

    def forward(self, x):
        # conv -> dropout -> GLU; GLU halves the channel dimension.
        return self.act(self.dropout(self.conv(x)))


class GateConvBlock(nn.Module):
    """Fully-convolutional CTC acoustic model built from GLU-gated conv blocks.

    Maps a (B, 161, T) filter-bank spectrogram to per-frame logits over the
    vocabulary, shape (B, V, T').  Greedy CTC decoding collapses repeated
    labels and strips the blank token.
    """

    def __init__(self, blank, vocab):
        """
        Args:
            blank: index of the CTC blank token within ``vocab``.
            vocab: indexable token table; ``len(vocab)`` gives the number of
                output classes and ``vocab[i]`` the text for class ``i``.
        """
        super(GateConvBlock, self).__init__()
        self.vocab = vocab
        self.blank = blank
        output_units = len(vocab)
        # Every ConvBlock ends in GLU(dim=1), which halves the channel
        # count -- hence each out_channels is 2x the next in_channels.
        modules = [ConvBlock(
            nn.Conv1d(in_channels=161, out_channels=500, kernel_size=48,
                      stride=2, padding=97),
            dropout=0.2)]
        for _ in range(7):
            modules.append(ConvBlock(nn.Conv1d(250, 500, 7, 1), 0.3))
        modules.append(ConvBlock(nn.Conv1d(250, 2000, 32, 1), 0.5))
        modules.append(ConvBlock(nn.Conv1d(1000, 2000, 1, 1), 0.5))
        # Final projection to vocabulary logits (no gating, no dropout).
        modules.append(nn.utils.weight_norm(nn.Conv1d(1000, output_units, 1, 1)))
        self.cnn = nn.Sequential(*modules)

    def forward(self, x, lens):  # -> B * V * T
        """Run the conv stack and track how each conv shrinks the lengths.

        Args:
            x: (B, 161, T) input spectrogram batch.
            lens: per-sample valid input lengths.

        Returns:
            (logits, out_lens): logits of shape (B, V, T') and the
            corresponding valid output lengths.
        """
        x = self.cnn(x)
        for module in self.modules():
            # isinstance (was `type(module) == ...`) also matches the
            # weight-normed convs; apply the standard conv length formula.
            if isinstance(module, nn.Conv1d):
                lens = (lens - module.kernel_size[0] + 2 * module.padding[0]) // module.stride[0] + 1
        return x, lens

    def decode(self, out, out_lens):
        """Greedy CTC decode: argmax per frame, collapse repeats, drop blanks.

        Args:
            out: (B, V, T) logit tensor.
            out_lens: per-sample valid frame counts.

        Returns:
            list[str]: one decoded string per batch element.
        """
        idxs = out.argmax(1)
        texts = []
        # Renamed loop variable: the original shadowed the parameter.
        for idx, length in zip(idxs, out_lens):
            idx = idx[:length]
            text = ""
            last = None
            for i in idx:
                token = i.item()
                # CTC rule: emit only on label change, never for blank.
                if token not in (last, self.blank):
                    # Bug fix: was `self.vocabulary`, an attribute that is
                    # never set (the constructor stores `self.vocab`).
                    text += self.vocab[token]
                last = token
            texts.append(text)
        return texts

    def predict(self, path):
        """Transcribe one audio file (greedy CTC) and return the text."""
        self.eval()
        try:
            with torch.no_grad():
                # Presumably a (161, T) feature matrix -- TODO confirm
                # against data_handler.compute_fbank.
                spec = data_handler.compute_fbank(path)
                spec.unsqueeze_(0)  # add batch dimension
                out = self.cnn(spec)
                out_len = torch.tensor([out.size(-1)])
                texts = self.decode(out, out_len)
        finally:
            # Restore train mode even if feature extraction/decoding fails.
            self.train()
        return texts[0]



def _demo():
    """Build the model and transcribe one sample file (manual smoke test)."""
    vocab = data_handler.PingyinVocabTokenizer()
    vocab.load_vocab()
    model = GateConvBlock(blank=0, vocab=vocab)
    # Raw string: the original literal relied on invalid escape sequences
    # ("\c", "\d", "\p", "\A") passing through unchanged; this spells the
    # exact same path without the DeprecationWarning.
    wav = r'F:\code\python\deeplearning\pythonProject\ai\data\asr_pytorch\data_thchs30\data\A2_0.wav'
    print(model.predict(wav))


if __name__ == '__main__':
    _demo()
