import torch
import torchvision
from torch import nn, optim, Tensor
from torch.nn import functional as F
import sys
from pytorch_tdnn.tdnn import TDNN as TDNNLayer
from pytorch_tdnn.tdnnf import TDNNF as TDNNFLayer
import math
import numpy as np


class GRUModel(nn.Module):
    """Bidirectional GRU encoder whose two directions are summed before a
    Linear+ReLU projection.

    Fixes over the original:
      * ``gru_layers`` is now honored (it was silently ignored and the GRU
        was hard-coded to a single layer);
      * the GRU is bidirectional, so splitting the output in half along the
        feature axis is well defined — the old unidirectional output had only
        ``hidden_num`` features, making ``torch.split(x, 512)`` + ``x[1]``
        fail for every configuration.
    """

    def __init__(self, input_num, hidden_num, gru_layers, output_num):
        super(GRUModel, self).__init__()
        self.GRU_layer = nn.GRU(input_size=input_num, hidden_size=hidden_num,
                                num_layers=gru_layers, batch_first=True,
                                bidirectional=True)
        self.output_layer = nn.Sequential(
            nn.Linear(hidden_num, output_num),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """x: (batch, time, input_num) -> (batch, time, output_num)."""
        # Keep RNN weights contiguous (no-op on CPU; avoids warnings on GPU).
        self.GRU_layer.flatten_parameters()
        x, _ = self.GRU_layer(x)                       # (B, T, 2 * hidden_num)
        # Sum the forward and backward direction outputs -> (B, T, hidden_num).
        fwd, bwd = torch.split(x, x.size(2) // 2, dim=2)
        x = fwd.add(bwd)
        x = self.output_layer(x)
        return x


class BLSTM(nn.Module):
    """Bidirectional LSTM classifier emitting per-frame log-probabilities.

    Input: (batch, time, input_num); output: (batch, time, output_num)
    log-probabilities over the class axis.
    """

    def __init__(self, input_num, hidden_num, num_layers, output_num):
        super(BLSTM, self).__init__()
        self.LSTM_layer = nn.LSTM(input_size=input_num, hidden_size=hidden_num, num_layers=num_layers,
                                  batch_first=True, bidirectional=True)
        # Bidirectional output is 2*hidden_num wide.
        self.output_layer = nn.Sequential(
            nn.Linear(hidden_num*2, output_num),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        self.LSTM_layer.flatten_parameters()
        x, _ = self.LSTM_layer(x)
        x = self.output_layer(x)
        # Fixed: fuse softmax+log into log_softmax — numerically stable
        # (log(softmax(x)) underflows to -inf for tiny probabilities).
        return F.log_softmax(x, dim=2)


class LstmLayer(nn.Module):
    """One (optionally bidirectional) LSTM layer followed by BatchNorm1d over
    the feature dimension.

    Fixes over the original: ``forward`` referenced non-existent attributes
    ``self.l1``/``self.bn1`` (the constructor created ``self.layer``/``self.bn``),
    so every call raised AttributeError; and the ``bi`` flag was ignored
    (``bidirectional`` was hard-coded to True).

    ``output_dim`` is accepted for signature compatibility but unused.
    """

    def __init__(self, input_dim, cell_dim, output_dim, bi=True):
        super(LstmLayer, self).__init__()
        num_l = cell_dim if bi == False else cell_dim * 2
        # Honor the `bi` flag (was hard-coded bidirectional=True).
        self.layer = nn.LSTM(input_size=input_dim, hidden_size=cell_dim, batch_first=True, bidirectional=bi)
        self.bn = nn.BatchNorm1d(num_features=num_l)

    def forward(self, x):
        """x: (batch, time, input_dim) -> (batch, time, num_l)."""
        x, _ = self.layer(x)
        # BatchNorm1d normalizes dim 1, so move features there and back.
        x = x.transpose(1, 2)
        x = self.bn(x)
        x = x.transpose(1, 2)
        return x


class BLSTM2(nn.Module):
    """Two stacked LstmLayer blocks followed by a Linear+ReLU projection
    and a softmax over the class axis (probabilities, not log-probs).
    """

    def __init__(self, input_dim, cell_num, output_dim, bi=True):
        super(BLSTM2, self).__init__()
        # Feature width doubles when the LSTMs are bidirectional.
        width = cell_num * 2 if bi else cell_num
        self.l1 = LstmLayer(input_dim, cell_num, -1, bi)
        self.l2 = LstmLayer(width, cell_num, -1, bi)
        self.output_layer = nn.Sequential(
            nn.Linear(width, output_dim),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """x: (batch, time, input_dim) -> (batch, time, output_dim) probs."""
        hidden = self.l2(self.l1(x))
        logits = self.output_layer(hidden)
        return F.softmax(logits, dim=2)

    def init_params(self):
        # Intentionally a no-op; kept for interface compatibility.
        pass


class BLSTM3(nn.Module):
    """Three stacked (optionally bidirectional) LSTM layers, each followed by
    BatchNorm1d over the feature axis, then a Linear+ReLU head emitting
    per-frame log-probabilities.
    """

    def __init__(self, input_dim, cell_num, output_dim, bi=True):
        super(BLSTM3, self).__init__()
        # Feature width doubles when bidirectional.
        num_l = cell_num if bi == False else cell_num * 2
        self.l1 = nn.LSTM(input_size=input_dim, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn1 = nn.BatchNorm1d(num_features=num_l)
        self.l2 = nn.LSTM(input_size=num_l, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn2 = nn.BatchNorm1d(num_features=num_l)
        self.l3 = nn.LSTM(input_size=num_l, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn3 = nn.BatchNorm1d(num_features=num_l)
        self.output_layer = nn.Sequential(
            nn.Linear(num_l, output_dim),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """x: (batch, time, input_dim) -> (batch, time, output_dim) log-probs."""
        # Attribute names are kept so existing checkpoints still load.
        for lstm, bn in ((self.l1, self.bn1), (self.l2, self.bn2), (self.l3, self.bn3)):
            x, _ = lstm(x)
            # BatchNorm1d normalizes dim 1 -> move features there and back.
            x = bn(x.transpose(1, 2)).transpose(1, 2)
        x = self.output_layer(x)
        # Fixed: fuse softmax+log into log_softmax (numerically stable).
        return F.log_softmax(x, dim=2)

    def init_params(self):
        # Intentionally a no-op; kept for interface compatibility.
        pass


class BLSTM4(nn.Module):
    """Four stacked (optionally bidirectional) LSTM layers, each followed by
    BatchNorm1d over the feature axis, then a Linear+ReLU head emitting
    per-frame log-probabilities.
    """

    def __init__(self, input_dim, cell_num, output_dim, bi=True):
        super(BLSTM4, self).__init__()
        # Feature width doubles when bidirectional.
        num_l = cell_num if bi == False else cell_num * 2
        self.l1 = nn.LSTM(input_size=input_dim, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn1 = nn.BatchNorm1d(num_features=num_l)
        self.l2 = nn.LSTM(input_size=num_l, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn2 = nn.BatchNorm1d(num_features=num_l)
        self.l3 = nn.LSTM(input_size=num_l, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn3 = nn.BatchNorm1d(num_features=num_l)
        self.l4 = nn.LSTM(input_size=num_l, hidden_size=cell_num, batch_first=True, bidirectional=bi)
        self.bn4 = nn.BatchNorm1d(num_features=num_l)
        self.output_layer = nn.Sequential(
            nn.Linear(num_l, output_dim),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """x: (batch, time, input_dim) -> (batch, time, output_dim) log-probs."""
        # Attribute names are kept so existing checkpoints still load.
        for lstm, bn in ((self.l1, self.bn1), (self.l2, self.bn2),
                         (self.l3, self.bn3), (self.l4, self.bn4)):
            x, _ = lstm(x)
            # BatchNorm1d normalizes dim 1 -> move features there and back.
            x = bn(x.transpose(1, 2)).transpose(1, 2)
        x = self.output_layer(x)
        # Fixed: fuse softmax+log into log_softmax (numerically stable).
        return F.log_softmax(x, dim=2)


class CLDNN1(nn.Module):
    """Conv1d front-end + 4-layer bidirectional GRU + Linear head.

    The two GRU direction outputs are summed before the projection.
    Input: (batch, time, 80); output: (batch, time, 40).
    """

    def __init__(self):
        super(CLDNN1, self).__init__()
        self.CNN_layer = nn.Sequential(
            nn.Conv1d(in_channels=80, out_channels=256, kernel_size=7, stride=1, padding=3),
            nn.ReLU(inplace=True),
            nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True)
        )
        self.GRU_layer = nn.GRU(input_size=512, hidden_size=512, num_layers=4, batch_first=True, bidirectional=True)
        self.output_layer = nn.Sequential(
            nn.Linear(512, 40),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Fixed for consistency with CLDNN2/W2LSTM: compact the RNN weights
        # before the forward pass (matters on GPU / DataParallel).
        self.GRU_layer.flatten_parameters()
        x = x.permute(0, 2, 1)       # (B, T, 80) -> (B, 80, T) for Conv1d
        x = self.CNN_layer(x)
        x = x.permute(0, 2, 1)       # back to (B, T, 512)
        x, _ = self.GRU_layer(x)     # (B, T, 1024)
        # Sum forward and backward direction halves -> (B, T, 512).
        x = torch.split(x, 512, dim=2)
        x = x[0].add(x[1])
        x = self.output_layer(x)
        return x


class CLDNN2(nn.Module):
    """Wide-kernel Conv1d front-end + 2-layer bidirectional GRU + Linear head.

    Unlike CLDNN1/CLDNN3 the two GRU directions stay concatenated (1024 wide)
    rather than being summed. Input: (batch, time, 80); output: (batch, time, 40).
    """

    def __init__(self):
        super(CLDNN2, self).__init__()
        self.CNN_layer = nn.Sequential(
            nn.Conv1d(in_channels=80, out_channels=256, kernel_size=47, stride=1, padding=23),
            nn.ReLU(inplace=True),
            nn.Conv1d(in_channels=256, out_channels=512, kernel_size=7, stride=1, padding=3),
            nn.ReLU(inplace=True)
        )
        self.GRU_layer = nn.GRU(input_size=512, hidden_size=512, num_layers=2, batch_first=True, bidirectional=True)
        self.output_layer = nn.Sequential(
            nn.Linear(1024, 40),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Compact RNN weights (relevant on GPU / DataParallel).
        self.GRU_layer.flatten_parameters()
        # Conv1d wants channels in dim 1; restore (B, T, C) afterwards.
        conv_out = self.CNN_layer(x.permute(0, 2, 1)).permute(0, 2, 1)
        rnn_out, _ = self.GRU_layer(conv_out)
        return self.output_layer(rnn_out)


class CLDNN3(nn.Module):
    """Conv1d front-end + 2-layer bidirectional GRU + Linear head.

    The two GRU direction outputs are summed before the projection.
    Input: (batch, time, 80); output: (batch, time, 40).
    """

    def __init__(self):
        super(CLDNN3, self).__init__()
        self.CNN_layer = nn.Sequential(
            nn.Conv1d(in_channels=80, out_channels=256, kernel_size=15, stride=1, padding=7),
            nn.ReLU(inplace=True),
            nn.Conv1d(in_channels=256, out_channels=512, kernel_size=7, stride=1, padding=3),
            nn.ReLU(inplace=True)
        )
        self.GRU_layer = nn.GRU(input_size=512, hidden_size=512, num_layers=2, batch_first=True, bidirectional=True)
        self.output_layer = nn.Sequential(
            nn.Linear(512, 40),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Fixed for consistency with CLDNN2/W2LSTM: compact the RNN weights
        # before the forward pass (matters on GPU / DataParallel).
        self.GRU_layer.flatten_parameters()
        x = x.permute(0, 2, 1)       # (B, T, 80) -> (B, 80, T) for Conv1d
        x = self.CNN_layer(x)
        x = x.permute(0, 2, 1)       # back to (B, T, 512)
        x, _ = self.GRU_layer(x)     # (B, T, 1024)
        # Sum forward and backward direction halves -> (B, T, 512).
        x = torch.split(x, 512, dim=2)
        x = x[0].add(x[1])
        x = self.output_layer(x)
        return x


class W2LSTM(nn.Module):
    """Wav2letter-style Conv1d stack (stride-2 first layer halves the time
    axis) followed by a 2-layer bidirectional GRU and a Linear head.

    Input: (batch, time, 80); output: (batch, ~time/2, 40).
    """

    def __init__(self):
        super(W2LSTM, self).__init__()
        # First conv downsamples time by 2; then seven identical conv blocks.
        # Built in a loop — Sequential indices match the hand-written version.
        blocks = [
            nn.Conv1d(in_channels=80, out_channels=250, kernel_size=48, stride=2, padding=23),
            nn.ReLU(inplace=True),
        ]
        for _ in range(7):
            blocks.append(nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3))
            blocks.append(nn.ReLU(inplace=True))
        self.acoustic_model = nn.Sequential(*blocks)
        self.GRU_layer = nn.GRU(input_size=250, hidden_size=512, num_layers=2, batch_first=True, bidirectional=True)
        self.output_layer = nn.Sequential(
            nn.Linear(1024, 40),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Compact RNN weights (relevant on GPU / DataParallel).
        self.GRU_layer.flatten_parameters()
        # Conv1d wants channels in dim 1; restore (B, T, C) afterwards.
        feats = self.acoustic_model(x.permute(0, 2, 1)).permute(0, 2, 1)
        rnn_out, _ = self.GRU_layer(feats)
        return self.output_layer(rnn_out)


class CLDNN6(nn.Module):
    """2-D CNN front-end + 2-layer bidirectional GRU + MLP head.

    Input: (batch, 80, time) spectrogram-like features; output: (batch, 40, time).

    Fixed: the hard-coded ``reshape(64, 1, 350, 80)`` — which only worked for
    batch size 64 and exactly 350 frames — is replaced by ``unsqueeze(1)``,
    so any batch size and sequence length is accepted (backward compatible).
    """

    def __init__(self):
        super(CLDNN6, self).__init__()
        # Frequency axis shrinks 80 -> 72 -> 70 -> 68 (no padding on that
        # axis), hence the 256 * 68 input width of the linear layer.
        self.CNN_layer = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=256, kernel_size=(9,9), stride=(1,1), padding=(4,0)),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3,3), stride=(1,1), padding=(1,0)),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3,3), stride=(1,1), padding=(1,0)),
            nn.ReLU(inplace=True)
        )
        self.linear_layer = nn.Linear(256 * 68, 256)
        self.GRU_layer = nn.GRU(input_size=256, hidden_size=512, num_layers=2, batch_first=True, bidirectional=True)
        self.output_layer = nn.Sequential(
            nn.Linear(512, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 40),
            nn.ReLU(inplace=True)
        )

    def forward(self, inputs):
        # Compact RNN weights, consistent with the other models here.
        self.GRU_layer.flatten_parameters()
        inputs = inputs.permute(0, 2, 1)              # (B, 80, T) -> (B, T, 80)
        # Fixed: add a singleton channel dim instead of reshape(64,1,350,80).
        inputs = inputs.unsqueeze(1)                  # (B, 1, T, 80)
        cnn_outputs = self.CNN_layer(inputs)          # (B, 256, T, 68)
        cnn_outputs = cnn_outputs.permute(0, 2, 3, 1) # (B, T, 68, 256)
        flatten_outputs = torch.flatten(cnn_outputs, start_dim=2, end_dim=3)
        linear_outputs = self.linear_layer(flatten_outputs)
        gru_outputs, _ = self.GRU_layer(linear_outputs)
        # Sum forward and backward direction halves -> (B, T, 512).
        gru_outputs = torch.split(gru_outputs, 512, dim=2)
        gru_outputs = gru_outputs[0].add(gru_outputs[1])
        final_outputs = self.output_layer(gru_outputs)
        return final_outputs.permute(0, 2, 1)         # (B, 40, T)


def test01():
    """Standalone shape walk-through of the CLDNN6 topology on random data.

    Builds the same layers as CLDNN6 by hand and prints the tensor size at
    every stage; returns nothing.
    """
    conv_stack = nn.Sequential(
        nn.Conv2d(in_channels=1, out_channels=256, kernel_size=(9,9), stride=(1,1), padding=(4,0)),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=(3, 3), stride=(1,1), padding=(1,0)),
        nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3,3), stride=(1,1), padding=(1,0)),
        nn.ReLU(inplace=True)
    )
    proj = nn.Linear(256 * 68, 256)
    rnn = nn.GRU(input_size=256, hidden_size=512, num_layers=2, batch_first=True, bidirectional=True)
    head = nn.Sequential(
        nn.Linear(512, 1024),
        nn.ReLU(inplace=True),
        nn.Linear(1024, 1024),
        nn.ReLU(inplace=True),
        nn.Linear(1024, 40),
        nn.ReLU(inplace=True)
    )
    print('CNN_layer {}\n linear_layer {}\n, gru_layer {}\n'.format(conv_stack, proj, rnn))
    batch = torch.randn(64, 1, 350, 80)
    print("inputs", batch.size())
    conv_out = conv_stack(batch)
    print("cnn_outputs", conv_out.size())
    conv_out = conv_out.permute(0, 2, 3, 1)
    print("cnn_outputs", conv_out.size())
    flat = torch.flatten(conv_out, start_dim=2, end_dim=3)
    print("flatten_outputs", flat.size())
    proj_out = proj(flat)
    print("linear_outputs", proj_out.size())
    rnn_out, _ = rnn(proj_out)
    # Sum forward/backward halves of the bidirectional output.
    halves = torch.split(rnn_out, 512, dim=2)
    rnn_out = halves[0].add(halves[1])
    print("gru_outputs", rnn_out.size())
    logits = head(rnn_out)
    print("final_outputs",logits.size())


class MyBlstm(nn.Module):
    """Single unidirectional LSTM followed by BatchNorm1d, with debug prints.

    NOTE(review): ``BatchNorm1d(num_features=1)`` treats dim 1 of the
    batch-first LSTM output (the time axis) as the channel axis, so this only
    works for sequences of length 1 (as used in test02) — confirm intent.
    ``num_output`` is accepted but unused.
    """

    def __init__(self, input_dim, num_hiddens, num_layers, num_output):
        super(MyBlstm, self).__init__()
        self.layer1 = nn.LSTM(input_size=input_dim, hidden_size=num_hiddens, num_layers=num_layers, batch_first=True, bidirectional=False)
        self.layer1bn = nn.BatchNorm1d(num_features=1)

    def forward(self, x):
        print('shape of input {}\n'.format(x.shape))
        seq_out, _ = self.layer1(x)
        print('shape of out {}\n'.format(seq_out.shape))
        return self.layer1bn(seq_out)


class PyTdnn(nn.Module):
    """Six-layer TDNN stack built from pytorch_tdnn layers.

    Each layer splices the given frame offsets (context) around the current
    frame. Expects input of shape (batch, input_dim, time).
    """

    def __init__(self, input_dim, output_dim, cell_dim=1280):
        super(PyTdnn, self).__init__()
        # Attribute names kept so existing checkpoints still load.
        self.tdnn1 = TDNNLayer(input_dim=input_dim, output_dim=cell_dim,
                               context=[-2, -1, 0, 1, 2])
        self.tdnn2 = TDNNLayer(input_dim=cell_dim, output_dim=cell_dim,
                               context=[-2, 0, 2])
        self.tdnn3 = TDNNLayer(input_dim=cell_dim, output_dim=cell_dim,
                               context=[-3, 0, 3])
        self.tdnn4 = TDNNLayer(input_dim=cell_dim, output_dim=cell_dim,
                               context=[-7, 0, 7])
        self.tdnn5 = TDNNLayer(input_dim=cell_dim, output_dim=cell_dim,
                               context=[-1, 0, 1])
        self.tdnn6 = TDNNLayer(input_dim=cell_dim, output_dim=output_dim,
                               context=[-3, 0, 3])

    def forward(self, x):
        """x: (batch, input_dim, time) -> (batch, output_dim, time')."""
        out = x
        for layer in (self.tdnn1, self.tdnn2, self.tdnn3,
                      self.tdnn4, self.tdnn5, self.tdnn6):
            out = layer(out)
        return out


def test_tdnn():
    """Build a PyTdnn and push random features through it, printing shapes.

    x must have the shape (batch_size, input_dim, sequence_length).
    """
    net = PyTdnn(80, 5208)
    print('--------STRUCTURE OF NET {}\n'.format(net))
    feats = torch.randn(1, 80, 10)
    out = net(feats)
    print('input {}, output {}\n'.format(feats.shape, out.shape))
    print('input {}\noutput {}\n'.format(feats, torch.softmax(out, dim=1)))
    print('Done.\n')


def test02():
    """Run MyBlstm on the GPU with a single random frame and print shapes."""
    model = nn.Sequential(MyBlstm(80, 1024, 2, 5752)).cuda()
    print('Structure of model {}\n'.format(model))
    feats = torch.randn(1, 1, 80).cuda()
    out = model(feats)
    print('input {}, output {}\n'.format(feats.shape, out.shape))
    print('Done.\n')

def test03():
    """Run one bidirectional LSTM + BatchNorm1d on the GPU and dump shapes.

    NOTE: ends with sys.exit(-1), so nothing after the call runs.
    """
    # Removed unused local bn_input_size (was 512*2 and never read).
    # NOTE(review): dropout has no effect with num_layers=1 — PyTorch warns.
    naive_lstm = nn.LSTM(input_size=80, num_layers=1, hidden_size=512, dropout=0.9, bidirectional=True).cuda()
    bn = nn.BatchNorm1d(1).cuda()
    print('structure of nnet {}\n'.format(naive_lstm))
    inputs = torch.randn((4, 1, 80)).cuda()
    lstm_output, _ = naive_lstm(inputs)
    bn_output = bn(lstm_output)
    print('in shape {} and out shape {} of lstm\n'.format(inputs.shape, lstm_output.shape))
    print('shape of bn is {}\n'.format(bn_output.shape))
    print('example of lstm {} and bn {}\n'.format(lstm_output, bn_output))
    sys.exit(-1)


def test_blstm3():
    """Smoke-test BLSTM4 on a random (1, 10, 80) batch and print shapes.

    NOTE: ends with sys.exit(-1), so nothing after the call runs.
    """
    net = BLSTM4(input_dim=80, cell_num=128, output_dim=5208, bi=True)
    print('blstm3 == {}\n'.format(net))
    # Renamed local (was `input`, shadowing the builtin).
    inputs = torch.randn(1, 10, 80)
    y = net(inputs)
    # Fixed: the labels were swapped — the output size was printed as
    # "input" and vice versa.
    print('input is {}\noutput is {}\n'.format(inputs.size(), y.size()))
    sys.exit(-1)


if __name__ == '__main__':
    # Entry point: print the installed torch version, then run the smoke tests.
    # NOTE: test_blstm3() ends with sys.exit(-1), so test_tdnn() is never reached.
    print('version of pytorch {}\n'.format(torch.__version__))
    test_blstm3()
    test_tdnn()

