import torch
from torch import nn, optim, Tensor
import torch.onnx
import onnx
import os
import numpy as np
from collections import OrderedDict


# Pick the compute device once at import time: prefer CUDA when present.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class TdnnLayer(nn.Module):
    """One TDNN (time-delay) layer.

    Expects input of shape (batch_size, frame_num, feat_dim). Neighboring
    frames are spliced together with F.unfold, passed through an affine +
    ReLU, and each output frame is then rescaled so its root-mean-square
    over the feature dimension is 1 (Kaldi-style renorm — presumably
    matching Kaldi's NormalizeComponent; confirm against the exporter).
    """

    def __init__(self, unfold_kernel_size, unfold_stride, unfold_dilation, linear_input, linear_output):
        super(TdnnLayer, self).__init__()
        # Frame-splicing geometry, forwarded verbatim to F.unfold.
        self.unfold_kernel_size = unfold_kernel_size
        self.unfold_stride = unfold_stride
        self.unfold_dilation = unfold_dilation

        # Affine transform over the spliced frames followed by ReLU.
        self.output_layer = nn.Sequential(
            nn.Linear(linear_input, linear_output),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Treat (batch, frames, feats) as a one-channel image so unfold can
        # splice consecutive frames into one long vector per output step.
        spliced = torch.nn.functional.unfold(
            x.unsqueeze(1),
            kernel_size=self.unfold_kernel_size,
            stride=self.unfold_stride,
            dilation=self.unfold_dilation,
        ).transpose(1, 2)
        out = self.output_layer(spliced)

        # ---- renorm: divide each frame by its RMS over the feature dim ----
        feat_dim = out.shape[2]
        mean_square = torch.sum(out * out, dim=2) / feat_dim   # (B, T)
        scale = torch.pow(mean_square, -0.5).unsqueeze(-1)     # (B, T, 1), broadcasts
        return out * scale


def TdnnLayer_unit_test():
    """Smoke-test TdnnLayer with the geometry used by TDNN.layer1.

    The original version called ``torch.tensor()`` with no data, which
    raises TypeError, so the layer was never actually exercised.
    """
    layer1 = TdnnLayer((2, 400), (1, 400), (3, 1), 800, 1280)
    # Kernel height 2 with frame-dilation 3 spans 4 input frames, so
    # 300 input frames yield 300 - 3 = 297 output frames.
    x = torch.randn(1, 300, 400)
    out = layer1(x)
    assert out.shape == (1, 297, 1280)
    print(out.shape)



class TDNN(nn.Module):
    """Six-layer TDNN acoustic model.

    Input: (batch_size, frame_num, 80) filterbank features.
    Output: (batch_size, out_frames, 5752) per-frame log-posteriors
    (out_frames shrinks because of the frame splicing at each layer).
    """

    def __init__(self):
        super(TDNN, self).__init__()
        # NOTE: module creation order matters for reproducible random init.
        self.lda = nn.Linear(400, 400)
        self.layer1 = TdnnLayer((2, 400), (1, 400), (3, 1), 800, 1280)
        self.layer2 = TdnnLayer((2, 1280), (1, 1280), (6, 1), 2560, 1280)
        self.layer3 = TdnnLayer((2, 1280), (1, 1280), (9, 1), 2560, 1280)
        self.layer4 = TdnnLayer((2, 1280), (1, 1280), (1, 1), 2560, 1280)
        self.layer5 = TdnnLayer((2, 1280), (1, 1280), (1, 1), 2560, 1280)
        self.layer6 = TdnnLayer((2, 1280), (1, 1280), (3, 1), 2560, 1280)
        self.output_layer = nn.Linear(1280, 5752)
        self.softmax_func = nn.LogSoftmax(dim=2)

    def forward(self, x):
        # Splice 5 consecutive 80-dim frames into one 400-dim vector before
        # the LDA-like affine transform.
        spliced = torch.nn.functional.unfold(
            x.unsqueeze(1), (5, 80), stride=(1, 80), dilation=(1, 1)
        ).transpose(1, 2)
        hidden = self.lda(spliced)
        for tdnn in (self.layer1, self.layer2, self.layer3,
                     self.layer4, self.layer5, self.layer6):
            hidden = tdnn(hidden)
        return self.softmax_func(self.output_layer(hidden))


class SQUARE(nn.Module):
    """Toy module for sanity-checking the ONNX export path: reduces a
    (B, T, D) input to (B, T) by summing the element-wise squares over
    the last dimension."""

    def __init__(self):
        super(SQUARE, self).__init__()
        self.model_name = "square = x * x"

    def forward(self, x):
        return (x * x).sum(dim=2)

        
def load_model(model_path, model):
    """Load a checkpoint from ``model_path`` into ``model``, move it to the
    module-level ``device``, switch to eval mode, and return the model."""
    # map_location keeps all tensors on CPU during deserialization;
    # .to(device) then moves the whole model in a single step.
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.to(device)
    model.eval()
    return model


def remove_initializer_from_input(model_in, model_out):
    """Load the ONNX file ``model_in``, drop every graph input that is
    actually an initializer (a trained weight), and save the cleaned
    model to ``model_out``.

    Skipped (with a message) for models with ir_version < 4, which are
    required to list initializers among the graph inputs.
    """
    model = onnx.load(model_in)
    if model.ir_version < 4:
        print(
            'Model with ir_version below 4 requires to include initilizer in graph input'
        )
        return

    graph_inputs = model.graph.input
    inputs_by_name = {inp.name: inp for inp in graph_inputs}

    for initializer in model.graph.initializer:
        if initializer.name in inputs_by_name:
            graph_inputs.remove(inputs_by_name[initializer.name])

    onnx.save(model, model_out)


def convert_rnn_to_onnx():
    """Build the TDNN, load Kaldi-exported .npy weights from ``48params/``,
    and export the model to ``tdnn-dynamic-48.onnx`` with dynamic batch
    and frame axes.

    Side effects: reads 48params/*.npy, writes tdnn-dynamic-48.onnx, and
    creates then deletes the temporary tdnn-tmp.onnx.
    """
    model = TDNN().to(device)
    # Debug aid: show the parameter names/shapes the checkpoint must match.
    for name, tensor in model.state_dict().items():
        print(name, tensor.shape)

    def _load(filename):
        # Every weight was dumped as a raw numpy array by the Kaldi exporter.
        return torch.from_numpy(np.load("48params/" + filename))

    new_state_dict = OrderedDict()
    new_state_dict["lda.weight"] = _load("lda-w.npy")
    new_state_dict["lda.bias"] = _load("lda-b.npy")
    # The six TDNN layers follow one naming pattern: tdnn<i>.affine-{w,b}.
    for i in range(1, 7):
        new_state_dict["layer{}.output_layer.0.weight".format(i)] = _load("tdnn{}.affine-w.npy".format(i))
        new_state_dict["layer{}.output_layer.0.bias".format(i)] = _load("tdnn{}.affine-b.npy".format(i))
    new_state_dict["output_layer.weight"] = _load("output.affine-w.npy")
    new_state_dict["output_layer.bias"] = _load("output.affine-b.npy")
    model.load_state_dict(new_state_dict)
    model.eval()

    # Dummy input only fixes the example trace; the axes below stay dynamic.
    x = torch.randn(size=(1, 500, 80)).to(device)

    dynamic_axes = {'input_rnn': {0: 'batch_size', 1: 'innum_frames'},
                    'output_rnn': {0: 'batch_size', 1: 'outnum_frames'}}
    torch.onnx.export(model,
                      x,
                      "tdnn-tmp.onnx",
                      verbose=True,
                      input_names=["input_rnn"],
                      output_names=["output_rnn"],
                      dynamic_axes=dynamic_axes,
                      opset_version=11)
    # Strip initializers from the graph inputs, then drop the temp file.
    remove_initializer_from_input(model_in="tdnn-tmp.onnx", model_out="tdnn-dynamic-48.onnx")
    os.remove("tdnn-tmp.onnx")
    print("----------------")
    print(model(x).cpu().detach().numpy().shape)



def test_onnx():
    """Export the toy SQUARE module to ``square.2.onnx`` with dynamic
    batch/frame axes, then print the eager-mode output shape for a
    (1, 500, 80) random input."""
    model = SQUARE().to(device)
    model.eval()

    x = torch.randn(size=(1, 500, 80)).to(device)

    # Same dynamic-axes naming as the real TDNN export.
    dynamic_axes = {'input_rnn': {0: 'batch_size', 1: 'innum_frames'},
                    'output_rnn': {0: 'batch_size', 1: 'outnum_frames'}}
    torch.onnx.export(model,
                      x,
                      "tdnn-tmp.onnx",
                      verbose=True,
                      input_names=["input_rnn"],
                      output_names=["output_rnn"],
                      dynamic_axes=dynamic_axes,
                      opset_version=11)
    remove_initializer_from_input(model_in="tdnn-tmp.onnx", model_out="square.2.onnx")
    os.remove("tdnn-tmp.onnx")
    print("----------------")
    print(model(x).cpu().detach().numpy().shape)



def test01():
    """Ad-hoc shape check of the frame-splicing front end: unfold a
    (1, 100, 80) batch with a 5x80 window, then feed the 400-dim spliced
    frames through a Linear(400, 800)."""
    feats = torch.randn(1, 100, 80).unsqueeze(1)
    print(feats.shape)
    spliced = torch.nn.functional.unfold(
        feats, (5, 80), stride=(1, 80), dilation=(1, 1)
    ).transpose(1, 2)
    print(spliced.shape)
    print(nn.Linear(400, 800)(spliced).shape)

def test02():
    """Visualize unfold with dilation on a tiny (1, 10, 2) input: a 2x2
    window, width-stride 2, and frame-dilation 3, so frames t and t+3
    get paired into one output row."""
    frames = torch.randn(1, 10, 2).unsqueeze(1)
    print(frames)
    print(frames.shape)
    paired = torch.nn.functional.unfold(
        frames, (2, 2), stride=(1, 2), dilation=(3, 1)
    ).transpose(1, 2)
    print(paired)
    print(paired.shape)


def test03():
    """Scratchpad for the renorm math on a (1, 2, 3) tensor: norm over the
    last dim, divide by length, clamp from below with torch.max, raise to
    -0.5, then tile the scale back up via a matmul with a ones column."""
    t = torch.randn(1, 2, 3)
    original = t
    floor = torch.randn(1, 2)
    print("a", t)
    length = t.shape[2]
    t = torch.norm(t, dim=2)
    print("a", t)
    print("----------------")
    print("length", length)
    t = t / length
    print("b", floor)
    print("a", t)
    print("----------------")
    t = torch.max(t, floor)
    print("a", t)
    print("----------------")
    t = torch.pow(t, -0.5)
    print("a", t)
    print("a.shape", t.shape)
    print("----------------")
    print("a", t)
    print("c", original)
    print("----------------")
    ones = torch.ones(size=(length, 1))
    t = torch.matmul(ones, t).t()
    print("a", t)
    print("a.shape", t.shape)
    print("----------------")

def test04():
    """Shape-check the renorm scaling as used in TdnnLayer, on a realistic
    (2, 300, 1280) activation tensor: per-frame scale computed over the
    feature dim, reshaped to (B, T, 1), and tiled to the input's shape."""
    x = torch.randn(2, 300, 1280)
    feat_dim = x.shape[2]
    rms = torch.norm(x, dim=2) / feat_dim
    print("rms.shape:", rms.shape)
    scale = torch.pow(rms, -0.5)
    print("scale.shape:", scale.shape)
    scale = scale.unsqueeze(1).transpose(1, 2)
    print("scale.shape:", scale.shape)
    scale = scale.repeat(1, 1, feat_dim)
    print("scale.shape:", scale.shape)
    print("x.shape:", torch.mul(x, scale).shape)



if __name__ == "__main__":
    convert_rnn_to_onnx()
    # test05()
    # test_onnx()
