from fire import Fire
import torch
from torch import nn
import torch.onnx
import onnx 
import os
from torchaudio.models import Wav2Letter
from collections import OrderedDict
import argparse
import sys, os
import torch.functional as F


sys.path.append('D:\\works\\Speech\\XDF_CALL\\us_any_clause_acc_api_39\\demoEnv\\my-wav2letter\\gru-pytorch')

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")



class BLSTM2(nn.Module):
    """Acoustic model: a BLSTM backbone followed by log-softmax over classes.

    NOTE(review): ``BLSTM`` is not defined in this file; it presumably comes
    from the directory added to ``sys.path`` above — verify before running.
    """

    def __init__(self, input_dim, hidden_dim, num_layer, output_dim):
        super(BLSTM2, self).__init__()
        # Backbone producing per-frame class scores.
        self.am = BLSTM(input_num=input_dim, hidden_num=hidden_dim,
                        num_layers=num_layer, output_num=output_dim)
        # Normalize scores to log-probabilities along the class axis (dim 2).
        self.softmax_func = nn.LogSoftmax(dim=2)

    def forward(self, x):
        """Return per-frame log-probabilities for input features ``x``."""
        return self.softmax_func(self.am(x))


def load_model(model_path, model):
    """Load weights from ``model_path`` into ``model``, move it to the global
    ``device`` and switch it to eval (inference) mode.

    Returns the same ``model`` instance for convenience.
    """
    # map_location keeps every tensor on CPU regardless of where it was saved;
    # the explicit .to(device) afterwards places the model where we want it.
    state = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state)
    model.to(device)
    model.eval()
    return model


def remove_initializer_from_input(model_in, model_out):
    """Remove graph inputs that are actually initializers (weights) from an ONNX model.

    Older exporters list every initializer as a graph input, which makes some
    runtimes treat the weights as user-supplied inputs.  This rewrites the
    model so only genuine inputs remain.

    Args:
        model_in: path of the ONNX model to read.
        model_out: path to write the cleaned model to.
    """
    model = onnx.load(model_in)
    if model.ir_version < 4:
        # Before IR version 4 the spec requires initializers to also appear as
        # graph inputs, so removing them would produce an invalid model.
        print(
            'Model with ir_version below 4 requires to include initilizer in graph input'
        )
        return

    inputs = model.graph.input
    # Map input name -> ValueInfoProto (avoid shadowing the builtin `input`).
    name_to_input = {inp.name: inp for inp in inputs}

    for initializer in model.graph.initializer:
        if initializer.name in name_to_input:
            inputs.remove(name_to_input[initializer.name])

    onnx.save(model, model_out)


def convert_w2l_to_onnx(model_file="wav2letter.pt", feas_size=80, num_classes=40):
    """Export a trained Wav2Letter checkpoint to "wav2letter.onnx".

    Args:
        model_file: path to a saved state_dict (possibly DataParallel-wrapped).
        feas_size: number of MFCC features per frame (model input channels).
        num_classes: size of the output vocabulary.
    """
    # Build the model and load the checkpoint; map_location lets a CUDA-saved
    # checkpoint load on a CPU-only machine (was: torch.load(model_file)).
    model = Wav2Letter(num_classes=num_classes, input_type="mfcc",
                       num_features=feas_size).to(device)
    state_dict = torch.load(model_file, map_location=device)
    # Strip the "module." prefix that nn.DataParallel adds to parameter names.
    new_state_dict = OrderedDict(
        (name.replace("module.", ""), tensor) for name, tensor in state_dict.items()
    )
    model.load_state_dict(new_state_dict)
    model.eval()

    # Dummy input: (batch, features, frames); only the frame axis is dynamic.
    batch_size = 1
    num_frames = 500
    x = torch.randn(size=(batch_size, feas_size, num_frames)).to(device)
    # TODO: also make batch_size dynamic once downstream runtimes support it:
    # dynamic_axes = {'input_w2l': {0: 'batch_size', 2: 'innum_frames'},
    #                 'output_w2l': {0: 'batch_size', 2: 'outnum_frames'}}
    dynamic_axes = {'input_w2l': {2: 'innum_frames'},
                    'output_w2l': {2: 'outnum_frames'}}
    torch.onnx.export(model,
                      x,
                      "wav2letter.tmp.onnx",
                      verbose=True,
                      input_names=["input_w2l"],
                      output_names=["output_w2l"],
                      dynamic_axes=dynamic_axes)
    # Drop initializers from the graph inputs, then remove the temp file.
    remove_initializer_from_input(model_in="wav2letter.tmp.onnx", model_out="wav2letter.onnx")
    os.remove("wav2letter.tmp.onnx")
    print("----------------")


def convert_rnn_to_onnx(model_file, feas_size, cell_num, num_hidden, num_classes=1000):
    """Export a BLSTM acoustic-model checkpoint to ONNX (<model_file>.onnx).

    Args:
        model_file: path to a saved state_dict (possibly DataParallel-wrapped).
        feas_size: input feature dimension per frame.
        cell_num: hidden cell size of the LSTM.
        num_hidden: NOTE(review): unused — BLSTM3 below takes no layer count;
            confirm against the BLSTM3 definition.
        num_classes: output dimension of the softmax layer.
    """
    # NOTE(review): BLSTM3 is not defined in this file (only BLSTM2 is); it
    # presumably comes from the directory added to sys.path above — verify.
    # Use the module-level `device` instead of hard-coded .cuda() so export
    # also works on CPU-only machines.
    model = BLSTM3(input_dim=feas_size, cell_num=cell_num, output_dim=num_classes).to(device)

    # map_location lets a CUDA-saved checkpoint load on a CPU-only machine.
    am_model = torch.load(model_file, map_location=device)
    print('state_dict {}\n'.format(am_model))

    # Strip the "module." prefix that nn.DataParallel adds to parameter names.
    new_state_dict = OrderedDict()
    for name, tensor in am_model.items():
        new_state_dict[name.replace("module.", "")] = tensor
        print(name)
        print(tensor, type(tensor))

    print('')
    model.load_state_dict(new_state_dict)
    model.eval()

    # Dummy input: (batch, frames, features); batch and frame axes are dynamic.
    batch_size = 1
    num_frames = 500
    x = torch.randn(size=(batch_size, num_frames, feas_size)).to(device)
    dynamic_axes = {'input_rnn': {0: 'batch_size', 1: 'num_frames'},
                    'output_rnn': {0: 'batch_size', 1: 'num_frames'}}
    onnx_model = model_file + ".onnx"
    temp_model = model_file + ".temp.onnx"
    torch.onnx.export(model,
                      x,
                      temp_model,
                      verbose=True,
                      input_names=["input_rnn"],
                      output_names=["output_rnn"],
                      dynamic_axes=dynamic_axes)
    # Drop initializers from the graph inputs, then remove the temp file.
    remove_initializer_from_input(model_in=temp_model, model_out=onnx_model)
    os.remove(temp_model)
    print("---------------- Finish all ------------------\n")


def convert_tdnnf_to_onnx(model_file, input_dim, context_width, cell_dim, num_classes=1000):
    """Export a factorized-TDNN (TDNNF) checkpoint to ONNX (<model_file>.onnx).

    Args:
        model_file: path to a saved state_dict (possibly DataParallel-wrapped).
        input_dim: per-frame feature dimension before context splicing.
        context_width: frames of context on each side; the network input is
            (2 * context_width + 1) * input_dim.
        cell_dim: internal TDNNF cell dimension.
        num_classes: output dimension.

    Raises:
        FileNotFoundError: if ``model_file`` does not exist.
    """
    from ftdnn import models_mix
    if not os.path.exists(model_file):
        # Include the offending path in the error (was a bare raise).
        raise FileNotFoundError(model_file)
    real_input_dim = (context_width * 2 + 1) * input_dim
    # Use the module-level `device` instead of hard-coded .cuda() so export
    # also works on CPU-only machines.
    tdnn = models_mix.TDNNF(in_dim=real_input_dim, cell_dim=cell_dim, out_dim=num_classes).to(device)
    #tdnn.set_infer_flag()
    # Inputs to the FTDNNLayer must be (batch_size, seq_len, in_dim).
    # map_location lets a CUDA-saved checkpoint load on a CPU-only machine.
    am_model = torch.load(model_file, map_location=device)
    print('state_dict {}\n'.format(am_model))
    # Strip the "module." prefix that nn.DataParallel adds to parameter names.
    new_state_dict = OrderedDict()
    for name, tensor in am_model.items():
        new_state_dict[name.replace("module.", "")] = tensor
        print(name)
        print(tensor, type(tensor))

    print('')
    tdnn.load_state_dict(new_state_dict)
    tdnn.eval()

    # Dummy input: (batch, frames, features); batch and frame axes are dynamic.
    batch_size = 1
    num_frames = 500
    x = torch.randn(size=(batch_size, num_frames, input_dim)).to(device)
    dynamic_axes = {'input': {0: 'batch_size', 1: 'num_frames'},
                    'output': {0: 'batch_size', 1: 'num_frames'}}

    onnx_model = model_file + ".onnx"
    temp_model = model_file + ".temp.onnx"
    torch.onnx.export(tdnn,
                      x,
                      temp_model,
                      verbose=True,
                      opset_version=11,
                      input_names=["input"],
                      output_names=["output"],
                      dynamic_axes=dynamic_axes)
    # Drop initializers from the graph inputs, then remove the temp file.
    remove_initializer_from_input(model_in=temp_model, model_out=onnx_model)
    os.remove(temp_model)
    print("---------------- Finish all ------------------\n")


def get_args():
    """Parse command-line options for the conversion script.

    Returns:
        argparse.Namespace with model, input_dim, cell_num, num_layer,
        output_dim attributes.
    """
    parser = argparse.ArgumentParser(
        # Fix: the original had a stray quote (""""Convert...) that leaked a
        # leading '"' into the --help text.
        description="Convert a torch model to onnx",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve'
    )

    parser.add_argument("--model", type=str, dest='model', default="", help="pytorch model")
    parser.add_argument("--input_dim", type=int, dest='input_dim', default=80, help="input feature dims")
    parser.add_argument("--cell_num", type=int, dest='cell_num', default=1024, help="cell number of lstm")
    parser.add_argument("--num_layer", type=int, dest='num_layer', default=4, help="number of layers")
    parser.add_argument("--output_dim", type=int, dest='output_dim', default=80, help="output dim of softmax")
    # Echo the exact invocation to stderr so logs record how the run started.
    print(' '.join(sys.argv), file=sys.stderr)
    print(sys.argv, file=sys.stderr)
    args = parser.parse_args()
    return args


def conv_rnn_model():
    """Convert the hard-coded BLSTM checkpoint to ONNX."""
    # Fixed topology: 240-d input, 128 cells, 3 layers, 4160 output classes.
    convert_rnn_to_onnx("epoch_24_model.pt_model_eval.pt", 240, 128, 3, 4160)
    print('Convert done !\n')


def conv_tdnnf_model():
    """Convert the hard-coded TDNNF checkpoint ("final.pt") to ONNX."""
    # 80-d features, +/-14 frames of context, 1024 cells, 5688 output classes.
    convert_tdnnf_to_onnx("final.pt", 80, 14, 1024, 5688)
    print('TDNNF convert to onnx ok!\n')


if __name__ == '__main__':
    # Flip to True to export the BLSTM model instead of the TDNNF one.
    rnn_model = False
    (conv_rnn_model if rnn_model else conv_tdnnf_model)()




