import sys

# sys.path.insert(0, "../../python")
sys.path.insert(0, '/home/xiaomin/wxm/mxnet/python')
import mxnet as mx
import argparse
import os, sys


def print_infer_shape(net, data_shape=(1, 1, 257, 669)):
    """Print the inferred output shapes of *net* for a given input shape.

    Debug helper used throughout this file: runs symbolic shape inference
    with the ``data`` variable bound to ``data_shape`` and prints the
    resulting output shapes.

    Parameters
    ----------
    net : symbol with an ``infer_shape`` method (e.g. mx.symbol.Symbol)
        Network whose output shapes should be inferred.
    data_shape : tuple of int, optional
        Shape bound to the ``data`` input. Defaults to the
        (batch, channel, height, width) shape used by the networks in
        this file, so existing single-argument callers are unchanged.
    """
    # infer_shape returns (arg_shapes, out_shapes, aux_shapes); only the
    # output shapes are of interest for debugging here.
    arg_shapes, out_shapes, aux_shapes = net.infer_shape(data=data_shape)
    # print() call syntax works under both Python 2 and Python 3,
    # unlike the former `print out_shapes` statement.
    print(out_shapes)


def get_lenet(num_classes=2):
    """Build a LeNet-style CNN classifier symbol.

    LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
    Haffner. "Gradient-based learning applied to document recognition."
    Proceedings of the IEEE (1998)

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes of the final softmax.

    Returns
    -------
    mx.symbol.Symbol
        The SoftmaxOutput symbol named 'softmax'.
    """
    data = mx.symbol.Variable('data')
    label = mx.symbol.Variable(name='label')

    # Three conv -> BN -> ReLU -> max-pool stages. Every convolution uses a
    # 1x4 kernel; only the filter count and the pooling window/stride vary.
    net = data
    for n_filter, pool_window in ((32, (1, 4)), (64, (1, 2)), (128, (1, 2))):
        net = mx.symbol.Convolution(data=net, kernel=(1, 4), num_filter=n_filter)
        net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
        net = mx.symbol.Activation(data=net, act_type="relu")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=pool_window, stride=pool_window)
        print_infer_shape(net)

    # Collapse the remaining extent along the last axis with one wide pool.
    net = mx.symbol.Pooling(data=net, pool_type="max",
                            kernel=(1, 35), stride=(1, 35))
    print_infer_shape(net)

    net = mx.symbol.Flatten(data=net)
    print_infer_shape(net)

    # Classifier head: FC -> ReLU -> BN -> FC -> softmax.
    net = mx.symbol.FullyConnected(data=net, num_hidden=500)
    net = mx.symbol.Activation(data=net, act_type="relu")
    net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, label=label, name='softmax')


def get_lenet_withrnn(num_classes=2):
    """Build a LeNet-style CNN with a 3-layer LSTM head.

    LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
    Haffner. "Gradient-based learning applied to document recognition."
    Proceedings of the IEEE (1998)

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes of the final softmax.

    Returns
    -------
    mx.symbol.Symbol
        The SoftmaxOutput symbol named 'softmax'.
    """
    data = mx.symbol.Variable('data')
    label = mx.symbol.Variable(name='label')

    # Externally supplied LSTM initial states; the first two axes are
    # swapped before feeding them to mx.symbol.RNN.
    # NOTE(review): presumably batch-major -> layer-major; confirm against
    # the data iterator that provides 'LSTM_init_h' / 'LSTM_init_c'.
    init_h = mx.sym.SwapAxis(mx.sym.Variable('LSTM_init_h'), dim1=0, dim2=1)
    init_c = mx.sym.SwapAxis(mx.sym.Variable('LSTM_init_c'), dim1=0, dim2=1)
    lstm_params = mx.sym.Variable('LSTM_bias')

    # Five conv -> BN -> ReLU -> max-pool stages. Every convolution uses a
    # 4x1 kernel and every pool halves the first spatial axis.
    net = data
    for n_filter in (32, 64, 128, 256, 256):
        net = mx.symbol.Convolution(data=net, kernel=(4, 1), num_filter=n_filter)
        net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
        net = mx.symbol.Activation(data=net, act_type="relu")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=(2, 1), stride=(2, 1))
        print_infer_shape(net)

    # Final pool collapses what is left of the first spatial axis.
    net = mx.symbol.Pooling(data=net, pool_type="max",
                            kernel=(5, 1), stride=(5, 1))
    print_infer_shape(net)

    # Rearrange the 256-channel feature map into a sequence for the RNN.
    net = mx.symbol.Reshape(data=net, shape=(1, 256, -1))
    net = mx.symbol.SwapAxis(data=net, dim1=0, dim2=1)
    net = mx.symbol.SwapAxis(data=net, dim1=0, dim2=2)

    net = mx.symbol.RNN(data=net, bidirectional=False, num_layers=3, mode='lstm',
                        state_size=128, state_outputs=False, state=init_h,
                        state_cell=init_c, parameters=lstm_params)
    print_infer_shape(net)
    net = mx.symbol.SwapAxis(data=net, dim1=0, dim2=1)
    print_infer_shape(net)
    # Sum over axis 1 divided by 669 -- presumably the sequence length,
    # turning the sum into a mean over time; TODO confirm.
    net = mx.symbol.sum(data=net, axis=1) / 669.
    print_infer_shape(net)

    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, label=label, name='softmax')


def get_lenet_noBNnoDropout(num_classes=2):
    """Build the LeNet-style CNN without BatchNorm or Dropout layers.

    LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
    Haffner. "Gradient-based learning applied to document recognition."
    Proceedings of the IEEE (1998)

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes of the final softmax.

    Returns
    -------
    mx.symbol.Symbol
        The SoftmaxOutput symbol named 'softmax'.
    """
    data = mx.symbol.Variable('data')
    label = mx.symbol.Variable(name='label')

    # Three conv -> ReLU -> max-pool stages (no BatchNorm in this variant);
    # all convolutions use a 1x4 kernel.
    net = data
    for n_filter, pool_window in ((32, (1, 4)), (64, (1, 2)), (128, (1, 2))):
        net = mx.symbol.Convolution(data=net, kernel=(1, 4), num_filter=n_filter)
        net = mx.symbol.Activation(data=net, act_type="relu")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=pool_window, stride=pool_window)
        print_infer_shape(net)

    # Collapse the remaining extent along the last axis with one wide pool.
    net = mx.symbol.Pooling(data=net, pool_type="max",
                            kernel=(1, 35), stride=(1, 35))
    print_infer_shape(net)

    net = mx.symbol.Flatten(data=net)
    print_infer_shape(net)

    # Classifier head: FC -> ReLU -> FC -> softmax.
    net = mx.symbol.FullyConnected(data=net, num_hidden=500)
    net = mx.symbol.Activation(data=net, act_type="relu")
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, label=label, name='softmax')


def get_lenet_withDropout(num_classes=2):
    """Build the LeNet-style CNN with Dropout (and no BatchNorm).

    LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
    Haffner. "Gradient-based learning applied to document recognition."
    Proceedings of the IEEE (1998)

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes of the final softmax.

    Returns
    -------
    mx.symbol.Symbol
        The SoftmaxOutput symbol named 'softmax'.
    """
    data = mx.symbol.Variable('data')
    label = mx.symbol.Variable(name='label')

    # Three conv -> ReLU -> max-pool stages; all convolutions use a 1x4
    # kernel, only the filter count and the pooling window vary.
    net = data
    for n_filter, pool_window in ((32, (1, 4)), (64, (1, 2)), (128, (1, 2))):
        net = mx.symbol.Convolution(data=net, kernel=(1, 4), num_filter=n_filter)
        net = mx.symbol.Activation(data=net, act_type="relu")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=pool_window, stride=pool_window)
        print_infer_shape(net)

    # Collapse the remaining extent along the last axis with one wide pool.
    net = mx.symbol.Pooling(data=net, pool_type="max",
                            kernel=(1, 35), stride=(1, 35))
    print_infer_shape(net)

    net = mx.symbol.Flatten(data=net)
    print_infer_shape(net)

    # Classifier head with dropout regularization before the final FC.
    net = mx.symbol.FullyConnected(data=net, num_hidden=500)
    net = mx.symbol.Activation(data=net, act_type="relu")
    net = mx.symbol.Dropout(data=net, p=0.5)
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, label=label, name='softmax')


def get_neonscript_symbol(num_classes=2):
    """Build the "neon script" CNN classifier (no recurrent head).

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes of the final softmax.

    Returns
    -------
    mx.symbol.Symbol
        The SoftmaxOutput symbol named 'softmax'.
    """
    data = mx.symbol.Variable('data')
    label = mx.symbol.Variable(name='label')
    print_infer_shape(data)

    # Three downsampling conv -> BN -> ReLU -> max-pool stages with
    # per-stage (conv kernel, conv stride, filters, pool window).
    stages = (((3, 5), (1, 4), 64, (2, 2)),
              ((3, 3), (1, 2), 128, (2, 2)),
              ((3, 3), (1, 2), 256, (1, 2)))
    net = data
    for conv_kernel, conv_stride, n_filter, pool_window in stages:
        net = mx.symbol.Convolution(data=net, kernel=conv_kernel,
                                    num_filter=n_filter, stride=conv_stride)
        net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
        net = mx.symbol.Activation(data=net, act_type="relu")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=pool_window, stride=pool_window)
        print_infer_shape(net)

    # Fourth conv stage has no pooling.
    net = mx.symbol.Convolution(data=net, kernel=(2, 2), num_filter=512, stride=(1, 1))
    net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
    net = mx.symbol.Activation(data=net, act_type="relu")
    print_infer_shape(net)

    net = mx.symbol.Flatten(data=net)
    print_infer_shape(net)

    # Classifier head: FC -> ReLU -> BN -> FC -> softmax.
    net = mx.symbol.FullyConnected(data=net, num_hidden=500)
    net = mx.symbol.Activation(data=net, act_type="relu")
    net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, label=label, name='softmax')


def get_neonscriptwithrnn_symbol(num_classes=2):
    """Build the "neon script" CNN with a 3-layer LSTM head.

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes of the final softmax.

    Returns
    -------
    mx.symbol.Symbol
        The SoftmaxOutput symbol named 'softmax'.
    """
    data = mx.symbol.Variable('data')
    label = mx.symbol.Variable(name='label')
    # Externally supplied LSTM initial states; the first two axes are
    # swapped before feeding them to mx.symbol.RNN.
    # NOTE(review): presumably batch-major -> layer-major; confirm against
    # the data iterator that provides 'LSTM_init_h' / 'LSTM_init_c'.
    init_h = mx.sym.SwapAxis(mx.sym.Variable('LSTM_init_h'), dim1=0, dim2=1)
    init_c = mx.sym.SwapAxis(mx.sym.Variable('LSTM_init_c'), dim1=0, dim2=1)
    lstm_params = mx.sym.Variable('LSTM_bias')

    print_infer_shape(data)

    # Three downsampling conv -> BN -> ReLU -> max-pool stages with
    # per-stage (conv kernel, conv stride, filters, pool window).
    stages = (((3, 5), (1, 4), 64, (2, 2)),
              ((3, 3), (1, 2), 128, (2, 2)),
              ((3, 3), (1, 2), 256, (1, 2)))
    net = data
    for conv_kernel, conv_stride, n_filter, pool_window in stages:
        net = mx.symbol.Convolution(data=net, kernel=conv_kernel,
                                    num_filter=n_filter, stride=conv_stride)
        net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
        net = mx.symbol.Activation(data=net, act_type="relu")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=pool_window, stride=pool_window)
        print_infer_shape(net)

    # Fourth conv stage has no pooling.
    net = mx.symbol.Convolution(data=net, kernel=(2, 2), num_filter=512, stride=(1, 1))
    net = mx.symbol.BatchNorm(data=net, fix_gamma=False, momentum=0.9, eps=1e-5)
    net = mx.symbol.Activation(data=net, act_type="relu")
    print_infer_shape(net)

    # Rearrange the 512-channel feature map into a sequence for the RNN.
    net = mx.symbol.Reshape(data=net, shape=(1, 512, -1))
    net = mx.symbol.SwapAxis(data=net, dim1=0, dim2=1)
    net = mx.symbol.SwapAxis(data=net, dim1=0, dim2=2)

    net = mx.symbol.RNN(data=net, bidirectional=False, num_layers=3, mode='lstm',
                        state_size=128, state_outputs=False, state=init_h,
                        state_cell=init_c, parameters=lstm_params)
    print_infer_shape(net)
    net = mx.symbol.SwapAxis(data=net, dim1=0, dim2=1)
    print_infer_shape(net)
    # Sum over axis 1 divided by 177 -- presumably the sequence length,
    # turning the sum into a mean over time; TODO confirm.
    net = mx.symbol.sum(data=net, axis=1) / 177.
    print_infer_shape(net)

    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=net, label=label, name='softmax')
