import argparse

from tensorflow.python import keras
import tvm
import tvm.relay as relay
# from tensorflow.keras import layers
# from tensorflow.keras import Model
from tensorflow.keras.layers import Dropout, Activation, Flatten
# import json
import tensorflow as tf
# from keras import backend as K

from loguru import logger
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, AveragePooling2D, BatchNormalization, Input, Add, Dense, ZeroPadding2D, MaxPooling2D

# from keras.layers.convolutional import Convolution2D, MaxPooling2D
from tensorflow.keras.regularizers import l2

from keras_model_impl import ACGAN, BGAN, GAN, BiGAN, Generator, SqueezeNet, UNet, ZFNet, LeNet, AlexNet
# from classification_models.keras import Classifiers
from inception_v4 import create_inception_v4

# from nfnets_keras import NFNetF3
from keras_cv_attention_models import gated_mlp, aotnet
from nasnet import NASNetMobile

# NOTE(review): the original file had a bare `aotnet.AotNet101` expression
# statement here — an attribute access whose result was discarded (a no-op
# leftover). Kept only as a comment for reference:
# aotnet.AotNet101

# Wide-ResNet hyperparameters (http://arxiv.org/abs/1605.07146).
depth = 34  # table 5 on page 8 indicates best value (4.17) CIFAR-10
k = 2  # 'widen_factor'; table 5 on page 8 indicates best value (4.17) CIFAR-10
dropout_probability = 0  # table 6 on page 10 indicates best value (4.17) CIFAR-10

use_bias = False  # following functions 'FCinit(model)' and 'DisableBias(model)' in utils.lua
weight_init = "he_normal"  # follows the 'MSRinit(model)' function in utils.lua

weight_decay = 5e-4  # L2 regularization factor applied to conv/dense kernels
# Keras specific
channel_axis = -1  # channels-last (NHWC) layout
image_size = 224
nb_classes = 10
input_shape = (image_size, image_size, 3)


# Wide residual network http://arxiv.org/abs/1605.07146
def _wide_basic(n_input_plane, n_output_plane, stride):
    def f(net):
        # format of conv_params:
        #               [ [nb_col="kernel width", nb_row="kernel height",
        #               subsample="(stride_vertical,stride_horizontal)",
        #               border_mode="same" or "valid"] ]
        # B(3,3): orignal <<basic>> block
        conv_params = [[3, 3, stride, "same"], [3, 3, (1, 1), "same"]]

        n_bottleneck_plane = n_output_plane

        # Residual block
        for i, v in enumerate(conv_params):
            if i == 0:
                if n_input_plane != n_output_plane:
                    net = BatchNormalization(axis=channel_axis)(net)
                    net = Activation("relu")(net)
                    convs = net
                else:
                    convs = BatchNormalization(axis=channel_axis)(net)
                    convs = Activation("relu")(convs)
                convs = Conv2D(n_bottleneck_plane, (v[0], v[1]),
                               strides=v[2],
                               padding=v[3],
                               kernel_initializer=weight_init,
                               kernel_regularizer=l2(weight_decay),
                               use_bias=use_bias)(convs)
            else:
                convs = BatchNormalization(axis=channel_axis)(convs)
                convs = Activation("relu")(convs)
                if dropout_probability > 0:
                    convs = Dropout(dropout_probability)(convs)
                convs = Conv2D(n_bottleneck_plane, (v[0], v[1]),
                               strides=v[2],
                               padding=v[3],
                               kernel_initializer=weight_init,
                               kernel_regularizer=l2(weight_decay),
                               use_bias=use_bias)(convs)

        # Shortcut Conntection: identity function or 1x1 convolutional
        #  (depends on difference between input & output shape - this
        #   corresponds to whether we are using the first block in each
        #   group; see _layer() ).
        if n_input_plane != n_output_plane:
            shortcut = Conv2D(n_output_plane, (1, 1),
                              strides=stride,
                              padding="same",
                              kernel_initializer=weight_init,
                              kernel_regularizer=l2(weight_decay),
                              use_bias=use_bias)(net)
        else:
            shortcut = net

        return Add()([convs, shortcut])

    return f


# "Stacking Residual Units on the same stage"
def _layer(block, n_input_plane, n_output_plane, count, stride):
    def f(net):
        net = block(n_input_plane, n_output_plane, stride)(net)
        for i in range(2, int(count + 1)):
            net = block(n_output_plane, n_output_plane, stride=(1, 1))(net)
        return net

    return f


def create_model(depth, k):
    """Build a Wide-ResNet (WRN-depth-k) classifier over `input_shape` inputs.

    Args:
        depth: total network depth; must be of the form 6n + 4
            (4 fixed layers plus 2n conv layers in each of the 3 stages).
        k: widen factor multiplying the channel count of each stage.

    Returns:
        An uncompiled Keras `Model` mapping an image batch to `nb_classes`
        softmax probabilities.

    Raises:
        ValueError: if `depth` is not of the form 6n + 4.
    """
    logger.debug("Creating model...")

    # Explicit exception instead of `assert` so validation survives `-O`;
    # `//` keeps `n` an int (the old `/` produced a float block count).
    if (depth - 4) % 6 != 0:
        raise ValueError("depth must satisfy (depth - 4) % 6 == 0, got {}".format(depth))
    n = (depth - 4) // 6

    inputs = Input(shape=input_shape)

    # Channel widths: a fixed stem of 16, then three widened stages.
    n_stages = [16, 16 * k, 32 * k, 64 * k]

    conv1 = Conv2D(n_stages[0], (3, 3),
                   strides=1,
                   padding="same",
                   kernel_initializer=weight_init,
                   kernel_regularizer=l2(weight_decay),
                   use_bias=use_bias)(inputs)  # "One conv at the beginning (spatial size: 32x32)"

    # Add wide residual blocks
    block_fn = _wide_basic
    conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n,
                   stride=(1, 1))(conv1)  # "Stage 1 (spatial size: 32x32)"
    conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n,
                   stride=(2, 2))(conv2)  # "Stage 2 (spatial size: 16x16)"
    conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n,
                   stride=(2, 2))(conv3)  # "Stage 3 (spatial size: 8x8)"

    # Final pre-activation before the classifier head.
    batch_norm = BatchNormalization(axis=channel_axis)(conv4)
    relu = Activation("relu")(batch_norm)

    # Classifier block
    pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
    flatten = Flatten()(pool)
    predictions = Dense(units=nb_classes,
                        kernel_initializer=weight_init,
                        use_bias=use_bias,
                        kernel_regularizer=l2(weight_decay),
                        activation="softmax")(flatten)

    model = Model(inputs=inputs, outputs=predictions)
    return model


def vgg11():
    """Build an (uncompiled) VGG11-style Sequential CNN.

    Input: single-channel 224x224 images with a static batch size of 1
    (the fixed batch size keeps the graph fully shape-specified, which the
    TVM importer in run() relies on).
    Output: softmax over 30 classes.

    Returns:
        The uncompiled Keras `Sequential` model.
    """
    # number of convolutional filters to use in the first stage
    nb_filters = 64
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 3
    # output classes (OR len(set(train_labels)))
    nb_classes = 30
    # NOTE(review): removed unused locals img_rows/img_cols, batch_size and
    # nb_epoch — they were training parameters never referenced here.

    model = Sequential()
    model.add(Input(shape=(224, 224, 1), batch_size=1, name="input_1"))
    # CNN-1: 3-64x2
    model.add(Conv2D(nb_filters, nb_conv, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(
        nb_filters,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=nb_pool))

    # CNN-2: 3-128x2
    model.add(Conv2D(
        nb_filters * 2,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(Conv2D(
        nb_filters * 2,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=nb_pool))

    # CNN-3: 3-256x2
    model.add(Conv2D(
        nb_filters * 4,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(Conv2D(
        nb_filters * 4,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=nb_pool))

    # CNN-4: 3-512x2
    model.add(Conv2D(
        nb_filters * 8,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(Conv2D(
        nb_filters * 8,
        nb_conv,
        padding='same',
    ))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=nb_pool, strides=(2, 2)))

    # FC-1024x2 Fully connected layers
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # FC-30 Last layer
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    return model


def run(args):
    """Build the selected Keras model and compile it with TVM for arm64 Android.

    Writes "<model_name>.so" (library cross-linked with the Android NDK
    clang++) and "<model_name>.json" (graph JSON) to the current directory.

    Args:
        args: parsed CLI namespace; only `args.opt_level` is used here.
    """
    # NOTE(review): the name still says "AotNet200V2" but the model exported
    # below is NASNetMobile — confirm the intended artifact name before
    # renaming, since downstream scripts may expect these filenames.
    model_name = "AotNet200V2-ARM"

    # Input shape fed to the TVM importer; alternatives for the other models:
    # shape_dict = {"input_1": (1, 100)}
    # shape_dict = {"input_1": (1, 224, 224, 1)}
    # shape_dict = {"input_1": (1, 256, 256, 1)}
    # shape_dict = {"input_1": (1, 299, 299, 3)}
    # shape_dict = {"input_1": (1, 28, 28, 1)}
    shape_dict = {"input_1": (1, 224, 224, 3)}

    # Build the model on CPU (no GPU memory touched during construction).
    with tf.device("/cpu:0"):
        # Other candidate models kept for quick switching:
        # model = LeNet()
        # model = SqueezeNet()
        # model = create_inception_v4()
        # model = AlexNet()
        # model = ACGAN() / BGAN() / BiGAN() / UNet() / ZFNet()
        # model = create_model(16, 4) / create_model(28, 10) / create_model(34, 2)
        # model = vgg11()
        # model = tf.keras.applications.MobileNetV2()
        # model = gated_mlp.GMLPB16(activation="relu")
        # model = aotnet.AotNet200V2()  # NOTE(review): was built unconditionally
        #                               # and immediately discarded — now disabled.
        model = NASNetMobile()
        # model.summary()

    target = "llvm -mtriple=arm64-linux-android"  # "llvm -mcpu=skylake-avx512"
    # target = "llvm -mcpu=aarch64"
    mod, params = relay.frontend.from_keras(model, shape_dict, layout="NHWC")
    with tvm.transform.PassContext(opt_level=args.opt_level):
        lib = relay.build(mod, target, params=params)

    # Cross-link with the Android NDK toolchain so the .so targets arm64 Android.
    ndk_path = "/home/kxp/workspace/android-toolchain-arm64/bin/aarch64-linux-android-clang++"
    lib.export_library("{}.so".format(model_name), cc=ndk_path)

    # Dump the graph JSON next to the shared library.
    json_str = lib["get_json"]()
    with open("{}.json".format(model_name), "w") as f:
        f.write(json_str)


if __name__ == "__main__":
    # CLI entry point: configure arguments, parse, and hand off to run().
    cli = argparse.ArgumentParser(description='TVM Reversion')
    cli.add_argument("--at", default="relu", type=str)  # activation type
    cli.add_argument("-o", "--output", default="out.so", type=str)
    cli.add_argument("-ol", "--opt_level", default=0, type=int)
    cli.set_defaults(show_failure_case=False)
    run(cli.parse_args())

# BGAN
