"""
Tensorflow 2.x
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
import numpy as np
from python_ai.common.xcommon import *


class ConvBnLeakyRelu(keras.Model):
    """Darknet-style building block: Conv2D -> BatchNorm -> LeakyReLU.

    Args:
        filters: number of convolution filters.
        ksize: convolution kernel size, default (3, 3).
        strides: convolution strides, default (1, 1).
        padding: padding mode passed to Conv2D, default 'same'.
    """

    def __init__(self, filters, ksize=(3, 3), strides=(1, 1), padding='same', **kwargs):
        super().__init__(**kwargs)
        stack = [
            layers.Conv2D(filters, ksize, strides, padding),
            layers.BatchNormalization(),
            layers.LeakyReLU(),
        ]
        self.seq = keras.Sequential(stack)

    def call(self, inputs, training=None):
        # `training` must be forwarded so BatchNorm switches between
        # batch statistics (training) and moving averages (inference).
        return self.seq(inputs, training=training)


class Residual(keras.Model):
    """A stack of `n_num` residual units.

    Each unit computes x + f(x), where f is a 1x1 bottleneck conv with
    `filters // 2` channels followed by a 3x3 conv back to `filters`
    channels (both Conv-BN-LeakyReLU).
    """

    def __init__(self, filters, n_num, **kwargs):
        super().__init__(**kwargs)
        # Keras tracks lists of sub-models assigned to attributes.
        self.modelList = [
            keras.Sequential([
                ConvBnLeakyRelu(filters // 2, (1, 1)),
                ConvBnLeakyRelu(filters, (3, 3)),
            ])
            for _ in range(n_num)
        ]

    def call(self, x, training=None):
        for unit in self.modelList:
            x = x + unit(x, training=training)
        return x


class Darknet53(keras.Model):
    """Darknet-53 classification backbone (the YOLOv3 feature extractor).

    Five stride-2 downsampling convolutions interleaved with residual
    stacks of depth 1/2/8/8/4, followed by global average pooling and a
    linear classification head.

    Args:
        n_cls: number of output classes (the head emits raw logits).
    """

    def __init__(self, n_cls, **kwargs):
        super().__init__(**kwargs)

        # Every downsampling stage uses a 3x3 conv with stride 2.
        down = dict(ksize=(3, 3), strides=(2, 2))

        self.conv1 = ConvBnLeakyRelu(32)
        self.conv2 = ConvBnLeakyRelu(64, **down)
        self.res3 = Residual(64, 1)

        self.conv4 = ConvBnLeakyRelu(128, **down)
        self.res5_6 = Residual(128, 2)

        self.conv7 = ConvBnLeakyRelu(256, **down)
        self.res8_15 = Residual(256, 8)

        self.conv16 = ConvBnLeakyRelu(512, **down)
        self.res17_24 = Residual(512, 8)

        self.conv25 = ConvBnLeakyRelu(1024, **down)
        self.res26_29 = Residual(1024, 4)

        # NOTE(review): attribute name keeps the original "avgPoll" typo
        # so any external code reaching into the model keeps working.
        self.avgPoll = layers.GlobalAvgPool2D()
        self.fc = layers.Dense(n_cls)

    def call(self, inputs, training=None):
        # The whole backbone is a straight pipeline: apply each stage in order.
        stages = (
            self.conv1, self.conv2, self.res3,
            self.conv4, self.res5_6,
            self.conv7, self.res8_15,
            self.conv16, self.res17_24,
            self.conv25, self.res26_29,
            self.avgPoll,
        )
        x = inputs
        for stage in stages:
            x = stage(x, training=training)
        return self.fc(x, training=training)


if '__main__' == __name__:

    import os
    from sklearn.model_selection import train_test_split

    # Let TF grow GPU memory on demand instead of grabbing it all up front.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    print(gpus)
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            # Memory growth must be set before the GPUs are initialized.
            print(e)

    # Smoke-test the backbone at the canonical 416x416 Darknet resolution.
    model = Darknet53(2)
    model.build(input_shape=(None, 416, 416, 3))
    model.summary()

    h = model(np.zeros([4, 416, 416, 3]), training=True)
    print(h)

    # NOTE(review): `sep` here and `cv` below presumably come from the
    # wildcard `python_ai.common.xcommon` import — TODO confirm.
    sep()
    sep('cat and dog')
    sep()

    BATCH_SIZE = 4
    N_EPOCHS = 8
    # ALPHA = 0.1  # Use exponential decay
    VER = 'v1.6'

    data_path = r'../../../../large_data/DL1/_many_files/zoo'
    IMG_H = 100
    IMG_W = 100

    FILE_NAME = os.path.basename(__file__)
    LOG_DIR = os.path.join('_log', FILE_NAME, VER)


    def get_pic_data(folder):
        """Load every readable image in `folder`.

        Each image is resized to (IMG_H, IMG_W), converted to float32 and
        scaled from [0, 255] to [-1, 1]. Unreadable/non-image files are
        skipped instead of crashing `cv.resize` with a cryptic error.

        Returns a float32 array of shape (N, IMG_H, IMG_W, 3) in BGR order.
        """
        x = []
        for file in os.listdir(folder):
            path = os.path.join(folder, file)
            img = cv.imread(path, cv.IMREAD_COLOR)
            if img is None:  # imread returns None for unreadable files
                print('skip unreadable file:', path)
                continue
            # cv.resize takes dsize as (width, height) — order matters
            # whenever IMG_H != IMG_W.
            img = cv.resize(img, (IMG_W, IMG_H))
            img = img.astype(np.float32) / 255. * 2. - 1.
            x.append(img)
        return np.float32(x)

    # Build the binary dataset: label 0 = cat, label 1 = dog.
    x_cat = get_pic_data(os.path.join(data_path, 'cat'))
    print('x_cat:', x_cat.shape)
    n_cat = len(x_cat)
    x_dog = get_pic_data(os.path.join(data_path, 'dog'))
    print('x_dog:', x_dog.shape)
    n_dog = len(x_dog)
    x = np.concatenate([x_cat, x_dog], axis=0)
    # x = np.transpose(x, [0, 3, 1, 2])  # not needed by tf (channels-last)
    y_cat = np.full([n_cat], 0, dtype=np.int32)
    print('y_cat:', y_cat.shape)
    y_dog = np.full([n_dog], 1, dtype=np.int32)
    print('y_dog:', y_dog.shape)
    y = np.concatenate([y_cat, y_dog], axis=0)
    print('x:', x.shape)
    print('y:', y.shape)

    # 80% train, then split the remainder 50/50 into val and test.
    x_train, x_val_test, y_train, y_val_test = train_test_split(x, y, train_size=0.8, random_state=1, shuffle=True)
    x_val, x_test, y_val, y_test = train_test_split(x_val_test, y_val_test, train_size=0.5, random_state=1, shuffle=True)
    print('x_train', x_train.shape)
    print('x_val', x_val.shape)
    print('x_test', x_test.shape)
    print('y_train', y_train.shape)
    print('y_val', y_val.shape)
    print('y_test', y_test.shape)

    dl_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(1000)\
        .batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)
    dl_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).shuffle(1000)\
        .batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
    dl_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)).shuffle(1000)\
        .batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)

    # Exponential learning-rate decay: lr *= 0.1 every 100 steps (staircase).
    lr = optimizers.schedules.ExponentialDecay(
        initial_learning_rate=1e-4,
        decay_steps=100,
        decay_rate=0.1,
        staircase=True,
    )

    # Model emits raw logits, hence from_logits=True.
    model.compile(
        optimizer=optimizers.Adam(learning_rate=lr),
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[metrics.sparse_categorical_accuracy]
    )

    model.fit(dl_train,
              epochs=N_EPOCHS,
              validation_data=dl_val,
              callbacks=[callbacks.TensorBoard(LOG_DIR, update_freq='batch', profile_batch=0)]
              )

    model.evaluate(dl_test)
