import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, optimizers, losses, metrics, callbacks
from python_ai.common.xcommon import *
from sklearn.model_selection import train_test_split


class Conv_BN_LeakyReLU(keras.Model):
    """Conv2D -> BatchNormalization -> LeakyReLU, the basic DarkNet building block.

    Same constructor signature and `call(x, training=None)` contract as a plain
    Keras layer stack; `training` is forwarded so batch-norm statistics are
    updated only during training.
    """

    def __init__(self, out_channels, ksize=(3, 3), strides=(1, 1), padding='same', **kwargs):
        super().__init__(**kwargs)
        stack = [
            layers.Conv2D(out_channels, ksize, strides, padding),
            layers.BatchNormalization(),
            layers.LeakyReLU(),
        ]
        self.convs = keras.Sequential(stack)

    def call(self, x, training=None):
        # Delegate to the inner stack, propagating the training flag for BN.
        return self.convs(x, training=training)


class DarkNet_19(keras.Model):
    """DarkNet-19 image classifier.

    Six convolutional stages (stride-2 max-pooling after stages 1-5), then a
    1x1 convolution producing `num_classes` channels followed by global
    average pooling. Output is raw logits of shape (batch, num_classes) —
    no softmax is applied here.
    """

    def __init__(self, num_classes=1000, **kwargs):
        print("Initializing the darknet19 network ......")

        super().__init__(**kwargs)

        # Build one stage from a list of (out_channels, kernel) conv specs,
        # optionally closed by a stride-2 max-pool that halves H and W.
        def stage(spec, pool=True):
            parts = [Conv_BN_LeakyReLU(c, k) for c, k in spec]
            if pool:
                parts.append(layers.MaxPooling2D((2, 2), (2, 2), 'same'))
            return keras.Sequential(parts)

        k3, k1 = (3, 3), (1, 1)

        # output : stride = 2, c = 32
        self.conv_1 = stage([(32, k3)])
        # output : stride = 4, c = 64
        self.conv_2 = stage([(64, k3)])
        # output : stride = 8, c = 128 (3x3 / 1x1 bottleneck / 3x3)
        self.conv_3 = stage([(128, k3), (64, k1), (128, k3)])
        # output : stride = 16, c = 256
        self.conv_4 = stage([(256, k3), (128, k1), (256, k3)])
        # output : stride = 32, c = 512
        self.conv_5 = stage([(512, k3), (256, k1), (512, k3), (256, k1), (512, k3)])
        # output : stride = 32, c = 1024 (no pooling after the last backbone stage)
        self.conv_6 = stage([(1024, k3), (512, k1), (1024, k3), (512, k1), (1024, k3)], pool=False)

        # 1x1 conv acts as the classification head; GAP collapses H and W.
        self.conv_7 = layers.Conv2D(num_classes, (1, 1), (1, 1), 'same')
        self.avgpool = layers.GlobalAvgPool2D()
        # self.fc = layers.Dense(num_classes)

    def call(self, x, training=None):
        """Forward pass; returns per-class logits."""
        for block in (self.conv_1, self.conv_2, self.conv_3,
                      self.conv_4, self.conv_5, self.conv_6, self.conv_7):
            x = block(x, training=training)
        return self.avgpool(x, training=training)

if __name__ == '__main__':

    # Enable on-demand GPU memory growth so TF does not reserve all VRAM
    # up front. Must happen before any GPU is initialized, hence RuntimeError.
    physical_gpus = tf.config.experimental.list_physical_devices('GPU')
    if physical_gpus:
        try:
            for gpu in physical_gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)

    import os

    # Console banner (sep presumably comes from python_ai.common.xcommon's
    # star import — TODO confirm).
    sep()
    sep('cat and dog')
    sep()

    # Hyper-parameters and experiment bookkeeping.
    BATCH_SIZE = 4
    N_EPOCHS = 8
    ALPHA = 1e-3  # learning rate
    VER = 'v1.0'

    data_path = r'../../../../large_data/DL1/_many_files/zoo'
    IMG_H = 100  # target image height after resize
    IMG_W = 100  # target image width after resize

    FILE_NAME = os.path.basename(__file__)
    LOG_DIR = os.path.join('_log', FILE_NAME, VER)


    def get_pic_data(dir_path):
        """Load every image in *dir_path*, resized to (IMG_H, IMG_W), scaled to [0, 1].

        Returns a float32 array of shape (n_images, IMG_H, IMG_W, 3); channel
        order is whatever cv.imread produces (BGR by OpenCV convention —
        consistent for all images, so fine for training from scratch).
        Unreadable/non-image files are skipped with a warning instead of
        crashing on cv.resize(None, ...).
        """
        images = []
        for file in os.listdir(dir_path):
            path = os.path.join(dir_path, file)
            img = cv.imread(path, cv.IMREAD_COLOR)
            if img is None:
                print('skipping unreadable image:', path)
                continue
            # cv.resize takes dsize as (width, height). The original passed
            # (IMG_H, IMG_W), which was only correct because IMG_H == IMG_W.
            img = cv.resize(img, (IMG_W, IMG_H))
            images.append(img.astype(np.float32) / 255.)
        return np.float32(images)

    # Load the two classes; labels are 0 = cat, 1 = dog.
    x_cat = get_pic_data(os.path.join(data_path, 'cat'))
    print('x_cat:', x_cat.shape)
    n_cat = x_cat.shape[0]
    x_dog = get_pic_data(os.path.join(data_path, 'dog'))
    print('x_dog:', x_dog.shape)
    n_dog = x_dog.shape[0]
    x = np.concatenate([x_cat, x_dog], axis=0)
    # x = np.transpose(x, [0, 3, 1, 2])  # not needed by tf
    y_cat = np.zeros(n_cat, dtype=np.int32)
    print('y_cat:', y_cat.shape)
    y_dog = np.ones(n_dog, dtype=np.int32)
    print('y_dog:', y_dog.shape)
    y = np.concatenate([y_cat, y_dog], axis=0)
    print('x:', x.shape)
    print('y:', y.shape)

    # 80 / 10 / 10 split; fixed random_state keeps the split reproducible.
    x_train, x_val_test, y_train, y_val_test = train_test_split(
        x, y, train_size=0.8, random_state=1, shuffle=True)
    x_val, x_test, y_val, y_test = train_test_split(
        x_val_test, y_val_test, train_size=0.5, random_state=1, shuffle=True)
    print('x_train', x_train.shape)
    print('x_val', x_val.shape)
    print('x_test', x_test.shape)
    print('y_train', y_train.shape)
    print('y_val', y_val.shape)
    print('y_test', y_test.shape)

    AUTOTUNE = tf.data.experimental.AUTOTUNE

    # shuffle -> batch -> prefetch; only training drops the ragged last batch.
    def make_pipeline(features, labels, drop_remainder=False):
        ds = tf.data.Dataset.from_tensor_slices((features, labels))
        return ds.shuffle(1000).batch(BATCH_SIZE, drop_remainder=drop_remainder).prefetch(AUTOTUNE)

    dl_train = make_pipeline(x_train, y_train, drop_remainder=True)
    dl_test = make_pipeline(x_test, y_test)
    dl_val = make_pipeline(x_val, y_val)

    model = DarkNet_19(2)
    model.build(input_shape=(None, IMG_H, IMG_W, 3))
    model.summary()

    # Model emits raw logits, so the loss must apply softmax internally.
    model.compile(
        optimizer=optimizers.Adam(learning_rate=ALPHA),
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[metrics.sparse_categorical_accuracy],
    )

    tb = callbacks.TensorBoard(LOG_DIR, update_freq='batch', profile_batch=0)
    model.fit(dl_train,
              epochs=N_EPOCHS,
              validation_data=dl_val,
              callbacks=[tb],
              )

    model.evaluate(dl_test)
