import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ReLU, BatchNormalization, Flatten, Dense
from tensorflow.keras import losses, optimizers, metrics, callbacks, activations


def ConvBnReluList(filters, ksize=(3, 3), strides=(1, 1), padding='same'):
    """Return the standard Conv2D -> BatchNorm -> ReLU layer triple.

    The convolution is created without a bias term because the following
    BatchNormalization layer supplies its own learned offset (beta), which
    would make a conv bias redundant.
    """
    conv = Conv2D(filters, ksize, strides, padding, use_bias=False)
    norm = BatchNormalization()
    act = ReLU()
    return [conv, norm, act]


# Canonical VGG16 layout encoded as a flat list; consumed by Vgg16ByList.
# See Vgg16ConfReadme below for the element encoding.
Vgg16Conf = [
    64, 64, 'm',           # conv block 1: two 64-filter Conv-BN-ReLU + max-pool
    128, 128, 'm',         # conv block 2
    256, 256, 256, 'm',    # conv block 3
    512, 512, 512, 'm',    # conv block 4
    512, 512, 512, 'm',    # conv block 5
    'f', 0.75, 0.75        # flatten, then two FC layers each 0.75x the previous width
]
Vgg16ConfReadme = """Integer for ConvBnRelu with that many filters; Float for that many neuron's FC; letter m for maxPool; letter f for flatten."""


class Vgg16ByList(keras.Model):
    """VGG16-style CNN assembled from a flat configuration list.

    Each element of ``conf`` describes one stage (see ``Vgg16ConfReadme``):
    an ``int`` appends a Conv-BN-ReLU triple with that many filters, ``'m'``
    appends a stride-2 max-pool, ``'f'`` appends a Flatten, and a ``float``
    appends a fully-connected ReLU layer whose width is that fraction of the
    previous layer's output size.  A softmax head with ``n_cls`` units is
    always appended last.
    """

    def __init__(self, n_cls, input_shape=(224, 224, 3), conf=Vgg16Conf, **kwargs):
        """Build the layer list from ``conf``.

        Args:
            n_cls: number of output classes (size of the softmax head).
            input_shape: (H, W, C) of one input sample; used only to track
                the flattened feature count for sizing the Dense layers.
            conf: configuration list; defaults to the canonical VGG16 layout.

        Raises:
            ValueError: on a config element that is not int/float/'m'/'f'.
        """
        super().__init__(**kwargs)
        # Visualization state: when self.visual is True, call() captures the
        # per-channel feature maps of the first self.m samples of a batch.
        self.visual_data = None
        self.visual = False
        # Fix: previously self.m was never initialized, so enabling
        # self.visual without assigning m externally raised AttributeError.
        self.m = 1

        H, W, C = input_shape
        self.layersList = []
        n_output = None  # width of the most recent Dense layer; None before the first one
        for conf_el in conf:
            if 'm' == conf_el:
                layers = [MaxPooling2D(strides=(2, 2), padding='same')]
                # 'same' padding halves the spatial dims (floor); clamp at 1
                # so tiny inputs never collapse to zero.
                H = max(1, H // 2)
                W = max(1, W // 2)
            elif isinstance(conf_el, float):
                # The first Dense sees the flattened H*W*C features; later
                # ones shrink relative to the previous Dense width.
                n_output = (H * W * C) if n_output is None else n_output
                n_neuron = int(np.ceil(n_output * conf_el))
                n_output = n_neuron
                print('n_neuron', n_neuron)  # tmp
                layers = [Dense(n_neuron, activation=activations.relu)]
            elif 'f' == conf_el:
                layers = [Flatten()]
            elif isinstance(conf_el, int):
                layers = ConvBnReluList(conf_el)
                C = conf_el  # channel count tracks the latest conv's filters
            else:
                raise ValueError("Invalid config element {} ".format(conf_el) + Vgg16ConfReadme)
            self.layersList.extend(layers)

        self.layersList.append(Dense(n_cls, activation=activations.softmax))

    def call(self, inputs, training=None, mask=None):
        """Forward pass; optionally capture feature maps for visualization.

        When ``self.visual`` is True, stores into ``self.visual_data`` a
        nested list indexed [layer][sample][channel] of 2-D tensors for the
        first ``self.m`` samples, covering only layers with 4-D output
        (conv/pool feature maps; Dense/Flatten outputs are skipped).
        """
        x = inputs

        if self.visual:
            data = []

        id_layer = 0
        for layer in self.layersList:
            x = layer(x, training=training)
            if self.visual:
                id_layer += 1  # counts every layer, including skipped ones
                layer_data = []
                if len(x.shape) != 4:
                    continue  # only 4-D (batch, H, W, C) outputs are captured
                print('visual: id_layer:', id_layer)
                n_ch = x.shape[3]
                for id_sample in range(self.m):
                    print('visual: id_layer, id_sample:', id_layer, id_sample)
                    sample_data = []
                    for id_ch in range(n_ch):
                        sample_data.append(x[id_sample, :, :, id_ch])
                    layer_data.append(sample_data)
                data.append(layer_data)

        if self.visual:
            self.visual_data = data
        return x


if '__main__' == __name__:
    # Training / evaluation / visualization driver for Vgg16ByList on CIFAR-10.
    # Data arrives from a local redis instance via project-specific helpers.
    from python_ai.category.data.tf2x_cifar10_to_redis import KEY_PREFIX, UINT8_SUFFIX, FLT32_SUFFIX
    from python_ai.category.redis.conn.local_redis import r as redis
    from python_ai.common.read_data.redis_numpy import toRedisNdLg, fromRedisNdLg
    import matplotlib.pyplot as plt
    from python_ai.common.xcommon import *
    import os
    import cv2 as cv

    VER = 'v1.0'  # version tag used in checkpoint and visualization paths
    ALPHA = 0.001  # Adam learning rate
    BATCH_SIZE = 128
    N_EPOCHS = 1  # epochs to train in this run (only when GO_ON)
    START_EPOCH = 4  # resume from this epoch's checkpoint; None = start fresh
    GO_ON = 0  # 1 = train then evaluate; 0 = evaluate/visualize only
    VISUAL = 0  # 1 = dump per-channel feature-map images to disk
    VISUAL_SMAPLES = 1  # NOTE(review): typo for VISUAL_SAMPLES; samples per batch to capture
    if START_EPOCH is None:
        GO_ON = 1  # nothing to resume from, so always train

    BASE_DIR, FILE_NAME = os.path.split(__file__)

    def get_weight_name(epoch):
        # Checkpoint path: <script dir>/_save/<script name>/<VER>/weight[_<epoch>]
        weight_name = 'weight'
        if epoch is not None:
            weight_name += '_' + str(epoch)

        path = os.path.join('_save', FILE_NAME, VER, weight_name)
        path = os.path.join(BASE_DIR, path)
        # path = os.path.join('/mnt/d/_const/wsl/_model_save', FILE_NAME, VER, weight_name)
        return path

    # CIFAR-10: 32x32 RGB images, 10 classes.
    model = Vgg16ByList(10, (32, 32, 3))
    model.build((None, 32, 32, 3))
    model.summary()
    model.compile(
        optimizer=optimizers.Adam(learning_rate=ALPHA),
        loss=losses.sparse_categorical_crossentropy,
        metrics=metrics.sparse_categorical_accuracy,
    )

    # fromRedisNdLg presumably deserializes an ndarray stored under the given
    # key by the companion *_to_redis script -- verify against that module.
    # flt32 = normalized inputs for the model; uint8 = raw images for plotting.
    x_test_flt32 = fromRedisNdLg(redis, KEY_PREFIX + 'x_test' + FLT32_SUFFIX)
    x_test_uint8 = fromRedisNdLg(redis, KEY_PREFIX + 'x_test' + UINT8_SUFFIX)
    y_test = fromRedisNdLg(redis, KEY_PREFIX + 'y_test')

    dl_test = tf.data.Dataset.from_tensor_slices((x_test_flt32, y_test)) \
        .shuffle(buffer_size=1024) \
        .batch(batch_size=BATCH_SIZE) \
        .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    # Resume if a checkpoint exists; '.index' is the TF checkpoint marker file.
    START_WEIGHT = get_weight_name(START_EPOCH)
    if os.path.exists(START_WEIGHT + '.index'):
        print(f'Loading weight from {START_WEIGHT} ...')
        model.load_weights(START_WEIGHT)
        print('Loaded.')

    if GO_ON:
        # Training branch: pull train/val splits and fit for N_EPOCHS more.
        x_train_flt32 = fromRedisNdLg(redis, KEY_PREFIX + 'x_train' + FLT32_SUFFIX)
        x_val_flt32 = fromRedisNdLg(redis, KEY_PREFIX + 'x_val' + FLT32_SUFFIX)
        y_train = fromRedisNdLg(redis, KEY_PREFIX + 'y_train')
        y_val = fromRedisNdLg(redis, KEY_PREFIX + 'y_val')

        dl_train = tf.data.Dataset.from_tensor_slices((x_train_flt32, y_train)) \
            .shuffle(buffer_size=1024) \
            .batch(batch_size=BATCH_SIZE) \
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

        dl_val = tf.data.Dataset.from_tensor_slices((x_val_flt32, y_val)) \
            .shuffle(buffer_size=1024) \
            .batch(batch_size=BATCH_SIZE) \
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

        model.fit(
            dl_train,
            epochs=N_EPOCHS,
            validation_data=dl_val,
        )
        # Checkpoint name encodes the cumulative epoch count.
        if START_EPOCH is not None:
            NOW_EPOCH = START_EPOCH + N_EPOCHS
        else:
            NOW_EPOCH = N_EPOCHS
        WEIGHT_PATH = get_weight_name(NOW_EPOCH)
        print(f'Save weight to {WEIGHT_PATH} ...')
        model.save_weights(WEIGHT_PATH)
        print('Saved.')

    model.evaluate(dl_test)

    # Human-readable class names, index-aligned with the label ids.
    names_list = fromRedisNdLg(redis, KEY_PREFIX + 'names')
    print(names_list)

    # plot and visualization
    spr = 4  # subplot rows
    spc = 4  # subplot columns
    spn = 0  # running subplot index
    plt.figure(figsize=[9, 8])
    n_test_set = spr * spc
    x_test_set = x_test_flt32[:n_test_set]
    # Enable feature-map capture inside Vgg16ByList.call for this predict.
    model.m = VISUAL_SMAPLES
    model.visual = True
    # NOTE(review): deprecated API (tf.config.run_functions_eagerly is the
    # replacement); eager mode looks necessary so captured tensors support
    # .numpy() below -- confirm.
    tf.config.experimental_run_functions_eagerly(True)
    h_test_set = model.predict(x_test_set).argmax(axis=1)
    visual_data = model.visual_data

    if VISUAL:
        # Dump each captured channel as a grayscale jpg under
        # _visual/<script>/<VER>/<random run name>/layer_<i>/id<j>/filter<k>.jpg
        rand_name = rand_name_on_now()
        visual_dir = os.path.join('_visual', FILE_NAME, VER, rand_name)
        id_layer = 0
        for layer_data in visual_data:
            id_layer += 1
            print('id_layer', id_layer)
            id_sample = 0
            for sample_data in layer_data:
                print('id_layer, id_sample', id_layer, id_sample)
                id_sample += 1
                sample_path = os.path.join(visual_dir, f'layer_{id_layer}', f'id{id_sample}')
                os.makedirs(sample_path, exist_ok=True)
                id_ch = 0
                for filter_data in sample_data:
                    numpy_data = filter_data.numpy()
                    id_ch += 1
                    path = os.path.join(sample_path, f'filter{id_ch}.jpg')
                    # In-place min-max rescale to [0, 255] before uint8 cast.
                    cv.normalize(numpy_data, numpy_data, 0., 255., norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
                    numpy_data = numpy_data.astype(np.uint8)
                    cv.imwrite(path, numpy_data)

    # Grid of the first 16 test images, titled "true=>predicted (Y/X)".
    for i in range(n_test_set):
        spn += 1
        plt.subplot(spr, spc, spn)
        htype = names_list[h_test_set[i]]  # hypothesis (predicted) class name
        rtype = names_list[y_test[i, 0]]   # real (ground-truth) class name
        right = True if htype == rtype else False
        title = f'{rtype}=>{htype} ({"Y" if right else "X"})'
        plt.title(title, color="black" if right else "red")
        plt.axis('off')
        plt.imshow(x_test_uint8[i])  # raw uint8 image, not the normalized floats

    print('Check and close the plotting window to continue ...')
    plt.show()
