import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ReLU, BatchNormalization, Flatten, Dense
from tensorflow.keras import losses, optimizers, metrics, callbacks, activations


def ConvBnRelu(inputs, filters, ksize=(3, 3), strides=(1, 1), padding='same'):
    """Apply a Conv2D -> BatchNorm -> ReLU stack to `inputs` and return the output tensor.

    The convolution runs without a bias term because the following
    BatchNormalization layer supplies its own learned offset.
    """
    conv_out = Conv2D(filters, ksize, strides=strides, padding=padding, use_bias=False)(inputs)
    normed = BatchNormalization()(conv_out)
    return ReLU()(normed)


# Layer configuration consumed by Vgg16(): the classic VGG-16 stack of
# conv blocks and max-pools, followed by flatten and two FC layers.
Vgg16Conf = [
    64, 64, 'm',
    128, 128, 'm',
    256, 256, 256, 'm',
    512, 512, 512, 'm',
    512, 512, 512, 'm',
    'f', -512, -512
]
# Element encoding for the config list above (also appended to the
# "invalid element" error message). Fixed typo: "nteger" -> "integer".
Vgg16ConfReadme = """Positive integer for ConvBnRelu with that many filters; Negative integer for that many neuron's FC; letter m for maxPool; letter f for flatten."""


def Vgg16(n_cls, input_shape=(224, 224, 3), conf=None):
    """Build a VGG-16 style Keras classification model.

    Args:
        n_cls: number of output classes (width of the final softmax layer).
        input_shape: channels-last input tensor shape.
        conf: layer configuration list; defaults to the module-level
            Vgg16Conf. See Vgg16ConfReadme for the element encoding.

    Returns:
        An uncompiled keras.Model mapping images to class probabilities.

    Raises:
        ValueError: if a config element is zero or of an unknown kind.
    """
    # Avoid a mutable default argument; fall back to the shared config here.
    if conf is None:
        conf = Vgg16Conf
    inputs = keras.Input(input_shape)
    x = inputs
    for conf_el in conf:
        if 'm' == conf_el:
            # Halve spatial resolution (pool_size defaults to (2, 2)).
            x = MaxPooling2D(strides=(2, 2), padding='same')(x)
        elif 'f' == conf_el:
            x = Flatten()(x)
        elif isinstance(conf_el, int):
            if conf_el > 0:
                # Positive integer: conv block with that many filters.
                x = ConvBnRelu(x, conf_el)
            elif conf_el == 0:
                raise ValueError("Filter count of conv layer or neuron number of connected layer cannot be zero!")
            else:
                # Negative integer: fully-connected layer of |conf_el| neurons.
                n_neuron = abs(conf_el)
                x = Dense(n_neuron, activation=activations.relu)(x)
        else:
            raise ValueError("Invalid config element {} ".format(conf_el) + Vgg16ConfReadme)
    x = Dense(n_cls, activation=activations.softmax)(x)
    model = keras.Model(inputs, x)
    return model


if '__main__' == __name__:
    # Project-local modules: CIFAR-10 arrays cached in redis, a local redis
    # connection, numpy<->redis (de)serialization helpers, and the tvts
    # training-tracking utility. None of these exist outside this repo.
    from python_ai.category.data.tf2x_cifar10_to_redis import KEY_PREFIX, UINT8_SUFFIX, FLT32_SUFFIX
    from python_ai.category.redis.conn.local_redis import r as redis
    from python_ai.common.read_data.redis_numpy import toRedisNdLg, fromRedisNdLg
    import matplotlib.pyplot as plt
    import os
    import python_ai.category.tvts.tvts as tvts
    import io
    from PIL import Image
    import re

    # Matches TF checkpoint '<prefix>.index' file paths; group(1) is the prefix.
    get_prefix_from_saved_path_regexp = re.compile(r'^(.+)\.index$')

    def get_prefix_from_saved_path(path):
        """Strip the trailing '.index' from a checkpoint path, returning the prefix.

        Raises if `path` does not end in '.index'.
        """
        m = get_prefix_from_saved_path_regexp.match(path)
        if m is None:
            raise Exception(f'Save path {path} is not right!')
        return m.group(1)

    def get_saved_path_prefix(xepoch):
        """Return the checkpoint path prefix (no extension) for the given epoch."""
        prefix_name = ts.get_save_prefix(xepoch)
        return os.path.join(SAVE_DIR, prefix_name)

    def get_saved_path(xepoch):
        """Return the full '.index' checkpoint file path for the given epoch."""
        return get_saved_path_prefix(xepoch) + '.index'

    def get_curve_path(xsave_path):
        """Return the PNG path of the training-curve image tied to a checkpoint."""
        return xsave_path + '.png'

    # Hyper params
    NAME = 'vgg16_tf2x_on_cifar10'
    VER = 'v3.0'
    LR = 0.001
    N_BATCH_SIZE = 128
    N_EPOCHS = 3
    # tvts resume point: train id and epoch of the parent run to continue from.
    PARENT_TRAIN_ID = 8
    PARENT_EPOCH = 2
    IS_TRAIN = 1
    # NOTE(review): IS_TRAIN is already 1, so this assignment is a no-op —
    # presumably the intent was to force training when there is no parent run;
    # confirm whether IS_TRAIN should default to 0 above.
    if PARENT_TRAIN_ID == 0:
        IS_TRAIN = 1

    # Checkpoints live under <this file's dir>/_save/<file name>/<version>/.
    BASE_DIR, FILE_NAME = os.path.split(__file__)
    SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
    os.makedirs(SAVE_DIR, exist_ok=True)

    # Register this run with tvts and resolve the parent checkpoint to resume from.
    ts = tvts.tvts(NAME, params={
        'ver': VER,
        'batch_size': N_BATCH_SIZE,
        'lr': LR,
        'n_epoch': N_EPOCHS,
    }, save_freq=1)
    CKPT_PATH = ts.resume(PARENT_TRAIN_ID, PARENT_EPOCH)
    CKPT_PREFIX = get_prefix_from_saved_path(CKPT_PATH)

    # CIFAR-10: 10 classes, 32x32 RGB images.
    model = Vgg16(10, (32, 32, 3))
    model.summary()
    model.compile(
        optimizer=optimizers.Adam(learning_rate=LR),
        loss=losses.sparse_categorical_crossentropy,
        metrics=[metrics.sparse_categorical_accuracy],
    )

    # Test split: float32 tensors for the model, uint8 copies for display.
    x_test_flt32 = fromRedisNdLg(redis, KEY_PREFIX + 'x_test' + FLT32_SUFFIX)
    x_test_uint8 = fromRedisNdLg(redis, KEY_PREFIX + 'x_test' + UINT8_SUFFIX)
    y_test = fromRedisNdLg(redis, KEY_PREFIX + 'y_test')

    dl_test = tf.data.Dataset.from_tensor_slices((x_test_flt32, y_test)) \
        .shuffle(buffer_size=1024) \
        .batch(batch_size=N_BATCH_SIZE) \
        .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    # Resume weights from the parent run if its checkpoint exists on disk.
    if CKPT_PATH is not None and os.path.exists(CKPT_PATH):
        print(f'Loading weight from {CKPT_PREFIX}* ...')
        model.load_weights(CKPT_PREFIX)
        print('Loaded.')

    curve_save_path = None
    if IS_TRAIN:
        # Train/val splits; only the model-ready float32 arrays are needed here.
        x_train_flt32 = fromRedisNdLg(redis, KEY_PREFIX + 'x_train' + FLT32_SUFFIX)
        x_val_flt32 = fromRedisNdLg(redis, KEY_PREFIX + 'x_val' + FLT32_SUFFIX)
        y_train = fromRedisNdLg(redis, KEY_PREFIX + 'y_train')
        y_val = fromRedisNdLg(redis, KEY_PREFIX + 'y_val')

        dl_train = tf.data.Dataset.from_tensor_slices((x_train_flt32, y_train)) \
            .shuffle(buffer_size=1024) \
            .batch(batch_size=N_BATCH_SIZE) \
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

        dl_val = tf.data.Dataset.from_tensor_slices((x_val_flt32, y_val)) \
            .shuffle(buffer_size=1024) \
            .batch(batch_size=N_BATCH_SIZE) \
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

        class MyCallback(keras.callbacks.Callback):
            """Checkpoints the model and pushes metrics to tvts during training."""

            def __init__(self, **kwargs):
                super().__init__(**kwargs)
                # Current 1-based epoch, advanced at every epoch end; used by the
                # per-batch hook, which only receives a batch index from Keras.
                self.epoch = 1

            def on_epoch_end(self, epoch, logs=None):
                # Keras reports 0-based epochs; everything downstream is 1-based.
                epoch += 1
                save_prefix = get_saved_path_prefix(epoch)
                save_path = get_saved_path(epoch)
                print(f'Save weight to {save_path} ...')
                model.save_weights(save_prefix)
                print('Saved.')
                current_lr = self.model.optimizer.lr.numpy().item()
                ts.save_epoch(epoch, {
                    'cost': logs['loss'],
                    'acc': logs['sparse_categorical_accuracy'],
                    'cost_val': logs['val_loss'],
                    'acc_val': logs['val_sparse_categorical_accuracy'],
                    'lr': current_lr,
                }, save_path)
                self.epoch += 1

            def on_train_batch_end(self, batch, logs=None):
                # Keras reports 0-based batches; log them 1-based alongside the lr.
                current_lr = self.model.optimizer.lr.numpy().item()
                ts.save_batch(self.epoch, batch + 1, {
                    'cost': logs['loss'],
                    'acc': logs['sparse_categorical_accuracy'],
                    'lr': current_lr,
                })

        callback = MyCallback()
        his = model.fit(
            dl_train,
            epochs=N_EPOCHS,
            validation_data=dl_val,
            callbacks=[callback],
        )
        # Keep only the per-epoch metrics dict from the Keras History object.
        his = his.history

        # save training curve
        plt.figure(figsize=[12, 6])
        # Subplot grid: spr rows x spc cols; spn is the running subplot index.
        spr = 1
        spc = 2
        spn = 0

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title(f'Cost(lr={LR})')
        plt.plot(his['loss'], label='train')
        plt.plot(his['val_loss'], label='val')
        plt.legend()
        plt.grid()

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('Metrics')
        plt.plot(his['sparse_categorical_accuracy'], label='acc_train')
        plt.plot(his['val_sparse_categorical_accuracy'], label='acc_val')
        plt.legend()
        plt.grid()

        # The curve image is stored next to the final epoch's checkpoint.
        save_path = get_saved_path(N_EPOCHS)
        curve_save_path = get_curve_path(save_path)
        plt.savefig(curve_save_path)
        plt.close()

    print('Evaluating ...')
    model.evaluate(dl_test)

    # Human-readable class names for the 10 CIFAR-10 labels.
    names_list = fromRedisNdLg(redis, KEY_PREFIX + 'names')
    print(names_list)

    # Render a 4x4 demo grid of test images titled "truth=>prediction".
    spr = 4
    spc = 4
    spn = 0
    plt.figure(figsize=[9, 8])
    n_test_set = spr * spc
    x_test_set = x_test_flt32[:n_test_set]
    # argmax over the class axis turns softmax outputs into predicted label ids.
    h_test_set = model.predict(x_test_set).argmax(axis=1)
    for i in range(n_test_set):
        spn += 1
        plt.subplot(spr, spc, spn)
        htype = names_list[h_test_set[i]]
        rtype = names_list[y_test[i, 0]]
        right = True if htype == rtype else False
        title = f'{rtype}=>{htype} ({"Y" if right else "X"})'
        # Wrong predictions are flagged with red titles.
        plt.title(title, color="black" if right else "red")
        plt.axis('off')
        # Display the uint8 image, not the float32 model input.
        plt.imshow(x_test_uint8[i])
    # Capture the whole figure into an in-memory PNG for later display.
    # NOTE(review): this seek(0) is a no-op on a freshly created empty buffer —
    # it was presumably meant to go after savefig; recent Pillow Image.open
    # rewinds the file pointer itself, so this still works. Confirm and move.
    buf = io.BytesIO()
    buf.seek(0)
    plt.savefig(buf, format='png')
    img_demo = Image.open(buf)
    plt.close()

    print('Check and close the plotting window to continue ...')

    # Show curve of cost and metrics in training if it exists and show the demo of test
    # NOTE(review): curve_save_path is only ever assigned inside `if IS_TRAIN:`,
    # so the condition `not IS_TRAIN and curve_save_path is not None` can never
    # be true as written — this branch is dead. Confirm whether an eval-only
    # run should derive the curve path from the parent checkpoint instead.
    if not IS_TRAIN and curve_save_path is not None and os.path.exists(curve_save_path):
        plt.figure(figsize=[15, 5])
        plt.axis('off')
        img2 = Image.open(curve_save_path)
        # Paste the demo grid and the curve image side by side on one canvas.
        images = (img_demo, img2)
        widths, heights = zip(*(i.size for i in images))
        total_width = sum(widths)
        max_height = max(heights)
        new_im = Image.new('RGB', (total_width, max_height))
        x_offset = 0
        for im in images:
            new_im.paste(im, (x_offset, 0))
            x_offset += im.size[0]
        plt.imshow(new_im)
        plt.show()
    elif IS_TRAIN:
        # Fresh training run: re-plot the curves plus the demo grid in a 1x3 row.
        plt.figure(figsize=[15, 5])
        plt.axis('off')
        spr = 1
        spc = 3
        spn = 0

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title(f'Cost(lr={LR})')
        plt.plot(his['loss'], label='train')
        plt.plot(his['val_loss'], label='val')
        plt.legend()
        plt.grid()

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('Metrics')
        plt.plot(his['sparse_categorical_accuracy'], label='acc_train')
        plt.plot(his['val_sparse_categorical_accuracy'], label='acc_val')
        plt.legend()
        plt.grid()

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.axis('off')
        plt.imshow(img_demo)
        # The in-memory PNG buffer is no longer needed once shown.
        buf.close()

        plt.show()
    else:
        # Eval-only run with no curve image available: just show the demo grid.
        plt.figure(figsize=[6, 6])
        plt.axis('off')
        plt.imshow(img_demo)
        buf.close()
        plt.show()