#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2020 Wei Keting<weikting@gmail.com>. All rights reserved.
# @Time : 2020-09-10 12:15 
# @File : train_softmax.py
# @Description :
#
#
import os
import time

import visualize
import tensorflow as tf
import utils
import numpy as np

from exdense import ExtendDense


class LRScheduler:
    """Pass-through learning-rate schedule that remembers the initial rate.

    Intended for ``tf.keras.callbacks.LearningRateScheduler``: it never
    modifies the learning rate, but captures the first rate it observes
    in ``self._lr``.
    """

    def __init__(self):
        # First learning rate seen; None until the first call.
        self._lr = None

    def __call__(self, epoch, lr, *args, **kwargs):
        # Latch the very first rate, then always return the input unchanged.
        self._lr = lr if self._lr is None else self._lr
        return lr


class EarlyStopping(tf.keras.callbacks.EarlyStopping):
    """EarlyStopping that additionally records the metrics of the last epoch.

    The recorded dict (``last_logs``) carries the most recent metric values
    plus the epoch index under the key ``'epoch'``; ``main()`` uses it to
    label the visualization output.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Accumulates the latest metric values seen across epochs.
        self._last_logs = {}

    def on_epoch_end(self, epoch, logs=None):
        super().on_epoch_end(epoch, logs)
        # Keras may invoke callbacks with logs=None; the original
        # unconditional dict.update(logs) would raise TypeError then.
        if logs:
            self._last_logs.update(logs)
        self._last_logs['epoch'] = epoch

    @property
    def last_logs(self):
        """Metrics of the most recent epoch, including the 'epoch' index."""
        return self._last_logs


def model0(input_shape):
    """Fully-connected feature extractor producing a 2-D 'features' output.

    Four Dense->BatchNorm->PReLU stages (128, 128, 256, 512 units) followed
    by dropout and a final 2-unit Dense + BatchNorm named 'features', so
    the embedding can be plotted directly in 2-D.
    """
    layers = [tf.keras.layers.Flatten(input_shape=input_shape)]
    for units in (128, 128, 256, 512):
        layers.append(tf.keras.layers.Dense(units))
        layers.append(tf.keras.layers.BatchNormalization())
        layers.append(tf.keras.layers.PReLU())
    layers.append(tf.keras.layers.Dropout(0.45))
    layers.append(tf.keras.layers.Dense(2))  # 2-D embedding output
    layers.append(tf.keras.layers.BatchNormalization(name='features'))
    return tf.keras.models.Sequential(layers)


def model1(input_shape):
    """Small convolutional feature extractor with a 2-D 'features' output.

    Three Conv->BatchNorm->PReLU stages, then flatten, dropout, and a final
    2-unit Dense + BatchNorm named 'features' for 2-D visualization.
    """
    # (filters, strides) for each conv stage; kernel size is 3 throughout.
    conv_stages = ((64, 2), (128, 2), (192, 1))
    layers = [tf.keras.layers.InputLayer(input_shape=input_shape)]
    for filters, strides in conv_stages:
        layers.append(tf.keras.layers.Conv2D(filters, 3, strides=strides))
        layers.append(tf.keras.layers.BatchNormalization())
        layers.append(tf.keras.layers.PReLU())
    layers.extend([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(0.45),
        tf.keras.layers.Dense(2),  # 2-D embedding output
        tf.keras.layers.BatchNormalization(name='features'),
    ])
    return tf.keras.models.Sequential(layers)


def main(args):
    """Train a softmax classifier on a keras built-in dataset and visualize
    the learned 2-D features of the training set.

    Args:
        args: parsed CLI namespace (see parse_arguments()). Fields read:
            dataset, n_classes, data_norm, model, dense_type, lr,
            batch_size, epochs, show, visualize_classifier_weights.
            NOTE: args is also mutated in place (reduce_lr_patience,
            dataset, train_count, val_count).

    Raises:
        RuntimeError: on unknown --data-norm, --model, or --dense-type.
    """
    # Pick the keras built-in dataset; unrecognized names silently fall
    # back to fashion_mnist rather than raising.
    if args.dataset == 'mnist':
        kdata = tf.keras.datasets.mnist
    elif args.dataset == 'fashion_mnist' or args.dataset == 'fashion':
        kdata = tf.keras.datasets.fashion_mnist
    elif args.dataset == 'cifar100':
        kdata = tf.keras.datasets.cifar100
    elif args.dataset == 'cifar10':
        kdata = tf.keras.datasets.cifar10
    else:
        kdata = tf.keras.datasets.fashion_mnist

    # Base patience; scaled up for dense_type 2 below. Note this overrides
    # any value the caller may have set on args.
    args.reduce_lr_patience = 96
    (x_train, y_train), (x_test, y_test) = kdata.load_data()

    # Optionally restrict training/eval to the first n_classes labels
    # (labels are compared with `< n_classes`, so classes 0..n_classes-1).
    if args.n_classes > 0:
        s = y_train < args.n_classes
        s = s.reshape((-1,))  # cifar labels are (n, 1); flatten the mask
        x_train = x_train[s]
        y_train = y_train[s]

        s = y_test < args.n_classes
        s = s.reshape((-1,))
        x_test = x_test[s]
        y_test = y_test[s]

        clazz_labels = np.sort(np.unique(y_train))
        clazz_num = len(clazz_labels)
        # Tag the dataset name with the class count used for this run.
        args.dataset = '{}_{}'.format(args.dataset, clazz_num)
    else:
        clazz_labels = np.sort(np.unique(y_train))
        clazz_num = len(clazz_labels)

    # Input normalization, selected by --data-norm.
    if args.data_norm == 0:
        # Keep raw pixel values untouched.
        pass
    elif args.data_norm == 1:
        # Scale to [-1, 1].
        x_train, x_test = x_train / 127.5 - 1, x_test / 127.5 - 1.
    elif args.data_norm == 2:
        # Scale to [0, 1].
        x_train, x_test = x_train / 255., x_test / 255.0
    else:
        raise RuntimeError("data type `{}` is unknown.".format(args.data_norm))

    # Grayscale datasets (mnist / fashion_mnist) arrive as (28, 28);
    # add an explicit single channel dimension for the conv model.
    if len(x_train[0].shape) == 2:
        x_train = x_train.reshape((-1, 28, 28, 1))
        x_test = x_test.reshape((-1, 28, 28, 1))

    # Record dataset sizes on args (informational; printed below).
    args.train_count = len(y_train)
    args.val_count = len(y_test)

    # Project helper; presumably works around a cuDNN init issue — see utils.
    utils.fix_tf_cudnn_error()

    # Feature-extraction backbone, selected by --model.
    inshape = x_train[0].shape
    if args.model == 'dense':
        feature_model = model0(inshape)
    elif args.model == 'conv':
        feature_model = model1(inshape)
    else:
        raise RuntimeError("model type `{}` is unknown.".format(args.model))

    # feature_model.summary()
    x = feature_model.output
    lname = 'classifier_dense'
    # Classifier head: choose the variant of the final fully-connected layer.
    if args.dense_type == 0:
        x = tf.keras.layers.Dense(clazz_num, use_bias=True, name=lname)(x)
    elif args.dense_type == 1:
        x = tf.keras.layers.Dense(clazz_num, use_bias=False, name=lname)(x)
    elif args.dense_type == 2:
        x = ExtendDense(clazz_num, normalize=True, name=lname)(x)
        # The normalized variant gets 1.5x patience before LR reduction.
        args.reduce_lr_patience = int(args.reduce_lr_patience * 1.5)
    elif args.dense_type == 3:
        x = ExtendDense(clazz_num, normalize=False, name=lname)(x)
    else:
        raise RuntimeError("dense type `{}` is unknown.".format(args.dense_type))
    x = tf.keras.layers.Softmax(name='classifier_softmax')(x)

    # Model name encodes the hyperparameters so runs are distinguishable
    # in checkpoints, tensorboard logs, and visualization filenames.
    model_name = "{}-{}-{}-C{}-DT{}-DN{}".format(args.model, len(feature_model.trainable_variables), args.dataset,
                                                 clazz_num, args.dense_type, args.data_norm)
    model = tf.keras.Model(feature_model.input, x, name=model_name)
    model.summary()
    print("\n", "args:", args, "\n")

    mckpt = os.path.join('outputs', model_name, '{}.hdf5'.format(model_name))
    os.makedirs(os.path.dirname(mckpt), exist_ok=True)
    # Metric key name differs across TF versions ('accuracy' vs 'acc');
    # utils.tf_version('2.3') presumably tests the running version — confirm.
    if utils.tf_version('2.3'):
        monitor = 'loss'
        acc = 'accuracy'
    else:
        monitor = 'loss'
        acc = 'acc'
    early_stopping = EarlyStopping(monitor=monitor, patience=int(args.reduce_lr_patience * 3), verbose=1)
    callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath=mckpt,
                                                    monitor='loss',
                                                    mode='auto',
                                                    period=8,
                                                    save_best_only=True,
                                                    verbose=1),
                 tf.keras.callbacks.TerminateOnNaN(),
                 tf.keras.callbacks.ReduceLROnPlateau(factor=0.96, patience=args.reduce_lr_patience, verbose=1,
                                                      cooldown=0, monitor='loss', min_delta=0,
                                                      min_lr=tf.keras.backend.epsilon() * 10),
                 tf.keras.callbacks.LearningRateScheduler(LRScheduler()),
                 tf.keras.callbacks.TensorBoard(
                     log_dir=os.path.join("outputs", "logs", time.strftime("%Y%m%d"), model_name)),
                 early_stopping,
                 ]
    model.compile(optimizer=tf.keras.optimizers.RMSprop(args.lr),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Resume from an existing checkpoint if one is present.
    if os.path.isfile(mckpt):
        print("\n\nRestoring weights from {}...".format(mckpt), end='\r')
        model.load_weights(mckpt)
        print("Restoring weights from {}...done\n\n".format(mckpt))
    # Train the model.
    model.fit(x=x_train, y=y_train,
              batch_size=args.batch_size,
              validation_data=(x_test, y_test),
              verbose=2, epochs=args.epochs,
              shuffle=True,
              callbacks=callbacks)
    model.evaluate(x_test, y_test)
    extras = None
    if args.visualize_classifier_weights:
        # Fetch the classifier dense layer's weight matrix (first trainable
        # variable) and transpose it to one row vector per class.
        extras = model.get_layer(name=lname).trainable_variables[0]
        extras = list(np.transpose(utils.tf_variable_value(extras), (1, 0)))
    # Visualize the 2-D features of the training images; the title encodes
    # batch size plus the last epoch and accuracy recorded by early stopping.
    visualize.visualize_model(feature_model, [(x_train, y_train)],
                              "{}-B{}-E{}-A{:.6f}-softmax".format(model_name, args.batch_size,
                                                                  early_stopping.last_logs.get('epoch', 0),
                                                                  early_stopping.last_logs.get(acc, 0)),
                              show=args.show,
                              extras=extras)


def parse_arguments():
    """Build the CLI parser and return the parsed argument namespace."""
    import argparse
    p = argparse.ArgumentParser()
    p.add_argument('--epochs', '-ep', '-e', type=int, default=1024,
                   help='epochs for training')
    p.add_argument('--model', '-model', type=str, default='conv',
                   help='name of model to train,default: conv')
    p.add_argument('--dataset', '-data', type=str, default='cifar10',
                   help='dataset for training. default: cifar10')
    p.add_argument('--lr', '-lr', type=float, default=0.001,
                   help='Learning rate. default: 0.001')
    p.add_argument('--batch-size', '-b', type=int, default=1000,
                   help='Batch size for training,default: 1000')
    p.add_argument('--dense-type', '-dt', type=int, default=0,
                   help='type of last Dense layer,default: 0')
    p.add_argument('--data-norm', '-dn', type=int, default=0,
                   help='data normalize preprocessing,default: 0')
    p.add_argument('--show', '-show', action='store_true', default=False,
                   help='Show visualize image,default: False')
    p.add_argument('--visualize-classifier-weights', '-vcw',
                   action='store_true', default=False,
                   help='Show classifier weights in visualize image,default: False')
    p.add_argument('--n-classes', '-nc', type=int, default=0,
                   help='max number of classes,default: 0')
    return p.parse_args()


if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run training.
    main(parse_arguments())
