"""
训练模型
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from image_classification.data_manage import read_data
from image_classification import config
from image_classification.nets.lenet import lenet
from image_classification.nets.alexnet import alexnet
from image_classification.nets.vgg import vgg_16, vgg_19
from image_classification.nets.resnet import resnet_50, resnet_101, resnet_152
from image_classification.nets.inception_v3 import inception_v3
from image_classification.nets.mobilenet import mobilenet_v1, mobilenet_v2


# Hyper-parameters and paths are centralised in the project config module.
MODEL_WIDTH = config.MODEL_WIDTH        # input image width fed to the network
MODEL_HEIGHT = config.MODEL_HEIGHT      # input image height
MODEL_CHANNEL = config.MODEL_CHANNEL    # number of colour channels
EPOCH_SIZE = config.EPOCH_SIZE          # total number of training epochs
BATCH_SIZE = config.BATCH_SIZE          # samples per mini-batch
MODEL_DIR = config.MODEL_DIR            # checkpoint / figure output prefix
OUTPUT_LIST = config.OUTPUT_LIST        # dataset paths; indices 0/1/2 are read as train/test/val below
# Numeric id of the architecture to train, looked up from the supported set.
NETWORK = {'lenet': 0, 'alexnet': 1, 'vgg_16': 2, 'vgg_19': 3,
           'resnet_50': 4, 'resnet_101': 5, 'resnet_152': 6,
           'mobilenet_v1': 7, 'mobilenet_v2': 8, 'googlenet': 9}['alexnet']


# 按批次取数据
def read_batch(inputs=None, labels=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(labels)
    indices = None
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx: start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], labels[excerpt]


if __name__ == '__main__':
    # Load the train / test / validation datasets and their class counts.
    train_data, train_label, train_classes = read_data(OUTPUT_LIST[0], (MODEL_WIDTH, MODEL_HEIGHT))
    test_data, test_label, test_classes = read_data(OUTPUT_LIST[1], (MODEL_WIDTH, MODEL_HEIGHT))
    val_data, val_label, val_classes = read_data(OUTPUT_LIST[2], (MODEL_WIDTH, MODEL_HEIGHT))

    # Network input placeholders: image batch and integer class labels.
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, shape=[None, MODEL_WIDTH, MODEL_HEIGHT, MODEL_CHANNEL], name='x')
        y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

    # Select the network architecture (see the NETWORK id map above).
    regularizer = tf.contrib.layers.l2_regularizer(0.1)     # L2 weight regularisation
    with tf.name_scope('network'):
        if NETWORK == 0:
            logits = lenet(x, train_classes)
        elif NETWORK == 1:
            logits = alexnet(x, train_classes, keep_prob=0.5, regularizer=regularizer)
        elif NETWORK == 2:
            logits = vgg_16(x, train_classes, keep_prob=0.5, regularizer=regularizer)
        elif NETWORK == 3:
            logits = vgg_19(x, train_classes, keep_prob=0.5, regularizer=regularizer)
        elif NETWORK == 4:
            logits = resnet_50(x, train_classes)
        elif NETWORK == 5:
            logits = resnet_101(x, train_classes)
        elif NETWORK == 6:
            logits = resnet_152(x, train_classes)
        elif NETWORK == 7:
            logits = mobilenet_v1(x, train_classes)
        elif NETWORK == 8:
            logits = mobilenet_v2(x, train_classes)
        else:
            logits = inception_v3(x, train_classes, keep_prob=0.8)

    # Identity node so the output tensor has a stable name in a frozen graph.
    logits_value = tf.multiply(logits, tf.constant(value=1, dtype=tf.float32), name='logits_value')
    # FIX: reduce the per-example cross-entropy to a scalar mean. The original
    # handed the un-reduced vector to the optimizer (which then implicitly
    # minimises the batch SUM) and the printed "loss" was a batch-sum average.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_))
    # FIX: l2_regularizer only registers penalty terms in a graph collection;
    # they must be added to the loss explicitly or regularisation is a no-op.
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if reg_losses:
        loss += tf.add_n(reg_losses)
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)   # learning rate set here
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)  # per-sample hit/miss
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))   # batch accuracy
    # Saver over trainable variables plus batch-norm moving statistics,
    # which are not trainable but must be checkpointed for inference.
    var_list = tf.trainable_variables()
    global_list = tf.global_variables()
    bn_moving_vars = [g for g in global_list if 'moving_mean' in g.name]
    bn_moving_vars += [g for g in global_list if 'moving_variance' in g.name]
    var_list += bn_moving_vars
    saver = tf.train.Saver(var_list=var_list, max_to_keep=5)

    # Train the model.
    with tf.Session() as sess:
        plt_acc = []    # per-epoch validation accuracy, for the plot below
        plt_loss = []   # per-epoch validation loss
        tf.global_variables_initializer().run()
        for epoch in range(1, EPOCH_SIZE + 1):
            train_loss, train_acc, n_batch = 0, 0, 0
            # FIX: shuffle the training data every epoch (was shuffle=False);
            # a fixed batch order hurts SGD convergence.
            for train_x, train_y in read_batch(train_data, train_label, BATCH_SIZE, shuffle=True):
                _, error, accuracy = sess.run([train_op, loss, acc], feed_dict={x: train_x, y_: train_y})
                train_loss += error
                train_acc += accuracy
                n_batch += 1
            print('<--------------------epoch: %d-------------------->' % epoch)
            print("Train loss: %s" % round(train_loss / n_batch, 6))
            print("Train acc: %s" % round(train_acc / n_batch, 4))
            # Validation pass (forward only, no train_op).
            val_loss, val_acc, n_batch = 0, 0, 0
            for x_val_a, y_val_a in read_batch(val_data, val_label, BATCH_SIZE, shuffle=False):
                error, accuracy = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
                val_loss += error
                val_acc += accuracy
                n_batch += 1
            plt_acc.append(round(val_acc / n_batch, 4))
            plt_loss.append(round(val_loss / n_batch, 6))
            print("Validation loss: %s" % round(val_loss / n_batch, 6))
            print("Validation acc: %s" % round(val_acc / n_batch, 4))
            # Periodic checkpoint; the final epoch is saved after the loop.
            if epoch % 64 == 0 and epoch != EPOCH_SIZE:
                saver.save(sess, MODEL_DIR, global_step=epoch)
        saver.save(sess, MODEL_DIR)

        # Evaluate on the held-out test set.
        acc_sum = 0
        acc_count = 0
        for x_test, y_test in read_batch(test_data, test_label, BATCH_SIZE, shuffle=False):
            test_acc = sess.run(acc, feed_dict={x: x_test, y_: y_test})
            acc_sum += test_acc
            acc_count += 1
        print('<--------------------test-------------------->')
        print('Test acc: %.4f' % (acc_sum / acc_count))

        # Plot validation loss (left axis) and accuracy (right axis) per epoch.
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.plot(range(EPOCH_SIZE), plt_loss, 'b', label='Loss function')
        ax1.set_ylabel(u'Loss function')    # left y-axis label
        ax1.set_title(u"Original")
        # FIX: the x-label belongs on the primary axis; twinx() shares the
        # x-axis, and labelling the twin left the shared axis unlabelled.
        ax1.set_xlabel(u'Iterator')
        ax2 = ax1.twinx()   # secondary y-axis sharing the same x-axis
        ax2.plot(range(EPOCH_SIZE), plt_acc, 'r', label='Accuracy')
        ax2.set_ylabel(u'Accuracy')     # right y-axis label
        fig.legend(loc='upper left')    # legend position
        plt.grid()  # background grid
        fig.savefig(MODEL_DIR + 'figure.png')