"""
训练CNN模型
"""
import os
import glob
import config
import numpy as np
import tensorflow as tf
from skimage import io, transform
from CNN.Nets.jaynet import jaynet_32, jaynet_32_tiny
from CNN.Nets.alexnet import alexnet
from CNN.Nets.vgg import vgg_16, vgg_19
from CNN.Nets.resnet import resnet_50


TRAIN_DIR = 'Trainsets/'  # dataset root: one sub-directory per class, .bmp images inside
MODEL_DIR = 'Model/model'  # checkpoint path prefix passed to tf.train.Saver
MODEL_WIDTH = config.MODEL_WIDTH
MODEL_HEIGHT = config.MODEL_HEIGHT
MODEL_CHANNEL = config.MODEL_CHANNEL
EPOCH_SIZE = config.CNN_EPOCH_SIZE
BATCH_SIZE = config.CNN_BATCH_SIZE
# Expected input size: jaynet: 32;  alexnet/vgg/resnet: 224
NETWORK = {'jaynet_32': 0, 'jaynet_32_tiny': 1, 'alexnet': 2, 'vgg_16': 3, 'vgg_19': 4, 'resnet': 5}
NETWORK = NETWORK['jaynet_32']  # rebinds NETWORK to the selected architecture id (int)


# 获取所有的图片路径名和标签
# Load all image paths and labels
def read_data(file_dir):
    """Load every .bmp image under the class sub-directories of `file_dir`.

    Each immediate entry of `file_dir` is treated as one class; every image
    in it is resized to the configured model input shape and labelled with
    that class's index.

    Args:
        file_dir: dataset root containing one sub-directory per class.

    Returns:
        (images, labels, num_classes): float32 image array, int32 label
        array, and the number of classes found.
    """
    # Sort for a deterministic class -> label-index mapping across runs
    # (os.listdir order is arbitrary and filesystem-dependent).
    classes = sorted(os.listdir(file_dir))
    images_list = []
    labels_list = []
    for index, name in enumerate(classes):
        # Collect every .bmp image inside this class folder.
        for img_path in glob.glob(os.path.join(file_dir, name, '*.bmp')):
            img = io.imread(img_path)
            img = transform.resize(img, (MODEL_WIDTH, MODEL_HEIGHT, MODEL_CHANNEL))
            images_list.append(img)
            labels_list.append(index)
    return np.asarray(images_list, np.float32), np.asarray(labels_list, np.int32), len(classes)


# 生成训练与验证集
# Split the data into training and validation sets
def separate_data(image, label, ratio=0.7):
    """Shuffle the dataset and split it into train/validation subsets.

    Args:
        image: array of samples; the first axis indexes examples.
        label: array of labels aligned with `image`.
        ratio: fraction of examples assigned to the training split.

    Returns:
        (x_train, y_train, x_val, y_val)
    """
    num_example = image.shape[0]
    # Shuffle images and labels with the same random permutation so pairs stay aligned.
    perm = np.random.permutation(num_example)
    image = image[perm]
    label = label[perm]
    # np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    index = int(num_example * ratio)
    x_train = image[:index]
    y_train = label[:index]
    x_val = image[index:]
    y_val = label[index:]
    return x_train, y_train, x_val, y_val


# 按批次取数据
# Yield the data one mini-batch at a time
def shuffle_data(inputs=None, labels=None, batch_size=None, shuffle=False):
    """Yield (inputs, labels) mini-batches of size `batch_size`.

    The trailing partial batch is dropped. When `shuffle` is True, a random
    permutation is applied before batching; otherwise batches follow the
    original order.
    """
    assert len(inputs) == len(labels)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    # Only complete batches are produced: n_full is the largest multiple
    # of batch_size that fits.
    n_full = (len(inputs) // batch_size) * batch_size
    for begin in range(0, n_full, batch_size):
        end = begin + batch_size
        pick = order[begin:end] if shuffle else slice(begin, end)
        yield inputs[pick], labels[pick]


if __name__ == '__main__':
    # Placeholder inputs: image batch and integer class labels.
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, shape=[None, MODEL_WIDTH, MODEL_HEIGHT, MODEL_CHANNEL], name='x')
        y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')
    # Load the dataset and split it into training / validation sets.
    images, labels, num_classes = read_data(TRAIN_DIR)
    train_data, train_label, val_data, val_label = separate_data(images, labels, 0.7)
    # L2 regularizer handed to the network builders.
    regularizer = tf.contrib.layers.l2_regularizer(0.1)
    # Select the network architecture (see NETWORK at the top of the file).
    if NETWORK == 0:
        logits = jaynet_32(x, num_classes, 0.5, regularizer)    # FR-CNN
    elif NETWORK == 1:
        logits = jaynet_32_tiny(x, num_classes, regularizer)    # Tiny FR-CNN
    elif NETWORK == 2:
        logits = alexnet(x, num_classes, regularizer)   # AlexNet
    elif NETWORK == 3:
        logits = vgg_16(x, num_classes, 0.5, regularizer)   # VGG-16
    elif NETWORK == 4:
        logits = vgg_19(x, num_classes, 0.5, regularizer)   # VGG-19
    else:
        logits = resnet_50(x, num_classes)  # ResNet-50
    # Named identity op so inference code can fetch the logits tensor by name.
    logits_eval = tf.multiply(logits, tf.constant(value=1, dtype=tf.float32), name='logits_eval')
    # sparse_softmax_cross_entropy_with_logits returns one loss PER EXAMPLE;
    # reduce to the batch mean so `loss` is a scalar for the optimizer and
    # the per-batch accumulation below reports a true average.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_))
    # NOTE(review): regularization losses created through `regularizer` are not
    # added to `loss` here — confirm the network builders fold them in themselves.
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Saver / session setup.
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Training loop.
    for epoch in range(1, EPOCH_SIZE + 1):
        train_loss, train_acc, n_batch = 0, 0, 0
        for train_x, train_y in shuffle_data(train_data, train_label, BATCH_SIZE, shuffle=False):
            _, error, accuracy = sess.run([train_op, loss, acc], feed_dict={x: train_x, y_: train_y})
            train_loss += error
            train_acc += accuracy
            n_batch += 1
        print('<--------------------epoch: %d-------------------->' % epoch)
        print("train loss: %s" % round(train_loss / n_batch, 6))
        print("train acc: %s" % round(train_acc / n_batch, 4))
        # Validation pass: no train_op, so weights are unchanged.
        val_loss, val_acc, n_batch = 0, 0, 0
        for x_val_a, y_val_a in shuffle_data(val_data, val_label, BATCH_SIZE, shuffle=False):
            error, accuracy = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
            val_loss += error
            val_acc += accuracy
            n_batch += 1
        print("validation loss: %s" % round(val_loss / n_batch, 6))
        print("validation acc: %s" % round(val_acc / n_batch, 4))
        # Periodic checkpoint; the final save below covers the last epoch.
        if epoch % 64 == 0 and epoch != EPOCH_SIZE:
            saver.save(sess, MODEL_DIR, global_step=epoch)

    saver.save(sess, MODEL_DIR)
    sess.close()
