#! /usr/bin/env python3
import numpy as np
np.random.seed(3) # fix NumPy's RNG seed up-front so runs are reproducible

import keras
from keras.layers import (
    Input, Conv2D, MaxPool2D, BatchNormalization, Flatten, Dense, Dropout, Activation)
from keras.models import Model

import os, pickle, argparse

# project-local helpers: read the image dataset and extract inputs/targets
from libs.datasetFuncs import readDatasets, getDatas, getTargets

def trainingTypeClassifier(datasets_path, model_filename, clean, training):
    """Build or load a small CNN image classifier, optionally train it,
    then evaluate it on the training set.

    Parameters:
        datasets_path: directory whose sub-directory names define the classes.
        model_filename: path used to load and/or save the Keras HDF5 model.
        clean: if truthy, ignore any saved model and build a fresh one.
        training: if truthy, fit the model, then save it and 'labels.pkl'.

    Side effects: may write `model_filename` and './labels.pkl'.
    """
    USE_CLEAN = clean
    USE_TRAINING = training

    # (height, width, RGB channels) of the input tiles; normally fixed.
    mini_img_shape = (64, 64, 3)

    batch_size = 50   # training mini-batch size; rarely needs changing
    epochs_num = 40   # number of training epochs; tune, but keep modest

    # Sub-directory names are the class labels. Sort them so the
    # label -> class-index mapping is deterministic across runs
    # (os.listdir order is arbitrary and filesystem-dependent).
    # NOTE(review): models/labels.pkl saved by the unsorted version may
    # have a different index order — retrain with -c if in doubt.
    labels = sorted(os.listdir(datasets_path))
    classes_num = len(labels)

    input_img = Input(shape=mini_img_shape, name='input_img')

    conv1 = Conv2D(1, (3, 3), activation='relu', padding='same')(input_img)
    conv2 = Conv2D(3, (3, 3), activation='relu')(conv1)
    pool1 = MaxPool2D((2, 2))(conv2)

    # Data is channels-last, so normalize over the channel axis (-1).
    # The original axis=1 normalized over image rows, which is a bug
    # for (H, W, C)-ordered tensors.
    normalized1 = BatchNormalization(axis=-1)(pool1)
    conv3 = Conv2D(5, (3, 3), activation='relu')(normalized1)
    pool2 = MaxPool2D((2, 2))(conv3)
    conv4 = Conv2D(8, (3, 3), activation='relu')(pool2)
    pool4 = MaxPool2D((2, 2))(conv4)

    flattened = Flatten()(pool4)
    normalized2 = BatchNormalization(axis=-1)(flattened)
    dense1 = Dense(64, activation='relu')(normalized2)
    dropout = Dropout(0.5)(dense1)
    dense2 = Dense(classes_num, activation='softmax', name='output_vect')(dropout)

    # Reuse a saved model unless a clean rebuild was requested (or no
    # saved model exists, in which case we fall back to a clean build).
    if not USE_CLEAN and os.path.isfile(model_filename):
        model = keras.models.load_model(model_filename)
    else:
        USE_CLEAN = True
        model = Model(inputs=input_img, outputs=dense2)

    if USE_CLEAN:
        # Softmax output + one-hot targets (to_categorical below) call for
        # categorical cross-entropy; binary_crossentropy would mis-report
        # accuracy whenever classes_num > 2.
        model.compile(optimizer='Adadelta',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    # Load the dataset. Can be slow for large datasets — be patient!
    datasets = readDatasets(labels)
    datas = getDatas(datasets, dtype='float32')
    targets = getTargets(datasets)

    # Invert and scale pixel values into [0, 1], shaped to the model input.
    train_X = (1 - datas / 255).reshape((-1,) + mini_img_shape)
    train_Y = keras.utils.to_categorical(targets, num_classes=classes_num)

    # Train the network (optional).
    if USE_TRAINING:
        model.fit(train_X, train_Y, batch_size=batch_size, epochs=epochs_num)

    # Persist the trained model and the label list.
    # PS: saving needs the h5py library (`pip install h5py`).
    if USE_TRAINING:
        model.save(model_filename)
        with open('labels.pkl', 'wb') as f:
            pickle.dump(labels, f)

    # Validate on the training set; evaluate() returns [loss, accuracy].
    # If enough data is available, prefer a held-out test set instead!
    if os.path.isfile(model_filename):
        model = keras.models.load_model(model_filename)

    print('Validation in train set')
    print(model.evaluate(train_X, train_Y, batch_size=batch_size))
    print('Finished!')

if __name__ == '__main__':
    # Command-line entry point: parse options, then delegate to the trainer.
    parser = argparse.ArgumentParser(description='图片内容二分类模型训练工具')
    parser.add_argument('-d', dest='datasets', type=str, default='./datasets/',
                        help='图片数据集目录的路径，默认为 "./datasets/"')
    parser.add_argument('-m', dest='modelfile', type=str, default='./model.h5',
                        help='模型数据存取文件名称，默认为 "./model.h5"')
    # store_true yields a real bool; the previous nargs='?'/const=True form
    # could silently store a truthy *string* if a value followed the flag.
    parser.add_argument('-c', dest='clean', action='store_true',
                        help='指定是否重新训练模型，若不启用则为否')
    parser.add_argument('-t', dest='training', action='store_true',
                        help='指定是否训练模型，若不启用则为否')

    args = parser.parse_args()
    trainingTypeClassifier(args.datasets, args.modelfile, args.clean, args.training)
