"""
    作者：徐飞
    日期：2020/02/28
    版本：01
    功能：训练数据
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


import Model
import dataset_process
# from quiz_recog.train_model import Model
# from quiz_recog.train_model import dataset_process
import tensorflow as tf
from tensorflow.python.platform import app
from sklearn.model_selection import train_test_split
import time
import os

# ------------------------------------------------------------------
# Input pipeline: command-line flags, train/validation split, queued
# batch loaders, and the graph placeholders for training.
# ------------------------------------------------------------------
num_class = 100     # number of output categories
batch_size = 32     # images per training step
capacity = 256      # input-queue capacity for the batch loaders

FLAGS = app.flags.FLAGS
app.flags.DEFINE_string("data_dir", './data/train', "Data storage files")

# Image-path and label lists, split 75/25 into train/validation.
# NOTE(review): random_state=None produces a different split on every
# run — pass a fixed seed if reproducible splits are required.
X, y = dataset_process.get_file(FLAGS.data_dir)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=None)

# Queue-based loaders yielding batches of 224x224 RGB images.
image_batch, label_batch = dataset_process.get_batch(X_train, y_train, 224, 224, batch_size, capacity)
val_image_batch, val_label_batch = dataset_process.get_batch(X_valid, y_valid, 224, 224, batch_size, capacity)

# Graph inputs. Labels are one-hot vectors (see tf.argmax(y, 1) below);
# declared float32 because softmax_cross_entropy_with_logits requires
# labels of the same dtype as the logits — the original int32 would
# fail that dtype check. feed_dict casts the numpy labels transparently.
x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image_train')
y = tf.placeholder(tf.float32, [None, num_class], name='label_train')

# ------------------------------------------------------------------
# Model, loss, optimizer and accuracy metric.
# ------------------------------------------------------------------
# DenseNet-121 forward pass; dropout keep-prob 0.6 during training.
logits, end_points = Model.DenseNet(x, num_class=num_class, is_training=True,
                                    dropout_keep_prob=0.6, scope='DenseNet', type="121")
# Mean softmax cross-entropy against the one-hot labels.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientSescentOptimizer(learning_rate=0.01) if False else tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)

# Accuracy: fraction of samples whose arg-max prediction matches the
# arg-max of the one-hot label. logits/predict are already shaped
# [batch, num_class], so the original tf.reshape was a no-op and is
# dropped.
predict = tf.nn.softmax(logits=logits)
equal = tf.equal(tf.argmax(y, 1), tf.argmax(predict, 1))
acc = tf.reduce_mean(tf.cast(equal, dtype=tf.float32))

# ------------------------------------------------------------------
# Training session: start the input queues, optimize for a fixed
# number of steps, report validation accuracy periodically, then save
# a checkpoint.
# ------------------------------------------------------------------
init_op = tf.global_variables_initializer()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    sess.run(init_op)
    saver = tf.train.Saver()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    try:
        for step in range(10000):
            images, labels = sess.run([image_batch, label_batch])
            # Loss, train op and training accuracy in a single run()
            # call — the original ran the graph twice per step.
            losses, _, train_acc = sess.run([loss, train_op, acc],
                                            feed_dict={x: images, y: labels})
            if step > 0 and step % 100 == 0:
                # Fetch a held-out batch only when reporting; the
                # original pulled (and discarded) validation data on
                # every single step, and printed the TRAINING accuracy
                # under the name "val_acc".
                val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                val_acc = sess.run(acc, feed_dict={x: val_images, y: val_labels})
                print("after{}, train_loss={}, val_acc={} ".format(step, losses, val_acc))
    finally:
        # Always shut the queue threads down, even if training raises.
        coord.request_stop()
        coord.join(threads)
    # Make sure the checkpoint directory exists before saving.
    if not os.path.isdir("./model/"):
        os.makedirs("./model/")
    saver.save(sess, os.path.join("./model/", "model.ckpt"))
    print("train finished!")
