# -*- coding:utf-8 -*-
# ======================================================================
# This file differs from complete_pro only in how training batches are generated.

# Imports
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
import tensorflow as tf
import input_data2 as  input_data
import model



import time




# ----- Hyper-parameters -----
N_CLASSES = 5    # number of flower classes
IMG_W = 224      # resize width; larger images slow training down
IMG_H = 224      # resize height
BATCH_SIZE = 4   # originally 128
CAPACITY = 256   # input-pipeline queue capacity
learning_rate = 0.0001

# ----- Data / log paths -----
train_dir = '../../datasets/flower/train/'   # training samples
test_dir = '../../datasets/flower/val/'      # validation samples

logs_train_dir = './train_logs/logs/'        # TensorBoard summary logs
logs_checkpoint = './train_logs/checkpoint/' # model checkpoints

# ----- Build file lists and derive per-epoch step counts -----
train, train_label = input_data.get_files(train_dir)
data_len = len(train)

val, val_label = input_data.get_files(test_dir)
test_each_epoch_step = len(val) // BATCH_SIZE  # validation batches per epoch

epochs = 1000
each_epoch_step = data_len // BATCH_SIZE  # training batches per epoch
# Total number of training steps; the earlier hard-coded 50000 was a dead
# assignment (always overwritten here before first use) and has been removed.
MAX_STEP = epochs * each_epoch_step

# ----- Batch tensors for training and validation -----
# Training data and labels (the dataset repeats for `epochs` epochs).
train_batch, train_label_batch, train_iterator = input_data.train_build(epochs, BATCH_SIZE, train, train_label)
print(train_batch)
print(train_label_batch)

# Validation data and labels.
val_batch, val_label_batch, test_iterator = input_data.test_build(BATCH_SIZE, val, val_label)

# ----- Placeholders -----
# Both training and validation batches are fed through the same placeholders,
# so a single graph serves both phases; the dropout rate is fed per phase.
input_batch_image = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
input_batch_label = tf.placeholder(tf.int64, shape=[BATCH_SIZE, ])
drop_out = tf.placeholder(tf.float32, shape=())

# ----- Training ops -----
train_logits = model.inference(input_batch_image, BATCH_SIZE, N_CLASSES, drop_out)
train_loss = model.losses(train_logits, input_batch_label)
train_op = model.trainning(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, input_batch_label)

pre_test_acc = 0  # best validation accuracy so far; gates checkpoint saving

sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

# Initialize all variables and both dataset iterators.
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
sess.run([train_iterator.initializer, test_iterator.initializer])

epoch_index = 0
summary_op = tf.summary.merge_all()
# time.clock() was removed in Python 3.8; perf_counter() is the documented
# replacement for elapsed-time measurement.
start = time.perf_counter()

# NOTE(review): restoring from the latest checkpoint is intentionally disabled
# for this run; when one exists, only a message is printed.
if tf.train.latest_checkpoint(logs_checkpoint) is not None:
    # saver.restore(sess, tf.train.latest_checkpoint(logs_checkpoint))
    print("----------------本次测试不导入预训练模型-----------------")

for step in range(MAX_STEP):

    tr_batch, tr_bat_label = sess.run([train_batch, train_label_batch])
    _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc],
                                    feed_dict={input_batch_image: tr_batch, input_batch_label: tr_bat_label,
                                               drop_out: 0.4})
    # Once per epoch: run a full validation pass, log a summary, and
    # checkpoint the model if validation accuracy improved.
    if step % each_epoch_step == 0:
        if step >= each_epoch_step:
            epoch_index = epoch_index + 1
        all_test_acc = 0
        all_test_loss = 0
        for batch in range(test_each_epoch_step):
            va_batch, va_bat_label = sess.run([val_batch, val_label_batch])
            # Reuse the training ops for evaluation; dropout fed as 1.0.
            te_logits, te_loss, te_acc = sess.run([train_logits, train_loss, train_acc],
                                                  feed_dict={input_batch_image: va_batch,
                                                             input_batch_label: va_bat_label, drop_out: 1.0,
                                                             })
            all_test_acc = all_test_acc + te_acc
            all_test_loss = all_test_loss + te_loss
        avg_test_acc = all_test_acc / test_each_epoch_step
        avg_test_loss = all_test_loss / test_each_epoch_step
        print('Step %d  %d/epoch:%d------  train loss = %.2f  ------ train accuracy = %.2f%%  ------'
              '    test_loss = %.2f '
              ' ------  test accuracy = %.2f%%' % (step,
                                                   epoch_index,
                                                   epochs, tra_loss,
                                                   tra_acc * 100.0, avg_test_loss, avg_test_acc * 100.0))

        summary_str = sess.run(summary_op, feed_dict={input_batch_image: tr_batch, input_batch_label: tr_bat_label,
                                                      drop_out: 1.0})
        train_writer.add_summary(summary_str, step)
        # Save a checkpoint only when validation accuracy improves on the best.
        if avg_test_acc > pre_test_acc:
            checkpoint_path = os.path.join(logs_checkpoint, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
            print("current test acc higher {} than last one".format(avg_test_acc - pre_test_acc))
            pre_test_acc = avg_test_acc

end = time.perf_counter()
print("一共运行了:%s秒----约等于%s分钟" % ((end - start), (end - start) / 60))
# Release resources explicitly instead of relying on interpreter shutdown.
train_writer.close()
sess.close()
# ========================================================================


