import tensorflow as tf
import numpy as np
import os
import time
import ssds.tfr_data_process  as dataprocess
import ssds.ssd as ssd
import ssds.preprocess_img_tf as preprocess_img_tf
import ssds.util_tf as util_tf
import matplotlib.pyplot as plt
# slim is needed for the prefetch queue used further down the pipeline.
slim = tf.contrib.slim
# Session config with a 10-second per-operation timeout.
# NOTE(review): run_config is never passed to tf.Session() below — confirm
# whether the timeout was actually meant to be applied.
run_config = tf.ConfigProto()
run_config.operation_timeout_in_ms = 10000
# Training hyper-parameters.
max_steps = 10000              # number of optimizer iterations
batch_size = 60                # images per training batch
num_epochs_per_decay = 2.0     # only used by the commented-out LR-decay schedule below
num_samples_per_epoch = 17125  # size of the VOC training split
# Instantiate the SSD model object.
# NOTE(review): this rebinds the module name `ssd` to the instance, so every
# later `ssd.*` call goes through the instance, not the module.
ssd = ssd.ssd()
# Pre-compute the default (anchor) boxes for every feature-map layer; they are
# consumed later by ssd.bboxes_encode to build the training targets.
layers_anchors = []
for i, s in enumerate(ssd.feature_map_size):
    anchor_bboxes = ssd.ssd_anchor_layer(ssd.img_size, s,
                                          ssd.anchor_sizes[i],
                                          ssd.anchor_ratios[i],
                                          ssd.anchor_steps[i],
                                          ssd.boxes_len[i])
    layers_anchors.append(anchor_bboxes)

# Training pipeline:
#   1. read examples from the TFRecord split
#   2. preprocess each image to the fixed network input size
#   3. encode the ground-truth boxes against the precomputed anchors
#   4. batch, prefetch, compute the loss, and optimize (below)

# Open the VOC training split (21 classes including background).
dataset = dataprocess.get_split('../tfrecords', 'voc_train_*.tfrecord',
                                num_classes=21,
                                num_samples=num_samples_per_epoch)

# Decode one example: raw image plus its ground-truth labels and boxes.
image, raw_labels, raw_bboxes = dataprocess.tfr_read(dataset)

# Resize/augment to the 300x300 SSD input resolution.
image, glabels, gbboxes = preprocess_img_tf.preprocess_image(
    image, raw_labels, raw_bboxes, out_shape=(300, 300))

# Encode the ground truth against every anchor layer: per-layer target
# classes, localization offsets, and matching scores.
target_labels, target_localizations, target_scores = ssd.bboxes_encode(
    glabels, gbboxes, layers_anchors)

# One image tensor plus three per-layer lists; used later to re-nest the
# flat batched tensor list back into this structure.
batch_shape = [1] + [len(layers_anchors)] * 3

# Flatten (image, per-layer classes, per-layer box targets, per-layer scores)
# into one list so tf.train.batch can batch them together.
flat_targets = util_tf.reshape_list(
    [image, target_labels, target_localizations, target_scores])
r = tf.train.batch(flat_targets,
                   batch_size=batch_size,
                   num_threads=4,
                   capacity=5 * batch_size)

# Stage batches through a small prefetch queue to overlap input and compute.
batch_queue = slim.prefetch_queue.prefetch_queue(r, capacity=2)

# Dequeue one flat batch and re-nest it back into image / per-layer targets.
b_image, b_gclasses, b_glocalisations, b_gscores = util_tf.reshape_list(
    batch_queue.dequeue(), batch_shape)
print(b_image.shape)

# Forward pass: per-layer predicted box offsets, class probabilities, logits.
locations_pred, class_pred, logits = ssd.set_net(x=b_image)

# SSD multibox loss: positive/negative classification terms plus localization.
cls_pos_loss, cls_neg_loss, loca_loss = ssd.ssd_losses(
    logits, locations_pred, b_gclasses, b_glocalisations, b_gscores)

# Scalar training objective: the sum of the three loss components.
total_loss = tf.reduce_sum([cls_pos_loss, cls_neg_loss, loca_loss])
# Global step, incremented by the optimizer (see minimize below) so that
# training progress is tracked in the graph / checkpoints.
global_step = tf.train.create_global_step()

# Fixed learning rate. The exponential-decay schedule below was abandoned;
# re-enable it by feeding the decayed `learning_rate` tensor to AdamOptimizer.
# decay_steps = int(num_samples_per_epoch / batch_size * num_epochs_per_decay)
# learning_rate = tf.train.exponential_decay(0.001, global_step, decay_steps, 0.94,
#                                            staircase=True,
#                                            name='exponential_decay_learning_rate')

# FIX: pass global_step so it is actually incremented once per training step
# (the original created it but never advanced it).
optimizer = tf.train.AdamOptimizer(1e-3).minimize(total_loss,
                                                  global_step=global_step)

# FIX: tf.initialize_all_variables() was deprecated and removed in TF 1.x;
# tf.global_variables_initializer() is the supported equivalent.
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
# Coordinator stops and joins the input-queue threads cleanly.
coord = tf.train.Coordinator()
# NOTE(review): run_config (per-op timeout) defined at the top is not passed
# here — confirm whether tf.Session(config=run_config) was intended.
with tf.Session() as sess:
    # FIX: initialize variables BEFORE launching the queue-runner threads so
    # no thread touches uninitialized state.
    sess.run(init_op)
    queue_runner = tf.train.start_queue_runners(sess, coord=coord)
    loss_array = []
    try:
        for step in range(max_steps):
            # FIX: run the train op and the loss in ONE sess.run call. The
            # original used two separate calls, so the reported loss came from
            # a *different* batch than the one just optimized, and the input
            # pipeline was drained twice as fast.
            _, loss_value = sess.run([optimizer, total_loss])
            loss_array.append(loss_value)
            print("第%d次的误差为：%f" % (step, loss_value))
        saver.save(sess, 'model.ckpt')
    finally:
        # Always shut the input threads down, even if training raises.
        coord.request_stop()
        coord.join(queue_runner)

# FIX: plot after the loop. The original guard `if step == 10000` could never
# fire because range(max_steps) ends at max_steps - 1.
plt.plot(range(len(loss_array)), loss_array)
plt.show()










