# -*- coding: utf-8 -*-

import random
import numpy as np
import tensorflow as tf
from read_data import get_images_path, process_image
from mcnn import cnn_column_1, cnn_column_2, cnn_column_3


def generate_shuffle_train_data(batch_size=32,
                                folder="/home/lijun/Dataset/crowd_counting/part_b/train_multi_1000"):
    """Yield shuffled mini-batches of (image, density-map) training pairs.

    Args:
        batch_size: number of samples per yielded batch.
        folder: dataset directory passed to ``get_images_path``; defaults to
            the original hard-coded path so existing callers are unaffected.

    Yields:
        Tuples ``(image_batch, ground_truth_batch)`` of float32 arrays with
        shapes ``[batch_size, 192, 256, 3]`` and ``[batch_size, 48, 64, 1]``.

    Note:
        A new random sample order is drawn on every call.  The trailing
        ``len(images) % batch_size`` samples are dropped each epoch so that
        every batch is full.
    """
    images_path, ground_truths_path = get_images_path(folder)
    indices = list(range(len(images_path)))
    random.shuffle(indices)
    num_batches = len(indices) // batch_size  # drop the incomplete tail batch
    for i in range(num_batches):
        image_batch = np.zeros(shape=[batch_size, 192, 256, 3], dtype=np.float32)
        ground_truth_batch = np.zeros(shape=[batch_size, 48, 64, 1], dtype=np.float32)
        for j in range(batch_size):
            index = indices[i * batch_size + j]
            # process_image is expected to return an HxWx3 image and a 48x64
            # density map -- TODO confirm against read_data.process_image.
            image, ground_truth = process_image(images_path[index],
                                                ground_truths_path[index])
            image_batch[j] = image
            # Add the trailing channel axis the network's target expects.
            ground_truth_batch[j] = np.expand_dims(ground_truth, axis=-1)
        yield image_batch, ground_truth_batch


def main():
    """Train cnn_column_1 on 192x256 crops with 48x64 density-map targets.

    Builds the graph, optionally resumes from the latest checkpoint in
    ``./model``, runs 200 epochs of SGD-with-momentum, and checkpoints after
    every epoch (the original only saved once at the very end, so a crash
    during training lost all progress).
    """
    with tf.Graph().as_default():
        in_put_x = tf.placeholder(dtype=tf.float32, shape=[None, 192, 256, 3])
        in_put_y = tf.placeholder(dtype=tf.float32, shape=[None, 48, 64, 1])
        inference_op = cnn_column_1(in_put=in_put_x)
        # tf.sub is the pre-1.0 TensorFlow name for subtraction; kept because
        # the rest of this file targets that API (tf.initialize_all_variables).
        # The divisor 32*1000 presumably normalizes by batch size (32) and a
        # density-map scale factor of 1000 (see the /1000 in the logging
        # below) -- TODO confirm against the ground-truth generation code.
        inference_loss_op = tf.nn.l2_loss(tf.sub(in_put_y, inference_op)) / (32 * 1000)
        # sum() (not tf.add_n) tolerates an empty regularization collection.
        reg_loss_op = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        train_loss_op = tf.add(inference_loss_op, reg_loss_op)

        global_step = tf.Variable(initial_value=0, trainable=False)
        # Step-wise decay: multiply the learning rate by 0.1 every 5000 steps.
        learning_rate = tf.train.exponential_decay(learning_rate=1.0e-8, global_step=global_step,
                                                   decay_steps=5000, decay_rate=0.1, staircase=True)
        momentum_opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)
        opt_op = momentum_opt.minimize(train_loss_op, global_step=global_step)

        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(tf.initialize_all_variables())
            session.run(tf.initialize_local_variables())
            # Resume from the newest checkpoint if one exists; the original
            # unconditionally restored "./model/cnn_column_1-20000", which
            # crashes on a fresh run with no checkpoint present.
            checkpoint = tf.train.get_checkpoint_state("./model")
            if checkpoint and checkpoint.model_checkpoint_path:
                saver.restore(session, checkpoint.model_checkpoint_path)
            for epoch in range(200):
                for image, ground_truth in generate_shuffle_train_data():
                    inference_loss, reg_loss, step, inference, _ = session.run(
                        [inference_loss_op, reg_loss_op, global_step, inference_op, opt_op],
                        feed_dict={in_put_x: image, in_put_y: ground_truth})
                    if step % 100 == 0:
                        print("epoch: {}, step: {}, inference_loss: {}, reg_loss: {}".format(
                            epoch+1, step, inference_loss, reg_loss))
                        # /1000 undoes the density-map scaling so the printed
                        # numbers are (approximate) head counts.
                        print("predict: {}, ground truth: {}, difference: {}".format(
                            np.sum(inference)/1000, np.sum(ground_truth)/1000,
                            np.abs(np.sum(inference) - np.sum(ground_truth))/1000))
                # Checkpoint once per epoch so a crash loses at most one epoch.
                saver.save(session, "./model/cnn_column_1", global_step=global_step)


# Run training only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
