import sys

# from tensorflow.contrib import slim

from yolo_v3 import yolo_v3
from config import Input_shape, channels, path, data_path, model_type, MODEL_PATH, num_class

from loss_function import compute_loss
from utils.yolo_utils import save_np_data, read_anchors, read_classes, Data_Gen

from PIL import ImageFile

from yolo_v3_tiny import yolo_v3_tiny

ImageFile.LOAD_TRUNCATED_IMAGES = True

import numpy as np
import tensorflow as tf
import time
import os
from datetime import datetime

from tensorflow.python import debug as tf_debug

# Make both GPUs visible to TensorFlow.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

# Fixed seed so shuffling below is reproducible between runs.
np.random.seed(101)

# Get Data #############################################################################################################

# Anchor file depends on the model variant: 'N' = full YOLOv3, 'T' = tiny.
_anchor_files = {'N': '/normal_anchor.txt',
                 'T': '/new_anchor0305.txt'}
anchors_paths = data_path + _anchor_files[model_type]

anchors = read_anchors(anchors_paths)
########################################################################################################################
"""
# Clear the current graph in each run, to avoid variable duplication
# tf.reset_default_graph()
"""
print("Starting 1st session...")
# Explicitly create a Graph object so all ops below land in it.
graph = tf.Graph()
with graph.as_default():
    # Step counter incremented by the optimizer on every training batch.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Start running operations on the Graph.
    # STEP 1: Input data ###############################################################################################

    # Image batch, NHWC.  Pixel values are fed already scaled to [0, 1] (see the feed dicts below).
    X = tf.placeholder(tf.float32, shape=[None, Input_shape, Input_shape, channels], name='Input')  # for image_data
    # True during training, False for validation (switches e.g. batch-norm behaviour in the model).
    phase = tf.placeholder(tf.bool, shape=[], name='Phase')
    with tf.name_scope("Target"):
        # Ground-truth tensors for the three detection scales (strides 32 / 16 / 8).
        # BUG FIX: `Input_shape / 32` is float division in Python 3; placeholder
        # dimensions must be integers, so use floor division instead.
        Y1 = tf.placeholder(tf.float32, shape=[None, Input_shape // 32, Input_shape // 32, 3, 8 + num_class],
                            name='target_S1')
        Y2 = tf.placeholder(tf.float32, shape=[None, Input_shape // 16, Input_shape // 16, 3, 8 + num_class],
                            name='target_S2')
        Y3 = tf.placeholder(tf.float32, shape=[None, Input_shape // 8, Input_shape // 8, 3, 8 + num_class],
                            name='target_S3')
    # Reshape images for TensorBoard visualization.
    # NOTE(review): this folds the channel axis into the batch axis (one grayscale
    # image per channel); presumably channels == 1 here — confirm.
    x_reshape = tf.reshape(X, [-1, Input_shape, Input_shape, 1])
    tf.summary.image("input", x_reshape)
    
    # Pick the network builder: 'N' selects full YOLOv3, anything else the tiny variant.
    model = yolo_v3 if model_type == 'N' else yolo_v3_tiny
    # Per-scale predictions of the chosen model.
    scale_pred = model(X, phase)
    # Ground-truth placeholders in the same scale order as the predictions.
    y_true = [Y1, Y2, Y3]
    
    with tf.name_scope("Loss_and_Detect"):
        # Total loss plus diagnostic tensors and the list of per-term losses
        # (xy / wh / conf / class / seed / order) from the project loss function.
        loss_tensor, wrong_order_tensor, accuracy_tensor, loss_list, log_data = compute_loss(scale_pred, y_true,
                                                                                             anchors, num_class,
                                                                                             is_training=phase,
                                                                                             print_loss=False)


        def load_test_summary(avg_loss):
            """Build one merged summary of the four epoch-average scalars fed via `avg_loss`."""
            tags = ('t_loss', 'v_loss', 't_conf_loss', 'v_conf_loss')
            scalars = [tf.summary.scalar(tag, avg_loss[idx]) for idx, tag in enumerate(tags)]
            return tf.summary.merge(scalars)


        # Fed once per epoch with [train_loss, valid_loss, train_conf_loss, valid_conf_loss].
        avg_loss_node = tf.placeholder(tf.float32, [4])
        sum_node = load_test_summary(avg_loss_node)
    
    with tf.name_scope("Optimizer"):
        # temp = set(tf.global_variables())
        # Adam with a fixed 1e-4 learning rate; minimize() increments `global_step` every batch.
        # NOTE(review): if the model registers batch-norm moving-average updates in
        # tf.GraphKeys.UPDATE_OPS, this minimize() should run under
        # tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) —
        # confirm against the model code; otherwise the moving stats never update.
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss_tensor, global_step=global_step)
        # adam_vars = set(tf.global_variables())-temp
    
    input_shape = (Input_shape, Input_shape)  # multiple of 32 (currently unused below)

    # STEP 5: Train the model, and write summaries #####################################################################
    # The Graph to be launched (described above)
    # config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True) #, gpu_options.allow_growth = False)
    g_list = tf.global_variables()
    # Batch-norm moving statistics are not trainable, so they must be listed explicitly
    # for the savers below or they would be lost on save/restore.
    # NOTE(review): this matches 'moving_avg'/'moving_var'.  Standard TF batch norm names
    # its statistics 'moving_mean'/'moving_variance'; 'moving_var' is a prefix of
    # 'moving_variance' and would match it, but a standard 'moving_mean' would be
    # missed — confirm the variable names produced by the model code.
    bn_moving_vars = [g for g in g_list if 'moving_avg' in g.name or 'moving_var' in g.name]


    # To resume training it would be best to also save the Adam slot variables, but that
    # makes the checkpoint huge and slow to save — not worth it; re-training the Adam
    # state from scratch is fine.
    var_list = tf.trainable_variables() + bn_moving_vars
    loader = tf.train.Saver(var_list=var_list)  # restores weights at startup
    saver_val = tf.train.Saver(var_list=var_list, max_to_keep=5)  # best-on-validation checkpoints
    saver_rut = tf.train.Saver(var_list=var_list, max_to_keep=20)  # routine periodic checkpoints
    
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # grab GPU memory on demand instead of all at once
    with tf.Session(config=config, graph=graph) as sess:
        # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        # Initialize everything first; variables present in the checkpoint are then
        # overwritten by loader.restore() below.
        sess.run(tf.global_variables_initializer())  # initializes the Adam slots that are absent from the checkpoint

        # dir_load = None  # where to restore the model
        dir_load = '/20190625-2133/rutine'  # where to restore the model
        model_name = 'model.ckpt-15'
        need_save = True      # write summaries and checkpoints for this run
        need_shuffle = True   # shuffle each training package before batching

        ######################

        if dir_load is not None:
            load_checkpoints_dir = MODEL_PATH + dir_load
            var_file = os.path.join(load_checkpoints_dir, model_name)
            loader.restore(sess, var_file)  # restore the saved variables into the session


        if need_save:
            # Fresh run directory named by start time, with sub-dirs for the two saver kinds.
            dir_save = datetime.now().strftime("%Y%m%d-%H%M")
            ckpt_dir = MODEL_PATH + '/' + dir_save
            os.makedirs(ckpt_dir)
            ckpt_dir_val = ckpt_dir + '/valid'   # best-on-validation checkpoints
            ckpt_dir_rut = ckpt_dir + '/rutine'  # routine periodic checkpoints
            os.makedirs(ckpt_dir_val)
            os.makedirs(ckpt_dir_rut)

            writer = tf.summary.FileWriter(ckpt_dir, sess.graph)
        # val_sum_writer = tf.summary.FileWriter(save_checkpoints_dir+'val_sum', sess.graph)

        ###############

        # If you want to continue training from check point
        # checkpoint = "F:/tf_projects/CNN/yolo3/save_model/SAVER_MODEL_VOC1/model.ckpt-" + "11"
        # saver.restore(sess, checkpoint)
        epochs = 30  #
        batch_size = 16  # consider 32 tiny  16
        batch_size_v = 32  #32
        best_loss_valid = 10e6  # large sentinel (1e7) so the first validation always improves it
        save_epoch_internal = 5  # save a routine checkpoint every 5 epochs
        sum_iternal = 10  # unused below
        # ######## INIT SAVE ########
        # saver.save(sess, save_checkpoints_dir+"model.ckpt", global_step=0)
        # print("Model saved in file: %s" % save_checkpoints_dir)
        # #########################

        np_path_train = os.path.join(data_path, 'npz2')
        np_path_valid = os.path.join(data_path, 'valid_npz')
        data_gen_train = Data_Gen(np_path_train)
        data_gen_valid = Data_Gen(np_path_valid)

        # NOTE(review): only the first validation package is ever loaded — confirm
        # the whole validation set fits in one package.
        is_end_ = [False, '']
        x_valid, box_data_valid, image_shape_valid, y_valid = data_gen_valid.load_pkg(is_end_)
        num_img_valid = np.shape(x_valid)[0]
        print("number_image_valid", num_img_valid)
        
        # ==== Epoch loop ==============================================================================================
        for epoch in range(epochs):
            # Per-term running means over this epoch (training and validation).
            metric_keys = ['xy_loss', 'wh_loss', 'conf_loss', 'class_loss',
                           'seed_loss', 'order_loss', 'loss', 'wrong_order', 'accuracy']
            sum_train = {key: 0 for key in metric_keys}
            sum_valid = {key: 0 for key in metric_keys}
            # Number of training batches processed so far in this epoch, across ALL npz
            # packages.  BUG FIX: the original used the per-package enumerate index here,
            # which reset the running mean at the start of every package, so the epoch
            # summary only reflected the last package.
            n_train_batches = 0

            is_end = [False, '']
            while not is_end[0]:
                # Load the next training package; load_pkg flags the last one via is_end[0].
                x_train, box_data_train, image_shape_train, y_train = data_gen_train.load_pkg(is_end)

                num_img_train = np.shape(x_train)[0]
                print("num_img_train ", num_img_train)
                if need_shuffle:
                    # One shared permutation keeps images aligned with their targets.
                    perm = np.arange(num_img_train)
                    np.random.shuffle(perm)

                    x_train = x_train[perm]
                    y_train[0] = y_train[0][perm]
                    y_train[1] = y_train[1][perm]
                    if model_type == 'N':  # the tiny model only has two output scales
                        y_train[2] = y_train[2][perm]

                ## Training#################################################################################################

                for start in range(0, num_img_train, batch_size):
                    end = start + batch_size

                    feed_in = {X: x_train[start:end] / 255.,  # scale pixels to [0, 1]
                               phase: True,
                               Y1: y_train[0][start:end],
                               Y2: y_train[1][start:end],
                               }
                    if model_type == 'N':
                        feed_in[Y3] = y_train[2][start:end]

                    loss, wrong_order, accuracy, _, \
                        xy_loss, wh_loss, conf_loss, class_loss, seed_loss, order_loss \
                        = sess.run([loss_tensor, wrong_order_tensor, accuracy_tensor, optimizer] + loss_list,
                                   feed_dict=feed_in)  # , options=run_options)
                    # Abort on divergence: once the loss is NaN the weights are unrecoverable.
                    if np.isnan(loss):
                        print('NaN occur!!!!  exit')
                        sys.exit()

                    # Incremental mean: new_mean = (old_mean * k + value) / (k + 1).
                    batch_metrics = {'xy_loss': xy_loss, 'wh_loss': wh_loss,
                                     'conf_loss': conf_loss, 'class_loss': class_loss,
                                     'seed_loss': seed_loss, 'order_loss': order_loss,
                                     'loss': loss, 'wrong_order': wrong_order,
                                     'accuracy': accuracy}
                    for key in metric_keys:
                        sum_train[key] = (sum_train[key] * n_train_batches + batch_metrics[key]) / (n_train_batches + 1)
                    n_train_batches += 1

            # Epoch training summary ###################################################################################
            print("\nTrain_summary:")
            print(
                    "(xy_loss=%.5f,\t wh_loss=%.5f,\t conf_loss=%.5f,\t class_loss=%.5f,\t seed_loss=%.5f,\t order_loss=%.5f  "
                    % (sum_train['xy_loss'], sum_train['wh_loss'], sum_train['conf_loss'],
                       sum_train['class_loss'], sum_train['seed_loss'], sum_train['order_loss']))

            print("epoch %s / %s \tloss=%.5f \twrong_order: %s\taccuracy = %s\n"
                  % (epoch + 1, epochs, sum_train['loss'], sum_train['wrong_order'], sum_train['accuracy']))

            # Validation ###############################################################################################
            # The validation set is a single fixed package, so the enumerate index is a
            # correct batch counter here.
            for i, start in enumerate(range(0, num_img_valid, batch_size_v)):
                end = start + batch_size_v

                feed_in = {X: x_valid[start:end] / 255.,
                           phase: False,  # inference mode
                           Y1: y_valid[0][start:end],
                           Y2: y_valid[1][start:end],
                           }
                if model_type == 'N':
                    feed_in[Y3] = y_valid[2][start:end]

                # Same tensors as training, but without the optimizer step.
                loss, wrong_order, accuracy, \
                    xy_loss, wh_loss, conf_loss, class_loss, seed_loss, order_loss \
                    = sess.run([loss_tensor, wrong_order_tensor, accuracy_tensor] + loss_list,
                               feed_dict=feed_in)  # ,options=run_options)

                batch_metrics = {'xy_loss': xy_loss, 'wh_loss': wh_loss,
                                 'conf_loss': conf_loss, 'class_loss': class_loss,
                                 'seed_loss': seed_loss, 'order_loss': order_loss,
                                 'loss': loss, 'wrong_order': wrong_order,
                                 'accuracy': accuracy}
                for key in metric_keys:
                    sum_valid[key] = (sum_valid[key] * i + batch_metrics[key]) / (i + 1)

            print("\n\n\n==========================VALID=====================")
            print(
                        "(xy_loss=%.5f,\t wh_loss=%.5f,\t conf_loss=%.5f,\t class_loss=%.5f,\t seed_loss=%.5f,\t order_loss=%.5f  "
                        % (sum_valid['xy_loss'], sum_valid['wh_loss'], sum_valid['conf_loss'],
                           sum_valid['class_loss'], sum_valid['seed_loss'], sum_valid['order_loss']))

            print("epoch %s / %s \tloss=%.5f \twrong_order: %s\taccuracy = %s"
                  % (epoch + 1, epochs, sum_valid['loss'], sum_valid['wrong_order'], sum_valid['accuracy']))
            print("\n====================================================\n\n")

            if need_save:
                # Write the four epoch-average scalars to TensorBoard.
                feed_dict = {avg_loss_node: np.array([sum_train['loss'],
                                                      sum_valid['loss'],
                                                      sum_train['conf_loss'],
                                                      sum_valid['conf_loss'],
                                                      ])}

                mean_summary = sess.run(sum_node, feed_dict=feed_dict)

                writer.add_summary(mean_summary, epoch)

                # Keep a checkpoint whenever the validation loss improves.
                if sum_valid['loss'] < best_loss_valid:
                    best_loss_valid = sum_valid['loss']
                    saver_val.save(sess, ckpt_dir_val + "/model.ckpt", global_step=epoch)
                    print("Model saved in file: %s" % ckpt_dir_val)

                # Routine checkpoint every `save_epoch_internal` epochs.
                if (epoch + 1) % save_epoch_internal == 0:
                    saver_rut.save(sess, ckpt_dir_rut + "/model.ckpt", global_step=epoch + 1)
                    print("Model saved in file: %s" % ckpt_dir_rut)

            print("This epoch completed!")