# author : Zhu Jiang & Ziyuan Li 2020-03-16

import numpy as np
import tensorflow as tf
import os
import time
import pandas as pd

#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Pin the process to GPU index 3 (despite the num_gpus flag below, this run
# only exposes a single device).
os.environ['CUDA_VISIBLE_DEVICES'] = '3'

# ------ command-line flags (TF 1.x tf.app.flags) ------
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_gpus', 1, """How many GPUs to use.""")
tf.app.flags.DEFINE_integer('epochs', 5, """Number of epoch to train.""")
# epochs , 10
tf.app.flags.DEFINE_integer('batch_size', 64, """Number of batches to run.""")
# is_training is read by batch_normalization at graph-construction time
tf.app.flags.DEFINE_boolean('is_training', True, """Is training or not.""")
# labels (energy) are divided by this factor before regression
tf.app.flags.DEFINE_float('scaling', 100, """The linear scaling parameter of y.""")
# default log name is this script's own filename (without extension)
tf.app.flags.DEFINE_string('logname', os.path.splitext(os.path.basename(__file__))[0], """The log directory.""")
tf.app.flags.DEFINE_float('lr_start', 1e-3, """start learning rate.""")
tf.app.flags.DEFINE_float('lr_end', 1e-6, """end learning rate.""")
# [train_mindocid, train_maxdocid) selects which TFRecord files form the
# training set; likewise for the validation set below
tf.app.flags.DEFINE_integer('train_mindocid', 0, """mindocid for trainning dataset""")
tf.app.flags.DEFINE_integer('train_maxdocid', 2000, """maxdocid for trainning dataset""")
#maxdocid , 2000
tf.app.flags.DEFINE_integer('val_mindocid', 9000, """mindocid for valid dataset""")
tf.app.flags.DEFINE_integer('val_maxdocid', 10000, """maxdocid for valid dataset""")
tf.app.flags.DEFINE_integer('nevt_file', 200, """number of events per file""")

# ------ I/O paths ------
path_to_tfr = './data/tfr/'   # input TFRecord directory
path_to_model = './result/'   # root directory for checkpoints
# Checkpoints are written as ./result/<logname>/<logname>-<step>.
path_to_log = path_to_model + FLAGS.logname + '/' + FLAGS.logname
# BUG FIX: the saver writes into the per-run subdirectory ./result/<logname>/,
# but only ./result/ was created before, so the first checkpoint save could
# fail. Create the full log directory; os.makedirs also creates ./result/
# itself when missing, so the old behavior is preserved.
log_dir = os.path.dirname(path_to_log)
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# ------ VAR ------

# hyperparameters derived from the flags
param_sc = FLAGS.scaling  # divisor applied to energy labels (and undone for printing)
epoch_num = FLAGS.epochs
batch_size = FLAGS.batch_size
# total events per "epoch": (train files + val files) * events per file
# NOTE(review): this also counts validation events in the epoch size used for
# the step count / lr schedule — confirm that is intended.
epoch_size = (FLAGS.train_maxdocid-FLAGS.train_mindocid+FLAGS.val_maxdocid-FLAGS.val_mindocid) * FLAGS.nevt_file

# ------ FUNCIONS ------

def identity_block(X_input, kernel_size, filters, stage, block):
    """Build a ResNet identity block (shortcut without projection).

    Arguments:
    X_input -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev);
        its channel count must equal filters[2] so the residual add works
    kernel_size -- int, window size of the middle conv on the main path
    filters -- list of three ints, filter counts of the three main-path convs
    stage -- int, used to build layer names
    block -- string/character, used to build layer names
    Returns:
    output tensor after the residual add and ELU activation
    """

    # layer-name prefixes (must stay stable for checkpoint compatibility)
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    with tf.name_scope("id_block_stage" + str(stage)):
        f1, f2, f3 = filters
        shortcut = X_input

        # main path, first stage: 1x1 conv -> ReLU -> BN
        # (this file's convention puts BN after the activation)
        out = tf.layers.conv2d(X_input, f1, kernel_size=(1, 1), strides=(1, 1), name=conv_name_base + '2a')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2a', training=FLAGS.is_training)

        # main path, second stage: kxk 'same' conv -> ReLU -> BN
        out = tf.layers.conv2d(out, f2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2b', training=FLAGS.is_training)

        # main path, third stage: 1x1 conv -> BN (no activation before the add)
        out = tf.layers.conv2d(out, f3, kernel_size=(1, 1), name=conv_name_base + '2c')
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2c', training=FLAGS.is_training)

        # residual add, then ELU (this file uses ELU, not the paper's ReLU)
        merged = tf.add(out, shortcut)
        activated = tf.nn.elu(merged)

    return activated


def convolutional_block(X_input, kernel_size, filters, stage, block, stride=2):
    """Build a ResNet convolutional block (shortcut uses a 1x1 projection).

    Arguments:
    X_input -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    kernel_size -- int, window size of the middle conv on the main path
    filters -- list of three ints, filter counts of the three main-path convs
    stage -- int, used to build layer names
    block -- string/character, used to build layer names
    stride -- int, spatial stride of both the first conv and the shortcut
        projection (so main path and shortcut stay shape-compatible)
    Returns:
    output tensor after the residual add and ELU activation
    """

    # layer-name prefixes (must stay stable for checkpoint compatibility)
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    with tf.name_scope("conv_block_stage" + str(stage)):
        f1, f2, f3 = filters
        shortcut = X_input

        # main path, first stage: strided 1x1 conv -> ReLU -> BN
        # (this file's convention puts BN after the activation)
        out = tf.layers.conv2d(X_input, f1, kernel_size=(1, 1), strides=(stride, stride), name=conv_name_base + '2a')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2a', training=FLAGS.is_training)

        # main path, second stage: kxk 'same' conv -> ReLU -> BN
        out = tf.layers.conv2d(out, f2, (kernel_size, kernel_size), name=conv_name_base + '2b', padding='same')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2b', training=FLAGS.is_training)

        # main path, third stage: 1x1 conv -> BN (no activation before the add)
        out = tf.layers.conv2d(out, f3, (1, 1), name=conv_name_base + '2c')
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2c', training=FLAGS.is_training)

        # shortcut path: strided 1x1 projection to match the main path's shape
        shortcut = tf.layers.conv2d(shortcut, f3, (1, 1),
                                    strides=(stride, stride), name=conv_name_base + '1')
        shortcut = tf.layers.batch_normalization(shortcut, axis=3, name=bn_name_base + '1',
                                                 training=FLAGS.is_training)

        # residual add, then ELU (this file uses ELU, not the paper's ReLU)
        merged = tf.add(shortcut, out)
        activated = tf.nn.elu(merged)

    return activated


def inference_resnet(x_data_inf):
    """Build the ResNet-style energy-regression network.

    Architecture: two stem convs + max-pool, then ResNet stages 2-5 (one
    convolutional block plus identity blocks each).  Auxiliary 1-unit
    regression heads ("deep supervision") are attached after stages 2, 3
    and 4; the final head regresses from the stage-5 features.

    Arguments:
    x_data_inf -- input image batch; assumed (batch, 230, 122, 2) to match
        preprocess() — TODO confirm
    Returns:
    (regression, flatten, stage2, stage3, stage4) -- final 1-unit energy
    prediction, flattened stage-5 feature vector, and the three 1-unit
    auxiliary stage predictions
    """

    # stage 1: stem — conv (stride 2 in height only) + conv, BN after ReLU
    x = tf.layers.conv2d(x_data_inf, filters=64, kernel_size=[6, 3], strides=[2, 1], padding='same', name='conv1')
    x = tf.nn.relu(x)
    x = tf.layers.batch_normalization(x, name='bn_conv1', training=FLAGS.is_training)
    x = tf.layers.conv2d(x, filters=64, kernel_size=[3, 3], padding='same', name='conv2')  # 115, 122, 64
    x = tf.nn.relu(x)
    x = tf.layers.batch_normalization(x, name='bn_conv2', training=FLAGS.is_training)
    x = tf.layers.max_pooling2d(x, pool_size=(3, 3), strides=(2, 2))  # 58, 61, 128

    # stage 2
    x = convolutional_block(x, kernel_size=3, filters=[64, 64, 256], stage=2, block='a', stride=1)  # 58, 61, 256
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # auxiliary head on the stage-2 features: pool -> flatten -> 100 -> 1
    stage2 = tf.layers.average_pooling2d(x, pool_size=(8, 8), strides=(4, 4))
    stage2 = tf.layers.flatten(stage2, name='stage2_flatten')
    stage2 = tf.layers.dense(stage2, units=100, name='stage2_dense')
    #stage2 = tf.layers.dense(stage2, units=3, name='stage2_reg')
    """
    --->>>>>>> Ddddavid ->>>>> Change Here
    """
    # head reduced from 3 outputs to 1 (scalar energy regression)
    stage2 = tf.layers.dense(stage2, units=1, name='stage2_reg')

    # stage 3
    x = convolutional_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='a', stride=2)  # 29, 32, 512
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    # auxiliary head on the stage-3 features
    stage3 = tf.layers.average_pooling2d(x, pool_size=(4, 4), strides=(4, 4))
    stage3 = tf.layers.flatten(stage3, name='stage3_flatten')
    stage3 = tf.layers.dense(stage3, units=100, name='stage3_dense')
    #stage3 = tf.layers.dense(stage3, units=3, name='stage3_reg')
    """
    --->>>>>>> Ddddavid ->>>>> Change Here
    """
    stage3 = tf.layers.dense(stage3, units=1, name='stage3_reg')


    # stage 4
    x = convolutional_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='a', stride=2)  # 15, 16, 1024
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    # auxiliary head on the stage-4 features
    stage4 = tf.layers.average_pooling2d(x, pool_size=(4, 4), strides=(2, 2))
    stage4 = tf.layers.flatten(stage4, name='stage4_flatten')
    stage4 = tf.layers.dense(stage4, units=100, name='stage4_dense')
    #stage4 = tf.layers.dense(stage4, units=3, name='stage4_reg')
    """
    --->>>>>>> Ddddavid ->>>>> Change Here
    """
    stage4 = tf.layers.dense(stage4, units=1, name='stage4_reg')

    # stage 5
    x = convolutional_block(x, kernel_size=3, filters=[512, 512, 2048], stage=5, block='a', stride=2)  # 8, 8, 2048
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # global pooling over the final 8x8 feature map
    x = tf.layers.average_pooling2d(x, pool_size=(8, 8), strides=(1, 1))

    # final head: flatten -> 100 -> 1
    flatten = tf.layers.flatten(x, name='flatten')
    dense1_all = tf.layers.dense(flatten, units=100, name='fc100')
    #regression = tf.layers.dense(dense1_all, units=3, name='regression')
    """
    --->>>>>>> Ddddavid ->>>>> Change Here
    """
    regression = tf.layers.dense(dense1_all, units=1, name='regression')

    return regression, flatten, stage2, stage3, stage4


def stage_weight_loss(regression, y_data_inf, y_weight, stage2, stage3, stage4):
    """Weighted squared-error loss with auxiliary ("deep supervision") terms.

    Arguments:
    regression -- final prediction, shape (batch, 1)
    y_data_inf -- regression target, shape (batch, 1)
    y_weight -- per-event 3-component weight vector, shape (batch, 3)
        (the 3-vector label tensor is reused as weights by the caller)
    stage2, stage3, stage4 -- auxiliary predictions from intermediate network
        stages, each shape (batch, 1)
    Returns:
    scalar loss tensor ("cross_entropy" is a historical misnomer — this is a
    weighted MSE)

    BUG FIX: tf.reduce_mean(y_weight, axis=1) yields shape (batch,), and
    dividing the (batch, 1) squared error by it broadcast the result to
    (batch, batch), silently mixing every event's error with every other
    event's weight.  keepdims=True keeps the weight as (batch, 1) so the
    division stays element-wise per event.
    """
    # clip per-event weights to [0.1, 10] so no event dominates the loss
    y_weight = tf.maximum(0.1, y_weight)
    y_weight = tf.minimum(10.0, y_weight)

    # per-event mean weight, kept as (batch, 1) for element-wise division
    mean_weight = tf.reduce_mean(y_weight, axis=1, keepdims=True)

    cross_entropy = tf.reduce_mean(tf.pow((regression - y_data_inf), 2) * 10.0 / mean_weight)

    loss_stage2 = tf.reduce_mean(tf.pow((stage2 - y_data_inf), 2) * 10.0 / mean_weight)
    loss_stage3 = tf.reduce_mean(tf.pow((stage3 - y_data_inf), 2) * 10.0 / mean_weight)
    loss_stage4 = tf.reduce_mean(tf.pow((stage4 - y_data_inf), 2) * 10.0 / mean_weight)

    # NOTE: loss_stage2 is computed but deliberately excluded from the total,
    # mirroring the original (only stage-3 and stage-4 auxiliaries are used)
    # cross_entropy = cross_entropy + .3 * loss_stage2 + .3 * loss_stage3 + .3 * loss_stage4
    cross_entropy = cross_entropy + .2 * loss_stage3 + .3 * loss_stage4
    # cross_entropy = cross_entropy + .3 * loss_stage4

    # cross_entropy = tf.minimum(cross_entropy, 10000000000)

    return cross_entropy


def get_accuracy(predict_label, true_label):
    """Mean Euclidean distance between predictions and targets.

    Arguments:
    predict_label -- predicted values, shape (batch, d)
    true_label -- target values, same shape
    Returns:
    scalar tensor: mean over the batch of the per-event L2 residual
    """
    squared_err = tf.pow((predict_label - true_label), 2)
    per_event = tf.pow(tf.reduce_sum(squared_err, axis=1), 0.5)  # L2 norm per event
    return tf.reduce_mean(per_event)


def tfr_names(train_range=None, val_range=None, path=None):
    """Collect the TFRecord file names for the training and validation sets.

    Arguments:
    train_range -- (mindocid, maxdocid) for the training files; defaults to
        (FLAGS.train_mindocid, FLAGS.train_maxdocid)
    val_range -- (mindocid, maxdocid) for the validation files; defaults to
        (FLAGS.val_mindocid, FLAGS.val_maxdocid)
    path -- directory prefix of the TFRecord files; defaults to path_to_tfr
    Returns:
    (train_names_list, val_names_list)

    NOTE: as in the original, a missing file is reported and stops the scan,
    but the missing name has already been appended to the returned list.
    """
    if train_range is None:
        train_range = (FLAGS.train_mindocid, FLAGS.train_maxdocid)
    if val_range is None:
        val_range = (FLAGS.val_mindocid, FLAGS.val_maxdocid)
    if path is None:
        path = path_to_tfr

    def _collect(mindocid, maxdocid):
        # one file per docid: <path>eplus_<docid>.tfrecords
        names = []
        for eve in range(mindocid, maxdocid):
            tfc_name = path + "eplus_{:d}.tfrecords".format(eve)
            names.append(tfc_name)
            if not os.path.exists(tfc_name):
                print("can not find " + tfc_name)
                break
        return names

    return _collect(*train_range), _collect(*val_range)


def parse_function(example_proto):
    """Deserialize one TFRecord example into dense tensors.

    Arguments:
    example_proto -- a serialized tf.train.Example
    Returns:
    dict with keys 'data' (230, 122, 2) float32 image, 'label' (3,) float32
    vector, and 'energy' (1,) float32 scalar
    """
    feature_spec = {
        'data': tf.FixedLenFeature(shape=(), dtype=tf.string),
        'label': tf.FixedLenFeature(shape=(), dtype=tf.string),
        'energy': tf.FixedLenFeature([], tf.float32),
    }

    example = tf.parse_single_example(example_proto, feature_spec)

    # 'data' and 'label' are stored as raw float32 bytes; decode and reshape
    image = tf.decode_raw(example['data'], tf.float32)
    image = tf.reshape(image, [230, 122, 2])
    image.set_shape([230, 122, 2])
    example['data'] = image

    label = tf.decode_raw(example['label'], tf.float32)
    label = tf.reshape(label, [3, ])
    label.set_shape([3, ])
    example['label'] = label

    # 'energy' is a scalar feature; give it an explicit (1,) shape
    energy = tf.reshape(example['energy'], [1, ])
    energy.set_shape([1, ])
    example['energy'] = energy

    return example


def check_center(xdata, batch_size=None):
    """Flag events whose charge is concentrated in the central row band.

    For each event, the summed npe (channel 1) over central rows 50:180 is
    compared against the sum over the two edge bands (rows 0:50 and 180:230).

    Arguments:
    xdata -- array of shape (batch, 230, 122, 2); channel 1 holds npe counts
    batch_size -- number of events to check; defaults to FLAGS.batch_size
        (backward compatible with the original behavior)
    Returns:
    int array of shape (batch_size, 1): 1 where the central band dominates

    BUG FIX: the original performed the comparison OUTSIDE the for loop, so
    only the last event's sums were used and only the last row of is_center
    was ever written; the comparison now runs inside the loop for every event.
    """
    if batch_size is None:
        batch_size = FLAGS.batch_size

    is_center = np.zeros((batch_size, 1), dtype=int)

    for i in range(batch_size):
        area1 = np.sum(xdata[i, 0:50, :, 1])    # top edge band
        area3 = np.sum(xdata[i, 180:230, :, 1])  # bottom edge band
        area2 = np.sum(xdata[i, 50:180, :, 1])   # central band
        is_center[i, 0] = 1 if area2 >= area1 + area3 else 0

    return is_center


def count_parameter():
    """Print the total number of trainable parameters in the default graph."""
    total_parameters = 0
    layer = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        # product of all dimensions = parameter count of this variable
        var_params = 1
        for dim in shape:
            var_params *= dim.value
        total_parameters += var_params
        # rank-4 variables are conv kernels; count them as layers
        if len(shape) >= 4:
            layer += 1

    print('Parameters: %d' % total_parameters)

# ------ STEPS FUNC ------

def build_tf_pipline(handle):
    """Build the input pipeline and return (next_element, train_it, val_it).

    A feedable string-handle iterator lets the caller switch between the
    training and validation datasets at sess.run time via `handle`.

    Arguments:
    handle -- tf.string placeholder fed with an iterator's string handle
    Returns:
    (element, training_iterator, validation_iterator) where `element` is a
    dict of batched tensors with keys 'data', 'label', 'energy'
    """
    train_names, val_names = tfr_names()

    # earlier (sequential) pipeline kept for reference:
    #train_set = tf.data.Dataset.from_tensor_slices(train_names)
    #train_set = train_set.interleave(lambda x: tf.data.TFRecordDataset(x), cycle_length=100, block_length=1)
    ##train_set = tf.data.TFRecordDataset(train_names, num_parallel_reads=10)
    #train_set = train_set.map(parse_function)
    #train_set = train_set.shuffle(buffer_size=20000)
    #train_set = train_set.repeat().batch(FLAGS.batch_size * FLAGS.num_gpus)
    #train_set = train_set.prefetch(buffer_size=FLAGS.batch_size * FLAGS.num_gpus * 2)

    #val_set = tf.data.Dataset.from_tensor_slices(val_names)
    #val_set = val_set.interleave(lambda x: tf.data.TFRecordDataset(x), cycle_length=12, block_length=1)
    ##val_set = tf.data.TFRecordDataset(val_names, num_parallel_reads=10)
    #val_set = val_set.map(parse_function)
    #val_set = val_set.repeat().batch(FLAGS.batch_size)

    #lizy::Update Try parallel
    # read up to 100 files in parallel, then parse+batch in one fused op
    train_set = tf.data.Dataset.from_tensor_slices(train_names)
    train_set = train_set.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, cycle_length=100))
    train_set = train_set.apply(tf.contrib.data.map_and_batch(parse_function, batch_size=FLAGS.batch_size))
    # NOTE(review): shuffle_and_repeat is applied AFTER batching, so it
    # shuffles whole batches with a buffer of only FLAGS.batch_size batches —
    # much weaker shuffling than the commented-out per-example shuffle above.
    train_set = train_set.apply(tf.contrib.data.shuffle_and_repeat(FLAGS.batch_size))
    train_set = train_set.prefetch(buffer_size=FLAGS.batch_size * FLAGS.num_gpus * 2)

    # NOTE(review): the validation set is also shuffled/repeated — unusual
    # for evaluation; confirm this is intended.
    val_set = tf.data.Dataset.from_tensor_slices(val_names)
    val_set = val_set.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, cycle_length=100))
    val_set = val_set.apply(tf.contrib.data.map_and_batch(parse_function, batch_size=FLAGS.batch_size))
    val_set = val_set.apply(tf.contrib.data.shuffle_and_repeat(FLAGS.batch_size))
    val_set = val_set.prefetch(buffer_size=FLAGS.batch_size * FLAGS.num_gpus * 2)

    # generic iterator driven by whichever handle is fed at run time
    iterator = tf.data.Iterator.from_string_handle(handle, train_set.output_types, train_set.output_shapes)
    element = iterator.get_next()

    training_iterator = train_set.make_one_shot_iterator()
    validation_iterator = val_set.make_one_shot_iterator()

    return element, training_iterator, validation_iterator


def preprocess(x_tr, y_tr, scaling=None):
    """Normalize detector images and scale the regression targets.

    Arguments:
    x_tr -- image batch of shape (batch, 230, 122, 2); channel 0 = hit time,
        channel 1 = npe count
    y_tr -- target batch (energy)
    scaling -- divisor applied to y_tr; defaults to the module-level
        param_sc (= FLAGS.scaling), keeping the original behavior
    Returns:
    (x_tr, y_tr) -- float32 image with hit times masked/normalized, and the
    scaled float32 target
    """
    # generalized: the previously hard-coded global scale is now a parameter
    if scaling is None:
        scaling = param_sc

    x_tr = tf.cast(x_tr, dtype=tf.float32)
    x_tr_hittime = tf.slice(x_tr, [0, 0, 0, 0], [-1, 230, 122, 1])
    x_tr_npe = tf.slice(x_tr, [0, 0, 0, 1], [-1, 230, 122, 1])

    # zero out hit times on channels that recorded no photoelectrons
    has_hit = tf.cast(tf.cast(x_tr_npe, dtype=bool), dtype=tf.float32)
    x_tr_hittime = has_hit * x_tr_hittime
    x_tr_hittime = x_tr_hittime / 200.0  # normalize for 200 ns
    x_tr = tf.concat([x_tr_hittime, x_tr_npe], axis=3)
    y_tr = tf.cast(y_tr, dtype=tf.float32) / scaling

    return x_tr, y_tr


def ouput_2_tensorboard():
    """Wire scalar summaries and a FileWriter for TensorBoard.

    NOTE: reads the module-level globals `loss`, `accuracy`, `learning_rate`
    and `sess` — it must be called after the model, lr schedule and session
    are created below.  (Function name keeps the original's "ouput" typo
    because it is part of the public interface.)

    Returns:
    (merged, summary_writer) -- the merged summary op and the FileWriter
    writing to ./tfboard/<logname>
    """
    # regression_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'regress_mum')
    # tf.summary.histogram('weight_reg', regression_vars[0])
    # tf.summary.histogram('bias_reg', regression_vars[1])
    # tf.summary.histogram('activation_reg', reg_activation)
    # grads1 = tf.gradients(cross_entropy, dense1_vars[0], name='grads1')
    # tf.summary.histogram('grads1', grads1)

    tf.summary.scalar('loss function', loss)
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('lr', learning_rate)

    # tf.summary.scalar('sub loss', loss_s)
    # tf.summary.scalar('sub accuracy', accuracy_s)
    # tf.summary.scalar('lr2', learning_rate_2)

    merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('./tfboard/' + FLAGS.logname, sess.graph, filename_suffix='.'+FLAGS.logname)

    return merged, summary_writer


# ------ STEPS CODE ------
print "let's  begin !!!"
# --- dataset and pineline --- >

print 'start time : %s ' % time.asctime( time.localtime(time.time()) )

# string-handle placeholder lets one graph switch between the training and
# validation iterators at sess.run time
handle = tf.placeholder(tf.string, shape=[])
element, training_iterator, validation_iterator = build_tf_pipline(handle)

image = element['data']  # (none,230,122,2) float.32
label = element['label']  # (none,3) float.32
energy = element['energy']  # (none) float.32

# --- data i/o --- >
# the scalar energy is the regression target; the 3-vector label is reused
# as per-event weights inside stage_weight_loss
x_tr = image
y_tr = energy
y_tr_en = label


x_tr, y_tr = preprocess(x_tr, y_tr)

# ---model --- >
with tf.variable_scope("net_1"):
    prediction, features, s2, s3, s4 = inference_resnet(x_tr)
    loss = stage_weight_loss(prediction, y_tr, y_tr_en, s2, s3, s4)
    accuracy = get_accuracy(prediction, y_tr)

# --- session --- >
sess = tf.InteractiveSession()

# --- lr --- >
start_lr = FLAGS.lr_start
end_lr = FLAGS.lr_end
# NOTE: Python 2 integer division — total_steps is truncated to an int
total_steps = epoch_size * epoch_num / batch_size
global_step = tf.Variable(0, trainable=False)
# per-1000-step decay factor chosen so lr goes from lr_start to lr_end over
# the full run: (end/start)^(1000/total_steps)
decrease_rate = (end_lr / start_lr) ** (1000.0 / total_steps)
learning_rate = tf.train.exponential_decay(start_lr, global_step, 1000, decrease_rate, staircase=True)
print "**************** PARAMETERS *****************"
print "Total number of events = ", epoch_size
print "epoch_num = %d, batch size = %d, in total %d steps" % (epoch_num, FLAGS.batch_size * FLAGS.num_gpus, total_steps)
print "start learning rate = %.1e, end learning rate = %.1e" % (start_lr, end_lr)
print "decrease rate is %.3f in every 1000 steps" % decrease_rate
print "**************** PARAMETERS *****************"

# ---  optimizer --->
opt = tf.train.AdamOptimizer(learning_rate)
# opt = tf.train.GradientDescentOptimizer(learning_rate)

# batch-norm moving-average updates live in UPDATE_OPS and must run together
# with every training step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_step = opt.minimize(loss, global_step=global_step)

# --- Saver --->
saver2 = tf.train.Saver(max_to_keep=20)
#saver2.restore(sess, save_path=path_to_log+'-401')
# NOTE(review): if the restore line above is re-enabled, the
# global_variables_initializer run below would overwrite the restored
# weights — restore must happen after initialization.
sess.run(global_step.assign(0))
print "updating the steps info"
old_step = sess.run(global_step)
left_steps = total_steps - old_step
print "%d left for training" % left_steps

# --- initialize --- >
init = tf.global_variables_initializer()
sess.run(init)

# --- tensorboard --- >
merged, summary_writer = ouput_2_tensorboard()

# ----------------------------- LET US TRAIN ( ` . ')/ ~O ----------------------

training_handle = sess.run(training_iterator.string_handle())
validation_handle = sess.run(validation_iterator.string_handle())
for i in range(left_steps):
    start_time = time.time()  # TIME | >>>>

    # one optimization step; extra tensors are fetched only for logging
    _, loss_value, predict_value, label_value, accuracy_value, data_v, fv, istep = sess.run(
        [train_step, loss, prediction, label, accuracy, x_tr, features, global_step],
        feed_dict={handle: training_handle})
    #print(label_value)
    #exit()

    duration = time.time() - start_time  # TIME | <<<<

    # periodic console logging (denser near the start of training)
    if i % 200 == 0 or i == 50 or i == 100 or i == 150:
        num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = duration / FLAGS.num_gpus
        epoch = istep * FLAGS.batch_size * FLAGS.num_gpus / epoch_size
        lr_value = sess.run(learning_rate)

        """
        --->>>>>>> Ddddavid ->>>>> Change Here
        """
        # NOTE(review): 'pd' is rescaled by param_sc while 'true' prints the
        # raw first component of the 3-vector label — confirm the two are in
        # the same units.
        print ('%d, epoch %d, loss = %.2f,  res = %.1f '
               'pd %.1f, true %.1f, lr %.1e (%.1f examples/sec; %.3f ''sec/batch)'
               % (i, epoch, loss_value, accuracy_value * param_sc,
                  predict_value[0, 0] * param_sc,
                  label_value[0, 0],
                  lr_value, examples_per_sec, sec_per_batch))

    # checkpoint + summary + quick validation every 2000 steps and at the end
    # (note operator precedence: (i % 2000 == 0 and i != 0) or last step)
    if i % 2000 == 0 and i != 0 or i == total_steps - 1:
        summary = sess.run(merged, feed_dict={handle: training_handle})
        summary_writer.add_summary(summary, i)
        time_str = time.strftime('%m%d_%H%M%S', time.localtime())
        saver2.save(sess, path_to_log, global_step=global_step)

        # return a val acc
        val_acc_v = 0
        for j in range(10):
            val_acc_v += sess.run(accuracy, feed_dict={handle: validation_handle})
        val_acc_v = val_acc_v / 10.0
        print 'VAL ACC = %.2f' % (val_acc_v * param_sc)

# ----------------------------- VALIDATING --------
print "let us test with val set (' ')b U"

val_step = 500
acc_val = 0
# sess.run(val_init_op)
# NOTE(review): this flag assignment has NO effect on the already-built
# graph — batch_normalization captured `training` as a Python bool at graph
# construction time, so validation still runs batch norm in training mode.
FLAGS.is_training = False
for i in range(val_step):
    acc_val += sess.run(accuracy, feed_dict={handle: validation_handle})

acc_val = acc_val * 1.0 / val_step
print 'total res = %.2f' % (acc_val * param_sc)

print 'end time : %s ' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())