#!/usr/bin/env python
# author : Zhu Jiang & Ziyuan Li
# 	make vertex prediction using trained model
# 	user need to modify modelname and the ID according to trained model

import numpy as np
import tensorflow as tf
import os
import time
import pandas as pd
import argparse

# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Restrict TensorFlow to the first GPU only.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# TF1-style command-line flags for the data/loop parameters.
# NOTE(review): 'modelname' and 'ID' are defined both here and via argparse
# below; the argparse values are the ones actually used to build paths, so
# the FLAGS pair appears redundant — confirm before removing.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_gpus', 1, """How many GPUs to use.""")
tf.app.flags.DEFINE_integer('batch_size', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('is_training', False, """Is training or not.""")
tf.app.flags.DEFINE_float('scaling', 100, """The linear scaling parameter of y.""")
tf.app.flags.DEFINE_integer('mindocID', 0, """mindocID for valid dataset.""")
tf.app.flags.DEFINE_integer('maxdocID', 1100, """maxdocID for valid dataset.""")
tf.app.flags.DEFINE_integer('nevt_file', 100, """number of events per file.""")
tf.app.flags.DEFINE_string('modelname', 'modelname', """The model name.""")
tf.app.flags.DEFINE_integer('ID', 0, """model id for rec""")

# Command-line arguments selecting which trained checkpoint to load.
parser = argparse.ArgumentParser(description='Transfer modelname and ID to script')
parser.add_argument("--modelname", default="model_vtx_res50", help="model name for rec")
parser.add_argument("--ID", type=int, default=20001, help='model id for rec')

args = parser.parse_args()

# Input TFRecord directory, trained checkpoint prefix, and output directory.
path_to_tfr = './data/tfr/'
path_to_model = '../train/result/' + str(args.modelname) + '/' + str(args.modelname) + '-' + str(args.ID)
path_to_result = './result/' + str(args.modelname) + '/'
if not os.path.exists(path_to_result):
    os.makedirs(path_to_result)

# Result CSV name encodes the model ID and the docID range reconstructed.
output_name = 'recvtx_' + str(args.ID)
output_file = path_to_result + output_name + '_' + str(FLAGS.mindocID) + '_' + str(FLAGS.maxdocID) + '.csv'

# ------ VAR ------

param_sc = FLAGS.scaling
batch_size = FLAGS.batch_size * FLAGS.num_gpus
epoch_size = (FLAGS.maxdocID - FLAGS.mindocID) * FLAGS.nevt_file
# Floor division: val_step is used as a loop count (range(val_step)), so it
# must stay an int under Python 3 as well ('/' would yield a float there).
val_step = epoch_size // batch_size

# The original had bare `print` statements followed by orphaned string
# expressions (a botched 2-to-3 conversion), so this banner was never
# actually printed. Restored as proper print calls (valid on Py2 and Py3).
print("%s %d" % (args.modelname, args.ID))
print("Input model : %s" % path_to_model)
print("Reconstrcution result : %s" % output_file)
print("rec files from id %d to %d, nevt/file = %d, total events = %d" % (
    FLAGS.mindocID, FLAGS.maxdocID, FLAGS.nevt_file, epoch_size))
print("batch size is = %d, num of step to run = %d\n" % (batch_size, val_step))


# ------ FUNCTIONS ------

def identity_block(X_input, kernel_size, filters, stage, block):
    """Residual identity block: the shortcut is added back unchanged.

    Arguments:
        X_input: 4-D input tensor (m, n_H_prev, n_W_prev, n_C_prev).
        kernel_size: int, window size of the middle conv of the main path.
        filters: list of three ints, filter counts of the three conv layers.
        stage: int, used to build layer names.
        block: string/character, used to build layer names.

    Returns:
        Output tensor of the block (same shape as X_input).
    """
    # Layer-name prefixes, shared with the checkpoint's variable names.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    f1, f2, f3 = filters

    with tf.name_scope("id_block_stage" + str(stage)):
        shortcut = X_input

        # Main path: 1x1 conv -> kxk conv -> 1x1 conv. Note that in this
        # file activation comes *before* batch-norm (unconventional, but
        # consistent across all blocks — must match the trained weights).
        out = tf.layers.conv2d(X_input, f1, (1, 1), strides=(1, 1),
                               name=conv_name_base + '2a')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2a',
                                            training=FLAGS.is_training)

        out = tf.layers.conv2d(out, f2, (kernel_size, kernel_size),
                               padding='same', name=conv_name_base + '2b')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2b',
                                            training=FLAGS.is_training)

        out = tf.layers.conv2d(out, f3, (1, 1), name=conv_name_base + '2c')
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2c',
                                            training=FLAGS.is_training)

        # Join shortcut and main path; final activation is elu (not relu,
        # despite the original ResNet paper).
        out = tf.nn.elu(tf.add(out, shortcut))

    return out


def convolutional_block(X_input, kernel_size, filters, stage, block, stride=2):
    """Residual block whose shortcut carries a strided 1x1 conv + BN, so the
    output may differ from the input in spatial size and channel count.

    Arguments:
        X_input: 4-D input tensor (m, n_H_prev, n_W_prev, n_C_prev).
        kernel_size: int, window size of the middle conv of the main path.
        filters: list of three ints, filter counts of the three conv layers.
        stage: int, used to build layer names.
        block: string/character, used to build layer names.
        stride: int, stride of the first main-path conv and of the shortcut.

    Returns:
        Output tensor of the block.
    """
    # Layer-name prefixes, shared with the checkpoint's variable names.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    f1, f2, f3 = filters

    with tf.name_scope("conv_block_stage" + str(stage)):
        shortcut = X_input

        # Main path: strided 1x1 conv -> kxk conv -> 1x1 conv; activation
        # precedes batch-norm, matching the rest of this file.
        out = tf.layers.conv2d(X_input, f1, (1, 1), strides=(stride, stride),
                               name=conv_name_base + '2a')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2a',
                                            training=FLAGS.is_training)

        out = tf.layers.conv2d(out, f2, (kernel_size, kernel_size),
                               padding='same', name=conv_name_base + '2b')
        out = tf.nn.relu(out)
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2b',
                                            training=FLAGS.is_training)

        out = tf.layers.conv2d(out, f3, (1, 1), name=conv_name_base + '2c')
        out = tf.layers.batch_normalization(out, name=bn_name_base + '2c',
                                            training=FLAGS.is_training)

        # Shortcut path: project to f3 channels with the same stride so the
        # shapes match for the addition.
        shortcut = tf.layers.conv2d(shortcut, f3, (1, 1),
                                    strides=(stride, stride),
                                    name=conv_name_base + '1')
        shortcut = tf.layers.batch_normalization(shortcut, axis=3,
                                                 name=bn_name_base + '1',
                                                 training=FLAGS.is_training)

        # Join and apply the final elu activation.
        out = tf.nn.elu(tf.add(shortcut, out))

    return out


def inference_resnet(x_data_inf):
    """
    ResNet50-style backbone with auxiliary regression heads:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
    Arguments:
        x_data_inf -- input image tensor; NHWC with 230x122 spatial size and
            2 channels per parse_function/preprocess (assumed — the inline
            shape comments below were written for that input size)
    Returns:
        (regression, flatten, stage2, stage3, stage4):
        regression -- final 1-unit regression output
        flatten -- flattened stage-5 features
        stage2, stage3, stage4 -- auxiliary 1-unit regression outputs taken
            after stages 2/3/4 (originally 3 units for a vertex (x,y,z);
            reduced to 1 unit for energy — see the "Victor change" markers)
    """

    # stage 1: two convs (activation before BN, as everywhere in this file),
    # then 3x3 max-pool with stride 2.
    x = tf.layers.conv2d(x_data_inf, filters=64, kernel_size=[6, 3], strides=[2, 1], padding='same', name='conv1')
    x = tf.nn.relu(x)
    x = tf.layers.batch_normalization(x, name='bn_conv1', training=FLAGS.is_training)
    x = tf.layers.conv2d(x, filters=64, kernel_size=[3, 3], padding='same', name='conv2')  # 115, 122, 64
    x = tf.nn.relu(x)
    x = tf.layers.batch_normalization(x, name='bn_conv2', training=FLAGS.is_training)
    x = tf.layers.max_pooling2d(x, pool_size=(3, 3), strides=(2, 2))  # 58, 61, 128

    # stage 2: one convolutional block + two identity blocks.
    x = convolutional_block(x, kernel_size=3, filters=[64, 64, 256], stage=2, block='a', stride=1)  # 58, 61, 256
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # Auxiliary head on stage-2 features: pool -> flatten -> dense(100) -> dense(1).
    stage2 = tf.layers.average_pooling2d(x, pool_size=(8, 8), strides=(4, 4))
    stage2 = tf.layers.flatten(stage2, name='stage2_flatten')
    stage2 = tf.layers.dense(stage2, units=100, name='stage2_dense')
    # stage2 = tf.layers.dense(stage2, units=3, name='stage2_reg')

    # >>>>>>------VVVVVictor change------<<<<<<<

    stage2 = tf.layers.dense(stage2, units=1, name='stage2_reg')

    # stage 3: one convolutional block + three identity blocks.
    x = convolutional_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='a', stride=2)  # 29, 32, 512
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    # Auxiliary head on stage-3 features.
    stage3 = tf.layers.average_pooling2d(x, pool_size=(4, 4), strides=(4, 4))
    stage3 = tf.layers.flatten(stage3, name='stage3_flatten')
    stage3 = tf.layers.dense(stage3, units=100, name='stage3_dense')
    # stage3 = tf.layers.dense(stage3, units=3, name='stage3_reg')

    # >>>>>>------VVVVVictor change------<<<<<<<

    stage3 = tf.layers.dense(stage3, units=1, name='stage3_reg')

    # stage 4: one convolutional block + five identity blocks.
    x = convolutional_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='a', stride=2)  # 15, 16, 1024
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    # Auxiliary head on stage-4 features.
    stage4 = tf.layers.average_pooling2d(x, pool_size=(4, 4), strides=(2, 2))
    stage4 = tf.layers.flatten(stage4, name='stage4_flatten')
    stage4 = tf.layers.dense(stage4, units=100, name='stage4_dense')
    # stage4 = tf.layers.dense(stage4, units=3, name='stage4_reg')

    # >>>>>>------VVVVVictor change------<<<<<<<

    stage4 = tf.layers.dense(stage4, units=1, name='stage4_reg')

    # stage 5: one convolutional block + two identity blocks.
    x = convolutional_block(x, kernel_size=3, filters=[512, 512, 2048], stage=5, block='a', stride=2)  # 8, 8, 2048
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = tf.layers.average_pooling2d(x, pool_size=(8, 8), strides=(1, 1))

    # Final head: flatten -> dense(100) -> dense(1).
    flatten = tf.layers.flatten(x, name='flatten')
    dense1_all = tf.layers.dense(flatten, units=100, name='fc100')
    # regression = tf.layers.dense(dense1_all, units=3, name='regression')

    # >>>>>>------VVVVVictor change------<<<<<<<

    regression = tf.layers.dense(dense1_all, units=1, name='regression')

    return regression, flatten, stage2, stage3, stage4


def get_accuracy(predict_label, true_label):
    """L2 norm of (prediction - truth) summed over *all* elements.

    Note: reduce_sum has no axis, so for batched input this is the norm of
    the whole residual tensor, not a per-event distance; the trailing
    reduce_mean of the resulting scalar is a no-op kept from the per-event
    (axis=1) variant shown in the commented line.
    """
    # residual = tf.reduce_sum(tf.pow((predict_label - true_label), 2), axis=1)  # loss for track count
    # >>>>>>------VVVVVictor change------<<<<<<<
    squared_err = tf.pow((predict_label - true_label), 2)
    dist = tf.pow(tf.reduce_sum(squared_err), 0.5)
    return tf.reduce_mean(dist)


def get_radius(true_label):
    """L2 norm of the truth tensor (summed over all elements, no axis)."""
    # residual = tf.reduce_sum(tf.pow(true_label, 2), axis=1)

    # >>>>>>------VVVVVictor change------<<<<<<<
    sum_sq = tf.reduce_sum(tf.pow(true_label, 2))
    return tf.pow(sum_sq, 0.5)


def tfr_names():
    """Build the list of validation TFRecord paths for docIDs in
    [mindocID, maxdocID).

    Returns:
        List of file paths. If a file is missing, iteration stops early;
        NOTE(review): the missing name is appended *before* the existence
        check, so it still ends up in the list (preserved original behavior
        — confirm whether TFRecordDataset should ever see it).
    """
    val_names_list = []

    # validation dataset
    for eve in range(FLAGS.mindocID, FLAGS.maxdocID, 1):
        tfc_name = path_to_tfr + "eplus_{:d}.tfrecords".format(eve)
        val_names_list.append(tfc_name)
        if not os.path.exists(tfc_name):
            # Was a bare `print` plus an orphaned string expression, so the
            # warning was never shown. Restored as a real print call.
            print("can not find " + tfc_name)
            break

    return val_names_list


def parse_function(example_proto):
    """Parse one serialized TFRecord Example into dense tensors.

    Returns a dict with:
        'data'   -- float32 image of shape (230, 122, 2); channel 0 is hit
                    time and channel 1 is npe (see preprocess)
        'label'  -- float32 vertex truth of shape (3,)
        'energy' -- float32 scalar reshaped to (1,)
    """
    dics = {
        'data': tf.FixedLenFeature(shape=(), dtype=tf.string),
        'label': tf.FixedLenFeature(shape=(), dtype=tf.string),
        'energy': tf.FixedLenFeature([], tf.float32),
    }

    parsed_example = tf.parse_single_example(example_proto, dics)

    # Raw bytes -> float32 image with a static (230, 122, 2) shape.
    parsed_example['data'] = tf.decode_raw(parsed_example['data'], tf.float32)
    parsed_example['data'] = tf.reshape(parsed_example['data'], [230, 122, 2])
    parsed_example['data'].set_shape([230, 122, 2])

    # Raw bytes -> float32 (x, y, z) truth vector.
    parsed_example['label'] = tf.decode_raw(parsed_example['label'], tf.float32)
    parsed_example['label'] = tf.reshape(parsed_example['label'], [3, ])
    parsed_example['label'].set_shape([3, ])

    # Scalar energy -> rank-1 tensor so it can be concatenated downstream.
    parsed_example['energy'] = tf.reshape(parsed_example['energy'], [1, ])
    parsed_example['energy'].set_shape([1, ])

    return parsed_example


def check_center(xdata):
    """Flag events whose charge is concentrated in the central rows.

    For each event, sums channel 1 (npe) over three row bands of the
    230-row image: top (0:50), middle (50:180), bottom (180:230). The event
    is "center" (1) when the middle band holds at least as much charge as
    the two edge bands combined.

    Bug fixes vs the original:
      * the `if` block was dedented out of the loop, so only the last
        event in the batch was ever classified — it now runs per event;
      * Py2-only `xrange` replaced with `range`;
      * batch size is taken from xdata.shape[0] instead of FLAGS.batch_size
        (backward compatible: the two are equal for every full batch).

    Arguments:
        xdata: numpy array of shape (batch, 230, 122, 2).

    Returns:
        int numpy array of shape (batch, 1) with 0/1 center flags.
    """
    data = xdata
    n_events = data.shape[0]
    is_center = np.zeros((n_events, 1), dtype=int)

    for i in range(n_events):
        area1 = np.sum(data[i, 0:50, :, 1])
        area3 = np.sum(data[i, 180:230, :, 1])
        area2 = np.sum(data[i, 50:180, :, 1])

        if area2 >= area1 + area3:
            is_center[i, 0] = 1
        else:
            is_center[i, 0] = 0

    return is_center


def count_parameter():
    """Print the total number of trainable parameters in the default graph."""
    total_parameters = 0
    layer = 0  # counts rank>=4 (conv-like) variables; currently not reported
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value

        total_parameters += variable_parameters
        if len(shape) >= 4:
            layer += 1

    # Was a bare `print` plus an orphaned string expression, so the count
    # was never shown. Restored as a real print call.
    print('Parameters: %d' % total_parameters)


# ------ STEPS FUNC ------

def build_tf_pipline():
    """Build the TF1 input pipeline over the validation TFRecord files.

    Returns:
        element: dict of next-batch tensors ('data', 'label', 'energy').
        val_init_op: initializer op for the reinitializable iterator;
            must be sess.run() before the first get_next.
    """
    val_names = tfr_names()

    # val_set = tf.data.Dataset.from_tensor_slices(val_names)
    # val_set = val_set.interleave(lambda x: tf.data.TFRecordDataset(x), cycle_length=12, block_length=1)
    val_set = tf.data.TFRecordDataset(val_names, num_parallel_reads=100)
    val_set = val_set.map(parse_function)
    # repeat() so the fixed number of reconstruction steps can never
    # exhaust the dataset mid-loop.
    val_set = val_set.repeat().batch(FLAGS.batch_size)

    iterator = tf.data.Iterator.from_structure(val_set.output_types, val_set.output_shapes)
    val_init_op = iterator.make_initializer(val_set)
    element = iterator.get_next()

    return element, val_init_op


def preprocess(x_tr, y_tr):
    """Normalize the input image and scale the regression target.

    Channel 0 of x_tr is hit time, channel 1 is npe. Hit times of PMTs with
    no registered npe are zeroed, then all times are normalized by 200 ns.
    The target y_tr is divided by the global scaling factor `param_sc`.
    """
    x_tr = tf.cast(x_tr, dtype=tf.float32)
    hittime = tf.slice(x_tr, [0, 0, 0, 0], [-1, 230, 122, 1])
    npe = tf.slice(x_tr, [0, 0, 0, 1], [-1, 230, 122, 1])

    # 1.0 wherever the PMT registered at least one photoelectron, else 0.0.
    hit_mask = tf.cast(tf.cast(npe, dtype=bool), dtype=tf.float32)
    hittime = hit_mask * hittime
    hittime = hittime / 200.0  # normalize for 200 ns

    x_out = tf.concat([hittime, npe], axis=3)
    y_out = tf.cast(y_tr, dtype=tf.float32) / param_sc

    return x_out, y_out


# ------ STEPS CODE ------
# Py2-only `print` statements converted to the parenthesized form, which is
# valid on both Python 2 and 3 (the file elsewhere shows a partial, broken
# 2-to-3 conversion).
print("let's  begin !!!")
print('start time : %s ' % time.asctime(time.localtime(time.time())))

# --- dataset and pineline --- >
element, val_init_op = build_tf_pipline()

image = element['data']  # (none,230,122,2) float.32
label = element['label']  # (none,3) float.32
energy = element['energy']  # (none) float.32

# --- data i/o --- >
x_tr = image
# y_tr = label
# y_tr_en = energy

# >>>>>>------VVVVVictor change------<<<<<<<
# Swap the roles of label and energy: the network regresses the event
# energy (1 value) instead of the vertex position (3 values).
y_tr = energy
y_tr_en = label

x_tr, y_tr = preprocess(x_tr, y_tr)

# ---model --- >
# Build the graph once; only `prediction`, `accuracy` and `radius` are used
# in the reconstruction loop below.
with tf.variable_scope("net_1"):
    prediction, features, s2, s3, s4 = inference_resnet(x_tr)
    accuracy = get_accuracy(prediction, y_tr)
    radius = get_radius(y_tr)

# --- session --- >
# Single-threaded session configuration for inference.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.InteractiveSession(config=session_conf)

# --- Saver --->
# Restore weights from ../train/result/<modelname>/<modelname>-<ID>.
saver = tf.train.Saver()
saver.restore(sess, save_path=path_to_model)

# --- parameter --->
count_parameter()

# --- initialize --- >
acc_val = 0
sess.run(val_init_op)

# --- file --- >
rec_list = []
numpy_file = None

# ----------------------------- LET US RECONSTRUCT ( ` . ')/ ~O ----------------------
print("let us rec with val set (' ')b U")

for i in range(val_step):
    prediction_value, label_value, energy_value, accuracy_value, image_value, radius_v = sess.run(
        [prediction, label, energy, accuracy, image, radius])

    # Undo the linear scaling applied in preprocess().
    prediction_value = prediction_value * param_sc
    # output = np.concatenate((prediction_value, label_value), axis=1)
    # energy_value = np.reshape(energy_value, [-1, 1])

    # >>>>>>------VVVVVictor change------<<<<<<<
    # Per-event row layout: rec_E, sim_E, then true vertex (x, y, z) and
    # the center flag.
    output = np.concatenate((prediction_value, energy_value), axis=1)
    label_value = np.reshape(label_value, [-1, 3])

    center_column = check_center(image_value)

    # output = np.concatenate((output, energy_value, center_column), axis=1)

    # >>>>>>------VVVVVictor change------<<<<<<<
    output = np.concatenate((output, label_value, center_column), axis=1)

    rec_list.append(output)

    acc_val += accuracy_value * param_sc
    radius_v = radius_v * param_sc
    print('%d res = %.2f, ' % (i, accuracy_value * param_sc))

# Single concatenation after the loop; the original also grew `numpy_file`
# with np.concatenate on every iteration, which is quadratic and redundant
# with rec_list.
numpy_file = np.concatenate(rec_list, axis=0)

# df = pd.DataFrame(numpy_file, columns=['rec_x', 'rec_y', 'rec_z', 'sim_x', 'sim_y', 'sim_z', 'energy', 'isCenter'])

# >>>>>>------VVVVVictor change------<<<<<<<
df = pd.DataFrame(numpy_file, columns=['rec_E', 'sim_E', 'x', 'y', 'z', 'isCenter'])

df.to_csv(output_file, index=False)

acc_val = acc_val * 1.0 / val_step
print('total res = %.2f' % (acc_val))

print('end time : %s ' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
