### author : Zhu Jiang & Ziyuan Li 2020-03-1
### Updated by Ddddavid 21-5-8 

import sys
import os
import argparse
import time
import numpy as np
import pandas as pd
import tensorflow as tf


# Environment check: the script uses Python-3-only features, so fail fast on Python 2.
if sys.version_info < (3, 0):
    # BUG FIX: error message grammar ("should be execute" -> "must be executed").
    raise EnvironmentError("This script must be executed with Python 3")


# Command-line arguments for quick testing.
ap = argparse.ArgumentParser()
ap.add_argument('--gpu', type=int, choices=[0, 1, 2, 3], default=0, help='Choose which GPU to use.')
# BUG FIX: help-text typo "Nunmber" -> "Number".
ap.add_argument('--num_gpus', type=int, default=1, help='Number of GPUs used.')
ap.add_argument('--epochs', type=int, default=10, help='Number of epochs to train.')
ap.add_argument('--batch_size', type=int, default=64, help='Number of batches to run.')
ap.add_argument('--scaling', type=float, default=100, help='The linear scaling parameter of y.')
# Default log name is this script's filename without its extension.
# BUG FIX: help-text typo "Log director." -> "Log directory.".
ap.add_argument('--logname', type=str, default=os.path.splitext(os.path.basename(__file__))[0], help='Log directory.')
ap.add_argument('--log_level', type=int, default=2, help='CUDA log level.')

args = ap.parse_args()
# These environment variables must be set before TensorFlow initializes CUDA,
# hence they are assigned here at import time rather than inside a main().
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.log_level)
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

# Mirror the CLI arguments into TF1-style tf.flags so the rest of the script
# reads every hyperparameter from the single FLAGS object.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer('num_gpus', args.num_gpus, """How many GPUs to use.""")
tf.flags.DEFINE_integer('epochs', args.epochs, """Number of epoch to train.""")
tf.flags.DEFINE_integer('batch_size', args.batch_size, """Number of batches to run.""")
tf.flags.DEFINE_float('scaling', args.scaling, """The linear scaling parameter of y.""")
tf.flags.DEFINE_string('logname', args.logname, """The log directory.""")
tf.flags.DEFINE_boolean('is_training', True, """Is training or not.""")
tf.flags.DEFINE_float('lr_start', 1e-3, """start learning rate.""")
tf.flags.DEFINE_float('lr_end', 1e-6, """end learning rate.""")
# BUG FIX: help-text typo "trainning" -> "training" (four flags below).
tf.flags.DEFINE_integer('train_mindocid', 0, """mindocid for training dataset""")
tf.flags.DEFINE_integer('train_maxdocid', 9000, """maxdocid for training dataset""")
tf.flags.DEFINE_integer('val_mindocid', 9000, """mindocid for validation dataset""")
tf.flags.DEFINE_integer('val_maxdocid', 10000, """maxdocid for validation dataset""")
tf.flags.DEFINE_integer('nevt_file', 200, """number of events per file""")

path_to_tfr = './data/tfr/'
path_to_model = './result/'
# Log files live under './result/<logname>/<logname>'.
path_to_log = path_to_model + FLAGS.logname + '/' + FLAGS.logname
# BUG FIX: the original only created path_to_model ('./result/'), but
# path_to_log points inside './result/<logname>/', so the first write to the
# log path would fail.  Create the full log directory; exist_ok avoids both
# the existence pre-check and a race with concurrent runs.
os.makedirs(os.path.dirname(path_to_log), exist_ok=True)


scaling = FLAGS.scaling
epoch = FLAGS.epochs
batch_size = FLAGS.batch_size
# Total number of events across the train + validation docid ranges,
# at FLAGS.nevt_file events per file.
epoch_size = (FLAGS.train_maxdocid - FLAGS.train_mindocid + FLAGS.val_maxdocid - FLAGS.val_mindocid) * FLAGS.nevt_file


class ResNet50(tf.keras.Model):
    """ResNet-50 style building blocks: identity block and convolutional block.

    NOTE(review): no ``call()`` method is defined in this chunk; presumably the
    forward pass is assembled elsewhere in the file — confirm.
    """

    def __init__(self):
        super(ResNet50, self).__init__()
        # References to the Keras layer classes; concrete layers are
        # instantiated on the fly inside id_block / cv_block.
        self.dense = tf.keras.layers.Dense
        self.conv2d = tf.keras.layers.Conv2D
        self.batchNormalization = tf.keras.layers.BatchNormalization
        self.flatten = tf.keras.layers.Flatten
        self.relu = tf.nn.relu
        self.elu = tf.nn.elu
        self.training = False

    def id_block(self, X, kernel_size, filters, stage, block):
        """
        Implementation of the identity block as defined in Figure 3
        Arguments:
        X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        kernel_size -- integer, specifying the shape of the middle CONV's window for the main path
        filters -- python list of integers, defining the number of filters in the CONV layers of the main path
        stage -- integer, used to name the layers, depending on their position in the network
        block -- string/character, used to name the layers, depending on their position in the network
        Returns:
        X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
        """
        conv_name_base = 'res' + str(stage) + block + '_branch'
        # BUG FIX: the closing quote of this string literal was missing.
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        with tf.name_scope('id_block_stage' + str(stage)):
            filter1, filter2, filter3 = filters

            # BUG FIX (applies to every BN below): BatchNormalization is a
            # layer *class*.  The original called it as
            # `self.batchNormalization(x, name=..., training=...)`, which
            # passes the tensor as the `axis` constructor argument.  Correct
            # pattern: configure the layer first, then call it on the tensor;
            # `training` is a call-time argument.
            x = self.conv2d(filter1, kernel_size=(1, 1), strides=(1, 1), activation='relu', name=conv_name_base + '2a')(X)
            x = self.batchNormalization(name=bn_name_base + '2a')(x, training=FLAGS.is_training)

            # BUG FIX: removed a stray second comma in the BN call below.
            x = self.conv2d(filter2, kernel_size=(kernel_size, kernel_size), activation='relu', padding='same', name=conv_name_base + '2b')(x)
            x = self.batchNormalization(name=bn_name_base + '2b')(x, training=FLAGS.is_training)

            x = self.conv2d(filter3, kernel_size=(1, 1), activation='relu', name=conv_name_base + '2c')(x)
            x = self.batchNormalization(name=bn_name_base + '2c')(x, training=FLAGS.is_training)

        # Residual connection: add the unchanged input back, then apply ELU.
        return self.elu(tf.add(x, X))

    def cv_block(self, X, kernel_size, filters, stage, block, stride=2):
        """
        Implementation of the convolutional block as defined in Figure 4
        Arguments:
        X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        kernel_size -- integer, specifying the shape of the middle CONV's window for the main path
        filters -- python list of integers, defining the number of filters in the CONV layers of the main path
        stage -- integer, used to name the layers, depending on their position in the network
        block -- string/character, used to name the layers, depending on their position in the network
        stride -- Integer, specifying the stride to be used
        Returns:
        X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
        """

        # defining name basis
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        with tf.name_scope("conv_block_stage" + str(stage)):
            # Retrieve Filters
            filter1, filter2, filter3 = filters

            # First component of main path (stride downsamples here).
            # BUG FIX (all BN calls): configure-then-call, as in id_block.
            x = self.conv2d(filter1, kernel_size=(1, 1), strides=(stride, stride), activation='relu', name=conv_name_base + '2a')(X)
            x = self.batchNormalization(name=bn_name_base + '2a')(x, training=FLAGS.is_training)

            # Second component of main path
            x = self.conv2d(filter2, (kernel_size, kernel_size), activation='relu', name=conv_name_base + '2b', padding='same')(x)
            x = self.batchNormalization(name=bn_name_base + '2b')(x, training=FLAGS.is_training)

            # Third component of main path
            x = self.conv2d(filter3, (1, 1), activation='relu', name=conv_name_base + '2c')(x)
            x = self.batchNormalization(name=bn_name_base + '2c')(x, training=FLAGS.is_training)

            # SHORTCUT PATH: 1x1 conv with the same stride so shapes match.
            X = self.conv2d(filter3, (1, 1), strides=(stride, stride), activation='relu', name=conv_name_base + '1')(X)
            X = self.batchNormalization(axis=3, name=bn_name_base + '1')(X, training=FLAGS.is_training)

        return self.elu(tf.add(x, X))
        


