# model of EfficientNet B0 ~ B7
# Ddddavid 2021/08/22
# Python 3.8  Tensorflow 2.3.0


import os
import numpy as np
import math
import tensorflow as tf
from efficientnet import EfficientNet, efficient_net_b7, get_efficient_net


# Restrict which GPU CUDA exposes.  BUG FIX: this must be set BEFORE TensorFlow
# enumerates physical devices; the original set it after list_physical_devices,
# where it has no effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# Enable on-demand GPU memory growth instead of grabbing all memory up front.
# BUG FIX: the device type string is case-sensitive -- 'gpu' (lowercase)
# matches nothing, so memory growth was never actually enabled.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

# Command-line configuration via the legacy tf.compat.v1 flags API.
FLAGS = tf.compat.v1.app.flags.FLAGS
tf.compat.v1.app.flags.DEFINE_integer('num_gpus', 1, """How many GPUs to use.""")
tf.compat.v1.app.flags.DEFINE_integer('epochs', 10, """Number of epoch to train.""")
tf.compat.v1.app.flags.DEFINE_boolean('is_training', True, """Is training or not.""")
# Default log name is this script's filename without extension.
tf.compat.v1.app.flags.DEFINE_string('logname', os.path.splitext(os.path.basename(__file__))[0], """The log directory.""")
tf.compat.v1.app.flags.DEFINE_float('lr_start', 1e-3, """start learning rate.""")
tf.compat.v1.app.flags.DEFINE_float('lr_end', 1e-7, """end learning rate.""")
# Train/validation splits are expressed as half-open docid ranges; each docid
# corresponds to one TFRecord file of `nevt_file` events (see tfr_names()).
tf.compat.v1.app.flags.DEFINE_integer('train_mindocid', 0, """mindocid for trainning dataset""")
tf.compat.v1.app.flags.DEFINE_integer('train_maxdocid', 9000, """maxdocid for trainning dataset""")
tf.compat.v1.app.flags.DEFINE_integer('val_mindocid', 9000, """mindocid for valid dataset""")
tf.compat.v1.app.flags.DEFINE_integer('val_maxdocid', 10000, """maxdocid for valid dataset""")
tf.compat.v1.app.flags.DEFINE_integer('nevt_file', 500, """number of events per file""")

path_to_tfr = '../data/tfr/'  # input TFRecord directory
path_to_model = '../result/'  # checkpoint/output directory
path_to_log = path_to_model + FLAGS.logname + '/' + FLAGS.logname
# NOTE(review): only path_to_model is created here; the per-run subdirectory
# in path_to_log ('../result/<logname>/') is never created -- confirm whether
# anything writes there before relying on it.
if not os.path.exists(path_to_model):
    os.makedirs(path_to_model)


BATCH_SIZE = 8  # Number of samples per batch.
EPOCHS = 10  # Number of epoch to train.
PARAM_SC = 100  # The linear scaling parameter of y.
SAVE_EVERY_N_STEP = 1000  # Checkpoint cadence (compared against epoch index below).
MODEL_PATH = path_to_model
TRAIN_EPOCH_SIZE = (FLAGS.train_maxdocid-FLAGS.train_mindocid) * FLAGS.nevt_file
VAL_EPOCH_SIZE = (FLAGS.val_maxdocid-FLAGS.val_mindocid) * FLAGS.nevt_file
# BUG FIX: '/' yields a float, which cannot feed range() and silently drops
# the final partial batch; use ceiling division to count whole steps.
TRAIN_STEPS = math.ceil(TRAIN_EPOCH_SIZE / BATCH_SIZE)
VAL_STEPS = math.ceil(VAL_EPOCH_SIZE / BATCH_SIZE)
model = efficient_net_b7()


loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.RMSprop()

train_loss = tf.keras.metrics.Mean(name='train_loss')
# BUG FIX: these metrics are reported as "acc" but were constructed as
# SparseCategoricalCrossentropy, so the printed accuracy was actually a loss.
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

valid_loss = tf.keras.metrics.Mean(name='valid_loss')
valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')


def parse_function(example_proto):
    """Deserialize one TFRecord example into fixed-shape tensors.

    Decodes the raw byte features and returns a dict with:
    'data' -> float32 [230, 122, 2], 'label' -> float32 [3],
    'energy' -> float32 [1].
    """
    feature_spec = {
        'data': tf.io.FixedLenFeature(shape=(), dtype=tf.string),
        'label': tf.io.FixedLenFeature(shape=(), dtype=tf.string),
        'energy': tf.io.FixedLenFeature([], tf.float32),
    }
    example = tf.io.parse_single_example(serialized=example_proto, features=feature_spec)

    data = tf.io.decode_raw(example['data'], tf.float32)
    data = tf.reshape(data, [230, 122, 2])
    data.set_shape([230, 122, 2])
    example['data'] = data

    label = tf.io.decode_raw(example['label'], tf.float32)
    label = tf.reshape(label, [3, ])
    label.set_shape([3, ])
    example['label'] = label

    energy = tf.reshape(example['energy'], [1, ])
    energy.set_shape([1, ])
    example['energy'] = energy

    return example


def tfr_names():
    """Collect TFRecord file paths for the train and validation splits.

    Scans the docid ranges given by FLAGS and stops at the first missing file
    (files are assumed to be numbered contiguously).

    Returns:
        (train_names_list, val_names_list): lists of existing TFRecord paths.
    """
    def _collect(min_docid, max_docid):
        # Gather 'eplus_<docid>.tfrecords' paths until one is missing.
        names = []
        for eve in range(min_docid, max_docid):
            tfc_name = path_to_tfr + "eplus_{:d}.tfrecords".format(eve)
            if not os.path.exists(tfc_name):
                # BUG FIX: the original appended the path BEFORE the existence
                # check, so a nonexistent file was fed to the input pipeline.
                print("can not find " + tfc_name)
                break
            names.append(tfc_name)
        return names

    train_names_list = _collect(FLAGS.train_mindocid, FLAGS.train_maxdocid)
    val_names_list = _collect(FLAGS.val_mindocid, FLAGS.val_maxdocid)
    return train_names_list, val_names_list


def build_tf_pipline():
    """Build the training and validation tf.data input pipelines.

    Interleaves the TFRecord files listed by tfr_names(), parses them with
    parse_function, batches, shuffles/repeats indefinitely, and prefetches.

    Returns:
        (training_iterator, validation_iterator): one-shot iterators yielding
        dict batches with keys 'data', 'label', 'energy'.
    """
    train_names, val_names = tfr_names()
    train_set = tf.data.Dataset.from_tensor_slices(train_names)
    # NOTE(review): parallel_interleave / map_and_batch / shuffle_and_repeat
    # are deprecated tf.data.experimental ops in TF 2.x; the modern
    # equivalents are interleave/map/batch with num_parallel_calls -- confirm
    # before porting.
    train_set = train_set.apply(tf.data.experimental.parallel_interleave(tf.data.TFRecordDataset, cycle_length=100))
    train_set = train_set.apply(tf.data.experimental.map_and_batch(parse_function, batch_size=BATCH_SIZE))
    # NOTE(review): shuffling happens AFTER batching with a buffer of only
    # BATCH_SIZE batches, so examples are shuffled only at batch granularity
    # within a small window -- confirm this is intended.
    train_set = train_set.apply(tf.data.experimental.shuffle_and_repeat(BATCH_SIZE))
    train_set = train_set.prefetch(buffer_size=BATCH_SIZE * FLAGS.num_gpus * 2)

    val_set = tf.data.Dataset.from_tensor_slices(val_names)
    val_set = val_set.apply(tf.data.experimental.parallel_interleave(tf.data.TFRecordDataset, cycle_length=100))
    val_set = val_set.apply(tf.data.experimental.map_and_batch(parse_function, batch_size=BATCH_SIZE))
    val_set = val_set.apply(tf.data.experimental.shuffle_and_repeat(BATCH_SIZE))
    val_set = val_set.prefetch(buffer_size=BATCH_SIZE * FLAGS.num_gpus * 2)

    # iterator = tf.compat.v1.data.Iterator.from_string_handle(handle, train_set.output_types, train_set.output_shapes)
    # element = iterator.get_next()

    # Both datasets repeat forever, so these one-shot iterators never exhaust.
    training_iterator = tf.compat.v1.data.make_one_shot_iterator(train_set)
    validation_iterator = tf.compat.v1.data.make_one_shot_iterator(val_set)

    return training_iterator, validation_iterator


def preprocess(x_tr, y_tr):
    """Normalize the hit-time channel and zero-pad images to 230x230.

    Args:
        x_tr: image batch of shape [batch, 230, 122, 2]; channel 0 is the hit
            time, channel 1 the npe count (per the slicing below).
        y_tr: labels, linearly scaled down by PARAM_SC.

    Returns:
        (x_tr, y_tr): float32 tensors; x_tr has shape [batch, 230, 230, 2].
    """
    # scaling and shifting
    x_tr = tf.cast(x_tr, dtype=tf.float32)
    x_tr_hittime = tf.slice(x_tr, [0, 0, 0, 0], [-1, 230, 122, 1])
    x_tr_npe = tf.slice(x_tr, [0, 0, 0, 1], [-1, 230, 122, 1])

    # Zero out hit times of pixels that registered no photo-electrons.
    has_hit = tf.cast(tf.cast(x_tr_npe, dtype=bool), dtype=tf.float32)
    x_tr_hittime = has_hit * x_tr_hittime
    x_tr_hittime = x_tr_hittime / 200.0  # normalize for 200 ns

    x_tr = tf.concat([x_tr_hittime, x_tr_npe], axis=3)
    # BUG FIX / perf: zero-pad the width 122 -> 230 with tf.pad instead of
    # allocating a fresh tf.Variable each call -- the Variable approach is
    # wasteful and fails when the batch dimension is unknown (None).
    x_tr = tf.pad(x_tr, [[0, 0], [0, 0], [0, 230 - 122], [0, 0]])
    y_tr = tf.cast(y_tr, dtype=tf.float32) / PARAM_SC

    return x_tr, y_tr


def loss(predict, real):
    """Return the mean squared error between *predict* and *real*."""
    squared_error = (predict - real) ** 2
    return tf.reduce_mean(squared_error)


def train_step(image_batch, label_batch):
    """Run one optimization step and update the training metrics.

    Args:
        image_batch: batch of preprocessed images.
        label_batch: labels for the batch (passed to loss_object/metrics).
    """
    with tf.GradientTape() as tape:
        predictions = model(image_batch, training=True)
        # BUG FIX: removed leftover debug print(...)/exit(-1) that aborted the
        # whole process on the very first training step.  The local is named
        # loss_value so it no longer shadows the module-level loss() function.
        loss_value = loss_object(y_true=label_batch, y_pred=predictions)

    gradients = tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))

    train_loss.update_state(values=loss_value)
    train_accuracy.update_state(y_true=label_batch, y_pred=predictions)
    

def valid_step(image_batch, label_batch):
    """Run one validation forward pass and update the validation metrics."""
    predictions = model(image_batch, training=False)
    # BUG FIX: the local was also named valid_loss, shadowing the Mean metric
    # and then calling .update_state on a plain tensor (AttributeError).
    batch_loss = loss_object(label_batch, predictions)

    valid_loss.update_state(values=batch_loss)
    valid_accuracy.update_state(y_true=label_batch, y_pred=predictions)
    

# handle = tf.compat.v1.placeholder(tf.string, shape=[])
training_iterator, validation_iterator = build_tf_pipline()

for epoch in range(EPOCHS):
    # --- training ---
    # BUG FIX: iterate over BATCHES (epoch_size / batch_size), not over the
    # raw event count -- each get_next() already yields a whole batch.  The
    # step counter is now the loop variable (it was stuck at 0 before).
    train_steps = math.ceil(TRAIN_EPOCH_SIZE / BATCH_SIZE)
    for step in range(train_steps):
        element = training_iterator.get_next()
        images, labels = element['data'], element['label']
        train_images, train_labels = preprocess(images, labels)
        train_step(train_images, train_labels)

        print('Epoch {}/{}, step = {}/{},'
              'loss = {:.5f}, acc = {:.5f}'.format(
                  epoch, EPOCHS, step, train_steps,
                  train_loss.result().numpy(),
                  train_accuracy.result().numpy()
              ))

    # --- validation ---
    # BUG FIX: the original looped over TRAIN_EPOCH_SIZE here as well, running
    # far more validation batches than the validation split contains.
    for _ in range(math.ceil(VAL_EPOCH_SIZE / BATCH_SIZE)):
        element = validation_iterator.get_next()
        valid_images, valid_labels = preprocess(element['data'], element['label'])
        valid_step(valid_images, valid_labels)

    # BUG FIX: the "train acc" slot was fed valid_accuracy in the original.
    print("Epoch {}/{}, train loss = {:.5f}, train acc = {:.5f},"
                "valid loss {:.5}, valid acc = {:.5}".format(
                    epoch, EPOCHS, train_loss.result().numpy(),
                    train_accuracy.result().numpy(),
                    valid_loss.result().numpy(),
                    valid_accuracy.result().numpy()
                ))

    # Reset accumulated metrics so each epoch reports fresh statistics.
    train_loss.reset_states()
    train_accuracy.reset_states()
    valid_loss.reset_states()
    valid_accuracy.reset_states()

    # NOTE(review): SAVE_EVERY_N_STEP (=1000) is compared against the EPOCH
    # index, so with EPOCHS=10 only epoch 0 checkpoints -- confirm intent.
    if epoch % SAVE_EVERY_N_STEP == 0:
        model.save_weights(filepath=MODEL_PATH+"epoch-{}".format(epoch), save_format='tf')


# save final weights
model.save_weights(filepath=MODEL_PATH+"model", save_format='tf')








