# author = Ddddavid 
# date = 2020/08/16
## try GAN model
## Use Python 3 and Tensorflow 2 

import os
import time
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt


BUFFER_SIZE = 100000  # shuffle buffer size for the tf.data pipeline
BATCH_SIZE = 32       # samples per training batch
EPOCHS = 5            # full passes over the dataset
PARAM_SC = 100        # divisor used to scale the energy labels in preprocess()
NOISE_SIZE = 1000     # dimensionality of the generator's input noise vector


# Placeholder containers for the dataset; presumably filled by a loader that
# is not part of this file -- TODO confirm where these get populated.
image = []   # detector images; expected (num, 230, 122, 2) per comment below
_label = []  # expected (num, 3) per the `weight` comment below
energy = []  # per-event energy labels; expected (num,)

data, label = image, energy
# data -> (num, 230, 122, 2); label -> (num)
weight = _label
# weight -> (num, 3)


def preprocess(x_tr, y_tr):
    """Scale and shift one batch of detector images and their labels.

    Args:
        x_tr: image tensor of shape (num, 230, 122, 2); channel 0 is the
            hit time, channel 1 the npe count (per the commented-out
            tf.slice calls that this indexing replaces -- TODO confirm
            against the producer of `image`).
        y_tr: energy labels, shape (num,).

    Returns:
        (x_tr, y_tr): float32 image with the hit time zeroed where there is
        no hit and normalized over 200 ns, and labels divided by PARAM_SC.
    """
    # scaling and shifting
    x_tr = tf.cast(x_tr, dtype=tf.float32)
    # Use size-1 slices so the channel axis survives and the concat below
    # can rebuild a (num, 230, 122, 2) tensor.  The original integer
    # indexing both dropped the channel axis (breaking axis=3 concat) and
    # read channel 2 of a 2-channel image (out of range).
    x_tr_hittime = x_tr[:, :, :, 0:1]
    x_tr_npe = x_tr[:, :, :, 1:2]

    # Mask: 1.0 where a cell registered at least one photoelectron.
    has_hit = tf.cast(tf.cast(x_tr_npe, dtype=bool), dtype=tf.float32)
    x_tr_hittime = has_hit * x_tr_hittime
    x_tr_hittime = x_tr_hittime / 200.0  # normalize for 200 ns
    x_tr = tf.concat([x_tr_hittime, x_tr_npe], axis=3)
    y_tr = tf.cast(y_tr, dtype=tf.float32) / PARAM_SC

    return x_tr, y_tr


image_channel, label = preprocess(data, label)
# Build one constant plane per sample that holds that sample's scaled
# energy, so the label rides along as a third image channel.  The original
# loop multiplied the WHOLE tensor by every label in turn (leaving
# prod(labels) in every cell) and built a rank-3 tensor that the axis=3
# concat below could not accept.
label_channel = tf.ones_like(image_channel[:, :, :, 0:1], dtype=tf.float32)
label_channel = label_channel * tf.reshape(label, [-1, 1, 1, 1])
x_train = tf.concat([image_channel, label_channel], axis=3)
# Combine hit_time, npe and energy to form a 3 channel image
# x_train: [num, 230, 122, 3] = [[hit_time, npe, label]]

datasets = tf.data.Dataset.from_tensor_slices(x_train)
datasets = datasets.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)


# >> generator model
# Maps a NOISE_SIZE-dim noise vector through widening dense blocks
# (2048 -> 4096 -> 8192 -> 16384) to a flat image, then reshapes to
# (230, 122, 3).  Dense layers carry no bias: BatchNormalization's beta
# supplies the shift.
generator = keras.Sequential()

generator.add(layers.Dense(2048, input_shape=(NOISE_SIZE, ), use_bias=False))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())

for width in (4096, 8192, 16384):
    generator.add(layers.Dense(width, use_bias=False))
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

# Output layer: one unit per pixel/channel, batch-normalized, no activation.
generator.add(layers.Dense(230 * 122 * 3, use_bias=False))
generator.add(layers.BatchNormalization())

generator.add(layers.Reshape((230, 122, 3)))
# <<

# >> discriminator model
# Mirror of the generator: flatten the (230, 122, 3) image, then narrowing
# dense blocks (16384 -> 8192 -> 4096 -> 2048) down to a single logit
# (no sigmoid -- the losses use from-logits cross entropy).
discriminator = keras.Sequential()

discriminator.add(layers.Flatten())

for width in (16384, 8192, 4096, 2048):
    discriminator.add(layers.Dense(width, use_bias=False))
    discriminator.add(layers.BatchNormalization())
    discriminator.add(layers.LeakyReLU())

discriminator.add(layers.Dense(1))
# <<

    
def lossFunction(real, examined):
    """Element-wise sigmoid cross-entropy between targets and logits.

    Args:
        real: target tensor (0/1 labels).
        examined: discriminator logits, same shape as `real`.

    Returns:
        Per-element loss tensor (not reduced).
    """
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=real,
                                                   logits=examined)


# crossEntropy = keras.losses.BinaryCrossentropy(from_logits=True)


def discriminator_loss(real, generated):
    """Mean discriminator loss: real logits target 1, generated target 0.

    Args:
        real: discriminator logits on real images.
        generated: discriminator logits on generated images.

    Returns:
        Scalar mean of the summed per-element losses.
    """
    loss_on_real = lossFunction(tf.ones_like(real), real)
    loss_on_fake = lossFunction(tf.zeros_like(generated), generated)
    return tf.reduce_mean(loss_on_real + loss_on_fake)


def generated_loss(generated):
    """Mean generator loss: generated samples should score as real (1).

    Args:
        generated: discriminator logits on generated images.

    Returns:
        Scalar mean sigmoid cross-entropy against an all-ones target.
    """
    targets = tf.ones_like(generated)
    per_element = lossFunction(targets, generated)
    return tf.reduce_mean(per_element)
    
    
# Optimizers are created ONCE at module level.  The original code built two
# fresh Adam instances inside train() on every call, which discards Adam's
# moment estimates each step and effectively reduces it to plain SGD.
gen_opt = keras.optimizers.Adam(1e-5)
disc_opt = keras.optimizers.Adam(1e-5)


def train(__images):
    """Run one adversarial training step on a batch of real images.

    Args:
        __images: batch of real 3-channel images, (batch, 230, 122, 3).

    Returns:
        (gen_loss, disc_loss) scalar loss tensors for this step.
    """
    # tf.random.poisson requires a rate argument; the original call omitted
    # `lam` and would raise a TypeError.  lam=1.0 preserves the Poisson
    # noise idea -- TODO confirm the intended rate.
    noise = tf.random.poisson((BATCH_SIZE, NOISE_SIZE), lam=1.0)
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Score the REAL batch.  The original passed `noise` here, so the
        # discriminator never actually saw a real image.
        real_score = discriminator(__images, training=True)
        generated_image = generator(noise, training=True)
        generated_score = discriminator(generated_image, training=True)

        gen_loss = generated_loss(generated_score)
        disc_loss = discriminator_loss(real_score, generated_score)

    gradient_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradient_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    # The original applied gradient_disc to the generator, so the generator
    # was never updated with its own gradients.
    gen_opt.apply_gradients(zip(gradient_gen, generator.trainable_variables))
    disc_opt.apply_gradients(zip(gradient_disc, discriminator.trainable_variables))

    return gen_loss, disc_loss
    

# Number of batches per epoch (known cardinality: dataset was built from
# an in-memory tensor).
length = len(datasets)

# Main training loop: one train() step per batch, with progress printout.
for epoch in range(EPOCHS):
    for i, batch in enumerate(datasets, start=1):
        gen_loss, disc_loss = train(batch)
        print(f'epoch={epoch}, batch={i}/{length},'
              f'discriminator loss={disc_loss}')
        

