import data_utils
import utils
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from time import time
from mod_core_rnn_cell_impl import LSTMCell  # modified to allow initializing bias in lstm
import AutoencoderFunctions
import mmd

begin = time()  # wall-clock start, reported as elapsedTime at the end of training
tf.logging.set_verbosity(tf.logging.ERROR)  # silence TF deprecation/info spam

# Load Data ------------------------------------------------------------------------------------------------------------
# --- get settings --- #
# parse command line arguments, or use defaults
parser = utils.rgan_options_parser()
settings = vars(parser.parse_args())
# if a settings file is specified, it overrides command line arguments/defaults
if settings['settings_file']: settings = utils.load_settings_from_file(settings)

# --- get data, split --- #
# samples, pdf, labels = data_utils.get_data(settings)
data_path = './experiments/data/' + settings['data_load_from'] + '.data.npy'
print('Loading data from', data_path)
# force full-dataset (non-anomaly, non-single-signal) evaluation mode for loading
settings["eval_an"] = False
settings["eval_single"] = False
samples, labels, index = data_utils.get_data(settings["data"], settings["seq_length"], settings["seq_step"],
                                             settings["num_signals"], settings['sub_id'], settings["eval_single"],
                                             settings["eval_an"], data_path)

# -- number of variables -- #
# samples is indexed [sample, time, signal]; axis 2 is the signal/feature count
num_variables = samples.shape[2]
print('num_variables:', num_variables)
# --- save settings, data --- #
print('Ready to run with settings:')
for (k, v) in settings.items(): print(v, '\t', k)
# add the settings to local environment
# WARNING: at this point a lot of variables appear
locals().update(settings)
# 'identifier' comes from the locals().update(settings) call above
json.dump(settings, open('./experiments/settings/' + identifier + '.txt', 'w'), indent=0)
#-----------------------------------------------------------------------------------------------------------------------

# Locally defined parameters -------------------------------------------------------------------------------------------
# NOTE(review): these shadow/override any same-named keys injected by
# locals().update(settings) above — confirm the override is intentional.
batch_size = 500            # samples per training step; placeholder X is built with this fixed size
seq_length = 30             # time steps per sequence window
num_signals = 6             # input signals/features per time step
latent_dim = 15             # dimensionality of the encoder's latent code
hidden_units = 100          # LSTM hidden units in the encoder
learning_rate = 0.001       # Adam learning rate
training_epochs = 800       # full passes over the training set
display_step = 1            # log/dump parameters every `display_step` epochs

# Parameters and Results Paths -----------------------------------------------------------------------------------------
# Prefix for per-epoch parameter dumps (epoch number is appended at save time).
# NOTE(review): this uses "Experiments_Autoencoder2" while the results path below
# uses "Experiments_Autoencoder" — confirm the mismatch is intentional.
path_autoencoder_training_parameters = "./Experiments_Autoencoder2/Autoencoder_Loss_G/Training_Parameters/autoencoder_kdd99_30_"
# Directory for loss/logit text logs written after training.
path_autoencoder_training_results = "./Experiments_Autoencoder/Autoencoder_Loss_G/Training_Results/"
#-----------------------------------------------------------------------------------------------------------------------

# Create Encoder Model -------------------------------------------------------------------------------------------------
# Input placeholder: one batch of multivariate sequences, shape (batch, time, signals).
X = tf.placeholder(tf.float32, [batch_size, seq_length, num_signals])
# Encoder E(x): maps each input sequence to a latent sequence; parameters=None and
# reuse=False mean fresh trainable variables are created under the encoder scope.
z_enc_outputs = AutoencoderFunctions.encoderModel(X, hidden_units, seq_length, batch_size, latent_dim, reuse=False, parameters=None)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------

# Load Pre Trained Generator Model -------------------------------------------------------------------------------------
# Get parameters
# NOTE(review): the GAN checkpoint epoch is hard-coded — confirm epoch 8 is the
# intended pre-trained generator snapshot.
GeneratorEpoch = 8
para_path = './experiments/parameters/' + settings['sub_id'] + '_' + str(settings['seq_length']) + '_' + str(GeneratorEpoch) + '.npy'
parameters = AutoencoderFunctions.loadParameters(para_path)
# Create Generator Model
# Decoder/generator G(E(x)): reconstructs sequences from the encoder's latent
# output, initialized from the pre-trained GAN generator parameters.
x_dec_outputs, _ = AutoencoderFunctions.generatorModel(z_enc_outputs, settings['hidden_units_g'], settings['seq_length'], batch_size, settings['num_generated_features'], reuse=False, parameters=parameters)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------

# Get true and prediction outputs --------------------------------------------------------------------------------------
# Flatten the graph tensors so the loss and logging treat each element uniformly:
# first collapse to 2-D (rows x features), then to 1-D vectors.
flat_inputs = [tf.reshape(X, [-1, num_signals])]
flat_latents = [tf.reshape(z_enc_outputs, [-1, latent_dim])]
flat_reconstructions = [tf.reshape(x_dec_outputs, [-1, num_signals])]

# 1-D views consumed by the loss (y_true vs y_pred) and the epoch logging below.
y_true = [tf.reshape(t, [-1]) for t in flat_inputs]
y_encoded = [tf.reshape(t, [-1]) for t in flat_latents]
y_pred = [tf.reshape(t, [-1]) for t in flat_reconstructions]
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------

# Load the trained Discriminator and Evaluate its value for x and G(E(x)) ----------------------------------------------
# d_output_xr, d_logits_xr = AutoencoderFunctions.discriminatorModel(X, settings['hidden_units_d'], reuse=False, parameters=parameters)
# d_output_xg, d_logits_xg = AutoencoderFunctions.discriminatorModelPred(x_dec_outputs, settings['hidden_units_d'], reuse=False, parameters=parameters)

# disc_outputs_x = [tf.reshape(d_output_xr, [-1, num_signals])]
# disc_outputs_xg = [tf.reshape(d_output_xg, [-1, num_signals])]

# d_y_true = [tf.reshape(dyt, [-1]) for dyt in disc_outputs_x]
# d_y_pred = [tf.reshape(dyp, [-1]) for dyp in disc_outputs_xg]
#-----------------------------------------------------------------------------------------------------------------------

# Define Trainable Variables -------------------------------------------------------------------------------------------
t_vars = tf.trainable_variables()
# train_vars = [var for var in t_vars]    # trains all variables
# Restrict optimization to the encoder and generator scopes; any other variables
# in the graph (e.g. a discriminator, if enabled) are left frozen.
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoder")   # trains the encoder variables
train_vars = train_vars + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator")   # trains the generator variables
print("train_vars: ", train_vars)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------

# Define Loss and Optimizer --------------------------------------------------------------------------------------------
# loss = 0
# for i in range(len(y_true)):
#     loss += tf.reduce_sum(tf.square(tf.subtract(y_pred[i], y_true[i])))
# optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)

# Sum-of-squares reconstruction loss: || G(E(x)) - x ||^2 over the flattened batch.
loss = 0
for i in range(len(y_true)):
    loss += tf.reduce_sum(tf.square(tf.subtract(y_pred[i], y_true[i])))   # loss used in first results
    # loss += ( tf.reduce_sum(tf.square(tf.subtract(y_pred[i], y_true[i]))) + 0.5*tf.reduce_sum(tf.square(tf.subtract(d_y_pred[i], d_y_true[i]))) )
# Adam updates only the encoder/generator variables collected in train_vars.
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list=train_vars)

# loss = tfa.losses.contrastive_loss(y_true, y_pred, margin=0.5)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list=train_vars)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------

# Initialize variables and launch graph --------------------------------------------------------------------------------
# tf.initialize_all_variables() is deprecated (removed in later TF releases);
# global_variables_initializer is the drop-in replacement.
init = tf.global_variables_initializer()
l = []          # total reconstruction loss per epoch
a_values = []   # logged decoder outputs (y_pred), one snapshot per display_step
b_values = []   # logged encoder inputs (y_true)
c_values = []   # logged latent codes (y_encoded)

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(init)

    for epoch in range(training_epochs):
        loss_epoch = 0
        # drop the trailing partial batch, if any
        num_batches = int(samples.shape[0] / batch_size)
        for batch in range(num_batches):    # for batch in range(1):
            # FIX: slice by batch_size instead of the hard-coded 500 so the
            # slicing stays consistent with the placeholder if batch_size changes
            i = batch * batch_size
            x = samples[i:i + batch_size]
            x = x.reshape((batch_size, seq_length, num_signals))
            feed = {X: x}
            # Fit training using batch data
            _, loss_batch = sess.run([optimizer, loss], feed_dict=feed)
            loss_epoch = loss_epoch + np.sum(loss_batch)

        l = np.append(l, loss_epoch)

        if epoch % display_step == 0:
            # evaluate on the last batch of the epoch (feed from the loop above)
            a, b, c = sess.run([y_pred, y_true, y_encoded], feed_dict=feed)
            a_values = np.append(a_values, a)
            # FIX: b and c were previously appended onto a_values (copy-paste
            # bug), so the saved b_values/c_values logs were corrupted with
            # decoder outputs; append onto their own accumulators.
            b_values = np.append(b_values, b)
            c_values = np.append(c_values, c)
            print ("logits")
            print (a)
            print ("labels")
            print (b)
            print ("encoded")
            print (c)
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(loss_epoch))
            print("\n\n")

        # if (epoch == (training_epochs-1)):
            # NOTE: still inside the display_step branch — parameters are dumped
            # every display_step epochs, not only on the last one.
            print("train_vars: ", train_vars)
            AutoencoderFunctions.dumpParameters(path_autoencoder_training_parameters + str(epoch), sess)    # Change the file or folder name to save other results

    np.savetxt(path_autoencoder_training_results + 'loss_per_epoch.txt', l, fmt='%f')   # Change the file or folder name to save other results
    np.savetxt(path_autoencoder_training_results + 'a_values.txt', a_values, fmt='%f')  # Change the file or folder name to save other results
    np.savetxt(path_autoencoder_training_results + 'b_values.txt', b_values, fmt='%f')  # Change the file or folder name to save other results
    np.savetxt(path_autoencoder_training_results + 'c_values.txt', c_values, fmt='%f')  # Change the file or folder name to save other results

    elapsedTime = time() - begin
    print("elapsedTime: ", elapsedTime)

    print("Optimization Finished!")
#-----------------------------------------------------------------------------------------------------------------------