'''

nohup python Seq2Seqtrain.py > ./output/Seq2Seqtrain.txt 2>&1 &
nohup tensorboard --logdir=./tensorboard --port=60004 --host=0.0.0.0 > ./output/tensorboard.txt 2>&1 &
'''
import os
import time

import tensorflow as tf
import numpy as np
import random
import csv
# from TrajectoryLoader import TrajectoryLoader
from ShipLoader import ShipLoader
import Model
import os

# --- GPU setup -------------------------------------------------------------
# Cap TensorFlow to a 6 GB virtual device on the first GPU so the card can be
# shared with other jobs (see the nohup launch commands in the module header).
physical_gpus = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_virtual_device_configuration(
    physical_gpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=6000)]
)
logical_gpus = tf.config.list_logical_devices("GPU")

# --- Configuration ---------------------------------------------------------
file_path = "./DataSet/datasets/liner_npy"
# Parameters for training (typo fixed: "learnig_rate" -> learning_rate).
learning_rate = 0.001
epochs = 100
batch_size = 64
display_step = 100       # save a checkpoint every `display_step` batches
# Parameters for the seq2seq model.
n_lstm = 128             # LSTM hidden units
encoder_length = 40      # input (history) sequence length
decoder_length = 20      # output (prediction) sequence length
now_time = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
settings = '{}_{}_{}_{}'.format('seq2seq', epochs, batch_size, now_time)

# Adam with gradient-norm clipping (clipnorm=1) to stabilise LSTM training.
optimizer = tf.keras.optimizers.Adam(learning_rate, clipnorm=1)
# Create encoder and decoder, then run one dummy batch through them so their
# variables are built before checkpoint restore and summary() below.
encoder = Model.Encoder(n_lstm, batch_size)
decoder = Model.Decoder(n_lstm, batch_size)

x = np.zeros((batch_size, 1, 5), dtype=np.float32)
output = encoder(x)
# output[1:] presumably carries the encoder (state_h, state_c) pair — this
# matches how RunOptimization slices the encoder outputs. TODO confirm.
decoder(x, output[1:])
encoder.summary()
decoder.summary()

# Restore the latest checkpoints (a no-op when the directories are empty).
# NOTE: these restore-only Checkpoint objects were originally named
# checkpoint2/checkpoint3, colliding with the save-side names below.
restore_encoder_ckpt = tf.train.Checkpoint(Encoder=encoder)
restore_encoder_ckpt.restore(tf.train.latest_checkpoint('./SaveEncoder'))

restore_decoder_ckpt = tf.train.Checkpoint(Decoder=decoder)
restore_decoder_ckpt.restore(tf.train.latest_checkpoint('./SaveDecoder'))

# TensorBoard writer; trace/profiler data is exported at the end of training.
summary_writer = tf.summary.create_file_writer('tensorboard/' + settings)
tf.summary.trace_on(profiler=True)
# Checkpoint managers used to save during training (keep the last 3).
checkpoint1 = tf.train.Checkpoint(Encoder=encoder)
manager1 = tf.train.CheckpointManager(checkpoint1, directory='./SaveEncoder', checkpoint_name='Encoder.ckpt',
                                      max_to_keep=3)
checkpoint2 = tf.train.Checkpoint(Decoder=decoder)
manager2 = tf.train.CheckpointManager(checkpoint2, directory='./SaveDecoder', checkpoint_name='Decoder.ckpt',
                                      max_to_keep=3)

# Scheduled-sampling probability, ramping linearly 0 -> 1 over the whole run;
# indexed by the loader's global batch counter inside RunOptimization.
sampling_probability_list = np.linspace(
    start=0.0,
    stop=1.0,
    num=epochs * 2000,
    dtype=np.float32)

    
def RunOptimization(source_seq, target_seq_in, target_seq_out, step):
    """Run one scheduled-sampling training step and log the loss.

    Args:
        source_seq: encoder input batch, indexed as [batch, time, feature]
            (assumed from the indexing below — TODO confirm against loader).
        target_seq_in: decoder input sequence (target shifted right).
        target_seq_out: decoder target sequence (target shifted left).
        step: global batch counter; selects the scheduled-sampling
            probability and is used as the TensorBoard step.

    Returns:
        Scalar tensor: per-step RMSE loss averaged over batch and time.

    Reads/updates module-level ``encoder``, ``decoder``, ``optimizer``,
    ``sampling_probability_list`` and ``summary_writer``.
    """
    loss = 0
    # Intentionally shadows the module-level decoder_length with the
    # actual target length of this batch.
    decoder_length = target_seq_out.shape[1]
    with tf.GradientTape() as tape:
        encoder_outputs = encoder(source_seq)
        states = encoder_outputs[1:]  # (state_h, state_c) from the encoder
        y_sample = 0
        for t in range(decoder_length):
            # Scheduled sampling: with probability
            # sampling_probability_list[step] feed the ground-truth token
            # (teacher forcing); otherwise feed the model's own previous
            # prediction. NOTE(review): the original comments labelled these
            # two branches the other way round. Also, the probability ramps
            # 0 -> 1, so teacher forcing becomes MORE likely as training
            # progresses — the reverse of the classic scheduled-sampling
            # curriculum (Bengio et al., 2015). Confirm this is intentional.
            if t == 0 or random.random() < sampling_probability_list[step]:
                # ground-truth input (teacher forcing)
                decoder_in = tf.expand_dims(target_seq_in[:, t], 1)
            else:
                # model's previous prediction
                decoder_in = tf.expand_dims(y_sample, 1)
            logit, de_state_h, de_state_c = decoder(decoder_in, states)
            y_sample = logit
            states = de_state_h, de_state_c
            # Loss: RMSE (typo "RSME" in original) on feature columns 1:3
            # (presumably lat/lon — TODO confirm); 1e-8 keeps the sqrt
            # gradient finite at zero error.
            loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])
            loss += tf.sqrt(loss_0+1e-8)
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))

    # Average over the batch, then over decoding steps, for reporting.
    loss = tf.reduce_mean(loss)
    loss = loss / decoder_length
    with summary_writer.as_default():
        tf.summary.scalar("loss", loss.numpy(), step=step)

    return loss


# Load Ship data.
seq2seq_loader = ShipLoader(file_path, 'train')
seq2seq_loader.loadShipData()


for epoch in range(1, epochs + 1):
    # Consume batches until the loader signals exhaustion by returning a
    # non-ndarray sentinel from getBatchSeq2Seq().
    while True:
        seq_encoder, seq_decoder = seq2seq_loader.getBatchSeq2Seq()
        if not isinstance(seq_encoder, np.ndarray):
            break

        # Decoder input is steps [0, decoder_length); the target is the same
        # sequence shifted one step ahead.
        seq_decoder_in = seq_decoder[:, :decoder_length, :]
        seq_decoder_out = seq_decoder[:, 1:decoder_length + 1, :]
        loss = RunOptimization(seq_encoder, seq_decoder_in, seq_decoder_out, seq2seq_loader.index)

        print("Epoch: %d, batch %d: loss %f" % (epoch, seq2seq_loader.index, loss.numpy()))
        # Periodically checkpoint both halves of the model.
        if seq2seq_loader.index % display_step == 0:
            path1 = manager1.save(checkpoint_number=seq2seq_loader.index)
            path2 = manager2.save(checkpoint_number=seq2seq_loader.index)

    # TODO: per-epoch evaluation (original note: "test after each epoch"
    # was never implemented).


with summary_writer.as_default():
    tf.summary.trace_export(name="model_trace", step=0, profiler_outdir='tensorboard')