'''
mist@gpu180.mistgpu.xyz:20305
nohup python -u 04_mySeq2Seqtrain.py > ./output/04_mySeq2Seqtrain.txt 2>&1 &
nohup tensorboard --logdir=./tensorboard --port=20305 --host=0.0.0.0 > ./output/tensorboard.txt 2>&1 &
tail -f output/04_mySeq2Seqtrain.txt

ps aux | grep python
watch -n 1 nvidia-smi

'''

# Activation-function notes from earlier experiments:
#   elu  — did not work well
#   tanh — trains fast, converges fast
#   relu — (no verdict recorded)
import os
import time
import tensorflow as tf
import numpy as np
from tensorflow.python.keras.callbacks import ModelCheckpoint

from ShipLoader import ShipLoader
# import Model
import os
from models import create_lstm2lstm, create_gru2gru
from utils.metrics import RMSE, root_mean_squared_error

# --- Experiment selection ---------------------------------------------------
model_name = "lstm2lstm"        # alternative: "gru2gru"

train_dir_name = "ospline_big"  # dataset directory stem

file_path = "./DataSet/datasets/{}_npy".format(train_dir_name)

# --- Training hyper-parameters ----------------------------------------------
# NOTE(review): "learnig_rate" is a typo, kept because the rest of the script
# references this exact name; rename everywhere in a single pass if desired.
learnig_rate = 0.001
epochs = 100
batch_size = 128

# --- Seq2seq model hyper-parameters ------------------------------------------
n_lstm = 256          # hidden units per recurrent layer (~1.36M parameters)
encoder_length = 40   # input (history) sequence length
decoder_length = 20   # output (prediction) sequence length

# Timestamped tag used to name TensorBoard logs, checkpoints and saved models.
now_time = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
settings = '{}_ds{}_ep{}_bt{}_nlstm{}_{}'.format(
    model_name, train_dir_name, epochs, batch_size, n_lstm, now_time)

# --- Load ship trajectory data ----------------------------------------------
# Training split.
train_loader = ShipLoader(file_path, 'train')
train_loader.loadShipData()
train_x, train_y = train_loader.get_all_data(flag='train', dir_name=train_dir_name)

# Validation split (kept in variables named test_* to match the rest of the
# script).
valid_loader = ShipLoader(file_path, 'valid')
valid_loader.loadShipData()
test_x, test_y = valid_loader.get_all_data(flag='valid', dir_name=train_dir_name)
# --- Build the seq2seq model -------------------------------------------------
# Dispatch table: model name -> factory function building that architecture.
model_builders = {
    'lstm2lstm': create_lstm2lstm,
    'gru2gru': create_gru2gru,
}
builder = model_builders[model_name]
seq2seq_model = builder(encoder_length, n_lstm, decoder_length)

# --- Compile -----------------------------------------------------------------
# Adam with the configured learning rate; plain MSE as both loss and metric.
optimizer = tf.keras.optimizers.Adam(learnig_rate)
seq2seq_model.compile(optimizer=optimizer, loss='mse', metrics='mse')
# Print the layer/parameter summary to stdout for the training log.
seq2seq_model.summary()

# --- Callbacks ---------------------------------------------------------------
callback_lists = []
# callback_lists.append(tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))

# TensorBoard logging, one summary write per epoch.
# NOTE: the TF1-era arguments previously passed here (batch_size, write_grads,
# embeddings_layer_names, embeddings_metadata, embeddings_data) were removed
# from tf.keras.callbacks.TensorBoard in TF2 and raise TypeError there; with
# histogram_freq=0 they had no effect anyway, so they are dropped.
callback_lists.append(tf.keras.callbacks.TensorBoard(
    log_dir="tensorboard/{}".format(settings),
    histogram_freq=0,
    write_graph=False,
    write_images=False,
    embeddings_freq=0,
    update_freq='epoch'))

# Checkpoint the full model every 5 epochs.
# BUG FIX: the epoch-numbered filename pattern was previously passed as
# ModelCheckpoint's *second positional argument*, which is `monitor`, not part
# of the save path — every save overwrote the bare "./checkpoints/<settings>"
# path and no per-epoch files were produced. Join the run directory and the
# filename pattern into the single `filepath` argument instead.
callback_lists.append(ModelCheckpoint(
    os.path.join("./checkpoints/{}".format(settings),
                 model_name + '_{epoch:03d}.hdf5'),
    verbose=1,
    save_weights_only=False,
    period=5))
# --- Train -------------------------------------------------------------------
# The decoder input is an all-zeros array shaped like the targets, i.e.
# teacher forcing is disabled (feeding train_y/test_y instead would enable it).
train_decoder_in = np.zeros_like(train_y)
valid_decoder_in = np.zeros_like(test_y)

seq2seq_model.fit(
    (train_x, train_decoder_in),
    train_y,
    batch_size=batch_size,
    epochs=epochs,
    validation_data=((test_x, valid_decoder_in), test_y),
    callbacks=callback_lists)

# --- Persist the trained model in SavedModel format --------------------------
tf.saved_model.save(seq2seq_model, "saved_models/seq2seq/{}".format(settings))