import sys
import os
import logging

import torch.nn

sys.path.append(os.getcwd())
logging.basicConfig(level=logging.INFO)

import time
import random
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from config import Variables, RemainTimePredParameters
from utils import *
from network.remain_time_pred_network import SingleLSTMNet, NNet
from pytorchtools import EarlyStopping


def train(xtrain, ytrain, xvalid, yvalid, hidden_d, layers, dropout, learning_rate, n_epoch, pic_name, batch_size,
          device, save_path, patience=50):
    """Train an NNet regressor with early stopping and LR scheduling.

    Trains on mini-batches of (xtrain, ytrain), validates on the full
    (xvalid, yvalid) tensors each epoch, halves the learning rate when the
    validation loss plateaus, and stops early after `patience` epochs without
    improvement. Saves a loss curve as <save_path>/<pic_name>.jpg.

    Returns the model with the best validation loss (restored from the
    EarlyStopping checkpoint), not necessarily the last-epoch weights.
    """
    def setup_seed(seed):
        # Seed every RNG in play so runs are reproducible.
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True

    def plot_loss(train_losses, valid_losses):
        plt.figure(figsize=(20, 10))
        plt.plot(train_losses, 'b', label='train_loss')
        plt.plot(valid_losses, 'r', label='valid_loss')
        plt.legend()
        plt.savefig(save_path + "/" + pic_name + '.jpg')

    train_dataset = TensorDataset(xtrain, ytrain)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    setup_seed(0)
    # model = SingleLSTMNet(input_dim=xtrain.shape[-1], hidden_dim=hidden_d, n_layer=layers, drop_out=dropout).to(device)
    model = NNet(input_dim=xtrain.shape[-1], hidden_dim=hidden_d, n_layer=layers, drop_out=dropout).to(device)
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, eps=1e-4)
    checkpoint_path = save_path + "/" + pic_name + "_checkpoint.pt"
    early_stopping = EarlyStopping(patience=patience, verbose=True, path=checkpoint_path)

    train_loss = []
    valid_loss = []
    for epoch in range(n_epoch):
        model.train()  # re-enable dropout after the eval pass of the previous epoch
        train_loss_tmp = 0.0
        cnt = 0
        for batch_x, batch_y in train_loader:
            prediction = model(batch_x)
            loss = criterion(prediction, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # .item() detaches and moves to CPU. The old `loss.data` kept
            # (possibly CUDA) tensors in the history, which leaks memory and
            # makes plt.plot() fail on GPU runs.
            train_loss_tmp += loss.item()
            cnt += 1
        train_loss.append(train_loss_tmp / cnt)

        model.eval()
        with torch.no_grad():  # validation needs no autograd graph
            valid_output = model(xvalid)
            valid_loss_data = criterion(valid_output, yvalid)
        scheduler.step(valid_loss_data)
        valid_loss.append(valid_loss_data.item())
        print('EPOCH: %d, TRAINING LOSS: %f, VALIDATION LOSS: %f' % (epoch, train_loss_tmp / cnt, valid_loss_data))
        early_stopping(valid_loss_data, model)
        if early_stopping.early_stop:
            print('Early stopped.')
            break
    # Restore the best weights saved by EarlyStopping; otherwise the returned
    # model carries the last-epoch (typically worse) parameters.
    if os.path.exists(checkpoint_path):
        model.load_state_dict(torch.load(checkpoint_path))
    plot_loss(train_loss, valid_loss)
    return model


def model_metrics(model, x, ytrue, device):
    """Print and return a 'MSE: ..., MAE: ...' metrics string for `model` on (x, ytrue).

    `device` is kept for API compatibility; tensors are moved to CPU
    unconditionally (a no-op for CPU tensors) before computing metrics.
    """
    # train() can leave the model in training mode, so dropout would corrupt
    # the metrics — force eval mode here.
    model.eval()
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        tmp = model(x).detach().cpu().numpy()
    ytrue = ytrue.cpu()  # no-op when already on CPU
    predy = np.reshape(tmp, (tmp.shape[0],))
    result = 'MSE: %f, MAE: %f.' % (mean_squared_error(y_true=ytrue, y_pred=predy),
                                    mean_absolute_error(y_true=ytrue, y_pred=predy))
    print(result)
    return result


def write_results(argv, train_info, valid_info, test_info, save_path):
    """Write hyper-parameters and train/valid/test metric strings to <save_path>/<FLAG>.txt.

    `argv` holds (n_hiddens, n_layers, dropout, learning_rate, n_epochs,
    batch_size, flag, patience) as strings, in that order; `flag` names the
    output file.
    """
    n_hiddens, n_layers, dropout, learning_rate, n_epochs, batch_size, flag, patience = argv[:8]
    # `with` guarantees the file is closed even if a write raises,
    # unlike the open()/close() pair it replaces.
    with open(save_path + "/" + flag + '.txt', 'w', encoding='utf-8') as f:
        f.write('n_hidden=%s, n_layers=%s, dropout=%s, learning_rate=%s, epochs=%s, batch_size=%s, patience=%s.\n' %
                (n_hiddens, n_layers, dropout, learning_rate, n_epochs, batch_size, patience))
        f.write('\nTraining Result:\n')
        f.write(train_info + '\n')
        f.write('\nValidation Result:\n')
        f.write(valid_info + '\n')
        f.write('\nTest Result:\n')
        f.write(test_info + '\n')


def main(argv):
    """Load the dataset, train the remaining-time model, and record metrics.

    `argv` supplies, as strings: n_hiddens, n_layers, dropout, learning_rate,
    n_epochs, batch_size, flag (run label), patience.
    """
    start_time = time.time()

    # Load train / validation / test tensors from the configured paths.
    parameter_space = RemainTimePredParameters(dataset_name="bpic2012")  # TODO: change per dataset
    xtrain, ytrain, xvalid, yvalid, xtest, ytest = load_np_data(
        parameter_space.TRAIN_DATA_PATH, parameter_space.TEST_DATA_PATH)
    print('Data loaded. Time spent is %f. ' % (time.time() - start_time))
    print('Xtrain data shape = ', xtrain.shape, ', ytrain data shape = ', ytrain.shape, '.')
    print('Xvalidation data shape = ', xvalid.shape, ', yvalidation data shape = ', yvalid.shape, '.')
    print('Xtest data shape = ', xtest.shape, ', ytest data shape = ', ytest.shape, '.\n')

    # Training
    if not os.path.exists(parameter_space.RESULT_PATH):
        os.makedirs(parameter_space.RESULT_PATH)
    n_hiddens, n_layers = int(argv[0]), int(argv[1])
    dropout, learning_rate = float(argv[2]), float(argv[3])
    n_epochs, batch_size = int(argv[4]), int(argv[5])
    flag, patience = argv[6], int(argv[7])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Device is %s.' % device)
    xtrain, ytrain = xtrain.to(device), ytrain.to(device)
    xvalid, yvalid = xvalid.to(device), yvalid.to(device)
    xtest, ytest = xtest.to(device), ytest.to(device)
    model = train(xtrain, ytrain, xvalid, yvalid, hidden_d=n_hiddens, layers=n_layers,
                  dropout=dropout, learning_rate=learning_rate, n_epoch=n_epochs, pic_name=flag,
                  batch_size=batch_size, device=device, save_path=parameter_space.RESULT_PATH, patience=patience)
    print('Finished model training. Time is %f.' % (time.time() - start_time))

    # Metrics on all three splits, persisted alongside the hyper-parameters.
    train_info = model_metrics(model, xtrain, ytrain, device)
    valid_info = model_metrics(model, xvalid, yvalid, device)
    test_info = model_metrics(model, xtest, ytest, device)
    write_results(argv, train_info, valid_info, test_info, parameter_space.RESULT_PATH)
    print('Ended. Time is %f.' % (time.time() - start_time))


if __name__ == '__main__':
    # Command-line arguments
    # main(sys.argv[1:])

    # Hard-coded arguments: n_hiddens, n_layers, dropout, learning_rate,
    # n_epochs, batch_size, flag, patience (all passed as strings).
    argv = ['6', '1', '0', '0.0003', '50', '2048', '0', '10']
    main(argv)
