# training process for the model
# author: xjtu-blacksmith
# created: 2020-02-22

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt 
import time
from os import path

import config
from utils.tool import timeSince, filePrint
from pretrain import build_data
from val import eval_model

start = time.time()  # wall-clock reference used by every timeSince() log stamp
# NOTE(review): 'btch_size' looks like a typo for 'batch_size'; renaming it
# would also touch train_net below, so it is only flagged here.
btch_size = config.BATCH_SIZE
log_rate = config.LOG_RATE  # number of mini-batches between two log points

def train_net(net, lr=.1, epoch_num=100, msg=None, resume=None):
    """Train `net` on the configured dataset and save model/stats files.

    Args:
        net: model to train; moved onto the GPU inside this function.
        lr: initial learning rate, divided by 5 whenever the validation
            accuracy stagnates.
        epoch_num: maximum number of epochs to run.
        msg: tag used in log lines and saved file names ('unnamed' if None).
        resume: if not None, resume a previous run (not implemented yet).
    """

    def prepare_data():
        'build training data from datasets'
        train_set, train_loader = build_data('train', btch_size)
        return train_set, train_loader

    def resume_train():
        'resume from previous training'
        pass  # TODO: restoring model/optimizer state is not implemented

    def save_files(mode='model', data=None, name=None):
        'save state_dict of model or stats npy files'

        # compose the output file name as '<mode>-<tag>.<ext>'
        if name is None:  # was `name == None`; identity test is the idiom
            name = mode + '-unnamed'
        else:
            name = mode + '-' + name

        if mode == 'model':
            name += '.pth'
            torch.save(data, path.join(config.OUTPUT_PATH, name))
        else:  # statistics are numpy-serializable lists
            name += '.npy'
            np.save(path.join(config.OUTPUT_PATH, name), data)

        # console message (fixed typo: 'succesfully' -> 'successfully')
        print('`%s` successfully saved at %s.' % (name, config.OUTPUT_PATH))

    log_path = config.LOG_PATH
    loss_data, acc_data = [[], []], [[], [], []]  # create stats
    epoch_acc = [.0]  # leading sentinel so epoch_acc[-2] is valid on epoch 1
    best_acc = .0
    stagnation_count = 0  # consecutive epochs without validation improvement
    data_modes = 'model', 'loss_data', 'acc_data', 'epoch_acc'

    if msg is None:
        msg = 'unnamed'

    # append mode: the '\n\n' + divider written below only makes sense when
    # successive runs accumulate in one log file ('w+' truncated it each run)
    with open(log_path, 'a') as f:

        net.cuda()  # move the model to gpu

        # basic information
        filePrint('\n\n' + '-' * 80, file=f)  # divider
        filePrint('Training on %s is exciting!' % config.DATASET_NAME, file=f)
        filePrint('Model name: %s' % msg.capitalize(), file=f)
        time_msg = time.strftime("%H:%M:%S (%Y %b %d, %a) ", time.localtime())
        filePrint('Current LOCAL time: ' + time_msg, file=f)

        # prepare data and resume training
        train_set, train_loader = prepare_data()
        if resume is not None:
            resume_train()
        filePrint('Loading data completed (%s)' % timeSince(start), file=f)

        # start training
        filePrint('Training start (%s)' % timeSince(start), file=f)
        criterion = nn.CrossEntropyLoss()  # loss function

        # build the optimizer ONCE: re-creating Adam inside the epoch loop
        # (as before) discarded its running first/second-moment estimates at
        # every epoch boundary
        # optimizer = optim.SGD(
        #     net.parameters(), lr=lr,
        #     momentum=0.9, weight_decay=.0005)  # sgd optimizer
        optimizer = optim.Adam(net.parameters(), lr=lr)  # adam optimizer

        # in epoches
        for epoch in range(epoch_num):

            running_loss, acc = [.0] * 2  # init stats
            filePrint('Epoch [%3d / %3d] start, lr=%.5f (%s)'
                      % (epoch + 1, epoch_num, lr, timeSince(start)), file=f)

            for i, data in enumerate(train_loader, 0):

                # standard training process
                inputs, labels = data
                inputs, labels = inputs.cuda(), labels.cuda()
                optimizer.zero_grad()
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # calculate the loss and acc
                running_loss += float(loss.item()) / log_rate
                _, predicted = torch.max(outputs.data, 1)
                acc += (predicted == labels).sum().cpu().numpy()

                # at log point
                if i % log_rate == log_rate - 1:

                    # get total sample num.
                    count = (i + 1) * btch_size + epoch * config.TRAIN_NUM

                    # on x-axis
                    loss_data[0].append(count)
                    acc_data[0].append(count)

                    # on y-axis
                    loss_data[1].append(running_loss)
                    acc /= (log_rate * btch_size)
                    acc_data[1].append(acc)

                    # printing stats
                    filePrint('[%3d, %5d] loss: %.3f, train accuracy: %.2f%% (%s)'
                              % (epoch + 1, (i + 1) * btch_size, running_loss,
                                acc * 100, timeSince(start)), file=f)
                    running_loss, acc = [.0] * 2

            # check epoch accuracy
            current_epoch_acc = eval_model(net, model_path='').cpu().numpy()
            filePrint('Epoch [%3d / %3d] complete, validation accuracy: %.2f%% (%s)'
                      % (epoch + 1, epoch_num, current_epoch_acc * 100,
                         timeSince(start)), file=f)
            epoch_acc.append(current_epoch_acc)  # update epoch accuracy

            if current_epoch_acc > best_acc:  # save best acc model
                print('Best validation accuracy achieved.')  # only in console
                save_files(mode='model', data=net.state_dict(),
                           name=msg + '-best_acc')
                best_acc = current_epoch_acc  # update

            if current_epoch_acc <= epoch_acc[-2]:  # compared with previous data
                # (fixed missing space between the two joined sentences)
                filePrint('Validation accuracy is in stagnation. ' +
                          'Learning rate decays by 5.', file=f)
                lr /= 5  # lr decay
                for group in optimizer.param_groups:
                    group['lr'] = lr  # apply decay without resetting Adam state
                stagnation_count += 1
                if stagnation_count >= 3:  # three CONSECUTIVE stagnant epochs
                    filePrint('Stagnated for three epochs, ' +
                              'training halted.', file=f)
                    break  # stop training
            else:
                stagnation_count = 0  # progress made: restart stagnation window

        # save files
        data_tuple = net.state_dict(), loss_data, acc_data, epoch_acc
        for data_mode, data in zip(data_modes, data_tuple):
            save_files(mode=data_mode, data=data, name=msg)

        filePrint('Finished Training (%s)' % timeSince(start), file=f)  # say goodbye

    print('Training process completed.')  # only show in console