import torch
from torch.utils.data import DataLoader
import numpy as np
import csv
import os

from COVID19Dataset import COVID19Dataset
from NeuralNet import NeuralNet
from plot import plot_learning_curve, plot_pred

'''
Hints
Simple Baseline
    Run sample code
Medium Baseline
    Feature selection: 40 states + 2 tested_positive (TODO in dataset)
Strong Baseline
    Feature selection (what other features are useful?)
    DNN architecture (layers? dimension? activation function?)
    Training (mini-batch? optimizer? learning rate?)
    L2 regularization
    There are some mistakes in the sample code, can you find them?
'''


def get_device():
    """Return the compute device: 'cuda' when a GPU is available, else 'cpu'."""
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'


def prep_dataloader(path, mode, batch_size, n_jobs=0, target_only=False):
    """Build a DataLoader over a COVID19Dataset for the given split.

    Shuffling is enabled only for the 'train' split; the final partial
    batch is kept (drop_last=False).
    """
    ds = COVID19Dataset(path, mode=mode, target_only=target_only)
    return DataLoader(
        ds,
        batch_size,
        shuffle=(mode == 'train'),
        drop_last=False,
        num_workers=n_jobs,
        pin_memory=True,
    )


def train(tr_set, dv_set, model, config, device):
    """DNN training loop with checkpointing and early stopping.

    Args:
        tr_set: DataLoader yielding (x, y) training batches.
        dv_set: DataLoader for the validation set (forwarded to dev()).
        model: network exposing forward() and cal_loss(pred, y).
        config: dict with 'n_epochs', 'optimizer', 'optim_hparas',
            'save_path', and 'early_stop' keys.
        device: device string ('cpu' or 'cuda').

    Returns:
        (min_mse, loss_record): best validation MSE seen, and a dict with
        per-step train losses and per-epoch dev losses.
    """
    n_epochs = config['n_epochs']  # maximum number of epochs

    # Build the optimizer from the config, e.g. torch.optim.Adam(...).
    optimizer = getattr(torch.optim, config['optimizer'])(
        model.parameters(), **config['optim_hparas'])

    # Fix: start from +inf instead of the hard-coded 1000. — otherwise, if
    # validation loss never drops below 1000, no checkpoint is ever written
    # and the later torch.load(config['save_path']) fails.
    min_mse = float('inf')
    loss_record = {'train': [], 'dev': []}  # for recording training loss
    early_stop_cnt = 0
    epoch = 0

    while epoch < n_epochs:
        model.train()  # set model to training mode
        for x, y in tr_set:  # iterate through the dataloader
            optimizer.zero_grad()  # reset gradients from the previous step
            x, y = x.to(device), y.to(device)  # move data to device (cpu/cuda)
            pred = model(x)  # forward pass (compute output)
            mse_loss = model.cal_loss(pred, y)  # compute loss
            mse_loss.backward()  # compute gradient (backpropagation)
            optimizer.step()  # update model with optimizer
            loss_record['train'].append(mse_loss.detach().cpu().item())

        # After each epoch, evaluate on the validation set.
        dev_mse = dev(dv_set, model, device)
        if dev_mse < min_mse:
            # Save a checkpoint whenever validation loss improves.
            min_mse = dev_mse
            print('Saving model (epoch = {:4d}, loss = {:.4f})'
                  .format(epoch + 1, min_mse))
            torch.save(model.state_dict(), config['save_path'])  # Save model to specified path
            early_stop_cnt = 0
        else:
            early_stop_cnt += 1

        epoch += 1
        loss_record['dev'].append(dev_mse)
        if early_stop_cnt > config['early_stop']:
            # Stop training if the model has not improved for
            # config['early_stop'] consecutive epochs.
            break

    print('Finished training after {} epochs'.format(epoch))
    return min_mse, loss_record


def dev(dv_set, model, device):
    """Compute the per-sample average loss of `model` over the dev set."""
    model.eval()  # set model to evaluation mode
    running_loss = 0.0
    for inputs, targets in dv_set:
        inputs, targets = inputs.to(device), targets.to(device)
        with torch.no_grad():  # no gradients needed for validation
            outputs = model(inputs)
            batch_loss = model.cal_loss(outputs, targets)
        # Weight each batch by its size so the final result is a true
        # per-sample mean even when the last batch is smaller.
        running_loss += batch_loss.detach().cpu().item() * len(inputs)
    return running_loss / len(dv_set.dataset)


def test(tt_set, model, device):
    """Testing"""
    model.eval()  # set model to evalutation mode
    preds = []
    for x in tt_set:  # iterate through the dataloader
        x = x.to(device)  # move data to device (cpu/cuda)
        with torch.no_grad():  # disable gradient calculation
            pred = model(x)  # forward pass (compute output)
            preds.append(pred.detach().cpu())  # collect prediction
    preds = torch.cat(preds, dim=0).numpy()  # concatenate all predictions and convert to a numpy array
    return preds


def save_pred(preds, file):
    """Save predictions to the specified CSV file.

    Args:
        preds: iterable of scalar predictions (e.g. a 1-D numpy array).
        file: destination CSV path.
    """
    print('Saving results to {}'.format(file))
    # newline='' is required by the csv module; without it, csv.writer
    # emits an extra blank line between rows on Windows.
    with open(file, 'w', newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(preds):
            writer.writerow([i, p])


"""
baseline 0.7575
SGD换成Adam，达到0.6920
取20分之1的数据作为验证集，到达了0.6891
只使用两个target作为训练数据，loss为0.9
使用更复杂的网络，loss会更低，5层0.6920
六层比较难训练，5层（0.6920）和4层（0.6907）差不多，对这个问题4层就够了
通过optimizer的参数weight_decay来设置L2正则化，使参数平均化
batchsize选270最好，大了小了都效果不好
"""
if __name__ == '__main__':
    # Seed every RNG (numpy, torch CPU, torch CUDA) and force deterministic
    # cuDNN kernels so runs are reproducible.
    myseed = 42069  # set a random seed for reproducibility
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(myseed)
    torch.manual_seed(myseed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(myseed)
        print('cuda')
    '''
    Setup Hyper-parameters
    config contains hyper-parameters for training and the path to save your model.
    '''
    device = get_device()  # get the current available device ('cpu' or 'cuda')
    os.makedirs('models', exist_ok=True)  # The trained model will be saved to ./models/
    # False: use all features; True: only 40 state one-hots + 2 tested_positive
    # features (the Medium-baseline feature selection — see COVID19Dataset).
    target_only = False

    # How to tune these hyper-parameters to improve your model's performance?
    # NOTE(review): config_sgd is defined for comparison but unused below —
    # training runs with the Adam config.
    config_sgd = {
        'n_epochs': 3000,  # maximum number of epochs
        'batch_size': 270,  # mini-batch size for dataloader
        'optimizer': 'SGD',  # optimization algorithm (optimizer in torch.optim)
        'optim_hparas': {  # hyper-parameters for the optimizer (depends on which optimizer you are using)
            'lr': 0.001,  # learning rate of SGD
            'momentum': 0.9  # momentum for SGD
        },
        'early_stop': 200,  # early stopping epochs (the number epochs since your model's last improvement)
        'save_path': 'models/model.pth'  # your model will be saved here
    }

    # Active configuration: Adam optimizer, checkpoint saved separately.
    config = {
        'n_epochs': 30000,  # maximum number of epochs
        'batch_size': 270,  # mini-batch size for dataloader
        'optimizer': 'Adam',  # optimization algorithm (optimizer in torch.optim)
        'optim_hparas': {  # hyper-parameters for the optimizer (depends on which optimizer you are using)
            'lr': 0.01,  # learning rate
            #'weight_decay': 0.001
        },
        'early_stop': 300,  # early stopping epochs (the number epochs since your model's last improvement)
        'save_path': 'models/model_adam.pth'  # your model will be saved here
    }

    '''Load data and model'''
    tr_path = 'covid.train.csv'  # path to training data
    tt_path = 'covid.test.csv'  # path to testing data
    # 'train' and 'dev' both read the training CSV; the split into
    # train/validation subsets presumably happens inside COVID19Dataset
    # based on `mode` — verify against the dataset class.
    tr_set = prep_dataloader(tr_path, 'train', config['batch_size'], target_only=target_only)
    dv_set = prep_dataloader(tr_path, 'dev', config['batch_size'], target_only=target_only)
    tt_set = prep_dataloader(tt_path, 'test', config['batch_size'], target_only=target_only)

    '''Start Training!'''
    model = NeuralNet(tr_set.dataset.dim).to(device)  # Construct model and move to device
    model_loss, model_loss_record = train(tr_set, dv_set, model, config, device)
    plot_learning_curve(model_loss_record, title='deep model')

    # Discard the trained instance and reload the best checkpoint saved by
    # train(), so plotting/testing use the best (not the last) weights.
    del model
    model = NeuralNet(tr_set.dataset.dim).to(device)
    ckpt = torch.load(config['save_path'], map_location='cpu')  # Load your best model
    model.load_state_dict(ckpt)
    plot_pred(dv_set, model, device)  # Show prediction on the validation set

    '''
    Testing
    The predictions of your model on testing set will be stored at pred.csv
    '''
    preds = test(tt_set, model, device)  # predict COVID-19 cases with your model
    save_pred(preds, 'pred.csv')  # save prediction file to pred.csv
