import math
import csv

import torch
import numpy
from torch.utils.data import random_split

import matplotlib.pyplot as plt
from matplotlib.pyplot import figure


# Make training runs reproducible
def same_seed(seed):
    """Seed every RNG (numpy, torch CPU/CUDA) and force deterministic cuDNN."""
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; disable the auto-tuner so the same
    # algorithms are picked on every run.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


# Split the data into a training set and a validation set
def train_valid_split(data_set, valid_ratio, seed):
    """Randomly split `data_set` and return (train, valid) as numpy arrays.

    `valid_ratio` is the fraction assigned to validation; `seed` makes the
    split reproducible.
    """
    n_valid = int(valid_ratio * len(data_set))
    n_train = len(data_set) - n_valid
    gen = torch.Generator().manual_seed(seed)
    subsets = random_split(data_set, [n_train, n_valid], generator=gen)
    return tuple(numpy.array(s) for s in subsets)


# Separate features from labels
def train_feats_labels_split(data, is_test=False, select_all=True):
    """Split a 2-D data array into features and labels.

    Args:
        data: 2-D array; for training data the last column is the label.
        is_test: if True, every column is a feature and label is None.
        select_all: if False, keep only the hand-picked feature columns.

    Returns:
        (features, labels) — labels is None when is_test is True.
    """
    if is_test:
        feat, label = data[:, :], None
    else:
        feat, label = data[:, :-1], data[:, -1]

    if select_all:
        return feat, label

    # Hand-picked columns for one day's block (COVIDPositive-related fields);
    # each day's block is 18 columns wide, so later days are fixed offsets.
    day_cols = numpy.array([40, 41, 42, 45, 46, 47, 48, 49, 50, 52, 55, 57])
    feat_idx = list(day_cols)            # day 1
    feat_idx += list(day_cols + 18)      # day 2
    # Day 3 drops column 57: that column is the prediction target itself.
    feat_idx += list(day_cols[:-1] + 36)
    feat_idx.sort()
    return feat[:, feat_idx], label


# Plot learning curve
def plot_learning_curve(loss_record, title=''):
    """Plot learning curve of your DNN (train & valid loss).

    loss_record: dict with 'train' (one loss per optimizer step) and
    'valid' (one loss per epoch) lists.
    """
    total_steps = len(loss_record['train'])
    n_valid = len(loss_record['valid'])
    x_1 = range(total_steps)
    figure(figsize=(6, 4))
    plt.plot(x_1, loss_record['train'], c='tab:red', label='train')
    if n_valid > 0:
        # Valid losses are recorded once per epoch, so sample x positions at
        # the epoch stride. Trim both sides to the same length: the plain
        # floor-division slice could produce one extra x point (or divide by
        # zero on an empty record) and crash plt.plot.
        stride = max(1, total_steps // n_valid)
        x_2 = x_1[::stride][:n_valid]
        plt.plot(x_2, loss_record['valid'][:len(x_2)], c='tab:cyan', label='valid')
    plt.ylim(0.0, 5.)
    plt.xlabel('Training steps')
    plt.ylabel('MSE loss')
    plt.title('Learning curve of {}'.format(title))
    plt.legend()
    plt.show()


def plot_pred(dv_set, model, device, lim=35., preds=None, targets=None):
    """Scatter-plot DNN predictions against ground truth.

    If preds/targets are not supplied, they are computed by running `model`
    over `dv_set` on `device`.
    """
    if preds is None or targets is None:
        model.eval()
        preds, targets = [], []
        for x, y in dv_set:
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                pred = model(x)
                # Move to CPU here: .numpy() below raises on CUDA tensors.
                preds.append(pred.detach().cpu())
                targets.append(y.detach().cpu())
        preds = torch.cat(preds, dim=0).numpy()
        targets = torch.cat(targets, dim=0).numpy()

    figure(figsize=(5, 5))
    plt.scatter(targets, preds, c='r', alpha=0.5)
    plt.plot([-0.2, lim], [-0.2, lim], c='b')
    plt.xlim(-0.2, lim)
    plt.ylim(-0.2, lim)
    plt.xlabel('ground truth value')
    plt.ylabel('predicted value')
    plt.title('Ground Truth v.s. Prediction')
    plt.show()


# Training loop
def trainer(train_set, valid_set, model, config):
    """Train `model`, validating once per epoch with early stopping.

    Args:
        train_set / valid_set: DataLoaders yielding (features, labels).
        model: network exposing a `cal_loss(pred, label)` method.
        config: dict with 'lr', 'epochs', 'device', 'save_model_path',
            'early_stop'.

    Returns:
        (best_valid_mse, loss_record) where loss_record holds per-step
        train losses and per-epoch valid losses.
    """
    # Optimizer (SGD alternative kept for reference):
    # optimizer = torch.optim.SGD(model.parameters(), lr=config['lr'], weight_decay=1e-5, momentum=config['momentum'])
    optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'])
    epochs, best_loss, early_stop_count = config['epochs'], math.inf, 0
    loss_record = {'train': [], 'valid': []}

    for epoch in range(epochs):
        # ---- training phase ----
        model.train()
        for feat, label in train_set:
            optimizer.zero_grad()
            feat, label = feat.to(config['device']), label.to(config['device'])
            pred = model(feat)
            loss = model.cal_loss(pred, label)
            loss.backward()
            optimizer.step()
            loss_record['train'].append(loss.item())

        # ---- validation phase ----
        model.eval()
        total_loss = 0
        for feat, label in valid_set:
            feat, label = feat.to(config['device']), label.to(config['device'])
            with torch.no_grad():  # no gradients needed for validation
                pred = model(feat)
                mse_loss = model.cal_loss(pred, label)
            # Weight by batch size so the mean is exact even when the last
            # batch is smaller.
            total_loss += mse_loss.detach().item() * len(feat)
        valid_mse = total_loss / len(valid_set.dataset)
        loss_record['valid'].append(valid_mse)

        if valid_mse < best_loss:
            # Validation loss improved: checkpoint and reset the stop counter.
            best_loss = valid_mse
            print('Saving model (epoch = {:4d}, loss = {:.4f})'
                  .format(epoch + 1, best_loss))
            torch.save(model.state_dict(), config['save_model_path'])
            early_stop_count = 0
        else:
            early_stop_count += 1

        if early_stop_count > config['early_stop']:
            # Stop when validation loss has not improved for early_stop epochs.
            break
    return best_loss, loss_record


# 预测
def tester(tt_set, model, device):
    model.eval()
    pred_list = []
    for x in tt_set:
        x = x.to(device)
        with torch.no_grad():
            pred = model(x)
            pred_list.append(pred.detach().cpu())
    pred_list = torch.cat(pred_list, dim=0).numpy()
    return pred_list


# Write predictions to a CSV for submission:
# https://www.kaggle.com/competitions/ml2021spring-hw1/submissions
def save_pred(pred_data, file):
    """Save predictions to `file` as CSV with columns (id, tested_positive)."""
    print('Saving results to {}'.format(file))
    # newline='' is required by the csv module; without it Windows inserts a
    # blank line after every row.
    with open(file, 'w', newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(pred_data):
            writer.writerow([i, p])