# -*- coding: utf-8 -*-

"""
Created on 03/23/2022
train_test.
@author: Kang Xiatao (kangxiatao@gmail.com)
"""

import os

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from utils.common_utils import PresetLRScheduler


def r2_score(y_true, y_pred):
    """Coefficient of determination (R^2) between y_true and y_pred.

    R^2 = 1 - SS_res / SS_tot, where SS_tot is the squared deviation of
    y_true around its own mean. Returns a 0-dim tensor; 1.0 is a perfect
    fit, 0.0 matches predicting the mean, negative is worse than that.
    """
    residual_ss = ((y_true - y_pred) ** 2).sum()
    total_ss = ((y_true - y_true.mean()) ** 2).sum()
    return 1 - residual_ss / total_ss


def train(net, loader, optimizer, criterion, epoch, writer):
    """Run one training epoch over `loader`, log mean loss and R^2.

    Args:
        net: model to train (already on the target device).
        loader: iterable yielding (inputs, targets) batches.
        optimizer: torch optimizer over net.parameters().
        criterion: loss function, e.g. nn.MSELoss().
        epoch: current epoch index, used as the tensorboard step.
        writer: SummaryWriter-like object exposing add_scalar().
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    score = 0
    num_batches = 0

    for batch_idx, (inputs, targets) in enumerate(loader):
        # inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        # r2_score expects (y_true, y_pred): targets first. .item() keeps
        # `score` a plain float instead of a tensor that would retain the
        # autograd graph of every batch.
        score += r2_score(targets, outputs).item()
        num_batches += 1

    if num_batches == 0:
        # Empty loader: nothing to average or log.
        return

    print('- Loss: %.3f | score: %.3f' % (train_loss / num_batches, score / num_batches))

    # --- write the epoch-mean loss and score to tensorboard ---
    writer.add_scalar('train/loss', train_loss / num_batches, epoch)
    writer.add_scalar('train/score', score / num_batches, epoch)


def test(net, loader, criterion, epoch, writer):
    """Evaluate `net` on `loader`; log and return the mean R^2 score.

    Args:
        net: model to evaluate.
        loader: iterable yielding (inputs, targets) batches.
        criterion: loss function used for the reported loss.
        epoch: current epoch index, used as the tensorboard step.
        writer: SummaryWriter-like object exposing add_scalar().

    Returns:
        Mean R^2 over all batches as a plain float (0.0 for an empty loader).
    """
    net.eval()
    test_loss = 0
    score = 0
    num_batches = 0

    with torch.no_grad():
        for inputs, targets in loader:
            # inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            # r2_score expects (y_true, y_pred): targets first. .item()
            # keeps the running score a plain float.
            score += r2_score(targets, outputs).item()
            num_batches += 1

    if num_batches == 0:
        # Empty loader: nothing to average or log.
        return 0.0

    print('Loss: %.3f | score: %.3f' % (test_loss / num_batches, score / num_batches))

    writer.add_scalar('test/loss', test_loss / num_batches, epoch)
    writer.add_scalar('test/score', score / num_batches, epoch)
    return score / num_batches


def train_once(model, trainloader, testloader, config, writer, logger, pretrain=None, lr_mode='cosine', optim_mode='SGD'):
    """Train `model` for config.epoch epochs, checkpointing the best test score.

    Args:
        model: network to train.
        trainloader, testloader: (inputs, targets) batch iterables.
        config: namespace with learning_rate, weight_decay, epoch,
            checkpoint_dir and exp_name attributes.
        writer: tensorboard SummaryWriter (falsy skips the lr scalar only).
        logger: logger receiving the final best-score summary.
        pretrain: optional checkpoint dict with 'acc' and 'epoch' keys used
            to resume best-score tracking; epochs up to pretrain['epoch']
            are skipped (the lr schedule is still advanced for them).
        lr_mode: 'cosine' for CosineAnnealingLR; any value containing
            'preset' for the stepped schedule; otherwise the process exits.
        optim_mode: 'SGD' (momentum 0.9) or anything else for Adam.

    Returns:
        ('best acc: ..., epoch: ...\\n' summary string, print_inf string).
    """
    net = model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    # --- loss function ---
    # criterion = nn.CrossEntropyLoss()
    criterion = nn.MSELoss()
    # --- optimizer ---
    if optim_mode == 'SGD':
        optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    else:
        optimizer = optim.Adam(net.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # --- learning-rate decay mode ---
    if lr_mode == 'cosine':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    elif 'preset' in lr_mode:
        # Step schedule: full lr, then x0.1 at 50%, x0.01 at 75% of training.
        lr_schedule = {0: learning_rate,
                       int(num_epochs * 0.5): learning_rate * 0.1,
                       int(num_epochs * 0.75): learning_rate * 0.01}
        lr_scheduler = PresetLRScheduler(lr_schedule)
    else:
        print('===!!!=== Wrong learning rate decay setting! ===!!!===')
        exit()

    print_inf = ''
    best_epoch = 0
    if pretrain:
        best_acc = pretrain['acc']
        continue_epoch = pretrain['epoch']
    else:
        best_acc = 0
        continue_epoch = -1

    # --- main training loop ---
    for epoch in range(num_epochs):
        if epoch > continue_epoch:  # skip epochs already covered by the checkpoint

            # --- train and evaluate ---
            train(net, trainloader, optimizer, criterion, epoch, writer)
            test_acc = test(net, testloader, criterion, epoch, writer)

            # --- checkpoint on improvement (after a short warm-up) ---
            if test_acc > best_acc and epoch > 10:
                print('Saving..')
                state = {
                    'net': net,
                    'acc': test_acc,
                    'epoch': epoch,
                    'args': config,
                }
                path = os.path.join(config.checkpoint_dir, 'train_%s_best.pth.tar' % config.exp_name)
                torch.save(state, path)
                best_acc = test_acc
                best_epoch = epoch

        # --- advance the learning rate ---
        if lr_mode == 'cosine':
            # get_last_lr() returns one value per param group; add_scalar
            # needs a scalar, so log the first group's lr.
            _lr = lr_scheduler.get_last_lr()[0]
            lr_scheduler.step()
        else:
            _lr = lr_scheduler.get_lr(optimizer)
            lr_scheduler(optimizer, epoch)
        if writer:
            writer.add_scalar('train/lr', _lr, epoch)

    logger.info('best acc: %.4f, epoch: %d' % (best_acc, best_epoch))
    return 'best acc: %.4f, epoch: %d\n' % (best_acc, best_epoch), print_inf

