# -*- coding: utf-8 -*-

"""
Created on 03/23/2022
train_test.
@author: Kang Xiatao (kangxiatao@gmail.com)
"""

import os
from datetime import datetime

import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
from tqdm import tqdm
from utils.common_utils import PresetLRScheduler
from utils.data_utils import get_dataloader


def train(gpu, model, loader, optimizer, criterion, epoch, writer):
    """Run one optimization pass over `loader`.

    Only the process with gpu == 0 prints progress and writes TensorBoard
    scalars, so multi-process (DDP) runs emit a single log stream.
    """
    if gpu == 0:
        print('\nEpoch: %d   - %s' % (epoch, str(datetime.now())))

    model.train()
    loss_sum = 0.0
    hits = 0
    seen = 0

    for batch_idx, (images, labels) in enumerate(loader):
        images = images.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)

        optimizer.zero_grad()
        logits = model(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()

        # Running statistics for the epoch-level report.
        loss_sum += batch_loss.item()
        hits += logits.max(1)[1].eq(labels).sum().item()
        seen += labels.size(0)

    if gpu == 0:
        print('- Loss: %.3f | Acc: %.3f%% (%d/%d)' % (loss_sum/(batch_idx+1), 100.*hits/seen, hits, seen))
        if writer:
            writer.add_scalar('train/loss', loss_sum / (batch_idx + 1), epoch)
            writer.add_scalar('train/acc', 100. * hits / seen, epoch)


def test(model, loader, criterion, epoch, writer):
    """Evaluate `model` on `loader` and return the accuracy in percent.

    Also prints a one-line summary and, when `writer` is given, logs the
    mean loss and accuracy to TensorBoard under 'test/*'.
    """
    model.eval()
    loss_sum = 0.0
    hits = 0
    seen = 0

    with torch.no_grad():
        for batch_idx, (images, labels) in enumerate(loader):
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            logits = model(images)
            loss_sum += criterion(logits, labels).item()
            hits += logits.max(1)[1].eq(labels).sum().item()
            seen += labels.size(0)

    acc = 100. * hits / seen
    print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (loss_sum/(batch_idx+1), acc, hits, seen))
    if writer:
        writer.add_scalar('test/loss', loss_sum / (batch_idx + 1), epoch)
        writer.add_scalar('test/acc', 100. * hits / seen, epoch)

    return acc


def train_once(gpu, net, config, writer, logger, pretrain=None, lr_mode='cosine', optim_mode='SGD'):
    """Distributed (one process per GPU) train/eval driver for `net`.

    Args:
        gpu: local GPU index of this process; rank 0 does all logging/saving.
        net: model to train; wrapped in DistributedDataParallel here.
        config: run configuration — reads nr, gpus, world_size, learning_rate,
            weight_decay, epoch, dataset, batch_size, dp, checkpoint_dir,
            exp_name.
        writer: TensorBoard SummaryWriter or None.
        logger: logger used by rank 0 for the final summary line.
        pretrain: optional checkpoint dict with 'acc' and 'epoch' to resume
            from; epochs up to pretrain['epoch'] are skipped.
        lr_mode: 'cosine' or any string containing 'preset'.
        optim_mode: 'SGD' for SGD+momentum, anything else selects Adam.

    Returns:
        Tuple of (summary string 'best acc: ..., epoch: ...\\n', extra info string).

    Raises:
        ValueError: if `lr_mode` is neither 'cosine' nor a 'preset' variant.
    """
    # Fail fast on a bad schedule choice, before any expensive distributed
    # setup (the original exit() here would kill the interpreter mid-run).
    if lr_mode != 'cosine' and 'preset' not in lr_mode:
        print('===!!!=== Wrong learning rate decay setting! ===!!!===')
        raise ValueError('unsupported lr_mode: %r' % (lr_mode,))

    # Distributed setup: global rank of this process across all nodes.
    rank = config.nr * config.gpus + gpu
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
        world_size=config.world_size,
        rank=rank,
    )

    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch

    torch.manual_seed(0)  # identical init on every process
    torch.cuda.set_device(gpu)

    model = nn.parallel.DistributedDataParallel(net, device_ids=[gpu])

    trainloader, testloader = get_dataloader(config.dataset, config.batch_size, 256, config.world_size, rank, root=config.dp)

    criterion = nn.CrossEntropyLoss().cuda(gpu)
    if optim_mode == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    else:
        optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    if lr_mode == 'cosine':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    else:  # 'preset': step decay to 10% / 1% at 50% / 75% of training
        lr_schedule = {0: learning_rate,
                       int(num_epochs * 0.5): learning_rate * 0.1,
                       int(num_epochs * 0.75): learning_rate * 0.01}
        lr_scheduler = PresetLRScheduler(lr_schedule)

    print_inf = ''
    if pretrain:
        # Resuming: carry over the checkpoint's best accuracy AND its epoch
        # (previously best_epoch stayed 0, so the final summary was wrong
        # whenever no new best was found after resume).
        best_acc = pretrain['acc']
        best_epoch = pretrain['epoch']
        continue_epoch = pretrain['epoch']
    else:
        best_acc = 0
        best_epoch = 0
        continue_epoch = -1

    for epoch in range(num_epochs):
        # Skip epochs already covered by the checkpoint; the scheduler still
        # steps below so the learning rate stays aligned with `epoch`.
        if epoch > continue_epoch:
            train(gpu, model, trainloader, optimizer, criterion, epoch, writer)

            if gpu == 0:
                test_acc = test(model, testloader, criterion, epoch, writer)
                # epoch > 10 guard: don't checkpoint early, noisy epochs.
                if test_acc > best_acc and epoch > 10:
                    print('Saving..')
                    state = {
                        'net': model,
                        'acc': test_acc,
                        'epoch': epoch,
                        'args': config
                    }
                    # Ensure the target directory exists before torch.save.
                    os.makedirs(config.checkpoint_dir, exist_ok=True)
                    path = os.path.join(config.checkpoint_dir, 'train_%s_best.pth.tar' % config.exp_name)
                    torch.save(state, path)
                    best_acc = test_acc
                    best_epoch = epoch

        # CosineAnnealingLR advances via step(); the preset scheduler is a
        # callable taking (optimizer, epoch).
        if lr_mode == 'cosine':
            lr_scheduler.step()
        else:
            lr_scheduler(optimizer, epoch)

    if gpu == 0:
        logger.info('best acc: %.4f, epoch: %d' % (best_acc, best_epoch))
    return 'best acc: %.4f, epoch: %d\n' % (best_acc, best_epoch), print_inf

