# data_loader.py
import os
import logging
import time
import numpy as np
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from models.vit import VisionTransformer
from torch.utils.data import DataLoader, Dataset
from torch.distributed import get_rank
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DistributedSampler
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import KLDivLoss

# Initial device pick; NOTE(review): `device` is rebound below after the
# distributed process group is initialised, so this value is transient.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Module-level loss helpers. These modules have no parameters, so the
# .to(device) calls here are effectively no-ops.
bce_loss = nn.BCELoss().to(device)
bce_loss_sum = nn.BCELoss(reduction='sum').to(device)
softmax = nn.Softmax(dim=1).to(device)

# KL term used in train() to keep 2nd-pass predictions close to the detached
# 1st-pass logits. NOTE(review): reduction='mean' averages over *every*
# element; 'batchmean' matches the mathematical KL definition -- confirm the
# intended scaling of the 0.02 coefficient.
klloss = KLDivLoss(reduction='mean').to(device)

def to_onehot(y, class_num=100):
    """Convert a batch of integer class labels to one-hot float vectors.

    Args:
        y: 1-D integer tensor of shape (batch,) holding class indices
           in [0, class_num).
        class_num: number of classes (width of each one-hot row).

    Returns:
        CPU FloatTensor of shape (batch, class_num) with a 1 at each row's
        label position and 0 elsewhere.
    """
    batch_size = y.size(0)
    new_y = torch.zeros((batch_size, class_num))
    # Vectorized scatter replaces the original per-sample Python loop.
    # .cpu() keeps this working when y lives on the GPU (new_y is on CPU,
    # matching the original behavior; callers move the result to device).
    new_y.scatter_(1, y.view(-1, 1).long().cpu(), 1.0)
    return new_y

def mixup_data(x, y, alpha=1.0, lam=None, index=None, device='cuda:0'):
    '''Compute the mixup data. Return mixed inputs, mixed targets, lambda, index.

    Uses a *per-sample* lambda of shape (batch,) drawn from Beta(alpha, alpha)
    unless one is supplied. Freshly drawn lambdas are created with
    requires_grad=True so the caller can backprop into lambda (used by the
    lambda-refinement step in train()).

    Args:
        x: input batch of shape (batch, C, H, W).
        y: targets of shape (batch, class_num) (e.g. one-hot), mixed with the
           same per-sample lambda as x.
        alpha: Beta concentration used only when lam is None.
        lam: optional precomputed lambda tensor of shape (batch,).
        index: optional batch permutation (LongTensor) selecting mix partners.
        device: placement for freshly created lam/index (a torch device
            string or an int GPU rank, as passed by train()).

    Returns:
        (mixed_x, mixed_y, lam, index)
    '''
    batch_size = x.size(0)
    if lam is None:
        # torch.tensor(...) builds a *leaf* tensor directly on `device`;
        # replaces the deprecated Variable(...).to(device) pattern, whose
        # .to() produced a non-leaf and forced callers to retain_grad().
        lam = torch.tensor(np.random.beta(alpha, alpha, batch_size),
                           dtype=torch.float32, device=device,
                           requires_grad=True)
    if index is None:  # fixed: `index == None` is not a reliable tensor test
        index = torch.randperm(batch_size).to(device)

    mixed_x = lam.view(-1, 1, 1, 1) * x + (1 - lam).view(-1, 1, 1, 1) * x[index]
    y_a, y_b = y, y[index]
    mixed_y = lam.view(-1, 1) * y_a + (1 - lam).view(-1, 1) * y_b
    return mixed_x, mixed_y, lam, index

# Distributed setup: expects a torchrun/torch.distributed launch with one
# process per GPU and NCCL available.
torch.distributed.init_process_group(backend='nccl')
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")

# NOTE(review): get_rank() returns the *global* rank; on multi-node jobs this
# differs from the node-local rank -- confirm this script is single-node.
local_rank = get_rank()
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)  # rebinds the module-level `device`

# train config
n_epochs = 20
lr = 5e-5

# logger config

logger_cfg = dict(
    filename="log.txt",
    format='%(asctime)s - %(message)s',
    filemode='w',  # overwrite the log file on every run
)

logging.basicConfig(**logger_cfg)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# imagenet loader
# imagenet loader
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
    """Build distributed ImageNet train/val DataLoaders.

    Args:
        root: dataset root containing 'train' and 'val' ImageFolder trees.
        batch_size: per-process batch size.
        workers: num_workers for each DataLoader.
        pin_memory: pin host memory for faster host-to-device copies.

    Returns:
        (train_loader, val_loader), each driven by a DistributedSampler
        (so shuffle must stay False on the DataLoader itself).
    """
    traindir = os.path.join(root, 'train')
    valdir = os.path.join(root, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            # transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ])
    )
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
    )

    train_sampler = DistributedSampler(train_dataset)
    val_sampler = DistributedSampler(val_dataset)

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              sampler=train_sampler,
                              shuffle=False,
                              num_workers=workers,
                              pin_memory=pin_memory  # fix: was accepted but never used
                              )
    val_loader = DataLoader(val_dataset,
                            batch_size=batch_size,
                            sampler=val_sampler,
                            shuffle=False,
                            num_workers=workers,
                            pin_memory=pin_memory  # fix: was accepted but never used
                            )

    return train_loader, val_loader

# NOTE(review): the name keeps the original typo ("tenosr") because callers
# in this file reference it; renaming would need an alias for compatibility.
def reduce_tenosr(tensor):
    """Sum `tensor` across all distributed ranks and return the result.

    Operates on a clone so the caller's tensor is left unmodified; requires
    an initialised process group (done at module import above).
    """
    tensor = tensor.clone()
    torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)
    
    return tensor

def train(epoch, model, data_loader, optimizer, criterion, scheduler, step_size=1e-2):
    """Run one epoch of mixup training with a gradient-refined lambda.

    Two forward passes per batch:
      1. Mix the batch with a random per-sample lambda, backprop the BCE loss,
         and take a gradient step of size `step_size` on lambda itself.
      2. Re-mix with the updated (clamped) lambda and optimise the model on
         BCE(sum) plus a small KL term tying the 2nd-pass predictions to the
         detached 1st-pass logits.

    Args:
        epoch: 1-based epoch index (logging and sampler reshuffling).
        model: network producing (batch, 1000) logits (DDP-wrapped in main()).
        data_loader: training DataLoader backed by a DistributedSampler.
        optimizer: stepped once per batch.
        criterion: unused; kept for interface compatibility.
        scheduler: stepped once per *batch* (see total_iter in main()).
        step_size: learning rate of the lambda refinement step.
    """
    # Fix: without set_epoch the DistributedSampler yields the same
    # permutation every epoch.
    if isinstance(data_loader.sampler, DistributedSampler):
        data_loader.sampler.set_epoch(epoch)

    model.train()
    train_loss = 0

    for step, (inputs, labels) in enumerate(data_loader):

        start_time = time.time()

        inputs, labels = inputs.to(device), labels.to(device)
        # ImageNet-1k: BCE needs dense one-hot targets.
        labels = to_onehot(labels, class_num=1000).to(device)

        # --- 1st synthesis: probe pass to obtain a gradient w.r.t. lambda ---
        inputs_, targets_, lam, index = mixup_data(inputs, labels, alpha=1.0, device=local_rank)

        outputs = model(inputs_)
        target_logits = outputs.detach().clone()  # teacher logits for the KL term

        loss = bce_loss(softmax(outputs), targets_.detach())
        lam.retain_grad()  # harmless on a leaf; keeps .grad if lam is non-leaf
        loss.backward(retain_graph=True)

        # Gradient step on lambda only (model grads are cleared below),
        # clamped back to the valid mixing range [0, 1]. Replaces the
        # deprecated Variable(..., requires_grad=False) wrapper.
        lam = torch.clamp(lam.detach() + step_size * lam.grad.detach(), 0, 1)

        # Discard the probe pass's model gradients.
        optimizer.zero_grad()

        # --- 2nd synthesis: train the model with the refined lambda ---
        inputs_, targets_, lam, _ = mixup_data(inputs, labels, lam=lam, index=index, device=local_rank)
        outputs = model(inputs_)
        loss = bce_loss_sum(softmax(outputs), targets_.detach())

        # Consistency term against the detached 1st-pass logits.
        # dim=1 made explicit: the implicit-dim form is deprecated.
        loss += 0.02 * klloss(torch.nn.functional.log_softmax(outputs, dim=1),
                              torch.nn.functional.softmax(target_logits, dim=1))

        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        train_loss += loss.item()

        scheduler.step()

        end_time = time.time()

        if local_rank == 0:
            logger.info(
                    "Epoch [{epoch}/{max_epoch}], Step [{step}/{iter_num}], Loss: {loss:.4f}, lr: {lr:.7f}, time: {time}\n".format(
                        epoch=epoch,
                        max_epoch=n_epochs,
                        step=step+1,
                        iter_num=len(data_loader),
                        loss=loss.item(),
                        lr=optimizer.param_groups[0]['lr'],
                        time=end_time - start_time)
            )


@torch.no_grad()
def test(model, data_loader, criterion):
    """Evaluate `model` over `data_loader`, logging global top-1/top-5 accuracy.

    Correct-prediction counts are summed across all ranks via all-reduce and
    divided by the full dataset size; only rank 0 writes to the log.
    `criterion` is accepted for interface compatibility but unused.
    """
    model.eval()
    correct1 = 0.0
    correct5 = 0.0

    for batch_x, batch_y in data_loader:
        batch_x = batch_x.to(device)
        batch_y = batch_y.to(device)
        logits = model(batch_x)

        # Top-1: argmax over the class dimension.
        top1 = logits.max(1)[1]
        correct1 += reduce_tenosr(top1.eq(batch_y).sum()).item()

        # Top-5: does the label appear among the five largest logits?
        top5 = logits.topk(5, 1, True)[1]
        hits5 = top5.eq(batch_y.view(-1, 1).expand_as(top5).to(device))
        correct5 += reduce_tenosr(hits5.sum()).item()

    if local_rank == 0:
        logger.info(
                "test_top1_acc [{top1_acc}], test_top5_acc [{top5_acc}] \n".format(
                top1_acc=correct1 / len(data_loader.dataset),
                top5_acc=correct5 / len(data_loader.dataset),
            )

    )


def main():
    """Entry point: build loaders, load a pretrained ViT-B/16, fine-tune with mixup."""

    # args.data = './imagenet'
    print(f"init data_loader")

    # NOTE(review): hard-coded dataset path -- confirm it exists on the host.
    data_path = "/mnt/datasets/imagenet2012/"

    train_loader, val_loader = data_loader(data_path, batch_size=64, workers=8, pin_memory=True)

    print(f"train_loader size is {len(train_loader)}")
    print(f"val_loader size is {len(val_loader)}")

    # ViT-B/16 configuration (768-dim, 12 layers, 12 heads) for ImageNet-1k.
    model = VisionTransformer(img_size=224,
                              patch_size=16,
                              in_chans=3,
                              embed_dim=768,
                              depth=12,
                              num_heads=12,
                              num_classes=1000)

    print("init model weights")
    # NOTE(review): expects the Google-released ViT-B_16 .npz checkpoint in cwd.
    model.load_pretrained("./ViT-B_16.npz")

    model = model.to(device)
    model = DistributedDataParallel(model,device_ids=[local_rank])
    print("current GPU",{local_rank},"\n")

    # criterion is created but train()/test() do not actually use it.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # One cosine cycle over the whole run: the scheduler is stepped per
    # *batch* inside train(), hence epochs * iterations-per-epoch.
    total_iter = n_epochs*len(train_loader)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, total_iter)

    print("Mix UP training")
    # fine-tuning loop
    for epoch in range(1,n_epochs+1):
        train(epoch,model,train_loader,optimizer,criterion,scheduler)
        test(model,val_loader,criterion)
        # Checkpoint every 2 epochs from rank 0 only.
        # NOTE(review): saving the DDP wrapper's state_dict prefixes keys with
        # "module."; consider model.module.state_dict() for portable loading.
        if epoch%2==0:
            if local_rank==0:
                torch.save(model.state_dict(), "./epoch_{}.pth".format(epoch))

if __name__ == '__main__':

    main()