# data_loader.py
import os
import logging
import time
import numpy as np
from tracemalloc import start

import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from models.vit import VisionTransformer
from flagai.auto_model.auto_loader import AutoLoader
from torchvision.datasets import CIFAR100
from torch.utils.data import DataLoader, Dataset
from timm.models.vision_transformer import vit_base_patch16_224_in21k as create_model  
from timm.models.vision_transformer import _load_weights

from torch.distributed import get_rank
import random 

from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DistributedSampler
import os
import torch
import numpy as np
from torchvision import datasets, transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import Mixup
from timm.data import create_transform
# 
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,3"


try:
    from torchvision.transforms import InterpolationMode

    def _pil_interp(method):
        """Map an interpolation name to a torchvision ``InterpolationMode``.

        Newer torchvision releases removed the integer PIL constants that
        older timm versions return from ``_pil_interp``, so we provide an
        enum-based replacement and patch it into timm below.
        """
        if method == 'bicubic':
            return InterpolationMode.BICUBIC
        elif method == 'lanczos':
            return InterpolationMode.LANCZOS
        elif method == 'hamming':
            return InterpolationMode.HAMMING
        else:
            # default bilinear, do we want to allow nearest?
            return InterpolationMode.BILINEAR

    import timm.data.transforms as timm_transforms
    # Monkey-patch timm so its internal transforms use the enum-based lookup.
    timm_transforms._pil_interp = _pil_interp
# Bug fix: was a bare `except:` which hid unrelated errors (KeyboardInterrupt,
# typos inside the try body, etc.).  Only the import can legitimately fail here.
except ImportError:
    # Older torchvision without InterpolationMode: timm's own helper works.
    from timm.data.transforms import _pil_interp


def random_seed(seed=42, rank=0):
    """Seed Python, NumPy and torch RNGs for per-rank reproducibility.

    Args:
        seed: base seed shared by all processes.
        rank: distributed rank; added to the seed so each worker draws a
            different (but reproducible) random stream.
    """
    torch.manual_seed(seed + rank)
    # Bug fix: CUDA RNGs were never seeded, so GPU-side sampling
    # (e.g. dropout) was not reproducible.  No-op on CPU-only machines.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed + rank)
    np.random.seed(seed + rank)
    random.seed(seed + rank)

# Distributed setup: NCCL backend, one process per GPU.  The script is meant
# to be launched via torch.distributed (e.g. torchrun), which provides the
# environment variables init_process_group() reads.
# Bug fix: the previous `device = torch.device("cuda" if ...)` assignment was
# dead code — it was unconditionally overwritten below.
torch.distributed.init_process_group(backend='nccl')
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")

# NOTE(review): get_rank() returns the *global* rank; on a multi-node run this
# is not a valid local device index — assumes single-node training, confirm.
local_rank = get_rank()
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)


# ---- training hyper-parameters ----
n_epochs = 20
lr = 5e-5

# ---- logging: one file per run, timestamped messages, truncate on start ----
logger_cfg = {
    "filename": "log_flagai_vit_base_16.txt",
    "format": '%(asctime)s - %(message)s',
    "filemode": 'w',
}

logging.basicConfig(**logger_cfg)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# input resolution expected by the ViT-B/16 model
image_size = 224

def build_transform(is_train):
    """Build train/eval transforms for ImageNet-style classification.

    Training uses timm's augmentation pipeline (RandAugment, color jitter,
    random erasing); evaluation uses resize + center crop + normalize.
    """
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        return create_transform(
            input_size=image_size,
            is_training=True,
            color_jitter=0.4,
            auto_augment="rand-m9-mstd0.5-inc1",
            re_prob=0.25,
            re_mode="pixel",
            re_count=1.0,
            interpolation="bicubic",
        )

    # Evaluation path: resize keeping the conventional 256/224 crop ratio.
    resize_size = int((256 / 224) * image_size)
    return transforms.Compose([
        transforms.Resize(resize_size, interpolation=_pil_interp("bicubic")),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
    ])

# imagenet loader
def data_loader(root, batch_size=256, workers=1, pin_memory=True):
    """Create distributed train/val DataLoaders over an ImageNet-layout tree.

    Args:
        root: dataset root containing ``train`` and ``val`` ImageFolder dirs.
        batch_size: per-process batch size.
        workers: DataLoader worker processes per loader.
        pin_memory: pin host memory for faster host-to-device copies.

    Returns:
        (train_loader, val_loader); both use a DistributedSampler, so
        ``shuffle`` must stay False on the DataLoader itself.
    """
    traindir = os.path.join(root, 'train')
    valdir = os.path.join(root, 'val')

    train_dataset = datasets.ImageFolder(traindir, build_transform(is_train=True))
    val_dataset = datasets.ImageFolder(valdir, build_transform(is_train=False))

    # Each rank sees a disjoint shard; shuffling is handled by the sampler.
    train_sampler = DistributedSampler(train_dataset)
    val_sampler = DistributedSampler(val_dataset)

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        sampler=train_sampler,
        shuffle=False,
        num_workers=workers,
        # Bug fix: the pin_memory argument was accepted but never forwarded.
        pin_memory=pin_memory,
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        sampler=val_sampler,
        shuffle=False,
        num_workers=workers,
        pin_memory=pin_memory,
    )

    return train_loader, val_loader


def reduce_tensor(tensor):
    """Sum ``tensor`` across all distributed ranks and return the result.

    The input is cloned first so the caller's tensor is left untouched.
    Requires torch.distributed to be initialized.
    """
    tensor = tensor.clone()
    torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)

    return tensor


# Backward-compatible alias: the original (misspelled) name is still used by
# callers elsewhere in this file.
reduce_tenosr = reduce_tensor


def train(epoch,model,data_loader,optimizer,criterion,scheduler):
    """Run one distributed training epoch.

    Performs backward/step/zero_grad per batch, steps the LR scheduler once
    per iteration, accumulates top-1/top-5 hit counts across all ranks via
    all_reduce, and logs per-step and per-epoch statistics on rank 0 only.

    Args:
        epoch: 1-based epoch index (used only for logging).
        model: the network (DDP-wrapped); may return a dict with a "logits" key.
        data_loader: training DataLoader backed by a DistributedSampler.
        optimizer: optimizer over model.parameters().
        criterion: loss function (e.g. CrossEntropyLoss).
        scheduler: LR scheduler stepped once per batch, not per epoch.
    """

    model.train()
    train_loss = 0
    top1_acc = 0.0  # running count of correct top-1 predictions, all ranks
    top5_acc = 0.0  # running count of correct top-5 predictions, all ranks


    for step, (inputs, labels) in enumerate(data_loader):

        start_time = time.time()

        inputs, labels = inputs.to(device), labels.to(device)

        # inputs,label_a,label_b,lam = mixup_data(inputs, labels,1.0,True)

        outputs = model(inputs)
        # Some model wrappers (e.g. flagai loaders) return {"logits": ...}.
        if type(outputs) is dict:
            outputs = outputs["logits"]


        loss = criterion(outputs, labels)

        # backward -> step -> zero_grad; the scheduler is stepped every
        # iteration, matching CosineAnnealingLR(T_max = epochs * steps/epoch)
        # configured in main().
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        scheduler.step()

        end_time = time.time()

        _, top1_preds = outputs.max(1)
        # all_reduce(SUM) so the count covers the whole global batch.
        top1_acc += reduce_tenosr(top1_preds.eq(labels).sum()).item()

        top5_pred = outputs.topk(5, 1, True, True)[1]
        top5_acc += reduce_tenosr(top5_pred.eq(labels.view(-1, 1).expand_as(top5_pred).to(device)).sum()).item()

        train_loss += loss.item()
        if local_rank==0:
            logger.info(
                    "Epoch [{epoch}/{max_epoch}], Step [{step}/{iter_num}], Loss: {loss:.4f}, lr: {lr:.7f}, time: {time}\n".format(
                        epoch=epoch,
                        max_epoch=n_epochs,
                        step=step+1,
                        iter_num=len(data_loader),
                        loss=loss.item(),
                        lr=optimizer.param_groups[0]['lr'],
                        time=end_time - start_time)
            )
    # Epoch summary: len(dataset) is the global sample count, matching the
    # all-reduced hit counts accumulated above.
    if local_rank==0:
        logger.info(
            "Epoch [{epoch}/{max_epoch}], train_top1_acc [{top1_acc}], train_top5_acc [{top5_acc}] \n".format(
                epoch=epoch,
                max_epoch=n_epochs,
                top1_acc=top1_acc/len(data_loader.dataset),
                top5_acc=top5_acc/len(data_loader.dataset),
            )

    )



@torch.no_grad()
def test(model,data_loader,criterion):
    """Evaluate the model, aggregating top-1/top-5 accuracy across all ranks.

    Logs results on rank 0 only.

    Args:
        model: the (DDP-wrapped) network; may return a dict with "logits".
        data_loader: validation DataLoader backed by a DistributedSampler.
        criterion: kept for call-site compatibility; evaluation reports only
            accuracy, so no loss is computed here.
    """
    model.eval()
    top1_acc = 0.0  # correct top-1 predictions summed over all ranks
    top5_acc = 0.0  # correct top-5 predictions summed over all ranks

    for step, (inputs, labels) in enumerate(data_loader):

        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        # Some model wrappers (e.g. flagai loaders) return {"logits": ...}.
        if type(outputs) is dict:
            outputs = outputs["logits"]
        # Bug fix: a per-batch loss was computed and then discarded — pure
        # wasted work during evaluation.

        _, top1_preds = outputs.max(1)
        top1_acc += reduce_tenosr(top1_preds.eq(labels).sum()).item()

        # Same call shape as train(): topk(k=5, dim=1, largest=True, sorted=True).
        top5_pred = outputs.topk(5, 1, True, True)[1]
        top5_acc += reduce_tenosr(top5_pred.eq(labels.view(-1, 1).expand_as(top5_pred).to(device)).sum()).item()


    if local_rank==0:
        logger.info(
                "test_top1_acc [{top1_acc}], test_top5_acc [{top5_acc}] \n".format(

                top1_acc=top1_acc/len(data_loader.dataset),
                top5_acc=top5_acc/len(data_loader.dataset),
            )

    )


def main():
    """Fine-tune an ImageNet-21k-pretrained ViT-B/16 on ImageNet-1k with DDP."""

    print("init data_loader")  # fix: was an f-string with no placeholder

    data_path = '/share/projset/baaishare/baai-mrnd/datasets/imagenet2012'

    train_loader, val_loader = data_loader(data_path, batch_size=128, workers=8, pin_memory=True)

    print(f"train_loader size is {len(train_loader)}")
    print(f"val_loader size is {len(val_loader)}")

    # Seed with rank 0 BEFORE model creation so every process builds identical
    # initial weights (DDP also broadcasts rank 0's params at wrap time).
    random_seed(42, rank=0)

    model = create_model(pretrained=False, num_classes=1000)
    _load_weights(model, "./checkpoints/vit-base-p16-224/pytorch_model.npz")

    # Re-seed per rank so augmentation / sampling streams differ across workers.
    random_seed(42, rank=local_rank)

    model = model.to(device)
    model = DistributedDataParallel(model, device_ids=[local_rank])
    # Bug fix: previously printed a *set literal* ({local_rank}); use f-string.
    print(f"current GPU {local_rank}\n")

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Cosine schedule is stepped per iteration in train(), hence
    # T_max = epochs * steps_per_epoch.
    total_iter = n_epochs * len(train_loader)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, total_iter)

    print("training")
    # fine-tuning loop
    for epoch in range(1, n_epochs + 1):
        train(epoch, model, train_loader, optimizer, criterion, scheduler)
        test(model, val_loader, criterion)
        if epoch % 8 == 0 and local_rank == 0:
            # Bug fix: save the underlying module's weights so the checkpoint
            # loads into a plain model without the DDP "module." key prefix.
            torch.save(model.module.state_dict(), "./epoch_{}.pth".format(epoch))
  
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()