import os
import sys
import json
from tqdm import tqdm
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import torch.optim as optim

import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP

from models.densenet import densenet_cifar, DenseNet121


# Registry mapping the --choose_model CLI value to a model constructor
# (constructors imported from models.densenet above). Each value is a
# zero-argument callable returning a fresh nn.Module.
available_models = {
    'densenet_cifar': densenet_cifar,
    'densenet121': DenseNet121,
}


def pred(args):
    """
    Evaluate the best saved checkpoint for the chosen model on the CIFAR-10 test set.

    Prints the overall test accuracy and the per-class accuracy.

    Args:
        args: parsed CLI namespace; uses ``args.choose_model`` (key into
            ``available_models``) and ``args.batch_size``.
    """
    print("Loading the best model parameters...")
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    assert args.choose_model in available_models, \
        "Error: Model {} not in available_models".format(args.choose_model)
    model = available_models.get(args.choose_model)()
    model.to(device)

    # Checkpoints written while the model was wrapped in DDP carry a
    # "module." prefix on every key; strip it so the keys match, and load
    # strictly (the original strict=False silently ignored ALL mismatched
    # keys, i.e. loaded nothing from a DDP checkpoint).
    state_dict = torch.load("./ckpt/{}/best.pth".format(args.choose_model),
                            map_location=device)
    state_dict = {(k[len("module."):] if k.startswith("module.") else k): v
                  for k, v in state_dict.items()}
    model.load_state_dict(state_dict)

    N_CLASSES = 10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    test_transforms = transforms.Compose([transforms.ToTensor(),
                                          transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    data_path = os.path.join(os.getcwd(), "./data")
    test_dataset = datasets.CIFAR10(root=data_path,
                                    train=False,
                                    download=False,
                                    transform=test_transforms)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False)

    model.eval()
    total_correct = 0
    total = 0
    class_correct = [0.0] * N_CLASSES
    class_total = [0.0] * N_CLASSES
    with torch.no_grad():
        test_bar = tqdm(test_loader, file=sys.stdout)
        for test_images, test_labels in test_bar:
            test_labels = test_labels.to(device)
            outputs = model(test_images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            correct_mask = torch.eq(predict_y, test_labels)
            total_correct += correct_mask.sum().item()
            # BUG FIX: the original did `total += len(test_data)` which adds
            # len((images, labels)) == 2 per batch, not the batch size.
            total += test_labels.size(0)

            # No .squeeze() here: squeezing would turn a size-1 batch into a
            # 0-dim tensor and break the indexing below.
            for i in range(test_labels.size(0)):
                _label = test_labels[i].item()
                class_correct[_label] += correct_mask[i].item()
                class_total[_label] += 1

    print('Total acc: {:.4f}'.format(total_correct / total))

    for i in range(N_CLASSES):
        # Guard against a class that never appears in the evaluated split.
        class_acc = class_correct[i] / class_total[i] if class_total[i] else 0.0
        print('{} acc: {:.4f}'.format(classes[i], class_acc))


def main(args):
    """
    Train and validate the chosen model on CIFAR-10 under DistributedDataParallel.

    Launch with torchrun (one process per GPU). Rank 0 performs validation,
    checkpointing, and writes the class-index JSON file.

    Args:
        args: parsed CLI namespace; uses epochs, batch_size, lr, choose_model.
    """
    dist.init_process_group(backend="nccl")
    # LOCAL_RANK (set by torchrun) indexes GPUs on *this* node. The global
    # rank from dist.get_rank() is only a valid device index on single-node
    # runs, so prefer the environment variable when it is present.
    local_rank = int(os.environ.get("LOCAL_RANK", dist.get_rank()))
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)

    data_transform = {
        "train": transforms.Compose([transforms.RandomCrop(32, padding=4),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]),
        "val": transforms.Compose([transforms.ToTensor(),
                                   transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])  # very important!
    }

    data_path = os.path.join(os.getcwd(), "./data")
    assert os.path.exists(data_path), "{} path does not exist.".format(data_path)

    # Download CIFAR10
    train_dataset = datasets.CIFAR10(root=data_path,
                                     train=True,
                                     download=True,
                                     transform=data_transform['train'])
    validate_dataset = datasets.CIFAR10(root=data_path,
                                        train=False,
                                        download=True,
                                        transform=data_transform['val'])

    # Write the index -> class-name mapping once; letting every rank write
    # the same file concurrently risks a corrupted/truncated JSON.
    if dist.get_rank() == 0:
        class_dict = dict((val, key) for key, val in train_dataset.class_to_idx.items())
        with open('cifar10_class_indices.json', 'w') as json_file:
            json_file.write(json.dumps(class_dict, indent=4))

    # Load datasets; the DistributedSampler shards the training set per rank.
    batch_size = args.batch_size
    sampler = DistributedSampler(train_dataset)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              sampler=sampler,
                              num_workers=2)
    validate_loader = DataLoader(validate_dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=2)

    # Choose the corresponding model
    choose_model = args.choose_model
    assert choose_model in available_models, "Error: Model {} not in available_models".format(choose_model)

    model = available_models.get(choose_model)()
    model.to(device)
    if torch.cuda.device_count() > 1:
        model = DDP(model, device_ids=[local_rank], output_device=local_rank)
    if dist.get_rank() == 0:
        print(model)

    # Define criterion and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    epochs = args.epochs
    best_acc = 0.0
    best_epoch = 0
    save_path = './ckpt/{}'.format(choose_model)
    if dist.get_rank() == 0:
        # makedirs creates ./ckpt as well; the original os.mkdir failed
        # whenever the parent directory did not already exist.
        os.makedirs(save_path, exist_ok=True)

    train_steps = len(train_loader)
    for epoch in range(epochs):
        # Without set_epoch, DistributedSampler reuses the same RNG seed and
        # every epoch replays an identical shuffle across all ranks.
        sampler.set_epoch(epoch)

        # train
        model.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = model(images.to(device))
            loss = criterion(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            train_bar.desc = "train epoch[{}/{}] loss:{:.4f}".format(epoch + 1, epochs, loss.item())

        # update learning rate
        scheduler.step()

        # validate (rank 0 only; other ranks proceed to the next epoch)
        if dist.get_rank() == 0:
            model.eval()
            acc = 0  # accumulate accurate number / epoch
            with torch.no_grad():
                val_bar = tqdm(validate_loader, file=sys.stdout)
                for val_data in val_bar:
                    val_images, val_labels = val_data
                    outputs = model(val_images.to(device))
                    predict_y = torch.max(outputs, dim=1)[1]
                    acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

            val_accurate = acc / len(validate_dataset)
            print("[epoch {:>3d}] train_loss: {:.4f}  val_accuracy: {:.4f}".format(epoch + 1, running_loss / train_steps, val_accurate))

            if val_accurate > best_acc:
                best_acc = val_accurate
                best_epoch = epoch
                # NOTE(review): checkpoints are only written once accuracy
                # exceeds 0.9 — confirm this threshold is intentional.
                if best_acc > 0.9:
                    # Save the *unwrapped* module so the checkpoint keys have
                    # no "module." prefix and load cleanly in pred().
                    to_save = model.module if isinstance(model, DDP) else model
                    torch.save(to_save.state_dict(), os.path.join(save_path, "best.pth"))

    print(f'best acc: {best_acc}, in epoch {best_epoch}')
    print('Finished Training')

    # Release NCCL resources cleanly before the process exits.
    dist.destroy_process_group()


def parse_args():
    """
    Parse command-line arguments for training/evaluation.

    Returns:
        argparse.Namespace with epochs, batch_size, lr, choose_model, local_rank.
    """
    import argparse
    parser = argparse.ArgumentParser(description="CIFAR-10 DDP training")

    # The original defaults were all None, which crashed downstream
    # (range(None), DataLoader(batch_size=None), model-registry assert).
    # These defaults make a bare invocation runnable.
    parser.add_argument('--epochs', default=200, type=int,
                        help='number of training epochs')
    parser.add_argument('--batch_size', default=128, type=int,
                        help='per-process batch size')
    parser.add_argument('--lr', default=0.1, type=float,
                        help='initial SGD learning rate')
    parser.add_argument('--choose_model', default='densenet_cifar',
                        help='model key registered in available_models')
    # type=int added: the old default of -1 was an int but a CLI-supplied
    # value parsed as str. Kept for torch.distributed.launch compatibility.
    parser.add_argument('--local_rank', default=-1, type=int)

    args = parser.parse_args()
    print(args)
    return args


if __name__ == '__main__':
    args = parse_args()
    # Train + validate under DDP (launch with torchrun).
    main(args)
    # pred(args)  # uncomment to evaluate the saved best checkpoint instead