import argparse
import math
import os
import shutil
import sys
import tempfile
import time
from datetime import datetime
from enum import Enum
from functools import wraps

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from loguru import logger
from torch.multiprocessing import Process
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from torchvision.models.resnet import resnet50
from tqdm import tqdm


from utils import reduce_value, is_main_process, init_distributed_mode, cleanup


# Append INFO-level logs to a per-day file; enqueue=True routes records through a
# queue so logging is safe across the multiple training processes spawned below.
logger.add(f"../../log/{str(datetime.today())[:10]}_ResNet_train.log", level='INFO', encoding="utf-8", enqueue=True)


def time_it(func):
    """Decorator that prints the wall-clock runtime of each call to *func*.

    Uses functools.wraps so the decorated function keeps its original
    __name__/__doc__ (the original version silently discarded them).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"Function {func.__name__} took {(end_time - start_time) :.3f} seconds to run.")
        return result
    return wrapper


def main_spawn(rank, args):
    """Per-process entry point for DistributedDataParallel training.

    Initializes the NCCL process group for this ``rank``, builds the
    train/val dataloaders (real ImageFolder data or FakeData when
    ``args.dummy``), wraps a ResNet-50 in DDP, and runs the
    train/evaluate loop for ``args.epochs`` epochs. Rank 0 additionally
    writes TensorBoard scalars and saves checkpoints.

    Args:
        rank: process index in [0, args.world_size), also used as the CUDA
            device index.
        args: parsed command-line namespace from ``parse_option``; mutated
            in place (rank, distributed flag, scaled lr, backend).
    """
    if torch.cuda.is_available() is False:
        raise EnvironmentError("not find GPU device for training.")

    # --- initialize the per-process distributed environment: start ---
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"

    args.distributed = True
    args.rank = rank

    torch.cuda.set_device(args.rank)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)

    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                            world_size=args.world_size, rank=args.rank)

    dist.barrier()
    # --- initialize the per-process distributed environment: end ---

    device = args.device
    # Scale the learning rate with the number of parallel GPUs.
    args.lr *= args.world_size
    checkpoint_path = ""

    if args.rank in {-1, 0}:  # only the first process prints info and owns TensorBoard
        print(args)
        print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
        tb_writer = SummaryWriter()

    # Data loading code
    if args.dummy:
        print("=> Dummy data is used!")
        train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor())
        val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor())
    else:
        traindir = os.path.join(args.data_path, 'train')
        valdir = os.path.join(args.data_path, 'test')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        val_dataset = datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    # These samplers shard the datasets across GPUs automatically.
    train_sampler = DistributedSampler(train_dataset)
    val_sampler = DistributedSampler(val_dataset)

    # No shuffle=True here: sampler and shuffle are mutually exclusive,
    # and DistributedSampler already shuffles per epoch.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size,
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size,
        num_workers=args.workers, pin_memory=True, sampler=val_sampler)

    if args.dummy:
        # FakeData has no .classes attribute; it generates labels in [0, 1000).
        num_classes = 1000
    else:
        # save class names, e.g.: daisy,dandelion,roses,sunflowers,tulips
        classes_list = train_dataset.classes
        num_classes = len(classes_list)
        if args.rank == 0:  # only one process writes, to avoid concurrent writes racing
            with open('../../datasets/flowers/classes.txt', 'w') as text_cls:
                classes_str = ','.join(classes_list)
                text_cls.write(classes_str)

    train_num = len(train_loader)  # number of per-rank batches, i.e. samples / batch_size
    val_num = len(val_loader)
    logger.info(f"using {train_num} batchs for training, {val_num} batchs for validation.")

    # Head size follows the dataset (was hard-coded to 5, which broke --dummy runs).
    model = resnet50(num_classes=num_classes)
    model.to(device)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.rank])

    optimizer = optim.Adam(model.parameters(), args.lr)
    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf  # cosine
    # scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    criterion = nn.CrossEntropyLoss().to(device)

    best_acc1 = 0

    for epoch in range(args.epochs):  # loop over the dataset multiple times

        # train for one epoch
        mean_loss = train(train_loader, model, criterion, optimizer, device, epoch)

        # evaluate on validation set
        sum_num = evaluate(val_loader, model, device)
        # NOTE(review): total_size includes the samples DistributedSampler pads to make
        # the split even, so acc1 can be very slightly off from the true accuracy.
        acc1 = sum_num / val_sampler.total_size

        # scheduler.step()
        # remember best acc@1 and save checkpoint on the main process only
        if args.rank == 0:   # torch.distributed.get_rank() == 0
            logger.info("[epoch {}] accuracy: {}".format(epoch, round(acc1, 3)))
            tags = ["loss", "accuracy", "learning_rate"]
            tb_writer.add_scalar(tags[0], mean_loss, epoch)
            tb_writer.add_scalar(tags[1], acc1, epoch)
            tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)

            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
            # .module unwraps the DDP wrapper so the checkpoint loads without DDP.
            save_checkpoint({'epoch': epoch + 1,
                             'state_dict': model.module.state_dict(),
                             'best_acc1': best_acc1,
                             #    'optimizer' : optimizer.state_dict(),
                             #     'scheduler' : scheduler.state_dict()
                             },
                            is_best, args.save_path)

    # remove the temporary cache file, if any was created
    if args.rank == 0:
        if os.path.exists(checkpoint_path) is True:
            os.remove(checkpoint_path)
        logger.info('Finished Training')
    cleanup()


def train(train_loader, model, criterion, optimizer, device, epoch):
    """Train *model* for one epoch.

    Runs forward/backward/step per batch, averages the loss across all
    distributed processes via ``reduce_value``, and aborts the program on a
    non-finite loss.

    Returns:
        float: running mean of the process-averaged loss over the epoch.
    """
    model.train()
    mean_loss = torch.zeros(1).to(device)
    optimizer.zero_grad()

    with tqdm(train_loader, unit="batch") as tepoch:
        for step, (images, labels) in enumerate(tepoch):
            tepoch.set_description(f"Epoch {epoch}")

            pred = model(images.to(device))
            loss = criterion(pred, labels.to(device))
            loss.backward()
            # Average this batch's loss across processes (for logging only;
            # gradients were already produced by backward()).
            loss = reduce_value(loss, average=True)
            mean_loss = (mean_loss * step + loss.detach()) / (step + 1)  # update mean losses

            if not torch.isfinite(loss):
                print('WARNING: non-finite loss, ending training ', loss)
                sys.exit(1)

            optimizer.step()
            optimizer.zero_grad()

            # Show per-batch and running mean loss on the progress bar
            # (the old code set a useless .desc attribute on the raw DataLoader).
            tepoch.set_postfix(loss=loss.item(), mean_loss=round(mean_loss.item(), 3))

        # wait until every process has finished its GPU work for this epoch
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        return mean_loss.item()


@torch.no_grad()
def evaluate(val_loader, model, device):
    """Return the number of correct top-1 predictions on *val_loader*,
    summed across all distributed processes."""
    model.eval()

    # accumulator for the number of correctly classified samples
    correct = torch.zeros(1).to(device)

    for images, labels in val_loader:
        logits = model(images.to(device))
        predicted = logits.max(dim=1)[1]
        correct += (predicted == labels.to(device)).sum()

    # wait for all GPU work to finish before the cross-process reduction
    if device != torch.device("cpu"):
        torch.cuda.synchronize(device)

    correct = reduce_value(correct, average=False)

    return correct.item()


def save_checkpoint(state, is_best, filename='checkpoint.pth'):
    """Serialize *state* to *filename*; additionally mirror it to the
    fixed best-model path when *is_best* is true."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, '../../models/ResNet_best.pth')


def parse_option():
    """Build and parse the command-line arguments for ResNet DDP training.

    Returns:
        argparse.Namespace with all training options.
    """

    def _str2bool(value):
        # argparse's type=bool is broken: bool("False") is True because any
        # non-empty string is truthy. Parse the usual spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', '1', 'yes', 'y'):
            return True
        if value.lower() in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')

    parser = argparse.ArgumentParser('ResNet')
    parser.add_argument('--dummy', type=_str2bool, help='Whether data needs to be generated', default=False)
    parser.add_argument('--epochs', type=int, help='the number of epoch', default=3)
    parser.add_argument('--gpu_id', type=str, default='5,6')   # add for multi
    parser.add_argument('--batch_size', type=int, help='batch_size', default=32)
    parser.add_argument("--workers", type=int, help="the number of workers", default=8)
    parser.add_argument("--data_path", type=str, help="the path of model saved",
                        default='../../datasets/flowers')
    parser.add_argument("--save_path", type=str, help="the path of model saved",
                        default='../../models/ResNet.pth')
    parser.add_argument('--lr', default=0.0002, type=float, metavar='LR',
                        help='initial learning rate', dest='lr')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--print_freq', default=30, type=int, metavar='W', help='Print frequency')
    # whether to enable SyncBatchNorm
    parser.add_argument('--syncBN', type=_str2bool, default=False)
    parser.add_argument('--weights', type=str, default='',
                        help='initial weights path')
    parser.add_argument('--freeze_layers', type=_str2bool, default=False)
    # Do not change this; it is assigned automatically per process.
    parser.add_argument('--device', default='cuda', help='device id (i.e. 0 or 0,1 or cpu)')
    # Number of processes (not threads) to spawn, one per GPU.
    parser.add_argument('--world_size', default=2, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    args = parser.parse_args()  # alternatively: args, _ = parser.parse_known_args()
    return args


@time_it
def main():
    """Spawn one training worker per rank and block until all finish."""
    opts = parse_option()

    workers = [Process(target=main_spawn, args=(rank, opts))
               for rank in range(opts.world_size)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # equivalent alternative:
    # mp.spawn(main_spawn, args=(opts,), nprocs=opts.world_size, join=True)




# Script entry point: parse options and launch the distributed training workers.
if __name__ == '__main__':

    main()
