import argparse
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from PIL import Image
import torch.nn.functional as F
from torchvision.models.resnet import resnet50
import glob
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
import os 

def inference(rank, args):
    """Run distributed evaluation of a ResNet-50 flower classifier on one rank.

    Spawned once per process by ``main``; ``rank`` doubles as the local GPU
    index. Initializes the NCCL process group, evaluates the model on this
    rank's shard of the test set, and prints the number of correctly
    predicted samples aggregated across all ranks.

    Args:
        rank: process rank, also used as the CUDA device index.
        args: parsed command-line namespace from ``parse_option``; mutated
            in place (``distributed``, ``rank``, ``dist_backend`` are set).
    """
    args.distributed = True
    args.rank = rank

    # --- initialize this process's distributed environment: start ---
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    torch.cuda.set_device(args.rank)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)

    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                            world_size=args.world_size, rank=args.rank)

    dist.barrier()
    # --- initialize this process's distributed environment: end ---

    # Resolve the device string ('cuda' / 'cpu') to a real torch.device so the
    # CPU check below compares like types. (The original kept the raw string,
    # making `device != torch.device("cpu")` always True.)
    device = torch.device(args.device)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valdir = os.path.join(args.data_path, 'test')
    val_dataset = datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    # This sampler automatically shards the dataset across the GPUs/ranks.
    val_sampler = DistributedSampler(val_dataset)

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size,
        num_workers=args.workers, pin_memory=True, sampler=val_sampler)

    with open('../../datasets/flowers/classes.txt', 'r') as text_cls:
        classes_str = text_cls.read()
    classes = classes_str.split(',')  # classes = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']

    model = resnet50(num_classes=5)
    model.load_state_dict(torch.load('../../models/ResNet_best.pth', map_location='cpu')['state_dict'])
    model.to(device)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.rank])

    model.eval()

    # Count of correctly predicted samples on this rank.
    sum_num = torch.zeros(1).to(device)

    # Inference only: disable autograd to avoid building graphs and save memory.
    with torch.no_grad():
        for step, data in enumerate(val_loader):
            images, labels = data
            pred = model(images.to(device))
            pred = torch.max(pred, dim=1)[1]
            sum_num += torch.eq(pred, labels.to(device)).sum()

    # Wait until this rank's GPU work has finished.
    if device != torch.device("cpu"):
        torch.cuda.synchronize(device)

    # Aggregate the per-rank counts so every rank holds the global total
    # (this was the intent of the commented-out reduce_value helper below).
    dist.all_reduce(sum_num)
    print(sum_num)

    # Release the process group's resources before the process exits.
    dist.destroy_process_group()

# def reduce_value(value, average=True):
#     world_size = get_world_size()
#     if world_size < 2:  # single-GPU case
#         return value
#     with torch.no_grad():
#         dist.all_reduce(value)
#         if average:
#             value /= world_size
#         return value

# def get_world_size():
#     if not is_dist_avail_and_initialized():
#         return 1
#     return dist.get_world_size()

# def is_dist_avail_and_initialized():
#     """Check whether a distributed environment is available and initialized."""
#     if not dist.is_available():
#         return False
#     if not dist.is_initialized():
#         return False
#     return True


def parse_option():
    """Build and parse the command-line options for distributed inference.

    Returns:
        argparse.Namespace: the parsed arguments. All flags keep their
        original names, types, and defaults.
    """
    parser = argparse.ArgumentParser('ResNet')
    parser.add_argument('--epochs', type=int, help='the number of epochs', default=3)
    parser.add_argument('--gpu_id', type=str, default='5,6')   # add for multi
    parser.add_argument('--batch_size', type=int, help='batch size per process', default=32)
    parser.add_argument("--workers", type=int, help="the number of data-loading workers", default=8)
    # NOTE(review): help text was copy-pasted from --save_path; this flag is the dataset root.
    parser.add_argument("--data_path", type=str, help="the root path of the dataset",
                        default='../../datasets/flowers')
    parser.add_argument("--save_path", type=str, help="the path of model saved",
                        default='../../models/ResNet.pth')
    # Do not change this option manually; it is assigned per process at runtime.
    parser.add_argument('--device', default='cuda', help='device id (i.e. 0 or 0,1 or cpu)')
    # Number of spawned worker processes (not threads); one per rank.
    parser.add_argument('--world_size', default=2, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    args = parser.parse_args()  # alternatively: args, _ = parser.parse_known_args()
    return args

def main():
    """Spawn one worker process per rank and wait for all of them to finish."""
    cfg = parse_option()
    workers = [Process(target=inference, args=(r, cfg)) for r in range(cfg.world_size)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()


if __name__ == '__main__':
    main()