import os
import time
import numpy as np



import argparse
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms, models


from core.c_dataset import CassavaDataset
from core.c_utils import model_test, model_train, CenterLoss


def _str2bool(value):
    """Convert a command-line string to a real boolean.

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is
    ``True`` because every non-empty string is truthy, so flags declared
    that way could never be switched off from the command line.  This
    converter accepts the usual spellings explicitly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


parser = argparse.ArgumentParser(description='PyTorch Kaggle_cassava resnet50')
parser.add_argument('--epochs', type=int, default=70,
                    help='the epochs of this run')
parser.add_argument('--n_class', type=int, default=5,
                    help='the number of cassava disease')
parser.add_argument('--lr', type=float, default=0.0015,
                    help='initial learning rate')
# ---------------------------
# model
# ---------------------------
parser.add_argument('--root_path', type=str, default='/home/handewei/data/cassava-leaf/',
                    help='dir of the dataset')
parser.add_argument('--batch_size', type=int, default=8,
                    help='the num of img in a batch')
parser.add_argument('--num_workers', type=int, default=3,
                    help='cpu num_workers')
parser.add_argument('--log_interval', type=int, default=20,
                    help='how many batches to wait before logging training status (default: 20)')
# ---------------------------
# GPU
# ---------------------------
# NOTE: these flags previously used type=bool and therefore could never be
# disabled from the CLI; _str2bool keeps the same defaults but makes
# "--use_gpu false" actually work.
parser.add_argument('--use_gpu', type=_str2bool, default=True,
                    help='whether to run on GPU')
# NOTE(review): "gpu_avaiable" is a typo for "gpu_available", but the option
# name is part of the CLI interface, so it is kept for compatibility.
parser.add_argument('--gpu_avaiable', type=str, default='0,1',
                    help='the gpu used')
parser.add_argument('--distributed', type=_str2bool, default=True,
                    help='use pytorch DDP')

# Filled in by torch.distributed.launch / torchrun for each worker process.
parser.add_argument("--local_rank", type=int, default=0)

args = parser.parse_args()
print(args)


# Transformations for both the training and testing data
# Preprocessing pipelines for training and evaluation.  Training adds data
# augmentation (random crop, colour jitter, rotation, horizontal flip);
# evaluation only resizes and normalises.  Both use the standard ImageNet
# channel statistics.
_imagenet_mean = (0.485, 0.456, 0.406)
_imagenet_std = (0.229, 0.224, 0.225)

train_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop(224),
    transforms.ColorJitter(0.2, 0.2, 0.2),
    transforms.RandomRotation(30),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(_imagenet_mean, _imagenet_std),
])

test_transforms = transforms.Compose([
    transforms.Resize(224),
    # transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(_imagenet_mean, _imagenet_std),
])

if __name__ == '__main__':
    os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_avaiable

    np.set_printoptions(suppress=True)

    # Pick the device up front so model.to(device) works on every code path.
    # Previously `device` was only bound inside the `args.distributed` branch,
    # which raised NameError when running without DDP.
    if args.use_gpu and torch.cuda.is_available():
        device = torch.device('cuda:{}'.format(args.local_rank))
    else:
        device = torch.device('cpu')

    if args.distributed:
        # NOTE(review): benchmark=True (speed) and deterministic=True
        # (reproducibility) pull in opposite directions; cuDNN honours
        # deterministic, but confirm this combination is intended.
        cudnn.benchmark = True      # True can speed up fixed-shape workloads
        cudnn.deterministic = True  # True avoids run-to-run nondeterminism
        cudnn.enabled = True

        # Rendezvous info (MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE) is
        # expected to come from the launcher (torch.distributed.launch /
        # torchrun) via environment variables.
        dist.init_process_group(backend="nccl")

        print("args.local_rank", args.local_rank)

    # Honour --n_class instead of the previously hard-coded 5 classes.
    model = models.resnet50(pretrained=False, num_classes=args.n_class)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    if args.distributed:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        torch.cuda.set_device(args.local_rank)  # pin this process to its GPU
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)

    criterion = nn.CrossEntropyLoss()

    # train data
    trainSet = CassavaDataset(args.root_path, istrain=True, img_list_file="train_nohead.csv", transform=train_transforms)

    if args.distributed:
        train_sampler = DistributedSampler(trainSet)
    else:
        train_sampler = None

    # shuffle must be False whenever a sampler is supplied (DataLoader raises
    # otherwise); in the non-distributed case we shuffle here instead, which
    # the original code never did.
    trainloader = torch.utils.data.DataLoader(trainSet,
                                              batch_size=args.batch_size,
                                              shuffle=(train_sampler is None),
                                              num_workers=args.num_workers,
                                              pin_memory=True,
                                              drop_last=True,
                                              sampler=train_sampler)

    # test data
    testSet = CassavaDataset(args.root_path, istrain=False, img_list_file="test_nohead.csv", transform=test_transforms)

    if args.distributed:
        test_sampler = DistributedSampler(testSet)
    else:
        test_sampler = None

    testloader = torch.utils.data.DataLoader(testSet,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.num_workers,
                                             pin_memory=True,
                                             drop_last=True,
                                             sampler=test_sampler)

    # train and test
    for epoch in range(1, args.epochs + 1):
        epoch_time = time.time()

        # Re-seed the distributed sampler every epoch so each epoch sees a
        # different shuffling order; without this DistributedSampler repeats
        # the same permutation for the whole run.
        if train_sampler is not None:
            train_sampler.set_epoch(epoch)

        model_train(model, trainloader, optimizer, epoch, criterion, args)

        model_test(model, testloader, epoch, args)

        # NOTE(review): timing is logged only on rank 1; rank 0 is the usual
        # choice for single-process logging -- confirm this is intentional.
        if args.local_rank == 1:
            print('epoch: ' + str(epoch) + ', total time used: ', (time.time() - epoch_time))