import math
import timeit
import operator
import os
import random
import shutil
import time
import warnings
import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from common.utils.log import log
from common.utils.torchsummary import summary
from common.utils.check_point import save_checkpoint
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
from common.utils.speed_loader import MultiEpochsDataLoader, CudaDataLoader
from common.lib.module.bnmomentum import BNMomentumScheduler
import torchvision.transforms as transforms
import dataset.data_utils as d_utils
#from models.pointnet2_msg_sem import PointNet2SemSegMSG
#from models.pointnet2_ssg_sem import PointNet2SemSegSSG
#from models.densepoint_cls_L6_k24_g2 import DensePoint
# from models.two_stage_graspnet import Two_Stage_GraspNet
from models.grasp_suction_net import Two_Stage_GraspNet_Combine

from dataset.BindatasetLoader import Bindataset
from dataset.SuctionBindatasetLoader import SuctionBindataset
from dataset.ApproachBindatasetLoader import ApproachBindataset
from dataset.Grasp_SuctionBindatasetLoader import Grasp_SuctionBindataset

# from core.validation import validate
# from core.train import train
from core.validation_gs import validate
from core.train_gs import train
#torch.set_default_tensor_type(torch.DoubleTensor)


class DataLoaderX(DataLoader):
    """DataLoader whose iterator prefetches batches in a background thread."""

    def __iter__(self):
        # Wrap the stock iterator so the next batch is being produced
        # while the current one is consumed by the training loop.
        base_iterator = super().__iter__()
        return BackgroundGenerator(base_iterator)

def set_seed(worker_id, base=0, rank=0, workers=4):
    """Seed numpy's RNG inside a DataLoader worker process.

    Derives a distinct seed per worker/rank from the parent process's
    current numpy seed so parallel workers do not draw identical streams.

    Args:
        worker_id: 0-based index of the DataLoader worker.
        base: extra offset folded into the seed (e.g. the run's config
            seed). Fix: this parameter was previously accepted but
            silently ignored, although callers pass it via ``partial``.
        rank: distributed process rank.
        workers: workers per rank, used to de-alias seeds across ranks.
    """
    # np.random.seed only accepts values in [0, 2**32), so wrap the sum.
    seed_ = (np.random.get_state()[1][0] + base + worker_id + workers * rank) % (2 ** 32)
    np.random.seed(seed_)


def set_torch_seed(seed):
    """Seed python's, numpy's and torch's RNGs with the same value.

    Args:
        seed: non-negative integer seed.

    Raises:
        ValueError: if ``seed`` is negative.
    """
    # Validate explicitly: the old ``assert`` is stripped under `python -O`.
    if seed < 0:
        raise ValueError('seed must be >= 0, got {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


def main_worker(gpu, ngpu, config):
    """Run training (and periodic validation) for a single worker process.

    Builds the model, datasets, loaders, optimizer and LR/BN schedulers
    from ``config``, then loops over epochs calling ``train``/``validate``
    and checkpointing on rank 0.

    Args:
        gpu: rank assigned to this process; also used as the CUDA device id.
        ngpu: total number of processes (world size).
        config: mutable namespace-like object with all run hyper-parameters
            (several fields are rewritten in place here).
    """
    if config.test_mode:
        config.print_freq = 1
    config.rank = gpu
    config.world_size = ngpu

    print('world_size is: ', config.world_size, 'rank: ', config.rank)

    if config.rank is not None:
        print("Use GPU: {} for training".format(config.rank))

    if config.distributed:
        torch.cuda.set_device(config.rank)
        dist_backend = 'nccl'
        dist_url = 'tcp://' + '127.0.0.1' + ':' + str(config.port)

        dist.init_process_group(
            backend=dist_backend,
            init_method=dist_url,
            world_size=config.world_size,
            rank=config.rank,
            group_name='mtorch'
        )
    # A positive seed signals a reproducible run: trade cudnn autotuning
    # speed for determinism.
    if config.seed > 0:
        cudnn.benchmark = False
        cudnn.deterministic = True
    else:
        cudnn.benchmark = True
        cudnn.deterministic = False
    if config.set_torch_seed:
        set_torch_seed(config.seed)
    # SECURITY NOTE(review): eval() on a config string executes arbitrary
    # code; only safe with trusted config files.
    model = eval(config.model)(config)
    if config.load_pretrained_back_bone:
        pretrained_model_dict = torch.load(config.pretrained_back_bone_file_path, map_location='cpu')['state_dict']
        model.back_bone.load_state_dict(pretrained_model_dict)

    # BN momentum schedule: starts at 0.5 and halves every 2 epochs,
    # clamped at a floor of 0.01 (the 'max' name is historical).
    bn_momentum_init = 0.5
    bn_momentum_max = 0.01
    bn_decay_rate = 0.5
    bn_decay_step = 2
    bn_lbmd = lambda it: max(bn_momentum_init * bn_decay_rate ** (int(it / bn_decay_step)), bn_momentum_max)
    bn_scheduler = BNMomentumScheduler(model, bn_lambda=bn_lbmd, last_epoch=config.start_epoch - 1)
    if not config.use_normal:
        config.points_feature_dim = 0
    # Network summary is currently disabled; kept so rank 0 still logs it.
    network_summary = None
    if config.rank == 0:
        logger = log('log', config)
        logger.info(network_summary)
    else:
        logger = None

    # Scale the base learning rate linearly with the global batch size.
    if config.distributed:
        config.base_lr = config.base_lr * config.world_size * config.batch_size / 32

    if config.rank == 0:
        logger.info(config)
        logger.info('base_lr is : {}'.format(config.base_lr))

    # initialize the dataset with transforms
    print('init the dataset!!!')
    if not config.non_uniform_sampling:
        config.num_points *= config.sample_rate
        if config.rank == 0:
            logger.info('use uniform_sampling')

    train_dataset = eval(config.dataset)(
        dataset_dir=config.train_dataset_dir,
        num_points=config.num_points,
        transforms=transforms.Compose([
            d_utils.PointcloudToTensor(),
        ]),
        mode='train',
        use_normal=config.use_normal,
        platform=config.platform,
        non_uniform_sampling=config.non_uniform_sampling,
    )

    val_dataset = eval(config.dataset)(
        dataset_dir=config.test_dataset_dir,
        num_points=config.num_points,
        transforms=transforms.Compose([
            d_utils.PointcloudToTensor(),
        ]),
        mode='validate',
        use_normal=config.use_normal,
        platform=config.platform,
        non_uniform_sampling=config.non_uniform_sampling,
    )

    # DataLoader expects worker_init_fn to be a callable or None; the old
    # fallback of ``False`` would crash every worker that tried to call it.
    if config.set_torch_seed:
        worker_init_fn = partial(set_seed, base=config.seed, rank=config.rank, workers=config.workers)
    else:
        worker_init_fn = None

    if config.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset, num_replicas=config.world_size, rank=config.rank)
        val_sampler = torch.utils.data.distributed.DistributedSampler(
            val_dataset, num_replicas=config.world_size, rank=config.rank)
    else:
        # Fix: samplers were previously left undefined on this path, which
        # raised a NameError when the loaders below were built.
        train_sampler = None
        val_sampler = None
        config.batch_size = config.batch_size * config.world_size
        config.workers *= config.world_size

    # NOTE(review): parses only the minor version and assumes a
    # 'major.minor[.patch]' string; torch >= 2.x would take the
    # MultiEpochsDataLoader branch — confirm this is intended.
    torch_version = int(torch.__version__.split('.')[1])
    if torch_version > 3:
        print('use DataLoader')
        loader = DataLoader
    else:
        print('use MultiEpochDataLoader')
        loader = MultiEpochsDataLoader

    train_loader = loader(
        train_dataset, batch_size=config.batch_size, shuffle=(train_sampler is None),
        drop_last=False, num_workers=config.workers, pin_memory=True, sampler=train_sampler, worker_init_fn=worker_init_fn)

    val_loader = loader(
        val_dataset, batch_size=config.batch_size, shuffle=False,
        drop_last=False, num_workers=config.workers, pin_memory=True, sampler=val_sampler,
        worker_init_fn=worker_init_fn
        )

    # Optionally resume: checkpoint keys are re-prefixed with 'model.' to
    # match this wrapper's state_dict layout.
    if config.resume:
        pretrained_dict = torch.load('{}'.format(config.resume_model), map_location='cpu')
        new_dict = {}
        for key in pretrained_dict:
            new_dict['model.' + key] = pretrained_dict[key]
        model.load_state_dict(new_dict, strict=True)
        print('resume from model {}'.format(config.resume_model))

    # put the model on gpu; convert BN layers if synchronized BN is requested
    model.cuda()
    if config.syncBN:
        print('syncBn is used')
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # set optimizer over trainable parameters only
    if config.optimizer == 'SGD':
        optimizer = torch.optim.SGD(
                                filter(lambda p: p.requires_grad, model.parameters()),
                                config.base_lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay,
                                nesterov=True)
    elif config.optimizer == "Adam":
        optimizer = torch.optim.Adam(
                                filter(lambda p: p.requires_grad, model.parameters()),
                                config.base_lr,
                                betas=(0.9, 0.999), eps=1e-8,
                                weight_decay=config.weight_decay,
                                amsgrad=False)
    else:
        # Fix: an unknown optimizer previously left ``optimizer`` undefined
        # and failed later with a confusing NameError.
        raise TypeError('{} optimizer is not support now.'.format(config.optimizer))

    # set for the lr_type and warm_up method
    if not config.warm_up:
        config.warmup_epoch = 0
    if config.lr_type == 'cosine':
        after_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, config.epochs - config.warmup_epoch)
    elif config.lr_type == 'multi_step':
        after_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=config.milestones, gamma=config.gamma)
    else:
        raise TypeError('{} lr_type is not support now.'.format(config.lr_type))
    if config.warm_up:
        # Fail loudly: the old ``assert False`` is a no-op under `python -O`,
        # which would have left ``scheduler`` undefined below.
        raise NotImplementedError('warm up is not supported')
    print('do not use warm up')
    scheduler = after_scheduler

    if config.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[config.rank], find_unused_parameters=True)

    if config.evaluate:
        validate(val_loader, model, 0, config, logger)
        return

    for epoch in range(config.start_epoch, config.epochs):

        epoch_time = time.time()
        if config.distributed:
            # Reshuffle the distributed sampler differently each epoch.
            train_sampler.set_epoch(epoch)

        # train for one epoch
        bn_scheduler.step()
        train(train_loader, model, optimizer, scheduler,
                  epoch, config, logger)
        scheduler.step()

        epoch_time = time.time() - epoch_time
        if config.rank == 0:
            logger.info('Epoch{0} take {1:.2f}s'.format(epoch, epoch_time))
        # evaluate on validation set
        if epoch % config.val_freq == 0:
            singleval_time = time.time()
            validate(val_loader, model, epoch, config, logger)
            singleval_time = time.time() - singleval_time
            if config.rank == 0:
                logger.info('singleval_time take {}'.format(singleval_time))

                savecheckpoint_time = time.time()
                save_checkpoint(model, epoch, optimizer, config)
                savecheckpoint_time = time.time() - savecheckpoint_time
                # Fix: format spec was '{:2f}' (min field width 2); the
                # intended two-decimal form is '{:.2f}'.
                logger.info('save checkpoint take {:.2f}'.format(
                    savecheckpoint_time))
