import argparse
import datetime
import os
import traceback

import torch
import torch.nn as nn
from torch import distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision.utils import make_grid
from tqdm import tqdm, trange

from data.cocostuff_loader import *
from data.vg import *
from models import *
from trainer import *
from effinet_sn import *
from utils import *
from model import CombineDiscriminator64, ResnetGenerator64, ResnetGenerator128, CombineDiscriminator128

# cuDNN autotuning is disabled (benchmark=False) while determinism is also off;
# kernel selection is left to cuDNN's defaults for variable-size inputs.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False


def get_dataset(dataset, img_size, num_o, ColorJitter, dir_dataset="./datasets"):
    """Build the training dataset for the requested benchmark.

    Args:
        dataset: dataset name, either ``"coco"`` or ``"vg"``.
        img_size: square image side length in pixels.
        num_o: maximum number of objects kept per image.
        ColorJitter: whether to apply color-jitter augmentation.
        dir_dataset: root directory holding the ``coco/`` / ``vg/`` folders.

    Returns:
        A ``CocoSceneGraphDataset`` or ``VgSceneGraphDataset`` instance.

    Raises:
        ValueError: if ``dataset`` is not one of the supported names.
    """
    dir_dataset = os.path.abspath(dir_dataset)
    if dataset == "coco":
        data = CocoSceneGraphDataset(image_dir=f'{dir_dataset}/coco/train2017/',
                                        instances_json=f'{dir_dataset}/coco/annotations/instances_train2017.json',
                                        stuff_json=f'{dir_dataset}/coco/annotations/stuff_train2017.json',
                                        stuff_only=True, image_size=(img_size, img_size),
                                        max_objects_per_image=num_o, left_right_flip=True, ColorJitter=ColorJitter)
    elif dataset == 'vg':
        with open(f"{dir_dataset}/vg/vocab.json", "r") as read_file:
            vocab = json.load(read_file)
        data = VgSceneGraphDataset(vocab=vocab, h5_path=f'{dir_dataset}/vg/train.h5',
                                      image_dir=f'{dir_dataset}/vg/images/',
                                      image_size=(img_size, img_size), max_objects=num_o, left_right_flip=True,
                                      ColorJitter=ColorJitter)
    else:
        # BUG FIX: an unknown name previously fell through both branches and
        # raised a confusing NameError on the unbound ``data`` at return.
        raise ValueError(f"Unknown dataset {dataset!r}; expected 'coco' or 'vg'.")
    return data

def initialize_writer_logger(args, flag_single):
    """Create the run directory, summary writer and logger on the recording rank.

    Args:
        args: parsed CLI namespace; only ``args.out_path`` is read here.
        flag_single: True on the single rank responsible for logging and
            checkpointing; all other ranks get ``None`` placeholders.

    Returns:
        Tuple ``(writer, logger, out_path, model_path)``; every element is
        ``None`` when ``flag_single`` is False.
    """
    if not flag_single:
        return None, None, None, None

    # Timestamped run directory name, e.g. "2021-12-12_23:23:02".
    # BUG FIX: the file does "import datetime" (the module), so the previous
    # bare datetime.now() raised AttributeError.
    now = str(datetime.datetime.now())[:-7].replace(" ", "_")
    out_path = os.path.join(args.out_path, now)

    if not os.path.exists(out_path):
        print(f'mkdir {os.path.abspath(out_path)}')
        os.makedirs(out_path)

    model_path = os.path.join(out_path, 'model/')
    if not os.path.exists(model_path):
        os.mkdir(model_path)

    writer_path = os.path.join(out_path, 'log')
    writer = SummaryWriterCount(writer_path)

    logger = setup_logger(f"CLAMA/{now}", out_path, 0)

    # BUG FIX: torch.distributed is imported under the alias ``dist``; the
    # previous ``distributed.is_initialized()`` was an undefined name and
    # raised NameError whenever this branch executed under DDP.
    if dist.is_initialized():
        print(f"Rank {dist.get_rank()} will do records")
    return writer, logger, out_path, model_path

def initialize_train_loader(args, num_o):
    """Build the training dataloader, distributed-aware, wrapped as an
    iteration-bounded iterator.

    Args:
        args: parsed CLI namespace (dataset, img_size, batch_size,
            dir_dataset, total_iter are read).
        num_o: maximum objects per image for the dataset.

    Returns:
        Tuple ``(train_dataloader, train_sampler)`` where the loader is an
        iterator limited to ``args.total_iter`` batches, and the sampler is
        a ``DistributedSampler`` under DDP or ``None`` otherwise.
    """
    dataset = get_dataset(args.dataset, args.img_size, num_o, False, args.dir_dataset)
    if dist.is_initialized():
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset, shuffle=True, drop_last=True)
        loader = torch.utils.data.DataLoader(
            dataset, batch_size=args.batch_size, sampler=train_sampler,
            drop_last=True, num_workers=4, prefetch_factor=3, pin_memory=True)
    else:
        train_sampler = None
        loader = torch.utils.data.DataLoader(
            dataset, batch_size=args.batch_size, drop_last=True,
            num_workers=8, shuffle=True)
    # Cap the stream at total_iter batches and hand back a plain iterator.
    train_dataloader = iter(num_iter(loader, args.total_iter))
    return train_dataloader, train_sampler

def main(args, local_rank):
    """Training entry point: builds data, models and trainers, then runs the
    adversarial + causal training loop, checkpointing on the main rank.

    Args:
        args: parsed CLI namespace (see argparse setup at file bottom).
        local_rank: CUDA device index for this process; -1 selects CPU.
    """
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # +1 so range(load_begin, total_iter) includes the nominal last iteration.
    args.total_iter += 1

    # parameters
    img_size = args.img_size
    assert img_size in [64, 128, 256, 512]
    # coco uses 184 categories / up to 8 objects; vg uses 179 / 10.
    num_classes = 184 if args.dataset.lower() == 'coco' else 179
    num_o = 8 if args.dataset.lower() == 'coco' else 10
    # cmap = colormap(num_classes, as_array=True)

    # flag_single is True on rank 0 (or when not distributed): that process
    # owns logging, the EMA generator copy, and checkpointing.
    flag_single = not dist.is_initialized() or (dist.is_initialized() and dist.get_rank() == 0 )
    device = torch.device("cuda") if local_rank > -1 else torch.device("cpu")

    args.out_path = os.path.join('experiments', f'{args.dataset.lower()}_{img_size}_{args.out_path}')
    writer, logger, out_path, model_path = initialize_writer_logger(args, flag_single)
    if flag_single:
        logger.info(f"""{args.dataset}_{args.img_size} begin in {os.path.abspath(out_path)} \
        {['linear', 'nonlinear'][args.nlar]} ar, on {device}, with{['out',''][args.R1]} R1.""")  

    # data loader
    train_dataloader, train_sampler = initialize_train_loader(args, num_o)

    if flag_single:
        logger.info("Train dataloader prepared")
    
    # val_dataset = data_util.get_dataset(args.dataset, img_size, left_right_flip=False, train=False, dir_dataset=args.dir_dataset) if flag_single else None
    # val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=4, drop_last=False, shuffle=True, num_workers=1) if flag_single else None
    # if flag_single:
    #     logger.info("Val dataloader finished")
    
    # netG = legacy.ResnetGenerator(img_size, num_classes, num_o = num_o).train()
    # netD = legacy.ResnetDiscriminator(img_size, num_classes, 3,).train()
    # Separate causal graphs for the generator (cg_G) and discriminator (cg_D)
    # sides; generator/discriminator classes are picked by image size via
    # globals() lookup (ResnetGenerator64/128, CombineDiscriminator64/128).
    cg_G = CategoryCausalGraph(num_classes ).train()
    cg_D = CategoryCausalGraph(num_classes ).train()
    netG = globals()[f'ResnetGenerator{img_size}'](num_classes=num_classes)
    netD = globals()[f'CombineDiscriminator{img_size}'](num_classes=num_classes)
    dim = netD.dim_obj_feat
    dc = DiscriminatorCausal(dim).train()
    lar = NonLinAutoRegressor(dim, num_classes ).train() if args.nlar else LinAutoRegressor(dim, num_classes).train()
    
    if flag_single:
        logger.info("Model before DDP")
    # EMA copy of the generator is maintained only on the recording rank.
    netG_ema = ema_model(netG).to(device) if flag_single else None
    if dist.is_initialized():
        print(f"{dist.get_rank()} finishes model initialization.")

    # Optionally resume every sub-model from a checkpoint directory.
    if args.load_path is not None:
        print(f"Try to load from {args.load_path}")
        dc   = load_model(dc,   os.path.join(args.load_path, "discriminator_causal.pth"), device )
        cg_G = load_model(cg_G, os.path.join(args.load_path, "W.pth"),  device )
        cg_D = load_model(cg_D, os.path.join(args.load_path, "cg.pth"), device )
        lar  = load_model(lar,  os.path.join(args.load_path, "ar.pth"), device )
        netD = load_model(netD, os.path.join(args.load_path, "discriminator.pth"), device )
        netG = load_model(netG, os.path.join(args.load_path, "generator.pth"), device )
        if flag_single:
            netG_ema = load_model(netG_ema, os.path.join(args.load_path, f"EMA_G.pth"), device).to(device)
        print(f"Successfully load from {args.load_path}")

    # use DDP
    parallel = dist.is_initialized()
    if parallel:
        process_group = dist.new_group(list(range(dist.get_world_size())))
        if dist.get_world_size() > 1:
            # Only the generator's BatchNorm layers are synchronized across ranks.
            netG = nn.SyncBatchNorm.convert_sync_batchnorm(netG, process_group)
        netG = DDP(netG.to(device), device_ids=[local_rank])
        netD = DDP(netD.to(device), device_ids=[local_rank])
        cg_G = DDP(cg_G.to(device), device_ids=[local_rank])
        cg_D = DDP(cg_D.to(device), device_ids=[local_rank])
        dc   = DDP(dc.to(device),   device_ids=[local_rank])
        lar  = DDP(lar.to(device),  device_ids=[local_rank])

        if flag_single:
            logger.info("DDP Models initialized")

    # Shared trainer kwargs; discriminator runs at 3x the base learning rate.
    pac = {"total_iter": args.total_iter, "device": device, "logger": logger, "writer": writer}
    dc_trainer = DiscriminatorCausalTrainer(dc, lr = args.lr, **pac)
    ar_trainer = ARTrainer(cg_D, lar, dc_trainer, lr = args.lr,  **pac)
    Dtrainer   = ImageDiscriminatorTrainer(netD, lr = 3*args.lr, **pac)
    # NOTE(review): cg_G is passed twice here; given the cg_G/cg_D pairing
    # elsewhere, the fourth argument may be intended to be cg_D — confirm
    # against ImageGeneratorTrainer's signature.
    Gtrainer   = ImageGeneratorTrainer(netG, cg_G, netD, cg_G, lr = args.lr, **pac)
    
    if flag_single:
        logger.info("Trainers initialized")

    # * main
    r = range(args.load_begin, args.total_iter)
    if flag_single:
        r = tqdm(r, total = args.total_iter - args.load_begin, dynamic_ncols=True, desc="main")

    try:
        for ii in r: # idx
            this_batch = next(train_dataloader)
            real_samples, labels, bbox = this_batch

            real_samples = real_samples.to(device)
            labels = labels.long().to(device)
            bbox = bbox.float().to(device)

            # Discriminator-side causal-graph selection for this batch.
            W_selected_D = cg_D(bbox, labels)
            desc = f'iter_{ii}'
            #* adver train
            # Generator updates on an empirical sparse schedule: roughly
            # every ~100th (ii<20000) / ~8th iteration per 1000 after warm-up,
            # always during the first 3 iterations.
            fake_samples, mask, raw_mask, adjust, W_selected_G = Gtrainer.step_train(real_samples, bbox, labels, W_selected_D, train=(ii % 1000 > [8,100][ii<20000] and ii > 203) or ii < 3 )
            real_rearranged, fake_rearranged, desc_D = Dtrainer.step_train(real_samples, fake_samples, bbox, labels, flag_R1=args.R1)
            desc += " / " + desc_D

            #* causal train
            obj_feat_rearrage_real, bbox_rearrage_real, labels_rearrage_real = real_rearranged
            result = []
            # construct features for causal learning of size b o d 
            # Re-pack per-object features into a fixed (batch, num_o, dim)
            # tensor, zero-padding samples with fewer objects; column 0 of
            # bbox_rearrage_real carries the sample index within the batch.
            for iii in range(bbox.size(0)):
                idx_of_sample = bbox_rearrage_real[:, 0] == iii
                feature_of_sample = obj_feat_rearrage_real[ idx_of_sample , : ]
                padding = torch.zeros( [ bbox.size(1) - len(idx_of_sample.nonzero()),  feature_of_sample.size(1)], device=feature_of_sample.device ) 
                result.append( torch.cat([ feature_of_sample, padding ], dim=0) ) 
            obj_feat_real_for_causal = torch.stack( result, dim = 0 )
            
            #* discriminator causal train
            W_selected_D, residual = ar_trainer.step_train(obj_feat_real_for_causal.detach(), bbox, labels, W_selected_G.detach())
            # 10 causal-discriminator steps per iteration during the first
            # 200 iterations, then 1 per iteration.
            for _ in range([1, 10][ii<200]):
                desc_DC = dc_trainer.step_train(residual)
            desc += " / " + desc_DC

            # Re-seed the distributed sampler shuffle every 100 iterations.
            if dist.is_initialized() and ii % 100 == 0:
                train_sampler.set_epoch(ii)
                
            if flag_single:
                writer.step(ii)
                r.set_description_str(desc)
                
                netG_ema.update_parameters(netG)
                
                # Checkpoint every 10000 iterations, plus once early (iter 11)
                # as a smoke test of the save path.
                if writer.interval(10000) or writer.comes_to(11):
                    dc_trainer.save('discriminator_causal', model_path, ii)
                    ar_trainer.save('cg',            model_path, ii)
                    ar_trainer.save('ar',            model_path, ii)
                    Dtrainer.save(  'discriminator', model_path, ii)
                    Gtrainer.save(  'generator',     model_path, ii) 
                    Gtrainer.save(  'W',             model_path, ii) 
                    torch.save(netG_ema.state_dict(), os.path.join(model_path, str(ii), f"EMA_G.pth"))
                    logger.info("model saved!")

    except KeyboardInterrupt:
        if dist.is_initialized():
            print(f"{dist.get_rank()} accepts KeyboardInterrupt")
        #* save model in emergence
    except Exception as e:
        if dist.is_initialized():
            print(f"Exception in rank {dist.get_rank()}")
        print(str(e))
        print("--------------")
        traceback.print_exc()
        print("--------------")
        # NOTE(review): ``sys`` is not imported at the top of this file —
        # presumably pulled in by one of the wildcard imports; confirm, or
        # this line raises NameError while handling the original exception.
        traceback.print_exception(*sys.exc_info())
        print("------------------------------------")


    # Final save outside the loop (reached on normal exit or interrupt).
    if flag_single:
        logger.info('I come out of training loops')
        dc_trainer.save('discriminator_causal', model_path, 'outside')
        ar_trainer.save('cg',                   model_path, 'outside')
        ar_trainer.save('ar',                   model_path, 'outside')
        Dtrainer.save(  'discriminator',        model_path, 'outside')
        Gtrainer.save(  'generator',            model_path, 'outside')
        Gtrainer.save(  'W',                    model_path, 'outside')
        torch.save(netG_ema.state_dict(), os.path.join(model_path, str('outside'), f"EMA_G.pth"))
        logger.info(f"Models saved in outside! In {out_path}")

    torch.cuda.empty_cache()
    
    if flag_single:
        writer.flush()
        writer.close()
    
    print("end main()")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--dataset', type=str, default='coco',
                        help='training dataset')
    parser.add_argument('--dir_dataset', type=str, default='./datasets',
                        help='training dataset')
    parser.add_argument('--batch_size', type=int, default=24,
                        help='mini-batch size of training data. Default: 128')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='The basic learning rate of the optimizers. Default: 1e-4.')
    parser.add_argument('--total_iter', type=int, default=400_000,
                        help='number of total iteration')
    parser.add_argument('--out_path', type=str, default='./tmp/',
                        help='path to output files')
    parser.add_argument('--img_size', type=int, default=128, help='image size')
    parser.add_argument('--GTmasks', action='store_true', default=False)
    parser.add_argument('--R1', default=False, action='store_true',
                        help='trained in a single device')
    parser.add_argument('--load_path', type=str, default=None, help='path to continue training')
    parser.add_argument('--load_begin', type=int, default=0, help='the beginning iteration')
    parser.add_argument('--local_rank', default=-1, type=int, 
                        help='''node rank for distributed training. -1 when single cpu test.
                         0 when signle cuda test''')
    parser.add_argument('--single', default=False, action='store_true',
                        help='trained in a single device')
    parser.add_argument('--nlar', default=False, action='store_true',
                        help='use linear auto regressor')
    args = parser.parse_args()

    # BUG FIX: os.environ['LOCAL_RANK'] raised KeyError whenever the script
    # was run directly (the documented "--single --local_rank 0" mode) instead
    # of via torchrun; fall back to the --local_rank flag, which was
    # previously dead. Behavior under torchrun is unchanged.
    local_rank = int(os.environ.get('LOCAL_RANK', args.local_rank))
    if not args.single and local_rank > -1:
        dist.init_process_group(backend='nccl')
        torch.cuda.set_device(local_rank)
        os.environ['OMP_NUM_THREADS'] = "4"
    
    # torch.multiprocessing.set_sharing_strategy('file_system')

    main(args, local_rank)

# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 16 --img_size 128 --out_path 40w_double_cg_DC_LECAM --total_iter 400000 --lr 1e-4
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 16 --img_size 128 --out_path 40w_double_cg_DC_R1_continue_small_lr --total_iter 200000 --R1 --load_path ./experiments/coco_128_40w_double_cg_DC_R1/2021-12-12_23\:23\:02/model/240000/ --lr 1e-5
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 16 --img_size 128 --out_path 40w_double_cg_DC_R1 --total_iter 400000 --R1
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 16 --img_size 128 --out_path 20w_double_cg_DC --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 16 --img_size 128 --out_path 20w_double_cg --total_iter 200000
# export CUDA_VISIBLE_DEVICES=3; python -m torch.distributed.run --nproc_per_node=1 train.py --dataset coco --batch_size 16 --img_size 128 --out_path tmp --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path 20w_check --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path 20w_no_SE --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,2,3,4; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path 20w_SE_continue --load_path experiments0/coco_128_20w_SE_gain_in_G/2021-11-21_22\:04\:18/model/50000/ --total_iter 150000
# export CUDA_VISIBLE_DEVICES=1,2,3,4; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path 20w_attn --total_iter 200000
# export CUDA_VISIBLE_DEVICES=1,2,3,4; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path 20w_04diffaug_no_scheduler_R1 --total_iter 200000
# export CUDA_VISIBLE_DEVICES=1,2,3,4; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path 20w_04diffaug_no_scheduler --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.run --nproc_per_node=4 train.py --dataset coco --batch_size 32 --img_size 128 --out_path reproduce_lama_20k_coloraug --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.launch --nproc_per_node=4 train.py --dataset coco --batch_size 64 --img_size 64 --out_path reproduce_lama_20k_coloraug --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.launch --nproc_per_node=4 train.py --dataset coco --batch_size 64 --img_size 64 --out_path reproduce_lama_20k --total_iter 200000
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.launch --nproc_per_node=4 train.py --dataset coco --batch_size 64 --img_size 64 --out_path reproduce_lama
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4; python -m torch.distributed.launch --nproc_per_node=5 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 128 --out_path experiments/coco_128_1013/
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4; python -m torch.distributed.launch --nproc_per_node=5 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 128 --out_path experiments/coco_128_1005/ --load_path experiments/coco_128_1005/2021-10-05_22:56:02/model/39992/
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; python -m torch.distributed.launch --nproc_per_node=8 train.py --dataset coco --dir_dataset ./datasets --batch_size 16 --img_size 64 --out_path experiments/coco_64_1004_continue/ --load_path experiments/coco_64_1001_continue/2021-10-01_21\:26\:01/model/outside/ --total_iter 10000
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4; python -m torch.distributed.launch --nproc_per_node=5 train.py --dataset vg --dir_dataset ./datasets --batch_size 40 --img_size 64 --out_path experiments/vg_64/
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; python -m torch.distributed.launch --nproc_per_node=8 train.py --dataset coco --dir_dataset ./datasets --batch_size 12 --img_size 64 --out_path experiments/coco_64_more_parameter --load_path experiments/coco_64_more_parameter/2021-09-26/model/restart/
# export CUDA_VISIBLE_DEVICES=7; python train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 64 --out_path experiments/coco_64_more_parameter/ --load_path experiments/coco_64_more_parameter/2021-09-26/model/restart/ --local_rank 0 --single
# export CUDA_VISIBLE_DEVICES=7; python -m torch.distributed.launch --nproc_per_node=1 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 64 --out_path experiments/coco_64_more_parameter/ --load_path experiments/coco_64_more_parameter/2021-09-26/model/restart/
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; python -m torch.distributed.launch --nproc_per_node=8 train.py --dataset coco --dir_dataset ./datasets --batch_size 16 --img_size 64 --out_path experiments/coco_64_more_parameter 
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5; python -m torch.distributed.launch --nproc_per_node=6 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 64 --out_path experiments/coco_64_more_parameter 
# export CUDA_VISIBLE_DEVICES=0,1,2,3; python -m torch.distributed.launch --nproc_per_node=4 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 64 
# export CUDA_VISIBLE_DEVICES=0,1; python -m torch.distributed.launch --nproc_per_node=2 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 64 --out_path experiments/coco_64_more_parameter 
# export CUDA_VISIBLE_DEVICES=0; python -m torch.distributed.launch --nproc_per_node=1 train.py --dataset coco --dir_dataset ./datasets --batch_size 20 --img_size 64 --out_path experiments/coco_64_more_parameter 

