from train import *

def main(args: argparse.Namespace) -> None:
    """Train the causal layout-to-image GAN configured by ``args``.

    Builds generator/discriminator plus causal-graph and auto-regressor
    modules, optionally wraps them in DDP, runs the adversarial + causal
    training loop for ``args.total_iter`` steps, and checkpoints from the
    single/rank-0 process. Returns nothing; all results go to disk/logs.
    """
    # Seed CPU and current-CUDA RNGs for reproducibility (per-process seed
    # is identical across ranks — data order is handled by the sampler).
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # parameters
    img_size = args.img_size
    assert img_size in [64, 128, 256, 512]
    # NOTE(review): class counts are dataset-specific (184 for COCO, 179
    # otherwise — presumably Visual Genome); confirm against the datasets.
    num_classes = 184 if args.dataset == 'coco' else 179
    # cmap = colormap(num_classes, as_array=True)

    # flag_single is True when not distributed, or on rank 0 only; it gates
    # logging, EMA maintenance, and checkpointing to a single process.
    flag_single = not dist.is_initialized() or (dist.is_initialized() and dist.get_rank() == 0 )
    # local_rank > -1 means a CUDA device was requested (see arg help below).
    device = torch.device("cuda") if args.local_rank > -1 else torch.device("cpu")

    writer, logger, out_path, model_path = initialize_writer_logger(args, flag_single)
    if flag_single:
        logger.info(f"""{args.dataset}_{args.img_size} begin in {os.path.abspath(out_path)} \
        {['linear', 'nonlinear'][args.nlar]} ar, on {device}.""")  

    # data loader
    # num_o: number of objects per layout handed to the models — TODO confirm.
    num_o = 8
    train_dataloader, train_sampler = initialize_train_loader(args, num_o)

    if flag_single:
        logger.info("Train dataloader prepared")
    
    
    # Generator/discriminator pair plus the causal modules, all in train mode.
    netG = legacy.ResnetGenerator_no_causal(img_size, num_classes, num_o = num_o).train()
    netD = legacy.ResnetDiscriminator(img_size, num_classes, 3,).train()

    # Feature dimensionality of per-object features, as exposed by netD.
    dim = netD.dim_obj_feat
    cg = CategoryCausalGraph(num_classes ).train()
    dc = DiscriminatorCausal(dim).train()
    # Auto-regressor: nonlinear variant when --nlar is set, linear otherwise.
    lar = NonLinAutoRegressor(dim, num_classes).train() if args.nlar else LinAutoRegressor(dim, num_classes).train()
    if flag_single:
        logger.info("Model before DDP")
    # EMA shadow of the generator is maintained only on the single/rank-0 process.
    netG_ema = ema_model(netG).to(device) if flag_single else None
    if dist.is_initialized():
        print(f"{dist.get_rank()} finishes model initialization.")

    # Optionally resume all modules from a previous checkpoint directory.
    if args.load_path is not None:
        print(f"Try to load from {args.load_path}")
        dc = load_model(dc,  os.path.join(args.load_path, "discriminator_causal.pth"), device )
        cg = load_model(cg,  os.path.join(args.load_path, "cg.pth"), device )
        lar  = load_model(lar, os.path.join(args.load_path, "ar.pth"), device )
        netD = load_model(netD, os.path.join(args.load_path, "discriminator.pth"), device )
        netG = load_model(netG, os.path.join(args.load_path, "generator.pth"), device )
        if flag_single:
            netG_ema = load_model(netG_ema, os.path.join(args.load_path, f"EMA_G.pth"), device).to(device)
        print(f"Successfully load from {args.load_path}")

    # use DDP
    parallel = dist.is_initialized()
    if parallel:
        process_group = dist.new_group(list(range(dist.get_world_size())))
        # SyncBatchNorm only matters with >1 process; plain BN otherwise.
        if dist.get_world_size() > 1:
            netG = nn.SyncBatchNorm.convert_sync_batchnorm(netG, process_group)
        netG = DDP(netG.to(device), device_ids=[args.local_rank])
        netD = DDP(netD.to(device), device_ids=[args.local_rank])
        cg   = DDP(cg.to(device),   device_ids=[args.local_rank])
        dc   = DDP(dc.to(device),   device_ids=[args.local_rank])
        lar  = DDP(lar.to(device),  device_ids=[args.local_rank])

        if flag_single:
            logger.info("DDP Models initialized")

    # Shared keyword args for every trainer (device/logging/tensorboard).
    pac = {"device": device, "logger": logger, "writer": writer}
    dc_trainer = DiscriminatorCausalTrainer(dc,  **pac)
    ar_trainer = ARTrainer(cg, lar, dc_trainer,  **pac)
    Dtrainer   = ImageDiscriminatorTrainer(netD, **pac)
    Gtrainer   = ImageGeneratorTrainer(netG, cg, netD, ar_trainer, **pac)
    
    if flag_single:
        logger.info("Trainers initialized")

    # * main
    r = range(args.total_iter)
    if flag_single:
        # Progress bar only on the logging process to keep rank output clean.
        r = tqdm(r, total = args.total_iter, dynamic_ncols=True, desc="main")
    # for ii, data in zip(r, train_dataloader):

    try:
        for ii in r: # idx
            # assumes train_dataloader is an iterator yielding
            # (images, labels, bboxes) indefinitely — TODO confirm in train.py
            this_batch = next(train_dataloader)
            real_samples, labels, bbox = this_batch

            real_samples = real_samples.to(device)
            labels = labels.long().to(device)
            bbox = bbox.float().to(device)

            #* adver train
            fake_samples, mask, raw_mask, adjust = Gtrainer.step_train(real_samples, bbox, labels, )
            real_rearranged, fake_rearranged = Dtrainer.step_train(real_samples, fake_samples, bbox, labels, )

            #* causal train
            obj_feat_rearrage_real, bbox_rearrage_real, labels_rearrage_real = real_rearranged
            
            #* discriminator causal train
            # detach(): causal modules train on real features without
            # backpropagating into the image discriminator.
            residual = ar_trainer.step_train(obj_feat_rearrage_real.detach(), 
                        bbox_rearrage_real.detach(), labels_rearrage_real.detach(), )
            # Causal discriminator is updated only every 3rd iteration.
            if ii % 3 ==0:
                dc_trainer.step_train(residual)

            # NOTE(review): set_epoch is normally called once per epoch; here
            # it is called per iteration (epoch == iteration) — confirm the
            # sampler/loader are built to expect this.
            if dist.is_initialized():
                train_sampler.set_epoch(ii)
                
            if flag_single:
                writer.step(ii)
                
                # Keep the EMA copy of the generator in sync after each step.
                netG_ema.update_parameters(netG)
                
                # Periodic checkpoint; interval(9998) presumably fires roughly
                # every ~10k steps — TODO confirm writer.interval semantics.
                if writer.interval(9998):
                    dc_trainer.save('discriminator_causal', model_path, ii)
                    ar_trainer.save('cg', model_path, ii)
                    ar_trainer.save('ar', model_path, ii)
                    Dtrainer.save(  'discriminator', model_path, ii)
                    Gtrainer.save(  'generator', model_path, ii) 
                    torch.save(netG_ema.state_dict(), os.path.join(model_path, str(ii), f"EMA_G.pth"))
                    logger.info("model saved!")

    except KeyboardInterrupt:
        if dist.is_initialized():
            print(f"{dist.get_rank()} accepts KeyboardInterrupt")
        #* save model in emergence
    # Broad catch is deliberate: the traceback is printed and control falls
    # through to the final checkpoint below so work is not lost on a crash.
    except Exception as e:
        if dist.is_initialized():
            print(f"Exception in rank {dist.get_rank()}")
        print(str(e))
        print("--------------")
        traceback.print_exc()
        print("--------------")
        traceback.print_exception(*sys.exc_info())
        print("------------------------------------")


    # Final save (runs after normal completion, Ctrl-C, or a caught exception).
    if flag_single:
        logger.info('I come out of training loops')
        dc_trainer.save('discriminator_causal', model_path, 'outside')
        ar_trainer.save('cg',                   model_path, 'outside')
        ar_trainer.save('ar',                   model_path, 'outside')
        Dtrainer.save(  'discriminator',        model_path, 'outside')
        Gtrainer.save(  'generator',            model_path, 'outside') 
        torch.save(netG_ema.state_dict(), os.path.join(model_path, str('outside'), f"EMA_G.pth"))
        logger.info("Models saved in outside!")

    torch.cuda.empty_cache()
    
    if flag_single:
        writer.flush()
        writer.close()
    
    print("end main()")


if __name__ == "__main__":
    # Command-line interface. Defaults target COCO at 128x128.
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--dataset', type=str, default='coco',
                        help='training dataset')
    parser.add_argument('--dir_dataset', type=str, default='./datasets',
                        help='root directory of the training dataset')
    parser.add_argument('--batch_size', type=int, default=24,
                        help='mini-batch size of training data. Default: 24')
    parser.add_argument('--total_iter', type=int, default=400_000,
                        help='number of total iterations')
    parser.add_argument('--out_path', type=str, default='./tmp/',
                        help='path to output files')
    parser.add_argument('--img_size', type=int, default=128, help='image size')
    parser.add_argument('--GTmasks', action='store_true', default=False)
    parser.add_argument('--load_path', type=str, default=None, help='path to continue training')
    parser.add_argument('--local_rank', default=-1, type=int,
                        help='''node rank for distributed training. -1 when single cpu test.
                         0 when single cuda test''')
    parser.add_argument('--single', default=False, action='store_true',
                        help='trained in a single device')
    # Note: the flag enables the NONlinear auto-regressor; the linear one is
    # the default (see the selection in main()).
    parser.add_argument('--nlar', default=False, action='store_true',
                        help='use nonlinear auto regressor (linear is the default)')
    args = parser.parse_args()

    # Multi-process setup: only when not forced single and a CUDA rank is given.
    if not args.single and args.local_rank > -1:
        dist.init_process_group(backend='nccl')
        torch.cuda.set_device(args.local_rank)
        os.environ['OMP_NUM_THREADS'] = "4"

    main(args)