import argparse
import time
import datetime
from pathlib import Path
import json

import numpy as np
import torch
from torch.utils.data import DataLoader
import datasets.samplers as sampler
# from torchcontrib.optim import SWA

from datasets import build_dataset, get_coco_api_from_dataset
import datasets

from datasets import build_dataset
from engine import train_one_epoch, evaluate_hoi
from models import build_model
import os
import pdb
import sys
#from thop import profile
from torch.utils.tensorboard import SummaryWriter

import torch.multiprocessing as mp
import torch.distributed as dist
from util.generate import generate
import pickle
from add_on.vsrl_eval import VCOCOeval
import util.misc as utils
from util.EMA import ModelEMA
# from models.postprocess.newmy_postprocesshoi import PostProcessHOI
from models.postprocess.postprocesshoi import PostProcessHOI

# The 80 populated COCO category ids. COCO's 1-90 label space is sparse
# (ids like 12, 26, 29, 30, ... are unused), so this tuple enumerates only
# the real categories; presumably used to map a contiguous 0-79 class index
# back to the original COCO category id — confirm against the dataset code.
valid_obj_ids = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
                14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
                37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
                48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
                58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
                72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
                82, 84, 85, 86, 87, 88, 89, 90)

# The 29 V-COCO action (verb) classes, in annotation order. The '_obj' /
# '_instr' suffixes encode the semantic role of the interacted object
# (direct object vs. instrument) as defined by the V-COCO annotations;
# passed to `generate(...)` below when producing detections for VCOCOeval.
verb_classes = ['hold_obj', 'stand', 'sit_instr', 'ride_instr', 'walk', 'look_obj', 'hit_instr', 'hit_obj',
            'eat_obj', 'eat_instr', 'jump_instr', 'lay_instr', 'talk_on_phone_instr', 'carry_obj',
            'throw_obj', 'catch_obj', 'cut_instr', 'cut_obj', 'run', 'work_on_computer_instr',
            'ski_instr', 'surf_instr', 'skateboard_instr', 'smile', 'drink_instr', 'kick_obj',
            'point_instr', 'read_obj', 'snowboard_instr']

def get_args_parser():
    """Build the command-line parser for HOI-transformer training/evaluation.

    Returns an ``argparse.ArgumentParser`` created with ``add_help=False``
    (so it can be composed as a parent parser). It covers: optimizer and
    LR-schedule settings, model architecture flags for the DETR /
    Deformable-DETR / Anchor-DETR / CDN / OCN variants, loss toggles and
    coefficients, matcher costs, dataset paths, distributed-training
    options, HOI evaluation parameters, and CLIP-related flags.

    Notes:
        * ``--no_aux_loss``, ``--no_distributed`` and ``--no_benchmark`` are
          negative flags: they store ``False`` into ``args.aux_loss``,
          ``args.distributed`` and ``args.benchmark`` respectively, which
          therefore default to ``True``.
        * ``--lr_backbone`` defaults to 0, i.e. the backbone is frozen
          unless a positive LR is supplied.
    """
    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
    parser.add_argument('--exp_id', type=str, default='default')
    parser.add_argument('--lr', default=1e-4, type=float)

    # Backbone LR defaults to 0 (frozen); pass e.g. 1e-5 to fine-tune it.
    parser.add_argument('--lr_backbone', default=0, type=float)
    #   deformableDETR
    parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+')
    parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
    parser.add_argument('--lr_linear_proj_mult', default=1.0, type=float)

    parser.add_argument('--batch_size', default=4, type=int)    #2
    parser.add_argument('--epochs', default=90, type=int)#50/150

    #   pix2seq
    parser.add_argument('--warmup_lr', default=1e-5, type=float)
    parser.add_argument('--warmup', default=0, type=int)#10

    parser.add_argument('--lr_decay', action='store_true')
    parser.add_argument('--lr_drop', default=60, type=int)#40/120
    parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
    parser.add_argument('--clip_max_norm', default=0.1, type=float,
                        help='gradient clipping max norm')

    # Variants of Deformable DETR
    parser.add_argument('--with_box_refine', default=False, action='store_true')
    parser.add_argument('--two_stage', default=False, action='store_true')

    #   anchor DETR
    parser.add_argument('--smooth', default=8, type=float,
                        help='gaussian_smooth')
    # Typo fix in help text only ("dyanmic" -> "dynamic"); semantics unchanged.
    parser.add_argument('--dynamic_scale', default="type1", type=str,
                        help="type1 for no scale, type 2 for dynamic scale, type 3 for dynamic xy scale, type 4 for covariance matrix scale")

    # Model parameters
    parser.add_argument('--frozen_weights', type=str, default=None,
                        help="Path to the pretrained model. If set, only the mask head will be trained")
    # Backbone
    parser.add_argument('--backbone', default='resnet50', type=str,
                        help="Name of the convolutional backbone to use")
    parser.add_argument('--dilation', action='store_true',      #   DC5
                        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
                        help="Type of positional embedding to use on top of the image features")
    parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
                        help="position / size * scale")
    parser.add_argument('--use_checkpoint', action='store_true')
    #   deformableDETR
    parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
    parser.add_argument('--fpn', action='store_true')

    # * Transformer
    parser.add_argument('--enc_layers', default=6, type=int,
                        help="Number of encoding layers in the transformer")
    parser.add_argument('--dec_layers', default=6, type=int,
                        help="Number of hopd decoding layers in the transformer")
    parser.add_argument('--dim_feedforward', default=2048, type=int,
                        help="Intermediate size of the feedforward layers in the transformer blocks")
    parser.add_argument('--hidden_dim', default=256, type=int,
                        help="Size of the embeddings (dimension of the transformer)")
    parser.add_argument('--dropout', default=0.1, type=float,
                        help="Dropout applied in the transformer")
    parser.add_argument('--nheads', default=8, type=int,
                        help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=100, type=int,
                        help="Number of query slots")
    parser.add_argument('--pre_norm', action='store_true')
    #   OCN-HOI
    parser.add_argument('--stochastic_context_transformer', action='store_true',
                        help='Enable the stochastic context transformer')
    parser.add_argument('--semantic_hidden_dim', default=256, type=int,
                        help="Size of the embeddings for semantic reasoning")
    parser.add_argument('--gru_hidden_dim', default=256, type=int,
                        help="Size of the embeddings GRU")

    #   CDN
    parser.add_argument('--dec_layers_hopd', default=3, type=int,
                        help="Number of hopd decoding layers in the transformer")
    parser.add_argument('--dec_layers_interaction', default=3, type=int,
                        help="Number of interaction decoding layers in the transformer")
    #   deformableDETR
    parser.add_argument('--dec_n_points', default=4, type=int)
    parser.add_argument('--enc_n_points', default=4, type=int)
    #   anchorDETR
    parser.add_argument('--num_query_position', default=300, type=int,#300
                        help="Number of query positions")
    parser.add_argument('--num_query_pattern', default=3, type=int,
                        help="Number of query patterns")
    # Help text corrected: this selects the spatial prior, it is not a count
    # of query patterns (copy-paste error in the original help string).
    parser.add_argument('--spatial_prior', default='learned', choices=['learned', 'grid'],
                        type=str, help="Spatial prior for the query positions")
    parser.add_argument('--attention_type',
                        default="RCDA",
                        choices=['RCDA', 'nn.MultiheadAttention'],
                        type=str, help="Type of attention module")

    # * Modified architecture(sparse detr)
    parser.add_argument('--backbone_from_scratch', default=False, action='store_true')
    parser.add_argument('--finetune_early_layers', default=False, action='store_true')
    parser.add_argument('--scrl_pretrained_path', default='', type=str)

    # * Segmentation
    parser.add_argument('--masks', action='store_true',
                        help="Train segmentation head if the flag is provided")

    # HOI
    parser.add_argument('--num_obj_classes', type=int, default=80,
                        help="Number of object classes")
    parser.add_argument('--num_verb_classes', type=int, default=117,
                        help="Number of verb classes")
    parser.add_argument('--pretrained', type=str, default='',
                        help='Pretrained model path')
    parser.add_argument('--subject_category_id', default=0, type=int)
    parser.add_argument('--missing_category_id', default=80, type=int)
    parser.add_argument('--verb_loss_type', type=str, default='focal',
                        help='Loss type for the verb classification')

    #   Loss
    # Negative flag: stores False into args.aux_loss (defaults to True).
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
                        help="Disables auxiliary decoding losses (loss at each layer)")
    #   OCN
    parser.add_argument('--entropy_bound', action='store_true',
                        help='Enable the loss to bound the entropy for the gaussian distribution')
    parser.add_argument('--kl_divergence', action='store_true',
                        help='Enable the loss to bound the shape for the gaussian distribution')
    # Typo fix in help text only ("recondtructing" -> "reconstructing").
    parser.add_argument('--verb_gt_recon', action='store_true',
                        help='Enable the loss for reconstructing the gt labels.')
    parser.add_argument('--ranking_verb', action='store_true',
                        help='Enable the loss for ranking verbs.')
    parser.add_argument('--no_verb_bce_focal', action='store_true',
                        help='Disable the loss for loss_verb_labels.')
    parser.add_argument('--verb_hm', action='store_true',
                        help='Enable the heatmap loss DETRHOIhm.')
    parser.add_argument('--semantic_similar', action='store_true',
                        help='Enable the loss for semantic similarity.')
    parser.add_argument('--verb_threshold', action='store_true',
                        help='Enable the loss for verb similarity.')

    #   deformableDETR
    parser.add_argument('--sgd', action='store_true')
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', default=1e-4, type=float)

    parser.add_argument('--use_matching', action='store_true',
                        help="Use obj/sub matching 2class loss in first decoder, default not use")
    parser.add_argument('--hoi_thres', default=0.2, type=float)

    #  Matcher
    parser.add_argument('--set_cost_class', default=1, type=float,
                        help="Class coefficient in the matching cost")
    parser.add_argument('--set_cost_bbox', default=2.5, type=float,#5
                        help="L1 box coefficient in the matching cost")
    parser.add_argument('--set_cost_giou', default=1, type=float,#2
                        help="giou box coefficient in the matching cost")
    parser.add_argument('--set_cost_obj_class', default=1, type=float,
                        help="Object class coefficient in the matching cost")
    parser.add_argument('--set_cost_verb_class', default=1, type=float,#1
                        help="Verb class coefficient in the matching cost")
    #   CDN
    parser.add_argument('--set_cost_matching', default=1, type=float,
                        help="Sub and obj box matching coefficient in the matching cost")

    #   OCN
    parser.add_argument('--HOICVAE', action='store_true',
                        help='Enable the CVAE model for DETRHOI')
    parser.add_argument('--SemanticDETRHOI', action='store_true',
                        help='Enable the Semantic model for DETRHOI')
    parser.add_argument('--IterativeDETRHOI', action='store_true',
                        help='Enable the Iterative Refining model for DETRHOI')
    parser.add_argument('--DETRHOIhm', action='store_true',
                        help='Enable the verb heatmap query prediction for DETRHOI')
    parser.add_argument('--OCN', action='store_true',
                        help='Augment DETRHOI with Cross-Modal Calibrated Semantics.')
    parser.add_argument('--save_ckp', action='store_true', help='Save model for the last 5 epoches')

    # * Loss coefficients
    parser.add_argument('--mask_loss_coef', default=1, type=float)
    parser.add_argument('--dice_loss_coef', default=1, type=float)
    parser.add_argument('--bbox_loss_coef', default=2.5, type=float)#5
    parser.add_argument('--giou_loss_coef', default=1, type=float)#2
    parser.add_argument('--obj_loss_coef', default=1, type=float)
    parser.add_argument('--verb_loss_coef', default=2, type=float)#1
    parser.add_argument('--mask_prediction_coef', default=1, type=float)
    parser.add_argument('--matching_loss_coef', default=1, type=float)
    parser.add_argument('--eos_coef', default=0.1, type=float,
                        help="Relative classification weight of the no-object class")
    parser.add_argument('--verb_topK_coef', default=1, type=float)#1

    parser.add_argument('--focal_alpha', default=0.5, type=float, help='focal loss alpha')
    parser.add_argument('--alpha', default=0.5, type=float, help='focal loss alpha')
    parser.add_argument('--gamma', default=2, type=float)

    #   OCN
    parser.add_argument('--entropy_bound_coef', default=0.01, type=float)
    parser.add_argument('--kl_divergence_coef', default=0.01, type=float)
    parser.add_argument('--verb_gt_recon_coef', default=1, type=float)
    parser.add_argument('--ranking_verb_coef', default=1, type=float)
    parser.add_argument('--verb_hm_coef', default=1, type=float)
    parser.add_argument('--exponential_hyper', default=0.8, type=float)
    parser.add_argument('--exponential_loss', action='store_true',
                        help='Enable the exponentially increasing loss coef.')
    parser.add_argument('--semantic_similar_coef', default=1, type=float)
    parser.add_argument('--verb_threshold_coef', default=1, type=float)

    # dataset parameters

    #   pix2seq
    parser.add_argument('--large_scale_jitter', action='store_true')
    parser.add_argument('--dataset_file', default='vcoco')
    #   detr
    parser.add_argument('--coco_path', default='./data/coco', type=str)
    parser.add_argument('--coco_panoptic_path', type=str)
    parser.add_argument('--remove_difficult', action='store_true')
    parser.add_argument('--hoi_path', type=str)

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--gpus', default='0,1,2,3', help='-1 for CPU, use comma for multiple gpus')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--eval_extra', action='store_true')
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # Help text corrected: this is the rendezvous address, not a process
    # count (copy-paste error in the original help string).
    parser.add_argument('--master_addr', default="127.0.0.1", type=str,
                        help='address of the master node for distributed training')
    parser.add_argument('--master_port', default="1333", type=str)
    # Negative flag: stores False into args.distributed (defaults to True).
    parser.add_argument('--no_distributed', dest='distributed', action='store_false')

    # decoupling training parameters
    parser.add_argument('--freeze_mode', default=0, type=int)
    parser.add_argument('--obj_reweight', action='store_true')      #Dynamic re-weighting.
    parser.add_argument('--verb_reweight', action='store_true')     #Dynamic re-weighting.
    parser.add_argument('--use_static_weights', action='store_true',
                        help='use static weights or dynamic weights, default use dynamic')
    #   pix2seq
    parser.add_argument('--queue_size', default=4704*1.0, type=float,
                        help='Maxsize of queue for obj and verb reweighting, default 1 epoch')
    parser.add_argument('--p_obj', default=0.7, type=float,
                        help='Reweighting parameter for obj')
    parser.add_argument('--p_verb', default=0.7, type=float,
                        help='Reweighting parameter for verb')

    # * Efficient DETR
    parser.add_argument('--eff_query_init', default=False, action='store_true')
    parser.add_argument('--eff_specific_head', default=False, action='store_true')

    # * Sparse DETR
    parser.add_argument('--use_enc_aux_loss', default=False, action='store_true')
    parser.add_argument('--rho', default=0., type=float)

    # * benchmark
    parser.add_argument('--approx_benchmark_only', default=False, action='store_true')
    parser.add_argument('--benchmark_only', default=False, action='store_true')
    # Negative flag: stores False into args.benchmark (defaults to True).
    parser.add_argument('--no_benchmark', dest='benchmark', action='store_false')

    # hoi eval parameters
    parser.add_argument('--use_nms_filter', action='store_true', help='Use pair nms filter, default not use')#PNMS
    parser.add_argument('--HO_scale', type=float, default=0.)
    parser.add_argument('--query_thres', type=float, default=0.)
    parser.add_argument('--max_hois', type=int, default=100)
    parser.add_argument('--nms_alpha', default=1.0, type=float)
    parser.add_argument('--nms_beta', default=0.5, type=float)
    parser.add_argument('--nms_thresh', default=0.5, type=float)
    parser.add_argument('--json_file', default='results.json', type=str)
    parser.add_argument('--save_path', type=str, default='./')
    #   amp
    parser.add_argument('--amp', action='store_true')
    parser.add_argument('--ema', action='store_true')
    parser.add_argument('--ema_delay', default=0.999, type=float)
    parser.add_argument('--swa', action='store_true')
    parser.add_argument('--swa_freq', default=5, type=int)

    # clip
    parser.add_argument('--ft_clip_with_small_lr', action='store_true',
                        help='Use smaller learning rate to finetune clip weights')
    parser.add_argument('--with_clip_label', action='store_true', help='Use clip to classify HOI')
    parser.add_argument('--early_stop_mimic', action='store_true', help='stop mimic after step')
    parser.add_argument('--with_obj_clip_label', action='store_true', help='Use clip to classify object')
    parser.add_argument('--clip_model', default='ViT-B/32',
                        help='clip pretrained model path')
    parser.add_argument('--fix_clip', action='store_true', help='')
    parser.add_argument('--clip_embed_dim', default=512, type=int)
    # zero shot type
    parser.add_argument('--zero_shot_type', default='default',
                        help='default, rare_first, non_rare_first, unseen_object, unseen_verb')
    parser.add_argument('--del_unseen', action='store_true', help='')
    parser.add_argument('--set_cost_hoi', default=1, type=float,
                        help="Hoi class coefficient")
    parser.add_argument('--hoi_loss_coef', default=2, type=float)
    parser.add_argument('--with_mimic', action='store_true',
                        help="Use clip feature mimic")

    parser.add_argument('--k_one2many', default=1, type=int)
    parser.add_argument('--topK', default=-1, type=int)
    parser.add_argument('--painting', action='store_true', help='')
    parser.add_argument('--output_img', default="./outputs_img", type=str,
        help="Path to an image file.")

    #   paint
    # Fixed: the default was the string '0' (argparse only coerced it to a
    # float because string defaults are re-parsed via `type`); use a float
    # literal so the default's type is explicit. Resulting value unchanged.
    parser.add_argument('--rel_threshold', type=float, default=0.0)
    parser.add_argument('--top_k_start', type=int, default=0)
    parser.add_argument('--top_k', type=int, default=3)
    parser.add_argument('--epoch_limit', type=int, default=60)
    parser.add_argument('--mode', default='scenario_2', type=str)
    parser.add_argument('--ignore_point', action='store_true')
    return parser


def main(rank, args):
    #utils.init_distributed_mode(args)
    dist.init_process_group(
        backend="nccl",
        init_method="env://",
        world_size=args.world_size,
        rank=rank)
    torch.distributed.barrier()
    utils.setup_for_distributed(rank == 0)
    utils.build_seed(args.seed)


    torch.cuda.set_device(rank)

    print("git:\n  {}\n".format(utils.get_sha()))

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)
    args.device = dist.get_rank() if args.device=='cuda' else 'cpu'
    device = torch.device(args.device)

    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)

    if args.device!='cpu':
        if args.cache_mode:
            sampler_train = sampler.NodeDistributedSampler(dataset_train)
            sampler_val   = sampler.NodeDistributedSampler(dataset_val, shuffle=False)
        else:#True
            sampler_train = sampler.DistributedSampler(dataset_train)
            sampler_val   = sampler.DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)


    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, 
                                   collate_fn=utils.collate_fn, num_workers=args.num_workers, pin_memory=True, worker_init_fn=utils.worker_init)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn, 
                                 num_workers=args.num_workers, pin_memory=True, worker_init_fn=utils.worker_init)

    
    model, criterion, postprocessors = build_model(args)
    if args.dataset_file=='vcoco':
        post_processor = PostProcessHOI(args, mode='test', correct_mat = dataset_val.correct_mat).to(device)
    model.to(device)
    
    model_without_ddp = model
    param_dicts = [
        #   detr
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": args.lr_backbone},
        #   deformableDETR
        # {"params":[p for n, p in model_without_ddp.named_parameters() if not utils.match_name_keywords(n, args.lr_backbone_names) and not utils.match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
        #     "lr": args.lr},
        # {"params": [p for n, p in model_without_ddp.named_parameters() if utils.match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],
        #     "lr": args.lr_backbone},
        # {"params": [p for n, p in model_without_ddp.named_parameters() if utils.match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
        #     "lr": args.lr * args.lr_linear_proj_mult}
    ]

    
    #   deformableDETR
    if args.sgd:
        optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
    if args.swa:
        pass
        # optimizer = SWA(optimizer, swa_start = args.warmup, swa_freq = args.swa_freq, swa_lr=args.lr)
    #   pix2seq
    learning_rate_schedule = None
    warmup = args.warmup
    if warmup != 0:
        min_lr = args.warmup_lr
        max_lr = args.lr
        full_epochs = args.epochs
        warmup_lr = [min_lr + ((max_lr - min_lr) * i / warmup) for i in range(warmup)]
        decay_lr = [max(i * args.lr / full_epochs, min_lr) for i in range(full_epochs - warmup)]
        decay_lr.reverse()
        learning_rate_schedule = warmup_lr + decay_lr
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)


    ema = None
    if args.ema:
        ema = ModelEMA(model_without_ddp, args.ema_delay)
        ema.register()    

    if args.device!='cpu':
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], find_unused_parameters=True)
        # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

################################# new ###########################################
    #   CDN
    if args.freeze_mode == 1:
        for name, p in model.named_parameters():
            if 'decoder' not in name and \
                'verb_class_embed' not in name and \
                'obj_class_embed' not in name and \
                'sub_bbox_embed' not in name and \
                'obj_bbox_embed' not in name:
                p.requires_grad = False
            if args.use_matching and 'matching_embed' in name:
                p.requires_grad = True
################################# new ###########################################
    #   DETR
    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    elif args.dataset_file == "coco":
        base_ds = get_coco_api_from_dataset(dataset_val)


    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp = utils.load_model(model_without_ddp, checkpoint['model'])
    output_dir = Path(args.output_dir)
    try:
        os.makedirs(output_dir, exist_ok = True)
        print("Directory created successfully" )
    except OSError as error:
        print("Directory can not be created")
        
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp = utils.load_model(model_without_ddp, checkpoint['model'])
        if not args.eval and 'optimizer' in checkpoint and \
        'lr_scheduler' in checkpoint and \
        'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
    elif args.pretrained:
        checkpoint = torch.load(args.pretrained, map_location='cpu')
        print("pretrained location: " + args.pretrained)
        model_without_ddp = utils.load_model(model_without_ddp, checkpoint['model'])
    
    #   write args to file
    tb_writer = None
    if args.output_dir and utils.is_main_process():
        arg = dict((name, getattr(args, name)) for name in dir(args) if not name.startswith('_'))
        with (output_dir / "log.txt").open("a") as f:
            f.write('==> torch version: {}\n'.format(torch.__version__))
            f.write('==> cudnn version: {}\n'.format(torch.backends.cudnn.version()))
            f.write('==> Cmd:\n')
            f.write(str(sys.argv))
            f.write('\n==> args:\n')
            for k, v in sorted(arg.items()):
                f.write('  %s: %s\n' % (str(k), str(v)))
        time_str = time.strftime('%Y-%m-%d-%H-%M')
        tb_output_dir = Path(args.output_dir +'/tensorboard/{}'.format(time_str))
        tb_writer = SummaryWriter(tb_output_dir)
    #   cal the flops
    

    epoch = -1
    # dummy_input = torch.randn(1, 3, 800, 1200).to(device)
    # flops, params = profile(model_without_ddp, (dummy_input,), verbose=False)
    # print('flops: %.2f M, params: %.2f M' % (flops / 1000000.0, params / 1000000.0))
    # if utils.is_main_process():
    #     tb_writer.add_scalar(f'flops:', flops / 1000000.0, global_step=epoch, walltime=None)
    #     tb_writer.add_scalar(f'params:', params / 1000000.0, global_step=epoch, walltime=None)

    if args.eval:#  EVAL
        start_time = time.time()
        #   DETR
        if args.dataset_file == "coco":
            # test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
            #                                     data_loader_val, base_ds, device, args)
            # if args.output_dir:
            #     utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
            # print_final_result_on_master(model, dataset_val_org, args, test_stats, start_time)
            return
        #   official vcoco
        elif args.dataset_file=='vcoco':
            with torch.no_grad():
                detections = generate(model, post_processor, data_loader_val, device, verb_classes, args.missing_category_id)
            if utils.is_main_process():
                output_dir = args.output_dir + "cache.pkl"
                with open(output_dir, 'wb') as f:
                    pickle.dump(detections, f, protocol=2)
            torch.distributed.barrier()
            torch.cuda.empty_cache()        
            if utils.is_main_process():
                print('start eval')
                start_time = time.time()
                if not os.path.exists(output_dir):
                    raise NotImplementedError(f"No det file, please check.")
                vcocoeval = VCOCOeval(vsrl_annot_file='/WHZH/dataset/v-coco/data/vcoco/vcoco_test.json', 
                                      coco_annot_file='/WHZH/dataset/v-coco/data/instances_vcoco_all_2014.json', 
                                      split_file='/WHZH/dataset/v-coco/data/splits/vcoco_test.ids')
                total_time = time.time() - start_time
                total_time_str = str(datetime.timedelta(seconds=int(total_time)))
                print('data load time {}'.format(total_time_str))
                vcocoeval._do_eval(output_dir, ovr_thresh=0.5, mode=args.mode,  ignore_point = args.ignore_point)  
            torch.distributed.barrier()  
            return   
        elif args.dataset_file=='hico':
            with torch.no_grad():
                test_stats = evaluate_hoi(args.dataset_file, model, postprocessors, data_loader_val, args.subject_category_id, device, args, epoch, tb_writer, ema=ema)
            if utils.is_main_process():   
                for k, v in test_stats.items():
                    tb_writer.add_scalar(f'test_{k}', v, global_step=epoch, walltime=None)
                tb_writer.close()
            total_time = time.time() - start_time
            total_time_str = str(datetime.timedelta(seconds=int(total_time)))
            print('eval time {}'.format(total_time_str))
            return

                    
    print("Start training")
    start_time = time.time()

    # Track the best validation mAP seen so far; used to keep a single
    # "best" checkpoint in addition to the per-epoch ones.
    best_performance = 0
    for epoch in range(args.start_epoch, args.epochs):
        # DistributedSampler must be reseeded per epoch so each rank sees a
        # fresh shuffle; the CPU path has no distributed sampler.
        if args.device!='cpu':
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm, 
                                      args.amp, args.lr_decay, learning_rate_schedule, ema)
        if args.swa:
            # Stochastic Weight Averaging: swap in the averaged weights and
            # recompute BatchNorm statistics over the training set.
            optimizer.swap_swa_sgd()
            optimizer.bn_update(data_loader_train, model)
        lr_scheduler.step()
        if utils.is_main_process():   
            # Only rank 0 writes TensorBoard scalars.
            for k, v in train_stats.items():
                tb_writer.add_scalar(f'train_{k}', v, global_step=epoch, walltime=None)

        # Evaluation / checkpointing
        # NOTE(review): `epoch % 1 == 0` is always true, so a checkpoint is
        # saved every epoch unconditionally.
        if epoch % 1 == 0:
            checkpoint_path = os.path.join(output_dir, 'checkpoint_{}.pth'.format(epoch))
            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args,
            }, checkpoint_path)

######################################################################################################################
        # Skip the costly evaluation on most epochs.
        # NOTE(review): both branches test `epoch % 5 != 0`, so evaluation runs
        # every 5 epochs regardless of lr_drop. The second branch's inline
        # comment claims "every 2 epoch after lr_drop" — comment and code
        # disagree; confirm which was intended.
        if args.freeze_mode == 0 and epoch < args.lr_drop and epoch % 5 != 0:  ## eval every 5 epoch before lr_drop
            continue
        elif args.freeze_mode == 0 and epoch >= args.lr_drop and epoch % 5 != 0:  ## eval every 2 epoch after lr_drop
            continue   

        with torch.no_grad():
            if epoch==args.epochs-1:
                # Final epoch: also pass the TensorBoard writer to evaluate_hoi.
                test_stats = evaluate_hoi(args.dataset_file, model, postprocessors, data_loader_val, args.subject_category_id, device, 
                                        args, epoch, tb_writer=tb_writer, ema=ema)
            else:
                test_stats = evaluate_hoi(args.dataset_file, model, postprocessors, data_loader_val, args.subject_category_id, device, 
                                        args, epoch, ema=ema)
        if utils.is_main_process():  
            for k, v in test_stats.items():
                tb_writer.add_scalar(f'test_{k}', v, global_step=epoch, walltime=None)  

        coco_evaluator = None
        # Headline metric differs per benchmark: default-setting mAP for
        # HICO-DET, overall mAP for V-COCO.
        # NOTE(review): `performance` is left unbound if dataset_file is
        # neither 'hico' nor 'vcoco' — the comparison below would then raise
        # NameError. Confirm these are the only datasets reaching this loop.
        if args.dataset_file == 'hico':
            performance = test_stats['mAP_def']
        elif args.dataset_file == 'vcoco':
            performance = test_stats['mAP_all']

        if performance > best_performance:
            # New best validation score: overwrite the single "best" checkpoint.
            checkpoint_path = os.path.join(output_dir, 'checkpoint_best.pth')
            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args,
            }, checkpoint_path)
        
            best_performance = performance
        

        # One JSON line per epoch with all train/test scalars.
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                    **{f'test_{k}': v for k, v in test_stats.items()},
                    'epoch': epoch,
                    'n_parameters': n_parameters,
                    #'flops':flops / 1000000.0
                    }
        torch.distributed.barrier()
        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")

            # for evaluation logs
            # NOTE(review): coco_evaluator is always None above, so this
            # branch is currently dead code kept from the original DETR script.
            if coco_evaluator is not None:
                (output_dir / 'eval').mkdir(exist_ok=True)
                if "bbox" in coco_evaluator.coco_eval:
                    filenames = ['latest.pth']
                    if epoch % 50 == 0:
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval" / name)
    
    # Training finished: sync ranks, then rank 0 reports the summary.
    torch.distributed.barrier()
    if utils.is_main_process():
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print("best mAP: {:.4f}".format(best_performance))
        print('Training time {}'.format(total_time_str))
        with (output_dir / "log.txt").open("a") as f:
            f.write(json.dumps("best mAP: {:.4f}".format(best_performance)) + "\n")  
            f.write(json.dumps('Training time {}'.format(total_time_str)) + "\n") 
        tb_writer.close()

    # For V-COCO, a final pass generates detections on all ranks, rank 0
    # pickles them to disk and runs the official evaluator on the cache file.
    if args.dataset_file=='vcoco':
        with torch.no_grad():
            # NOTE(review): `post_processor` (singular) differs from the
            # `postprocessors` name used elsewhere — presumably bound earlier
            # in main(); confirm it exists in this scope.
            detections = generate(model, post_processor, data_loader_val, device, verb_classes, args.missing_category_id)            
        torch.distributed.barrier()
        if utils.is_main_process():
            # NOTE(review): this rebinds `output_dir` from a Path to a str, and
            # plain string concatenation assumes args.output_dir ends with a
            # path separator (otherwise e.g. 'runscache.pkl') — confirm.
            output_dir=args.output_dir + "cache.pkl"
            with open(output_dir, 'wb') as f:
                pickle.dump(detections, f, protocol=2)
        torch.distributed.barrier()
        torch.cuda.empty_cache()
        if utils.is_main_process():
            print('start eval')
            start_time = time.time()
            if not os.path.exists(output_dir):
                raise NotImplementedError(f"No det file, please check.")
            # NOTE(review): evaluator annotation paths are hard-coded to this
            # machine's dataset layout — not portable.
            vcocoeval = VCOCOeval(vsrl_annot_file='/WHZH/dataset/v-coco/data/vcoco/vcoco_test.json', 
                                coco_annot_file='/WHZH/dataset/v-coco/data/instances_vcoco_all_2014.json', 
                                split_file='/WHZH/dataset/v-coco/data/splits/vcoco_test.ids')
            total_time = time.time() - start_time
            total_time_str = str(datetime.timedelta(seconds=int(total_time)))
            print('data load time {}'.format(total_time_str))
            vcocoeval._do_eval(output_dir, ovr_thresh=0.5, mode=args.mode,  ignore_point = args.ignore_point)  
        torch.distributed.barrier() 
    return

if __name__ == '__main__':
    # Parse CLI arguments (shared parser defined in get_args_parser) and make
    # sure the output directory exists before any worker tries to write to it.
    parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    # Rendezvous info for torch.distributed; must be in the environment before
    # workers are spawned. os.environ values must be str, so coerce the port.
    os.environ["MASTER_ADDR"] = args.master_addr
    os.environ["MASTER_PORT"] = str(args.master_port)
    # Determinism knobs: CUBLAS_WORKSPACE_CONFIG enables deterministic cuBLAS
    # kernels; PYTHONHASHSEED fixes Python hash randomization in the workers.
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    start_time = time.time()
    if args.device == 'cuda':
        # One worker process per rank; each receives (local_rank, args).
        mp.spawn(main, nprocs=args.world_size, args=(args,))
    elif args.device == 'cpu':
        # Single-process debug path.
        main(0, args)
    else:
        # Previously an unrecognized device silently skipped training/eval
        # entirely; fail loudly instead.
        raise ValueError(f"Unsupported device '{args.device}'; expected 'cuda' or 'cpu'.")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('running time {}'.format(total_time_str))