# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.

This script is a simplified version of the training script in detectron2/tools.
"""
# Best-effort: silence ShapelyDeprecationWarning emitted via fvcore.
# Shapely may be absent entirely, so any failure here is non-fatal.
try:
    from shapely.errors import ShapelyDeprecationWarning
    import warnings
    warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # raised during import are no longer swallowed.
    pass
import sys

# Make the repo root and the vendored detectron2 checkout importable.
sys.path.append("./")
sys.path.append("./detectron2")
sys.path.append("./detectron2/detectron2")

import copy
import itertools
import logging
import os
import time
import datetime
from collections import OrderedDict
from typing import Any, Dict, List, Set

import torch
from torch import nn
from contextlib import ExitStack, contextmanager

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
    DatasetCatalog,MetadataCatalog, get_detection_dataset_dicts,
    build_detection_train_loader,build_detection_test_loader)
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from detectron2.utils.logger import log_every_n_seconds

# MaskFormer
from mask2former import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
    InstanceSegEvaluator,
    GirEvaluator,
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
    SemanticSegmentorWithTTA,
    add_maskformer2_config,
)

# os.environ["CUDA_VISIBLE_DEVICES"]=""
os.environ['DETECTRON2_DATASETS'] = '/root/ws/data'
# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
@contextmanager
def inference_context_gir(model):
    """
    A context where the model is temporarily changed to eval mode,
    and restored to previous mode afterwards.

    Args:
        model: a torch Module

    Yields:
        None. Inside the ``with`` block the model is in eval mode.
    """
    training_mode = model.training
    model.eval()
    try:
        yield
    finally:
        # Restore even if the body raises; the previous bare `yield`
        # leaked eval mode on exceptions.
        model.train(training_mode)
    
g_all_ids_tensor = None
g_all_feats_tensor = None

g_save_file = ""



def test_evaluate():
    """
    Smoke-test of the per-id feature statistics computed in
    Trainer._evaluate, driven by random data instead of model outputs.

    For each unique id: the mean feature vector, and the std of the L2
    distances of each feature row to that mean. The result dict
    {"mean": {id: vec}, "std": {id: scalar}} is saved to ``g_save_file``.
    """
    # Local names: the previous version shadowed the module globals
    # (g_all_ids_tensor / g_all_feats_tensor) without assigning them.
    ids_tensor = torch.randint(0, 10, (300,))
    feats_tensor = torch.rand((300, 4096), dtype=torch.float)

    result_mean = {}
    result_std = {}
    for class_id in torch.unique(ids_tensor):
        # Feature rows belonging to this id.
        inds = torch.where(ids_tensor == class_id)
        fs = feats_tensor[inds]
        mean_fs = torch.mean(fs, dim=0)

        # Std of the L2 distances of each feature to the class mean.
        dists = torch.linalg.norm(fs - mean_fs, dim=1)
        std_fs = torch.std(dists)

        # Plain int keys, consistent with Trainer._evaluate (which calls
        # .item()); previously the keys here were 0-dim tensors.
        key = class_id.item()
        result_mean[key] = mean_fs
        result_std[key] = std_fs

    res = {"mean": result_mean, "std": result_std}
    torch.save(res, g_save_file)

class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to MaskFormer.

    Used here only for model construction (build_model, inherited) and
    for the feature-extraction pass in `test_before`; no training loop
    is run by this script.
    """


    @classmethod
    def _evaluate(cls):
        """
        Aggregate the features accumulated in the module-level globals
        (g_all_ids_tensor / g_all_feats_tensor) into per-id statistics
        and save everything to the module-level ``g_save_file`` path.

        For each unique id: the mean feature vector, and the std of the
        L2 distances of that id's features to the mean. The raw features
        and ids are saved alongside the statistics.
        """
        # Nothing was accumulated (e.g. _process_gir never ran).
        if g_all_ids_tensor is None:
            return
        result_mean = {}
        result_std = {}
        ids = torch.unique(g_all_ids_tensor)
        for id in ids:
            # Rows of the feature matrix that belong to this id.
            id_inds = torch.where(g_all_ids_tensor==id)
            id_fs = g_all_feats_tensor[id_inds]
            mean_fs = torch.mean(id_fs,dim=0)
            
            # Scalar spread: std of the L2 distances to the mean vector.
            id_fs_l2 = torch.linalg.norm(id_fs-mean_fs,dim=1)
            std_fs = torch.std(id_fs_l2)
            
            # Use a plain Python int as dict key in the saved file.
            id = id.item()
            result_mean[id] = mean_fs
            result_std[id] = std_fs
        
        res = {
            "mean":result_mean,
            "std":result_std,
            "all_features":g_all_feats_tensor,
            "all_ids":g_all_ids_tensor
            }
        torch.save(res,g_save_file)
        pass

    @classmethod
    def _process_gir(cls,inputs,outputs):
        """
        Append one batch of model outputs to the module-level accumulators.

        Args:
            inputs: the batch fed to the model (currently unused).
            outputs: dict returned by ``model.forward_eval_width_tgt``;
                read keys are 'instance_tgt_ids' and 'instance_perd_feats'.
                NOTE(review): 'instance_perd_feats' looks like a typo for
                "pred", but the key must match what the model emits —
                confirm against the model code before renaming.
        """
        global g_all_ids_tensor
        global g_all_feats_tensor
        ids_tensor = outputs['instance_tgt_ids']
        feats_tensor = outputs['instance_perd_feats']
        if g_all_ids_tensor is None:
            # Lazy init on the first batch; move the feature buffer to the
            # same device/dtype as the incoming features before concat.
            g_all_ids_tensor = torch.tensor([],dtype=torch.int)
            g_all_feats_tensor = torch.tensor([])
            g_all_feats_tensor = g_all_feats_tensor.to(feats_tensor)
        g_all_ids_tensor = torch.cat((g_all_ids_tensor,ids_tensor),dim = 0)
        g_all_feats_tensor = torch.cat((g_all_feats_tensor,feats_tensor),dim=0)
        pass
    
    
    @classmethod
    def test_before(cls,cfg, model):
        """
        Run the model in eval mode over a TRAIN dataset and accumulate
        per-instance features via `_process_gir`, then write aggregated
        statistics with `_evaluate`.

        The timing/progress logging mirrors detectron2's
        ``inference_on_dataset``.

        Args:
            cfg: a frozen detectron2 config.
            model: the model to evaluate; must expose
                ``forward_eval_width_tgt`` (presumably forward pass with
                ground-truth targets attached — confirm in model code).
        """
        #data_loader = cls.build_train_loader(cfg)
        # NOTE(review): this loop overwrites `data_loader` each iteration,
        # so only the LAST dataset in cfg.DATASETS.TRAIN is actually used.
        for idx, dataset_name in enumerate(cfg.DATASETS.TRAIN):
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
            dataset = get_detection_dataset_dicts(dataset_name)
            data_loader = build_detection_test_loader(dataset,mapper=mapper,batch_size=16)
        # model = create_ddp_model(model, broadcast_buffers=False)
        # total = len(data_loader.dataset)  # inference data loader must have a fixed length
        # for idx, dataset_name in enumerate(cfg.DATASETS.TRAIN):
            # data_loader = cls.build_test_loader(cfg, dataset_name)
        total = len(data_loader)
        # total = 1000
        
        # The first few iterations are excluded from timing averages.
        num_warmup = 5
        start_time = time.perf_counter()
        total_data_time = 0
        total_compute_time = 0
        total_eval_time = 0
        with ExitStack() as stack:
            # Switch to eval mode (restored on exit) and disable autograd.
            if isinstance(model, nn.Module):
                stack.enter_context(inference_context_gir(model))
            stack.enter_context(torch.no_grad())

            start_data_time = time.perf_counter()
            for idx, inputs in enumerate(data_loader):
                total_data_time += time.perf_counter() - start_data_time
                # Reset the timers once warmup is over.
                if idx == num_warmup:
                    start_time = time.perf_counter()
                    total_data_time = 0
                    total_compute_time = 0
                    total_eval_time = 0
                start_compute_time = time.perf_counter()
                outputs = model.forward_eval_width_tgt(inputs)
                
                # Synchronize so compute time includes the async GPU work.
                if torch.cuda.is_available():
                    torch.cuda.synchronize()
                total_compute_time += time.perf_counter() - start_compute_time

                start_eval_time = time.perf_counter()
                cls._process_gir(inputs,outputs)
                total_eval_time += time.perf_counter() - start_eval_time

                iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
                data_seconds_per_iter = total_data_time / iters_after_start
                compute_seconds_per_iter = total_compute_time / iters_after_start
                eval_seconds_per_iter = total_eval_time / iters_after_start
                total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
                # Log progress at most every 5 seconds, once past warmup
                # (or immediately if iterations are very slow).
                if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
                    log_every_n_seconds(
                        logging.INFO,
                        (
                            f"Inference done {idx + 1}/{total}. "
                            f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
                            f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
                            f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
                            f"Total: {total_seconds_per_iter:.4f} s/iter. "
                        ),
                        n=5,
                    )
                start_data_time = time.perf_counter()
            
            # Aggregate and save everything accumulated above.
            cls._evaluate()
            
        pass

def setup(args):
    """
    Build a frozen detectron2 config from the parsed command-line args
    and initialize output directories and logging.
    """
    cfg = get_cfg()
    # Register extra config keys before merging the config file:
    # deeplab keys are needed for the poly LR schedule.
    add_deeplab_config(cfg)
    add_maskformer2_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    # Dedicated logger for the "mask2former" module.
    setup_logger(
        output=cfg.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="mask2former",
    )
    return cfg

def main(args):
    """Per-worker entry point invoked by detectron2's launcher."""
    cfg = setup(args)

    model = Trainer.build_model(cfg)
    checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)

    # Extract instance features over the training data with the loaded model.
    Trainer.test_before(cfg, model)

    # res = Trainer.test(cfg, model)
    # if cfg.TEST.AUG.ENABLED:
    #     res.update(Trainer.test_with_TTA(cfg, model))
    # if comm.is_main_process():
    #     verify_results(cfg, res)
    # return res

import warnings
from gen_instance_config import GenConfig

# Output path for the collected instance features, from the shared config.
# g_save_file = "/root/autodl-tmp/models/girformer_gir_meshb_r50_160k_noclass/instance_features_final.pth"
g_save_file = GenConfig["g_save_file"]
os.environ["DETECTRON2_DATASETS"] = "/root/ws/data/"
if __name__ == "__main__":

    # Inject the generated options as if passed on the CLI so that
    # detectron2's default argument parser picks them up.
    sys.argv.append("--config-file")
    sys.argv.append(GenConfig["config_file"])
    sys.argv.append("--num-gpus")
    # argv entries must be strings; GenConfig may hold the GPU count as an
    # int, which would make argparse crash.
    sys.argv.append(str(GenConfig["num_gpus"]))
    sys.argv.append("MODEL.WEIGHTS")
    sys.argv.append(GenConfig["MODEL_WEIGHTS"])


    # sys.argv.append("./configs/mesh_buildings/gir/girformer_R50_bs16_160k.yaml")
    # sys.argv.append("/root/autodl-tmp/models/girformer_gir_meshb_r50_160k_triloss/model_0004999.pth")
    # sys.argv.append("/root/ws/logs/models/girformer_gir_meshb_r50_160k_triloss/model_final.pth")


    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    warnings.filterwarnings("ignore")
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
