# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detectron2/blob/master/demo/demo.py
import argparse
import glob
import multiprocessing as mp
import os

# fmt: off
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on

import sys
sys.path.append("./")
sys.path.append("./detectron2")
sys.path.append("./detectron2/detectron2")

import tempfile
import time
import warnings
import io
import cv2
import numpy as np
import tqdm
import contextlib

import sys
sys.path.append("../")
sys.path.append("../detectron2")
sys.path.append("../detectron2/detectron2")

from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from detectron2.utils.file_io import PathManager

from mask2former import add_maskformer2_config
from predictor import VisualizationDemo
from gir_retrieval_evaluation import GirRetrievalEvaluator
from visualizer import Visualizer

from pycocotools.coco import COCO
import copy
import logging

import numpy as np
import pycocotools.mask as mask_util
import torch
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances, polygons_to_bitmask
from detectron2.data import MetadataCatalog, build_detection_train_loader,get_detection_dataset_dicts,build_detection_test_loader

# constants
WINDOW_NAME = "mask2former demo"


def setup_cfg(args):
    """Build a frozen detectron2 config from the parsed CLI arguments.

    Registers the DeepLab and Mask2Former config extensions, then merges
    the YAML file named by ``--config-file`` and any ``KEY VALUE``
    overrides supplied via ``--opts``.
    """
    cfg = get_cfg()
    # Both projects must extend the base config before merging the file.
    for extend in (add_deeplab_config, add_maskformer2_config):
        extend(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg


def get_parser():
    """Construct the argument parser for the demo script.

    Returns:
        argparse.ArgumentParser: parser exposing --config-file, --webcam,
        --video-input, --input, --output, --confidence-threshold and a
        trailing --opts remainder for config overrides.
    """
    p = argparse.ArgumentParser(description="maskformer2 demo for builtin configs")
    p.add_argument(
        "--config-file",
        metavar="FILE",
        default="configs/coco/panoptic-segmentation/maskformer2_R50_bs16_50ep.yaml",
        help="path to config file",
    )
    p.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    p.add_argument("--video-input", help="Path to video file.")
    p.add_argument(
        "--input",
        nargs="+",
        help=(
            "A list of space separated input images; "
            "or a single glob pattern such as 'directory/*.jpg'"
        ),
    )
    p.add_argument(
        "--output",
        help=(
            "A file or directory to save output visualizations. "
            "If not given, will show output in an OpenCV window."
        ),
    )
    p.add_argument(
        "--confidence-threshold",
        default=0.5,
        type=float,
        help="Minimum score for instance predictions to be shown",
    )
    # REMAINDER swallows everything after --opts as raw KEY VALUE pairs.
    p.add_argument(
        "--opts",
        nargs=argparse.REMAINDER,
        default=[],
        help="Modify config options using the command-line 'KEY VALUE' pairs",
    )
    return p

def image2instances(dataset_dict):
    """Load the image for ``dataset_dict`` and turn its COCO-style
    annotations into a detectron2 ``Instances`` object.

    Args:
        dataset_dict: detectron2 dataset dict with "file_name" and
            "annotations" keys; segmentations may be polygons, COCO RLE
            dicts, or 2D binary ndarrays.

    Returns:
        A deep copy of ``dataset_dict`` with "annotations" removed and
        two keys added: "image" (HxWxC RGB ndarray) and "instances"
        (carrying gt_classes, pred_masks and, when annotations contain
        "ins_id", pred_ins_ids).
    """
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="RGB")  # HxWxC

    # transform instance masks; keypoints are not used by this demo
    assert "annotations" in dataset_dict
    for anno in dataset_dict["annotations"]:
        anno.pop("keypoints", None)

    # Keep only non-crowd instances.
    annos = [
        obj for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]

    if len(annos):
        assert "segmentation" in annos[0]
    segms = [obj["segmentation"] for obj in annos]
    masks = []
    for segm in segms:
        if isinstance(segm, list):
            # polygon
            masks.append(polygons_to_bitmask(segm, *image.shape[:2]))
        elif isinstance(segm, dict):
            # COCO RLE
            masks.append(mask_util.decode(segm))
        elif isinstance(segm, np.ndarray):
            assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                segm.ndim
            )
            # mask array
            masks.append(segm)
        else:
            raise ValueError(
                "Cannot convert segmentation of type '{}' to BitMasks!"
                "Supported types are: polygons as list[list[float] or ndarray],"
                " COCO-style RLE as a dict, or a binary segmentation mask "
                " in a 2D numpy array of shape HxW.".format(type(segm))
            )

    masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]

    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)

    # Optional per-instance retrieval ids. Guard against empty `annos`:
    # the original indexed annos[0] unconditionally, which raised
    # IndexError for images whose annotations are all crowd/empty.
    g_ids = None
    if annos and "ins_id" in annos[0]:
        g_ids = torch.tensor([int(obj["ins_id"]) for obj in annos], dtype=torch.int64)

    # The image is HxWxC here (the CHW transpose above is commented out),
    # so the spatial size is the first two axes. The original read
    # shape[-2]/shape[-1], which gives (W, C) for an HWC array.
    image_shape = image.shape[:2]  # h, w

    # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
    # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
    dataset_dict["image"] = image

    # Prepare per-instance binary masks.
    instances = Instances(image_shape)
    instances.gt_classes = classes
    if len(masks) == 0:
        # Some image does not have annotation (all ignored)
        instances.pred_masks = torch.zeros((0, image_shape[0], image_shape[1]))
    else:
        masks = BitMasks(torch.stack(masks))
        instances.pred_masks = masks.tensor

    if g_ids is not None:
        instances.pred_ins_ids = g_ids

    dataset_dict["instances"] = instances

    return dataset_dict

def test_opencv_video_format(codec, file_ext):
    """Return True if this OpenCV build can write video with the given
    fourcc ``codec`` into a container with extension ``file_ext``.

    Writes a tiny 30-frame 10x10 clip into a temporary directory and
    checks whether the output file was actually created (VideoWriter
    fails silently for unsupported codec/container combinations).
    """
    # Renamed from `dir`, which shadowed the builtin.
    with tempfile.TemporaryDirectory(prefix="video_format_test") as tmp_dir:
        filename = os.path.join(tmp_dir, "test_file" + file_ext)
        writer = cv2.VideoWriter(
            filename=filename,
            fourcc=cv2.VideoWriter_fourcc(*codec),
            fps=float(30),
            frameSize=(10, 10),
            isColor=True,
        )
        # Plain loop instead of a list comprehension run only for its
        # side effect.
        for _ in range(30):
            writer.write(np.zeros((10, 10, 3), np.uint8))
        writer.release()
        return os.path.isfile(filename)

import warnings
from demo_cfg import DemoConfig

if __name__ == "__main__":

    # Inject the demo configuration (config file, input glob, output dir)
    # from DemoConfig so the script runs without explicit CLI arguments.
    sys.argv.append("--config-file")
    sys.argv.append(DemoConfig["config_file"])
    sys.argv.append("--input")
    sys.argv.append(DemoConfig["input"])
    sys.argv.append("--output")
    sys.argv.append(DemoConfig["output"])

    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)
    demo = VisualizationDemo(cfg)

    # Visualize the ground-truth instances of every image in the test set.
    dataset = get_detection_dataset_dicts(cfg.DATASETS.TEST)
    for data in tqdm.tqdm(dataset):
        instance = image2instances(data)
        visualized_output = demo.run_on_tgt_image(instance)

        if args.output:
            if os.path.isdir(args.output):
                # One output file per image, named after its image id.
                # str() also accepts integer COCO image ids, which the
                # original `image_id + '.png'` concatenation crashed on
                # (and the original's assert here was tautological).
                path = str(data['image_id']) + '.png'
                out_filename = os.path.join(args.output, os.path.basename(path))
            else:
                # --output names a single file: only valid for one input.
                assert len(args.input) == 1, "Please specify a directory with args.output"
                out_filename = args.output
            visualized_output.save(out_filename)
