# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detectron2/blob/master/demo/demo.py
import argparse
import glob
import multiprocessing as mp
import os
# fmt: off
import sys

sys.path.insert(1, os.path.join(sys.path[0], '..'))
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# fmt: on

import time

import cv2
import torch
import numpy as np
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import ColorMode, Visualizer
from param import get_param
from predictor import Merge_6, Split_6, VisualizationDemo

from maskdino import add_maskdino_config, add_deeplab_config
from maskdino.data.detection_utils import read_dianjiao_image
import detectron2.projects.deeplab

# constants
WINDOW_NAME = "maskdino demo"


def setup_cfg(args):
    """Build a frozen detectron2 config from the parsed CLI arguments.

    Starts from the detectron2 defaults, registers the DeepLab and MaskDINO
    config extensions, then layers on the YAML file from --config-file and
    any 'KEY VALUE' overrides from --opts.
    """
    cfg = get_cfg()
    # Extension hooks must run before merging the file, otherwise their
    # keys would be unknown to yacs and the merge would fail.
    for register_extension in (add_deeplab_config, add_maskdino_config):
        register_extension(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg


def _str2bool(value):
    """argparse ``type=`` callable that parses textual booleans.

    ``type=bool`` is a classic argparse pitfall: ``bool("False")`` is True,
    so any non-empty value on the command line used to enable the flag.
    This helper accepts the usual spellings of true/false and rejects
    everything else with a proper argparse error.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean, got {value!r}")


def get_parser():
    """Build the command-line parser for the maskdino demo.

    Returns:
        argparse.ArgumentParser exposing config/weights/input/output options
        plus a trailing --opts 'KEY VALUE' override list.
    """
    parser = argparse.ArgumentParser(description="maskdino demo for builtin configs")
    parser.add_argument(
        "--config-file",
        default="configs/dianjiao_maskdino/fasternet/maskdino_Fasternet.yaml",
        metavar="FILE",
        help="path to config file",
    )
    # NOTE: several help strings below were previously copy-pasted from
    # unrelated options ("Take inputs from webcam", "Path to video file",
    # duplicated "Minimum score..."); they now describe the actual option.
    parser.add_argument(
        "--weight",
        default="output/test_0912_fasternet/model_final.pth",
        help="Path to the model checkpoint (overrides MODEL.WEIGHTS).",
    )
    parser.add_argument(
        "--project_name",
        default="Cancun_23",
        help="Project name used to look up the split/merge parameters.",
    )
    parser.add_argument(
        "--input",
        default=["workspaces/hjh9_data/dianjiao/Cancun_23/download_zip_file_594_2023-06-27/*.jpg"],
        nargs="+",
        help="A list of space separated input images; "
        "or a single glob pattern such as 'directory/*.jpg'",
    )
    parser.add_argument(
        "--output",
        default="result.jpg",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # BUGFIX: these three flags used `type=bool`, so e.g. `--use_sigmoid False`
    # evaluated to True (bool of any non-empty string). `_str2bool` keeps the
    # same `--flag <value>` interface but parses the value correctly.
    parser.add_argument(
        "--use_sigmoid",
        type=_str2bool,
        default=False,
        help="MaskDINO is False, DeepLabv3 is True",
    )
    parser.add_argument(
        "--save_visualize",
        type=_str2bool,
        default=True,
        help="Save blended visualization images to --output.",
    )
    parser.add_argument(
        "--save_mask",
        type=_str2bool,
        default=False,
        help="Save the raw merged segmentation mask as a PNG.",
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser

def judge_parallel(model):
    """Probe the largest batch size the model can run in one forward pass.

    Feeds a dummy 3x360x2640 image to ``model`` with batch sizes 6, 3 and 2
    (in that order) and returns the first one that succeeds; falls back to 1
    when every batched attempt fails (e.g. CUDA out of memory).

    Args:
        model: callable taking a list of detectron2-style input dicts
            (``{"image", "height", "width"}``).

    Returns:
        int: 6, 3, 2 or 1.
    """
    with torch.no_grad():
        raw = torch.randn((3, 360, 2640))
        raw_inputs = {"image": raw, "height": raw.shape[1], "width": raw.shape[2]}
        parallel_nums = 1
        for batch_size in (6, 3, 2):
            try:
                _ = model([raw_inputs] * batch_size)
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; the probe is still best-effort,
            # so any model failure just means "try a smaller batch".
            except Exception:
                continue
            parallel_nums = batch_size
            break
    return parallel_nums



if __name__ == "__main__":
    # Spawn (not fork) so CUDA state is not inherited by worker processes.
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)
    # NOTE: cfg is already frozen by setup_cfg(); dict-style assignment
    # bypasses yacs' freeze guard (which only intercepts attribute access).
    # Intentional override of the checkpoint path with --weight.
    cfg['MODEL']['WEIGHTS'] = args.weight

    demo = VisualizationDemo(cfg)

    # Largest batch size (6, 3, 2 or 1) the model/GPU can handle at once.
    parallel_nums = judge_parallel(demo.predictor.model)
    print(f"parallel_nums: {parallel_nums}")

    time_counts = []  # per-image inference seconds, excluding image read
    if args.input:
        if len(args.input) == 1:
            # A single --input argument may be a glob pattern; expand it.
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input, disable=not args.output):
            # use PIL, to be consistent with evaluation
            param = get_param(args.project_name)
            # Canvas the six part-masks are merged back onto.
            # NOTE(review): assumes a fixed 3648x5472 sensor — confirm.
            black_img = np.zeros([3648, 5472], dtype=np.uint8)

            start_time = time.time()
            image = read_dianjiao_image(path, format="BGR")
            t1 = time.time()
            # Split the full frame into six named crops.
            dict_part2img = Split_6(image, param)
            part2img_keys = dict_part2img.keys()

            dict_part2mask = {}
            if parallel_nums > 1:
                # BUGFIX: the previous per-size branches zipped the first and
                # second halves of the crop list, which both fed the model the
                # wrong batch size (2 when it supported 3; 3 when it only
                # supported 2) and interleaved the masks as (0,3),(1,4),(2,5),
                # so `zip(part2img_keys, mask_list)` assigned masks to the
                # wrong parts. Run contiguous chunks of size `parallel_nums`
                # instead, which preserves the key order.
                img_list = list(dict_part2img.values())
                mask_list = []
                for i in range(0, len(img_list), parallel_nums):
                    batch = img_list[i:i + parallel_nums]
                    mask_list.extend(
                        demo.run_model_parallel(
                            batch,
                            threshold=args.confidence_threshold,
                            use_sigmoid=args.use_sigmoid,
                        )
                    )
                for part, mask in zip(part2img_keys, mask_list):
                    dict_part2mask[part] = mask.astype(np.uint8)
            else:
                # Fallback: run one crop at a time.
                for part, img in dict_part2img.items():
                    mask = demo.run_model(img, threshold=args.confidence_threshold, use_sigmoid=args.use_sigmoid)
                    dict_part2mask[part] = mask.astype(np.uint8)

            # Left/right crops are processed rotated; rotate their masks back
            # before merging.
            for part, mask in dict_part2mask.items():
                if part == 'l' or part == 'r':
                    dict_part2mask[part] = np.rot90(mask, -1)

            mask = Merge_6(black_img, dict_part2mask, param)
            time_counts.append(time.time() - t1)

            # NOTE(review): this `continue` deliberately skips everything
            # below (visualization and saving) so the loop only measures
            # inference time. Remove it to re-enable output.
            continue

            image = image[:, :, ::-1]  # BGR -> RGB for the visualizer
            visualizer = Visualizer(image, demo.metadata, instance_mode=demo.instance_mode)

            # Invert the mask (0 -> 255, 255 -> 0) before drawing the overlay.
            if args.save_visualize:
                draw_mask = 255 - torch.tensor(mask, dtype=torch.int64)
                visualized_output = visualizer.draw_dianjiao_seg(draw_mask, alpha=0.2)

            logger.info(
                "{}: finished in {:.2f}s".format(
                    path,
                    time.time() - start_time,
                )
            )

            if args.output:
                # An --output without an image extension is treated as a
                # directory; create it if needed.
                if not args.output.endswith(('.jpg', '.png')):
                    os.makedirs(args.output, exist_ok=True)
                if os.path.isdir(args.output):
                    out_filename = os.path.join(args.output, os.path.basename(path))
                    mask_out_filename = os.path.join(args.output, os.path.basename(path).replace('.jpg', '.png'))
                else:
                    assert len(args.input) == 1, "Please specify a directory with args.output"
                    out_filename = args.output
                    mask_out_filename = os.path.join(os.path.dirname(args.output), os.path.basename(path).split('.')[0] + '_mask.png')
                if args.save_visualize:
                    visualized_output.save(out_filename)
                if args.save_mask:
                    cv2.imwrite(mask_out_filename, mask)
            else:
                # No output path: show interactively in an OpenCV window.
                cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
                cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
                if cv2.waitKey(0) == 27:
                    break  # esc to quit
    else:
        print("没有输入.")

    print("平均每张图耗时: {}".format(round(np.mean(time_counts), 3)))