import argparse

import cv2
import torch
import os
import numpy as np

from mmdet.apis import inference_detector, init_detector


def filter_results(results, scores=(0.3, 0.3, 0.3, 0.3)):
    """Zero out low-confidence detections, one threshold per class.

    Args:
        results: per-class list of ``(N, 5)`` arrays; the last column is
            the detection score (mmdet bbox result format —
            presumably ``[x1, y1, x2, y2, score]``; confirm against model).
        scores: per-class score thresholds. Must have at least
            ``len(results)`` entries. Default is an immutable tuple
            (a mutable list default would be shared across calls).

    Returns:
        List of arrays of the same shapes, where every row whose score is
        not above its class threshold is replaced by zeros (rows are kept,
        not dropped, so downstream indexing is unaffected).
    """
    outs = []
    for i, o in enumerate(results):
        # (N, 1) score column broadcasts against (N, 5): a whole row is
        # kept or zeroed based on its score.
        outs.append(np.where(o[:, -1:] > scores[i], o, np.zeros_like(o)))
    return outs


def parse_args(argv=None):
    """Parse command-line options for the comparison-video generator.

    Args:
        argv: optional list of argument strings. ``None`` (the default)
            means use ``sys.argv[1:]``, preserving the original behavior;
            passing a list makes the function testable without touching
            the process arguments.

    Returns:
        argparse.Namespace with ``ckpt_path``, ``input_name``,
        ``output_name`` and ``device`` attributes.
    """
    parser = argparse.ArgumentParser(
        description="generate txt format results",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--ckpt_path", "-cp", help="the model to inference.", type=str, default=None
    )
    parser.add_argument(
        "--input_name", "-ip", help="images directory.", type=str, default=None
    )
    parser.add_argument(
        "--output_name", "-o", help="output directory.", type=str, default=None
    )
    parser.add_argument(
        "--device", type=str, default="cuda:0", help="CPU/CUDA device option"
    )

    return parser.parse_args(argv)


def main():
    """Run two detectors over a video and write a side-by-side comparison.

    Each frame is rendered twice — left half by the hard-coded baseline
    model, right half by the model given via ``--ckpt_path`` — and the two
    halves are concatenated into an output video twice the input width.
    """
    args = parse_args()

    # map_location="cpu" so loading a CUDA-saved checkpoint works even on
    # GPU-less hosts; only the embedded config text is read from it here,
    # and init_detector moves the weights to the requested device itself.
    ckpt = torch.load(args.ckpt_path, map_location="cpu")
    # mmdet embeds the training config in checkpoint meta; init_detector
    # needs it as a file on disk, so write it to a temporary config file.
    with open("tmp.py", "w") as f:
        f.write(ckpt["meta"]["config"])
    # Honor --device (was hard-coded to "cuda:0", silently ignoring the
    # user's choice; the torch.device built from it was never used).
    model = init_detector("tmp.py", args.ckpt_path, device=args.device)
    os.remove("tmp.py")

    # Baseline model to compare against — fixed path by design.
    bs_ckpt_path = "/workspace/algorithm/mmdetection/work_dirs/cm_2/epoch_12.pth"
    bs_ckpt = torch.load(bs_ckpt_path, map_location="cpu")
    with open("tmp2.py", "w") as f:
        f.write(bs_ckpt["meta"]["config"])
    bs_model = init_detector("tmp2.py", bs_ckpt_path, device=args.device)
    os.remove("tmp2.py")

    camera = cv2.VideoCapture(args.input_name)

    fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
    fps = camera.get(cv2.CAP_PROP_FPS)
    size = (
        int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )
    # Output frame is the two rendered frames side by side: double width.
    out = cv2.VideoWriter(args.output_name, fourcc, fps, (size[0] * 2, size[1]))

    try:
        while True:
            ret_val, img = camera.read()
            if not ret_val:
                # End of stream (or read failure) — stop processing.
                break
            result = inference_detector(model, img)
            bs_result = inference_detector(bs_model, img)

            # Baseline rendering on the left, candidate model on the right.
            frame1 = bs_model.show_result(
                img,
                bs_result,
                score_thr=0.40,
                wait_time=1,
                show=False,
                thickness=2,
                font_scale=1,
            )
            frame2 = model.show_result(
                img,
                result,
                score_thr=0.40,
                wait_time=1,
                show=False,
                thickness=2,
                font_scale=1,
            )

            # NOTE: a separator strip between the halves was dead code here;
            # adding one would mismatch the VideoWriter's declared width.
            merge_frame = np.concatenate([frame1, frame2], axis=1)
            out.write(merge_frame)
    finally:
        # Always release capture/writer handles, even if inference fails,
        # so the output container is finalized.
        camera.release()
        out.release()


# Script entry point: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
