import sys

sys.path.append(".")

import argparse
from pathlib import Path

import cv2
import torch
import numpy as np
import onnxruntime as ort

from auxmodels.utils.calc import np_sigmoid

from src.models.model import CenterNet
from src.models.decoder import Decoder

# Palette of distinct colors for drawing detection boxes; plot_results indexes
# it by the detection's class id.
# NOTE(review): cv2 drawing functions interpret tuples as BGR — confirm the
# intended color order for these entries.
COLORS = [
    (255, 0, 0),
    (0, 255, 0),
    (0, 0, 255),
    (255, 255, 0),
    (255, 0, 255),
    (0, 255, 255),
    (0, 0, 0),
    (127, 127, 127),
]


def plot_results(image, outputs, results):
    """Draw detection boxes/scores on `image` and render the model heatmap.

    Args:
        image: BGR image (H, W, 3) to draw on; modified in place.
        outputs: raw model outputs; outputs[1] is the heatmap logits, either a
            torch tensor or a numpy array (onnx path).
        results: decoded detections; results[0] is the first batch entry, an
            iterable of objects indexed as obj[1]=score, obj[2]=class id,
            obj[3:7]=x1, y1, x2, y2 (inferred from the indexing below —
            TODO confirm against Decoder).

    Returns:
        Tuple of (annotated image, uint8 heatmap visualization).
    """
    detections = results[0]
    if detections is not None:
        for obj in detections:
            p1 = (int(obj[3]), int(obj[4]))
            p2 = (int(obj[5]), int(obj[6]))
            # Modulo clamps the class id so an id >= len(COLORS) cannot raise
            # IndexError (the original indexed COLORS directly).
            color = COLORS[int(obj[2]) % len(COLORS)]
            cv2.rectangle(image, p1, p2, color, 2)
            cv2.putText(image, f"{obj[1]:.2f}", p1, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

    # Heatmap logits -> probabilities; handle both torch and numpy backends.
    if torch.is_tensor(outputs[1]):
        heatmap = torch.sigmoid(outputs[1]).squeeze().detach().cpu().numpy()
    else:
        heatmap = np_sigmoid(outputs[1].squeeze())
    hm_h, hm_w = heatmap.shape
    # Upscale 4x for display (the model downsamples by stride 4 — presumably;
    # verify against the network architecture).
    heatmap = cv2.resize(heatmap, (4 * hm_w, 4 * hm_h))
    heatmap = (heatmap * 255).astype(np.uint8)
    return image, heatmap


def set_video_io(video_path, input_size, output_dir):
    """Open `video_path` for reading and create a writer for the annotated copy.

    Args:
        video_path: path of the input video file.
        input_size: (width, height) of the frames that will be written.
        output_dir: directory receiving the output file (same basename as input).

    Returns:
        Tuple of (cv2.VideoCapture, cv2.VideoWriter).

    Raises:
        ValueError: if the output path would overwrite the input file.
    """
    vc = cv2.VideoCapture(video_path)
    fourcc = cv2.VideoWriter.fourcc("m", "p", "4", "v")

    # Preserve the source frame rate; CAP_PROP_FPS returns 0 when unknown,
    # in which case fall back to the original hard-coded 30 fps.
    fps = vc.get(cv2.CAP_PROP_FPS) or 30

    out_dir = Path(output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)  # writer fails silently on a missing dir
    output_path = str(out_dir / Path(video_path).name)
    # Use a real exception instead of `assert` (asserts are stripped under -O).
    if output_path == video_path:
        raise ValueError(f"output path would overwrite input: {output_path}")

    vw = cv2.VideoWriter(output_path, fourcc, fps, input_size)
    return vc, vw


def set_run_mode(weight):
    """Infer the inference backend from the weight file's extension.

    Args:
        weight: path to a model weight file.

    Returns:
        "torch" for .pth files, "onnx" for .onnx files, None otherwise.
    """
    backends = {".pth": "torch", ".onnx": "onnx"}
    return backends.get(Path(weight).suffix)


def set_model(weight, mode="torch", device="cpu"):
    """Build the inference backend for `weight`.

    Args:
        weight: path to a .pth state dict (torch mode) or .onnx file (onnx mode).
        mode: "torch" or "onnx".
        device: torch device string; only used in torch mode.

    Returns:
        A CenterNet module (torch mode) or an onnxruntime InferenceSession
        (onnx mode).

    Raises:
        ValueError: if `mode` is neither "torch" nor "onnx".
    """
    if mode == "torch":
        model = CenterNet(base_channel=32, repeats=3, num_cls=1)
        model.load_state_dict(torch.load(weight, map_location="cpu"))
        model = model.to(device)
    elif mode == "onnx":
        model = ort.InferenceSession(weight, providers=["CUDAExecutionProvider"])
    else:
        # BUG FIX: the original built Exception("无法识别运行模式") but never
        # raised it, so this branch fell through and crashed with
        # UnboundLocalError on `model`. Raise explicitly instead.
        raise ValueError("无法识别运行模式")  # "unrecognized run mode"
    return model


def run_inference(model, image, mode="torch", device="cpu"):
    """Run one forward pass of `model` on a single image.

    Args:
        model: a torch module (torch mode) or onnxruntime session (onnx mode).
        image: HWC image array (e.g. uint8 BGR frame).
        mode: "torch" or "onnx".
        device: torch device string; only used in torch mode.

    Returns:
        The raw model outputs (torch tensors or a list of numpy arrays).

    Raises:
        ValueError: if `mode` is neither "torch" nor "onnx".
    """
    if mode == "torch":
        # HWC -> CHW, add batch dim, cast to float32 for the network.
        inputs = torch.tensor(image.transpose(2, 0, 1), dtype=torch.float32).unsqueeze(0).to(device)
        return model(inputs)
    if mode == "onnx":
        inputs = np.expand_dims(image.transpose(2, 0, 1).astype(np.float32), axis=0)
        return model.run(None, {"input": inputs})
    # BUG FIX: the original fell through with `outputs` unbound, raising an
    # opaque UnboundLocalError; fail with a clear message instead.
    raise ValueError(f"unrecognized run mode: {mode!r}")


def test_model(video, weight, size, device="cpu"):
    """Run the detector over a video, showing annotated frames and the heatmap.

    Displays until the video ends or 'q' is pressed.

    Args:
        video: path of the input video file.
        weight: model weight file (.pth or .onnx); selects the backend.
        size: (width, height) each frame is resized to before inference.
        device: torch device string; only used for .pth weights.
    """
    run_mode = set_run_mode(weight)

    vc, vw = set_video_io(video, size, "resources/demo")
    model = set_model(weight, mode=run_mode, device=device)
    decoder = Decoder(0.3, scale=64)

    # BUG FIX: the original never released the capture/writer nor destroyed
    # the display windows; ensure cleanup even if a frame raises.
    try:
        ret, frame = vc.read()
        while ret:
            image = cv2.resize(frame, size)
            outputs = run_inference(model, image, run_mode, device)
            results = decoder.run(outputs)
            image, heatmap = plot_results(image, outputs, results)

            # vw.write(image)  # enable to save the annotated video
            cv2.imshow("image", image)
            cv2.imshow("hm", heatmap)
            if ord("q") == cv2.waitKey(1):
                break
            ret, frame = vc.read()
    finally:
        vc.release()
        vw.release()
        cv2.destroyAllWindows()

    return


if __name__ == "__main__":
    # CLI entry point: run the detection demo on a video file.
    parser = argparse.ArgumentParser(description="Run CenterNet video inference demo")
    parser.add_argument("--video", default="resources\\videos\\Y1.mp4", type=str)
    parser.add_argument("--weights", default="resources\\weights\\output\\20230620.onnx", type=str)
    # Generalized: device was previously hard-coded to "cuda:0"; same default,
    # now overridable (e.g. --device cpu).
    parser.add_argument("--device", default="cuda:0", type=str)
    args = parser.parse_args()

    # (width, height) order, as expected by cv2.resize / cv2.VideoWriter.
    input_size = (1024, 576)

    test_model(video=args.video, weight=args.weights, size=input_size, device=args.device)
