#!/usr/bin/env python3
# coding=utf-8
from pathlib import Path

import cv2
import depthai as dai
import numpy as np

numClasses = 80  # COCO class count; must match the network the blob was compiled from

# Load the compiled OpenVINO blob that sits next to this script and read the
# network's expected input geometry so the camera preview can be sized to match.
blob = Path(__file__).parent.joinpath("yolov11n_openvino_2021.4_6shave.blob")
model = dai.OpenVINO.Blob(blob)
dim = next(iter(model.networkInputs.values())).dims
# assumes dims are ordered [W, H, ...] as in the upstream depthai spatial-yolo
# example — TODO confirm for this depthai version
W, H = dim[:2]

# NOTE(review): output_name/output_tenser are not used anywhere visible below;
# "tenser" looks like a typo for "tensor" — confirm nothing else references it
# before renaming or removing.
output_name, output_tenser = next(iter(model.networkOutputs.items()))

# COCO class labels, indexed by the integer `detection.label` emitted by the
# network (index 0 = "person", ... index 79 = "toothbrush").
# fmt: off
labelMap = [
    "person",         "bicycle",    "car",           "motorbike",     "aeroplane",   "bus",           "train",
    "truck",          "boat",       "traffic light", "fire hydrant",  "stop sign",   "parking meter", "bench",
    "bird",           "cat",        "dog",           "horse",         "sheep",       "cow",           "elephant",
    "bear",           "zebra",      "giraffe",       "backpack",      "umbrella",    "handbag",       "tie",
    "suitcase",       "frisbee",    "skis",          "snowboard",     "sports ball", "kite",          "baseball bat",
    "baseball glove", "skateboard", "surfboard",     "tennis racket", "bottle",      "wine glass",    "cup",
    "fork",           "knife",      "spoon",         "bowl",          "banana",      "apple",         "sandwich",
    "orange",         "broccoli",   "carrot",        "hot dog",       "pizza",       "donut",         "cake",
    "chair",          "sofa",       "pottedplant",   "bed",           "diningtable", "toilet",        "tvmonitor",
    "laptop",         "mouse",      "remote",        "keyboard",      "cell phone",  "microwave",     "oven",
    "toaster",        "sink",       "refrigerator",  "book",          "clock",       "vase",          "scissors",
    "teddy bear",     "hair drier", "toothbrush"
]
# fmt: on

# Weights used when alpha-blending the two display frames; the trackbar
# callback (updateBlendWeights) keeps their sum equal to 1.0.
rgbWeight = 0.5
depthWeight = 0.5

def updateBlendWeights(percent_rgb):
    """
    Trackbar callback: refresh the global rgb/depth blending weights.

    @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
    """
    global rgbWeight, depthWeight
    new_rgb = float(percent_rgb) / 100.0
    rgbWeight = new_rgb
    depthWeight = 1.0 - new_rgb


def create_pipeline(device):
    """
    Build the DepthAI pipeline: 1080p color camera and 400p stereo pair
    feeding a YOLO spatial detection network, with "image" (ISP frames)
    and "detections" XLink output streams.

    @param[in] device Connected dai.Device; used only to read calibration
                      for the RGB manual-focus value.
    @return (pipeline, maxDisparity) — the configured dai.Pipeline and the
            stereo config's maximum disparity value.
    """
    monoResolution = dai.MonoCameraProperties.SensorResolution.THE_400_P
    pipeline = dai.Pipeline()

    # Define sources and outputs
    camRgb = pipeline.create(dai.node.ColorCamera)
    left = pipeline.create(dai.node.MonoCamera)
    right = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)
    spatialDetectionNetwork = pipeline.create(dai.node.YoloSpatialDetectionNetwork)

    # NOTE: enable if an IR dot projector is fitted:
    # device.setIrLaserDotProjectorBrightness(800)

    xoutRgb = pipeline.create(dai.node.XLinkOut)
    xoutNN = pipeline.create(dai.node.XLinkOut)
    xoutRgb.setStreamName("image")
    xoutNN.setStreamName("detections")

    camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    camRgb.setFps(30)
    camRgb.setIspScale(1, 3)  # downscale 1080p ISP output by 1/3
    camRgb.setPreviewSize(W, H)  # preview must match the NN input size
    camRgb.setInterleaved(False)
    camRgb.setPreviewKeepAspectRatio(False)

    # For now, RGB needs fixed focus to properly align with depth.
    # Use the lens position recorded during calibration when available.
    # (The previous `try/except: raise` wrapper was a no-op and was removed;
    # calibration-read errors still propagate to the caller unchanged.)
    calibData = device.readCalibration2()
    lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB)
    if lensPosition:
        camRgb.initialControl.setManualFocus(lensPosition)

    left.setResolution(monoResolution)
    left.setBoardSocket(dai.CameraBoardSocket.LEFT)
    left.setFps(30)
    right.setResolution(monoResolution)
    right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    right.setFps(30)

    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_ACCURACY)
    stereo.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
    # LR-check is required for depth alignment
    stereo.setLeftRightCheck(True)
    stereo.setSubpixel(True)
    stereo.setExtendedDisparity(False)
    stereo.setDepthAlign(dai.CameraBoardSocket.RGB)

    # Network specific settings
    spatialDetectionNetwork.setBlob(model)
    spatialDetectionNetwork.setConfidenceThreshold(0.5)

    # Yolo specific parameters (anchorless model, hence empty anchors/masks)
    spatialDetectionNetwork.setNumClasses(numClasses)
    spatialDetectionNetwork.setCoordinateSize(4)
    spatialDetectionNetwork.setAnchors([])
    spatialDetectionNetwork.setAnchorMasks({})
    spatialDetectionNetwork.setIouThreshold(0.5)

    # Spatial specific parameters. The scale factor was previously set twice
    # (0.3, then 1); only the final value ever took effect, so the redundant
    # first call has been dropped — effective configuration is unchanged.
    spatialDetectionNetwork.setBoundingBoxScaleFactor(1)
    spatialDetectionNetwork.setDepthLowerThreshold(10)  # mm
    spatialDetectionNetwork.setDepthUpperThreshold(50000)  # mm

    # Linking
    camRgb.isp.link(xoutRgb.input)
    camRgb.preview.link(spatialDetectionNetwork.input)
    left.out.link(stereo.left)
    right.out.link(stereo.right)
    stereo.depth.link(spatialDetectionNetwork.inputDepth)
    spatialDetectionNetwork.out.link(xoutNN.input)

    return pipeline, stereo.initialConfig.getMaxDisparity()


def run():
    """
    Connect to the device, start the pipeline, and display a blended
    detection view until 'q' is pressed or the device disconnects.
    """
    # Removed: dead `global refPt, click_roi` (those names were never read or
    # written inside this function) and the unused `spatialData = []` reset —
    # both leftovers of commented-out spatial-calculator / mouse-ROI code.
    with dai.Device() as device:
        # maxDisparity is currently unused; kept to document the return shape.
        pipeline, maxDisparity = create_pipeline(device)
        device.startPipeline(pipeline)

        frameRgb = None
        frameDisp = None
        detections = []

        # Single display window; the trackbar adjusts the blending ratio of
        # the annotated frame vs. the side-bar overlay frame.
        blendedWindowName = "rgb-depth"
        cv2.namedWindow(blendedWindowName)
        cv2.createTrackbar(
            "RGB Weight %",
            blendedWindowName,
            int(rgbWeight * 100),
            100,
            updateBlendWeights,
        )

        # NOTE(review): no WASD/ROI handling exists in this loop — this message
        # looks like a leftover from a removed feature; confirm before deleting.
        print("Use WASD keys to move ROI!")

        imageQueue = device.getOutputQueue("image")
        detectQueue = device.getOutputQueue(name="detections")

        def frameNorm(frame, bbox):
            """
            Scale normalized <0..1> bbox coordinates to integer pixel
            coordinates using the frame's width/height.
            """
            normVals = np.full(len(bbox), frame.shape[0])
            normVals[::2] = frame.shape[1]  # even indices (x coords) use width
            return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

        def drawText(frame, text, org, color=(255, 255, 255)):
            """Draw text with a thick black outline for readability."""
            cv2.putText(
                frame,
                text,
                org,
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 0),
                4,
                cv2.LINE_AA,
            )
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA
            )

        def drawDetection(frame, frame_a, detections):
            """
            Annotate `frame` with labeled boxes and spatial coordinates;
            paint red side bars on `frame_a` (plus marker dots on `frame`)
            when a close vehicle/person-class object (label < 8, z < 6 m)
            is left or right of center.
            """
            for detection in detections:
                bbox = frameNorm(
                    frame,
                    (detection.xmin, detection.ymin, detection.xmax, detection.ymax),
                )
                drawText(
                    frame,
                    labelMap[detection.label],
                    (bbox[0] + 10, bbox[1] + 20),
                )
                drawText(
                    frame,
                    f"{detection.confidence:.2%}",
                    (bbox[0] + 10, bbox[1] + 35),
                )
                # Black outline then red box, mirroring drawText's outline trick
                cv2.rectangle(
                    frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 0), 4
                )
                cv2.rectangle(
                    frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 1
                )
                if hasattr(detection, "boundingBoxMapping"):
                    if (detection.label < 8) and (detection.spatialCoordinates.z < 6000):
                        if (detection.spatialCoordinates.x < 0):
                            frame_a[:, :50] = (0, 0, 255)
                            cv2.circle(frame, (120, 160), 8, (0, 0, 255), -1)
                        else:
                            frame_a[:, 590:] = (0, 0, 255)
                            cv2.circle(frame, (520, 160), 8, (0, 0, 255), -1)

                    drawText(
                        frame,
                        f"X: {int(detection.spatialCoordinates.x)} mm",
                        (bbox[0] + 10, bbox[1] + 50),
                    )
                    drawText(
                        frame,
                        f"Y: {int(detection.spatialCoordinates.y)} mm",
                        (bbox[0] + 10, bbox[1] + 65),
                    )
                    drawText(
                        frame,
                        f"Z: {int(detection.spatialCoordinates.z)} mm",
                        (bbox[0] + 10, bbox[1] + 80),
                    )

        while not device.isClosed():
            imageData = imageQueue.tryGet()
            detData = detectQueue.tryGet()

            # Detections may arrive at a different rate than frames; keep the
            # latest set and re-draw it on every new frame.
            if detData is not None:
                detections = detData.detections

            if imageData is not None:
                frameRgb = imageData.getCvFrame()
                frameDisp = frameRgb.copy()
                drawDetection(frameRgb, frameDisp, detections)
                # Vertical center reference line
                cv2.line(frameRgb, (320, 0), (320, 480), (0, 0, 255), thickness=2)

            # Blend annotated frame with the side-bar overlay copy
            if frameRgb is not None and frameDisp is not None:
                blended = cv2.addWeighted(
                    frameRgb, rgbWeight, frameDisp, depthWeight, 0
                )
                blended = cv2.resize(blended, (1280, 720))

                cv2.imshow(blendedWindowName, blended)
                frameRgb = None
                frameDisp = None

            key = cv2.waitKey(1)
            if key == ord("q"):
                break


if __name__ == "__main__":
    # NOTE(review): refPt/click_roi appear unused by the processing loop —
    # likely leftovers from a removed mouse-ROI feature; confirm nothing else
    # references them before deleting.
    refPt = None
    click_roi = None
    run()
