#!/usr/bin/env python3
# coding=utf-8
from __future__ import annotations

import cv2
import depthai as dai
import numpy as np
print(f"depthai: {dai.__version__}")

# Use ColorCamera nodes (True) or MonoCamera nodes (False) as the stereo pair.
COLORFUL = True

# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = True
# Better handling for occlusions:
lr_check = True

# Default spatial-location algorithm; switchable at runtime with keys 1-5 in run().
calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.AVERAGE

RESOLUTION = "720"  # 400, 480, 720, 800, 1200, ...

# Sensor resolutions available when COLORFUL is False (MonoCamera nodes).
mono_res_opts = {
    "400": dai.MonoCameraProperties.SensorResolution.THE_400_P,
    "480": dai.MonoCameraProperties.SensorResolution.THE_480_P,
    "720": dai.MonoCameraProperties.SensorResolution.THE_720_P,
    "800": dai.MonoCameraProperties.SensorResolution.THE_800_P,
    "1200": dai.MonoCameraProperties.SensorResolution.THE_1200_P,
}

# Sensor resolutions available when COLORFUL is True (ColorCamera nodes).
# NOTE(review): "400" and "480" exist only in mono_res_opts, so those
# RESOLUTION values are invalid when COLORFUL is True.
color_res_opts = {
    "720":  dai.ColorCameraProperties.SensorResolution.THE_720_P,
    "800":  dai.ColorCameraProperties.SensorResolution.THE_800_P,
    "1080": dai.ColorCameraProperties.SensorResolution.THE_1080_P,
    "1200": dai.ColorCameraProperties.SensorResolution.THE_1200_P,
    "4k":   dai.ColorCameraProperties.SensorResolution.THE_4_K,
    "5mp": dai.ColorCameraProperties.SensorResolution.THE_5_MP,
    "12mp": dai.ColorCameraProperties.SensorResolution.THE_12_MP,
    "13mp": dai.ColorCameraProperties.SensorResolution.THE_13_MP,
    "48mp": dai.ColorCameraProperties.SensorResolution.THE_48_MP,
}

# Initial ROI corners in normalized (0..1) image coordinates; these objects
# are shared with create_pipeline() and mutated by the handlers in run().
topLeft = dai.Point2f(0.4, 0.4)
bottomRight = dai.Point2f(0.6, 0.6)
config = dai.SpatialLocationCalculatorConfigData()


def create_pipeline(device):
    """Build the stereo-depth + SpatialLocationCalculator pipeline.

    Args:
        device: Connected ``dai.Device`` handle. Currently unused; kept for
            signature compatibility with callers.

    Returns:
        tuple: ``(pipeline, max_disparity)`` where ``max_disparity`` comes
        from the stereo node's initial config and is used by the caller to
        scale frames from the "disp" stream for display.

    Raises:
        ValueError: if ``RESOLUTION`` is not available for the selected
            camera type (color vs. mono).
    """
    global calculation_algorithm, config
    # Create pipeline
    pipeline = dai.Pipeline()

    # Pick the resolution table for the selected sensor type and validate it
    # up front: dict.get() would silently yield None for an unavailable key
    # (e.g. "400" is mono-only) and fail later inside setResolution() with a
    # far less helpful error.
    res_opts = color_res_opts if COLORFUL else mono_res_opts
    if RESOLUTION not in res_opts:
        kind = "color" if COLORFUL else "mono"
        raise ValueError(
            f"RESOLUTION {RESOLUTION!r} is not available for {kind} cameras; "
            f"choose one of: {', '.join(res_opts)}"
        )
    monoResolution = res_opts[RESOLUTION]

    # Define sources and outputs
    if COLORFUL:
        left = pipeline.create(dai.node.ColorCamera)
        right = pipeline.create(dai.node.ColorCamera)
    else:
        left = pipeline.create(dai.node.MonoCamera)
        right = pipeline.create(dai.node.MonoCamera)

    stereo = pipeline.create(dai.node.StereoDepth)
    spatialLocationCalculator = pipeline.create(dai.node.SpatialLocationCalculator)

    # XLink bridges: frames out to the host, calculator config in from it.
    rightOut = pipeline.create(dai.node.XLinkOut)
    leftOut = pipeline.create(dai.node.XLinkOut)
    disparityOut = pipeline.create(dai.node.XLinkOut)

    xoutSpatialData = pipeline.create(dai.node.XLinkOut)
    xinSpatialCalcConfig = pipeline.create(dai.node.XLinkIn)

    rightOut.setStreamName("right")
    leftOut.setStreamName("left")
    disparityOut.setStreamName("disp")

    xoutSpatialData.setStreamName("spatialData")
    xinSpatialCalcConfig.setStreamName("spatialCalcConfig")

    # Properties: CAM_B / CAM_C are the left / right stereo sockets.
    left.setResolution(monoResolution)
    left.setBoardSocket(dai.CameraBoardSocket.CAM_B)
    left.setFps(30)
    right.setResolution(monoResolution)
    right.setBoardSocket(dai.CameraBoardSocket.CAM_C)
    right.setFps(30)

    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    # LR-check is required for depthQueueData alignment
    stereo.setLeftRightCheck(lr_check)
    stereo.setExtendedDisparity(extended_disparity)
    stereo.setSubpixel(subpixel)

    # stereo.setDepthAlign(dai.CameraBoardSocket.RGB)

    # Initial ROI config; depth thresholds are in millimetres.
    config.depthThresholds.lowerThreshold = 100
    config.depthThresholds.upperThreshold = 10000
    config.calculationAlgorithm = calculation_algorithm
    config.roi = dai.Rect(topLeft, bottomRight)

    spatialLocationCalculator.inputConfig.setWaitForMessage(False)
    spatialLocationCalculator.initialConfig.addROI(config)

    # Linking
    stereo.rectifiedRight.link(rightOut.input)
    stereo.rectifiedLeft.link(leftOut.input)

    if COLORFUL:
        # Color cameras feed the stereo node with ISP frames.
        left.isp.link(stereo.left)
        right.isp.link(stereo.right)
    else:
        left.out.link(stereo.left)
        right.out.link(stereo.right)

    # The "disp" stream carries the frame that actually passed through the
    # calculator, keeping the displayed frame and the measurements in sync.
    # stereo.disparity.link(disparityOut.input)
    spatialLocationCalculator.passthroughDepth.link(disparityOut.input)

    stereo.depth.link(spatialLocationCalculator.inputDepth)

    spatialLocationCalculator.out.link(xoutSpatialData.input)
    xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)

    return pipeline, stereo.initialConfig.getMaxDisparity()


def check_input(roi: list | np.ndarray, frame, DELTA=5):
    """Normalize a pixel-space ROI (or point) to fractional coordinates.

    Args:
        roi: Either a point ``[x, y]`` or an ROI — ``[[x1, y1], [x2, y2]]``
            or flat ``[x1, y1, x2, y2]`` — in pixel coordinates.
        frame: Image the coordinates refer to; only ``frame.shape`` is used.
        DELTA: Half-size (pixels) of the ROI built around a bare point, and
            the margin kept from the frame border when clamping.

    Returns:
        np.ndarray of shape (2, 2): ``[[x1, y1], [x2, y2]]`` normalized to
        0..1 by the frame width/height.
    """
    # Convert to a numpy array if input is a list
    if isinstance(roi, list):
        roi = np.array(roi)

    if roi.shape in ((2,), (2, 1)):
        # Bare (x, y) point: expand to a DELTA-sized box around it.
        # (The original np.hstack of a point with a (2, 2) offset array
        # raised a dimension-mismatch ValueError.)
        point = roi.reshape(2)
        roi = np.vstack([point - DELTA, point + DELTA])
    elif roi.shape in ((4,), (4, 1)):
        # Flat [x1, y1, x2, y2]: reshape so clamping/normalization broadcast.
        roi = roi.reshape(2, 2)

    # Limit the ROI so it won't fall outside the frame.  ndarray.clip is NOT
    # in-place — the original discarded its result, so no clamping happened.
    roi = roi.clip([DELTA, DELTA], [frame.shape[1] - DELTA, frame.shape[0] - DELTA])

    # frame.shape[1::-1] is (width, height): x / width, y / height.
    return roi / frame.shape[1::-1]


def click_and_crop(event, x, y, flags, param):
    """OpenCV mouse callback: drag with the left button to select an ROI.

    On button-down the anchor corner is stored in the global ``ref_pt``;
    on button-up the release corner is added and the two corners are
    normalized into ``click_roi`` as [[min_x, min_y], [max_x, max_y]].
    """
    global ref_pt, click_roi

    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: remember where the user pressed.
        ref_pt = [(x, y)]
        return

    if event == cv2.EVENT_LBUTTONUP:
        # Drag finished: pair the release point with the anchor and sort the
        # corners so the ROI is always (top-left, bottom-right).
        ref_pt.append((x, y))
        corners = np.array(ref_pt)
        ref_pt = corners
        click_roi = np.array([corners.min(axis=0), corners.max(axis=0)])


def run():
    """Stream rectified left/right frames plus depth, overlay spatial
    (X/Y/Z, mm) measurements for an interactive ROI, and process input.

    Controls:
        mouse drag  - select a new ROI in any window
        w / a / s / d - nudge the ROI up / left / down / right
        1..5        - switch algorithm (MEAN / MIN / MAX / MODE / MEDIAN)
        q           - quit
    """
    global ref_pt, click_roi, calculation_algorithm, config

    # Connect to device and start pipeline
    with dai.Device() as device:
        pipeline, maxDisparity = create_pipeline(device)
        device.startPipeline(pipeline)

        frameRight = None
        frameLeft = None
        frameDisp = None
        depthDatas = []   # latest SpatialLocationCalculator results to overlay
        stepSize = 0.01   # WASD nudge size, in normalized (0..1) coordinates
        newConfig = False

        # Configure windows; trackbar adjusts blending ratio of rgb/depthQueueData
        rightWindowName = "imageRight"
        leftWindowName = "imageLeft"
        depthWindowName = "depthQueueData"
        cv2.namedWindow(rightWindowName)
        cv2.namedWindow(leftWindowName)
        cv2.namedWindow(depthWindowName)

        # All three windows share the same ROI-selection mouse handler.
        cv2.setMouseCallback(rightWindowName, click_and_crop)
        cv2.setMouseCallback(leftWindowName, click_and_crop)
        cv2.setMouseCallback(depthWindowName, click_and_crop)

        print("Use WASD keys to move ROI!")

        spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig")
        rightQueue = device.getOutputQueue("right")
        leftQueue = device.getOutputQueue("left")
        dispQueue = device.getOutputQueue("disp")
        spatialDataQueue = device.getOutputQueue("spatialData")

        def drawText(frame, text, org, color=(255, 255, 255), thickness=1):
            # Draw a thick dark outline first, then the text itself, so the
            # label stays readable on any background.
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 0), thickness + 3, cv2.LINE_AA
            )
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                color, thickness, cv2.LINE_AA
            )


        def drawRect(frame, topLeft, bottomRight, color=(255, 255, 255), thickness=1):
            # Same outline-then-foreground trick as drawText, for rectangles.
            cv2.rectangle(frame, topLeft, bottomRight, (0, 0, 0), thickness + 3)
            cv2.rectangle(frame, topLeft, bottomRight, color, thickness)

        def drawSpatialLocations(frame, spatialLocations):
            # Overlay each measured ROI and its X/Y/Z coordinates (mm).
            for depthData in spatialLocations:
                roi = depthData.config.roi
                # ROI arrives normalized (0..1); convert to pixel coordinates.
                roi = roi.denormalize(width=frame.shape[1], height=frame.shape[0])
                xmin = int(roi.topLeft().x)
                ymin = int(roi.topLeft().y)
                xmax = int(roi.bottomRight().x)
                ymax = int(roi.bottomRight().y)

                # NOTE(review): these two rectangles duplicate what drawRect
                # draws just below — the same box is drawn twice.
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 0), 4)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 255, 255), 1)
                drawRect(
                    frame,
                    (xmin, ymin),
                    (xmax, ymax),
                )
                drawText(
                    frame,
                    f"X: {int(depthData.spatialCoordinates.x)} mm",
                    (xmin + 10, ymin + 20),
                )
                drawText(
                    frame,
                    f"Y: {int(depthData.spatialCoordinates.y)} mm",
                    (xmin + 10, ymin + 35),
                )
                drawText(
                    frame,
                    f"Z: {int(depthData.spatialCoordinates.z)} mm",
                    (xmin + 10, ymin + 50),
                )

        while not device.isClosed():
            # Blocking get() for the image streams; non-blocking tryGet() for
            # depth and measurements, which may lag behind.
            rightImageData = rightQueue.get()
            leftImageData = leftQueue.get()
            dispData = dispQueue.tryGet()
            spatialData = spatialDataQueue.tryGet()

            if spatialData is not None:
                depthDatas = spatialData.getSpatialLocations()

            if rightImageData is not None:
                frameRight = rightImageData.getCvFrame()
                frameLeft = leftImageData.getCvFrame()
                drawSpatialLocations(frameRight, depthDatas)
                drawSpatialLocations(frameLeft, depthDatas)

                cv2.imshow(rightWindowName, frameRight)
                cv2.imshow(leftWindowName, frameLeft)

            if dispData is not None:
                # NOTE(review): the "disp" stream carries the calculator's
                # passthrough depth (see create_pipeline), yet it is scaled by
                # the max *disparity* here — confirm the intended colormap range.
                frameDisp = dispData.getFrame()
                frameDisp = (frameDisp * (255 / maxDisparity)).astype(np.uint8)
                frameDisp = cv2.applyColorMap(frameDisp, cv2.COLORMAP_JET)
                frameDisp = np.ascontiguousarray(frameDisp)
                drawSpatialLocations(frameDisp, depthDatas)

                cv2.imshow(depthWindowName, frameDisp)

            # Blend when both received
            if frameRight is not None and frameDisp is not None:
                if click_roi is not None:
                    # Unpack the normalized ROI from the mouse selection into
                    # the shared Point2f corners used for the device config.
                    [topLeft.x, topLeft.y], [
                        bottomRight.x,
                        bottomRight.y,
                    ] = check_input(click_roi, frameRight)
                    click_roi = None
                    newConfig = True

                # Drop the processed frame/measurements so stale overlays are
                # not redrawn on the next iteration.
                frameDisp = None
                depthDatas = []

            key = cv2.waitKey(1)
            if key == ord("q"):
                break
            elif key == ord("w"):
                # Move the ROI, keeping it inside the normalized 0..1 range.
                if topLeft.y - stepSize >= 0:
                    topLeft.y -= stepSize
                    bottomRight.y -= stepSize
                    newConfig = True
            elif key == ord("a"):
                if topLeft.x - stepSize >= 0:
                    topLeft.x -= stepSize
                    bottomRight.x -= stepSize
                    newConfig = True
            elif key == ord("s"):
                if bottomRight.y + stepSize <= 1:
                    topLeft.y += stepSize
                    bottomRight.y += stepSize
                    newConfig = True
            elif key == ord("d"):
                if bottomRight.x + stepSize <= 1:
                    topLeft.x += stepSize
                    bottomRight.x += stepSize
                    newConfig = True

            elif key == ord("1"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEAN
                print("Switching calculation algorithm to MEAN!")
                newConfig = True
            elif key == ord("2"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MIN
                print("Switching calculation algorithm to MIN!")
                newConfig = True
            elif key == ord("3"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MAX
                print("Switching calculation algorithm to MAX!")
                newConfig = True
            elif key == ord("4"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MODE
                print("Switching calculation algorithm to MODE!")
                newConfig = True
            elif key == ord("5"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN
                print("Switching calculation algorithm to MEDIAN!")
                newConfig = True

            if newConfig:
                # Push the updated ROI/algorithm to the device.
                config.roi = dai.Rect(topLeft, bottomRight)
                config.calculationAlgorithm = calculation_algorithm
                cfg = dai.SpatialLocationCalculatorConfig()
                cfg.addROI(config)
                spatialCalcConfigInQueue.send(cfg)
                newConfig = False

if __name__ == "__main__":
    # Globals shared between the mouse callback (click_and_crop) and run():
    # ref_pt holds the in-progress drag corners, click_roi the finished
    # selection (None until the user drags a rectangle).
    ref_pt = None
    click_roi: list | None = None
    run()
