#!/usr/bin/env python3
# coding=utf-8
from __future__ import annotations

import contextlib
from typing import NamedTuple

import cv2
import depthai as dai
import numpy as np

# When True, build the stereo pair from ColorCamera nodes (ISP output)
# instead of MonoCamera nodes.
colorful = False

# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = True
# Better handling for occlusions:
lr_check = True

# How the SpatialLocationCalculator reduces the ROI's depth pixels to a single
# value; switchable at runtime with keys 1-5 (see run()).
calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.AVERAGE

RESOLUTION = "400"  # 400, 480, 720, 800, 1200, ...

# Supported mono-sensor resolutions, keyed by the RESOLUTION string.
mono_res_opts = {
    "400": dai.MonoCameraProperties.SensorResolution.THE_400_P,
    "480": dai.MonoCameraProperties.SensorResolution.THE_480_P,
    "720": dai.MonoCameraProperties.SensorResolution.THE_720_P,
    "800": dai.MonoCameraProperties.SensorResolution.THE_800_P,
    "1200": dai.MonoCameraProperties.SensorResolution.THE_1200_P,
}

# Supported color-sensor resolutions, keyed by the RESOLUTION string.
color_res_opts = {
    "720": dai.ColorCameraProperties.SensorResolution.THE_720_P,
    "800": dai.ColorCameraProperties.SensorResolution.THE_800_P,
    "1080": dai.ColorCameraProperties.SensorResolution.THE_1080_P,
    "1200": dai.ColorCameraProperties.SensorResolution.THE_1200_P,
    "4k": dai.ColorCameraProperties.SensorResolution.THE_4_K,
    "5mp": dai.ColorCameraProperties.SensorResolution.THE_5_MP,
    "12mp": dai.ColorCameraProperties.SensorResolution.THE_12_MP,
    "13mp": dai.ColorCameraProperties.SensorResolution.THE_13_MP,
    "48mp": dai.ColorCameraProperties.SensorResolution.THE_48_MP,
}

# Initial ROI corners in normalized [0, 1] frame coordinates, plus the shared
# calculator config object; all three are mutated by create_pipeline() and run().
topLeft = dai.Point2f(0.4, 0.4)
bottomRight = dai.Point2f(0.6, 0.6)
config = dai.SpatialLocationCalculatorConfigData()


class DeviceMap(NamedTuple):
    """Per-device bundle: the open device, its display windows and its queues."""

    # Open DepthAI device handle (closed by the ExitStack in run()).
    device: dai.Device
    # Window title used for the rectified-right image stream.
    rgbWindowName: str
    # Window title used for the colorized disparity stream.
    depthWindowName: str
    # Output queue of the "right" stream.
    previewQueue: dai.DataOutputQueue
    # Queue for sending updated SpatialLocationCalculator configs to the device.
    # NOTE(review): annotation corrected — this comes from device.getInputQueue(),
    # so it is an input queue, not an output queue.
    spatialCalcConfigInQueue: dai.DataInputQueue
    # Output queue of the "disp" stream (depth passthrough frames).
    dispQueue: dai.DataOutputQueue
    # Output queue of the "spatialData" stream.
    spatialDataQueue: dai.DataOutputQueue
    # Maximum disparity of the stereo node; used to scale frames for display.
    maxDisparity: int


def create_pipeline():
    """Build the stereo-depth + SpatialLocationCalculator pipeline.

    Uses the module-level settings (``colorful``, ``RESOLUTION``,
    ``extended_disparity``, ``subpixel``, ``lr_check``) and mutates the shared
    module-level ``config`` with the initial ROI.

    Returns:
        tuple: ``(pipeline, max_disparity)`` where ``max_disparity`` is taken
        from the stereo node's initial config and is used by the caller to
        normalize disparity frames for display.

    Raises:
        ValueError: if ``RESOLUTION`` is not available for the selected camera
        type (color vs. mono).
    """
    # Create pipeline
    pipeline = dai.Pipeline()

    # Define sources: color sensors feed stereo via their ISP output,
    # mono sensors via their raw output (see the linking section below).
    if colorful:
        left = pipeline.create(dai.node.ColorCamera)
        right = pipeline.create(dai.node.ColorCamera)
        res_opts = color_res_opts
    else:
        left = pipeline.create(dai.node.MonoCamera)
        right = pipeline.create(dai.node.MonoCamera)
        res_opts = mono_res_opts

    # Fail fast with a clear message; the original dict.get() silently
    # returned None for unsupported resolutions and crashed later in
    # setResolution with a confusing error.
    try:
        sensorResolution = res_opts[RESOLUTION]
    except KeyError:
        raise ValueError(
            f"RESOLUTION {RESOLUTION!r} is not supported for "
            f"{'color' if colorful else 'mono'} cameras; "
            f"choose one of {sorted(res_opts)}"
        ) from None

    stereo = pipeline.create(dai.node.StereoDepth)
    spatialLocationCalculator = pipeline.create(dai.node.SpatialLocationCalculator)

    rightOut = pipeline.create(dai.node.XLinkOut)
    disparityOut = pipeline.create(dai.node.XLinkOut)

    xoutSpatialData = pipeline.create(dai.node.XLinkOut)
    xinSpatialCalcConfig = pipeline.create(dai.node.XLinkIn)

    rightOut.setStreamName("right")
    disparityOut.setStreamName("disp")

    xoutSpatialData.setStreamName("spatialData")
    xinSpatialCalcConfig.setStreamName("spatialCalcConfig")

    # Camera properties: left sensor on CAM_B, right on CAM_C, both at 30 fps.
    left.setResolution(sensorResolution)
    left.setBoardSocket(dai.CameraBoardSocket.CAM_B)
    left.setFps(30)
    right.setResolution(sensorResolution)
    right.setBoardSocket(dai.CameraBoardSocket.CAM_C)
    right.setFps(30)

    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    # LR-check is required for depthQueueData alignment
    stereo.setLeftRightCheck(lr_check)
    stereo.setExtendedDisparity(extended_disparity)
    stereo.setSubpixel(subpixel)

    # Initial ROI config; the same module-level `config` object is reused by
    # run() whenever the ROI or algorithm changes at runtime.
    config.depthThresholds.lowerThreshold = 100    # mm
    config.depthThresholds.upperThreshold = 10000  # mm
    config.calculationAlgorithm = calculation_algorithm
    config.roi = dai.Rect(topLeft, bottomRight)

    spatialLocationCalculator.inputConfig.setWaitForMessage(False)
    spatialLocationCalculator.initialConfig.addROI(config)

    # Linking
    stereo.syncedRight.link(rightOut.input)

    if colorful:
        left.isp.link(stereo.left)
        right.isp.link(stereo.right)
    else:
        left.out.link(stereo.left)
        right.out.link(stereo.right)

    # The "disp" stream carries the calculator's depth passthrough (not raw
    # stereo disparity) so displayed frames stay in sync with spatial data.
    spatialLocationCalculator.passthroughDepth.link(disparityOut.input)

    stereo.depth.link(spatialLocationCalculator.inputDepth)

    spatialLocationCalculator.out.link(xoutSpatialData.input)
    xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)

    return pipeline, stereo.initialConfig.getMaxDisparity()


def check_input(roi: list | np.ndarray, frame: np.ndarray, DELTA: int = 5) -> np.ndarray:
    """Check if input is ROI or point; normalize it to [0, 1] frame coordinates.

    Args:
        roi: Either a single point ``[x, y]``, a flat ``[x1, y1, x2, y2]``,
            or corner pairs ``[[x1, y1], [x2, y2]]`` in pixel coordinates.
        frame: Image the ROI refers to; only ``frame.shape`` is used.
        DELTA: Half-size (px) of the box created around a single point, and
            the minimum margin kept from the frame border.

    Returns:
        (2, 2) float array ``[[x1, y1], [x2, y2]]`` divided by (width, height).
    """
    # Accept plain lists as well as arrays.
    roi = np.asarray(roi)

    if roi.shape in ((2,), (2, 1)):
        # Single point: grow it into a +/-DELTA box around the click.
        # (The original np.hstack of a (2,) with a (2, 2) array raises;
        # broadcasting addition is what was intended.)
        roi = roi.reshape(2) + np.array([[-DELTA, -DELTA], [DELTA, DELTA]])
    elif roi.shape in ((4,), (4, 1)):
        # Flat [x1, y1, x2, y2] -> corner pairs, matching the caller's unpack.
        roi = roi.reshape(2, 2)

    # Clamp so the ROI stays at least DELTA px inside the frame.
    # BUGFIX: np.clip returns a new array; the original discarded the result.
    roi = roi.clip([DELTA, DELTA], [frame.shape[1] - DELTA, frame.shape[0] - DELTA])

    # Normalize by (width, height).
    return roi / frame.shape[1::-1]


def click_and_crop(event, x, y, flags, param):
    """OpenCV mouse callback turning a left-button drag into a rectangle.

    Side effects on module globals:
      * ``ref_pt``    -- anchor point on button-down; both corners (as an
                         ndarray) once the button is released.
      * ``click_roi`` -- ``[[min_x, min_y], [max_x, max_y]]`` after release,
                         picked up and cleared by the main loop.
    """
    global ref_pt, click_roi

    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: remember the anchor corner.
        ref_pt = [(x, y)]
        return

    if event != cv2.EVENT_LBUTTONUP:
        return

    # Drag finished: collect both corners and publish an axis-aligned
    # (top-left, bottom-right) rectangle regardless of drag direction.
    ref_pt.append((x, y))
    ref_pt = np.array(ref_pt)
    top_left = np.min(ref_pt, axis=0)
    bottom_right = np.max(ref_pt, axis=0)
    click_roi = np.array([top_left, bottom_right])


def run():
    """Connect to every available device, display streams, show spatial data.

    Opens all detected DepthAI devices, shows the rectified-right image and a
    colorized disparity map per device, and overlays X/Y/Z coordinates (mm)
    for the current ROI.  The ROI is moved with WASD, redefined by
    click-dragging in any window, keys 1-5 switch the calculation algorithm,
    and 'q' quits.
    """
    global ref_pt, click_roi, calculation_algorithm, config

    device_maps = {}

    # Connect to devices and start pipelines; ExitStack closes every opened
    # device on exit, even on error.
    with contextlib.ExitStack() as stack:
        deviceInfos = dai.Device.getAllAvailableDevices()
        usbSpeed = dai.UsbSpeed.SUPER

        for deviceInfo in deviceInfos:
            deviceInfo: dai.DeviceInfo
            device: dai.Device = stack.enter_context(dai.Device(deviceInfo, usbSpeed))
            print("===Connected to ", deviceInfo.getMxId())
            mxId = device.getMxId()
            cameras = device.getConnectedCameras()
            usbSpeed = device.getUsbSpeed()
            eepromData = device.readCalibration2().getEepromData()
            print("   >>> MXID:", mxId)
            print("   >>> Num of cameras:", len(cameras))
            print("   >>> USB speed:", usbSpeed)
            if eepromData.boardName != "":
                print("   >>> Board name:", eepromData.boardName)
            if eepromData.productName != "":
                print("   >>> Product name:", eepromData.productName)

            pipeline, max_disparity = create_pipeline()
            device.startPipeline(pipeline)

            # One image window and one depth window per device; both accept
            # click-drag ROI selection via click_and_crop.
            rgb_window_name = f"{device.getDeviceName()}: image"
            depth_window_name = f"{device.getDeviceName()}: depth"
            cv2.namedWindow(rgb_window_name)
            cv2.namedWindow(depth_window_name)

            cv2.setMouseCallback(rgb_window_name, click_and_crop)
            cv2.setMouseCallback(depth_window_name, click_and_crop)

            # BUGFIX: keyword names must match the DeviceMap NamedTuple fields;
            # the original passed snake_case keys, raising TypeError here.
            device_maps[deviceInfo.name] = DeviceMap(
                device=device,
                rgbWindowName=rgb_window_name,
                depthWindowName=depth_window_name,
                previewQueue=device.getOutputQueue("right"),
                spatialCalcConfigInQueue=device.getInputQueue("spatialCalcConfig"),
                dispQueue=device.getOutputQueue("disp"),
                spatialDataQueue=device.getOutputQueue("spatialData"),
                maxDisparity=max_disparity,
            )

        def drawText(frame, text, org, color=(255, 255, 255), thickness=1):
            # Draw a thick black halo first so text stays readable on any background.
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 0), thickness + 3, cv2.LINE_AA
            )
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                color, thickness, cv2.LINE_AA
            )

        def drawRect(frame, top_left, bottom_right, color=(255, 255, 255), thickness=1):
            # Same halo trick as drawText: black outline, then colored rectangle.
            cv2.rectangle(frame, top_left, bottom_right, (0, 0, 0), thickness + 3)
            cv2.rectangle(frame, top_left, bottom_right, color, thickness)

        def drawSpatialLocations(frame, spatialLocations):
            # Overlay each measured ROI and its X/Y/Z coordinates (millimetres).
            for depthData in spatialLocations:
                roi = depthData.config.roi
                roi = roi.denormalize(width=frame.shape[1], height=frame.shape[0])
                x_min = int(roi.top_left().x)
                y_min = int(roi.top_left().y)
                x_max = int(roi.bottom_right().x)
                y_max = int(roi.bottom_right().y)

                # drawRect already paints the outlined rectangle; the original
                # additionally drew the identical rectangle twice with raw
                # cv2.rectangle calls — redundant work removed.
                drawRect(
                    frame,
                    (x_min, y_min),
                    (x_max, y_max),
                )
                drawText(
                    frame,
                    f"X: {int(depthData.spatialCoordinates.x)} mm",
                    (x_min + 10, y_min + 20),
                )
                drawText(
                    frame,
                    f"Y: {int(depthData.spatialCoordinates.y)} mm",
                    (x_min + 10, y_min + 35),
                )
                drawText(
                    frame,
                    f"Z: {int(depthData.spatialCoordinates.z)} mm",
                    (x_min + 10, y_min + 50),
                )

        print("Use WASD keys to move ROI!")
        stepSize = 0.01
        newConfig = False
        while device_maps:
            # A config change made during the previous pass is sent to EVERY
            # device on this pass, then cleared.  (The original cleared the
            # flag after the first device, so others never got the update.)
            send_config = newConfig
            newConfig = False

            # Iterate over a snapshot: popping from a dict while iterating it
            # raises RuntimeError.
            for key, device_map in list(device_maps.items()):
                if device_map.device.isClosed():
                    device_maps.pop(key)
                    continue  # don't touch the queues of a closed device

                if send_config:
                    config.roi = dai.Rect(topLeft, bottomRight)
                    config.calculationAlgorithm = calculation_algorithm
                    cfg = dai.SpatialLocationCalculatorConfig()
                    cfg.addROI(config)
                    device_map.spatialCalcConfigInQueue.send(cfg)

                inRectified = device_map.previewQueue.get()
                dispData = device_map.dispQueue.get()
                spatialData = device_map.spatialDataQueue.get()

                depthDatas = spatialData.getSpatialLocations()

                rectifiedRight = inRectified.getCvFrame()
                drawSpatialLocations(rectifiedRight, depthDatas)

                cv2.imshow(device_map.rgbWindowName, rectifiedRight)

                # Scale disparity to 8 bit and colorize it for display.
                frameDisp = dispData.getFrame()
                frameDisp = (frameDisp * (255 / device_map.maxDisparity)).astype(np.uint8)
                frameDisp = cv2.applyColorMap(frameDisp, cv2.COLORMAP_JET)
                frameDisp = np.ascontiguousarray(frameDisp)
                drawSpatialLocations(frameDisp, depthDatas)

                cv2.imshow(device_map.depthWindowName, frameDisp)

                # Adopt a freshly click-dragged ROI (published by click_and_crop).
                if click_roi is not None:
                    [topLeft.x, topLeft.y], [
                        bottomRight.x,
                        bottomRight.y,
                    ] = check_input(click_roi, rectifiedRight)
                    click_roi = None
                    newConfig = True

            key = cv2.waitKey(1)
            if key == ord("q"):
                break
            # WASD nudges the ROI by stepSize in normalized [0, 1] coordinates,
            # clamped so the ROI never leaves the frame.
            if key == ord("w"):
                if topLeft.y - stepSize >= 0:
                    topLeft.y -= stepSize
                    bottomRight.y -= stepSize
                    newConfig = True
            elif key == ord("a"):
                if topLeft.x - stepSize >= 0:
                    topLeft.x -= stepSize
                    bottomRight.x -= stepSize
                    newConfig = True
            elif key == ord("s"):
                if bottomRight.y + stepSize <= 1:
                    topLeft.y += stepSize
                    bottomRight.y += stepSize
                    newConfig = True
            elif key == ord("d"):
                if bottomRight.x + stepSize <= 1:
                    topLeft.x += stepSize
                    bottomRight.x += stepSize
                    newConfig = True

            # Keys 1-5 switch the spatial calculation algorithm.
            elif key == ord("1"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEAN
                print("Switching calculation algorithm to MEAN!")
                newConfig = True
            elif key == ord("2"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MIN
                print("Switching calculation algorithm to MIN!")
                newConfig = True
            elif key == ord("3"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MAX
                print("Switching calculation algorithm to MAX!")
                newConfig = True
            elif key == ord("4"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MODE
                print("Switching calculation algorithm to MODE!")
                newConfig = True
            elif key == ord("5"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN
                print("Switching calculation algorithm to MEDIAN!")
                newConfig = True


if __name__ == "__main__":
    # Shared state written by the OpenCV mouse callback (click_and_crop)
    # and consumed/cleared by run().
    ref_pt = None
    click_roi = None
    run()
