#!/usr/bin/env python3
# coding=utf-8
from __future__ import annotations

import collections
import time

import cv2
import depthai as dai
import numpy as np

# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = True
# Better handling for occlusions:
lr_check = True

# Statistic applied over the ROI's depth pixels (switchable at runtime with keys 1-5).
calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN
# Initial ROI corners, in normalized [0, 1] frame coordinates.
topLeft = dai.Point2f(0.4, 0.4)
bottomRight = dai.Point2f(0.6, 0.6)
# Depth samples outside [lowerThreshold, upperThreshold] are ignored by the calculator.
lowerThreshold = 0  # mm
upperThreshold = 15_000  # mm
# Shared SpatialLocationCalculator config, reused by create_pipeline() and run().
config = dai.SpatialLocationCalculatorConfigData()
# ISP downscale (numerator, denominator) applied to every camera.
isp_scale = (1, 3)
# Stream rectified frames too; when enableRectifiedFirst is True only the
# rectified-left frame is exported, otherwise both left and right.
enableRectified = True
enableRectifiedFirst = True

class FPSHandler:
    """
    Class that handles all FPS-related operations.

    Mostly used to calculate different streams FPS, but can also be
    used to feed the video file based on its FPS property, not app performance (this prevents the video from being sent
    too quickly if we finish processing a frame earlier than the next video frame should be consumed)
    """

    # Overlay style: white text drawn over a thicker black outline for contrast.
    _fpsBgColor = (0, 0, 0)
    _fpsColor = (255, 255, 255)
    _fpsType = cv2.FONT_HERSHEY_SIMPLEX
    _fpsLineType = cv2.LINE_AA

    def __init__(self, cap=None, maxTicks=100):
        """
        Args:
            cap (cv2.VideoCapture, Optional): handler to the video file object
            maxTicks (int, Optional): maximum ticks amount for FPS calculation

        Raises:
            ValueError: if maxTicks is below 2 (at least two timestamps are
                needed to measure a rate)
        """
        self._timestamp = None  # time of the most recent nextIter() call
        self._start = None  # time of the first nextIter() call
        # Video files are paced by their own FPS property; live cameras are not.
        self._framerate = cap.get(cv2.CAP_PROP_FPS) if cap is not None else None
        self._useCamera = cap is None

        self._iterCnt = 0
        self._ticks = {}  # name -> deque of monotonic timestamps

        if maxTicks < 2:
            msg = f"Provided maxTicks value must be 2 or higher (supplied: {maxTicks})"
            raise ValueError(msg)

        self._maxTicks = maxTicks

    def nextIter(self):
        """
        Marks the next iteration of the processing loop. Will use :obj:`time.sleep` method if initialized with video file
        object
        """
        if self._start is None:
            self._start = time.monotonic()

        # For file playback, sleep so frames are consumed at the file's FPS
        # instead of as fast as the host can process them.
        if not self._useCamera and self._timestamp is not None:
            frameDelay = 1.0 / self._framerate
            delay = (self._timestamp + frameDelay) - time.monotonic()
            if delay > 0:
                time.sleep(delay)
        self._timestamp = time.monotonic()
        self._iterCnt += 1

    def tick(self, name):
        """
        Marks a point in time for specified name

        Args:
            name (str): Specifies timestamp name
        """
        if name not in self._ticks:
            # Bounded deque keeps only the newest maxTicks timestamps.
            self._ticks[name] = collections.deque(maxlen=self._maxTicks)
        self._ticks[name].append(time.monotonic())

    def tickFps(self, name):
        """
        Calculates the FPS based on specified name

        Args:
            name (str): Specifies timestamps' name

        Returns:
            float: Calculated FPS or :code:`0.0` (default in case of failure)
        """
        ticks = self._ticks.get(name)
        if ticks is None or len(ticks) < 2:
            return 0.0
        timeDiff = ticks[-1] - ticks[0]
        # N timestamps span N-1 frame intervals.
        return (len(ticks) - 1) / timeDiff if timeDiff != 0 else 0.0

    def fps(self):
        """
        Calculates FPS value based on :func:`nextIter` calls, being the FPS of processing loop

        Returns:
            float: Calculated FPS or :code:`0.0` (default in case of failure)
        """
        if self._start is None or self._timestamp is None:
            return 0.0
        timeDiff = self._timestamp - self._start
        return self._iterCnt / timeDiff if timeDiff != 0 else 0.0

    def printStatus(self):
        """Prints total FPS for all names stored in :func:`tick` calls"""
        print("=== TOTAL FPS ===")
        for name in self._ticks:
            print(f"[{name}]: {self.tickFps(name):.1f}")

    def drawFps(self, frame, name):
        """
        Draws FPS values on requested frame, calculated based on specified name

        Args:
            frame (numpy.ndarray): Frame object to draw values on
            name (str): Specifies timestamps' name
        """
        frameFps = f"{name.upper()} FPS: {round(self.tickFps(name), 1)}"
        # Thick background pass then thin foreground pass for readability.
        cv2.putText(frame, frameFps, (5, 15), self._fpsType, 0.5, self._fpsBgColor, 4, self._fpsLineType)
        cv2.putText(frame, frameFps, (5, 15), self._fpsType, 0.5, self._fpsColor, 1, self._fpsLineType)

        # Also report neural-network FPS when an "nn" tick stream exists.
        if "nn" in self._ticks:
            cv2.putText(frame, f"NN FPS:  {round(self.tickFps('nn'), 1)}", (5, 30), self._fpsType, 0.5,
                        self._fpsBgColor, 4, self._fpsLineType)
            cv2.putText(frame, f"NN FPS:  {round(self.tickFps('nn'), 1)}", (5, 30), self._fpsType, 0.5, self._fpsColor,
                        1, self._fpsLineType)


def create_pipeline():
    """Build the three-camera, three-stereo-pair DepthAI pipeline.

    Creates three ColorCamera nodes (left/CAM_B, center/CAM_A, right/CAM_C)
    and three StereoDepth nodes for the pairs LC, LR and CR. Each stereo
    node feeds a SpatialLocationCalculator; disparity, spatial data and
    (optionally) rectified frames are exported via XLinkOut streams. A
    single XLinkIn stream ("spatialCalcConfig") fans out to all three
    calculators so one config message updates every ROI at once.

    Returns:
        tuple: (dai.Pipeline, max disparity value) — the max disparity is
        used on the host to normalize disparity frames for display.
    """
    # NOTE: the module-level `config` and `calculation_algorithm` are only
    # read or attribute-mutated here, so no `global` statement is required.

    # Create pipeline
    pipeline = dai.Pipeline()

    # Define sources and outputs
    left = pipeline.create(dai.node.ColorCamera)
    center = pipeline.create(dai.node.ColorCamera)
    right = pipeline.create(dai.node.ColorCamera)

    LC_stereo = pipeline.create(dai.node.StereoDepth)
    LR_stereo = pipeline.create(dai.node.StereoDepth)
    CR_stereo = pipeline.create(dai.node.StereoDepth)

    LC_slc = pipeline.create(dai.node.SpatialLocationCalculator)
    LR_slc = pipeline.create(dai.node.SpatialLocationCalculator)
    CR_slc = pipeline.create(dai.node.SpatialLocationCalculator)

    LC_SpatialData = pipeline.create(dai.node.XLinkOut)
    LR_SpatialData = pipeline.create(dai.node.XLinkOut)
    CR_SpatialData = pipeline.create(dai.node.XLinkOut)

    xout_LC = pipeline.create(dai.node.XLinkOut)
    xout_LR = pipeline.create(dai.node.XLinkOut)
    xout_CR = pipeline.create(dai.node.XLinkOut)

    xout_LC.setStreamName("disparity_LC")
    if enableRectified:
        xoutl_LC = pipeline.create(dai.node.XLinkOut)
        xoutl_LC.setStreamName("rectifiedLeft_LC")
        if not enableRectifiedFirst:
            xoutr_LC = pipeline.create(dai.node.XLinkOut)
            xoutr_LC.setStreamName("rectifiedRight_LC")

    xout_LR.setStreamName("disparity_LR")
    if enableRectified:
        xoutl_LR = pipeline.create(dai.node.XLinkOut)
        xoutl_LR.setStreamName("rectifiedLeft_LR")
        if not enableRectifiedFirst:
            xoutr_LR = pipeline.create(dai.node.XLinkOut)
            xoutr_LR.setStreamName("rectifiedRight_LR")

    xout_CR.setStreamName("disparity_CR")
    if enableRectified:
        xoutl_CR = pipeline.create(dai.node.XLinkOut)
        xoutl_CR.setStreamName("rectifiedLeft_CR")
        if not enableRectifiedFirst:
            xoutr_CR = pipeline.create(dai.node.XLinkOut)
            xoutr_CR.setStreamName("rectifiedRight_CR")

    LC_SpatialData.setStreamName("spatialData_LC")
    LR_SpatialData.setStreamName("spatialData_LR")
    CR_SpatialData.setStreamName("spatialData_CR")

    xinSpatialCalcConfig = pipeline.create(dai.node.XLinkIn)
    xinSpatialCalcConfig.setStreamName("spatialCalcConfig")

    # Properties
    left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
    left.setBoardSocket(dai.CameraBoardSocket.CAM_B)
    left.setIspScale(*isp_scale)

    center.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
    center.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    center.setIspScale(*isp_scale)

    right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
    right.setBoardSocket(dai.CameraBoardSocket.CAM_C)
    right.setIspScale(*isp_scale)

    # Identical depth settings for all three stereo pairs.
    for stereo in (LC_stereo, LR_stereo, CR_stereo):
        stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
        stereo.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF)
        # LR-check is required for depthQueueData alignment
        stereo.setLeftRightCheck(lr_check)
        stereo.setExtendedDisparity(extended_disparity)
        stereo.setSubpixel(subpixel)
        stereo.setDepthAlign(dai.RawStereoDepthConfig.AlgorithmControl.DepthAlign.RECTIFIED_LEFT)

    # Config: shared ROI / thresholds / algorithm for every calculator.
    config.depthThresholds.lowerThreshold = lowerThreshold
    config.depthThresholds.upperThreshold = upperThreshold
    config.calculationAlgorithm = calculation_algorithm
    config.roi = dai.Rect(topLeft, bottomRight)

    for spatialLocationCalculator in [LC_slc, LR_slc, CR_slc]:
        spatialLocationCalculator.inputConfig.setWaitForMessage(False)
        spatialLocationCalculator.initialConfig.addROI(config)

    # Linking
    # LC pair: left + center cameras.
    left.isp.link(LC_stereo.left)
    center.isp.link(LC_stereo.right)
    LC_stereo.disparity.link(xout_LC.input)
    LC_stereo.depth.link(LC_slc.inputDepth)
    LC_slc.out.link(LC_SpatialData.input)
    xinSpatialCalcConfig.out.link(LC_slc.inputConfig)
    if enableRectified:
        LC_stereo.rectifiedLeft.link(xoutl_LC.input)
        if not enableRectifiedFirst:
            LC_stereo.rectifiedRight.link(xoutr_LC.input)

    # LR pair: reuse LC's synced left output instead of a second isp link,
    # so the same camera frame feeds both stereo nodes.
    LC_stereo.syncedLeft.link(LR_stereo.left)
    right.isp.link(LR_stereo.right)

    LR_stereo.disparity.link(xout_LR.input)
    LR_stereo.depth.link(LR_slc.inputDepth)
    LR_slc.out.link(LR_SpatialData.input)
    xinSpatialCalcConfig.out.link(LR_slc.inputConfig)
    if enableRectified:
        LR_stereo.rectifiedLeft.link(xoutl_LR.input)
        if not enableRectifiedFirst:
            LR_stereo.rectifiedRight.link(xoutr_LR.input)

    # CR pair: daisy-chain the synced outputs of the other two stereo nodes.
    LC_stereo.syncedRight.link(CR_stereo.left)
    LR_stereo.syncedRight.link(CR_stereo.right)

    CR_stereo.disparity.link(xout_CR.input)
    CR_stereo.depth.link(CR_slc.inputDepth)
    CR_slc.out.link(CR_SpatialData.input)
    xinSpatialCalcConfig.out.link(CR_slc.inputConfig)
    if enableRectified:
        CR_stereo.rectifiedLeft.link(xoutl_CR.input)
        if not enableRectifiedFirst:
            CR_stereo.rectifiedRight.link(xoutr_CR.input)

    return pipeline, LC_stereo.initialConfig.getMaxDisparity()


def check_input(roi, frame, DELTA=5):
    """Check if input is ROI or point. If point, convert to ROI.

    Args:
        roi: either a single point ``(x, y)`` or an ROI
            ``[[xmin, ymin], [xmax, ymax]]`` in pixel coordinates
            (list or numpy array).
        frame (numpy.ndarray): frame the ROI refers to; its shape supplies
            the clamping bounds and the normalization divisor.
        DELTA (int): half-size in pixels of the box built around a point,
            and the margin kept from the frame borders.

    Returns:
        numpy.ndarray: ROI normalized to [0, 1], as
        ``[[xmin, ymin], [xmax, ymax]]``.
    """
    roi = np.asarray(roi)

    # A bare (x, y) point: expand it into a 2*DELTA box centred on it.
    # BUG FIX: the original np.hstack of a (2,) point with a (2, 2) offset
    # array raised ValueError (mismatched dimensions); broadcasting the
    # offsets onto the point yields the intended corner pair.
    if roi.size == 2:
        roi = roi.reshape(2) + np.array([[-DELTA, -DELTA], [DELTA, DELTA]])

    # Keep the ROI at least DELTA pixels inside the frame.
    # BUG FIX: ndarray.clip returns a new array; the original discarded the
    # result, so no clamping ever happened.
    roi = roi.clip([DELTA, DELTA], [frame.shape[1] - DELTA, frame.shape[0] - DELTA])

    # Normalize x by width and y by height (shape[1::-1] == (width, height)).
    return roi / frame.shape[1::-1]


def click_and_crop(event, x, y, flags, param):
    """Mouse callback: capture a rectangle dragged with the left button.

    On button-down the press position is stored in the global ``ref_pt``;
    on button-up the release position is added and the global ``click_roi``
    is set to ``[[min_x, min_y], [max_x, max_y]]`` (top-left / bottom-right
    corners regardless of drag direction).
    """
    global ref_pt, click_roi

    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: remember where the button went down.
        ref_pt = [(x, y)]
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag finished: pair the release point with the press point and
        # sort the corners so the ROI is well-formed either way.
        pts = np.array([ref_pt[0], (x, y)])
        ref_pt = pts
        click_roi = np.array([pts.min(axis=0), pts.max(axis=0)])



def run():
    """Connect to a DepthAI device, start the pipeline and run the UI loop.

    Shows the disparity (and optionally rectified) streams of the LC/LR/CR
    stereo pairs, overlaying the spatial-location ROI and its X/Y/Z values
    in millimetres. Controls: mouse drag selects a new ROI, WASD nudges it,
    keys 1-5 switch the calculation algorithm, 'q' quits.
    """
    # Only names rebound here need `global`; `config`, `topLeft` and
    # `bottomRight` are merely read or attribute-mutated.
    global click_roi, calculation_algorithm

    # Connect to device and start pipeline
    with dai.Device() as device:
        pipeline, maxDisparity = create_pipeline()
        device.startPipeline(pipeline)

        outputQueueNames = device.getOutputQueueNames()

        for name in outputQueueNames:
            if "spatialData" in name:
                continue
            # Each image stream gets its own window with mouse ROI selection.
            cv2.namedWindow(name)
            cv2.setMouseCallback(name, click_and_crop)

        spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig")

        def drawText(frame, text, org, color=(255, 255, 255), thickness=1):
            # Thick black pass then thin colored pass for readability.
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 0), thickness + 3, cv2.LINE_AA
            )
            cv2.putText(
                frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                color, thickness, cv2.LINE_AA
            )

        def drawRect(frame, topLeft, bottomRight, color=(255, 255, 255), thickness=1):
            # Outlined rectangle, same halo technique as drawText().
            cv2.rectangle(frame, topLeft, bottomRight, (0, 0, 0), thickness + 3)
            cv2.rectangle(frame, topLeft, bottomRight, color, thickness)

        def drawSpatialLocations(frame, spatialLocations):
            # Overlay each measured ROI and its spatial coordinates (mm).
            for depthData in spatialLocations:
                roi = depthData.config.roi
                roi = roi.denormalize(width=frame.shape[1], height=frame.shape[0])
                xmin = int(roi.topLeft().x)
                ymin = int(roi.topLeft().y)
                xmax = int(roi.bottomRight().x)
                ymax = int(roi.bottomRight().y)

                # The original drew the same rectangle twice (raw cv2 calls
                # plus drawRect); one drawRect produces identical output.
                drawRect(
                    frame,
                    (xmin, ymin),
                    (xmax, ymax),
                )
                drawText(
                    frame,
                    f"X: {int(depthData.spatialCoordinates.x)} mm",
                    (xmin + 10, ymin + 20),
                )
                drawText(
                    frame,
                    f"Y: {int(depthData.spatialCoordinates.y)} mm",
                    (xmin + 10, ymin + 35),
                )
                drawText(
                    frame,
                    f"Z: {int(depthData.spatialCoordinates.z)} mm",
                    (xmin + 10, ymin + 50),
                )

        fps = FPSHandler()
        stepSize = 0.01  # normalized units per WASD key press
        newConfig = False
        frame = None

        # Latest spatial measurements per stereo pair, kept between frames.
        depthDatas = {
            "LC": [],
            "LR": [],
            "CR": [],
        }

        print("Use WASD keys to move ROI!")
        # Message-name prefixes; must mirror the streams create_pipeline() set up.
        messages = ["disparity", "spatialData"]
        if enableRectified:
            if enableRectifiedFirst:
                messages.append("rectifiedLeft")
            else:
                messages.extend(["rectifiedLeft", "rectifiedRight"])

        # Hoist queue lookups out of the hot loop instead of calling
        # device.getOutputQueue() once per stream per iteration.
        queues = {
            f"{m}_{g}": device.getOutputQueue(f"{m}_{g}")
            for g in ("LC", "LR", "CR")
            for m in messages
        }

        while not device.isClosed():

            # A mouse-dragged ROI takes effect once a frame size is known.
            if click_roi is not None and frame is not None:
                [topLeft.x, topLeft.y], [
                    bottomRight.x,
                    bottomRight.y,
                ] = check_input(click_roi, frame)
                click_roi = None
                newConfig = True

            for cam_group in ["LC", "LR", "CR"]:
                for q in [f"{x}_{cam_group}" for x in messages]:
                    message = queues[q].tryGet()
                    if message is None:
                        continue
                    # isinstance instead of type() == for idiomatic, subclass-safe checks.
                    if isinstance(message, dai.SpatialLocationCalculatorData):
                        depthDatas[cam_group] = message.getSpatialLocations()
                    elif isinstance(message, dai.ImgFrame):
                        frame = message.getCvFrame()
                        fps.tick(q)
                        if "disparity" in q:
                            # Scale disparity to 8-bit and color-map it for display.
                            disp = (frame * (255.0 / maxDisparity)).astype(np.uint8)
                            disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
                            drawSpatialLocations(disp, depthDatas[cam_group])
                            fps.drawFps(disp, q)
                            cv2.imshow(q, disp)
                        else:
                            drawSpatialLocations(frame, depthDatas[cam_group])
                            fps.drawFps(frame, q)
                            cv2.imshow(q, frame)

            key = cv2.waitKey(1)
            if key == ord("q"):
                break
            # WASD: translate the ROI while keeping it inside [0, 1].
            if key == ord("w"):
                if topLeft.y - stepSize >= 0:
                    topLeft.y -= stepSize
                    bottomRight.y -= stepSize
                    newConfig = True
            elif key == ord("a"):
                if topLeft.x - stepSize >= 0:
                    topLeft.x -= stepSize
                    bottomRight.x -= stepSize
                    newConfig = True
            elif key == ord("s"):
                if bottomRight.y + stepSize <= 1:
                    topLeft.y += stepSize
                    bottomRight.y += stepSize
                    newConfig = True
            elif key == ord("d"):
                if bottomRight.x + stepSize <= 1:
                    topLeft.x += stepSize
                    bottomRight.x += stepSize
                    newConfig = True

            # 1-5: switch the statistic used over the ROI's depth pixels.
            elif key == ord("1"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEAN
                print("Switching calculation algorithm to MEAN!")
                newConfig = True
            elif key == ord("2"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MIN
                print("Switching calculation algorithm to MIN!")
                newConfig = True
            elif key == ord("3"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MAX
                print("Switching calculation algorithm to MAX!")
                newConfig = True
            elif key == ord("4"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MODE
                print("Switching calculation algorithm to MODE!")
                newConfig = True
            elif key == ord("5"):
                calculation_algorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN
                print("Switching calculation algorithm to MEDIAN!")
                newConfig = True

            if newConfig:
                # One config message reconfigures all three calculators,
                # since their inputConfig ports share the same XLinkIn.
                config.roi = dai.Rect(topLeft, bottomRight)
                config.calculationAlgorithm = calculation_algorithm

                cfg = dai.SpatialLocationCalculatorConfig()
                cfg.addROI(config)
                spatialCalcConfigInQueue.send(cfg)
                newConfig = False


if __name__ == "__main__":
    # Shared mouse-selection state: click_and_crop() writes these globals,
    # run() consumes click_roi to update the spatial-calculator ROI.
    ref_pt = None
    click_roi = None
    run()
