import cv2
import numpy as np
import depthai as dai
from time import sleep
import datetime
import argparse

# Median filter applied on-device to the disparity map; KERNEL_7x7 is the
# strongest (and default) smoothing option offered by StereoDepth.
median   = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7

def create_stereo_depth_pipeline():
    """Build a DepthAI pipeline: two mono cameras feeding a StereoDepth node.

    Every stereo output (synced left/right, rectified pair, disparity, depth)
    is exposed to the host through an XLinkOut whose stream name matches the
    entries of the returned list.

    Returns:
        (pipeline, streams): the configured dai.Pipeline and the ordered list
        of output stream names.
    """
    print("Creating Stereo Depth pipeline: ", end='')
    pipeline = dai.Pipeline()

    cam_left = pipeline.create(dai.node.MonoCamera)
    cam_right = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)

    cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
    cam_right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    for cam in (cam_left, cam_right):
        cam.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)

    stereo.initialConfig.setConfidenceThreshold(200)
    stereo.setRectifyEdgeFillColor(0)  # black fill makes the rectification cutout visible
    stereo.initialConfig.setMedianFilter(median)

    cam_left.out.link(stereo.left)
    cam_right.out.link(stereo.right)

    # One XLinkOut per stereo output; the stream name doubles as the host
    # queue name. Order here defines the order of the returned streams list.
    stereo_outputs = [
        ('left', stereo.syncedLeft),
        ('right', stereo.syncedRight),
        ('rectified_left', stereo.rectifiedLeft),
        ('rectified_right', stereo.rectifiedRight),
        ('disparity', stereo.disparity),
        ('depth', stereo.depth),
    ]
    for stream_name, node_output in stereo_outputs:
        xout = pipeline.create(dai.node.XLinkOut)
        xout.setStreamName(stream_name)
        node_output.link(xout.input)

    streams = [stream_name for stream_name, _ in stereo_outputs]
    return pipeline, streams

def convert_to_cv2_frame(name, image):
    """Convert a DepthAI frame into a numpy array suitable for cv2.imshow.

    The raw byte buffer is interpreted per stream: 'depth' is RAW16
    (little-endian uint16 per pixel), 'disparity' is 8-bit and is rescaled
    to the full 0-255 range for display, and every other stream (mono /
    rectified) is plain 8-bit grayscale.

    Args:
        name:  stream name selecting the pixel format interpretation.
        image: frame object exposing getData()/getWidth()/getHeight().

    Returns:
        np.ndarray of shape (h, w); dtype uint16 for 'depth', uint8 otherwise.
    """
    data, w, h = image.getData(), image.getWidth(), image.getHeight()
    if name == 'depth':
        # Pair up consecutive bytes into native-endian uint16 values.
        frame = np.array(data).astype(np.uint8).view(np.uint16).reshape((h, w))
    elif name == 'disparity':
        # Max disparity is 95 with neither extended-disparity nor subpixel
        # enabled (matching the pipeline config) — TODO confirm if those
        # StereoDepth modes are ever turned on.
        disp = np.array(data).astype(np.uint8).reshape((h, w))
        frame = (disp * (255.0 / 95)).astype(np.uint8)
    else:
        # Mono / rectified streams: one uint8 per pixel.
        frame = np.array(data).astype(np.uint8).reshape((h, w))
    return frame

def test_pipeline():
    """Run the stereo depth pipeline on a connected device and preview streams.

    Opens the first available DepthAI device, streams the stereo outputs to
    the host, and shows the rectified and disparity frames until 'q' is
    pressed in any preview window.

    Raises:
        RuntimeError: if the device lacks the LEFT/RIGHT mono cameras that
            stereo depth requires.
    """
    print("Creating DepthAI device")
    with dai.Device() as device:
        cams = device.getConnectedCameras()
        if dai.CameraBoardSocket.LEFT not in cams or dai.CameraBoardSocket.RIGHT not in cams:
            # Previously this only printed a warning and then crashed with a
            # NameError on the undefined `pipeline`; fail fast instead.
            raise RuntimeError(
                "Stereo depth requires LEFT and RIGHT mono cameras; "
                "connected cameras: {}".format(cams))
        pipeline, streams = create_stereo_depth_pipeline()

        print("Starting pipeline")
        device.startPipeline(pipeline)

        # One non-blocking receive queue (depth 8) per output stream.
        q_list = [device.getOutputQueue(s, 8, blocking=False) for s in streams]

        try:
            while True:
                for q in q_list:
                    name = q.getName()
                    image = q.get()
                    # Skip some streams for now, to reduce CPU load
                    if name in ('left', 'right', 'depth'):
                        continue
                    frame = convert_to_cv2_frame(name, image)
                    cv2.imshow(name, frame)
                if cv2.waitKey(1) == ord('q'):
                    break
        finally:
            cv2.destroyAllWindows()


if __name__ == '__main__':
    test_pipeline()