#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import cv2
import depthai as dai
import numpy as np
import threading
import time
from pathlib import Path

def create_pipeline(nn_path=None):
    """Build the DepthAI pipeline: RGB camera + stereo depth + YOLO spatial detection.

    Args:
        nn_path: Optional path (str or Path) to the compiled .blob model.
            Defaults to 'best_openvino_2022.1_6shave.blob' next to this script.

    Returns:
        A configured ``dai.Pipeline`` ready to be uploaded to a device.
    """
    pipeline = dai.Pipeline()

    # Create nodes
    camRgb = pipeline.create(dai.node.ColorCamera)
    spatialDetectionNetwork = pipeline.create(dai.node.YoloSpatialDetectionNetwork)
    monoLeft = pipeline.create(dai.node.MonoCamera)
    monoRight = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)

    # XLink outputs streamed to the host
    xoutRgb = pipeline.create(dai.node.XLinkOut)
    xoutNN = pipeline.create(dai.node.XLinkOut)
    xoutDepth = pipeline.create(dai.node.XLinkOut)

    xoutRgb.setStreamName("rgb")
    xoutNN.setStreamName("detections")
    xoutDepth.setStreamName("depth")

    # Configure RGB camera: small preview feeds the NN, full 1080p video for display
    camRgb.setPreviewSize(512, 288)
    camRgb.setVideoSize(1920, 1080)
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    camRgb.setInterleaved(False)
    camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

    monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoLeft.setCamera("left")
    monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoRight.setCamera("right")

    # Configure stereo depth; align depth to the RGB camera (CAM_A) so
    # spatial coordinates match RGB detection boxes
    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
    stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
    stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
    stereo.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
    stereo.setSubpixel(True)
    stereo.setLeftRightCheck(True)
    stereo.setExtendedDisparity(False)

    # Configure detection network
    if nn_path is None:
        nn_path = (Path(__file__).parent / 'best_openvino_2022.1_6shave.blob').resolve()
    spatialDetectionNetwork.setBlobPath(str(nn_path))
    spatialDetectionNetwork.setConfidenceThreshold(0.5)
    spatialDetectionNetwork.input.setBlocking(False)
    spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
    # Depth averaging window: only accept depth readings between 0.4 m and 5 m
    spatialDetectionNetwork.setDepthLowerThreshold(400)
    spatialDetectionNetwork.setDepthUpperThreshold(5000)
    spatialDetectionNetwork.setNumClasses(2)
    spatialDetectionNetwork.setCoordinateSize(4)
    spatialDetectionNetwork.setIouThreshold(0.5)

    # Link nodes: mono pair -> stereo -> NN depth input; preview -> NN;
    # full video and NN passthrough depth go back to the host
    monoLeft.out.link(stereo.left)
    monoRight.out.link(stereo.right)
    camRgb.preview.link(spatialDetectionNetwork.input)
    camRgb.video.link(xoutRgb.input)
    spatialDetectionNetwork.out.link(xoutNN.input)
    stereo.depth.link(spatialDetectionNetwork.inputDepth)
    spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input)

    return pipeline

def camera_thread(camera_id, device_id):
    """Run detection + display loop for one OAK device until 'q' is pressed.

    Args:
        camera_id: Display label / window index for this camera.
        device_id: Index into ``dai.Device.getAllAvailableDevices()``.

    NOTE(review): cv2.imshow/waitKey are called here from worker threads;
    OpenCV HighGUI is not guaranteed thread-safe on all backends — confirm
    this works on the target platform, or move GUI calls to the main thread.
    """
    # Class index -> human-readable label for the 2-class model.
    # Hoisted out of the per-frame loop so the list is built once.
    labelMap = ["durian", "person"]

    pipeline = create_pipeline()

    try:
        # Resolve the requested physical device
        devices = dai.Device.getAllAvailableDevices()
        if device_id >= len(devices):
            print(f"Camera {camera_id}: Device {device_id} not found")
            return

        device_info = devices[device_id]
        device = dai.Device(pipeline, deviceInfo=device_info)

        print(f"Camera {camera_id} successfully connected to device {device_id}")

        with device:
            previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
            detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
            depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)

            startTime = time.monotonic()
            counter = 0
            fps = 0
            color = (0, 255, 0)          # BGR: detection boxes / FPS text
            text_color = (0, 255, 255)   # BGR: label / confidence text

            while True:
                # Blocking get: paces the loop at the device frame rate
                inPreview = previewQueue.get()
                inDet = detectionNNQueue.get()
                depth = depthQueue.get()

                frame = inPreview.getCvFrame()
                depthFrame = depth.getFrame()
                detections = inDet.detections

                # Normalize the depth map for display. The 1st/99th percentiles
                # clip outliers so the colormap uses the useful range.
                # NOTE(review): [::4] subsamples rows only (not columns), and
                # max_depth is computed over zero (invalid) pixels while
                # min_depth excludes them — intentional? confirm.
                depth_downscaled = depthFrame[::4]
                if np.all(depth_downscaled == 0):
                    min_depth = 0  # no valid depth this frame
                else:
                    min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
                max_depth = np.percentile(depth_downscaled, 99)
                depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
                depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

                # FPS: frames counted over a sliding ~1 s window
                counter += 1
                current_time = time.monotonic()
                if (current_time - startTime) > 1:
                    fps = counter / (current_time - startTime)
                    counter = 0
                    startTime = current_time

                # Draw detection results (NN coords are normalized 0..1)
                height = frame.shape[0]
                width = frame.shape[1]
                for detection in detections:
                    x1 = int(detection.xmin * width)
                    x2 = int(detection.xmax * width)
                    y1 = int(detection.ymin * height)
                    y2 = int(detection.ymax * height)

                    # Fall back to the raw class index if it is out of range
                    # (was a bare `except:` — narrowed to the actual failure mode)
                    try:
                        label = labelMap[detection.label]
                    except (IndexError, TypeError):
                        label = detection.label

                    # Label, confidence, then the bounding box itself
                    cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, text_color)
                    cv2.putText(frame, f"{detection.confidence*100:.2f}%", (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, text_color)
                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)

                # Overlay FPS in the bottom-left corner
                cv2.putText(frame, f"Camera {camera_id} FPS: {fps:.2f}", (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)

                # Display at half of 1080p to keep two camera windows on screen
                cv2.imshow(f"Camera {camera_id}", cv2.resize(frame, (960, 540)))
                cv2.imshow(f"Depth {camera_id}", cv2.resize(depthFrameColor, (960, 540)))

                # 'q' quits this camera's loop
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break

    except Exception as e:
        # Top-level thread boundary: report and let the thread exit cleanly
        print(f"Camera {camera_id} error: {e}")

if __name__ == "__main__":
    # Fail fast if the model blob is missing (the pipeline cannot start without it)
    nnPath = Path(__file__).parent / 'best_openvino_2022.1_6shave.blob'
    if not nnPath.exists():
        print(f"Model file not found: {nnPath}")
        # raise SystemExit instead of exit(): exit() is a site-module helper
        # intended for interactive use and may be absent (e.g. under -S)
        raise SystemExit(1)

    # Enumerate connected OAK devices to decide single vs dual camera mode
    devices = dai.Device.getAllAvailableDevices()
    print(f"Found {len(devices)} OAK devices")

    if len(devices) >= 2:
        print("Starting dual camera mode")
        # One worker thread per camera; daemon so Ctrl+C can end the process
        # even if a thread is blocked on a device queue
        thread1 = threading.Thread(target=camera_thread, args=(0, 0))
        thread2 = threading.Thread(target=camera_thread, args=(1, 1))

        thread1.daemon = True
        thread2.daemon = True

        thread1.start()
        thread2.start()

        try:
            thread1.join()
            thread2.join()
        except KeyboardInterrupt:
            print("Received interrupt signal")
    else:
        print("Starting single camera mode")
        # Run directly on the main thread when only one device is present
        camera_thread(0, 0)

    cv2.destroyAllWindows()
