import onnx
import pyrealsense2 as rs
import numpy as np
import cv2
import torch
import threading
import pathlib
from pathlib import Path
import openvino.runtime as ov
from openvino.preprocess import PrePostProcessor
from openvino.preprocess import ColorFormat
from openvino.runtime import Layout, Type
# HACK: alias pathlib.PosixPath to WindowsPath for the lifetime of this
# script. Presumably needed because a model artifact was serialized on
# Linux with PosixPath objects embedded and is being deserialized on
# Windows — TODO confirm which load actually requires this.
# NOTE(review): `temp` saves the original class but is never restored.
temp = pathlib.PosixPath
pathlib.PosixPath = pathlib.WindowsPath

# Shared 960x1280 BGR canvas: a 2x2 mosaic of 640x480 camera tiles,
# written into by every per-device thread.
# NOTE(review): accessed concurrently without a lock — confirm acceptable.
background = np.zeros((960, 1280, 3), dtype=np.uint8)

# Detection post-processing thresholds (confidence gate + NMS).
SCORE_THRESHOLD = 0.2
NMS_THRESHOLD = 0.4
CONFIDENCE_THRESHOLD = 0.4


def resize_and_pad(image, new_shape):
    """Letterbox `image` into a canvas of `new_shape`, preserving aspect ratio.

    The image is scaled down/up to fit entirely inside the target, then the
    right and bottom edges are padded with mid-grey.

    Args:
        image: H x W x C image (numpy array).
        new_shape: (height, width) of the output canvas.

    Returns:
        Tuple (padded_image, delta_w, delta_h) where delta_w / delta_h are
        the padding widths added on the right / bottom in pixels.
    """
    old_size = image.shape[:2]  # (height, width)
    # Scale by the most constraining axis so the result always fits inside
    # new_shape. The previous formula, new_shape[-1] / max(old_size), used
    # only the target width; for tall inputs with a rectangular target the
    # resized height could exceed new_shape[0], yielding a negative border
    # (cv2.copyMakeBorder error). For the 480x640 -> (480, 640) path used by
    # the capture loop the result is unchanged (ratio == 1 either way).
    ratio = min(new_shape[0] / old_size[0], new_shape[1] / old_size[1])
    new_size = tuple(int(x * ratio) for x in old_size)

    image_resized = cv2.resize(image, (new_size[1], new_size[0]))

    delta_w = new_shape[1] - new_size[1]
    delta_h = new_shape[0] - new_size[0]

    color = [100, 100, 100]  # mid-grey padding
    new_im = cv2.copyMakeBorder(image_resized, 0, delta_h, 0, delta_w,
                                cv2.BORDER_CONSTANT, value=color)

    return new_im, delta_w, delta_h


def dectshow(org_img, boxs):
    """Render detection boxes and their labels onto a copy of `org_img`.

    Args:
        org_img: source image; left unmodified.
        boxs: iterable of boxes shaped [x1, y1, x2, y2, ..., label] — the
            last element is drawn as the label text.

    Returns:
        The annotated copy of the image.
    """
    canvas = org_img.copy()
    for entry in boxs:
        top_left = (int(entry[0]), int(entry[1]))
        bottom_right = (int(entry[2]), int(entry[3]))
        cv2.rectangle(canvas, top_left, bottom_right, (0, 255, 0), 2)
        cv2.putText(canvas, str(entry[-1]), top_left,
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    return canvas


def process_device(device, window_name, index):
    """Stream one RealSense camera, run the OpenVINO model per frame, and
    draw annotated output into this camera's tile of the shared `background`
    mosaic.

    Args:
        device: pyrealsense2 device whose 640x480 BGR color stream is used.
        window_name: identifying label for this camera (not used for the
            display itself; all tiles share one "Traffic System" window).
        index: tile position in the 2x2 mosaic — column = index % 2,
            row = index // 2.

    Runs until 'q' or ESC is pressed; the pipeline is always stopped on exit.

    NOTE(review): this function runs on several threads yet calls
    cv2.imshow/waitKey and writes the shared `background` without a lock —
    OpenCV GUI calls are generally not thread-safe; confirm acceptable.
    """
    core = ov.Core()
    model = core.read_model(str(Path("./model.xml")))

    # Bake preprocessing into the compiled model: the u8 NHWC BGR camera
    # frame is converted to f32 RGB scaled by 1/255 and relaid out as NCHW.
    ppp = PrePostProcessor(model)
    ppp.input().tensor().set_element_type(Type.u8).set_layout(Layout("NHWC")).set_color_format(ColorFormat.BGR)
    ppp.input().preprocess().convert_element_type(Type.f32).convert_color(ColorFormat.RGB).scale([255., 255., 255.])
    ppp.input().model().set_layout(Layout("NCHW"))
    ppp.output().tensor().set_element_type(Type.f32)
    model = ppp.build()
    compiled_model = core.compile_model(model, "CPU")
    # Create the infer request once; the original rebuilt it on every frame.
    infer_request = compiled_model.create_infer_request()

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device(device.get_info(rs.camera_info.serial_number))
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)
    pipeline.start(config)
    try:
        while True:
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            if not color_frame:
                continue  # frame set may arrive without a color frame
            color_image = np.asanyarray(color_frame.get_data())
            img_resized, dw, dh = resize_and_pad(color_image, (480, 640))
            img = color_image.copy()  # annotated copy shown in the mosaic
            input_tensor = np.expand_dims(img_resized, 0)
            infer_request.infer({0: input_tensor})
            output = infer_request.get_output_tensor()
            predictions = output.data[0]

            # Pass 1: keep raw predictions above the confidence gate.
            boxes = []
            class_ids = []
            confidences = []
            for prediction in predictions:
                confidence = prediction[4].item()
                if confidence < CONFIDENCE_THRESHOLD:
                    continue
                classes_scores = prediction[5:]
                _, _, _, max_indx = cv2.minMaxLoc(classes_scores)
                class_id = max_indx[1]
                if classes_scores[class_id] > .25:
                    confidences.append(confidence)
                    class_ids.append(class_id)
                    # Model emits center-x, center-y, width, height.
                    x, y, w, h = (prediction[0].item(), prediction[1].item(),
                                  prediction[2].item(), prediction[3].item())
                    boxes.append(np.array([x - (w / 2), y - (h / 2), w, h]))

            # Pass 2: non-maximum suppression over the survivors.
            indexes = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD)

            detections = []
            for i in indexes:
                j = i.item()
                detections.append({"class_index": class_ids[j],
                                   "confidence": confidences[j],
                                   "box": boxes[j]})

            for det_idx, detection in enumerate(detections):
                box = detection["box"]
                classId = detection["class_index"]
                confidence = detection["confidence"]

                # Undo the letterbox: map coordinates from the padded
                # 640x480 model input back to the original frame.
                rx = img.shape[1] / (img_resized.shape[1] - dw)
                ry = img.shape[0] / (img_resized.shape[0] - dh)
                box[0] = rx * box[0]
                box[1] = ry * box[1]
                box[2] = rx * box[2]
                box[3] = ry * box[3]

                # BUGFIX: the original interpolated `i`, a stale leftover
                # from the NMS index loop, so every line printed the same
                # index; use the index of this detection instead.
                print(
                    f"Bbox {det_idx} Class: {classId} Confidence: {confidence} Scaled coords: [ cx: {(box[0] + (box[2] / 2)) / img.shape[1]}, cy: {(box[1] + (box[3] / 2)) / img.shape[0]}, w: {box[2] / img.shape[1]}, h: {box[3] / img.shape[0]} ]")
                xmax = box[0] + box[2]
                ymax = box[1] + box[3]
                img = cv2.rectangle(img, (int(box[0]), int(box[1])), (int(xmax), int(ymax)), (0, 255, 0), 3)
                img = cv2.rectangle(img, (int(box[0]), int(box[1]) - 20), (int(xmax), int(box[1])), (0, 255, 0),
                                    cv2.FILLED)
                img = cv2.putText(img, str(classId), (int(box[0]), int(box[1]) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                  (0, 0, 0))

            # Blit this camera's annotated frame into its 2x2 mosaic tile.
            x = (index % 2) * 640
            y = (index // 2) * 480
            background[y:y + 480, x:x + 640] = img

            cv2.imshow("Traffic System", background)

            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 27:
                break
    finally:
        pipeline.stop()


if __name__ == "__main__":
    ctx = rs.context()
    devices = ctx.query_devices()
    threads = []

    for i, device in enumerate(devices):
        window_name = f"Device {i + 1}: Serial Number - {device.get_info(rs.camera_info.serial_number)}"
        thread = threading.Thread(target=process_device, args=(device, window_name, i))
        threads.append(thread)

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    cv2.destroyAllWindows()
