import pyrealsense2 as rs
import numpy as np
import cv2
import torch


# Load the custom-trained YOLOv5 model from a local clone of the
# ultralytics/yolov5 repository (source='local' avoids a network fetch
# of the hub repo at startup).
# NOTE(review): both paths are machine-specific — consider making them
# command-line arguments or environment variables.
MODEL_REPO = '/home/qinsy/yolov5'   # local yolov5 repository checkout
MODEL_WEIGHTS = './best0813.pt'     # custom-trained weight file
model = torch.hub.load(MODEL_REPO, 'custom', path=MODEL_WEIGHTS, source='local')


# --- Intel RealSense D435 stream configuration --------------------------
# Depth and color use the same resolution and frame rate so each frameset
# delivers a matched pair of frames.
STREAM_WIDTH, STREAM_HEIGHT, STREAM_FPS = 640, 480, 30

pipeline = rs.pipeline()
config = rs.config()

# Depth: 16-bit z values; color: 8-bit BGR (OpenCV channel order).
config.enable_stream(rs.stream.depth, STREAM_WIDTH, STREAM_HEIGHT,
                     rs.format.z16, STREAM_FPS)
config.enable_stream(rs.stream.color, STREAM_WIDTH, STREAM_HEIGHT,
                     rs.format.bgr8, STREAM_FPS)

# Begin streaming with the configured profile.
pipeline.start(config)


try:
    while True:
        # Block until a synchronized depth/color frameset arrives.
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # Skip incomplete framesets (either stream may momentarily drop).
        if not depth_frame or not color_frame:
            continue

        # Convert frames to numpy arrays for OpenCV / model consumption.
        # depth_image is currently unused downstream but kept for future
        # depth-based measurements on detected boxes.
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Run YOLOv5 inference on the BGR color frame.
        results = model(color_image)

        # Detections as a pandas DataFrame with columns:
        # xmin, ymin, xmax, ymax, confidence, class, name.
        detections = results.pandas().xyxy[0]

        # Collect per-detection info (kept for downstream use) and log it
        # in the same pass — no need to rebuild/zip the lists afterwards.
        boxes = []          # bounding boxes as dicts of int pixel coords
        class_ids = []      # integer class indices
        confidences = []    # detection confidences
        class_names = []    # human-readable class labels
        for _, row in detections.iterrows():
            box = {
                'xmin': int(row['xmin']),
                'ymin': int(row['ymin']),
                'xmax': int(row['xmax']),
                'ymax': int(row['ymax']),
            }
            boxes.append(box)
            class_ids.append(int(row['class']))
            confidences.append(float(row['confidence']))
            class_names.append(row['name'])
            print(f"Class: {row['name']}, Confidence: {float(row['confidence']):.2f}")
            print(f"Bounding Box: ({box['xmin']}, {box['ymin']}), ({box['xmax']}, {box['ymax']})")

        # render() draws boxes/labels in place and returns a list of
        # annotated frames, one per input image — take the single frame.
        result_img = results.render()[0]

        # Display the annotated frame.
        cv2.imshow('YOLOv5 Real-Time Detection', result_img)

        # Exit cleanly when 'q' is pressed in the display window.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

finally:
    # Always release the camera and GUI resources, even on error/Ctrl-C.
    pipeline.stop()
    cv2.destroyAllWindows()
