#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

import cv2
import numpy as np
import pyzed.sl as sl
import rospy
from geometry_msgs.msg import Point
from std_msgs.msg import Float32
from ultralytics import YOLO

# Initialize the ROS node for this script.
rospy.init_node('depth_publisher', anonymous=True)
# Publisher for the depth value at each detection's center pixel
# (units follow the ZED depth map — presumably millimeters, the SDK
# default, since the point cloud below is divided by 1000; confirm).
depth_publisher = rospy.Publisher('/depth_topic', Float32, queue_size=10)
# Publisher for the 3-D point-cloud position (meters) of each detection center.
point_zed = rospy.Publisher('/zed_point', Point, queue_size=10)
# 2. Capture images.
def image_capture():
    """Grab frames from a ZED camera, run YOLOv8 tracking on each frame,
    and publish the depth and 3-D position of every detection's center.

    Publishes:
        /depth_topic (Float32): depth at each bounding-box center
            (raw ZED depth units — presumably millimeters, the SDK
            default; confirm against InitParameters.coordinate_units).
        /zed_point (Point): point-cloud XYZ at each center, converted
            to meters (value / 1000).

    Runs until ROS shutdown or until ESC is pressed in the preview
    window; the camera is always released on exit.
    """
    zed = sl.Camera()
    # Camera configuration: 720p at a low frame rate keeps per-frame
    # cost manageable for on-CPU YOLO inference. fps options: 15/30/60/100.
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720
    init_params.camera_fps = 5
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        # Camera unavailable: nothing useful can be done, abort the node.
        sys.exit(1)

    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD

    # Reusable sl.Mat containers (the SDK fills them in-place each grab).
    image = sl.Mat()        # left rectified image
    dep = sl.Mat()          # displayable depth view
    depth = sl.Mat()        # per-pixel float depth values
    point_cloud = sl.Mat()  # XYZBGRA point cloud

    # BUG FIX: the YOLO model used to be constructed inside the grab loop,
    # re-reading the weights from disk on every single frame. Load it once.
    model = YOLO('yolov8n.pt').cpu()

    rate = rospy.Rate(100)
    try:
        while not rospy.is_shutdown():
            # Grab the latest frame and compute measures (depth, point cloud).
            if zed.grab(runtime_parameters) != sl.ERROR_CODE.SUCCESS:
                continue
            zed.retrieve_image(image, sl.VIEW.LEFT)
            img = image.get_data()  # numpy view of the left image
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH, sl.MEM.CPU)
            zed.retrieve_image(dep, sl.VIEW.DEPTH)
            depth_map = depth.get_data()

            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            det = model.track(img, save=False, conf=0.4, iou=0.4)
            for result in det:
                cv2.imshow("View", result.plot())
            boxes = det[0].boxes.cpu().numpy()
            centers_xywh = boxes.xywh  # (N, 4) rows of [cx, cy, w, h]

            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZBGRA, sl.MEM.CPU)
            point_map = point_cloud.get_data()

            for box in centers_xywh:
                cx, cy = int(box[0]), int(box[1])
                # Note index order: arrays are indexed [row, col] = [y, x].
                center_depth = depth_map[cy, cx]
                # Depth is NaN/inf for occluded or out-of-range pixels;
                # skip those instead of publishing garbage values.
                if not np.isfinite(center_depth):
                    continue
                msg = Point()
                # Point cloud presumably in millimeters (ZED default
                # unit) — divided to meters here; confirm if units change.
                msg.x = float(point_map[cy, cx][0] / 1000.0)
                msg.y = float(point_map[cy, cx][1] / 1000.0)
                msg.z = float(point_map[cy, cx][2] / 1000.0)
                point_zed.publish(msg)
                depth_publisher.publish(Float32(center_depth))
                rospy.loginfo('center depth at (%d, %d): %s', cx, cy, center_depth)
                # NOTE(review): this sleeps 10 ms per detection, not per
                # frame — kept as-is, but likely intended per-frame.
                rate.sleep()
            rospy.sleep(0.2)  # per-frame pacing
            if cv2.waitKey(1) & 0xFF == 27:  # ESC quits
                break
    finally:
        # Always release the camera, even on exception or ROS shutdown.
        zed.close()
if __name__ == '__main__':
    try:
        # image_capture() loops until ROS shutdown or ESC; spin() keeps
        # the node alive should it ever return early.
        image_capture()
        rospy.spin()
    except rospy.ROSInterruptException:
        # Raised by rospy sleep/rate calls on Ctrl-C / node shutdown;
        # exiting quietly is the standard ROS entry-point pattern.
        pass
