#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

import numpy as np
import pyzed.sl as sl
import cv2
import os
import rospy
import torch
from std_msgs.msg import Float32
from ultralytics import YOLO
from geometry_msgs.msg import Point
# out_cat = cv2.VideoWriter("C:\\Users\\Administrator\\Desktop\\save.mp4", fourcc, 24, (352, 288), True)  # save location/format
center = (250, 250)  # circle centre coordinates (appears unused in this file — TODO confirm)
radius = 20  # marker circle radius in pixels
color = (0, 0, 255)  # BGR colour — (0, 0, 255) is red (original comment said blue)
thickness = -1  # -1 tells cv2.circle to draw the circle filled
# Initialise the ROS node for this script (import-time side effect).
rospy.init_node('depth_publisher', anonymous=True)
# Publishers: box-centre depth value, and 3-D point-cloud positions.
depth_publisher = rospy.Publisher('/depth_topic', Float32, queue_size=10)
point_zed = rospy.Publisher('/zed_point', Point, queue_size=10)
point_zed_person = rospy.Publisher('/zed_point_person', Point, queue_size=10)
# 2. 捕获图像
def image_capture():
    """Capture ZED frames, run YOLOv8 person tracking, and publish results.

    For the first tracked person in each frame, publishes via the
    module-level publishers:
      * ``/depth_topic`` (Float32): depth at the person's box centre
        (raw value from the DEPTH measure, configured in millimetres)
      * ``/zed_point_person`` (Point): the person's XYZ position converted
        from millimetres to metres

    If ``sys.argv[1]`` is given it is replayed as an SVO file instead of
    opening a live camera.  Runs until ROS shutdown or until Esc is pressed
    in the OpenCV preview window.
    """
    zed = sl.Camera()

    # --- camera configuration -------------------------------------------
    input_type = sl.InputType()
    if len(sys.argv) >= 2:
        input_type.set_from_svo_file(sys.argv[1])  # optional SVO replay
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD720
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER  # depth & point cloud in mm

    # Open the camera; bail out on failure.
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        sys.exit(1)  # BUGFIX: was the interactive-only builtin exit(1)

    runtime = sl.RuntimeParameters()
    resolution = zed.get_camera_information().camera_configuration.resolution
    dep = sl.Mat(resolution.width, resolution.height, sl.MAT_TYPE.U8_C4)    # depth view (for display)
    depth = sl.Mat(resolution.width, resolution.height, sl.MAT_TYPE.U8_C4)  # depth values
    image_zed = sl.Mat(resolution.width, resolution.height, sl.MAT_TYPE.U8_C4)
    point_cloud = sl.Mat()  # XYZBGRA point cloud

    # BUGFIX: the YOLO model used to be constructed inside the grab loop,
    # re-loading the weights on every single frame.  Load it once up front.
    model = YOLO('yolov8n.pt').cpu()

    # rate = rospy.Rate(100)  # 10 Hz
    while not rospy.is_shutdown():
        if zed.grab(runtime) != sl.ERROR_CODE.SUCCESS:
            continue  # grab failed for this frame; try again

        # Retrieve the left image plus depth/point-cloud measures.
        zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, resolution)
        zed.retrieve_measure(depth, sl.MEASURE.DEPTH, sl.MEM.CPU)  # depth values
        zed.retrieve_image(dep, sl.VIEW.DEPTH)  # depth view
        zed.retrieve_measure(point_cloud, sl.MEASURE.XYZBGRA, sl.MEM.CPU)
        point_map = point_cloud.get_data()
        depth_map = depth.get_data()
        img = image_zed.get_data()  # numpy array for display / inference

        # BUGFIX: the original applied COLOR_BGR2RGB twice; on the ZED's
        # 4-channel BGRA frame that is BGRA -> RGB -> BGR.  Do the
        # equivalent BGRA -> BGR conversion in a single pass instead.
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

        results = model.track(img, save=False, conf=0.4, iou=0.4)
        boxes = results[0].boxes.xywh.cpu()
        target = results[0].boxes.cls
        print(target)
        if 0 in target:  # COCO class 0 == person
            print("you 人")
        else:
            print("没人")

        if results[0].boxes.id is not None and 0 in target:
            num_zeros = torch.sum(target == 0).item()  # person count

            # Draw detections plus a person-count overlay.
            annotated_frame = results[0].plot()
            annotated_frame = cv2.putText(annotated_frame, str(num_zeros) + "person", (250, 50),
                                          cv2.FONT_HERSHEY_SIMPLEX, 1.2,
                                          (0, 0, 255), 2)
            for box, target_id in zip(boxes, target):
                x, y, w, h = box

                if target_id.item() == 0:
                    # Mark the person's box centre and publish its depth/XYZ.
                    cv2.circle(annotated_frame, (int(x), int(y)), radius, color, thickness)
                    center_depth = depth_map[int(y), int(x)]
                    point_cloud_zed_person = Point()
                    # mm -> m for the published position
                    point_cloud_zed_person.x = float(point_map[int(y), int(x)][0] / 1000.0)
                    point_cloud_zed_person.y = float(point_map[int(y), int(x)][1] / 1000.0)
                    point_cloud_zed_person.z = float(point_map[int(y), int(x)][2] / 1000.0)
                    point_zed_person.publish(point_cloud_zed_person)
                    # NOTE(review): center_depth may be NaN/inf where the ZED
                    # has no depth; published as-is to preserve behaviour.
                    depth_publisher.publish(Float32(center_depth))
                    print('中心点深度值', depth_map[int(y), int(x)] /1000.0 ,  point_cloud_zed_person.z)
                    break  # only the first detected person is published
        else:
            # No tracked person: still show the annotated frame.
            annotated_frame = results[0].plot()
            annotated_frame = cv2.putText(annotated_frame, "no person", (250, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
                                          (0, 0, 255), 2)

        cv2.imshow("YOLOv8 Tracking", annotated_frame)
        boxes = results[0].boxes.cpu().numpy()
        XYWH = boxes.xywh
        print(XYWH.shape, "size")

        rospy.sleep(0.01)
        key = cv2.waitKey(1)
        if key & 0xFF == 27:  # Esc quits
            break
    cv2.destroyAllWindows()
    zed.close()
if __name__ == "__main__":
    # Run the blocking capture/track loop; returns on ROS shutdown or Esc.
    image_capture()
    # Keep the node alive afterwards so ROS can service remaining callbacks.
    rospy.spin()
