import rospy
import cv2
import json
import numpy as np
from ultralytics import YOLO
from std_msgs.msg import Header, String
from sensor_msgs.msg import Image

class YoloV8ROS:
    """ROS node wrapper around an Ultralytics YOLOv8 model.

    Subscribes to a raw ``sensor_msgs/Image`` topic, runs inference on each
    frame, publishes the detections as a JSON string on
    ``/yolov8/detection_msg`` and the annotated frame on
    ``/yolov8/detection_image``.
    """

    # COCO-17 keypoint names, in the index order YOLOv8-pose emits them.
    _KEYPOINT_NAMES = (
        "nose",
        "left-eye", "right-eye",
        "left-ear", "right-ear",
        "left-shoulder", "right-shoulder",
        "left-elbow", "right-elbow",
        "left-wrist", "right-wrist",
        "left-hip", "right-hip",
        "left-knee", "right-knee",
        "left-ankle", "right-ankle",
    )

    def __init__(self):
        # Node behaviour flags (ROS private parameters).
        self.is_print = rospy.get_param("~is_print", False)  # dump JSON to stdout
        self.is_show = rospy.get_param("~is_show", True)     # open an OpenCV window
        # NOTE(review): read but never used — presumably meant to select the
        # inference device ("cpu"/"cuda:0"); confirm and wire into the model call.
        self.device = rospy.get_param("~drives", "")
        self.model = YOLO(rospy.get_param("~weight_path", "yolov8n.pt"))
        self.color_image = Image()
        self.sub = rospy.Subscriber(rospy.get_param("~image_topic", ""), Image,
                                    self.image_callback, queue_size=1)
        self.position_pub = rospy.Publisher("/yolov8/detection_msg", String, queue_size=1)
        self.detection_pub = rospy.Publisher("/yolov8/detection_image", Image, queue_size=1)

    def image_callback(self, data):
        """Run YOLOv8 on one incoming frame and publish the results."""
        # Reinterpret the raw byte buffer as an H x W x C array.
        # NOTE(review): assumes the incoming topic carries 3-channel BGR data —
        # confirm against the camera driver; a mono/depth topic would break
        # the cvtColor below.
        self.color_image = np.frombuffer(data.data, dtype=np.uint8).reshape(
            data.height, data.width, -1)
        self.color_image = cv2.cvtColor(self.color_image, cv2.COLOR_BGR2RGB)
        results = self.model(self.color_image)
        # self.dectshowToBox(results, data.height, data.width)
        self.dectshowToSkeleton(results, data.height, data.width)
        cv2.waitKey(3)

    # Human skeleton detection; only usable with "-pose" weight files.
    def dectshowToSkeleton(self, results, height, widht):
        """Publish COCO-17 keypoints as JSON plus the annotated image.

        ``widht`` keeps the original (misspelled) parameter name for
        keyword-call compatibility; it is the image width in pixels.
        """
        self.frame = results[0].plot()
        inference_ms = results[0].speed["inference"]
        # Guard against a zero inference time (would raise ZeroDivisionError).
        fps = 1000.0 / inference_ms if inference_ms else 0.0

        msg = String()
        tmp = []
        # keypoints is None when the loaded weights are not a pose model —
        # the original code crashed here instead of publishing an empty list.
        if len(results[0]) > 0 and results[0].keypoints is not None:
            for k in results[0].keypoints.data.cpu().numpy():
                # One dict per detected person: name -> {x, y, probability}.
                person = {
                    name: {
                        "x": k[i][0].item(),
                        "y": k[i][1].item(),
                        "probability": k[i][2].item(),
                    }
                    for i, name in enumerate(self._KEYPOINT_NAMES)
                }
                tmp.append(person)

        msg.data = json.dumps(tmp)
        if self.is_print:
            print("----------华丽的分割线---------------")
            print(msg.data)
        self.position_pub.publish(msg)  # publish keypoint coordinates
        self.publish_image(self.frame, height=height, width=widht)  # publish annotated frame
        if self.is_show:
            cv2.putText(self.frame, f'YOLOV8 FPS: {int(fps)}', (20, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.imshow("YOLOV8", self.frame)

    def dectshowToBox(self, results, height, widht):
        """Publish "person" boxes with confidence > 0.7 as JSON plus the image.

        ``widht`` keeps the original (misspelled) parameter name for
        keyword-call compatibility; it is the image width in pixels.
        """
        self.frame = results[0].plot()
        inference_ms = results[0].speed["inference"]
        fps = 1000.0 / inference_ms if inference_ms else 0.0
        cv2.putText(self.frame, f'YOLOV8 FPS: {int(fps)}', (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2, cv2.LINE_AA)

        msg = String()
        tmp = []
        # Anything detected at all?
        if len(results[0]) > 0:
            # Iterate the boxes, keeping only confident "person" detections.
            for result in results[0].boxes:
                if results[0].names[result.cls.item()] == "person" and result.conf.item() > 0.7:
                    # BUGFIX: build a fresh, fully initialised dict per
                    # detection. The original reused one dict created outside
                    # the loop and never initialised data["frame"], so it
                    # raised KeyError on the first hit and would have aliased
                    # every appended entry to the same object.
                    xyxy = result.xyxy[0]
                    data = {
                        "cls": results[0].names[result.cls.item()],
                        "frame": {
                            "xmin": int(xyxy[0].item()),
                            "ymin": int(xyxy[1].item()),
                            "xmax": int(xyxy[2].item()),
                            "ymax": int(xyxy[3].item()),
                            "probability": result.conf.item(),
                        },
                    }
                    tmp.append(data)
        msg.data = json.dumps(tmp)
        self.position_pub.publish(msg)  # publish box coordinates
        self.publish_image(self.frame, height=height, width=widht)
        cv2.imshow("YOLOV8", self.frame)

    # Publish the annotated frame as a sensor_msgs/Image.
    def publish_image(self, imgdata, height, width):
        """Wrap ``imgdata`` (H x W x 3 uint8 array) in an Image message and publish it."""
        image_tmp = Image()
        header = Header(stamp=rospy.Time.now())
        header.frame_id = "camera_color_frame"
        image_tmp.height = height
        image_tmp.width = width
        # NOTE(review): declared bgr8, but the frame was produced from an
        # RGB-converted input in image_callback — subscribers may see swapped
        # red/blue channels; verify end to end.
        image_tmp.encoding = 'bgr8'
        image_tmp.data = np.array(imgdata).tobytes()
        image_tmp.header = header
        image_tmp.step = width * 3  # bytes per row for 3-channel 8-bit pixels
        self.detection_pub.publish(image_tmp)
def main():
    """Initialise the ROS node, construct the detector, and spin until shutdown."""
    rospy.init_node("yolov8_ros", anonymous=True)
    detector = YoloV8ROS()  # keeps subscriptions alive for the node's lifetime
    rospy.spin()


if __name__ == "__main__":
    main()

# Standalone webcam demo (never executed — kept here only as a reference for
# testing the YOLOv8 weights locally without ROS).
'''
model = YOLO('yolov8n.pt')
cap = cv2.VideoCapture(0)
while cap.isOpened():
    success, frame = cap.read()
    if success:
        results = model(frame)
        annotated_frame = results[0].plot()
        cv2.imshow("YOLOV8", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        break


cap.release()
cv2.destroyAllWindows()
'''