import rclpy  
from rclpy.node import Node    
from geometry_msgs.msg import Twist  
from std_msgs.msg import Float32MultiArray
import numpy as np  
from std_msgs.msg import Int32MultiArray
from std_msgs.msg import Int32
from ai_msgs.msg import PerceptionTargets
from origincar_msg.msg import Sign  # 根据图片的消息类型
from origincar_msg.msg import Data
from std_msgs.msg import String
import math
import datetime
import time
from pyzbar.pyzbar import decode
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
import cv2
from threading import Thread
import keyboard
import sys
import tty
import termios
import os
from openai import OpenAI
import base64
import json

class CvDetection(Node):  
    def __init__(self):  
        super().__init__('cv_detection')  # 初始化节点名称

        self.qrcode_info = 0
        self.task = 0

        #0为默认，1为启动大模型调用
        self.vlm_flag = 0
        #0为默认，1为启动cv检测
        self.opencv_flag = 0


        # 创建一个订阅者，订阅 /sigh4return 话题，消息类型为 Int32,上位机信号
        self.sign4return = self.create_subscription(
            Int32,
            '/sign4return',
            self.sign4return_callback,
            10)     
        # 创建订阅者，订阅话题 /image
        self.subscription = self.create_subscription(
            CompressedImage,
            '/image',
            self.image_callback,
            3)
        self.subscription_vlm = self.create_subscription(  
            Int32,  
            '/vlm',  
            self.vlm_callback,  
            3)

        # 创建速度命令发布者
        self.cmd_pub = self.create_publisher(Twist, '/cmd_vel', 10)  
        # 创建发布者，发布到 /sign_switch 话题
        self.sign_switch_pub = self.create_publisher(Sign, 'sign_switch', 10)      # 发布二维码的信息
        #创建新的发布者，用于发布避障信息
        self.cone_publisher = self.create_publisher(Int32MultiArray, '/cone', 10)  # 创建新的发布者，用于发布x和y坐标
        #创建新的发布者，用于发布park信息
        self.park_publisher = self.create_publisher(Int32MultiArray, '/park', 10)  # 创建新的发布者，用于发布x和y坐标   
        #创建发布者，发布vlm识别结果
        self.vlm_msg_publisher = self.create_publisher(String, '/vlm_msg', 10)
        #创建发布者，发布二维码识别结果
        self.qrcode_publisher = self.create_publisher(String, '/qrcode', 10)

        #图像识别参数
        self.up = 120
        self.left = 50
        self.area = 1200    
        self.right_park = 640  #580
        self.up_park = 270    
        
        self.park_relativ_position = np.array([0.0, 0.0,0.0,0.0])
        self.zhuitong_relativ_position = np.array([0.0, 0.0,0.0,0.0])
        self.task1_time = 0
        self.task3_time = 0

        # 透视变换矩阵
        '''
        self.matrix = np.array([
            [-4.78185634e-02, -4.35936013e-01, 1.17525583e+02],
            [5.34996953e-03, -8.68712398e-01, 2.17611259e+02],
            [2.24575710e-05, -4.29432464e-03, 1.00000000e+00]
        ])
        '''
    def vlm_callback(self, msg):  #接受按键信息，并设置vlm_flag为1，启动大模型，可设置cv检测
        self.vlm = msg.data
        if self.vlm == 1:
            self.vlm_flag = 1
        if self.vlm == 2:
            self.opencv_flag = 1
        if self.vlm == 3:
            self.opencv_flag = 0

    def qwen_vl(self,frame):    #调用qwen API

        client = OpenAI(
            # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key="sk-xxx",
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        #  Base64 编码格式
        # def encode_image(image_path):
        #     with open(image_path, "rb") as image_file:
        #         return base64.b64encode(image_file.read()).decode("utf-8")

        """将 OpenCV 读取的帧转换为 Base64"""
        _, buffer = cv2.imencode(".png", frame)  # 将 frame 转换为 PNG 格式的字节流
        base64_image = base64.b64encode(buffer).decode("utf-8")
            
        completion = client.chat.completions.create(
            model="qwen-vl-plus",  # 此处以qwen-vl-plus为例，可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
            messages=[{"role": "user","content": [
                    {"type": "text","text": "中文描述图片内的插图或者说海报或者说白色立牌上病人的内容"},
                    {"type": "image_url",
                    #"image_url": {"url": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"}}
                    "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
                    ]}],
            #max_tokens = 30,
            )
        response_str = completion.model_dump_json()
        response_dict = json.loads(response_str)
        content = response_dict["choices"][0]["message"]["content"]

        return content

    def image_callback(self, msg):  #image话题的回调函数，使用cv获取物体参数，设置小车模式；接受vlm_flag开启大模型图生文

        #将图像数据转换为OpenCV格式
        np_arr = np.frombuffer(msg.data, np.uint8)
        cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        if(self.vlm_flag == 1):
            vlm_msg = String()
            #vlm_msg.data = self.qwen_vl(frame=cv_image)
            vlm_msg.data = str(self.qwen_vl(frame=cv_image))
            self.vlm_msg_publisher.publish(vlm_msg)
            self.get_logger().info(f"大模型识别结果为: {vlm_msg.data}")
            self.vlm_flag = 0

        if(self.opencv_flag == 1):    #按键k开启opencv检测，便于观察识别结果
            #获取锥桶的底边中点与宽高的坐标（x,y,w,h)
            zhuitong_position = self.cone_frame(cv_image)
            #把坐标发布出去方便调试
            cone_msg = Int32MultiArray()
            cone_msg.data = zhuitong_position
            self.cone_publisher.publish(cone_msg)

            #获取P点的中点与宽高(x,y,w,h)
            park_position = self.park_frame(cv_image)
            #把坐标发布出去方便调试
            park_msg = Int32MultiArray()
            park_msg.data =  park_position
            self.park_publisher.publish(park_msg)


        if(self.task == 1): 


            #获取锥桶的底边中点与宽高的坐标（x,y,w,h)
            self.zhuitong_relativ_position = self.cone_frame(cv_image)
            #把坐标发布出去方便调试
            cone_msg = Int32MultiArray()
            cone_msg.data = self.zhuitong_relativ_position
            self.cone_publisher.publish(cone_msg)

            self.decode_qr_code_pyzbar(cv_image)  # 使用 pyzbar 解码

        if(self.task == 3):
            time = self.get_clock().now()
            dt = (time - self.task3_time).nanoseconds * 1e-9
            #为防止干扰，4秒内识别锥桶，不识别停车点；4秒后识别停车点，不识别锥桶
            if dt < 5: 
                #如果dt时间太短，较远的锥桶无法识别；时间太长，距离p点较近的位置又会有一些干扰
                #考虑设置两套参数，根据赛场情况切换，通过keyboard按键控制方案选择
                #keyboard采用按键发送话题flag的方式选择方案
                self.zhuitong_relativ_position = self.cone_frame(cv_image)
                cone_msg = Int32MultiArray()
                cone_msg.data = self.zhuitong_relativ_position
                self.cone_publisher.publish(cone_msg)
                self.get_logger().info('锥桶识别时间为前5s')

            if dt > 4:
                #获取P点的中点与宽高(x,y,w,h)
                self.park_relativ_position = self.park_frame(cv_image)
                park_msg = Int32MultiArray()
                park_msg.data = self.park_relativ_position
                self.park_publisher.publish(park_msg)

    def sign4return_callback(self,msg): #sign4return话题的回调函数，在foxglove上显示，传递self.task的值

    # 创建 Sign 消息实例
        if msg.data == 6:

            self.task = 3
            self.task3_time = self.get_clock().now()

        if msg.data == -1:

            self.task = 1
            #self.task1_time = self.get_clock().now()
            
        if msg.data == 5:
            self.task = 2

        return
        
    def perspective_point(self,x,y): #透视变换函数，输入yolo锥桶底边中点，输出相对小车实际位置，右正，左负
        homogeneous_point = np.array([x, y, 1.0], dtype=np.float32)
        transformed_point = np.dot(self.matrix, homogeneous_point)
        transformed_point /= transformed_point[2]
        #if int(round(transformed_point[0])) > 200 or int(round(transformed_point[1])) > 200 or int(round(transformed_point[0])) < 0 or int(round(transformed_point[1])) < 0:
        #    return None
        #else:
        location =(-1.0 * (0.01*(transformed_point[0] - 100.0)), 0.01*(200.0 - transformed_point[1]))
        return location
        #参数回调函数,回调使用       

    def decode_qr_code_pyzbar(self, image):  #识别到二维码结果后停车，进入任务二
        # 使用 pyzbar 进行二维码解码
        #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #image = cv2.resize(image,(320,240)) #处理更快，但是精度更低
        decoded_objects = decode(image)
        
        # 遍历结果并判断是否是顺时针或逆时针二维码
        for obj in decoded_objects:
            qr_code_data = obj.data.decode('utf-8')
            if qr_code_data in ["ClockWise", "AntiClockWise"]:
                self.publish_sign_message(qr_code_data)
                self.task = 2

                #停车
                twist_msg = Twist()
                twist_msg.linear.x = 0.00  # 示例：将 x 坐标复位
                twist_msg.angular.z = 0.00  # 使用 角速度复位
                self.cmd_pub.publish(twist_msg)

                return  # 成功识别并发布后立即返回，避免重复处理
    
    def publish_sign_message(self, qr_code_data): #发布二维码的信息
        # 创建并发布 Sign 消息
        sign_msg = Sign()
        sign_msg.sign_data = 3 if qr_code_data == "ClockWise" else 4
        self.sign_switch_pub.publish(sign_msg)
        qrcode_msg = String()
        qrcode_msg.data =  f"二维码的识别结果为: {qr_code_data}"
        self.qrcode_publisher.publish(qrcode_msg)
        self.get_logger().info(f"已发布二维码信息: {qr_code_data}")

    def cone_frame(self,frame):  #返回锥桶的x0,y0,w0,h0，底边中点与宽高
        x0 = 0
        y0 = 0
        w0 = 0
        h0 = 0
        im0 = frame[self.up:450, self.left:600]
        hsv_img = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)

        # Define range of target color in HSV
        lower_hsv1 = np.array([0, 0, 0])
        lower_hsv2 = np.array([82, 255, 255])
        upper_hsv1 = np.array([0, 0, 0])
        upper_hsv2 = np.array([255, 90, 255])

        # Threshold the HSV image
        imgThreshLow = cv2.inRange(hsv_img, lower_hsv1, lower_hsv2)
        imgThreshHigh = cv2.inRange(hsv_img, upper_hsv1, upper_hsv2)
        threshed_img = cv2.bitwise_or(imgThreshLow, imgThreshHigh)

        # Find contours in the black regions of smoothed_img
        contours, hierarchy = cv2.findContours(255 - threshed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)


        # Initialize variables to store the largest contour
        max_area = 0
        largest_rect = None
        largest_cnt = None

        # Identify the largest contour and store its bounding rectangle
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > self.area:  # Filter small contours if needed
                x, y, w, h = cv2.boundingRect(cnt)
                if area > max_area:
                    max_area = area
                    largest_rect = (x, y, w, h)
                    largest_cnt = cnt

        # Draw bounding rectangles and centroids
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > self.area:
                x, y, w, h = cv2.boundingRect(cnt)

                # Draw the largest rectangle in red, others in green
                if (x, y, w, h) == largest_rect:
                    #color = (0, 0, 255)  # Red for the largest rectangle
                    x0 = int(x + self.left + w / 2)
                    y0 = int(y + self.up + h )
                    w0 = int(w)
                    h0 = int(h)         
     
        return x0,y0,w0,h0
    
    def park_frame(self,frame): #返回P点的x0,y0,w0,h0 中心点与宽，高
            x0 = 0
            y0 = 0
            w0 = 0
            h0 = 0
            im0 = frame[self.up_park:450, 0:self.right_park]#增大右边界就要减小lower_hsv2第二个阈值
            hsv_img = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)

            # Define range of target color in HSV
            lower_hsv1 = np.array([0, 0, 0])
            lower_hsv2 = np.array([47, 73, 255])
            upper_hsv1 = np.array([0, 0, 0])
            upper_hsv2 = np.array([255, 50, 255])#60

            # Threshold the HSV image，阈值内为白色（255）
            imgThreshLow = cv2.inRange(hsv_img, lower_hsv1, lower_hsv2)
            imgThreshHigh = cv2.inRange(hsv_img, upper_hsv1, upper_hsv2)
            threshed_img = cv2.bitwise_and(imgThreshLow, 255-imgThreshHigh)

            # Find contours in the black regions of smoothed_img
            contours, hierarchy = cv2.findContours(threshed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # Initialize variables to store the largest contour
            max_area = 0
            largest_rect = None
            largest_cnt = None

            # Identify the largest contour and store its bounding rectangle
            for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > self.area:  # Filter small contours if needed
                    x, y, w, h = cv2.boundingRect(cnt)
                    if area > max_area:
                        max_area = area
                        largest_rect = (x, y, w, h)
                        largest_cnt = cnt

            # Draw bounding rectangles and centroids
            for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > self.area:
                    x, y, w, h = cv2.boundingRect(cnt)

                    # Draw the largest rectangle in red, others in green
                    if (x, y, w, h) == largest_rect:

                        x0 = int(x + w / 2)
                        y0 = int(y + self.up_park + h / 2)
                        w0 = int(w)
                        h0 = int(h)
                
            return  x0,y0,w0,h0 
    
   
def main(args=None):
    """Entry point: initialize rclpy, spin the CvDetection node, clean up.

    Args:
        args: command-line arguments forwarded to rclpy.init (default None).
    """
    rclpy.init(args=args)  # initialize rclpy
    cvdetection = CvDetection()  # instantiate the node
    try:
        rclpy.spin(cvdetection)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the node; not an error
    finally:
        # Without this, an exception inside spin() would leak the node and
        # leave the rclpy context initialized.
        cvdetection.destroy_node()  # destroy the node
        rclpy.shutdown()
  
if __name__ == '__main__':  
    main()  # script entry point

# Dead code retained for reference as inert module-level string literals:
# an earlier YOLO-detection callback, an odometry callback, and their
# subscription setup. Never executed; kept verbatim.
'''
    def yolo_callback(self, msg):
        if self.flag == 1:
            for num, target in enumerate(msg.targets):
                class_type = target.rois[0].type.strip()
                if target.rois:

                    # 获取第一个 ROI
                    roi = target.rois[0].rect

                    # 获取框参数
                    x_offset = roi.x_offset
                    y_offset = roi.y_offset
                    height = roi.height
                    width = roi.width
                    #confidence =target.rois[0].confidence
                    
                if class_type == 'zhuitong' :
                    if(height > self.safety_zhuitong_height):
                        #坐标变换
                        x = int(x_offset + 0.5 * width)
                        y = y_offset + height
                        #这里暂时更新一下在图像的中心位置
                        self.zhuitong_relativ_position = (x,y) 
                        #识别到锥桶这里加一个状态函数
                        self.state = 1
                        return

                        ''''''
                        self.zhuitong_center = x
                        self.zhuitong_height = height
                        self.zhuitong_relativ_position = self.perspective_point(x,y)
                        self.left = int(x_offset - width / 2) - self.turn_error   #校正左转点
                        self.right = int(x_offset + 3 * width /2) + self.turn_error   #校正右转点
                        #更新绝对坐标（这里更新其实问题不大，可以储存下来）
                        distance = math.sqrt(self.zhuitong_relativ_position[0]**2+self.zhuitong_relativ_position[1]**2)
                        delta_angular = math.atan(self.zhuitong_relativ_position[0] / self.zhuitong_relativ_position[1])
                        self.zhuitong_position[0] = self.position[0] + distance * math.cos(self.angular + delta_angular)
                        self.zhuitong_position[1] = self.position[1] + distance * math.sin(self.angular + delta_angular)
                        #这里已经摄制成绝对坐标 
                        self.get_logger().info('计算锥桶中心坐标：{},{}'.format(self.zhuitong_position[0], self.zhuitong_position[1]))
                        self.get_logger().info('计算相对锥桶中心坐标：{},{}'.format(self.zhuitong_relativ_position[0], self.zhuitong_relativ_position[1]))
                        distance_cone = math.sqrt((self.zhuitong_position[1] - self.position[1])**2 + 
                                    (self.zhuitong_position[0] - self.position[0])**2)
                        self.get_logger().info('与锥桶中心的距离：{}'.format(distance_cone))
                    ''''''
                    
                elif class_type == 'qrcode' :
                    self.state = 2
                    #坐标变换
                    x = int(x_offset + 0.5 * width)
                    y = y_offset + height
                    self.relativ_target_position = (x,y)
                    #self.relativ_target_position = self.perspective_point(x,y)
                    return
                        
                elif class_type == 'p' :
                    self.state = 3
                    x = int(x_offset + 0.5 * width)
                    y = y_offset + height
                    self.relativ_target_position = (x,y)
                    #self.relativ_target_position = self.perspective_point(x,y)
                    return                    
                else:
                    self.state = 0
    def odom_callback(self, msg):  
        self.position[0] = msg.pose.pose.position.x  # 读取当前位置信息
        self.position[1] = msg.pose.pose.position.y  # 读取当前位置信息

        #这里的y很可能是根据x推测得出来的，因为小车的odom实际上只有一个方向的速度，因为小车是横着走的
        # 打印当前位置（调试用）  
        self.get_logger().info('当前位置: x={:.6f}, y={:.6f}, 角度={:.6f},状态{},'.format(self.position[0], self.position[1], self.angular,self.state))
    
'''
'''
        self.subscription_odom = self.create_subscription(
            Odometry,  
            '/odom',
            self.odom_callback,
            3)
        #创建订阅者，订阅话题 hobot_dnn_detection 话题   
        self.subscription_yolo = self.create_subscription( 
            PerceptionTargets, 
            'hobot_dnn_detection',  
            self.yolo_callback, 
            3)    
        '''