#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import String, Float32MultiArray, MultiArrayDimension
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import torch
import numpy as np
import time
import os
import sys

# Add the original Voltage project directory to the import path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../voltage'))

# Import components from the Voltage project
from model_utils import get_transformer_model, get_input_embedding, get_image_tensor
from config import PARTITIONS

class VoltageMasterNode(Node):
    """Master node for partitioned (distributed) transformer inference.

    Receives raw input (text for BERT, an image for ViT), embeds it into a
    tensor, broadcasts that tensor to worker nodes, collects the workers'
    partial outputs, computes its own partition locally, and publishes the
    concatenated final result.

    Topics:
        voltage/input          (sub)  String or Image, depending on model_type
        voltage/tensor         (pub)  Float32MultiArray — input embedding for workers
        voltage/worker_result  (sub)  Float32MultiArray — one partial result per worker
        voltage/result         (pub)  Float32MultiArray — final concatenated output
    """

    def __init__(self):
        super().__init__('voltage_master_node')

        # Declare parameters (defaults: BERT, CUDA when available,
        # partition count from the project config).
        self.declare_parameter('model_type', 'bert')
        self.declare_parameter('device', 'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.declare_parameter('partitions', PARTITIONS)

        # Read back the effective parameter values.
        self.model_type = self.get_parameter('model_type').value
        self.device = self.get_parameter('device').value
        self.partitions = self.get_parameter('partitions').value

        # Load the model layers for the selected model type.
        self.get_logger().info(f'初始化模型: {self.model_type} 在设备 {self.device} 上')
        self.initialize_model()

        # Input subscription: message type depends on the model
        # (text for BERT, image for ViT).
        self.input_sub = self.create_subscription(
            String if self.model_type == 'bert' else Image,
            'voltage/input',
            self.input_callback,
            10)

        # Embedding tensor broadcast to the worker nodes.
        self.tensor_pub = self.create_publisher(
            Float32MultiArray,
            'voltage/tensor',
            10)

        # Partial results coming back from the workers.
        self.result_sub = self.create_subscription(
            Float32MultiArray,
            'voltage/worker_result',
            self.worker_result_callback,
            10)

        # Final merged output.
        self.result_pub = self.create_publisher(
            Float32MultiArray,
            'voltage/result',
            10)

        # State: partial results collected for the current input, the
        # current input embedding, and the OpenCV<->ROS image converter.
        self.worker_results = []
        self.current_input = None
        self.bridge = CvBridge()
        self.get_logger().info('Voltage主节点已初始化')

    def initialize_model(self):
        """Load the model layers for ``self.model_type`` ('bert' or 'vit')."""
        if self.model_type == 'bert':
            self.model_layers = get_transformer_model()
            self.get_logger().info('BERT模型已加载')
        elif self.model_type == 'vit':
            # Default sample image URL used by the upstream Voltage project.
            image_path = "https://raw.githubusercontent.com/EliSchwartz/imagenet-sample-images/refs/heads/master/n02086240_Shih-Tzu.JPEG"
            _, self.model_layers = get_image_tensor(image_path, device=self.device)
            self.get_logger().info('ViT模型已加载')
        else:
            self.get_logger().error(f'不支持的模型类型: {self.model_type}')

    def input_callback(self, msg):
        """Embed the incoming input and broadcast the tensor to workers."""
        self.get_logger().info('收到输入数据')
        start_time = time.time()

        # Drop any partial results that belong to a previous input.
        self.worker_results = []

        if self.model_type == 'bert':
            # Text input -> token embedding of shape (seq_len, hidden).
            x = get_input_embedding(msg.data, max_length=128, device=self.device)
            self.get_logger().info(f'输入形状: {x.shape}')
        else:
            # Image input. The ROS image is decoded, but the PIL/ViT
            # preprocessing is not implemented yet — a random placeholder
            # tensor of ViT patch-embedding shape (197, 768) is used.
            cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')
            x = torch.randn(197, 768).to(self.device)  # TODO: real preprocessing

        # Keep the embedding so process_all_results() can compute the
        # master's own partition later.
        self.current_input = x

        # Serialize the tensor into a Float32MultiArray.
        # detach() is required: embeddings from a live model may carry
        # grad history, and .numpy() raises on tensors that require grad.
        tensor_msg = Float32MultiArray()
        tensor_msg.data = x.detach().cpu().numpy().flatten().tolist()

        # Row-major 2-D layout metadata so receivers can reshape.
        tensor_msg.layout.dim = [
            MultiArrayDimension(label="height", size=x.shape[0], stride=x.shape[0]*x.shape[1]),
            MultiArrayDimension(label="width", size=x.shape[1], stride=x.shape[1])
        ]

        self.tensor_pub.publish(tensor_msg)
        self.get_logger().info(f'已发布输入张量，耗时: {time.time() - start_time:.4f}秒')

        # With a single partition there are no workers, so
        # worker_result_callback never fires; finish immediately.
        if self.partitions <= 1:
            self.process_all_results()

    def worker_result_callback(self, msg):
        """Collect one worker's partial result; merge once all have arrived."""
        # Rebuild the 2-D tensor from the flat payload using the layout
        # metadata, and move it to self.device so the later torch.cat with
        # the master's own (possibly CUDA) partition does not fail across
        # devices.
        data = np.array(msg.data, dtype=np.float32)
        height = int(msg.layout.dim[0].size)
        width = int(msg.layout.dim[1].size)
        result_tensor = torch.tensor(data.reshape(height, width)).to(self.device)

        self.worker_results.append(result_tensor)
        self.get_logger().info(f'收到工作节点结果 {len(self.worker_results)}/{self.partitions-1}')

        # The master handles one partition itself, so it waits for
        # partitions - 1 worker results.
        if len(self.worker_results) == self.partitions - 1:
            self.process_all_results()

    def process_all_results(self):
        """Compute the master's partition, merge all partial results, publish."""
        self.get_logger().info('处理所有工作节点结果')

        # Guard against a stray worker message arriving before any input.
        if self.current_input is None:
            self.get_logger().warning('尚未收到输入数据，忽略工作节点结果')
            return

        # The master owns the first slice of N // partitions rows.
        N = self.current_input.shape[0]
        part_len = N // self.partitions
        start = 0
        end = part_len

        # Simplified stand-in for the real partitioned_layer computation;
        # the actual layer forward pass is not implemented here.
        my_output = self.current_input[start:end]

        # Concatenate master slice + worker slices along the row axis.
        # NOTE(review): assumes workers send results in partition order —
        # there is no ordering/correlation metadata in the message; verify.
        all_outputs = [my_output] + self.worker_results
        output = torch.cat(all_outputs, dim=0)

        # Serialize and publish the merged result (detach: see input_callback).
        result_msg = Float32MultiArray()
        result_msg.data = output.detach().cpu().numpy().flatten().tolist()

        result_msg.layout.dim = [
            MultiArrayDimension(label="height", size=output.shape[0], stride=output.shape[0]*output.shape[1]),
            MultiArrayDimension(label="width", size=output.shape[1], stride=output.shape[1])
        ]

        self.result_pub.publish(result_msg)
        self.get_logger().info(f'已发布最终结果，形状: {output.shape}')

def main(args=None):
    """Entry point: create the Voltage master node and spin until shutdown."""
    rclpy.init(args=args)
    master = VoltageMasterNode()
    rclpy.spin(master)
    master.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()