#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float32MultiArray, MultiArrayDimension
import torch
import numpy as np
import time
import os
import sys

# Add the original Voltage project directory to the import path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../voltage'))

# Import components from the Voltage project
from partitioned_transformer import partitioned_layer
from config import PARTITIONS

class VoltageWorkerNode(Node):
    """Worker node for the distributed "Voltage" transformer pipeline.

    Subscribes to full input tensors on ``voltage/tensor``, processes the
    row partition assigned to this worker, and publishes the partial
    result on ``voltage/worker_result``.
    """

    def __init__(self):
        super().__init__('voltage_worker_node')

        # Declare parameters. worker_id defaults to 1; NOTE(review): worker
        # ids appear to be 0-based with id 0 presumably held by the master
        # node -- confirm against the launch configuration.
        self.declare_parameter('worker_id', 1)
        self.declare_parameter('device', 'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.declare_parameter('partitions', PARTITIONS)

        # Resolve parameter values once; they are treated as fixed for the
        # lifetime of the node.
        self.worker_id = self.get_parameter('worker_id').value
        self.device = self.get_parameter('device').value
        self.partitions = self.get_parameter('partitions').value

        # Communication endpoints: receive the full tensor, publish this
        # worker's slice of the result.
        self.tensor_sub = self.create_subscription(
            Float32MultiArray,
            'voltage/tensor',
            self.tensor_callback,
            10)

        self.result_pub = self.create_publisher(
            Float32MultiArray,
            'voltage/worker_result',
            10)

        # Model layers are lazily initialized; fetching them (e.g. from a
        # service or the parameter server) is not implemented yet, see
        # tensor_callback.
        self.model_layers = None

        self.get_logger().info(f'Voltage工作节点 {self.worker_id} 已初始化')

    def _partition_range(self, total_rows):
        """Return the half-open row range ``[start, end)`` owned by this worker.

        The tensor is split into ``self.partitions`` contiguous row chunks;
        the last worker absorbs any remainder rows.
        """
        part_len = total_rows // self.partitions
        start = self.worker_id * part_len
        if self.worker_id < self.partitions - 1:
            end = (self.worker_id + 1) * part_len
        else:
            end = total_rows
        return start, end

    def tensor_callback(self, msg):
        """Process an incoming tensor and publish this worker's partial result.

        Args:
            msg: ``Float32MultiArray`` whose layout carries two dimensions
                (height, width) describing the flattened data.
        """
        self.get_logger().info('收到输入张量')
        start_time = time.time()

        # Validate the layout before indexing: a malformed message must not
        # crash the node inside the subscription callback.
        if len(msg.layout.dim) < 2:
            self.get_logger().error('Received tensor without a 2-D layout; dropping message')
            return

        # Reconstruct the 2-D tensor from the flat message payload.
        data = np.array(msg.data, dtype=np.float32)
        height = int(msg.layout.dim[0].size)
        width = int(msg.layout.dim[1].size)
        if data.size != height * width:
            self.get_logger().error(
                f'Tensor data length {data.size} does not match layout '
                f'{height}x{width}; dropping message')
            return
        x = torch.tensor(data.reshape(height, width)).to(self.device)

        # Determine which row slice this worker is responsible for.
        N = x.shape[0]
        start, end = self._partition_range(N)

        self.get_logger().info(f'处理分区 {start}:{end} / {N}')

        if self.model_layers is None:
            # In a real deployment the model layers should be obtained from
            # a service or the parameter server; this branch is a placeholder
            # that simply echoes the assigned slice.
            self.get_logger().warn('模型层未初始化，使用随机数据模拟')
            my_output = x[start:end]
        else:
            # Placeholder: the full implementation would run
            # partitioned_layer over this slice.
            my_output = x[start:end]

        # Convert the partial result back into a ROS message and publish it.
        result_msg = Float32MultiArray()
        result_msg.data = my_output.cpu().numpy().flatten().tolist()

        # Row-major layout metadata so the consumer can reshape the payload.
        result_msg.layout.dim = [
            MultiArrayDimension(label="height", size=my_output.shape[0],
                                stride=my_output.shape[0] * my_output.shape[1]),
            MultiArrayDimension(label="width", size=my_output.shape[1],
                                stride=my_output.shape[1]),
        ]

        self.result_pub.publish(result_msg)
        self.get_logger().info(f'已发布工作节点结果，耗时: {time.time() - start_time:.4f}秒')

def main(args=None):
    """Entry point: initialize rclpy, spin the worker node, shut down cleanly.

    Args:
        args: Optional command-line arguments forwarded to ``rclpy.init``.
    """
    rclpy.init(args=args)
    node = VoltageWorkerNode()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop a ROS 2 node; treat it as a
        # clean shutdown rather than letting the traceback propagate.
        pass
    finally:
        # Always release node resources and the rclpy context, even if
        # spin() exits via an exception.
        node.destroy_node()
        rclpy.shutdown()

if __name__ == '__main__':
    main()