#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import String, Float32MultiArray, MultiArrayDimension
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import torch
import torch.nn.functional as F
import numpy as np
import time
import os
import sys
from PIL import Image as PILImage
from io import BytesIO
import requests

# Add the original Voltage project directory to the import path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../voltage'))

# Import Voltage project components
from model_utils import get_transformer_model, get_input_embedding, get_image_tensor
from bert_head import BertClassificationHead
from transformers import ViTForImageClassification

class VoltageInferenceService(Node):
    """ROS 2 node that drives Voltage remote inference over topics.

    Publishes text/image inputs for the remote transformer backbone and
    post-processes the raw outputs received on ``voltage/result`` with a
    locally loaded classification head (BERT) or classifier layer (ViT).
    """

    def __init__(self):
        super().__init__('voltage_inference_service')

        # Parameters: which classification head to load and where to run it.
        self.declare_parameter('model_type', 'bert')
        self.declare_parameter('device', 'cuda:0' if torch.cuda.is_available() else 'cpu')

        self.model_type = self.get_parameter('model_type').value
        self.device = self.get_parameter('device').value

        # Initialize the model head (fails fast on an unsupported type).
        self.get_logger().info(f'初始化模型: {self.model_type} 在设备 {self.device} 上')
        self.initialize_model()

        # Raw backbone outputs arrive as a flattened 2-D float array.
        self.result_sub = self.create_subscription(
            Float32MultiArray,
            'voltage/result',
            self.result_callback,
            10)

        # Text inputs keep the original topic name for backward compatibility.
        self.text_input_pub = self.create_publisher(
            String,
            'voltage/input',
            10)

        # BUGFIX: the original code published sensor_msgs/Image on the same
        # 'voltage/input' topic already used for std_msgs/String above; two
        # message types on one topic is invalid in ROS 2, so images now get
        # their own topic. Remote subscribers must listen on this new name.
        self.image_input_pub = self.create_publisher(
            Image,
            'voltage/image_input',
            10)

        # NOTE: a proper ROS service interface would fit this request/response
        # pattern better; plain topics are used to keep the example simple.

        self.bridge = CvBridge()
        self.get_logger().info('Voltage推理服务已初始化')

    def initialize_model(self):
        """Load the classification head that matches ``self.model_type``.

        Raises:
            ValueError: if the configured model type is unsupported, so the
                node fails at startup instead of crashing later with an
                AttributeError in the first result callback.
        """
        if self.model_type == 'bert':
            self.head = BertClassificationHead(hidden_size=768, num_classes=2, use_cls=True).to(self.device)
            self.head.eval()
            self.get_logger().info('BERT分类头已加载')
        elif self.model_type == 'vit':
            self.clf_model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(self.device)
            self.clf_model.eval()
            self.get_logger().info('ViT分类头已加载')
        else:
            # BUGFIX: previously this only logged and returned, leaving the
            # head attributes unset; the failure then surfaced much later.
            self.get_logger().error(f'不支持的模型类型: {self.model_type}')
            raise ValueError(f'unsupported model_type: {self.model_type}')

    def result_callback(self, msg):
        """Rebuild the 2-D output tensor from the wire format and dispatch it.

        Args:
            msg: Float32MultiArray whose layout carries (height, width) in
                the first two dimensions and whose data is the row-major
                flattened tensor.
        """
        self.get_logger().info('收到推理结果')

        # ROBUSTNESS: guard against malformed messages lacking the 2-D layout
        # metadata instead of raising IndexError inside the executor.
        if len(msg.layout.dim) < 2:
            self.get_logger().error('结果消息缺少二维布局信息，已忽略')
            return

        # Reconstruct the tensor on the configured device.
        data = np.array(msg.data, dtype=np.float32)
        height = int(msg.layout.dim[0].size)
        width = int(msg.layout.dim[1].size)
        output = torch.from_numpy(data.reshape(height, width)).to(self.device)

        # Route to the post-processor matching the loaded head.
        if self.model_type == 'bert':
            self.process_bert_result(output)
        else:
            self.process_vit_result(output)

    def process_bert_result(self, output):
        """Classify a BERT backbone output and log class index + probabilities.

        Args:
            output: 2-D float tensor of hidden states fed to the BERT head.
        """
        with torch.no_grad():
            logits = self.head(output)
            probs = F.softmax(logits, dim=-1)
            pred = torch.argmax(probs).item()

        self.get_logger().info(f'BERT推理结果: 类别 {pred}, 概率 {probs.tolist()}')

    def process_vit_result(self, output):
        """Classify a ViT backbone output and log class index + label.

        Args:
            output: 2-D float tensor of token embeddings; row 0 is assumed to
                be the [CLS] token representation.
        """
        with torch.no_grad():
            # Take the [CLS] token representation (first row).
            cls_token = output[0].unsqueeze(0)
            logits = self.clf_model.classifier(cls_token)
            probs = torch.softmax(logits, dim=-1)
            pred = torch.argmax(probs, dim=-1).item()

        label = self.clf_model.config.id2label[pred]
        self.get_logger().info(f'ViT推理结果: 类别 {pred}, 标签 {label}')

    def infer_text(self, text):
        """Publish a text inference request.

        Args:
            text: the sentence to send to the remote backbone.
        """
        self.get_logger().info(f'发起文本推理: {text}')

        msg = String()
        msg.data = text
        self.text_input_pub.publish(msg)

    def infer_image(self, image_path):
        """Publish an image inference request from a local path or http(s) URL.

        Args:
            image_path: filesystem path, or a URL starting with 'http'.
        """
        self.get_logger().info(f'发起图像推理: {image_path}')

        if image_path.startswith('http'):
            # BUGFIX: requests.get without a timeout can block the node
            # forever; also fail loudly on HTTP errors instead of handing
            # an error page's bytes to PIL.
            resp = requests.get(image_path, timeout=30)
            resp.raise_for_status()
            img = PILImage.open(BytesIO(resp.content)).convert('RGB')
        else:
            img = PILImage.open(image_path).convert('RGB')

        # Convert to a ROS Image message (RGB byte order, as loaded by PIL).
        img_msg = self.bridge.cv2_to_imgmsg(np.array(img), encoding='rgb8')

        self.image_input_pub.publish(img_msg)
def main(args=None):
    """Entry point: spin the inference node, optionally firing a demo request.

    Positional CLI usage:
        <script> text [sentence]    publish a text inference request
        <script> image [path|url]   publish an image inference request

    Args:
        args: optional ROS argument list forwarded to ``rclpy.init``.
    """
    rclpy.init(args=args)
    node = VoltageInferenceService()

    # Demo trigger driven by positional CLI arguments.
    if len(sys.argv) > 1:
        if sys.argv[1] == 'text':
            text = "This is a test for Voltage." if len(sys.argv) <= 2 else sys.argv[2]
            node.infer_text(text)
        elif sys.argv[1] == 'image':
            image_path = "https://raw.githubusercontent.com/EliSchwartz/imagenet-sample-images/refs/heads/master/n02086240_Shih-Tzu.JPEG"
            if len(sys.argv) > 2:
                image_path = sys.argv[2]
            node.infer_image(image_path)

    # BUGFIX: rclpy.spin raises KeyboardInterrupt on Ctrl-C, so the original
    # code never reached destroy_node()/shutdown(); guarantee cleanup here.
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the node.
    finally:
        node.destroy_node()
        rclpy.shutdown()

# Standard script entry guard: run the node only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()