#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
自动语音识别 (ASR) ROS 节点

该节点的核心功能是作为一个可控的语音识别服务：
1.  **服务化控制**: 通过 'start_asr' 和 'stop_asr' 两个ROS服务来控制其生命周期。
    这允许中央控制节点 (center_node) 在需要时（如机器人空闲时）开启识别，
    在不需要时（如机器人说话或执行任务时）关闭识别。
2.  **实时音频流**: 使用一个独立的录音线程，通过 PyAudio 从麦克风捕获音频。
3.  **静音检测 (VAD)**: 包含简单的语音活动检测逻辑。当用户停止说话一段时间后，
    会自动结束当前识别会话。
4.  **云服务集成**: 使用另一个独立的WebSocket线程，将音频流实时发送到火山引擎的
    ASR 服务。
5.  **智能发布**: 它不是实时发布零碎的识别结果，而是等待用户说话暂停后，
    将一句完整的话发布到 'raw_text' 话题，供 center_node 理解。
"""

import rospy
import os
import sys
import time
import uuid
import json
import pyaudio
import wave
import threading
import gzip
import websocket
import struct
import queue
from std_msgs.msg import String
from std_srvs.srv import SetBool, SetBoolResponse

# Global recording parameters
FORMAT = pyaudio.paInt16    # 16-bit signed PCM samples
CHANNELS = 1                # mono capture
RATE = 16000                # 16 kHz sample rate (matches the ASR config below)
CHUNK = 3200                # frames per read: 3200 / 16000 Hz = 0.2 s of audio

class VolcEngineASR:
    """
    ROS node class encapsulating the whole ASR pipeline.

    Manages microphone recording, the WebSocket session with the VolcEngine
    streaming ASR cloud service, and service-based start/stop control.

    Threads involved:
      * recording_thread  — producer: captures audio, runs VAD, fills the queue.
      * websocket_thread  — consumer: maintains the connection, streams audio.
      * threading.Timer   — fires publish_final_sentence after a speech pause.
    """

    def __init__(self):
        """Initialize the ROS node, its publishers/services and all state."""
        rospy.init_node('speech_recognition', anonymous=True)

        # --- ROS publishers & services ---
        # Publishes the final, complete recognized sentence.
        self.voice_pub = rospy.Publisher('raw_text', String, queue_size=10)
        # Publishes this node's own lifecycle state ("started" / "stopped").
        self.status_pub = rospy.Publisher('asr_status', String, queue_size=10)

        # Start/stop services called by other nodes (mainly center_node).
        self.start_service = rospy.Service('start_asr', SetBool, self.start_asr_callback)
        self.stop_service = rospy.Service('stop_asr', SetBool, self.stop_asr_callback)

        # --- Cloud-service credentials ---
        # Load from the ROS parameter server with an environment-variable
        # fallback.  NOTE: rospy.get_param() *without* a default raises
        # KeyError when the parameter is missing, which would have bypassed
        # the explicit error message below; the defaults keep that check
        # reachable.  Env-var names chosen here — confirm against deployment.
        self.app_id = rospy.get_param('~app_id', os.environ.get('VOLC_APP_ID', ''))
        self.access_key = rospy.get_param('~access_key', os.environ.get('VOLC_ACCESS_KEY', ''))
        if not self.app_id or not self.access_key:
            rospy.logerr('未能加载火山引擎ASR的AppID或AccessKey，请检查配置！')
            sys.exit(1)

        # ASR WebSocket service endpoint
        self.ws_url = 'wss://openspeech.bytedance.com/api/v3/sauc/bigmodel'
        self.connect_id = None

        # --- Control flags ---
        self.is_active = False           # service activated (via ROS service calls)
        self.is_recording = False        # microphone stream currently open
        self.is_connected = False        # WebSocket handshake completed
        self.audio_queue = queue.Queue() # thread-safe hand-off between recorder and sender
        self.ws = None

        # --- Silence-detection (VAD) parameters ---
        self.silence_threshold = 50      # mean amplitude below this counts as silence
        self.max_silence_frames = 25     # consecutive silent chunks (25 * 0.2 s = 5 s)
        self.silence_frames = 0

        # --- Recognition / publication control ---
        self.text_lock = threading.Lock()
        self.speech_activity_timer = None  # timer detecting a pause in speech
        self.speech_timeout_duration = 1.5 # pause longer than this publishes the sentence
        self.pending_text_to_publish = ""  # sentence accumulated so far

        # --- Thread handles ---
        self.ws_thread = None
        self.record_thread = None

        rospy.loginfo('语音识别节点已启动，等待 "start_asr" 服务调用...')

    def start_asr_callback(self, req):
        """
        Handle a 'start_asr' service request.

        Marks the node active and spawns the recording and WebSocket threads
        (only if they are not already running).
        """
        if self.is_active:
            rospy.logwarn("ASR服务已处于启动状态，忽略本次请求。")
            return SetBoolResponse(True, "ASR已在运行。")

        try:
            self.is_active = True
            self.reset_recognition_state()
            self.connect_id = str(uuid.uuid4())  # unique id per session

            # Spawn worker threads as daemons so they never block shutdown.
            if not self.ws_thread or not self.ws_thread.is_alive():
                self.ws_thread = threading.Thread(target=self.websocket_thread)
                self.ws_thread.daemon = True
                self.ws_thread.start()

            if not self.record_thread or not self.record_thread.is_alive():
                self.record_thread = threading.Thread(target=self.recording_thread)
                self.record_thread.daemon = True
                self.record_thread.start()

            self.status_pub.publish("started")
            rospy.loginfo("ASR服务已启动，开始监听语音。")
            return SetBoolResponse(True, "ASR启动成功。")
        except Exception as e:
            rospy.logerr(f"启动ASR服务失败: {e}")
            self.is_active = False
            return SetBoolResponse(False, f"启动失败: {e}")

    def stop_asr_callback(self, req):
        """
        Handle a 'stop_asr' service request.

        Marks the node inactive, which makes the worker threads exit cleanly.
        """
        if not self.is_active:
            rospy.logwarn("ASR服务已处于停止状态，忽略本次请求。")
            return SetBoolResponse(True, "ASR已经停止。")

        try:
            self.is_active = False
            self.is_recording = False  # stop capturing immediately

            # Drain buffered audio FIRST, then enqueue the sentinel.
            # (The original order — sentinel first, clear second — wiped the
            # sentinel out of the queue, leaving the sender thread blocked.)
            with self.audio_queue.mutex:
                self.audio_queue.queue.clear()
            self.audio_queue.put(b'STOP')

            self.status_pub.publish("stopped")
            rospy.loginfo("ASR服务已停止。")
            return SetBoolResponse(True, "ASR停止成功。")
        except Exception as e:
            rospy.logerr(f"停止ASR服务失败: {e}")
            return SetBoolResponse(False, f"停止失败: {e}")

    def reset_recognition_state(self):
        """Reset all state tied to a single recognition session."""
        with self.text_lock:
            self._reset_locked()

    def _reset_locked(self):
        """Reset session state. Caller MUST already hold self.text_lock."""
        self.pending_text_to_publish = ""
        self.silence_frames = 0
        if self.speech_activity_timer:
            self.speech_activity_timer.cancel()
            self.speech_activity_timer = None

    def is_silence(self, audio_data):
        """
        Simple silence detector.

        Computes the mean absolute amplitude of a 16-bit PCM chunk and
        compares it against self.silence_threshold.  Returns True for empty
        input; returns False on decode errors (fail towards "speech").
        """
        if not audio_data:
            return True
        try:
            # Reinterpret the byte buffer as native 16-bit signed samples.
            values = struct.unpack(f'{len(audio_data)//2}h', audio_data)
            avg_amplitude = sum(abs(v) for v in values) / len(values)
            return avg_amplitude < self.silence_threshold
        except Exception as e:
            rospy.logwarn(f"静音检测时出错: {e}")
            return False

    def create_header(self, msg_type, msg_flags, serialization, compression):
        """Build the 4-byte binary header of the VolcEngine ASR V3 protocol."""
        byte1 = (1 << 4) | 1  # protocol version | header size (in 4-byte units)
        byte2 = (msg_type << 4) | msg_flags
        byte3 = (serialization << 4) | compression
        byte4 = 0  # reserved
        return bytes([byte1, byte2, byte3, byte4])

    def create_full_request(self):
        """Build the initial WebSocket frame carrying the session config."""
        req = {
            "header": {
                "app_id": self.app_id,
                "access_key": self.access_key,
                "resource_id": "volc.bigasr.sauc.duration",
                "connect_id": self.connect_id
            },
            "payload": {
                "req_id": str(uuid.uuid4()),
                "config": {
                    "asr_config": {
                        "language": "zh-CN",
                        "sample_rate": 16000,
                        "bit_depth": 16,
                        "channel": 1,
                        "format": "raw",
                        "codec": "pcm"
                    },
                    "common_config": {
                        "service_type": "asr",
                        "version": "v1"
                    }
                },
                "operation": "start"
            }
        }
        json_data = json.dumps(req).encode('utf-8')
        compressed_data = gzip.compress(json_data)

        header = self.create_header(1, 0, 1, 1)  # control message, JSON, gzip
        size_bytes = struct.pack('>I', len(compressed_data))

        return header + size_bytes + compressed_data

    def create_audio_request(self, audio_data):
        """Build a WebSocket frame carrying one gzip-compressed PCM chunk."""
        compressed_data = gzip.compress(audio_data)
        header = self.create_header(2, 0, 0, 1)  # audio message, no flags, binary, gzip
        size_bytes = struct.pack('>I', len(compressed_data))
        return header + size_bytes + compressed_data

    def on_message(self, ws, message):
        """
        WebSocket message callback.

        Extracts the recognized text from the (assumed JSON) response and
        (re)starts the pause timer; publishes immediately on a final result.
        """
        try:
            response = json.loads(message)
            if response['header']['status_code'] == 200:
                result = response['payload'].get('result', [{}])[0]
                text = result.get('text', '').strip()
                is_final = result.get('is_final', False)

                publish_now = False
                with self.text_lock:
                    if text:
                        self.pending_text_to_publish = text
                        # Restart the pause timer; the sentence is published
                        # once the user stops talking for the timeout.
                        if self.speech_activity_timer:
                            self.speech_activity_timer.cancel()
                        self.speech_activity_timer = threading.Timer(
                            self.speech_timeout_duration, self.publish_final_sentence)
                        self.speech_activity_timer.start()
                    # Read the pending text under the lock (the original read
                    # it unlocked, racing the Timer thread).
                    publish_now = is_final and bool(self.pending_text_to_publish)
                if publish_now:
                    self.publish_final_sentence()

            else:
                rospy.logwarn(f"ASR服务返回错误: {response['header']['status_message']}")
        except Exception as e:
            rospy.logerr(f"解析ASR响应失败: {e}")

    def publish_final_sentence(self):
        """Publish the pending sentence (if any) and reset session state."""
        with self.text_lock:
            if self.pending_text_to_publish:
                rospy.loginfo(f"发布识别结果: '{self.pending_text_to_publish}'")
                self.voice_pub.publish(self.pending_text_to_publish)
            # Use the lock-free variant here: the original re-acquired the
            # non-reentrant text_lock via reset_recognition_state(), which
            # deadlocked on the first published sentence.
            self._reset_locked()

    @staticmethod
    def _close_stream(stream):
        """Best-effort stop/close of a PyAudio stream (may already be closed)."""
        try:
            stream.stop_stream()
            stream.close()
        except Exception:
            pass

    def recording_thread(self):
        """
        Recording thread (producer).

        Reads audio from the microphone, runs silence detection, and puts
        voiced chunks on the queue.  A long stretch of silence ends the
        current session (b'END' marker) and re-opens the microphone.
        """
        rospy.loginfo("录音线程已启动。")
        audio = pyaudio.PyAudio()
        stream = None

        while self.is_active and not rospy.is_shutdown():
            try:
                if not self.is_recording:
                    # (Re)open the input stream.
                    stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                                        input=True, frames_per_buffer=CHUNK)
                    self.is_recording = True
                    rospy.loginfo("麦克风已开启，开始录音。")

                # exception_on_overflow=False: a transient buffer overrun
                # should drop data, not kill the capture loop.
                data = stream.read(CHUNK, exception_on_overflow=False)

                if self.is_silence(data):
                    self.silence_frames += 1
                else:
                    self.silence_frames = 0  # speech detected, reset the counter

                if self.silence_frames < self.max_silence_frames:
                    self.audio_queue.put(data)
                else:
                    # Long silence: close this recording session.
                    rospy.loginfo("检测到长时间静音，停止当前录音会话。")
                    self.is_recording = False
                    self.audio_queue.put(b'END')  # end-of-session marker
                    if stream:
                        self._close_stream(stream)
                    stream = None
                    time.sleep(1)  # brief pause before listening again
                    self.reset_recognition_state()

            except Exception as e:
                rospy.logerr(f"录音线程发生错误: {e}")
                self.is_recording = False
                if stream:
                    self._close_stream(stream)
                stream = None
                time.sleep(1)

        # Final cleanup.  NOTE: the original called stream.is_active() here,
        # which raises on an already-closed stream.
        if stream:
            self._close_stream(stream)
        audio.terminate()
        rospy.loginfo("录音线程已退出。")

    def websocket_thread(self):
        """
        WebSocket thread (consumer).

        Maintains the connection to the ASR cloud service and streams queued
        audio over it.  The original control flow called run_forever() — which
        blocks until the connection closes — *before* the sending loop, so
        audio was only ever sent to a dead connection.  run_forever() now runs
        in a nested daemon thread while this thread feeds audio.
        """
        rospy.loginfo("WebSocket线程已启动。")

        while self.is_active and not rospy.is_shutdown():
            try:
                def on_open(ws):
                    self.is_connected = True
                    # The first frame carries the session configuration.
                    ws.send(self.create_full_request(), websocket.ABNF.OPCODE_BINARY)

                def on_close(ws, code, msg):
                    self.is_connected = False
                    rospy.loginfo("WS已关闭。")

                self.ws = websocket.WebSocketApp(
                    self.ws_url,
                    on_message=self.on_message,
                    on_error=lambda ws, err: rospy.logerr(f"WS Error: {err}"),
                    on_close=on_close,
                    on_open=on_open)

                # Run the receive loop in the background.
                runner = threading.Thread(target=self.ws.run_forever)
                runner.daemon = True
                runner.start()

                # Wait (bounded) for the handshake to complete.
                deadline = time.time() + 5
                while self.is_active and not self.is_connected and time.time() < deadline:
                    time.sleep(0.05)

                # Stream audio while the session is alive.  A timeout on
                # get() lets us re-check the liveness flags periodically.
                while self.is_active and self.is_connected:
                    try:
                        data = self.audio_queue.get(timeout=0.5)
                    except queue.Empty:
                        continue
                    if data in (b'STOP', b'END'):
                        break
                    if self.ws and self.ws.sock and self.ws.sock.connected:
                        self.ws.send(self.create_audio_request(data), websocket.ABNF.OPCODE_BINARY)

                # Signal end-of-stream per the service API.
                if self.ws and self.ws.sock and self.ws.sock.connected:
                    self.ws.send(b'', websocket.ABNF.OPCODE_BINARY)

            except Exception as e:
                rospy.logerr(f"WebSocket线程发生错误: {e}")
            finally:
                if self.ws:
                    self.ws.close()
                self.ws = None
                self.is_connected = False
                rospy.loginfo("WebSocket连接已断开，等待重新连接...")
                time.sleep(2)

        rospy.loginfo("WebSocket线程已退出。")

    def shutdown(self):
        """Cleanup hook run when the ROS node shuts down."""
        rospy.loginfo("正在关闭ASR节点...")
        self.is_active = False
        # Wake the sender thread if it is blocked on the queue.
        self.audio_queue.put(b'STOP')
        if self.ws_thread and self.ws_thread.is_alive():
            self.ws_thread.join(timeout=2)
        if self.record_thread and self.record_thread.is_alive():
            self.record_thread.join(timeout=2)
        rospy.loginfo("ASR节点已关闭。")


if __name__ == '__main__':
    # Entry point: construct the node, register its cleanup hook, then hand
    # control to the ROS event loop until shutdown.
    try:
        node = VolcEngineASR()
        rospy.on_shutdown(node.shutdown)
        rospy.spin()
    except rospy.ROSInterruptException:
        # Normal Ctrl-C / ROS shutdown — nothing to do.
        pass