package com.hui.qiniucloud.service.impl;

import com.hui.qiniucloud.config.AsrProperties;
import com.hui.qiniucloud.service.IAsrService;
import jakarta.annotation.PostConstruct;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import reactor.core.publisher.Mono;

import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.io.*;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

import org.java_websocket.client.WebSocketClient;
import org.java_websocket.handshake.ServerHandshake;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.gson.JsonObject;

@Slf4j
@Service
public class AsrService implements IAsrService {

    // V3大模型流式语音识别API端点
    private static final String WSS_ENDPOINT = "wss://openspeech.bytedance.com/api/v3/sauc/bigmodel";
    private static final String RESOURCE_ID = "volc.bigasr.sauc.duration";

    // 协议常量 - 基于V3 API规范
    private static final byte PROTOCOL_VERSION = 0b0001;
    private static final byte DEFAULT_HEADER_SIZE = 0b0001;
    
    // Message Type
    private static final byte CLIENT_FULL_REQUEST = 0b0001;
    private static final byte CLIENT_AUDIO_ONLY_REQUEST = 0b0010;
    private static final byte SERVER_FULL_RESPONSE = 0b1001;
    private static final byte SERVER_ERROR_RESPONSE = 0b1111;
    
    // Message Type Specific Flags
    private static final byte NO_SEQUENCE = 0b0000;
    private static final byte POS_SEQUENCE = 0b0001;
    private static final byte NEG_SEQUENCE = 0b0010;
    private static final byte NEG_WITH_SEQUENCE = 0b0011;
    
    // Serialization Type
    private static final byte NO_SERIALIZATION = 0b0000;
    private static final byte JSON = 0b0001;
    
    // Compression Type
    private static final byte GZIP = 0b0001;
    
    // 音频处理常量
    private static final int DEFAULT_SAMPLE_RATE = 16000;
    private static final int DEFAULT_BITS = 16;
    private static final int DEFAULT_CHANNELS = 1;
    private static final int DEFAULT_SEGMENT_DURATION_MS = 200;

    private final ObjectMapper objectMapper = new ObjectMapper();
    private final Map<String, WebSocketClient> activeConnections = new ConcurrentHashMap<>();

    @Autowired
    private AsrProperties asrProperties;

    @PostConstruct
    public void validateConfig() {
        // Fail loudly (via log) at startup when credentials are missing; the
        // service stays up, but every recognition request will be rejected.
        boolean appIdMissing = !StringUtils.hasText(asrProperties.getAppId());
        boolean tokenMissing = !StringUtils.hasText(asrProperties.getToken());

        if (appIdMissing) {
            log.error("ASR配置错误: appId 为空，请检查 volcengine.asr.app-id 配置");
        }
        if (tokenMissing) {
            log.error("ASR配置错误: token 为空，请检查 volcengine.asr.token 配置");
        }

        log.info("V3 ASR配置加载成功: appId={}", asrProperties.getAppId());
    }

    /**
     * WebSocket streaming speech recognition against the V3 big-model API.
     *
     * Opens a one-shot WebSocket connection, streams the processed audio, and
     * resolves with the first recognized text. This method never emits an
     * error signal to subscribers: every failure path (bad config, empty
     * input, processing failure, timeout, transport error) degrades to "".
     *
     * @param audioData WAV container bytes or raw PCM (assumed 16kHz/16bit/mono)
     * @return Mono emitting the recognized text, or "" on any failure
     */
    public Mono<String> recognizeAudio(byte[] audioData) {
        // Refuse to run without credentials (also reported at startup).
        if (!StringUtils.hasText(asrProperties.getAppId()) ||
            !StringUtils.hasText(asrProperties.getToken())) {
            log.error("ASR配置不完整，无法进行语音识别");
            return Mono.just("");
        }

        // Guard against null/empty input.
        if (audioData == null || audioData.length == 0) {
            log.error("音频数据为空");
            return Mono.just("");
        }

        log.debug("开始V3大模型WebSocket语音识别，音频数据大小: {} bytes", audioData.length);

        // One connection per request, tracked under a fresh connect id.
        String connectId = UUID.randomUUID().toString();
        CompletableFuture<String> resultFuture = new CompletableFuture<>();

        try {
            // Normalize the audio: strip the WAV header if present and split
            // into chunks sized from the audio parameters.
            AudioProcessResult processResult = processAudioData(audioData);
            if (processResult == null) {
                log.error("音频数据处理失败");
                return Mono.just("");
            }

            WebSocketClient client = createV3WebSocketClient(connectId, resultFuture, processResult);
            activeConnections.put(connectId, client);

            // Non-blocking connect; the client's onOpen callback drives the protocol.
            client.connect();

            // resultFuture is completed by the WebSocket callbacks; cap the wait.
            return Mono.fromFuture(resultFuture)
                    .timeout(java.time.Duration.ofSeconds(45))  // hard cap on total recognition time
                    .doOnError(throwable -> {
                        log.error("识别过程超时或出错: {}", throwable.getMessage());
                        // Force-close the connection on timeout/error.
                        WebSocketClient connection = activeConnections.remove(connectId);
                        if (connection != null && connection.isOpen()) {
                            connection.close();
                        }
                        // Make sure the future cannot dangle.
                        if (!resultFuture.isDone()) {
                            resultFuture.complete("");
                        }
                    })
                    .onErrorReturn("")  // degrade timeout/errors to an empty result
                    .doFinally(signal -> {
                        // Idempotent cleanup covering the success path as well;
                        // remove() makes the doOnError/doFinally pair safe to run twice.
                        WebSocketClient connection = activeConnections.remove(connectId);
                        if (connection != null && connection.isOpen()) {
                            connection.close();
                        }
                    });

        } catch (Exception e) {
            log.error("创建WebSocket连接失败", e);
            return Mono.just("");
        }
    }

    /**
     * Builds (but does not connect) a WebSocketClient implementing the V3
     * big-model streaming protocol. The returned client completes
     * resultFuture exactly once — with the first non-empty recognized text,
     * or "" on error/close — so the caller's Mono always resolves.
     *
     * @param connectId     unique id, also sent as the X-Api-Connect-Id header
     * @param resultFuture  completed with the recognition result or ""
     * @param processResult pre-chunked audio plus its format metadata
     */
    private WebSocketClient createV3WebSocketClient(String connectId, CompletableFuture<String> resultFuture, AudioProcessResult processResult) {
        try {
            URI serverUri = new URI(WSS_ENDPOINT);
            // Sequence numbers are 1-based; seq 1 is the config frame.
            AtomicInteger sequenceNumber = new AtomicInteger(1);

            WebSocketClient client = new WebSocketClient(serverUri) {
                @Override
                public void onOpen(ServerHandshake handshake) {
                    log.info("V3 WebSocket连接已建立: {}, 状态码: {}", connectId, handshake.getHttpStatus());
                    String logId = handshake.getFieldValue("X-Tt-Logid");
                    if (logId != null) {
                        log.info("X-Tt-Logid: {}", logId);
                    }

                    // Handshake complete — drive the V3 binary protocol from here.
                    try {
                        // Frame 1: full client request (JSON configuration).
                        sendV3FullClientRequest(this, processResult.audioInfo, sequenceNumber.getAndIncrement());
                        log.debug("发送V3完整客户端请求成功");

                        // NOTE(review): fixed 200ms pause on the socket thread before
                        // streaming audio — presumably to let the server register the
                        // config frame; confirm whether the protocol requires it.
                        Thread.sleep(200);

                        // Frames 2..n: audio chunks (blocks this callback thread).
                        sendV3AudioInChunks(this, processResult.audioSegments, sequenceNumber);

                    } catch (Exception e) {
                        log.error("发送V3音频数据失败", e);
                        resultFuture.complete("");
                        if (isOpen()) {
                            close();
                        }
                    }
                }

                @Override
                public void onMessage(String message) {
                    // Text frames are not the normal V3 path; handled defensively as JSON.
                    log.debug("收到文本消息: {}", message);
                    try {
                        // Parsed only to validate the payload is JSON; the raw
                        // string is re-parsed inside extractTextFromV3Response.
                        Map<String, Object> response = objectMapper.readValue(message, new TypeReference<Map<String, Object>>() {});
                        String result = extractTextFromV3Response(message);
                        if (!result.isEmpty()) {
                            log.info("语音识别结果: {}", result);
                            resultFuture.complete(result);
                        }
                    } catch (Exception e) {
                        log.error("解析文本消息失败", e);
                    }
                }

                @Override
                public void onMessage(ByteBuffer bytes) {
                    try {
                        log.debug("收到V3二进制消息，长度: {} bytes", bytes.remaining());
                        // NOTE(review): bytes.array() assumes an array-backed,
                        // non-read-only buffer and ignores arrayOffset/position —
                        // appears to hold for Java-WebSocket's buffers, but verify.
                        V3AsrResponse response = parseV3BinaryResponse(bytes.array());
                        if (response != null) {
                            log.info("V3语音识别响应: {}", response);

                            // Complete on the first non-empty recognition text.
                            if (!response.payloadMsg.isEmpty()) {
                                String result = extractTextFromV3Response(response.payloadMsg);
                                if (!result.isEmpty()) {
                                    log.info("提取到识别文本: '{}'", result);
                                    // Complete immediately; do not wait for the last package.
                                    if (!resultFuture.isDone()) {
                                        resultFuture.complete(result);
                                    }
                                }
                            }

                            // Last package: finish with "" if nothing was recognized.
                            if (response.isLastPackage) {
                                log.info("收到最后一包，序号: {}", response.payloadSequence);
                                if (!resultFuture.isDone()) {
                                    resultFuture.complete("");
                                }
                                // Close proactively from our side.
                                if (isOpen()) {
                                    close(1000, "识别完成");
                                }
                            }
                        }

                        // Server-reported error: finish with "" and close.
                        if (response != null && response.code != 0) {
                            log.error("V3服务器返回错误: 代码={}, 消息={}", response.code, response.payloadMsg);
                            if (!resultFuture.isDone()) {
                                resultFuture.complete("");
                            }
                            if (isOpen()) {
                                close(1000, "服务器错误");
                            }
                        }
                    } catch (Exception e) {
                        log.error("解析V3识别结果失败", e);
                        if (!resultFuture.isDone()) {
                            resultFuture.complete("");
                        }
                    }
                }

                @Override
                public void onClose(int code, String reason, boolean remote) {
                    log.warn("V3 WebSocket连接已关闭: {} - {} (remote: {})", code, reason, remote);
                    if (code == 1006) {
                        // 1006 = abnormal closure, typically auth or protocol failure.
                        log.error("连接异常关闭，可能是认证失败或协议错误");
                    }
                    // Never leave the caller waiting on a dead connection.
                    if (!resultFuture.isDone()) {
                        resultFuture.complete("");
                    }
                }

                @Override
                public void onError(Exception ex) {
                    log.error("V3 WebSocket连接错误: {}", ex.getMessage(), ex);
                    // Never leave the caller waiting after a transport error.
                    if (!resultFuture.isDone()) {
                        resultFuture.complete("");
                    }
                }
            };

            // V3 authentication travels in handshake headers, not in the payload.
            client.addHeader("X-Api-App-Key", asrProperties.getAppId());
            client.addHeader("X-Api-Access-Key", asrProperties.getToken());
            client.addHeader("X-Api-Resource-Id", RESOURCE_ID);
            client.addHeader("X-Api-Connect-Id", connectId);
            client.addHeader("User-Agent", "qiniucloud-asr-v3/2.0");

            log.debug("V3 WebSocket 认证头信息:");
            log.debug("  X-Api-App-Key: {}", asrProperties.getAppId());
            log.debug("  X-Api-Access-Key: {}", "***masked***");
            log.debug("  X-Api-Resource-Id: {}", RESOURCE_ID);
            log.debug("  X-Api-Connect-Id: {}", connectId);

            return client;

        } catch (Exception e) {
            throw new RuntimeException("创建V3 WebSocket客户端失败", e);
        }
    }

    /**
     * Immutable pairing of parsed audio metadata with the chunked PCM payload
     * that will be streamed to the ASR endpoint.
     */
    static class AudioProcessResult {
        public final AudioInfo audioInfo;
        public final List<byte[]> audioSegments;

        public AudioProcessResult(AudioInfo info, List<byte[]> segments) {
            this.audioInfo = info;
            this.audioSegments = segments;
        }
    }

    /**
     * Immutable audio format descriptor: sample rate (Hz), channel count and
     * bits per sample.
     */
    static class AudioInfo {
        public final int sampleRate;
        public final int channels;
        public final int bitsPerSample;

        public AudioInfo(int rate, int channelCount, int bits) {
            this.sampleRate = rate;
            this.channels = channelCount;
            this.bitsPerSample = bits;
        }
    }
    
    /**
     * Parsed representation of one V3 binary response frame.
     */
    static class V3AsrResponse {
        // Error code from a SERVER_ERROR_RESPONSE frame; 0 means no error.
        public int code;
        // Optional event id (present when header flag bit 0x04 is set).
        public int event;
        // True when the server flagged this frame as the final package.
        public boolean isLastPackage;
        // Sequence number echoed by the server (flag bit 0x01).
        public int payloadSequence;
        // Declared payload size from the type-specific header section.
        public int payloadSize;
        // JSON payload text. Initialized to "" (NPE fix): parseV3BinaryResponse
        // only assigns this field for JSON payloads, yet onMessage(ByteBuffer)
        // calls payloadMsg.isEmpty() unconditionally — a non-JSON or truncated
        // frame used to throw a NullPointerException there.
        public String payloadMsg = "";

        @Override
        public String toString() {
            return String.format("V3AsrResponse{code=%d, event=%d, isLastPackage=%s, payloadSequence=%d, payloadSize=%d, payloadMsg='%s'}", 
                code, event, isLastPackage, payloadSequence, payloadSize, payloadMsg);
        }
    }

    /**
     * Prepares raw input for streaming: detects a WAV container (falling back
     * to raw-PCM assumptions), extracts the PCM payload, logs a quality
     * report, and splits the bytes into fixed-duration segments.
     *
     * @param audioData WAV file bytes or raw PCM
     * @return metadata + segments, or null when processing throws
     */
    private AudioProcessResult processAudioData(byte[] audioData) {
        try {
            if (!isWavFormat(audioData)) {
                // Raw-PCM path: assume the V3 standard parameters.
                AudioInfo standardAudioInfo = new AudioInfo(16000, 1, 16);
                log.info("假设为原始PCM音频数据，使用V3标准参数: 16000Hz/16bit/单声道");

                analyzeAudioQuality(audioData);

                int chunkBytes = calculateSegmentSize(standardAudioInfo, DEFAULT_SEGMENT_DURATION_MS);
                return new AudioProcessResult(standardAudioInfo, splitAudio(audioData, chunkBytes));
            }

            AudioInfo audioInfo = parseWavInfo(audioData);
            log.info("检测到WAV格式，音频信息: 采样率={}, 声道数={}, 位深度={}", 
                audioInfo.sampleRate, audioInfo.channels, audioInfo.bitsPerSample);

            // The V3 API strictly expects 16kHz / 16bit / mono.
            boolean conforms = audioInfo.sampleRate == 16000
                    && audioInfo.bitsPerSample == 16
                    && audioInfo.channels == 1;
            if (!conforms) {
                log.warn("音频格式不符合V3 API要求，期望：16000Hz/16bit/单声道，实际：{}Hz/{}bit/{}声道", 
                        audioInfo.sampleRate, audioInfo.bitsPerSample, audioInfo.channels);
                // NOTE(review): only the metadata is relabeled here — the samples
                // are not resampled; mismatched audio is sent as-is.
                audioInfo = new AudioInfo(16000, 1, 16);
            }

            byte[] pureAudioData = extractWavAudioData(audioData);
            log.info("提取纯音频数据，长度: {} 字节", pureAudioData.length);

            analyzeAudioQuality(pureAudioData);

            int chunkBytes = calculateSegmentSize(audioInfo, DEFAULT_SEGMENT_DURATION_MS);
            return new AudioProcessResult(audioInfo, splitAudio(pureAudioData, chunkBytes));
        } catch (Exception e) {
            log.error("处理音频数据失败", e);
            return null;
        }
    }

    /**
     * Logs a quick quality report for 16-bit little-endian PCM audio: sample
     * count, duration (assuming 16kHz), non-silent ratio and peak amplitude.
     * Purely diagnostic — has no effect on the recognition pipeline.
     *
     * Fix: SLF4J only substitutes "{}" placeholders; the previous "{:.2f}"
     * messages printed the braces literally and dropped the arguments.
     * Numeric values are now pre-formatted via String.format. Also guards the
     * zero-sample case (1-byte input) that produced a NaN ratio.
     */
    private void analyzeAudioQuality(byte[] audioData) {
        if (audioData == null || audioData.length == 0) {
            log.warn("音频数据为空");
            return;
        }

        int totalSamples = audioData.length / 2;
        if (totalSamples == 0) {
            // A single stray byte cannot form a 16-bit sample; avoid 0/0 below.
            log.warn("音频数据不足一个16位样本，跳过质量分析");
            return;
        }

        boolean isAllZero = true;
        int nonZeroCount = 0;
        int maxAmplitude = 0;

        // Walk 16-bit samples (little-endian byte order).
        for (int i = 0; i < audioData.length - 1; i += 2) {
            short sample = (short) ((audioData[i + 1] << 8) | (audioData[i] & 0xFF));
            int amplitude = Math.abs(sample);

            if (sample != 0) {
                isAllZero = false;
                nonZeroCount++;
                maxAmplitude = Math.max(maxAmplitude, amplitude);
            }
        }

        double nonZeroRatio = (double) nonZeroCount / totalSamples;
        double durationSeconds = (double) totalSamples / 16000;

        log.info("音频质量分析:");
        log.info("  总样本数: {}", totalSamples);
        log.info("  音频时长: {} 秒", String.format("%.2f", durationSeconds));
        log.info("  非零样本数: {} ({}%)", nonZeroCount, String.format("%.2f", nonZeroRatio * 100));
        log.info("  最大振幅: {} (16位范围: 0-32767)", maxAmplitude);

        if (isAllZero) {
            log.error("⚠️ 音频数据全为零，这是静音音频，无法进行语音识别");
        } else if (nonZeroRatio < 0.1) {
            log.warn("⚠️ 音频数据中有效信号较少 ({}%)，可能影响识别效果", String.format("%.2f", nonZeroRatio * 100));
        } else if (maxAmplitude < 1000) {
            log.warn("⚠️ 音频信号振幅较小 (最大: {})，可能音量过低", maxAmplitude);
        } else {
            log.info("✓ 音频数据质量检查通过");
        }
    }

    /**
     * Returns true when the buffer begins with a RIFF/WAVE container header.
     */
    private boolean isWavFormat(byte[] data) {
        // A canonical WAV header is 44 bytes; anything shorter cannot qualify.
        if (data.length < 44) {
            return false;
        }
        boolean hasRiffTag = data[0] == 'R' && data[1] == 'I' && data[2] == 'F' && data[3] == 'F';
        boolean hasWaveTag = data[8] == 'W' && data[9] == 'A' && data[10] == 'V' && data[11] == 'E';
        return hasRiffTag && hasWaveTag;
    }

    /**
     * Reads channel count, sample rate and bit depth from the fixed fmt-chunk
     * offsets of a WAV header (all fields little-endian).
     *
     * @throws IllegalArgumentException when data is not a WAV container
     */
    private AudioInfo parseWavInfo(byte[] data) {
        if (!isWavFormat(data)) {
            throw new IllegalArgumentException("Not a valid WAV file");
        }

        // Channel count: bytes 22-23.
        int channels = (data[22] & 0xFF) | ((data[23] & 0xFF) << 8);

        // Sample rate: bytes 24-27.
        int sampleRate = (data[24] & 0xFF) | ((data[25] & 0xFF) << 8)
                | ((data[26] & 0xFF) << 16) | ((data[27] & 0xFF) << 24);

        // Bits per sample: bytes 34-35.
        int bitsPerSample = (data[34] & 0xFF) | ((data[35] & 0xFF) << 8);

        log.debug("解析WAV信息: 声道数={}, 采样率={}, 位深度={}", channels, sampleRate, bitsPerSample);

        return new AudioInfo(sampleRate, channels, bitsPerSample);
    }
    
    /**
     * Bytes needed to hold segmentDurationMs of audio at the given format:
     * channels * bytesPerSample * sampleRate * duration / 1000.
     */
    private int calculateSegmentSize(AudioInfo audioInfo, int segmentDurationMs) {
        int bytesPerSample = audioInfo.bitsPerSample / 8;
        int bytesPerSecond = audioInfo.channels * bytesPerSample * audioInfo.sampleRate;
        int segmentBytes = bytesPerSecond * segmentDurationMs / 1000;
        log.debug("计算分段大小: 声道数={}, 采样宽度={}, 采样率={}, 分段大小={} 字节", 
                 audioInfo.channels, bytesPerSample, audioInfo.sampleRate, segmentBytes);
        return segmentBytes;
    }
    
    /**
     * Extracts the raw PCM payload from a WAV container by scanning for the
     * "data" subchunk and copying its contents.
     *
     * Fix: the declared subchunk size is validated against the bytes actually
     * remaining in the buffer — a truncated or corrupted header used to cause
     * an ArrayIndexOutOfBoundsException in System.arraycopy.
     *
     * @param wavData complete WAV file bytes (must pass isWavFormat)
     * @return the PCM bytes of the data subchunk
     * @throws IllegalArgumentException when not a WAV file or no data subchunk exists
     */
    private byte[] extractWavAudioData(byte[] wavData) {
        if (!isWavFormat(wavData)) {
            throw new IllegalArgumentException("Not a valid WAV file");
        }
        
        // Scan byte-by-byte after the canonical 36-byte header for "data".
        int pos = 36;
        while (pos < wavData.length - 8) {
            if (wavData[pos] == 'd' && wavData[pos + 1] == 'a' && 
                wavData[pos + 2] == 't' && wavData[pos + 3] == 'a') {
                
                // Subchunk size is a 32-bit little-endian integer.
                int dataSize = ((wavData[pos + 7] & 0xFF) << 24) | 
                             ((wavData[pos + 6] & 0xFF) << 16) | 
                             ((wavData[pos + 5] & 0xFF) << 8) | 
                             (wavData[pos + 4] & 0xFF);
                
                // Clamp to what is actually present so a lying header cannot
                // push the copy past the end of the buffer.
                int available = wavData.length - (pos + 8);
                if (dataSize < 0 || dataSize > available) {
                    log.warn("data子块声明大小{}超出实际剩余{}字节，按实际大小截断", dataSize, available);
                    dataSize = Math.max(available, 0);
                }
                
                log.debug("找到data子块，大小: {} 字节", dataSize);
                
                byte[] audioData = new byte[dataSize];
                System.arraycopy(wavData, pos + 8, audioData, 0, dataSize);
                
                // Verify the container claims PCM_S16LE before handing it on.
                return ensurePCMS16LE(audioData, wavData);
            }
            pos++;
        }
        
        throw new IllegalArgumentException("No data subchunk found in WAV file");
    }
    
    /**
     * Best-effort check that the WAV header declares PCM_S16LE (format tag 1,
     * 16 bits per sample). The audio bytes are returned unchanged on every
     * path — non-PCM input is only logged, never converted.
     */
    private byte[] ensurePCMS16LE(byte[] audioData, byte[] originalWavData) {
        try {
            // Format tag lives at bytes 20-21, bit depth at 34-35 (little-endian).
            int formatTag = (originalWavData[20] & 0xFF) | ((originalWavData[21] & 0xFF) << 8);
            int bitsPerSample = (originalWavData[34] & 0xFF) | ((originalWavData[35] & 0xFF) << 8);

            log.debug("WAV格式信息 - 格式代码: {}, 位深度: {}", formatTag, bitsPerSample);

            // Format tag 1 == PCM; anything else is passed through for the API to judge.
            if (formatTag == 1 && bitsPerSample == 16) {
                log.debug("音频数据已经是PCM_S16LE格式，直接返回");
            } else {
                log.warn("音频格式不是标准PCM_S16LE (格式代码: {}, 位深度: {})", formatTag, bitsPerSample);
            }
            return audioData;
        } catch (Exception e) {
            log.warn("无法验证音频格式，直接返回原始音频数据: {}", e.getMessage());
            return audioData;
        }
    }
    
    /**
     * Chops the audio into consecutive chunks of at most segmentSize bytes;
     * the final chunk carries whatever remains.
     */
    private List<byte[]> splitAudio(byte[] audioData, int segmentSize) {
        List<byte[]> segments = new ArrayList<>();
        int offset = 0;
        while (offset < audioData.length) {
            int chunkLen = Math.min(segmentSize, audioData.length - offset);
            segments.add(Arrays.copyOfRange(audioData, offset, offset + chunkLen));
            offset += chunkLen;
        }
        log.debug("音频分割完成，共{}段，每段约{}字节", segments.size(), segmentSize);
        return segments;
    }

    /**
     * Sends the V3 "full client request": a gzip-compressed JSON configuration
     * frame (user, audio format, request options) preceded by the 4-byte
     * binary header, the sequence number and the payload length.
     */
    private void sendV3FullClientRequest(WebSocketClient client, AudioInfo audioInfo, int seq) throws Exception {
        JsonObject user = new JsonObject();
        user.addProperty("uid", "qiniucloud_uid");

        // The V3 API expects raw PCM (PCM_S16LE) at 16kHz / 16bit / mono.
        JsonObject audio = new JsonObject();
        audio.addProperty("format", "pcm");
        audio.addProperty("rate", 16000);
        audio.addProperty("bits", 16);
        audio.addProperty("channel", 1);

        JsonObject request = new JsonObject();
        request.addProperty("model_name", "bigmodel");
        request.addProperty("enable_itn", true);
        request.addProperty("enable_punc", true);
        request.addProperty("enable_ddc", false);
        request.addProperty("show_utterances", true);
        request.addProperty("result_type", "full");
        request.addProperty("enable_accelerate_text", false);
        request.addProperty("accelerate_score", 0);

        JsonObject payload = new JsonObject();
        payload.add("user", user);
        payload.add("audio", audio);
        payload.add("request", request);

        log.info("发送V3完整客户端请求: {}", payload.toString());

        byte[] compressed = gzipCompress(payload.toString().getBytes(StandardCharsets.UTF_8));
        byte[] header = getV3Header(CLIENT_FULL_REQUEST, POS_SEQUENCE, JSON, GZIP, (byte) 0);

        // Frame layout: header | int32 sequence | int32 payload size | payload
        // (ByteBuffer.putInt writes big-endian, matching intToBytes).
        ByteBuffer frame = ByteBuffer.allocate(header.length + 8 + compressed.length);
        frame.put(header);
        frame.putInt(seq);
        frame.putInt(compressed.length);
        frame.put(compressed);

        byte[] fullClientRequest = frame.array();
        client.send(fullClientRequest);
        log.info("V3完整客户端请求发送成功，总长度: {} 字节", fullClientRequest.length);
    }

    /**
     * Streams the prepared audio segments over the open WebSocket, one frame
     * per DEFAULT_SEGMENT_DURATION_MS, stopping early if the socket closes or
     * a send fails.
     *
     * Fix: InterruptedException is now caught separately and the thread's
     * interrupt flag is restored — the generic catch used to swallow the
     * interruption, hiding it from callers.
     */
    private void sendV3AudioInChunks(WebSocketClient client, List<byte[]> audioSegments, AtomicInteger sequenceNumber) {
        log.info("开始发送V3音频数据，共{}段", audioSegments.size());
        
        for (int i = 0; i < audioSegments.size() && client.isOpen(); i++) {
            byte[] segment = audioSegments.get(i);
            boolean isLast = (i == audioSegments.size() - 1);
            
            int seq = sequenceNumber.getAndIncrement();
            // Per protocol, the last chunk is sent with a negated sequence number.
            int finalSeq = isLast ? -seq : seq;
            
            log.info("发送V3音频分段: 序号{}, 长度{}, 是否最后一段: {}", seq, segment.length, isLast);
            
            // Diagnostic only: flag all-zero (silent) chunks.
            boolean hasValidData = false;
            for (byte b : segment) {
                if (b != 0) {
                    hasValidData = true;
                    break;
                }
            }
            
            if (!hasValidData) {
                log.warn("音频段{}为静音或空数据", seq);
            } else {
                log.debug("音频段{}包含有效数据", seq);
            }
            
            try {
                sendV3AudioSegment(client, segment, isLast, finalSeq);
                
                // Pace the stream roughly at real time.
                Thread.sleep(DEFAULT_SEGMENT_DURATION_MS);
            } catch (InterruptedException e) {
                // Restore the interrupt status before bailing out.
                Thread.currentThread().interrupt();
                log.error("发送V3音频分段失败", e);
                break;
            } catch (Exception e) {
                log.error("发送V3音频分段失败", e);
                break;
            }
        }
        log.info("V3音频数据发送完成");
    }

    /**
     * Sends one gzip-compressed audio chunk as an audio-only request frame.
     * The final chunk is flagged NEG_WITH_SEQUENCE (and its sequence number
     * arrives already negated from the caller).
     */
    private void sendV3AudioSegment(WebSocketClient client, byte[] audioData, boolean isLast, int seq) {
        byte flags = isLast ? NEG_WITH_SEQUENCE : POS_SEQUENCE;
        byte[] header = getV3Header(CLIENT_AUDIO_ONLY_REQUEST, flags, NO_SERIALIZATION, GZIP, (byte) 0);
        byte[] compressed = gzipCompress(audioData);

        // Frame layout: header | int32 sequence | int32 payload size | payload
        // (ByteBuffer.putInt writes big-endian, matching intToBytes).
        ByteBuffer frame = ByteBuffer.allocate(header.length + 8 + compressed.length);
        frame.put(header);
        frame.putInt(seq);
        frame.putInt(compressed.length);
        frame.put(compressed);

        client.send(frame.array());
    }

    /**
     * Parses a V3 binary response frame into a V3AsrResponse.
     *
     * Frame layout (per the V3 protocol as used here): a 4-byte header where
     * each byte packs two 4-bit fields, followed by optional 4-byte big-endian
     * fields selected by the flag bits, a type-specific prefix, then the
     * (possibly gzip-compressed) payload.
     *
     * Never throws: parse failures are logged and a partially-filled result
     * object is returned; null only for null/empty input.
     */
    private V3AsrResponse parseV3BinaryResponse(byte[] responseData) {
        if (responseData == null || responseData.length == 0) {
            return null;
        }

        V3AsrResponse result = new V3AsrResponse();

        try {
            // Header bytes: high nibble first.
            int protocolVersion = (responseData[0] >> 4) & 0x0f;
            int headerSize = responseData[0] & 0x0f;  // in 4-byte units
            int messageType = (responseData[1] >> 4) & 0x0f;
            int messageTypeSpecificFlags = responseData[1] & 0x0f;
            int serializationMethod = (responseData[2] >> 4) & 0x0f;
            int messageCompression = responseData[2] & 0x0f;
            int reserved = responseData[3];

            log.debug("V3响应头部 - 协议版本: {}, 头大小: {}, 消息类型: {}, 标志: {}, 序列化: {}, 压缩: {}",
                     protocolVersion, headerSize, messageType, messageTypeSpecificFlags, serializationMethod, messageCompression);

            // Everything after the (headerSize * 4)-byte header.
            byte[] payload = Arrays.copyOfRange(responseData, headerSize * 4, responseData.length);
            
            // Flag 0x01: a 4-byte sequence number precedes the payload.
            if ((messageTypeSpecificFlags & 0x01) != 0) {
                result.payloadSequence = bytesToInt(Arrays.copyOfRange(payload, 0, 4));
                payload = Arrays.copyOfRange(payload, 4, payload.length);
            }
            // Flag 0x02: this is the final package of the stream.
            if ((messageTypeSpecificFlags & 0x02) != 0) {
                result.isLastPackage = true;
            }
            // Flag 0x04: a 4-byte event id precedes the payload.
            if ((messageTypeSpecificFlags & 0x04) != 0) {
                result.event = bytesToInt(Arrays.copyOfRange(payload, 0, 4));
                payload = Arrays.copyOfRange(payload, 4, payload.length);
            }

            // Type-specific prefix: size for full responses; code + size for errors.
            switch (messageType) {
                case SERVER_FULL_RESPONSE:
                    result.payloadSize = bytesToInt(Arrays.copyOfRange(payload, 0, 4));
                    payload = Arrays.copyOfRange(payload, 4, payload.length);
                    break;
                case SERVER_ERROR_RESPONSE:
                    result.code = bytesToInt(Arrays.copyOfRange(payload, 0, 4));
                    result.payloadSize = bytesToInt(Arrays.copyOfRange(payload, 4, 8));
                    payload = Arrays.copyOfRange(payload, 8, payload.length);
                    break;
            }

            if (payload.length == 0) {
                return result;
            }

            // Inflate if the header declared gzip compression.
            if (messageCompression == GZIP) {
                payload = gzipDecompress(payload);
            }

            // Only JSON payloads are decoded to text (gzipDecompress may return null).
            if (serializationMethod == JSON && payload != null) {
                result.payloadMsg = new String(payload, StandardCharsets.UTF_8);
            }

        } catch (Exception e) {
            log.error("解析V3二进制响应失败", e);
        }

        return result;
    }

    /**
     * Pulls the recognized text out of a V3 JSON response. Checks result.text
     * first, then falls back to concatenating result.utterances[*].text.
     *
     * Fix: values are fetched with get() and null-checked — the previous
     * containsKey() + get().toString() pattern threw a NullPointerException
     * whenever the server sent an explicit JSON null for "text".
     *
     * @param jsonResponse raw JSON payload from the server
     * @return recognized text, or "" when none is present or parsing fails
     */
    private String extractTextFromV3Response(String jsonResponse) {
        try {
            Map<String, Object> response = objectMapper.readValue(jsonResponse, new TypeReference<Map<String, Object>>() {});
            
            Object resultObj = response.get("result");
            if (resultObj instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> resultMap = (Map<String, Object>) resultObj;

                // Primary location: result.text (may be JSON null).
                Object textObj = resultMap.get("text");
                if (textObj != null) {
                    String text = textObj.toString();
                    log.debug("从V3响应提取识别文本: {}", text);
                    return text;
                }

                // Fallback: concatenate result.utterances[*].text.
                Object utterancesObj = resultMap.get("utterances");
                if (utterancesObj instanceof List) {
                    List<?> utterances = (List<?>) utterancesObj;
                    StringBuilder textBuilder = new StringBuilder();
                    for (Object utterance : utterances) {
                        if (utterance instanceof Map) {
                            Object utteranceText = ((Map<?, ?>) utterance).get("text");
                            if (utteranceText != null) {
                                textBuilder.append(utteranceText);
                            }
                        }
                    }
                    if (textBuilder.length() > 0) {
                        String text = textBuilder.toString();
                        log.debug("从V3响应utterances提取识别文本: {}", text);
                        return text;
                    }
                }
            }
            
            log.debug("V3响应中未找到识别结果，完整响应: {}", response);
            
        } catch (Exception e) {
            log.error("提取V3识别文本失败", e);
        }
        
        return "";
    }
    
    /**
     * Builds the fixed 4-byte V3 frame header; each byte packs two 4-bit
     * fields (high nibble first): version/size, type/flags,
     * serialization/compression, plus one reserved byte.
     */
    private byte[] getV3Header(byte messageType, byte messageTypeSpecificFlags,
            byte serialMethod, byte compressionType, byte reservedData) {
        return new byte[] {
                (byte) ((PROTOCOL_VERSION << 4) | DEFAULT_HEADER_SIZE),
                (byte) ((messageType << 4) | messageTypeSpecificFlags),
                (byte) ((serialMethod << 4) | compressionType),
                reservedData
        };
    }

    /** Encodes an int as 4 bytes in big-endian (network) order. */
    private byte[] intToBytes(int a) {
        // ByteBuffer defaults to big-endian, matching the manual shift version.
        return ByteBuffer.allocate(4).putInt(a).array();
    }

    /**
     * Decodes exactly 4 big-endian bytes into an int.
     *
     * @throws IllegalArgumentException when src is null or not 4 bytes long
     */
    private int bytesToInt(byte[] src) {
        if (src == null || (src.length != 4)) {
            throw new IllegalArgumentException("Invalid byte array for int conversion");
        }
        // ByteBuffer defaults to big-endian, consistent with the wire protocol.
        return ByteBuffer.wrap(src).getInt();
    }

    /**
     * GZIP-compresses src. Returns an empty array for null/empty input or on
     * I/O failure (deliberate best-effort: callers send whatever comes back).
     */
    private byte[] gzipCompress(byte[] src) {
        if (src == null || src.length == 0) {
            return new byte[0];
        }
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (GZIPOutputStream zipped = new GZIPOutputStream(buffer)) {
            zipped.write(src);
        } catch (IOException e) {
            log.error("GZIP压缩失败", e);
            return new byte[0];
        }
        return buffer.toByteArray();
    }

    /**
     * GZIP-decompresses src. Returns null for null/empty input or when the
     * stream is corrupt — callers must null-check the result.
     */
    private byte[] gzipDecompress(byte[] src) {
        if (src == null || src.length == 0) {
            return null;
        }
        ByteArrayOutputStream unzipped = new ByteArrayOutputStream();
        try (GZIPInputStream zipped = new GZIPInputStream(new ByteArrayInputStream(src))) {
            // transferTo drains the stream to EOF, same as a manual read loop.
            zipped.transferTo(unzipped);
        } catch (IOException e) {
            log.error("GZIP解压缩失败", e);
            return null;
        }
        return unzipped.toByteArray();
    }
}