package com.eduagent.xwqeduagent.api.InstantVoiceToTextConversion.service;

import com.eduagent.xwqeduagent.api.InstantVoiceToTextConversion.constant.VoiceRecognitionConstant;
import com.eduagent.xwqeduagent.api.InstantVoiceToTextConversion.model.BusinessParam;
import com.eduagent.xwqeduagent.api.InstantVoiceToTextConversion.model.RecognitionResult;
import com.eduagent.xwqeduagent.api.InstantVoiceToTextConversion.model.RequestData;
import com.eduagent.xwqeduagent.api.InstantVoiceToTextConversion.util.AuthUtil;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.java_websocket.client.WebSocketClient;
import org.java_websocket.handshake.ServerHandshake;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Speech-to-text recognition service.
 *
 * <p>Streams raw PCM audio to a WebSocket-based speech recognition endpoint
 * and collects the transcribed text, either synchronously
 * ({@link #recognizeSpeech}) or asynchronously via a
 * {@link RecognitionCallback}.</p>
 *
 * <p>Thread-safety: the shared {@link ObjectMapper} is thread-safe after
 * construction; every recognition request gets its own WebSocket client and
 * per-session mutable state, so concurrent calls do not interfere.</p>
 */
@Service
@Slf4j
public class VoiceRecognitionService {

    /** Bytes of PCM audio carried in each WebSocket frame. */
    private static final int FRAME_SIZE = 1280;

    /** Delay between consecutive audio frames, in milliseconds. */
    private static final int FRAME_INTERVAL_MS = 40;

    /** Maximum time the synchronous API waits for a terminal result. */
    private static final long SYNC_TIMEOUT_SECONDS = 60;

    /** Shared, thread-safe JSON mapper. */
    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * 执行语音识别（同步方法）
     *
     * <p>Wraps {@link #recognizeSpeechAsync} and blocks until a terminal
     * callback fires or {@link #SYNC_TIMEOUT_SECONDS} elapses.</p>
     *
     * @param audioData     音频数据 (raw PCM bytes)
     * @param businessParam 业务参数
     * @return 识别结果文本
     * @throws TimeoutException      if no terminal result arrives in time
     *                               (previously a timeout silently returned "")
     * @throws IllegalStateException if the recognizer reported an error
     *                               (previously errors silently returned "")
     * @throws Exception             识别过程中的异常
     */
    public String recognizeSpeech(byte[] audioData, BusinessParam businessParam) throws Exception {
        // 异步转同步
        StringBuilder resultBuilder = new StringBuilder();
        // First error reported by the async callback, if any.
        AtomicReference<String> errorRef = new AtomicReference<>();
        CountDownLatch latch = new CountDownLatch(1);

        // 创建回调
        RecognitionCallback callback = new RecognitionCallback() {
            @Override
            public void onSuccess(String result) {
                // countDown() happens-before await() returning, so the caller
                // thread safely observes resultBuilder's contents.
                resultBuilder.append(result);
                latch.countDown();
            }

            @Override
            public void onError(int errorCode, String errorMessage) {
                log.error("语音识别失败：{}，错误码：{}", errorMessage, errorCode);
                errorRef.compareAndSet(null, "[" + errorCode + "] " + errorMessage);
                latch.countDown();
            }

            @Override
            public void onPartialResult(String result, boolean isLast) {
                // 实时结果处理（如动态修正）
                log.info("识别中间结果：{}", result);
            }
        };

        // 执行异步识别
        recognizeSpeechAsync(audioData, businessParam, callback);

        // BUG FIX: the boolean result of await() used to be ignored, so both a
        // timeout and a recognizer error were indistinguishable from an empty
        // transcript — now each surfaces as a distinct exception.
        if (!latch.await(SYNC_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
            throw new TimeoutException(
                    "Speech recognition timed out after " + SYNC_TIMEOUT_SECONDS + "s");
        }
        String error = errorRef.get();
        if (error != null) {
            throw new IllegalStateException("Speech recognition failed: " + error);
        }
        return resultBuilder.toString();
    }

    /**
     * 执行语音识别（异步方法）
     *
     * <p>Opens a WebSocket session, streams the audio in
     * {@value #FRAME_SIZE}-byte frames, and reports results through
     * {@code callback}. Exactly one terminal callback
     * ({@code onSuccess} or {@code onError}) is guaranteed per invocation.</p>
     *
     * @param audioData     音频数据 (raw PCM bytes; must not be null)
     * @param businessParam 业务参数 (must not be null)
     * @param callback      识别结果回调 (must not be null)
     * @throws NullPointerException if any argument is null
     * @throws Exception            识别过程中的异常
     */
    public void recognizeSpeechAsync(byte[] audioData, BusinessParam businessParam,
                                     RecognitionCallback callback) throws Exception {
        Objects.requireNonNull(audioData, "audioData");
        Objects.requireNonNull(businessParam, "businessParam");
        Objects.requireNonNull(callback, "callback");

        // 获取鉴权URL
        String authUrl = AuthUtil.generateAuthUrl();
        URI uri = URI.create(authUrl);

        // 创建WebSocket客户端
        WebSocketClient client = new WebSocketClient(uri) {
            /** Accumulated transcript for this session. */
            private StringBuilder finalResult = new StringBuilder();
            /** Server-assigned session id, captured from the first message carrying one. */
            private String sessionId = null;
            /** Guarantees at most one terminal callback (onSuccess OR onError). */
            private final AtomicBoolean completed = new AtomicBoolean(false);

            @Override
            public void onOpen(ServerHandshake handshakedata) {
                log.info("WebSocket连接已打开");
                try {
                    // 发送第一帧音频数据和配置信息
                    sendFirstFrame(audioData);
                } catch (Exception e) {
                    log.error("发送第一帧数据失败", e);
                    fail(-1, "发送数据失败：" + e.getMessage());
                    this.close();
                }
            }

            @Override
            public void onMessage(String message) {
                log.debug("收到WebSocket消息：{}", message);
                try {
                    RecognitionResult result = objectMapper.readValue(message, RecognitionResult.class);

                    // 检查错误码
                    if (result.getCode() != 0) {
                        log.error("识别错误，错误码：{}，错误信息：{}", result.getCode(), result.getMessage());
                        fail(result.getCode(), result.getMessage());
                        this.close();
                        return;
                    }

                    // 保存会话ID
                    if (sessionId == null && result.getSid() != null) {
                        sessionId = result.getSid();
                        log.info("语音识别会话ID: {}", sessionId);
                    }

                    // 处理识别结果
                    if (result.getData() != null && result.getData().getResult() != null) {
                        String recognizedText = result.getRecognizedText();

                        // 处理动态修正结果 ("wpgs" = dynamic-correction mode)
                        if (result.isDynamicCorrection() && "wpgs".equals(businessParam.getDwa())) {
                            handleDynamicCorrection(recognizedText, result.getReplaceRange());
                        } else {
                            finalResult.append(recognizedText);
                        }

                        // 回调中间结果
                        callback.onPartialResult(finalResult.toString(), result.isLastResult());

                        // 如果是最后一个结果，返回最终识别文本
                        if (result.isLastResult()) {
                            succeed(finalResult.toString());
                            this.close();
                        }
                    }
                } catch (Exception e) {
                    log.error("处理识别结果失败", e);
                    fail(-1, "处理识别结果失败：" + e.getMessage());
                    this.close();
                }
            }

            @Override
            public void onClose(int code, String reason, boolean remote) {
                log.info("WebSocket连接已关闭，code: {}, reason: {}, remote: {}", code, reason, remote);
                // BUG FIX: the old guard (finalResult.length() == 0) never fired
                // for a connection dropped mid-recognition with partial text —
                // leaving synchronous callers blocked until timeout — and could
                // report a spurious error after a successful empty result.
                // fail() is a no-op once a terminal callback has been delivered.
                fail(code, "连接已关闭：" + reason);
            }

            @Override
            public void onError(Exception ex) {
                log.error("WebSocket连接发生错误", ex);
                fail(-1, "连接错误：" + ex.getMessage());
                this.close();
            }

            /**
             * Delivers the success callback exactly once.
             *
             * @param result final transcript
             */
            private void succeed(String result) {
                if (completed.compareAndSet(false, true)) {
                    callback.onSuccess(result);
                }
            }

            /**
             * Delivers the error callback exactly once; later calls are no-ops.
             *
             * @param code    error code
             * @param message error description
             */
            private void fail(int code, String message) {
                if (completed.compareAndSet(false, true)) {
                    callback.onError(code, message);
                }
            }

            /**
             * 处理动态修正结果 (dynamic-correction / "wpgs" results).
             *
             * <p>NOTE(review): the replace branch tokenizes on ASCII spaces,
             * which assumes space-delimited recognizer output — confirm this
             * matches the wpgs range semantics for CJK text.</p>
             *
             * @param newResult 新的识别结果
             * @param range     替换范围; [0,0] appends, otherwise the 1-based
             *                  word range [range[0], range[1]] is replaced
             */
            private void handleDynamicCorrection(String newResult, int[] range) {
                // 如果range为[0,0]表示追加，否则替换
                if (range[0] == 0 && range[1] == 0) {
                    finalResult.append(newResult);
                } else {
                    // 获取待替换的文本片段
                    String[] words = finalResult.toString().split(" ");
                    StringBuilder sb = new StringBuilder();
                    for (int i = 0; i < words.length; i++) {
                        if (i >= range[0] - 1 && i <= range[1] - 1) {
                            // 跳过要替换的部分
                            continue;
                        }
                        sb.append(words[i]).append(" ");
                    }
                    // 添加新识别的结果
                    finalResult = new StringBuilder(sb.toString().trim() + " " + newResult);
                }
            }

            /**
             * 发送第一帧数据
             *
             * <p>The first frame carries the common/business configuration plus
             * the first chunk of audio; remaining audio follows as audio-only
             * frames.</p>
             *
             * @param audioData 完整音频数据
             * @throws Exception 发送过程中的异常
             */
            private void sendFirstFrame(byte[] audioData) throws Exception {
                // 准备第一帧数据 (handles audio shorter than one frame)
                byte[] firstFrame = new byte[Math.min(FRAME_SIZE, audioData.length)];
                System.arraycopy(audioData, 0, firstFrame, 0, firstFrame.length);

                // 构建请求
                RequestData requestData = createRequestData(firstFrame,
                                                            VoiceRecognitionConstant.STATUS_FIRST_FRAME,
                                                            businessParam);
                String firstFrameData = objectMapper.writeValueAsString(requestData);

                // 发送第一帧
                this.send(firstFrameData);
                log.debug("发送第一帧数据: {}", firstFrameData);

                // 计算剩余帧数
                int remainingLength = audioData.length - firstFrame.length;
                if (remainingLength > 0) {
                    // 发送中间帧
                    sendRemainingFrames(audioData, firstFrame.length, remainingLength);
                } else {
                    // 如果只有一帧，直接发送结束帧
                    sendEndFrame();
                }
            }

            /**
             * 发送剩余帧数据
             *
             * <p>Sends full {@value #FRAME_SIZE}-byte frames while more than one
             * frame of audio remains, then a final (possibly short) chunk, then
             * the end-of-stream marker. Frames are paced by
             * {@value #FRAME_INTERVAL_MS}ms sleeps to approximate real-time
             * streaming; this blocks the WebSocket client thread.</p>
             *
             * @param audioData       完整音频数据
             * @param offset          起始偏移量
             * @param remainingLength 剩余长度 (kept for signature stability; the
             *                        loop derives the bound from audioData.length)
             * @throws Exception 发送过程中的异常
             */
            private void sendRemainingFrames(byte[] audioData, int offset, int remainingLength) throws Exception {
                int currentOffset = offset;
                while (currentOffset < audioData.length - FRAME_SIZE) {
                    // 准备当前帧数据
                    byte[] frame = new byte[FRAME_SIZE];
                    System.arraycopy(audioData, currentOffset, frame, 0, FRAME_SIZE);

                    // 构建请求
                    RequestData requestData = createAudioOnlyRequestData(frame,
                                                                       VoiceRecognitionConstant.STATUS_CONTINUE_FRAME);
                    String frameData = objectMapper.writeValueAsString(requestData);

                    // 发送当前帧
                    this.send(frameData);
                    log.debug("发送中间帧数据，偏移量：{}", currentOffset);

                    // 更新偏移量
                    currentOffset += FRAME_SIZE;

                    // 等待一定时间再发送下一帧
                    try {
                        Thread.sleep(FRAME_INTERVAL_MS);
                    } catch (InterruptedException e) {
                        // BUG FIX: restore the interrupt flag before propagating
                        // so upstream code can observe the interruption.
                        Thread.currentThread().interrupt();
                        throw e;
                    }
                }

                // 发送最后一帧数据
                if (currentOffset < audioData.length) {
                    int lastFrameSize = audioData.length - currentOffset;
                    byte[] lastFrame = new byte[lastFrameSize];
                    System.arraycopy(audioData, currentOffset, lastFrame, 0, lastFrameSize);

                    // 构建请求
                    RequestData requestData = createAudioOnlyRequestData(lastFrame,
                                                                       VoiceRecognitionConstant.STATUS_CONTINUE_FRAME);
                    String lastFrameData = objectMapper.writeValueAsString(requestData);

                    // 发送最后一帧
                    this.send(lastFrameData);
                    log.debug("发送最后一帧音频数据：{} 字节", lastFrameSize);
                }

                // 发送结束标记
                sendEndFrame();
            }

            /**
             * 发送结束标记 (a data-only frame with STATUS_LAST_FRAME and no audio).
             *
             * @throws Exception 发送过程中的异常
             */
            private void sendEndFrame() throws Exception {
                // 构建结束请求
                RequestData endRequest = new RequestData();
                RequestData.AudioData endData = new RequestData.AudioData();
                endData.setStatus(VoiceRecognitionConstant.STATUS_LAST_FRAME);
                endRequest.setData(endData);

                String endFrameData = objectMapper.writeValueAsString(endRequest);
                this.send(endFrameData);
                log.debug("发送结束标记");
            }
        };

        // 连接WebSocket服务器 (non-blocking; results arrive via the callbacks above)
        client.connect();
    }

    /**
     * 创建完整的请求数据（包括common、business和data）
     *
     * <p>Used only for the first frame, which must carry the app id and the
     * business configuration alongside the audio.</p>
     *
     * @param audioFrame    音频帧数据
     * @param status        状态（0-第一帧，1-中间帧，2-最后一帧）
     * @param businessParam 业务参数; language/domain/accent fall back to the
     *                      constants' defaults when unset
     * @return 请求数据对象
     */
    private RequestData createRequestData(byte[] audioFrame, int status, BusinessParam businessParam) {
        // 创建common参数
        RequestData.Common common = RequestData.Common.builder()
                .appId(VoiceRecognitionConstant.APP_ID)
                .build();

        // 创建business参数
        RequestData.Business business = RequestData.Business.builder()
                .language(businessParam.getLanguage() != null ?
                          businessParam.getLanguage() : VoiceRecognitionConstant.LANGUAGE_CHINESE)
                .domain(businessParam.getDomain() != null ?
                        businessParam.getDomain() : VoiceRecognitionConstant.DOMAIN_COMMON)
                .accent(businessParam.getAccent() != null ?
                        businessParam.getAccent() : VoiceRecognitionConstant.ACCENT_MANDARIN)
                .vadEos(businessParam.getVadEos())
                .dwa(businessParam.getDwa())
                .pd(businessParam.getPd())
                .ptt(businessParam.getPtt())
                .pcm(businessParam.getPcm())
                .rlang(businessParam.getRlang())
                .vinfo(businessParam.getVinfo())
                .nunum(businessParam.getNunum())
                .speexSize(businessParam.getSpeexSize())
                .nbest(businessParam.getNbest())
                .wbest(businessParam.getWbest())
                .build();

        // 创建data参数
        RequestData.AudioData data = RequestData.AudioData.builder()
                .status(status)
                .format(VoiceRecognitionConstant.FORMAT_PCM_16K) // 默认使用16k采样率
                .encoding(VoiceRecognitionConstant.ENCODING_RAW) // 默认使用PCM原始格式
                .audio(Base64.getEncoder().encodeToString(audioFrame))
                .build();

        // 组合最终请求数据
        return RequestData.builder()
                .common(common)
                .business(business)
                .data(data)
                .build();
    }

    /**
     * 创建仅包含音频数据的请求（用于中间帧和最后一帧）
     *
     * @param audioFrame 音频帧数据
     * @param status     状态（1-中间帧，2-最后一帧）
     * @return 请求数据对象
     */
    private RequestData createAudioOnlyRequestData(byte[] audioFrame, int status) {
        // 如果是最后一帧标记，不需要包含音频数据
        RequestData.AudioData data = RequestData.AudioData.builder()
                .status(status)
                .build();

        // 如果不是最后一帧，添加音频数据
        if (status != VoiceRecognitionConstant.STATUS_LAST_FRAME) {
            data.setFormat(VoiceRecognitionConstant.FORMAT_PCM_16K);
            data.setEncoding(VoiceRecognitionConstant.ENCODING_RAW);
            data.setAudio(Base64.getEncoder().encodeToString(audioFrame));
        }

        // 组合最终请求数据
        return RequestData.builder()
                .data(data)
                .build();
    }

    /**
     * 语音识别回调接口
     *
     * <p>Implementations receive at most one terminal callback
     * ({@link #onSuccess} or {@link #onError}) per recognition, plus any number
     * of {@link #onPartialResult} calls before it.</p>
     */
    public interface RecognitionCallback {
        /**
         * 识别成功回调
         *
         * @param result 最终识别结果
         */
        void onSuccess(String result);

        /**
         * 识别错误回调
         *
         * @param errorCode 错误码
         * @param errorMessage 错误信息
         */
        void onError(int errorCode, String errorMessage);

        /**
         * 部分识别结果回调
         *
         * @param result 当前识别结果
         * @param isLast 是否为最后一个结果
         */
        void onPartialResult(String result, boolean isLast);
    }
}