package com.example.demo.websocket;

import com.alibaba.dashscope.audio.asr.recognition.RecognitionResult;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.fastjson.JSON;
import com.example.demo.config.TTSConfig;
import com.example.demo.service.MessageService;
import com.example.demo.service.SpeechRecognitionService;
import com.example.demo.Mapper.SessionMapper;
import com.alibaba.dashscope.audio.qwen_tts_realtime.QwenTtsRealtime;
import com.alibaba.dashscope.audio.qwen_tts_realtime.QwenTtsRealtimeAudioFormat;
import com.alibaba.dashscope.audio.qwen_tts_realtime.QwenTtsRealtimeCallback;
import com.alibaba.dashscope.audio.qwen_tts_realtime.QwenTtsRealtimeConfig;
import com.alibaba.dashscope.audio.qwen_tts_realtime.QwenTtsRealtimeParam;
import com.example.demo.service.TTSService;
import com.google.gson.JsonObject;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.codec.ServerSentEvent;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.*;
import reactor.core.publisher.Flux;

import java.util.concurrent.atomic.AtomicReference;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Base64;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicInteger;
import java.nio.ByteOrder;
import java.util.concurrent.ConcurrentHashMap;
import java.net.URI;

@Component
@Slf4j
public class SpeechRecognitionWebSocketHandler implements WebSocketHandler {

    @Autowired
    private SpeechRecognitionService speechRecognitionService;

    @Autowired
    private MessageService messageService;

    @Autowired
    private SessionMapper sessionMapper;

    @Autowired
    private TTSService ttsService;


    // Active WebSocket sessions, keyed by the transport-level WebSocket session id.
    private final ConcurrentHashMap<String, WebSocketSession> sessions = new ConcurrentHashMap<>();
    // Maps a WebSocket session id to the business sessionId parsed from the handshake URL.
    private final ConcurrentHashMap<String, Long> webSocketToSessionId = new ConcurrentHashMap<>();
    // Monotonic id generator for outgoing TTS audio streams (one stream per AI reply).
    private final AtomicLong audioStreamCounter = new AtomicLong(0);

    /**
     * Registers a new WebSocket connection and resolves the business sessionId
     * from the handshake URL's {@code sessionId} query parameter.
     *
     * Fix: {@link WebSocketSession#getUri()} may return {@code null}, and the
     * original code dereferenced it unconditionally ({@code uri.getQuery()}),
     * risking a NullPointerException on connect. Query parsing is now null-safe
     * and extracted into {@link #parseSessionId(URI)}.
     *
     * Recognition is deliberately NOT started here; the client must send an
     * explicit "start" control message, which avoids duplicate starts.
     */
    @Override
    public void afterConnectionEstablished(WebSocketSession session) throws Exception {
        String webSocketSessionId = session.getId();
        sessions.put(webSocketSessionId, session);

        Long sessionId = parseSessionId(session.getUri());

        if (sessionId != null) {
            webSocketToSessionId.put(webSocketSessionId, sessionId);
            log.info("🔗 WebSocket连接建立: webSocketId={}, sessionId={}", webSocketSessionId, sessionId);
        } else {
            log.warn("⚠️ WebSocket连接建立但未提供sessionId: webSocketId={}", webSocketSessionId);
        }

        // Only notify the client that the connection is up.
        sendMessage(session, createMessage("connection", "connected", "WebSocket连接成功"));
        log.info("📤 连接成功消息已发送: webSocketId={}", webSocketSessionId);
    }

    /**
     * Extracts the numeric {@code sessionId} query parameter from the handshake
     * URI.
     *
     * @param uri handshake URI; may be {@code null}
     * @return the parsed sessionId, or {@code null} when the URI/query is absent,
     *         the parameter is missing, or its value is not a valid long
     */
    private Long parseSessionId(URI uri) {
        if (uri == null || uri.getQuery() == null) {
            return null;
        }
        for (String param : uri.getQuery().split("&")) {
            String[] keyValue = param.split("=");
            if (keyValue.length == 2 && "sessionId".equals(keyValue[0])) {
                try {
                    return Long.parseLong(keyValue[1]);
                } catch (NumberFormatException e) {
                    log.warn("无效的sessionId参数: {}", keyValue[1]);
                }
            }
        }
        return null;
    }

    /**
     * Routes incoming frames: binary frames carry raw audio (expected 16 kHz
     * mono PCM) and are forwarded to the recognition service; text frames carry
     * JSON control commands.
     */
    @Override
    public void handleMessage(WebSocketSession session, WebSocketMessage<?> message) throws Exception {
        String wsId = session.getId();

        if (message instanceof TextMessage) {
            // Control channel: JSON commands such as start/stop/ping.
            String json = ((TextMessage) message).getPayload();
            log.debug("接收到文本消息: 会话={}, 消息={}", wsId, json);

            try {
                handleControlMessage(session, JSON.parseObject(json, ControlMessage.class));
            } catch (Exception e) {
                log.error("解析控制消息失败: ", e);
                sendMessage(session, createMessage("error", "invalid_message", "消息格式错误"));
            }
        } else if (message instanceof BinaryMessage) {
            // Audio channel: copy the payload out of the buffer and forward it.
            ByteBuffer payload = ((BinaryMessage) message).getPayload();
            byte[] pcm = new byte[payload.remaining()];
            payload.get(pcm);

            log.info("📥 接收到音频数据: 会话={}, 数据长度={}字节", wsId, pcm.length);

            speechRecognitionService.sendAudioData(wsId, pcm);
            log.info("🔄 音频数据已转发到语音识别服务: 会话={}", wsId);
        }
    }

    /**
     * Logs a transport-level failure and releases all per-session resources.
     */
    @Override
    public void handleTransportError(WebSocketSession session, Throwable exception) throws Exception {
        String wsId = session.getId();
        log.error("WebSocket传输错误: 会话={}", wsId, exception);
        cleanupSession(wsId);
    }

    /**
     * Releases per-session resources once the client disconnects.
     */
    @Override
    public void afterConnectionClosed(WebSocketSession session, CloseStatus closeStatus) throws Exception {
        String wsId = session.getId();
        log.info("WebSocket连接关闭: 会话={}, 状态={}", wsId, closeStatus);
        cleanupSession(wsId);
    }

    @Override
    public boolean supportsPartialMessages() {
        // Frames are always delivered whole; fragmented messages are not supported.
        return false;
    }

    /**
     * Pushes a recognition result (partial or final) to the client as a JSON
     * frame; a final sentence additionally kicks off the streaming AI reply.
     */
    private void handleRecognitionResult(WebSocketSession session, RecognitionResult result) {
        try {
            boolean isFinal = result.isSentenceEnd();
            String messageType = isFinal ? "final_result" : "partial_result";
            String text = result.getSentence().getText();

            log.info("🎤 处理识别结果: 会话={}, 类型={}, 文本='{}'",
                    session.getId(), messageType, text);

            ResponseMessage responseMessage = new ResponseMessage();
            responseMessage.setType("recognition");
            responseMessage.setSubType(messageType);
            responseMessage.setText(text);
            responseMessage.setIsFinal(isFinal);
            responseMessage.setTimestamp(System.currentTimeMillis());

            String jsonMessage = JSON.toJSONString(responseMessage);
            sendMessage(session, jsonMessage);

            // A completed sentence triggers the streaming AI conversation; the
            // reply is pushed back over this same WebSocket.
            if (isFinal) {
                triggerAiResponse(session, text);
            }

            log.info("📤 识别结果已发送到前端: 会话={}, 消息={}", session.getId(), jsonMessage);
        } catch (Exception e) {
            log.error("处理识别结果异常: 会话={}", session.getId(), e);
        }
    }

    // ===== Trigger the AI conversation and stream text/audio back over the WebSocket =====

    /**
     * For a final recognized sentence, starts the streaming AI chat reply and a
     * realtime TTS session. AI status events are sent as JSON text frames;
     * synthesized audio is sent as binary frames with a 24-byte big-endian
     * header ('AUD1' magic, long streamId, int seq, long timestamp) followed by
     * PCM16LE @ 24 kHz mono.
     *
     * Fix: the DashScope API key was printed to stdout via
     * {@code System.out.println(apiKey)} — secrets must never be logged, so
     * that line has been removed.
     *
     * @param session the WebSocket session to stream results to
     * @param text    the final recognized sentence used as the AI prompt
     */
    private void triggerAiResponse(WebSocketSession session, String text) {
        String webSocketSessionId = session.getId();

        // Map the transport-level WebSocket id to the business sessionId.
        Long sessionId = webSocketToSessionId.get(webSocketSessionId);
        if (sessionId == null) {
            log.error("未找到对应的sessionId: webSocketId={}", webSocketSessionId);
            sendMessage(session, createMessage("error", "session_not_found", "未找到对应的会话"));
            return;
        }

        try {
            // The TTS voice (timbre) is configured per business session in the database.
            String voice = sessionMapper.getVoiceBySessionId(sessionId);
            if (voice == null || voice.isEmpty()) {
                voice = "Chelsie"; // fall back to the default voice
                log.warn("未找到voice配置，使用默认音色: sessionId={}", sessionId);
            }
            log.info("获取到voice配置: sessionId={}, voice={}", sessionId, voice);

            // Initialize the realtime TTS client. NOTE: never print or log the API key.
            String apiKey = ttsService.getKey();
            if (apiKey == null || apiKey.isEmpty()) {
                log.error("DASHSCOPE_API_KEY 未配置");
                sendMessage(session, createMessage("error", "tts_api_key_error", "未配置DASHSCOPE_API_KEY"));
                return;
            }
            QwenTtsRealtimeParam param = QwenTtsRealtimeParam.builder()
                    .model("qwen3-tts-realtime")
                    .apikey(apiKey)
                    .build();

            final AtomicReference<QwenTtsRealtime> ttsRef = new AtomicReference<>(null);
            final long streamId = audioStreamCounter.incrementAndGet();
            final AtomicInteger seqCounter = new AtomicInteger(0);
            // Guards finish() so it is invoked at most once (normal end or error path).
            final AtomicReference<Boolean> ttsFinished = new AtomicReference<>(false);
            QwenTtsRealtime qwenTtsRealtime = new QwenTtsRealtime(param, new QwenTtsRealtimeCallback() {
                @Override
                public void onOpen() {
                    // TTS connection established — tell the client audio is coming.
                    sendMessage(session, JSON.toJSONString(createAudioMessage("ready", "", streamId)));
                }

                @Override
                public void onEvent(JsonObject message) {
                    String type = message.get("type").getAsString();
                    switch (type) {
                        case "response.audio.delta": {
                            // Relay a realtime audio chunk as a binary frame
                            // (24-byte header + PCM16LE payload).
                            try {
                                String recvAudioB64 = message.get("delta").getAsString();
                                byte[] pcm = Base64.getDecoder().decode(recvAudioB64);
                                int seq = seqCounter.incrementAndGet();
                                long ts = System.currentTimeMillis();
                                ByteBuffer buf = ByteBuffer.allocate(4 + 8 + 4 + 8 + pcm.length);
                                buf.order(ByteOrder.BIG_ENDIAN);
                                buf.put((byte)0x41).put((byte)0x55).put((byte)0x44).put((byte)0x31); // 'AUD1'
                                buf.putLong(streamId);
                                buf.putInt(seq);
                                buf.putLong(ts);
                                buf.put(pcm);
                                buf.flip();
                                session.sendMessage(new BinaryMessage(buf));
                            } catch (Exception e) {
                                log.error("发送二进制音频帧失败: 会话={}", webSocketSessionId, e);
                            }
                            break;
                        }
                        case "response.audio.done":
                            // All audio for the current response has been generated.
                            sendMessage(session, JSON.toJSONString(createAudioMessage("audio_done", "", streamId)));
                            break;
                        case "response.done":
                            // Single response fully complete (both text and audio).
                            sendMessage(session, JSON.toJSONString(createAiMessage("response_done", "")));
                            break;
                        case "session.finished":
                            // End of this speech session; the frontend drains its
                            // playback buffer before stopping.
                            sendMessage(session, JSON.toJSONString(createAudioMessage("finish", "", streamId)));
                            break;
                        default:
                            break;
                    }
                }

                @Override
                public void onClose(int code, String reason) {
                    // Nothing to do on TTS connection close.
                }
            });
            ttsRef.set(qwenTtsRealtime);

            try {
                qwenTtsRealtime.connect();
            } catch (NoApiKeyException e) {
                log.error("DASHSCOPE_API_KEY 未配置或无效", e);
                sendMessage(session, createMessage("error", "tts_api_key_error", "TTS API密钥配置错误"));
                return;
            }

            // Configure the TTS session with the per-session voice.
            QwenTtsRealtimeConfig config = QwenTtsRealtimeConfig.builder()
                    .voice(voice)
                    .responseFormat(QwenTtsRealtimeAudioFormat.PCM_24000HZ_MONO_16BIT)
                    .mode("server_commit")
                    .build();
            qwenTtsRealtime.updateSession(config);

            // Persona description configured for this business session.
            String personal = sessionMapper.getDescriptionBySessionId(sessionId);
            // Start the streaming AI reply; text deltas are handed to TTS for realtime synthesis.
            Flux<ServerSentEvent<Object>> flux = messageService.streamResponse(text, sessionId, false, personal);
            // Buffer small text fragments before committing them to TTS to
            // reduce audio fragmentation.
            final AtomicReference<StringBuilder> textBufferRef = new AtomicReference<>(new StringBuilder());
            flux.subscribe(sse -> {
                try {
                    String eventName = sse.event() != null ? sse.event() : null;
                    Object data = sse.data();
                    if (eventName == null) {
                        String chunk = null;
                        if (data instanceof java.util.Map) {
                            Object v = ((java.util.Map<?, ?>) data).get("value");
                            if (v != null) chunk = String.valueOf(v);
                        }
                        if (chunk != null && !chunk.isEmpty()) {
                            // Accumulate fragments; flush to TTS at sentence
                            // boundaries or once enough text has built up.
                            StringBuilder buf = textBufferRef.get();
                            buf.append(chunk);
                            if (shouldFlushToTts(buf)) {
                                String toSend = buf.toString();
                                buf.setLength(0);
                                ttsRef.get().appendText(toSend);
                            }
                        }
                    } else {
                        switch (eventName) {
                            case "ready":
                                // Forward the AI "ready" status event.
                                sendMessage(session, JSON.toJSONString(createAiMessage("ready", "")));
                                break;
                            case "title":
                                String title = null;
                                if (data instanceof java.util.Map) {
                                    Object t = ((java.util.Map<?, ?>) data).get("title");
                                    if (t != null) title = String.valueOf(t);
                                }
                                sendMessage(session, JSON.toJSONString(createAiMessage("title", title != null ? title : "")));
                                break;
                            case "finish":
                                // Flush any remaining buffered text, then tell TTS
                                // the input stream is complete.
                                StringBuilder buf = textBufferRef.get();
                                if (buf.length() > 0) {
                                    String remaining = buf.toString();
                                    buf.setLength(0);
                                    ttsRef.get().appendText(remaining);
                                }
                                // Ensure finish() runs at most once.
                                if (ttsFinished.compareAndSet(false, true)) {
                                    ttsRef.get().finish();
                                }
                                break;
                            default:
                                sendMessage(session, createMessage("ai_log", "event_" + eventName, JSON.toJSONString(data)));
                        }
                    }
                } catch (Exception inner) {
                    log.error("处理AI流事件失败: 会话= {}", webSocketSessionId, inner);
                    sendMessage(session, createMessage("error", "ai_stream_error", inner.getMessage()));
                }
            }, err -> {
                log.error("AI流式回复订阅出错: 会话= {}", webSocketSessionId, err);
                sendMessage(session, createMessage("error", "ai_stream_error", err.getMessage()));
                try {
                    // Best effort: flush remaining text and close the TTS session
                    // even on error.
                    StringBuilder buf = textBufferRef.get();
                    if (buf.length() > 0) {
                        String remaining = buf.toString();
                        buf.setLength(0);
                        ttsRef.get().appendText(remaining);
                    }
                    // Ensure finish() runs at most once.
                    if (ttsFinished.compareAndSet(false, true)) {
                        ttsRef.get().finish();
                    }
                } catch (Exception ignored) {
                    // TTS teardown is best-effort on the error path.
                }
            });
        } catch (Exception e) {
            log.error("触发AI回复失败: 会话={}", sessionId, e);
            sendMessage(session, createMessage("error", "ai_invoke_failed", "AI回复调用失败"));
        }
    }

    /**
     * Decides whether the buffered AI text should be flushed to TTS.
     *
     * Flush when any of the following holds:
     *  - the buffer reached MAX_LEN (avoid overly long single commits);
     *  - it ends with sentence-final punctuation and has at least 12 chars;
     *  - it reached MIN_LEN (~one Chinese sentence, reduces audio fragmentation).
     *
     * Fix: removed the dead "pause punctuation" branch — its condition
     * ({@code hasPause && length >= MIN_LEN}) was fully subsumed by the final
     * {@code length >= MIN_LEN} check, so it could never change the result.
     *
     * @param buf the accumulated, not-yet-synthesized text
     * @return true when the buffer should be committed to TTS now
     */
    private boolean shouldFlushToTts(StringBuilder buf) {
        final int MIN_LEN = 48;  // ~one Chinese sentence; reduces fragmentation
        final int MAX_LEN = 120; // hard cap to avoid overly long commits

        int len = buf.length();
        if (len >= MAX_LEN) return true; // force-flush oversized buffers

        // Prefer flushing at sentence-final punctuation, given a minimum length.
        String s = buf.toString();
        boolean hasSentenceEnd = s.endsWith("。") || s.endsWith("！") || s.endsWith("？") ||
                                s.endsWith("!") || s.endsWith("?") || s.endsWith(".");
        if (hasSentenceEnd && len >= 12) return true;

        // Otherwise flush once the minimum length threshold is reached.
        return len >= MIN_LEN;
    }

    /** Builds an "ai"-typed envelope carrying an AI status/text event. */
    private ResponseMessage createAiMessage(String subType, String text) {
        ResponseMessage message = new ResponseMessage();
        message.setType("ai");
        message.setSubType(subType);
        message.setText(text);
        message.setTimestamp(System.currentTimeMillis());
        return message;
    }

    /** Builds an "audio"-typed envelope for TTS stream events, tagged with the stream id. */
    private ResponseMessage createAudioMessage(String subType, String base64, Long streamId) {
        ResponseMessage message = new ResponseMessage();
        message.setType("audio");
        message.setSubType(subType);
        message.setText(base64);
        message.setStreamId(streamId);
        message.setTimestamp(System.currentTimeMillis());
        return message;
    }

    /**
     * Dispatches a client control command.
     *
     * Supported actions:
     *  - "start": begin streaming speech recognition for this WebSocket session
     *  - "stop":  end recognition
     *  - "ping":  liveness check, answered with "pong"
     *
     * Fix: a message without an "action" field previously caused a
     * NullPointerException in {@code switch (action)}; such messages are now
     * reported to the client as an unknown command.
     */
    private void handleControlMessage(WebSocketSession session, ControlMessage controlMessage) {
        String sessionId = session.getId();
        String action = controlMessage.getAction();

        if (action == null) {
            log.warn("未知控制命令: 会话={}, 命令={}", sessionId, action);
            sendMessage(session, createMessage("error", "unknown_action", "未知命令"));
            return;
        }

        switch (action) {
            case "start":
                log.info("开始录音: 会话={}", sessionId);
                try {
                    // Guard against duplicate starts for an already-active session.
                    if (speechRecognitionService.isSessionActive(sessionId)) {
                        log.info("会话已处于激活状态，忽略重复启动: {}", sessionId);
                        sendMessage(session, createMessage("control", "already_started", "识别已启动"));
                        break;
                    }
                    // Start recognition; results flow back through handleRecognitionResult.
                    speechRecognitionService.startRecognition(sessionId, result -> {
                        handleRecognitionResult(session, result);
                    });
                    sendMessage(session, createMessage("control", "started", "开始录音"));
                    log.info("语音识别服务已启动: 会话={}", sessionId);
                } catch (NoApiKeyException e) {
                    log.error("API密钥异常: 会话={}", sessionId, e);
                    sendMessage(session, createMessage("error", "api_key_error", "API密钥配置错误"));
                } catch (Exception e) {
                    log.error("启动语音识别失败: 会话={}", sessionId, e);
                    sendMessage(session, createMessage("error", "start_failed", "启动语音识别失败"));
                }
                break;

            case "stop":
                log.info("停止录音: 会话={}", sessionId);
                speechRecognitionService.endRecognition(sessionId);
                sendMessage(session, createMessage("control", "stopped", "停止录音"));
                break;

            case "ping":
                sendMessage(session, createMessage("control", "pong", "pong"));
                break;

            default:
                log.warn("未知控制命令: 会话={}, 命令={}", sessionId, action);
                sendMessage(session, createMessage("error", "unknown_action", "未知命令"));
        }
    }

    /**
     * Best-effort delivery of a text frame to the client. Frames for closed
     * sessions are silently dropped; send failures are logged and swallowed so
     * callers never need to handle IOException.
     */
    private void sendMessage(WebSocketSession session, String message) {
        if (!session.isOpen()) {
            return;
        }
        try {
            session.sendMessage(new TextMessage(message));
        } catch (IOException e) {
            log.error("发送消息失败: ", e);
        }
    }

    /**
     * Serializes a simple typed notification into the JSON envelope sent to
     * clients (type/subType/text/timestamp).
     */
    private String createMessage(String type, String subType, String message) {
        ResponseMessage envelope = new ResponseMessage();
        envelope.setType(type);
        envelope.setSubType(subType);
        envelope.setText(message);
        envelope.setTimestamp(System.currentTimeMillis());
        return JSON.toJSONString(envelope);
    }

    /**
     * Releases all per-session state: the session itself, its business-id
     * mapping, and any active recognition.
     *
     * Fix: the {@code webSocketToSessionId} entry was never removed, leaking one
     * map entry per closed connection; it is now cleaned up alongside the session.
     */
    private void cleanupSession(String sessionId) {
        sessions.remove(sessionId);
        webSocketToSessionId.remove(sessionId);
        speechRecognitionService.endRecognition(sessionId);
    }

    /**
     * Inbound control message from the client, e.g. {@code {"action":"start"}}.
     * Actions understood by this handler: "start", "stop", "ping".
     */
    public static class ControlMessage {
        private String action; // command name: "start" | "stop" | "ping"
        private Object data;   // optional action-specific payload (currently unused here)

        public String getAction() { return action; }

        public void setAction(String action) { this.action = action; }

        public Object getData() { return data; }

        public void setData(Object data) { this.data = data; }
    }

    /**
     * Outbound JSON envelope for every text frame sent to the client:
     * recognition results, AI status events, audio stream events, control
     * acknowledgements, and errors.
     */
    public static class ResponseMessage {
        private String type;     // category: "recognition" | "ai" | "audio" | "control" | "connection" | "error" | ...
        private String subType;  // event within the category, e.g. "partial_result", "ready", "finish"
        private String text;     // payload text (recognized text, AI text, base64 audio, or a status note)
        private Boolean isFinal; // recognition results only: true once the sentence is complete
        private Long timestamp;  // send time, epoch millis
        private Long streamId;   // audio events only: identifies the TTS stream

        public String getType() { return type; }

        public void setType(String type) { this.type = type; }

        public String getSubType() { return subType; }

        public void setSubType(String subType) { this.subType = subType; }

        public String getText() { return text; }

        public void setText(String text) { this.text = text; }

        public Boolean getIsFinal() { return isFinal; }

        public void setIsFinal(Boolean isFinal) { this.isFinal = isFinal; }

        public Long getTimestamp() { return timestamp; }

        public void setTimestamp(Long timestamp) { this.timestamp = timestamp; }

        public Long getStreamId() { return streamId; }

        public void setStreamId(Long streamId) { this.streamId = streamId; }
    }
}