package org.example.websocket;

import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import okhttp3.*;
import org.example.service.VoiceService;
import org.example.service.SenseVoiceWebSocketClient;
import org.example.service.OlamaService;
import org.example.service.TTSBridgeService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.BinaryMessage;
import org.springframework.web.socket.CloseStatus;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;
import org.springframework.web.socket.handler.AbstractWebSocketHandler;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

@Component
public class VoiceWebSocketHandler extends AbstractWebSocketHandler {
    private static final Logger logger = LoggerFactory.getLogger(VoiceWebSocketHandler.class);

    /** Attribute key used to stash the generated session id inside the WebSocketSession (O(1) lookup). */
    private static final String SESSION_ID_ATTR = "voice.sessionId";

    /** sessionId -> browser-facing WebSocket session. */
    private final Map<String, WebSocketSession> sessions = new ConcurrentHashMap<>();

    /** sessionId -> dedicated SenseVoice ASR WebSocket client. */
    private final Map<String, SenseVoiceWebSocketClient> senseVoiceClients = new ConcurrentHashMap<>();

    /** Per-session audio metadata, held until the matching binary frame arrives. */
    private final Map<String, JsonObject> pendingMetadata = new ConcurrentHashMap<>();

    /** Per-session latest recognized text, used to derive incremental (delta) updates. */
    private final Map<String, String> currentRecognizedTexts = new ConcurrentHashMap<>();

    /** Per-session streaming-recognition flag. */
    private final Map<String, Boolean> streamingStatus = new ConcurrentHashMap<>();

    /** Per-session "AI request in flight" flag; guards against duplicate AI calls. */
    private final Map<String, Boolean> aiProcessingStatus = new ConcurrentHashMap<>();

    /** Per-session flag: forward final recognition results to the AI model? */
    private final Map<String, AtomicBoolean> sendToAiStatus = new ConcurrentHashMap<>();

    private final Gson gson = new Gson();

    @Autowired
    private VoiceService voiceService;

    @Autowired
    private OlamaService olamaService;

    @Autowired
    private TTSBridgeService ttsBridgeService;

    @Value("${sensevoice.local.url:http://localhost:8000/api/v1/asr}")
    private String localSenseVoiceHttpUrl;

    @Value("${sensevoice.websocket.url:ws://localhost:9001/api/v1/asr/stream}")
    private String localSenseVoiceWsUrl;

    @Value("${ai.enabled:true}")
    private boolean aiEnabled;

    @Value("${ai.auto_process:true}")
    private boolean aiAutoProcess;

    /**
     * Registers a new client connection: assigns a session id, creates a dedicated
     * SenseVoice ASR client for the session, initializes per-session state and
     * sends the session id back to the client.
     */
    @Override
    public void afterConnectionEstablished(WebSocketSession session) {
        String sessionId = UUID.randomUUID().toString();
        sessions.put(sessionId, session);
        // Store the id on the session itself so getSessionId() is O(1).
        session.getAttributes().put(SESSION_ID_ATTR, sessionId);
        logger.info("WebSocket连接已建立，会话ID: {}", sessionId);

        // One SenseVoice WebSocket client per browser session; callbacks carry the session id.
        SenseVoiceWebSocketClient senseVoiceClient = new SenseVoiceWebSocketClient(
                localSenseVoiceWsUrl,
                text -> handleSenseVoiceRecognitionResult(sessionId, text),
                error -> handleSenseVoiceError(sessionId, error));
        senseVoiceClients.put(sessionId, senseVoiceClient);

        // Initialize per-session state.
        currentRecognizedTexts.put(sessionId, "");
        streamingStatus.put(sessionId, false);
        aiProcessingStatus.put(sessionId, false);
        sendToAiStatus.put(sessionId, new AtomicBoolean(aiAutoProcess));

        // Register the session with the TTS bridge so TTS results can be routed back.
        ttsBridgeService.registerSession(sessionId, session);

        // Tell the client its session id.
        JsonObject response = new JsonObject();
        response.addProperty("type", "connection");
        response.addProperty("sessionId", sessionId);
        sendMessage(session, response.toString());
    }

    /** Logs transport-level errors and forwards a description to the client. */
    @Override
    public void handleTransportError(WebSocketSession session, Throwable exception) {
        logger.error("WebSocket传输错误", exception);
        sendError(session, "传输错误: " + exception.getMessage());
    }

    /**
     * Tears down all per-session state when the client disconnects: closes the
     * SenseVoice client, clears the state maps and unregisters from the TTS bridge.
     */
    @Override
    public void afterConnectionClosed(WebSocketSession session, CloseStatus status) {
        String sessionId = getSessionId(session);
        if (sessionId != null) {
            // Close the matching SenseVoice WebSocket connection.
            SenseVoiceWebSocketClient senseVoiceClient = senseVoiceClients.remove(sessionId);
            if (senseVoiceClient != null) {
                senseVoiceClient.close();
            }

            // Drop all per-session state.
            sessions.remove(sessionId);
            pendingMetadata.remove(sessionId);
            currentRecognizedTexts.remove(sessionId);
            streamingStatus.remove(sessionId);
            aiProcessingStatus.remove(sessionId);
            sendToAiStatus.remove(sessionId);

            // Unregister from the TTS bridge.
            ttsBridgeService.removeSession(sessionId);

            logger.info("WebSocket连接已关闭，会话ID: {}, 状态码: {}, 原因: {}",
                    sessionId, status.getCode(), status.getReason());
        }
    }

    /** Sends a text frame to the client, swallowing (but logging) send failures. */
    private void sendMessage(WebSocketSession session, String message) {
        try {
            if (session.isOpen()) {
                session.sendMessage(new TextMessage(message));
            }
        } catch (Exception e) {
            logger.error("发送WebSocket消息时出错", e);
        }
    }

    /** Sends a {@code {"type":"error","message":...}} frame to the client. */
    private void sendError(WebSocketSession session, String errorMessage) {
        JsonObject error = new JsonObject();
        error.addProperty("type", "error");
        error.addProperty("message", errorMessage);
        sendMessage(session, error.toString());
    }

    /**
     * Returns the per-session send-to-AI flag, creating it lazily with the
     * configured default. Avoids the NPE that a bare {@code sendToAiStatus.get(id)}
     * would throw when the entry is missing (e.g. a race with disconnect).
     */
    private AtomicBoolean sendToAiFlag(String sessionId) {
        return sendToAiStatus.computeIfAbsent(sessionId, k -> new AtomicBoolean(aiAutoProcess));
    }

    /**
     * Handles a recognition result coming back from SenseVoice: computes the
     * incremental text (if streaming), forwards the result to the client, and —
     * for final results — optionally hands the text to the AI pipeline or
     * directly to TTS.
     *
     * @param sessionId  id of the originating client session
     * @param resultJson raw JSON payload from the SenseVoice client
     */
    private void handleSenseVoiceRecognitionResult(String sessionId, String resultJson) {
        try {
            WebSocketSession clientSession = sessions.get(sessionId);
            if (clientSession == null || !clientSession.isOpen()) {
                logger.warn("客户端会话已关闭，无法发送识别结果，会话ID: {}", sessionId);
                return;
            }

            // Parse the SenseVoice result envelope.
            JsonObject resultObj = gson.fromJson(resultJson, JsonObject.class);
            String type = resultObj.get("type").getAsString();

            if ("recognition".equals(type)) {
                String text = resultObj.get("text").getAsString();
                boolean isFinal = resultObj.get("isFinal").getAsBoolean();

                // Streaming mode affects how deltas are computed below.
                boolean isStreaming = streamingStatus.getOrDefault(sessionId, false);

                // Incremental text, either provided by the server or derived here.
                String newText = null;

                if (resultObj.has("new_text")) {
                    // Server already provided the delta.
                    newText = resultObj.get("new_text").getAsString();
                    logger.debug("收到增量文本: {}", newText);
                } else if (isStreaming) {
                    // No server-side delta; derive it from the previous full text.
                    String previousText = currentRecognizedTexts.getOrDefault(sessionId, "");
                    if (text.startsWith(previousText) && !text.equals(previousText)) {
                        // Current text extends the previous one — the delta is the suffix.
                        newText = text.substring(previousText.length());
                        logger.debug("计算得到增量文本: {}", newText);
                    } else if (!text.equals(previousText)) {
                        // Text changed in a non-append way — treat the full text as the delta.
                        newText = text;
                        logger.debug("文本发生变化，使用全文作为增量: {}", newText);
                    }
                }

                // Remember the latest full text for the next delta computation.
                if (text != null && !text.isEmpty()) {
                    currentRecognizedTexts.put(sessionId, text);
                }

                // Forward the recognition result to the client.
                JsonObject response = new JsonObject();
                response.addProperty("type", "recognition");
                response.addProperty("text", text);

                // Include the delta when available so the frontend can append instead of replace.
                if (newText != null && !newText.isEmpty()) {
                    response.addProperty("new_text", newText);
                }

                // Timestamp and flags help the frontend order/route the messages.
                response.addProperty("timestamp", System.currentTimeMillis());
                response.addProperty("isFinal", isFinal);
                response.addProperty("streaming", isStreaming);

                sendMessage(clientSession, response.toString());

                logger.info("已发送识别结果给客户端，文本长度: {}, 是否最终: {}, 是否包含增量文本: {}, 会话ID: {}",
                        text.length(), isFinal, newText != null, sessionId);

                // Null-safe read of the per-session AI routing flag.
                boolean forwardToAi = sendToAiFlag(sessionId).get();

                // Final result + AI enabled + routing flag set -> hand off to the AI model.
                if (isFinal && text != null && !text.isEmpty() && aiEnabled && forwardToAi) {
                    // Skip if a previous AI request for this session is still running.
                    if (aiProcessingStatus.getOrDefault(sessionId, false)) {
                        logger.info("AI正在处理中，跳过本次请求，会话ID: {}", sessionId);
                        return;
                    }

                    aiProcessingStatus.put(sessionId, true);

                    // Tell the frontend that AI processing has started.
                    JsonObject processingMsg = new JsonObject();
                    processingMsg.addProperty("type", "ai_processing");
                    processingMsg.addProperty("status", "started");
                    processingMsg.addProperty("text", text);
                    sendMessage(clientSession, processingMsg.toString());

                    logger.info("开始AI处理，文本: {}, 会话ID: {}", text, sessionId);

                    // OlamaService processes the text and pushes its result to TTS;
                    // it also clears aiProcessingStatus via its completion callback.
                    olamaService.processRecognizedTextWithAI(text, clientSession, sessionId, voiceService);
                }
                // Final result but AI routing disabled -> synthesize speech directly.
                else if (isFinal && text != null && !text.isEmpty() && !forwardToAi) {
                    generateAndSendTTS(clientSession, sessionId, text);
                }
            } else if ("error".equals(type)) {
                // Error envelope from SenseVoice — relay it to the client.
                String errorMsg = resultObj.has("message") ? resultObj.get("message").getAsString() : "未知错误";
                logger.error("SenseVoice返回错误: {}", errorMsg);
                sendError(clientSession, "SenseVoice错误: " + errorMsg);
            }
        } catch (Exception e) {
            logger.error("处理SenseVoice识别结果时出错: {}", e.getMessage(), e);

            // Best effort: tell the client something went wrong.
            WebSocketSession clientSession = sessions.get(sessionId);
            if (clientSession != null && clientSession.isOpen()) {
                sendError(clientSession, "处理识别结果时出错: " + e.getMessage());
            }
        }
    }

    /** Relays a SenseVoice-side error to the client session, if still open. */
    private void handleSenseVoiceError(String sessionId, String errorMessage) {
        WebSocketSession clientSession = sessions.get(sessionId);
        if (clientSession != null && clientSession.isOpen()) {
            sendError(clientSession, "SenseVoice错误: " + errorMessage);
        }
    }

    /**
     * Synthesizes {@code text} via VoiceService and sends the resulting audio to
     * the client: first a JSON metadata frame, then the binary audio frame.
     */
    private void generateAndSendTTS(WebSocketSession session, String sessionId, String text) {
        try {
            byte[] ttsData = voiceService.textToSpeech(text);

            // Metadata frame first so the frontend knows what the binary frame contains.
            JsonObject ttsMetadata = new JsonObject();
            ttsMetadata.addProperty("type", "tts_metadata");
            ttsMetadata.addProperty("text", text);
            ttsMetadata.addProperty("ai_generated", false);
            sendMessage(session, ttsMetadata.toString());

            // Then the audio bytes themselves.
            if (session.isOpen()) {
                session.sendMessage(new BinaryMessage(ByteBuffer.wrap(ttsData)));
                logger.info("已发送TTS语音数据，大小: {} 字节, 会话ID: {}", ttsData.length, sessionId);
            }
        } catch (Exception e) {
            logger.error("生成或发送TTS数据时出错", e);
            sendError(session, "生成或发送TTS数据时出错: " + e.getMessage());
        }
    }

    /**
     * Handles a binary audio frame from the client. The frame must be preceded by
     * a metadata text frame (stored in {@link #pendingMetadata}); the audio is
     * then forwarded to the session's SenseVoice client.
     */
    @Override
    protected void handleBinaryMessage(WebSocketSession session, BinaryMessage message) {
        String sessionId = getSessionId(session);
        if (sessionId == null) {
            // Session was never registered (or already cleaned up) — nothing to route.
            logger.error("收到二进制数据但会话未注册");
            sendError(session, "会话未注册");
            return;
        }
        logger.info("收到WebSocket二进制消息，大小: {} 字节, 会话ID: {}", message.getPayloadLength(), sessionId);

        // The metadata frame must have arrived first.
        JsonObject metadata = pendingMetadata.get(sessionId);
        if (metadata == null) {
            logger.error("收到二进制数据但没有对应的元数据，会话ID: {}", sessionId);
            sendError(session, "收到二进制数据但没有对应的元数据");
            return;
        }

        try {
            // Unpack the metadata.
            boolean useLocalStorage = metadata.get("useLocalStorage").getAsBoolean();
            boolean isFinal = metadata.get("isFinal").getAsBoolean();
            boolean isStreaming = metadata.has("streaming") ? metadata.get("streaming").getAsBoolean() : false;
            String format = metadata.has("format") ? metadata.get("format").getAsString() : "audio/webm";

            // Optional per-request override of the AI routing flag (null-safe via sendToAiFlag).
            if (metadata.has("sendToAI")) {
                boolean sendToAI = metadata.get("sendToAI").getAsBoolean();
                sendToAiFlag(sessionId).set(sendToAI);
                logger.info("设置AI处理标志为: {}, 会话ID: {}", sendToAI, sessionId);
            }

            // Keep the streaming flag current for the delta computation in the result handler.
            streamingStatus.put(sessionId, isStreaming);

            // Starting a new streaming run (heuristic: small first chunk) or a
            // one-shot final request -> reset accumulated text and AI state.
            if ((isStreaming && !isFinal && message.getPayloadLength() < 10000) ||
                    (!isStreaming && isFinal)) {
                currentRecognizedTexts.put(sessionId, "");
                aiProcessingStatus.put(sessionId, false);
            }

            logger.info("处理二进制音频数据，大小: {} 字节, 格式: {}, 使用本地存储: {}, 是否为最终数据: {}, 是否流式: {}",
                    message.getPayloadLength(), format, useLocalStorage, isFinal, isStreaming);

            // Copy the audio bytes out of the frame.
            ByteBuffer buffer = message.getPayload();
            byte[] audioData = new byte[buffer.remaining()];
            buffer.get(audioData);

            if (useLocalStorage) {
                // Route the audio to the session's SenseVoice WebSocket client.
                SenseVoiceWebSocketClient senseVoiceClient = senseVoiceClients.get(sessionId);
                if (senseVoiceClient != null && senseVoiceClient.isConnected()) {
                    try {
                        // Metadata frame first...
                        JsonObject audioMetadata = new JsonObject();
                        audioMetadata.addProperty("type", "audio_metadata");
                        audioMetadata.addProperty("format", format);
                        audioMetadata.addProperty("lang", "zh");
                        audioMetadata.addProperty("streaming", isStreaming);
                        senseVoiceClient.sendMetadata(audioMetadata.toString());

                        // ...and keep the client's streaming mode in sync with this request.
                        if (senseVoiceClient.isStreaming() != isStreaming) {
                            senseVoiceClient.setStreaming(isStreaming);
                            logger.info("已更新WebSocket客户端的流式状态: {}", isStreaming);
                        }

                        // Then the audio itself.
                        senseVoiceClient.sendAudioData(audioData, isFinal);
                        logger.info("已发送音频数据到SenseVoice，大小: {} 字节, 是否最终: {}, 是否流式: {}",
                                audioData.length, isFinal, isStreaming);

                        // For non-streaming, non-final chunks let the frontend show a spinner.
                        if (!isStreaming && !isFinal) {
                            JsonObject processingMessage = new JsonObject();
                            processingMessage.addProperty("type", "processing");
                            processingMessage.addProperty("status", "processing");
                            sendMessage(session, processingMessage.toString());
                        }
                    } catch (Exception e) {
                        logger.error("发送音频数据到SenseVoice时出错: {}", e.getMessage());
                        sendError(session, "发送音频数据失败: " + e.getMessage());
                    }
                } else {
                    logger.error("SenseVoice WebSocket客户端未连接或不可用，会话ID: {}，尝试重新连接", sessionId);

                    // Try to re-establish the ASR connection for subsequent frames.
                    reconnectSenseVoiceClient(sessionId);

                    sendError(session, "SenseVoice服务不可用，正在尝试重新连接");
                }
            } else {
                // Cloud (Aliyun) path is not implemented.
                logger.warn("阿里云服务处理暂未实现");
                sendError(session, "阿里云服务处理暂未实现");
            }

            // Final chunk: drop the metadata and reset streaming state.
            if (isFinal) {
                pendingMetadata.remove(sessionId);
                streamingStatus.put(sessionId, false);

                // Keep the SenseVoice client's streaming flag in sync.
                SenseVoiceWebSocketClient client = senseVoiceClients.get(sessionId);
                if (client != null) {
                    client.setStreaming(false);
                }

                // If no recognition result ever arrived, send an empty final result
                // so the frontend does not wait forever.
                if (currentRecognizedTexts.getOrDefault(sessionId, "").isEmpty()) {
                    logger.warn("最终处理但未收到识别结果，发送空结果");
                    JsonObject emptyResult = new JsonObject();
                    emptyResult.addProperty("type", "recognition");
                    emptyResult.addProperty("text", "");
                    emptyResult.addProperty("isFinal", true);
                    sendMessage(session, emptyResult.toString());
                }
            }
        } catch (Exception e) {
            logger.error("处理WebSocket二进制消息时出错", e);
            sendError(session, "处理二进制消息时出错: " + e.getMessage());
            pendingMetadata.remove(sessionId);
        }
    }

    /**
     * Replaces the session's SenseVoice WebSocket client with a freshly
     * constructed one (closing the old client first).
     */
    private void reconnectSenseVoiceClient(String sessionId) {
        try {
            // Close the stale connection, if any.
            SenseVoiceWebSocketClient oldClient = senseVoiceClients.get(sessionId);
            if (oldClient != null) {
                oldClient.close();
            }

            // Create a replacement client wired to the same callbacks.
            SenseVoiceWebSocketClient newClient = new SenseVoiceWebSocketClient(
                    localSenseVoiceWsUrl,
                    text -> handleSenseVoiceRecognitionResult(sessionId, text),
                    error -> handleSenseVoiceError(sessionId, error));

            senseVoiceClients.put(sessionId, newClient);
            logger.info("已为会话 {} 重新创建SenseVoice WebSocket客户端", sessionId);
        } catch (Exception e) {
            logger.error("重新连接SenseVoice WebSocket客户端失败", e);
        }
    }

    /**
     * Handles JSON text frames from the client: audio metadata announcements
     * (precede binary audio frames) and AI control commands
     * (enable / disable / process a given text directly).
     */
    @Override
    protected void handleTextMessage(WebSocketSession session, TextMessage message) {
        try {
            String payload = message.getPayload();
            logger.info("收到WebSocket文本消息，长度: {}", payload.length());

            JsonObject request = gson.fromJson(payload, JsonObject.class);
            String type = request.get("type").getAsString();

            String sessionId = getSessionId(session);
            if (sessionId == null) {
                // Unregistered session: do not create state under a bogus key.
                logger.error("收到文本消息但会话未注册");
                sendError(session, "会话未注册");
                return;
            }

            if ("audio_binary".equals(type) || "audio_metadata".equals(type)) {
                // Remember the metadata until the binary frame arrives.
                pendingMetadata.put(sessionId, request);

                // Record whether this request is streaming.
                boolean isStreaming = request.has("streaming") ? request.get("streaming").getAsBoolean() : false;
                streamingStatus.put(sessionId, isStreaming);

                // New one-shot request, or the start of a streaming run -> reset state.
                if (!isStreaming || (isStreaming && currentRecognizedTexts.getOrDefault(sessionId, "").isEmpty())) {
                    currentRecognizedTexts.put(sessionId, "");
                    aiProcessingStatus.put(sessionId, false);
                }

                logger.info("收到音频元数据，等待二进制数据, 会话ID: {}, 格式: {}, 是否流式: {}",
                        sessionId,
                        request.has("format") ? request.get("format").getAsString() : "未知",
                        isStreaming);
            } else if ("ai_command".equals(type)) {
                // AI control commands.
                String command = request.has("command") ? request.get("command").getAsString() : "";

                if ("enable".equals(command)) {
                    // Route future recognition results to the AI.
                    sendToAiFlag(sessionId).set(true);
                    JsonObject response = new JsonObject();
                    response.addProperty("type", "ai_status");
                    response.addProperty("enabled", true);
                    sendMessage(session, response.toString());
                    logger.info("已启用AI处理，会话ID: {}", sessionId);
                } else if ("disable".equals(command)) {
                    // Stop routing recognition results to the AI.
                    sendToAiFlag(sessionId).set(false);
                    JsonObject response = new JsonObject();
                    response.addProperty("type", "ai_status");
                    response.addProperty("enabled", false);
                    sendMessage(session, response.toString());
                    logger.info("已禁用AI处理，会话ID: {}", sessionId);
                } else if ("process".equals(command) && request.has("text")) {
                    // Process the supplied text with the AI directly (bypassing ASR).
                    String text = request.get("text").getAsString();
                    if (text != null && !text.isEmpty()) {
                        // Reject if an AI request is already in flight for this session.
                        if (aiProcessingStatus.getOrDefault(sessionId, false)) {
                            logger.info("AI正在处理中，忽略直接处理请求，会话ID: {}", sessionId);
                            JsonObject busyResponse = new JsonObject();
                            busyResponse.addProperty("type", "ai_status");
                            busyResponse.addProperty("status", "busy");
                            sendMessage(session, busyResponse.toString());
                            return;
                        }

                        aiProcessingStatus.put(sessionId, true);

                        // Optional TTS parameters.
                        String speaker = request.has("speaker") ? request.get("speaker").getAsString() : "中文女";
                        String model = request.has("model") ? request.get("model").getAsString() : null;

                        // Optional extra TTS options object, passed through verbatim.
                        JsonObject ttsOptions = null;
                        if (request.has("tts_options")) {
                            ttsOptions = request.getAsJsonObject("tts_options");
                        }

                        logger.info("直接处理文本: {}, 使用模型: {}, 说话人: {}, 会话ID: {}",
                                text,
                                model != null ? model : "默认",
                                speaker,
                                sessionId);

                        olamaService.processRecognizedTextWithAI(
                                text,
                                session,
                                sessionId,
                                voiceService,
                                speaker,
                                model,
                                ttsOptions);
                    }
                }
            }
        } catch (Exception e) {
            logger.error("处理WebSocket文本消息时出错", e);
            sendError(session, "处理消息时出错: " + e.getMessage());
        }
    }

    /**
     * Resolves the session id for a WebSocket session.
     *
     * <p>Reads the id stored in the session attributes on connect (O(1));
     * falls back to a reverse scan of the session map for sessions that
     * predate the attribute. Returns {@code null} (not a sentinel string)
     * when the session is unknown, so callers can detect the condition.
     */
    private String getSessionId(WebSocketSession session) {
        Object attr = session.getAttributes().get(SESSION_ID_ATTR);
        if (attr instanceof String) {
            return (String) attr;
        }
        return sessions.entrySet().stream()
                .filter(entry -> entry.getValue().equals(session))
                .map(Map.Entry::getKey)
                .findFirst()
                .orElse(null);
    }

    /**
     * 更新AI处理状态
     *
     * @param sessionId    会话ID
     * @param isProcessing 是否正在处理
     */
    public void updateAiProcessingStatus(String sessionId, boolean isProcessing) {
        WebSocketSession session = sessions.get(sessionId);
        if (session == null || !session.isOpen()) {
            logger.warn("无法更新AI处理状态，会话 {} 不存在或已关闭", sessionId);
            return;
        }

        aiProcessingStatus.put(sessionId, isProcessing);
        logger.info("更新AI处理状态: {}, 会话ID: {}", isProcessing, sessionId);

        // Keep the TTS bridge's view of the AI state in sync.
        ttsBridgeService.updateAiProcessingStatus(sessionId, isProcessing);
    }

    /**
     * 处理生成的AI响应文本，调用TTS进行语音合成
     * 该方法应由OlamaService在AI响应处理完成后调用
     *
     * @param sessionId 会话ID
     * @param text      AI生成的文本
     * @param speaker   说话人ID
     */
    public void processTTSForAIResponse(String sessionId, String text, String speaker) {
        // Delegate synthesis and delivery to the TTS bridge.
        ttsBridgeService.processTTSForAIResponse(sessionId, text, speaker);
    }
}