package com.smart.speaker.websocket;

import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.audio.asr.recognition.Recognition;
import com.alibaba.dashscope.audio.tts.SpeechSynthesizer;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.smart.speaker.util.JsonUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.springframework.web.socket.WebSocketSession;

import java.io.IOException;

@Slf4j
public class SessionHandler {

    private final RecognitionHandler recognizer;
    private final MessageHandler messageHandler;
    private final SessionState sessionState;
    private final ChatHandler chatHandler;
    private final WebSocketSession session;

    /**
     * Wires up the per-session pipeline: speech recognition (ASR), chat/LLM generation,
     * and speech synthesis (TTS), all bound to a single WebSocket session.
     *
     * @param session                            the underlying WebSocket session
     * @param generation                         DashScope text-generation client shared across sessions
     * @param recognitionGenericObjectPool       pooled ASR clients
     * @param speechSynthesizerGenericObjectPool pooled TTS clients
     */
    public SessionHandler(WebSocketSession session, Generation generation,
                          GenericObjectPool<Recognition> recognitionGenericObjectPool,
                          GenericObjectPool<SpeechSynthesizer> speechSynthesizerGenericObjectPool) {
        this.session = session;
        this.messageHandler = new MessageHandler(session);
        this.sessionState = new SessionState();
        this.recognizer = new RecognitionHandler(recognitionGenericObjectPool, sessionState);
        SynthesisHandler synthesizer = new SynthesisHandler(speechSynthesizerGenericObjectPool, messageHandler);
        this.chatHandler = new ChatHandler(generation, synthesizer, messageHandler, sessionState);
    }

    /**
     * Dispatches an inbound JSON text message by its {@code type} field.
     * Parse/dispatch failures are logged and swallowed so one bad frame
     * does not tear down the session.
     *
     * @param message raw JSON payload from the client
     */
    public void handleTextMessage(String message) {
        try {
            JsonNode jsonNode = JsonUtils.parseJsonToJsonNode(message);
            String messageType = jsonNode.path("type").asText();
            switch (messageType) {
                case "hello":
                    handleHelloMessage(jsonNode);
                    break;
                case "listen":
                    handleListenMessage(jsonNode);
                    break;
                case "abort":
                    // TODO implement dialogue abort (e.g. dialogueService.abortDialogue(session, reason)).
                    // Previously fell through to the "unknown type" warning — now explicitly acknowledged.
                    log.debug("abort message received but not yet implemented, reason: {}",
                            jsonNode.path("reason").asText());
                    break;
                case "iot":
                    // TODO implement IoT message handling (e.g. handleIotMessage(session, jsonNode)).
                    log.debug("iot message received but not yet implemented");
                    break;
                default:
                    log.warn("未知的消息类型: {}", messageType);
            }
        } catch (Exception e) {
            log.error("speaker parse message error", e);
        }
    }

    /**
     * Feeds an inbound binary audio frame to the recognizer while the session
     * is in the listening state. Transitions to speaking when either trailing
     * silence exceeds 700 ms, or no speech was ever detected (silence duration
     * still 0) and listening has run for over 10 s.
     *
     * @param bytes one audio frame as sent by the client
     */
    public void handleBinaryMessage(byte[] bytes) {
        try {
            if (sessionState.isListening()) {
                recognizer.initRecognizer();
                recognizer.sendAudioFrame(bytes);
                if (sessionState.getSilenceDuration() > 700 ||
                        (sessionState.getSilenceDuration() == 0 && sessionState.getListeningDuration() > 10_000)) {
                    toSpeaking();
                }
            }
        } catch (Exception e) {
            log.error("speaker parse binary message error", e);
        }
    }

    /**
     * Releases session resources: stops the recognizer and closes the message handler.
     * Does not close the WebSocket session itself — the caller owns that lifecycle.
     */
    public void close() {
        recognizer.stop();
        messageHandler.close();
    }

    /**
     * Closes the WebSocket session if it has been idle for more than 60 seconds.
     * Intended to be invoked periodically by an external watchdog.
     */
    public void checkAndClose() {
        if (sessionState.isIdle() && sessionState.getIdleDuration() > 60_000) {
            try {
                session.close();
            } catch (IOException e) {
                log.warn("会话 {} 关闭失败", session.getId(), e);
            }
        }
    }

    /**
     * Handles the client handshake: logs the client's audio parameters and
     * echoes them back in a {@code hello} response (the server currently
     * accepts whatever format the client proposes).
     */
    private void handleHelloMessage(JsonNode jsonNode) {
        // 解析音频参数
        JsonNode audioParams = jsonNode.path("audio_params");
        String format = audioParams.path("format").asText();
        int sampleRate = audioParams.path("sample_rate").asInt();
        int channels = audioParams.path("channels").asInt();
        int frameDuration = audioParams.path("frame_duration").asInt();

        log.info("客户端音频参数 - 格式: {}, 采样率: {}, 声道: {}, 帧时长: {}ms", format, sampleRate, channels, frameDuration);

        // 回复hello消息
        ObjectNode response = JsonUtils.createObjectNode();
        response.put("type", "hello");
        response.put("transport", "websocket");

        // 添加音频参数（可以根据服务器配置调整）
        ObjectNode responseAudioParams = response.putObject("audio_params");
        responseAudioParams.put("format", format);
        responseAudioParams.put("sample_rate", sampleRate);
        responseAudioParams.put("channels", channels);
        responseAudioParams.put("frame_duration", frameDuration);
        messageHandler.sendMessage(response.toString());
    }

    /**
     * Handles {@code listen} state transitions from the client:
     * {@code start} begins capturing audio, {@code stop} triggers a
     * transition to speaking, and {@code detect} reports a wake word.
     */
    private void handleListenMessage(JsonNode jsonNode) {
        // 解析listen消息中的state和mode字段
        String state = jsonNode.path("state").asText();
        String mode = jsonNode.path("mode").asText();

        log.info("收到listen消息 - State: {}, Mode: {}", state, mode);

        // 根据state处理不同的监听状态
        switch (state) {
            case "start":
                // 开始监听，准备接收音频数据
                sessionState.changeToListening();
                break;
            case "stop":
                // 停止监听
                if (sessionState.isListening()) {
                    toSpeaking();
                }
                break;
            case "detect":
                // 检测到唤醒词
                String text = jsonNode.path("text").asText();
                log.info("检测到唤醒词: {}", text);
                if (sessionState.isIdle() || sessionState.isListening()) {
                    sessionState.changeToSpeaking();
                    recognizer.stop();
                    messageHandler.sendMessage("tts", "start", null);
                    messageHandler.sendMessage("stt", null, text);
                    // TODO 发送默认回应
                }
                break;
            default:
                log.warn("未知的listen状态: {}", state);
        }
    }

    /**
     * Transitions listening → speaking: stops the recognizer, and if the
     * recognizer produced a request, announces TTS/STT frames to the client
     * and starts the chat pipeline. Always ends back in the idle state.
     */
    private void toSpeaking() {
        sessionState.changeToSpeaking();
        recognizer.stop();
        String req = sessionState.getRequest();
        if (req != null) {
            messageHandler.sendMessage("tts", "start", null);
            messageHandler.sendMessage("stt", null, req);
            chatHandler.chat(req);
        }
        // Both paths previously duplicated this call; hoisted for clarity.
        sessionState.changeToIdle();
    }
}
