#include "cloud_model_realtime_audio.h"
#include <libwebsockets.h>
#include <algorithm>
#include <cctype>
#include <cstring>
#include <functional>
#include "common_config_manager.h"
#include "realtime_audio_transcoder.h"
#include "media_stream.h"
#include "media_frame.h"
#include "media_speak.h"
#include "common_media_define.h"
#include "utils_crypto.h"
#include "utils_log.h"
#include "utils_string_ex.h"

namespace El {
namespace Cloud {

namespace {

// Composes the map key "<itemId>#<contentIndex>" used to track per-part state.
std::string BuildContentPartKey(const std::string &itemId, int contentIndex)
{
    std::string key = itemId;
    key += '#';
    key += std::to_string(contentIndex);
    return key;
}

// Extracts a sample rate in Hz from a PCM format string such as "pcm16000"
// or "pcm24". All digit characters are concatenated and parsed; values below
// 1000 are interpreted as kHz and scaled up. Returns 0 when there are no
// digits or the number cannot be represented as an int.
int ParsePcmFormatSampleRate(const std::string &format)
{
    std::string numberPart = format;
    numberPart.erase(std::remove_if(numberPart.begin(), numberPart.end(),
                                    [](unsigned char ch) { return std::isdigit(ch) == 0; }),
                     numberPart.end());

    if (numberPart.empty()) {
        return 0;
    }

    try {
        const int rate = std::stoi(numberPart);
        // Small values (e.g. "pcm24") are taken to be kHz.
        return (rate >= 1000) ? rate : rate * 1000;
    } catch (...) {
        // std::stoi throws std::out_of_range for oversized digit runs.
        return 0;
    }
}

std::string SanitizeEventForLog(const nlohmann::json &event)
{
    nlohmann::json sanitized = event;
    std::function<void(nlohmann::json &)> sanitize = [&](nlohmann::json &node) {
        if (node.is_object()) {
            for (auto &item : node.items()) {
                const std::string &key = item.key();
                if ((key == "audio" || key == "bytes") && item.value().is_string()) {
                    item.value() = {{"length", item.value().get<std::string>().size()}};
                } else {
                    sanitize(item.value());
                }
            }
        } else if (node.is_array()) {
            for (auto &child : node) {
                sanitize(child);
            }
        }
    };
    sanitize(sanitized);
    return sanitized.dump();
}

// Logs an outbound event. Audio append events are logged at debug level with
// only the payload size; everything else is logged at info level in sanitized
// JSON form (audio payloads replaced by their lengths).
void LogSendEventInfo(const nlohmann::json &event, size_t audioBytes)
{
    const std::string type = event.value("type", "unknown");
    if (type != "input_audio_buffer.append") {
        EL_INFO("Send event: {}", SanitizeEventForLog(event));
        return;
    }
    const std::string eventId = event.value("event_id", "");
    EL_DEBUG("Send event: type={}, event_id={}, audio_bytes={}", type, eventId, audioBytes);
}

} // namespace

// libwebsockets protocol table (must end with a null terminator entry).
static const struct lws_protocols protocols[] = {
    {
        "qwen-realtime-protocol",                                                              // name
        // NOTE(review): WebSocketCallback is declared with `int reason` rather
        // than `enum lws_callback_reasons`, hence this cast. Calling through a
        // function pointer of a different type is technically undefined
        // behavior — consider matching the lws_callback_function signature
        // exactly and dropping the reinterpret_cast.
        reinterpret_cast<lws_callback_function *>(CloudModelRealtimeAudio::WebSocketCallback), // callback
        0,                                                                                     // per_session_data_size
        4096,                                                                                  // rx_buffer_size
        0,                                                                                     // id
        nullptr,                                                                               // user
        0                                                                                      // tx_packet_size
    },
    {nullptr, nullptr, 0, 0, 0, nullptr, 0} // terminator
};

// Returns the process-wide singleton. Meyers singleton: initialization of the
// local static is thread-safe since C++11.
CloudModelRealtimeAudio &CloudModelRealtimeAudio::GetInstance()
{
    static CloudModelRealtimeAudio singleton;
    return singleton;
}

// Default-constructed; all real setup is deferred to Start().
CloudModelRealtimeAudio::CloudModelRealtimeAudio() = default;

// Ensures the service thread and session are torn down even if the owner
// never called Stop() explicitly.
CloudModelRealtimeAudio::~CloudModelRealtimeAudio()
{
    Stop();
}

// Starts the realtime audio client: loads configuration, initializes the
// audio transcoder and the WebSocket context, then launches the service
// thread. Idempotent — returns true immediately if already running.
// On any failure, partially-initialized state is rolled back so a later
// Start() attempt begins from scratch.
bool CloudModelRealtimeAudio::Start()
{
    if (running_) {
        EL_WARN("CloudModelRealtimeAudio already started");
        return true;
    }

    if (!InitConfig()) {
        EL_ERROR("Failed to initialize configuration");
        return false;
    }

    // Initialize the audio transcoder.
    transcoder_ = std::make_unique<RealtimeAudioTranscoder>();
    if (!transcoder_->Initialize()) {
        EL_ERROR("Failed to initialize audio transcoder");
        transcoder_.reset(); // don't keep a half-initialized transcoder around
        return false;
    }
    EL_INFO("Audio transcoder initialized successfully");

    if (!transcoder_ || !InitWebSocket()) {
        EL_ERROR("Failed to initialize WebSocket");
        transcoder_.reset(); // roll back so retrying Start() is clean
        return false;
    }

    running_ = true;
    ws_thread_ = std::make_unique<std::thread>(&CloudModelRealtimeAudio::WebSocketThreadFunc, this);

    EL_INFO("CloudModelRealtimeAudio started successfully");
    return true;
}

// Shuts the client down in dependency order: clear the running flag so the
// service loop exits, stop the auto media streams, join the service thread,
// disconnect the session under its lock, destroy the lws context, and
// finally release the transcoder. Safe to call when not running.
void CloudModelRealtimeAudio::Stop()
{
    if (!running_) {
        return;
    }

    EL_INFO("Stopping CloudModelRealtimeAudio");
    running_ = false;

    // Stop the automatic audio stream.
    DisableAutoAudioStream();

    // Stop the automatic video stream.
    DisableAutoVideoStream();

    // Wait for the WebSocket service thread to exit (it observes running_).
    if (ws_thread_ && ws_thread_->joinable()) {
        ws_thread_->join();
    }

    // Tear down the session; done after the join so the service loop can no
    // longer touch it.
    {
        std::lock_guard<std::mutex> lock(session_mutex_);
        Disconnect();
    }

    CleanupWebSocket();

    // Release the transcoder last.
    transcoder_.reset();

    EL_INFO("CloudModelRealtimeAudio stopped");
}

bool CloudModelRealtimeAudio::InitConfig()
{
    nlohmann::json root = El::Common::ConfigManager::GetInstance().GetConfig("/cloud_model");
    if (root.is_null()) {
        EL_ERROR("/cloud_model config is missing");
        return false;
    }

    // 功能映射：realtime_audio -> provider, model（精简结构）
    if (!root.contains("realtime_audio")) {
        EL_ERROR("/cloud_model.realtime_audio not configured");
        return false;
    }
    std::string provider = root["realtime_audio"].value("provider", std::string());
    model_name_ = root["realtime_audio"].value("model", std::string());
    if (provider.empty() || model_name_.empty()) {
        EL_ERROR("Invalid realtime_audio capability mapping");
        return false;
    }

    if (!root.contains("providers") || !root["providers"].contains(provider)) {
        EL_ERROR("Provider '{}' not configured under /cloud_model/providers", provider);
        return false;
    }
    const auto &p = root["providers"][provider];
    // API Key
    if (!p.contains("api_key") || p["api_key"].get<std::string>().empty()) {
        EL_ERROR("Provider '{}' api_key not set", provider);
        return false;
    }
    api_key_ = p["api_key"].get<std::string>();
    EL_INFO("Provider '{}' API key configured", provider);

    // WS端点（内置）
    auto mapWs = [&](const std::string &prov) -> std::string {
        if (prov == "aliyun")
            return std::string("wss://dashscope.aliyuncs.com/api-ws/v1/realtime");
        return std::string();
    };
    std::string ws_base = mapWs(provider);
    if (ws_base.empty()) {
        EL_ERROR("Provider '{}' ws_realtime endpoint not set", provider);
        return false;
    }
    if (ws_base.back() == '/')
        ws_base.pop_back();
    ws_url_ = ws_base + "?model=" + model_name_;
    EL_INFO("WebSocket URL: {}", ws_url_);

    // 默认音频与VAD参数（从顶层 realtime_audio 可选覆盖）
    default_audio_format_.sampleRate =
        root["realtime_audio"].value("audio", nlohmann::json::object()).value("sample_rate", 16000);
    default_audio_format_.channels =
        root["realtime_audio"].value("audio", nlohmann::json::object()).value("channels", 1);
    default_audio_format_.bitsPerSample =
        root["realtime_audio"].value("audio", nlohmann::json::object()).value("bits_per_sample", 16);
    default_vad_enabled_ = root["realtime_audio"].value("vad", nlohmann::json::object()).value("enabled", true);
    default_vad_threshold_ = root["realtime_audio"].value("vad", nlohmann::json::object()).value("threshold", 0.5f);

    // 读取视频配置（可选）
    if (root["realtime_audio"].contains("video")) {
        const auto &videoConfig = root["realtime_audio"]["video"];
        if (videoConfig.contains("frame_interval_ms")) {
            uint32_t frameInterval = videoConfig["frame_interval_ms"].get<uint32_t>();
            if (frameInterval >= 100 && frameInterval <= 10000) {
                session_.videoFrameInterval = frameInterval;
                EL_INFO("Video frame interval configured: {} ms", frameInterval);
            } else {
                EL_WARN("Invalid video frame interval: {}, using default 1000ms", frameInterval);
            }
        }
    }

    return true;
}

bool CloudModelRealtimeAudio::InitWebSocket()
{
    struct lws_context_creation_info info;
    memset(&info, 0, sizeof(info));

    info.port = CONTEXT_PORT_NO_LISTEN;
    info.protocols = protocols;
    info.gid = -1;
    info.uid = -1;
    info.options = LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
    info.user = this;

    ws_context_ = lws_create_context(&info);
    if (!ws_context_) {
        EL_ERROR("Failed to create WebSocket context");
        return false;
    }

    EL_INFO("WebSocket context created successfully");
    return true;
}

// Requests a writable callback for the session's connection and kicks the
// service loop (lws_cancel_service) so the request is picked up promptly.
void CloudModelRealtimeAudio::ScheduleWritable(SessionContext *session)
{
    if (session == nullptr || session->wsi == nullptr) {
        return;
    }

    lws_callback_on_writable(session->wsi);
    if (ws_context_ != nullptr) {
        lws_cancel_service(ws_context_);
    }
}

// Destroys the libwebsockets context if one exists; idempotent.
void CloudModelRealtimeAudio::CleanupWebSocket()
{
    if (ws_context_ == nullptr) {
        return;
    }
    lws_context_destroy(ws_context_);
    ws_context_ = nullptr;
    EL_INFO("WebSocket context destroyed");
}

// Service-loop body for the WebSocket thread; pumps lws_service() until
// Stop() clears running_. Sleeps while no context exists.
void CloudModelRealtimeAudio::WebSocketThreadFunc()
{
    EL_INFO("WebSocket thread started");

    int iterations = 0;
    while (running_) {
        if (!ws_context_) {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            continue;
        }

        int rc = lws_service(ws_context_, 50); // 50 ms service timeout
        ++iterations;

        // Heartbeat roughly once per second (every 20 iterations).
        if (iterations % 20 == 0) {
            EL_DEBUG("WebSocket service loop: {} iterations, return code: {}", iterations, rc);
        }
    }

    EL_INFO("WebSocket thread exiting after {} service calls", iterations);
}

// Static libwebsockets trampoline: recovers the owning instance from the
// context user pointer (set in InitWebSocket) and forwards the event to the
// instance's HandleWebSocketEvent.
int CloudModelRealtimeAudio::WebSocketCallback(struct lws *wsi, int reason, void *user, void *in, size_t len)
{
    (void)user; // unused, but required by the libwebsockets callback signature
    auto *context = static_cast<lws_context *>(lws_get_context(wsi));
    auto *instance = static_cast<CloudModelRealtimeAudio *>(lws_context_user(context));

    // Trace every callback invocation.
    EL_DEBUG("WebSocketCallback: wsi={}, reason={}, instance={}", static_cast<void *>(wsi), reason,
             static_cast<void *>(instance));

    if (!instance) {
        return 0;
    }

    // Single-session design: there is exactly one SessionContext.
    SessionContext *session = &instance->session_;

    // For some callbacks (close, error) the wsi may legitimately differ from
    // the one recorded in the session; warn only for unexpected mismatches,
    // but process the event regardless.
    if (session->wsi != nullptr && session->wsi != wsi) {
        // Skip the callbacks that are known to arrive with a stale wsi.
        if (reason != LWS_CALLBACK_CLIENT_CLOSED && reason != LWS_CALLBACK_CLIENT_CONNECTION_ERROR &&
            reason != LWS_CALLBACK_WSI_DESTROY) {
            EL_DEBUG("WebSocketCallback: wsi changed (old: {}, new: {}), reason: {}", static_cast<void *>(session->wsi),
                     static_cast<void *>(wsi), reason);
        }
    }

    return instance->HandleWebSocketEvent(session, wsi, reason, in, len);
}

// Dispatches a libwebsockets event for the (single) session. Handles the
// client handshake (injects the Authorization header), connection lifecycle,
// fragment reassembly on receive, and draining of the outbound send queue.
// Returns 0 to continue, -1 to abort the connection.
int CloudModelRealtimeAudio::HandleWebSocketEvent(SessionContext *session,
                                                  struct lws *wsi,
                                                  int reason,
                                                  void *in,
                                                  size_t len)
{
    switch (reason) {
        case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER: {
            if (session->wsi != wsi) {
                EL_DEBUG("HandleWebSocketEvent: updating wsi for handshake (old: {}, new: {})",
                         static_cast<void *>(session->wsi), static_cast<void *>(wsi));
                session->wsi = wsi;
            }

            // Inject the Authorization header into the upgrade request.
            // `in` is a pointer to the write cursor; `len` the remaining space.
            unsigned char **p = (unsigned char **)in;
            unsigned char *end = (*p) + len;

            EL_INFO("Adding Authorization header to WebSocket handshake");

            // lws_add_http_header_by_name returns non-zero on failure
            // (e.g. not enough header space).
            if (lws_add_http_header_by_name(
                    wsi, (const unsigned char *)"Authorization:", (const unsigned char *)session->authHeader.c_str(),
                    session->authHeader.length(), p, end)) {
                EL_ERROR("Failed to add Authorization header");
                return -1;
            }

            EL_INFO("Authorization header added successfully (length={})", session->authHeader.length());
            break;
        }

        case LWS_CALLBACK_CLIENT_ESTABLISHED:
            if (session->wsi != wsi) {
                EL_INFO("HandleWebSocketEvent: WebSocket established with new wsi (old: {}, new: {})",
                        static_cast<void *>(session->wsi), static_cast<void *>(wsi));
                session->wsi = wsi;
            }

            EL_INFO("WebSocket connection established");
            UpdateSessionState(session, RealtimeSessionState::Connected);
            // Ask for a writable callback so any queued messages go out.
            lws_callback_on_writable(wsi);
            break;

        case LWS_CALLBACK_CLIENT_RECEIVE: {
            if (session->wsi != wsi) {
                EL_DEBUG("HandleWebSocketEvent: updating wsi on receive (old: {}, new: {})",
                         static_cast<void *>(session->wsi), static_cast<void *>(wsi));
                session->wsi = wsi;
            }

            // Accumulate the incoming fragment.
            const char *data = static_cast<const char *>(in);
            session->recvBuffer.insert(session->recvBuffer.end(), data, data + len);
            EL_DEBUG("Received {} bytes, buffer total: {} bytes", len, session->recvBuffer.size());

            // Process only once the final fragment of the message arrives.
            if (lws_is_final_fragment(wsi)) {
                EL_DEBUG("Received final fragment, processing complete message");
                std::string message(session->recvBuffer.begin(), session->recvBuffer.end());
                HandleReceivedMessage(session, message);
                session->recvBuffer.clear();
            } else {
                EL_DEBUG("Waiting for more fragments");
            }
            break;
        }

        case LWS_CALLBACK_CLIENT_WRITEABLE: {
            if (session->wsi != wsi) {
                EL_INFO("HandleWebSocketEvent: writable callback for new wsi (old: {}, new: {})",
                        static_cast<void *>(session->wsi), static_cast<void *>(wsi));
                session->wsi = wsi;
            }

            // Send one queued message per writable callback.
            std::lock_guard<std::mutex> lock(session->sendMutex);
            if (!session->sendQueue.empty()) {
                auto &data = session->sendQueue.front();
                EL_DEBUG("Writing {} bytes to WebSocket", data.size());

                // lws_write requires LWS_PRE bytes of headroom before the payload.
                std::vector<uint8_t> buffer(LWS_PRE + data.size());
                std::copy(data.begin(), data.end(), buffer.begin() + LWS_PRE);

                // NOTE(review): a short write (0 <= written < data.size()) is
                // treated as success here; the unsent tail would be dropped.
                // Confirm whether lws can short-write TEXT frames in this
                // configuration and buffer the remainder if so.
                int written = lws_write(wsi, buffer.data() + LWS_PRE, data.size(), LWS_WRITE_TEXT);
                if (written < 0) {
                    EL_ERROR("Failed to write data to WebSocket");
                    return -1;
                }
                EL_DEBUG("Successfully wrote {} bytes", written);

                session->sendQueue.pop();

                // More messages pending: request another writable callback.
                if (!session->sendQueue.empty()) {
                    EL_DEBUG("Has {} more messages in queue, requesting writable", session->sendQueue.size());
                    lws_callback_on_writable(wsi);
                }
            }
            break;
        }

        case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
            EL_ERROR("WebSocket connection error: {}", in ? static_cast<const char *>(in) : "unknown");
            UpdateSessionState(session, RealtimeSessionState::Error);
            if (session->callbacks.onError) {
                session->callbacks.onError(in ? static_cast<const char *>(in) : "Connection error");
            }
            break;

        case LWS_CALLBACK_CLIENT_CLOSED:
            EL_INFO("WebSocket connection closed");
            UpdateSessionState(session, RealtimeSessionState::Disconnected);
            session->wsi = nullptr;
            break;

        default:
            EL_DEBUG("Received WebSocket callback: reason={}", reason);
            break;
    }

    return 0;
}

// Parses an incoming server event and routes it to the handler registered for
// its "type" field. Unknown types are logged (sanitized); malformed JSON —
// including json exceptions thrown inside a handler — is logged as an error.
void CloudModelRealtimeAudio::HandleReceivedMessage(SessionContext *session, const std::string &message)
{
    using EventHandler = void (CloudModelRealtimeAudio::*)(SessionContext *, const nlohmann::json &);
    struct EventRoute {
        const char *type;
        EventHandler handler;
    };
    // type -> member-function handler routing table.
    static const EventRoute kRoutes[] = {
        {"session.created", &CloudModelRealtimeAudio::HandleSessionCreatedEvent},
        {"session.updated", &CloudModelRealtimeAudio::HandleSessionUpdatedEvent},
        {"response.created", &CloudModelRealtimeAudio::HandleResponseCreatedEvent},
        {"response.audio.delta", &CloudModelRealtimeAudio::HandleResponseAudioDelta},
        {"response.output_item.added", &CloudModelRealtimeAudio::HandleResponseOutputItemAdded},
        {"response.content_part.added", &CloudModelRealtimeAudio::HandleResponseContentPartAdded},
        {"response.content_part.delta", &CloudModelRealtimeAudio::HandleResponseContentPartDelta},
        {"response.content_part.done", &CloudModelRealtimeAudio::HandleResponseContentPartDone},
        {"response.audio_transcript.delta", &CloudModelRealtimeAudio::HandleResponseAudioTranscriptDelta},
        {"response.completed", &CloudModelRealtimeAudio::HandleResponseCompleted},
        {"response.audio.done", &CloudModelRealtimeAudio::HandleResponseAudioDone},
        {"response.audio_transcript.done", &CloudModelRealtimeAudio::HandleResponseAudioTranscriptDone},
        {"response.text.delta", &CloudModelRealtimeAudio::HandleResponseTextDelta},
        {"response.text.done", &CloudModelRealtimeAudio::HandleResponseTextDone},
        {"input_audio_buffer.speech_started", &CloudModelRealtimeAudio::HandleInputAudioBufferSpeechStarted},
        {"input_audio_buffer.speech_stopped", &CloudModelRealtimeAudio::HandleInputAudioBufferSpeechStopped},
        {"input_audio_buffer.committed", &CloudModelRealtimeAudio::HandleInputAudioBufferCommitted},
        {"conversation.item.created", &CloudModelRealtimeAudio::HandleConversationItemCreated},
        {"conversation.item.input_audio_transcription.completed",
         &CloudModelRealtimeAudio::HandleConversationInputAudioTranscriptionCompleted},
        {"response.output_item.done", &CloudModelRealtimeAudio::HandleResponseOutputItemDone},
        {"response.done", &CloudModelRealtimeAudio::HandleResponseDone},
        {"error", &CloudModelRealtimeAudio::HandleErrorEvent},
    };

    try {
        auto event = nlohmann::json::parse(message);

        if (!event.contains("type")) {
            EL_WARN("Received event without type field");
            return;
        }

        std::string type = event["type"];

        // Log the parsed event (truncated to 120 bytes) so the type is always visible.
        std::string eventStr = event.dump();
        if (eventStr.size() <= 120) {
            EL_DEBUG("Parsed event [{}]: {}", type, eventStr);
        } else {
            EL_DEBUG("Parsed event [{}]: {}... (total {} bytes)", type, eventStr.substr(0, 120), eventStr.size());
        }

        for (const auto &route : kRoutes) {
            if (type == route.type) {
                (this->*route.handler)(session, event);
                return;
            }
        }
        EL_WARN("Received unknown event type: {}, event={}", type, SanitizeEventForLog(event));
    } catch (const nlohmann::json::exception &e) {
        EL_ERROR("Failed to parse JSON event: {}, raw message: {}", e.what(), message);
    }
}

// Handles session.created: records the server-assigned session id and voice,
// applies the negotiated output audio format to playback, logs the session
// configuration (including VAD), and marks the session connected.
void CloudModelRealtimeAudio::HandleSessionCreatedEvent(SessionContext *session, const nlohmann::json &event)
{
    EL_INFO("Handling session.created event");

    if (!event.contains("session")) {
        EL_WARN("Received session.created without session field");
        return;
    }

    const auto &serverSession = event["session"];

    // Remember the session id assigned by the server.
    if (serverSession.contains("id")) {
        session->sessionId = serverSession["id"];
        EL_INFO("Received server session ID: {}", session->sessionId);
    }

    // Log the negotiated session configuration.
    if (serverSession.contains("model")) {
        EL_INFO("Model: {}", serverSession["model"].get<std::string>());
    }
    if (serverSession.contains("voice")) {
        session->voice = serverSession["voice"];
        EL_INFO("Voice: {}", session->voice);
    }
    if (serverSession.contains("input_audio_format")) {
        EL_INFO("Input_audio_format: {}", serverSession["input_audio_format"].get<std::string>());
    }
    if (serverSession.contains("output_audio_format")) {
        const std::string outputFormat = serverSession["output_audio_format"].get<std::string>();
        EL_INFO("Output_audio_format: {}", outputFormat);
        ConfigurePlaybackSampleRate(session, outputFormat);
    }

    // Log the server-side VAD (turn detection) configuration.
    if (serverSession.contains("turn_detection")) {
        const auto &vad = serverSession["turn_detection"];
        EL_INFO("VAD config: type={}, threshold={}, silence_duration_ms={}", vad.value("type", "unknown"),
                vad.value("threshold", 0.0f), vad.value("silence_duration_ms", 0));
    }

    // Session established — update state.
    UpdateSessionState(session, RealtimeSessionState::Connected);
}

// Placeholder: session.updated currently carries nothing we need to act on.
void CloudModelRealtimeAudio::HandleSessionUpdatedEvent(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling session.updated event");
    (void)session;
    (void)event;
}

// Parses the cloud's PCM output format string (e.g. "pcm24000") and, when the
// rate actually changed, updates the session's output sample rate and
// reconfigures the playback encoder accordingly.
void CloudModelRealtimeAudio::ConfigurePlaybackSampleRate(SessionContext *session, const std::string &pcmFormat)
{
    const int parsedRate = ParsePcmFormatSampleRate(pcmFormat);
    if (parsedRate <= 0) {
        EL_WARN("Unable to parse PCM sample rate from output format '{}'", pcmFormat);
        return;
    }

    const bool rateChanged = (session->outputSampleRate != parsedRate);
    if (rateChanged) {
        session->outputSampleRate = parsedRate;
        EL_INFO("Cloud PCM sample rate set to {} Hz", session->outputSampleRate);
    } else {
        EL_DEBUG("Playback sample rate unchanged at {} Hz", parsedRate);
    }

    if (!transcoder_) {
        EL_WARN("Transcoder not ready when configuring playback encoder");
        return;
    }
    if (!rateChanged) {
        EL_DEBUG("Playback encoder already configured for {} Hz", session->outputSampleRate);
        return;
    }
    if (!transcoder_->ReconfigurePlaybackEncoder(session->outputSampleRate)) {
        EL_WARN("Failed to reconfigure playback encoder to {} Hz", session->outputSampleRate);
    }
}

// Computes how many PCM bytes make up one kInputChunkDurationMs-long chunk for
// the given format, rounded down to a whole frame. Returns 0 for an invalid
// format, and never less than one frame otherwise.
size_t CloudModelRealtimeAudio::CalculateInputChunkSizeBytes(const AudioFormat &format) const
{
    if (format.sampleRate <= 0 || format.bitsPerSample <= 0 || format.channels <= 0) {
        return 0;
    }

    const size_t sampleBytes = static_cast<size_t>((format.bitsPerSample + 7) / 8);
    const size_t frameBytes = sampleBytes * static_cast<size_t>(format.channels);
    size_t chunkBytes = static_cast<size_t>(format.sampleRate) * frameBytes * kInputChunkDurationMs / 1000;

    // Align down to a whole frame; never return an empty chunk.
    chunkBytes -= chunkBytes % frameBytes;
    if (chunkBytes == 0) {
        chunkBytes = frameBytes;
    }

    return chunkBytes;
}

// Drains the pending PCM input buffer, slicing it into fixed-size chunks and
// sending each via SendAudioData. When flushPartial is true, any trailing
// bytes smaller than a chunk are sent too. Chunks are copied out under the
// buffer lock, then sent without holding it. Returns true if at least one
// chunk was sent.
bool CloudModelRealtimeAudio::FlushPendingInputAudio(SessionContext *session, bool flushPartial)
{
    if (!session) {
        return false;
    }

    // Lazily compute and cache the chunk size for the session's audio format.
    size_t chunkSize = session->inputChunkSizeBytes;
    if (chunkSize == 0) {
        chunkSize = CalculateInputChunkSizeBytes(session->audioFormat);
        session->inputChunkSizeBytes = chunkSize;
    }

    std::vector<std::vector<uint8_t>> chunks;
    {
        // Slice the pending buffer under the lock; sending happens outside it.
        std::lock_guard<std::mutex> lock(session->pendingPcmMutex);
        if (chunkSize == 0) {
            // Unknown chunk size (invalid format): send everything as one blob.
            if (session->pendingPcmBuffer.empty()) {
                return false;
            }
            chunks.emplace_back(session->pendingPcmBuffer.begin(), session->pendingPcmBuffer.end());
            session->pendingPcmBuffer.clear();
        } else {
            // Carve off as many full chunks as are available.
            while (session->pendingPcmBuffer.size() >= chunkSize) {
                chunks.emplace_back(session->pendingPcmBuffer.begin(), session->pendingPcmBuffer.begin() + chunkSize);
                session->pendingPcmBuffer.erase(session->pendingPcmBuffer.begin(),
                                                session->pendingPcmBuffer.begin() + chunkSize);
            }
            // Optionally flush the sub-chunk remainder (e.g. at end of speech).
            if (flushPartial && !session->pendingPcmBuffer.empty()) {
                chunks.emplace_back(session->pendingPcmBuffer.begin(), session->pendingPcmBuffer.end());
                session->pendingPcmBuffer.clear();
            }
        }
    }

    bool anySent = false;
    for (auto &chunk : chunks) {
        if (SendAudioData(chunk)) {
            anySent = true;
        } else {
            EL_WARN("FlushPendingInputAudio: Failed to send PCM chunk ({} bytes)", chunk.size());
        }
    }

    return anySent;
}

// Handles response.created: applies any per-response output audio format and
// transitions the session into the Responding state.
void CloudModelRealtimeAudio::HandleResponseCreatedEvent(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.created event");

    if (!event.contains("response") || !event["response"].is_object()) {
        EL_WARN("response.created event missing response object");
        return;
    }

    const auto &responseObj = event["response"];
    if (responseObj.contains("output_audio_format")) {
        ConfigurePlaybackSampleRate(session, responseObj["output_audio_format"].get<std::string>());
    }

    UpdateSessionState(session, RealtimeSessionState::Responding);
}

// Handles response.audio.delta: base64-decodes the PCM payload, notifies the
// user's onAudioReceived callback, then feeds the audio to local playback.
void CloudModelRealtimeAudio::HandleResponseAudioDelta(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.audio.delta event");

    if (!event.contains("delta")) {
        EL_WARN("Received response.audio.delta without delta field");
        return;
    }

    try {
        const std::string encoded = event["delta"];
        std::vector<uint8_t> pcm = El::Utils::Base64Decode(encoded);
        EL_DEBUG("Decoded audio delta: {} bytes", pcm.size());

        // Notify the user callback first.
        if (session->callbacks.onAudioReceived) {
            EL_DEBUG("Calling onAudioReceived callback");
            session->callbacks.onAudioReceived(pcm);
        } else {
            EL_WARN("onAudioReceived callback not set");
        }

        // Then play the cloud audio locally.
        EL_DEBUG("Preparing audio delta for playback");
        HandleCloudAudioForPlayback(session, pcm);

        UpdateSessionState(session, RealtimeSessionState::Responding);
    } catch (const std::exception &e) {
        EL_ERROR("Failed to decode audio delta: {}", e.what());
    }
}

// Handles response.output_item.added: logs the new item's identity and keeps
// the session in the Responding state.
void CloudModelRealtimeAudio::HandleResponseOutputItemAdded(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.output_item.added event");

    if (event.contains("item")) {
        const auto &outputItem = event["item"];
        EL_INFO("Output item added: id={}, role={}, type={}", outputItem.value("id", std::string()),
                outputItem.value("role", std::string()), outputItem.value("type", std::string()));
    }

    UpdateSessionState(session, RealtimeSessionState::Responding);
}

// Handles response.content_part.added: records the part's type ("audio" or
// "text") and any initial text under the (item_id, content_index) key so
// subsequent deltas can be routed and accumulated. The previous version
// re-checked `event["part"]` in two separate blocks; the part handling is now
// consolidated into a single scope.
void CloudModelRealtimeAudio::HandleResponseContentPartAdded(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.content_part.added event");

    int contentIndex = event.value("content_index", 0);
    // The item id may arrive either as a flat "item_id" or nested in "item".
    std::string itemId;
    if (event.contains("item_id")) {
        itemId = event["item_id"].get<std::string>();
    } else if (event.contains("item") && event["item"].is_object()) {
        itemId = event["item"].value("id", std::string());
    }

    if (itemId.empty()) {
        EL_WARN("response.content_part.added missing item_id");
        return;
    }

    std::string key = BuildContentPartKey(itemId, contentIndex);

    if (event.contains("part") && event["part"].is_object()) {
        const auto &part = event["part"];
        std::string partType = part.value("type", std::string());
        EL_INFO("Content part added: item_id={}, content_index={}, type={}", itemId, contentIndex, partType);

        if (!partType.empty()) {
            session->contentPartTypes[key] = partType;
        }
        // Seed the text accumulator with any initial text carried by the part.
        if (part.contains("text") && part["text"].is_string()) {
            session->contentPartTextBuffer[key] = part["text"].get<std::string>();
        }
    }
}

// Handles response.content_part.delta. The delta may carry audio (base64 in a
// string delta, or an object with "audio"/"bytes") or text (string delta, or
// an object with "text"). The part type recorded by
// HandleResponseContentPartAdded disambiguates string-form deltas; object
// deltas carrying "audio"/"bytes" are treated as audio regardless.
void CloudModelRealtimeAudio::HandleResponseContentPartDelta(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.content_part.delta event");

    if (!event.contains("delta")) {
        EL_WARN("response.content_part.delta missing delta field");
        return;
    }

    int contentIndex = event.value("content_index", 0);
    std::string itemId = event.value("item_id", std::string());
    std::string key = BuildContentPartKey(itemId, contentIndex);

    // Look up the part type registered when the part was added (may be absent).
    std::string partType;
    auto typeIter = session->contentPartTypes.find(key);
    if (typeIter != session->contentPartTypes.end()) {
        partType = typeIter->second;
    }

    const auto &delta = event["delta"];
    // Audio if the registered type says so, or if the delta object carries an
    // audio-looking payload key.
    bool looksLikeAudio = (partType == "audio");
    if (delta.is_object() && (delta.contains("audio") || delta.contains("bytes"))) {
        looksLikeAudio = true;
    }

    if (looksLikeAudio) {
        // Accept the payload from any of the known shapes.
        std::string base64Audio;
        if (delta.is_string()) {
            base64Audio = delta.get<std::string>();
        } else if (delta.is_object()) {
            if (delta.contains("audio") && delta["audio"].is_string()) {
                base64Audio = delta["audio"].get<std::string>();
            } else if (delta.contains("bytes") && delta["bytes"].is_string()) {
                base64Audio = delta["bytes"].get<std::string>();
            }
        }

        if (base64Audio.empty()) {
            EL_WARN("Audio content part delta without audio payload");
            return;
        }

        try {
            std::vector<uint8_t> audioData = El::Utils::Base64Decode(base64Audio);
            EL_INFO("Decoded audio content part: {} bytes (item_id={}, index={})", audioData.size(), itemId,
                    contentIndex);

            // Notify the user callback, then feed local playback.
            if (session->callbacks.onAudioReceived) {
                session->callbacks.onAudioReceived(audioData);
            }

            HandleCloudAudioForPlayback(session, audioData);
            UpdateSessionState(session, RealtimeSessionState::Responding);
        } catch (const std::exception &e) {
            EL_ERROR("Failed to decode audio content part delta: {}", e.what());
        }
    } else {
        // Text path: accept either a bare string or an object with "text".
        std::string textDelta;
        if (delta.is_string()) {
            textDelta = delta.get<std::string>();
        } else if (delta.is_object()) {
            if (delta.contains("text") && delta["text"].is_string()) {
                textDelta = delta["text"].get<std::string>();
            }
        }

        if (!textDelta.empty()) {
            EL_INFO("Content part text delta (item_id={}, index={}): '{}'", itemId, contentIndex, textDelta);
            // Accumulate for the final content_part.done text and forward the
            // incremental piece to the user callback.
            session->contentPartTextBuffer[key] += textDelta;
            if (session->callbacks.onTextReceived) {
                session->callbacks.onTextReceived(textDelta);
            }
        } else {
            EL_DEBUG("Content part delta without text/audio payload");
        }
    }
}

// Handles response.content_part.done: drops the part's bookkeeping, emits the
// accumulated final text through onTextReceived, and clears its buffer entry.
void CloudModelRealtimeAudio::HandleResponseContentPartDone(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.content_part.done event");

    const std::string key =
        BuildContentPartKey(event.value("item_id", std::string()), event.value("content_index", 0));

    session->contentPartTypes.erase(key);

    auto found = session->contentPartTextBuffer.find(key);
    if (found == session->contentPartTextBuffer.end()) {
        return;
    }

    EL_INFO("Content part done: final text='{}'", found->second);
    if (session->callbacks.onTextReceived && !found->second.empty()) {
        session->callbacks.onTextReceived(found->second);
    }
    session->contentPartTextBuffer.erase(found);
}

void CloudModelRealtimeAudio::HandleResponseAudioTranscriptDelta(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.audio_transcript.delta event");

    // Guard: the delta must be present and textual.
    const auto deltaIt = event.find("delta");
    if (deltaIt == event.end() || !deltaIt->is_string()) {
        EL_WARN("response.audio_transcript.delta missing delta text");
        return;
    }

    const std::string deltaText = deltaIt->get<std::string>();
    const std::string itemId = event.value("item_id", std::string());
    const int contentIndex = event.value("content_index", 0);

    // Accumulate transcript fragments per (item, content index) pair.
    session->contentPartTextBuffer[BuildContentPartKey(itemId, contentIndex)] += deltaText;

    EL_DEBUG("Audio transcript delta (item_id={}, index={}): '{}'", itemId, contentIndex, deltaText);
}

void CloudModelRealtimeAudio::HandleResponseCompleted(SessionContext *session, const nlohmann::json &event)
{
    (void)event; // payload unused; the completion itself is the signal
    EL_DEBUG("Handling response.completed event");

    // A finished response invalidates all per-content-part bookkeeping.
    session->contentPartTextBuffer.clear();
    session->contentPartTypes.clear();

    // Back to idle-but-connected, ready for the next turn.
    UpdateSessionState(session, RealtimeSessionState::Connected);
}

void CloudModelRealtimeAudio::HandleResponseAudioDone(SessionContext *session, const nlohmann::json &event)
{
    (void)event;
    EL_INFO("Audio response completed");

    // Flush the active encoder and rotate to the standby one at the end of an
    // audio segment, so the next segment can start encoding immediately
    // instead of waiting for a rebuild.
    if (transcoder_) {
        std::vector<uint8_t> tailAac;
        if (!transcoder_->FlushActiveAndRotate(tailAac)) {
            EL_WARN("Failed to flush+rotate encoder at audio end");
        } else if (!tailAac.empty()) {
            EL_DEBUG("Flushed tail AAC data: {} bytes", tailAac.size());

            auto &speak = El::Media::Speak::GetInstance();
            bool playbackReady = session->isPlaying.load();
            if (!playbackReady) {
                EL_INFO("Starting stream playback for flushed tail");
                // media_speak decodes AAC internally; 16 kHz mono, full volume.
                HAL_ADEC_COM_CONFIG_S config;
                config.eType = HAL_AUDIO_TYPE_AAC;
                config.u32SampleRate = HAL_AUDIO_SAMPLE_RATE_16000;
                config.u8ChanNum = 1;
                config.u8Vol = 100;
                if (!speak.StartStreamPlay(config)) {
                    EL_ERROR("Failed to start stream playback for tail AAC");
                } else {
                    session->isPlaying.store(true);
                    playbackReady = true;
                }
            }

            // Push the flushed AAC tail — but only if the player is actually
            // running.  (The original pushed even after StartStreamPlay
            // failed, and repeated a redundant !tailAac.empty() check that
            // was already guaranteed by the branch above.)
            if (playbackReady &&
                !speak.PushAudioData(reinterpret_cast<const char *>(tailAac.data()), tailAac.size())) {
                EL_WARN("Failed to push tail AAC to speaker");
            }
        } else {
            EL_DEBUG("No tail AAC data to flush");
        }

        // After rotation the standby encoder is reset; the next segment can
        // be encoded directly.
    }

    UpdateSessionState(session, RealtimeSessionState::Connected);
}

void CloudModelRealtimeAudio::HandleResponseTextDelta(SessionContext *session, const nlohmann::json &event)
{
    EL_DEBUG("Handling response.text.delta event");

    // Guard both presence and type.  The original unconditionally assigned
    // event["delta"] to std::string, which throws nlohmann::json::type_error
    // if the server sends a non-string delta; this mirrors the check in
    // HandleResponseAudioTranscriptDelta.
    if (!event.contains("delta") || !event["delta"].is_string()) {
        EL_WARN("Received response.text.delta without delta field");
        return;
    }

    std::string text = event["delta"].get<std::string>();
    EL_INFO("Received text delta: '{}'", text);

    // Forward the fragment to the registered consumer, if any.
    if (session->callbacks.onTextReceived) {
        EL_DEBUG("Calling onTextReceived callback");
        session->callbacks.onTextReceived(text);
    } else {
        EL_WARN("onTextReceived callback not set");
    }
}

void CloudModelRealtimeAudio::HandleResponseTextDone(SessionContext *session, const nlohmann::json &event)
{
    // Log-only handler: the text was already delivered incrementally via
    // response.text.delta, so nothing remains to forward here.
    (void)event;
    (void)session;
    EL_INFO("Text response completed");
}

void CloudModelRealtimeAudio::HandleInputAudioBufferCommitted(SessionContext *session, const nlohmann::json &event)
{
    // Diagnostic-only: record which conversation item the committed input
    // audio was attached to.
    (void)session;
    const std::string itemId = event.value("item_id", std::string());
    const std::string previousItemId = event.value("previous_item_id", std::string());
    EL_DEBUG("input_audio_buffer.committed: item_id={}, previous_item_id={}", itemId, previousItemId);
}

void CloudModelRealtimeAudio::HandleConversationItemCreated(SessionContext *session, const nlohmann::json &event)
{
    (void)session;

    // Nothing to do unless the payload carries an item object.
    const auto itemIt = event.find("item");
    if (itemIt == event.end() || !itemIt->is_object()) {
        return;
    }

    // Diagnostic-only: log the created item's identity.
    EL_DEBUG("conversation.item.created: id={}, role={}, type={}",
             itemIt->value("id", std::string()),
             itemIt->value("role", std::string()),
             itemIt->value("type", std::string()));
}

void CloudModelRealtimeAudio::HandleConversationInputAudioTranscriptionCompleted(SessionContext *session,
                                                                                 const nlohmann::json &event)
{
    (void)session;

    // Surface the recognized user utterance in the log; an absent/empty
    // transcript is downgraded to a debug-level note.
    const std::string transcript = event.value("transcript", std::string());
    if (transcript.empty()) {
        EL_DEBUG("conversation.item.input_audio_transcription.completed without transcript");
    } else {
        EL_INFO("User transcript completed: '{}'", transcript);
    }
}

void CloudModelRealtimeAudio::HandleResponseAudioTranscriptDone(SessionContext *session, const nlohmann::json &event)
{
    (void)session;

    // Log the final transcript for the item, or note its absence.
    const std::string itemId = event.value("item_id", std::string());
    const std::string transcript = event.value("transcript", std::string());
    if (transcript.empty()) {
        EL_DEBUG("response.audio_transcript.done without transcript (item_id={})", itemId);
    } else {
        EL_DEBUG("Audio transcript done (item_id={}): '{}'", itemId, transcript);
    }
}

void CloudModelRealtimeAudio::HandleResponseOutputItemDone(SessionContext *session, const nlohmann::json &event)
{
    (void)session;

    // Diagnostic-only: report id/status of the completed output item.
    const auto item = event.value("item", nlohmann::json::object());
    EL_DEBUG("response.output_item.done: id={}, status={}",
             item.value("id", std::string()), item.value("status", std::string()));
}

void CloudModelRealtimeAudio::HandleResponseDone(SessionContext *session, const nlohmann::json &event)
{
    (void)session;

    // Summarize the finished response for the log.
    const auto response = event.value("response", nlohmann::json::object());
    const std::string responseId = response.value("id", std::string());
    const std::string status = response.value("status", std::string());
    const std::string conversationId = response.value("conversation_id", std::string());
    EL_INFO("Response done: id={}, status={}, conversation={}", responseId, status, conversationId);
}

void CloudModelRealtimeAudio::HandleInputAudioBufferSpeechStarted(SessionContext *session, const nlohmann::json &event)
{
    (void)event;
    EL_INFO("VAD: Speech started");
    UpdateSessionState(session, RealtimeSessionState::Speaking);

    // Barge-in: stop the AI's audio output as soon as the user speaks.
    if (session->isPlaying.load()) {
        EL_INFO("Stopping AI playback (user started speaking)");
        El::Media::Speak::GetInstance().Stop();
        session->isPlaying.store(false);
    }

    // Notify the registered consumer (guard-clause form).
    if (!session->callbacks.onSpeechStart) {
        EL_WARN("onSpeechStart callback not set");
        return;
    }
    EL_DEBUG("Calling onSpeechStart callback");
    session->callbacks.onSpeechStart();
}

void CloudModelRealtimeAudio::HandleInputAudioBufferSpeechStopped(SessionContext *session, const nlohmann::json &event)
{
    (void)event;
    EL_INFO("VAD: Speech stopped");
    UpdateSessionState(session, RealtimeSessionState::Connected);

    // Push out any input PCM still sitting below the chunk threshold.
    if (FlushPendingInputAudio(session, true)) {
        EL_INFO("Buffered input audio flushed after speech stop");
    }

    // Notify the registered consumer (guard-clause form).
    if (!session->callbacks.onSpeechEnd) {
        EL_WARN("onSpeechEnd callback not set");
        return;
    }
    EL_DEBUG("Calling onSpeechEnd callback");
    session->callbacks.onSpeechEnd();
}

void CloudModelRealtimeAudio::HandleErrorEvent(SessionContext *session, const nlohmann::json &event)
{
    // Pull the human-readable message out of event.error.message, falling
    // back to a generic string when either level is absent.
    const std::string message =
        event.value("error", nlohmann::json::object()).value("message", "Unknown error");
    EL_ERROR("Server error: {}", message);

    if (session->callbacks.onError) {
        session->callbacks.onError(message);
    }
}

bool CloudModelRealtimeAudio::CreateSession(const RealtimeAudioCallbacks &callbacks, const AudioFormat &audioFormat)
{
    std::lock_guard<std::mutex> lock(session_mutex_);

    // Refuse to create a second session while one is alive in any state.
    if (session_.state != RealtimeSessionState::Disconnected) {
        EL_WARN("Session already exists in state: {}", static_cast<int>(session_.state));
        return false;
    }

    session_.callbacks = callbacks;
    session_.outputSampleRate = 24000;
    session_.contentPartTypes.clear();
    session_.contentPartTextBuffer.clear();

    // Adopt the caller's format; a caller passing exactly 16kHz/mono/16-bit
    // is treated as "unspecified" and replaced by the configured default.
    // NOTE(review): this heuristic also overrides callers who intentionally
    // request 16k/1/16 — confirm that is acceptable.
    session_.audioFormat = audioFormat;
    if (audioFormat.sampleRate == 16000 && audioFormat.channels == 1 && audioFormat.bitsPerSample == 16) {
        session_.audioFormat = default_audio_format_;
    }

    session_.vadEnabled = default_vad_enabled_;
    session_.vadThreshold = default_vad_threshold_;

    // Clear any leftover input PCM from a previous session.
    {
        std::lock_guard<std::mutex> pcmLock(session_.pendingPcmMutex);
        session_.pendingPcmBuffer.clear();
    }
    session_.inputChunkSizeBytes = CalculateInputChunkSizeBytes(session_.audioFormat);
    session_.firstMessageLogged = false;

    // Reset video pacing state.
    session_.lastVideoFrameTime = 0;
    session_.videoFrameInterval = 1000; // default: one frame per second

    EL_INFO("Creating new realtime session");

    // Kick off the WebSocket connection; roll the state back on failure.
    if (!Connect()) {
        EL_ERROR("Failed to connect session");
        session_.state = RealtimeSessionState::Disconnected;
        return false;
    }

    return true;
}

bool CloudModelRealtimeAudio::DeleteSession()
{
    std::lock_guard<std::mutex> lock(session_mutex_);

    // Nothing to tear down when no session exists.
    if (session_.state == RealtimeSessionState::Disconnected) {
        EL_WARN("No active session to delete");
        return false;
    }

    // Close the connection first, then wipe per-session buffers.
    Disconnect();

    session_.contentPartTypes.clear();
    session_.contentPartTextBuffer.clear();
    {
        std::lock_guard<std::mutex> pcmLock(session_.pendingPcmMutex);
        session_.pendingPcmBuffer.clear();
    }
    session_.inputChunkSizeBytes = 0;
    session_.firstMessageLogged = false;
    session_.lastVideoFrameTime = 0; // reset video pacing state

    EL_INFO("Deleted session");
    return true;
}

bool CloudModelRealtimeAudio::Connect()
{
    if (!ws_context_) {
        EL_ERROR("Connect: WebSocket context not initialized");
        return false;
    }

    EL_INFO("Connect: Starting WebSocket connection to {}", ws_url_);

    // lws_parse_uri() tokenizes its argument IN PLACE (it writes NUL bytes
    // into the buffer).  The previous code const_cast'ed ws_url_.c_str(),
    // which mutated ws_url_'s internal buffer and corrupted the stored URL
    // for later logging/reconnects.  Parse a writable local copy instead;
    // prot/address/p point into urlBuf, which stays alive for the rest of
    // this function.
    std::vector<char> urlBuf(ws_url_.begin(), ws_url_.end());
    urlBuf.push_back('\0');

    const char *prot, *p;
    char path[512];
    const char *address;
    int port;

    if (lws_parse_uri(urlBuf.data(), &prot, &address, &port, &p)) {
        EL_ERROR("Connect: Failed to parse WebSocket URL: {}", ws_url_);
        return false;
    }

    EL_INFO("Connect: Parsed URL - protocol: {}, address: {}, port: {}, path: {}", prot, address, port, p);

    // lws_parse_uri strips the leading '/'; restore it for the connect info.
    snprintf(path, sizeof(path), "/%s", p);

    // Keep the Authorization header on the session so the lws callback can
    // append it during the handshake.
    session_.authHeader = "Bearer " + api_key_;

    EL_INFO("Connect: Preparing connection with Authorization");

    struct lws_client_connect_info ccinfo;
    memset(&ccinfo, 0, sizeof(ccinfo));

    ccinfo.context = ws_context_;
    ccinfo.address = address;
    // NOTE(review): the parsed port is ignored and 443 is forced (wss://);
    // confirm non-default ports are never required.
    ccinfo.port = 443; // HTTPS port for wss://
    ccinfo.ssl_connection =
        LCCSCF_USE_SSL | LCCSCF_ALLOW_SELFSIGNED | LCCSCF_SKIP_SERVER_CERT_HOSTNAME_CHECK | LCCSCF_ALLOW_INSECURE;
    ccinfo.path = path;
    ccinfo.host = address;
    ccinfo.origin = address;
    ccinfo.protocol = protocols[0].name;

    // Hand the session to libwebsockets so the protocol callback can
    // retrieve it via opaque user data.
    ccinfo.opaque_user_data = &session_;

    EL_DEBUG("Connect: Connection info - host: {}, origin: {}, protocol: {}, ssl: yes", ccinfo.host, ccinfo.origin,
             ccinfo.protocol);

    UpdateSessionState(&session_, RealtimeSessionState::Connecting);

    session_.wsi = lws_client_connect_via_info(&ccinfo);
    if (!session_.wsi) {
        EL_ERROR("Connect: Failed to create WebSocket connection");
        UpdateSessionState(&session_, RealtimeSessionState::Error);
        return false;
    }

    EL_INFO("Connect: WebSocket connection initiated (wsi: {})", static_cast<void *>(session_.wsi));

    return true;
}

void CloudModelRealtimeAudio::Disconnect()
{
    // Stop speaker output before tearing down the connection.
    if (session_.isPlaying.load()) {
        El::Media::Speak::GetInstance().Stop();
        session_.isPlaying.store(false);
        EL_INFO("Stopped audio playback (disconnecting)");
    }

    // Ask lws to close the connection asynchronously; its callback performs
    // the actual cleanup.
    if (session_.wsi) {
        lws_set_timeout(session_.wsi, PENDING_TIMEOUT_USER_OK, LWS_TO_KILL_ASYNC);
        session_.wsi = nullptr;
    }

    // Drop any un-sent input audio and reset per-connection bookkeeping.
    {
        std::lock_guard<std::mutex> pcmLock(session_.pendingPcmMutex);
        session_.pendingPcmBuffer.clear();
    }
    session_.inputChunkSizeBytes = 0;
    session_.firstMessageLogged = false;

    UpdateSessionState(&session_, RealtimeSessionState::Disconnected);
}

bool CloudModelRealtimeAudio::SendAudioData(const std::vector<uint8_t> &audioData)
{
    SessionContext *session = GetSession();

    if (session->state != RealtimeSessionState::Connected && session->state != RealtimeSessionState::Speaking) {
        EL_WARN("SendAudioData: Session not in valid state (current: {})", static_cast<int>(session->state));
        return false;
    }

    // 构建 input_audio_buffer.append 事件(Base64编码音频)
    std::string base64Audio = El::Utils::Base64Encode(audioData);

    // 生成唯一的event_id
    uint64_t eventId = session->nextEventId.fetch_add(1);

    nlohmann::json event = {{"type", "input_audio_buffer.append"},
                            {"event_id", "event_" + std::to_string(eventId)},
                            {"audio", base64Audio}};

    LogSendEventInfo(event, audioData.size());

    std::string jsonStr = event.dump();

    std::vector<uint8_t> data(jsonStr.begin(), jsonStr.end());

    {
        std::lock_guard<std::mutex> lock(session->sendMutex);
        if (session->sendQueue.size() >= kMaxSendQueueSize) {
            EL_WARN("SendAudioData: Send queue full (size: {}), dropping audio data", session->sendQueue.size());
            return false;
        }
        session->sendQueue.push(std::move(data));
    }

    if (!session->wsi) {
        EL_ERROR("SendAudioData: No WebSocket connection");
        return false;
    }

    ScheduleWritable(session);
    return true;
}

bool CloudModelRealtimeAudio::SendTextMessage(const std::string &text)
{
    SessionContext *session = GetSession();

    // 生成唯一的event_id
    uint64_t eventId = session->nextEventId.fetch_add(1);

    // 构建 conversation.item.create 事件
    nlohmann::json event = {
        {"type", "conversation.item.create"},
        {"event_id", "event_" + std::to_string(eventId)},
        {"item", {{"type", "message"}, {"role", "user"}, {"content", {{{"type", "input_text"}, {"text", text}}}}}}};

    LogSendEventInfo(event, 0);

    std::string jsonStr = event.dump();
    std::vector<uint8_t> data(jsonStr.begin(), jsonStr.end());

    {
        std::lock_guard<std::mutex> lock(session->sendMutex);
        if (session->sendQueue.size() >= kMaxSendQueueSize) {
            EL_WARN("Send queue full");
            return false;
        }
        session->sendQueue.push(std::move(data));
    }

    if (session->wsi) {
        ScheduleWritable(session);
    }

    EL_INFO("Text message sent: {}", text);
    return true;
}

// Returns a snapshot of the current session state.
// NOTE(review): read without holding session_mutex_, so callers may observe
// a slightly stale value — confirm this is acceptable for status polling.
RealtimeSessionState CloudModelRealtimeAudio::GetSessionState() const
{
    return session_.state;
}

bool CloudModelRealtimeAudio::ConfigureVAD(bool vadEnabled, float vadThreshold)
{
    SessionContext *session = const_cast<CloudModelRealtimeAudio *>(this)->GetSession();

    session->vadEnabled = vadEnabled;
    session->vadThreshold = std::clamp(vadThreshold, 0.0f, 1.0f);

    // 生成唯一的event_id
    uint64_t eventId = session->nextEventId.fetch_add(1);

    // 发送 session.update 事件配置VAD
    nlohmann::json event = {{"type", "session.update"},
                            {"event_id", "event_" + std::to_string(eventId)},
                            {"session",
                             {{"turn_detection",
                               {{"type", vadEnabled ? "server_vad" : "none"},
                                {"threshold", session->vadThreshold},
                                {"prefix_padding_ms", 300},
                                {"silence_duration_ms", 800}}}}}};

    LogSendEventInfo(event, 0);

    std::string jsonStr = event.dump();
    std::vector<uint8_t> data(jsonStr.begin(), jsonStr.end());

    {
        std::lock_guard<std::mutex> lock(session->sendMutex);
        session->sendQueue.push(std::move(data));
    }

    if (session->wsi) {
        ScheduleWritable(session);
    }

    EL_INFO("VAD configured: enabled={}, threshold={}", vadEnabled, session->vadThreshold);
    return true;
}

bool CloudModelRealtimeAudio::SendVideoFrame(const std::vector<uint8_t> &videoData)
{
    SessionContext *session = GetSession();

    // Frames are only accepted while the session is usable.
    const bool stateOk = session->state == RealtimeSessionState::Connected ||
                         session->state == RealtimeSessionState::Speaking;
    if (!stateOk) {
        EL_WARN("SendVideoFrame: Session not in valid state (current: {})", static_cast<int>(session->state));
        return false;
    }

    if (videoData.empty()) {
        EL_WARN("SendVideoFrame: Empty video data");
        return false;
    }

    // Build the input_video.append event with Base64-encoded frame data and
    // a unique event id.
    const uint64_t eventId = session->nextEventId.fetch_add(1);
    nlohmann::json event = {{"type", "input_video.append"},
                            {"event_id", "event_" + std::to_string(eventId)},
                            {"video", El::Utils::Base64Encode(videoData)}};

    EL_INFO("Send video frame: event_id={}, video_bytes={}", "event_" + std::to_string(eventId), videoData.size());

    const std::string serialized = event.dump();
    std::vector<uint8_t> payload(serialized.begin(), serialized.end());

    // Enqueue under the send lock, dropping when the queue is saturated.
    {
        std::lock_guard<std::mutex> lock(session->sendMutex);
        if (session->sendQueue.size() >= kMaxSendQueueSize) {
            EL_WARN("SendVideoFrame: Send queue full (size: {}), dropping video frame", session->sendQueue.size());
            return false;
        }
        session->sendQueue.push(std::move(payload));
    }

    if (!session->wsi) {
        EL_ERROR("SendVideoFrame: No WebSocket connection");
        return false;
    }

    ScheduleWritable(session);
    return true;
}

bool CloudModelRealtimeAudio::SetVideoFrameInterval(uint32_t intervalMs)
{
    // Store the minimum spacing (in ms) between forwarded video frames.
    GetSession()->videoFrameInterval = intervalMs;
    EL_INFO("Video frame interval set to {} ms", intervalMs);
    return true;
}

void CloudModelRealtimeAudio::UpdateSessionState(SessionContext *session, RealtimeSessionState newState)
{
    // No-op when the state is unchanged, so observers only see transitions.
    if (session->state == newState) {
        return;
    }

    EL_DEBUG("State changed: {} -> {}", static_cast<int>(session->state), static_cast<int>(newState));
    session->state = newState;

    if (session->callbacks.onStateChanged) {
        session->callbacks.onStateChanged(newState);
    }
}

// Accessor for the single session context owned by this instance; never
// returns null.
CloudModelRealtimeAudio::SessionContext *CloudModelRealtimeAudio::GetSession()
{
    return &session_;
}

bool CloudModelRealtimeAudio::EnableAutoAudioStream(int32_t channelId)
{
    // Both the service loop and the transcoder must exist before frames can
    // be consumed.
    if (!running_ || !transcoder_) {
        EL_ERROR(
            "EnableAutoAudioStream: Cannot enable audio stream - service not started (running: {}, transcoder: {})",
            running_, (transcoder_ != nullptr));
        return false;
    }

    // Idempotent: a second enable is a no-op success.
    if (audioStream_) {
        EL_WARN("EnableAutoAudioStream: Audio stream already enabled");
        return true;
    }

    EL_INFO("EnableAutoAudioStream: Creating audio stream source for channel {}", channelId);

    // Stream type 0 selects the audio (AAC) stream on this channel.
    audioStream_ = El::Media::StreamSource::Create(channelId, 0);
    if (!audioStream_) {
        EL_ERROR("EnableAutoAudioStream: Failed to create audio stream source for channel {}", channelId);
        return false;
    }

    EL_INFO("EnableAutoAudioStream: Registering audio frame callback");

    // Every delivered frame is routed into ProcessAudioFrame.
    streamHandle_ = audioStream_->Register([this](const El::Media::MediaFramePtr &frame) { ProcessAudioFrame(frame); });

    EL_INFO("EnableAutoAudioStream: Starting audio stream (handle: {})", streamHandle_);
    audioStream_->Start();

    EL_INFO("EnableAutoAudioStream: Auto audio stream enabled on channel {}", channelId);
    return true;
}

void CloudModelRealtimeAudio::DisableAutoAudioStream()
{
    if (!audioStream_) {
        return; // nothing enabled
    }

    EL_INFO("Disabling auto audio stream");

    // Unhook our frame callback before stopping the source.
    if (streamHandle_ >= 0) {
        audioStream_->Unregister(streamHandle_);
        streamHandle_ = -1;
    }

    audioStream_->Stop();
    audioStream_.reset();

    // Force out whatever input PCM is still buffered below the chunk
    // threshold.
    FlushPendingInputAudio(GetSession(), true);

    EL_INFO("Auto audio stream disabled");
}

bool CloudModelRealtimeAudio::EnableAutoVideoStream(int32_t channelId)
{
    // The service loop must be running before frames can be consumed.
    if (!running_) {
        EL_ERROR("EnableAutoVideoStream: Cannot enable video stream - service not started");
        return false;
    }

    // Idempotent: a second enable is a no-op success.
    if (videoStream_) {
        EL_WARN("EnableAutoVideoStream: Video stream already enabled");
        return true;
    }

    EL_INFO("EnableAutoVideoStream: Creating video stream source for channel {}", channelId);

    // Stream type 1 selects the video stream on this channel.
    videoStream_ = El::Media::StreamSource::Create(channelId, 1);
    if (!videoStream_) {
        EL_ERROR("EnableAutoVideoStream: Failed to create video stream source for channel {}", channelId);
        return false;
    }

    EL_INFO("EnableAutoVideoStream: Registering video frame callback");

    // Every delivered frame is routed into ProcessVideoFrame.
    videoStreamHandle_ =
        videoStream_->Register([this](const El::Media::MediaFramePtr &frame) { ProcessVideoFrame(frame); });

    EL_INFO("EnableAutoVideoStream: Starting video stream (handle: {})", videoStreamHandle_);
    videoStream_->Start();

    EL_INFO("EnableAutoVideoStream: Auto video stream enabled on channel {}", channelId);
    return true;
}

void CloudModelRealtimeAudio::DisableAutoVideoStream()
{
    if (!videoStream_) {
        return; // nothing enabled
    }

    EL_INFO("Disabling auto video stream");

    // Unhook our frame callback before stopping the source.
    if (videoStreamHandle_ >= 0) {
        videoStream_->Unregister(videoStreamHandle_);
        videoStreamHandle_ = -1;
    }

    videoStream_->Stop();
    videoStream_.reset();

    EL_INFO("Auto video stream disabled");
}

void CloudModelRealtimeAudio::ProcessAudioFrame(const El::Media::MediaFramePtr &frame)
{
    // Accept only valid audio frames.
    if (!frame || !frame->IsAudioFrame()) {
        return;
    }

    SessionContext *session = GetSession();

    // Only AAC input is supported by the transcoder.
    if (frame->GetFrameType() != MEDIA_FRAME_AAC) {
        EL_WARN("ProcessAudioFrame: Expected AAC audio, got frame type: {}", frame->GetFrameType());
        return;
    }

    // Drop frames while the session cannot accept audio.
    if (session->state != RealtimeSessionState::Connected && session->state != RealtimeSessionState::Speaking) {
        return;
    }

    // Validate the payload before copying.  ProcessVideoFrame already guards
    // this way; the original audio path copied from the buffer unchecked and
    // would crash on a null buffer or negative length.
    uint8_t *buffer = frame->GetBuffer();
    int32_t length = frame->GetLength();
    if (buffer == nullptr || length <= 0) {
        EL_WARN("ProcessAudioFrame: Invalid audio frame data");
        return;
    }
    std::vector<uint8_t> aacData(buffer, buffer + length);

    // AAC -> PCM (16 kHz) for upload.
    std::vector<uint8_t> pcmData;
    if (!transcoder_->DecodeAacToPcm(aacData, pcmData)) {
        EL_ERROR("ProcessAudioFrame: Failed to decode AAC to PCM");
        return;
    }

    // The decoder may buffer internally and emit nothing for this frame.
    if (pcmData.empty()) {
        return;
    }

    // Accumulate PCM; FlushPendingInputAudio sends complete chunks.
    {
        std::lock_guard<std::mutex> lock(session->pendingPcmMutex);
        session->pendingPcmBuffer.insert(session->pendingPcmBuffer.end(), pcmData.begin(), pcmData.end());
    }

    FlushPendingInputAudio(session, false);
}

void CloudModelRealtimeAudio::ProcessVideoFrame(const El::Media::MediaFramePtr &frame)
{
    // Accept only valid video frames.
    if (!frame || !frame->IsVideoFrame()) {
        return;
    }

    SessionContext *session = GetSession();

    // Drop frames while the session cannot accept media.
    const bool stateOk = session->state == RealtimeSessionState::Connected ||
                         session->state == RealtimeSessionState::Speaking;
    if (!stateOk) {
        return;
    }

    // Only key frames (I-frames) are forwarded.
    if (frame->GetFrameType() != MEDIA_FRAME_I) {
        return;
    }

    // Only H.264 / H.265 payloads are supported.
    const uint32_t codecType = frame->GetCodecType();
    if (codecType != MEDIA_VIDEO_CODEC_H264 && codecType != MEDIA_VIDEO_CODEC_H265) {
        EL_WARN("ProcessVideoFrame: Unsupported codec type: {}", codecType);
        return;
    }

    // Use the PTS as the pacing clock, falling back to UTC when PTS is 0.
    uint64_t currentTime = frame->GetPts();
    if (currentTime == 0) {
        currentTime = frame->GetUtc();
    }

    // Rate-limit: skip the frame if the configured interval (ms -> us) has
    // not elapsed since the last frame we sent.
    const uint64_t intervalUs = static_cast<uint64_t>(session->videoFrameInterval) * 1000;
    if (session->lastVideoFrameTime > 0 && (currentTime - session->lastVideoFrameTime) < intervalUs) {
        return;
    }
    session->lastVideoFrameTime = currentTime;

    // Validate the payload before copying.
    uint8_t *buffer = frame->GetBuffer();
    const int32_t length = frame->GetLength();
    if (buffer == nullptr || length <= 0) {
        EL_WARN("ProcessVideoFrame: Invalid video frame data");
        return;
    }

    EL_INFO("ProcessVideoFrame: Sending I-frame, codec={}, size={} bytes, pts={} us",
            codecType == MEDIA_VIDEO_CODEC_H264 ? "H264" : "H265", length, currentTime);

    if (!SendVideoFrame(std::vector<uint8_t>(buffer, buffer + length))) {
        EL_WARN("ProcessVideoFrame: Failed to send video frame");
    }
}

void CloudModelRealtimeAudio::HandleCloudAudioForPlayback(SessionContext *session, const std::vector<uint8_t> &pcmData)
{
    if (pcmData.empty()) {
        EL_WARN("HandleCloudAudioForPlayback: Received empty PCM data");
        return;
    }

    EL_DEBUG("HandleCloudAudioForPlayback: Encoding {} bytes of PCM ({} Hz) to AAC 16kHz", pcmData.size(),
             session->outputSampleRate);

    // PCM -> AAC (resampled to 16 kHz) for the speaker pipeline.
    std::vector<uint8_t> aacData;
    if (!transcoder_->EncodePcmToAac(pcmData, aacData)) {
        EL_ERROR("HandleCloudAudioForPlayback: Failed to encode PCM to AAC");
        return;
    }

    // The encoder may be buffering and produce no output for this chunk.
    if (aacData.empty()) {
        EL_DEBUG("HandleCloudAudioForPlayback: Encoder buffering, no AAC output yet");
        return;
    }

    EL_DEBUG("HandleCloudAudioForPlayback: Encoded AAC data: {} bytes", aacData.size());

    auto &speak = El::Media::Speak::GetInstance();

    // Lazily start streaming playback on the first chunk.
    if (!session->isPlaying.load()) {
        EL_INFO("HandleCloudAudioForPlayback: Starting stream playback");

        // media_speak decodes AAC internally; configure it for 16 kHz mono.
        HAL_ADEC_COM_CONFIG_S config;
        config.eType = HAL_AUDIO_TYPE_AAC;
        config.u32SampleRate = HAL_AUDIO_SAMPLE_RATE_16000;
        config.u8ChanNum = 1;
        config.u8Vol = 100;

        if (!speak.StartStreamPlay(config)) {
            EL_ERROR("HandleCloudAudioForPlayback: Failed to start stream playback");
            return;
        }
        session->isPlaying.store(true);
        EL_INFO("HandleCloudAudioForPlayback: Audio playback started");
    }

    EL_DEBUG("HandleCloudAudioForPlayback: Pushing {} bytes of AAC to speaker", aacData.size());

    if (speak.PushAudioData(reinterpret_cast<const char *>(aacData.data()), aacData.size())) {
        EL_DEBUG("HandleCloudAudioForPlayback: Successfully pushed audio to speaker");
    } else {
        EL_WARN("HandleCloudAudioForPlayback: Failed to push audio data to speaker");
    }
}

} // namespace Cloud
} // namespace El
