#include "cloud_model_audio.h"

#include <curl/curl.h>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <thread>

#include "base_log.h"
#include "base_utils.h"
#include "common_config_manager.h"

namespace El {
namespace Cloud {

// Meyers singleton: the function-local static is constructed exactly once,
// on first use, and that construction is thread-safe since C++11.
CloudModelAudio &CloudModelAudio::GetInstance()
{
    static CloudModelAudio singleton;
    return singleton;
}

CloudModelAudio::CloudModelAudio() {}

void CloudModelAudio::Start()
{
    InitConfig();
    curl_global_init(CURL_GLOBAL_DEFAULT);
}

// Releases libcurl's global state. Must only run after all in-flight
// requests have finished; curl_global_cleanup is not thread-safe.
void CloudModelAudio::Stop()
{
    curl_global_cleanup();
}

// Reads the "cloud_model" configuration section.
// Required: api_key, max_context_length_audio (returns false if absent).
// Optional: model_audio (defaults to "qwen2-audio-instruct").
// Also fixes the DashScope endpoint URL.
bool CloudModelAudio::InitConfig()
{
    nlohmann::json config = El::Common::ConfigManager::GetInstance().GetConfig("cloud_model");
    if (config.contains("api_key")) {
        api_key_ = config["api_key"].get<std::string>();
        // SECURITY FIX: never write the full secret to the log — show only a
        // short prefix so operators can still tell which key is loaded.
        LOG_INFO("API key loaded (prefix: {}...)",
                 api_key_.substr(0, std::min<std::size_t>(4, api_key_.size())));
    } else {
        LOG_ERROR("API key not set in configuration");
        return false;
    }

    if (config.contains("model_audio")) {
        model_audio_ = config["model_audio"].get<std::string>();
        LOG_INFO("Model: {}", model_audio_);
    } else {
        model_audio_ = "qwen2-audio-instruct";
        LOG_INFO("Using default model: {}", model_audio_);
    }

    if (config.contains("max_context_length_audio")) {
        max_context_length_audio_ = config["max_context_length_audio"].get<size_t>();
        LOG_INFO("Max context length: {} bytes", max_context_length_audio_);
    } else {
        LOG_ERROR("Max context length not set in configuration");
        return false;
    }

    api_url_ = "https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation";

    return true;
}

int32_t CloudModelAudio::NewConversation()
{
    int32_t conversation_id = next_conversation_id_++;

    if (conversations_.size() >= 5) {
        auto it = std::min_element(conversations_.begin(), conversations_.end(),
                                   [](const auto &a, const auto &b) { return a.first < b.first; });

        if (it != conversations_.end()) {
            LOG_INFO("Deleting oldest conversation: {}", it->first);
            conversations_.erase(it);
        }
    }

    conversations_[conversation_id] = std::vector<nlohmann::json>();
    LOG_INFO("Created new conversation: {}", conversation_id);
    return conversation_id;
}

// Removes a conversation and its history.
// Returns true if the id existed, false otherwise.
bool CloudModelAudio::DeleteConversation(int32_t conversation_id)
{
    if (conversations_.erase(conversation_id) > 0) {
        LOG_INFO("Deleted conversation: {}", conversation_id);
        return true;
    }
    LOG_WARN("Attempted to delete non-existent conversation: {}", conversation_id);
    return false;
}

// Sends one audio clip plus a text prompt to the cloud model as a single
// multimodal user message. The audio file is read whole and base64-encoded
// inline. Returns the model's reply, or "" on failure (e.g. unreadable file).
std::string CloudModelAudio::ProcessAudio(int32_t conversation_id, const std::string& audio_file_path, const std::string& prompt)
{
    LOG_INFO("Processing audio: Conversation ID {}, Audio file {}", conversation_id, audio_file_path);

    std::ifstream audio_stream(audio_file_path, std::ios::binary);
    if (!audio_stream) {
        LOG_ERROR("Failed to open audio file: {}", audio_file_path);
        return "";
    }
    std::vector<unsigned char> raw_bytes((std::istreambuf_iterator<char>(audio_stream)),
                                         std::istreambuf_iterator<char>());
    const std::string base64_audio = El::Base::Base64Encode(raw_bytes);

    // Multimodal content: a text part followed by the base64 audio part.
    nlohmann::json message;
    message["role"] = "user";
    message["content"] = nlohmann::json::array();
    message["content"].push_back({{"type", "text"}, {"text", prompt}});
    message["content"].push_back({{"type", "audio"}, {"audio", base64_audio}});

    return ProcessMessage(conversation_id, message);
}

// Sends a plain text-only user message to the cloud model.
// Returns the model's reply, or "" on failure.
std::string CloudModelAudio::GenerateText(int32_t conversation_id, const std::string &prompt)
{
    LOG_INFO("Generating text: Conversation ID {}", conversation_id);

    nlohmann::json message;
    message["role"] = "user";
    message["content"] = prompt;

    return ProcessMessage(conversation_id, message);
}

// Appends `message` to the conversation history, trims the oldest entries
// until the estimated token total fits max_context_length_audio_, posts the
// whole conversation to the endpoint, and records the assistant's reply.
// Returns the reply text, or "" on transport/parse failure (in which case
// the user message stays in history, matching prior behavior).
std::string CloudModelAudio::ProcessMessage(int32_t conversation_id, const nlohmann::json &message)
{
    auto &conversation = conversations_[conversation_id];
    conversation.push_back(message);

    size_t total_tokens = 0;
    for (const auto &msg : conversation) {
        total_tokens += CalculateMessageTokens(msg);
    }

    // BUG FIX: the old loop could evict the message just pushed, sending a
    // request with an empty "messages" array. Always keep the newest entry.
    while (total_tokens > max_context_length_audio_ && conversation.size() > 1) {
        total_tokens -= CalculateMessageTokens(conversation.front());
        conversation.erase(conversation.begin());
    }
    if (total_tokens > max_context_length_audio_) {
        // A single message over budget is sent anyway and left to the server
        // to reject; dropping it here would silently lose the user's input.
        LOG_WARN("Single message exceeds max context length ({} > {})",
                 total_tokens, max_context_length_audio_);
    }

    nlohmann::json request_data = {
        {"model", model_audio_},
        {"input", {
            {"messages", conversation}
        }}
    };

    std::string post_data = request_data.dump();
    std::string response = SendHttpRequest(post_data, api_url_);
    if (response.empty()) {
        LOG_ERROR("HTTP request failed");
        return "";
    }

    std::string result = ParseResponse(response);
    if (!result.empty()) {
        // Keep the assistant turn so follow-up requests carry full context.
        nlohmann::json assistant_message = {{"role", "assistant"}, {"content", result}};
        conversation.push_back(assistant_message);
    }

    LOG_INFO("Processing complete, result length: {} characters", result.length());
    return result;
}

// libcurl write callback: appends the received chunk to the std::string
// registered via CURLOPT_WRITEDATA. Returning a value different from
// size * nmemb tells curl to abort the transfer (used here on allocation
// failure).
size_t CloudModelAudio::WriteCallback(void *contents, size_t size, size_t nmemb, std::string *s)
{
    const size_t chunk_size = size * nmemb;
    try {
        s->append(static_cast<const char *>(contents), chunk_size);
    } catch (const std::bad_alloc &) {
        return 0;  // out of memory — abort the transfer
    }
    return chunk_size;
}

// POSTs `post_data` (JSON) to `url` with bearer-token auth, retrying up to
// three times with a 1-second pause between attempts.
// Returns the response body, or "" if every attempt failed.
std::string CloudModelAudio::SendHttpRequest(const std::string &post_data, const std::string &url)
{
    CURL *curl = curl_easy_init();
    if (!curl) {
        LOG_ERROR("Failed to initialize CURL");
        return "";
    }

    std::string response_string;

    struct curl_slist *headers = nullptr;
    headers = curl_slist_append(headers, "Content-Type: application/json");
    // curl_slist_append copies the string, so the temporary is safe.
    headers = curl_slist_append(headers, ("Authorization: Bearer " + api_key_).c_str());

    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, post_data.c_str());
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response_string);
    curl_easy_setopt(curl, CURLOPT_TIMEOUT, 30L);
    curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10L);
    // SECURITY: peer/host verification is disabled, which permits MITM of a
    // request carrying the bearer token. Kept for compatibility with the
    // existing deployment, but this should be re-enabled with a proper CA
    // bundle (CURLOPT_CAINFO) as soon as possible.
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);
    curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2);

    // BUG FIX: curl may leave the error buffer untouched, so it must be
    // zeroed before use or the log could print uninitialized garbage.
    char errbuf[CURL_ERROR_SIZE];
    errbuf[0] = '\0';
    curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf);

    CURLcode res = CURLE_FAILED_INIT;
    int retries = 3;
    while (retries > 0) {
        errbuf[0] = '\0';  // reset per attempt so stale text is never logged
        res = curl_easy_perform(curl);
        if (res == CURLE_OK) {
            break;
        }
        --retries;  // side effect kept out of the log-argument list
        LOG_WARN("HTTP request failed: {} ({}), remaining retries: {}",
                 curl_easy_strerror(res), errbuf, retries);
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }

    curl_slist_free_all(headers);
    curl_easy_cleanup(curl);

    if (res != CURLE_OK) {
        LOG_ERROR("HTTP request ultimately failed: {} ({})", curl_easy_strerror(res), errbuf);
        return "";
    }

    return response_string;
}

// Extracts output.text from the DashScope JSON response.
// Returns "" when the payload is not valid JSON or the field is missing.
std::string CloudModelAudio::ParseResponse(const std::string &response)
{
    // Non-throwing parse: a malformed body yields a "discarded" value
    // instead of an exception.
    nlohmann::json root = nlohmann::json::parse(response, nullptr, false);
    if (root.is_discarded()) {
        LOG_ERROR("Failed to parse response: JSON format error");
        return "";
    }

    if (root.contains("output") && root["output"].contains("text")) {
        return root["output"]["text"].get<std::string>();
    }

    LOG_ERROR("Failed to extract result from response: {}", response);
    return "";
}

// Rough token estimate for audio payloads: assumes every 1 KiB of (base64)
// audio data costs one token. This is a budgeting heuristic, not a real
// tokenizer count.
size_t CloudModelAudio::CalculateAudioTokens(size_t audio_size)
{
    constexpr size_t kBytesPerToken = 1024;
    return audio_size / kBytesPerToken;
}

// Rough token estimate for text: one token per byte of the string.
// NOTE(review): a real tokenizer would count differently (especially for
// multi-byte UTF-8); this only needs to act as a context-size budget.
size_t CloudModelAudio::CalculateTextTokens(const std::string &text)
{
    return text.size();
}

// Estimates the token cost of one chat message. Supports both content forms
// used in this file: a plain string (text-only messages) and the multimodal
// array of {"type": "text"|"audio", ...} parts produced by ProcessAudio.
size_t CloudModelAudio::CalculateMessageTokens(const nlohmann::json &message)
{
    if (!message.contains("content")) {
        return 0;
    }

    const auto &content = message["content"];
    if (content.is_string()) {
        return CalculateTextTokens(content);
    }

    size_t tokens = 0;
    if (content.is_array()) {
        for (const auto &part : content) {
            if (part["type"] == "text") {
                tokens += CalculateTextTokens(part["text"]);
            } else if (part["type"] == "audio") {
                tokens += CalculateAudioTokens(part["audio"].get<std::string>().size());
            }
        }
    }
    return tokens;
}

} // namespace Cloud
} // namespace El