#include <chrono>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "api.h"
#include "log.h"
#include "server/defs.h"
#include "service/client_api.h"
#include "service/config.h"
#include "service/config_loader.h"
#include "service/service.h"
#ifdef ENABLE_DUMMY_PLUGIN
#include "dummy/model.h"
#include "dummy/session.h"
#endif

#ifdef ENABLE_LLAMACPP_PLUGIN
#include "llamacpp/plugin.h"
#endif

// Registers the dummy model and session creators used by this demo.
// Compiles to a no-op unless the build defines ENABLE_DUMMY_PLUGIN.
void RegisterDummy()
{
#ifdef ENABLE_DUMMY_PLUGIN
    // Two dummy models; registration results are captured in named bools so
    // the calls still execute even if ASSERT is compiled out.
    for (const char* name : {"DummyModel", "DummyModel2"}) {
        const bool modelOk = lms::RegisterModelCreator(std::make_unique<lms::DummyModelCreator>(name));
        ASSERT(modelOk);
    }

    // "DummySession" depends on one model, "DummySession2" on both.
    const bool session1Ok = RegisterSessionCreator(
        std::make_unique<lms::DummySessionCreator>("DummySession", std::vector<std::string>{"DummyModel"}));
    ASSERT(session1Ok);
    const bool session2Ok = RegisterSessionCreator(std::make_unique<lms::DummySessionCreator>(
        "DummySession2", std::vector<std::string>{"DummyModel", "DummyModel2"}));
    ASSERT(session2Ok);
#endif
}

template<typename Object>
std::string Dump(const Object& obj)
{
    return json(obj).dump();
}

auto MakeEvalLLMRequest(const std::string& model, const std::string& input, pid_t pid = 1)
{
    return Dump(lms::Request{
        .type = lms::Request::EVAL_LLM,
        .pid = pid,
        .request =
            lms::EvalLLMRequest{
                .model = model,
                .input = input,
            },
    });
}

// Builds a serialized GET_LLM_RESULT request polling for the result of task `id`.
auto MakeGetLLMResultRequest(lms::TaskID id, pid_t pid = 1)
{
    lms::GetLLMResultRequest payload{
        .id = id,
    };
    lms::Request request{
        .type = lms::Request::GET_LLM_RESULT,
        .pid = pid,
        .request = payload,
    };
    return Dump(request);
}

// Builds a serialized CANCEL request targeting the task `id`.
auto MakeCancelRequest(lms::TaskID id, pid_t pid = 1)
{
    lms::CancelRequest payload{
        .id = id,
    };
    lms::Request request{
        .type = lms::Request::CANCEL,
        .pid = pid,
        .request = payload,
    };
    return Dump(request);
}

auto MakeListModelsRequest(pid_t pid = 1)
{
    return Dump(lms::Request{
        .type = lms::Request::LIST_MODELS,
        .pid = pid,
        .request = json::object(),
    });
}

// Builds a serialized request whose type string is caller-supplied; used to
// exercise the service's handling of unknown request types.
// `type` is a sink parameter: taken by value and moved into the request,
// avoiding the extra copy the previous `.type = type` assignment made.
auto MakeErrorRequest(std::string type, pid_t pid = 1)
{
    return Dump(lms::Request{
        .type = std::move(type),
        .pid = pid,
        .request = json::object(),
    });
}

int main(int argc, char* argv[])
{
    if (argc < 2) {
        LOGE("Usage: %s <config_file>", argv[0]);
        return 1;
    }
    lms::Config config;
    if (!lms::ConfigLoader::LoadFromFile(argv[1], config)) {
        LOGE("LoadFromFile failed");
        return 1;
    }
    lms::Service::Init(config);

    RegisterDummy();

#ifdef ENABLE_LLAMACPP_PLUGIN
    llamacpp::Init();
#endif

    std::string model = "DummySession";
    std::string text = "<|im_start|>user\n你好，你是谁？<|im_end|>\n<|im_start|>assistant\n";
    lms::TaskID id = 1;

    // 模拟 Binder 下发请求

    std::string result;
    result = lms::Service::Instance()->Serve(MakeErrorRequest("error"));
    LOGI("[main] ErrorRequestResponse %s", result.c_str());

    result = lms::Service::Instance()->Serve(MakeListModelsRequest());
    LOGI("[main] ListModelsResponse %s", result.c_str());

    result = lms::Service::Instance()->Serve(MakeEvalLLMRequest(model, text));
    LOGI("[main] EvalLLMResponse %s", result.c_str());

    result = lms::Service::Instance()->Serve(MakeCancelRequest(id));
    LOGI("[main] CancelResponse %s", result.c_str());

    result = lms::Service::Instance()->Serve(MakeGetLLMResultRequest(id));
    LOGI("[main] GetLLMResultResponse %s", result.c_str());

    result = lms::Service::Instance()->Serve(MakeGetLLMResultRequest(id));
    LOGI("[main] GetLLMResultResponse %s", result.c_str());

    result = lms::Service::Instance()->Serve(MakeGetLLMResultRequest(2));
    LOGI("[main] GetLLMResultResponse %s", result.c_str());

    std::this_thread::sleep_for(std::chrono::seconds(1));

#ifdef ENABLE_LLAMACPP_PLUGIN
    llamacpp::Finalize();
#endif

    lms::Service::Finalize();
    return 0;
}
