#ifndef LLM_SERVER_H
#define LLM_SERVER_H

// C++ standard library
#include <functional>
#include <memory>
#include <string>

// Third-party
#include "curl/curl.h"

// Project
#include "conf.h"
#include "deepseek_server.h"
#include "spark_llm_server.h"

/// Facade over the available LLM backends (currently Deepseek; Spark is
/// stubbed out). Also owns the process-wide libcurl global state:
/// curl_global_init()/curl_global_cleanup() must be paired exactly once,
/// so this class is non-copyable and non-movable — create one instance.
class LlmServer {

public:
    LlmServer() {
        // NOTE(review): curl_global_init is not thread-safe; construct the
        // (single) LlmServer before spawning any worker threads.
        curl_global_init(CURL_GLOBAL_DEFAULT);

        dserver = std::make_unique<DeepseekServer>();
        // sserver = std::make_unique<SparkServer>();
    }

    ~LlmServer() {
        sys_log("llm server is being destroyed. start......");

        // Tear down backends before the curl global cleanup below, since
        // they may hold curl handles internally.
        sys_log("server class destroy......");
        dserver.reset();
        // sserver.reset();

        sys_log("curl global cleanup ......");
        curl_global_cleanup();
        sys_log("llm server is being destroyed. end......");
    }

    // A copied or moved-from instance would run curl_global_cleanup() a
    // second time on destruction; forbid both (Rule of Five).
    LlmServer(const LlmServer&) = delete;
    LlmServer& operator=(const LlmServer&) = delete;
    LlmServer(LlmServer&&) = delete;
    LlmServer& operator=(LlmServer&&) = delete;

    /// Sends `question` to the default backend (Deepseek).
    void post_request(const std::string& question) {
        dserver->postRequest(question);
    }

    /// Explicitly targets the Deepseek backend.
    void deepseekPostRequest(const std::string& question) {
        dserver->postRequest(question);
    }

    /// Spark backend is currently disabled; this is a no-op.
    void sparkPostRequest(const std::string& question) {
        (void)question; // suppress unused-parameter warning while disabled
        // sserver->postRequest(question);
    }

    /// Registers the callback invoked with the complete LLM response.
    void setUserCallback(std::function<void(const std::string&)> func) {
        dserver->setUserCallback(func);
        // sserver->setUserCallback(func);
    }

    /// Registers the callback invoked with each streamed response chunk.
    void setUserStreamCallback(std::function<void(const std::string&)> func) {
        dserver->setUserStreamCallback(func);
        // sserver->setUserStreamCallback(func);
    }

    /// Asks the backend whether the patient is ready to go back.
    /// `llmRet` is taken by value and moved into the delegate.
    bool check_patient_ready_to_go_back(const std::string& question, std::string llmRet) {
        return dserver->check_patient_ready_to_go_back(question, std::move(llmRet));
    }

    /// Asks the backend whether the patient is willing to continue.
    /// NOTE: name keeps the historical "Cotinue" typo for ABI/source
    /// compatibility — prefer checkPatientWillingToContinue() below.
    bool checkPatientWillingToCotinue(const std::string& question) {
        return dserver->checkPatientWillingToCotinue(question);
    }

    /// Correctly-spelled alias for checkPatientWillingToCotinue().
    bool checkPatientWillingToContinue(const std::string& question) {
        return checkPatientWillingToCotinue(question);
    }

private:
    std::unique_ptr<DeepseekServer> dserver; // owned Deepseek backend
    // std::unique_ptr<SparkServer> sserver;

};

#endif // LLM_SERVER_H
