//
// Created on 2025/6/26.
//
// Node APIs are not fully supported. To solve the compilation error of the interface cannot be found,
// please include "napi/native_api.h".

#ifndef MINDSPORE_LITE_LLM_INFER_H
#define MINDSPORE_LITE_LLM_INFER_H
#include <atomic>
#include <cstdint>
#include <future>
#include <memory>
#include <string>
#include <vector>

#include "lite_turbo/llm.h"
#include "lite_turbo/tokenizer.h"
#include "thread_safe_queue.h"

// One decoded token streamed from the generator to the consumer
// (queued via ThreadSafeQueue<Word *> and returned by GetStreamOneWord).
struct Word {
    int token_id{0};      // vocabulary id of the generated token
    std::string words;    // decoded text for this token
    bool is_end{false};   // true when this is the last word of the generation
};

// Performance counters collected during one generation run.
// All fields are zero-initialized so a default-constructed PerfStat
// reads as "no work done yet" (previously six fields were left
// uninitialized while four had {0} — inconsistent and a UB risk).
struct PerfStat {
    float load_ms{0};             // model load time
    float prefill_ms{0};          // prefill (prompt processing) time
    uint32_t prefill_tokens{0};   // number of prompt tokens processed
    float prefill_sample_ms{0};   // sampling time spent during prefill
    float decoding_ms{0};         // autoregressive decoding time
    int32_t decoding_tokens{0};   // number of tokens decoded
    float decoding_sample_ms{0};  // sampling time spent during decoding
    float sample_ms{0};           // total sampling time
    int32_t sample_tokens{0};     // number of tokens sampled
    float total_ms{0};            // end-to-end wall time
};

// Facade over mindspore::LLM + Tokenizer providing synchronous, asynchronous
// and streaming text generation. Declarations only; definitions live in the
// corresponding .cpp (not visible here).
class LLMInfer {
public:
    // Builds the model/tokenizer from a JSON-like parameter string and a model
    // file path; set is_record_perf_data to collect PerfStat during generation.
    // Returns a status code — presumably 0 on success; confirm in the .cpp.
    int Build(const char *generate_params, const char *model_file_path, bool is_record_perf_data = false);
    // Synchronous generation: appends/writes the generated text into gene_text.
    int Generate(const std::string &input_text, std::string &gene_text);
    // Same as Generate but also records timing data into perf_stat_.
    int GenerateWithPerfStat(const std::string &input_text, std::string &gene_text);
    // Requests cancellation of an in-flight generation (see is_stop_).
    void Stop();
    // Asynchronous generation interface: kicks off generation in the background.
    int GenerateAsync(const std::string &input_text);
    // Streaming generation: produces Word items consumed via GetStreamOneWord().
    void GenerateStream(const std::string &input_text);
    bool IsEnd() const; // Checks whether generation has finished.

    // Fetches the asynchronous generation result (blocks until completion).
    int GetAsyncResult(std::string &gene_text);

    void Destroy();
    void SetLLMModelPath(const char *);
    // Parses a configuration string into llm_cfg_ / related settings.
    void ParseLLMConfig(const char *configStr);
    // Returns a formatted summary of perf_stat_; lifetime of the returned
    // buffer is owned by this object — TODO confirm in the .cpp.
    const char *GetPerfStatString();
    // Pops the next streamed word from que_; ownership of the returned
    // pointer is presumably transferred to the caller — verify in the .cpp.
    Word *GetStreamOneWord();

private:
    // Internal worker executed by the async generation task.
    int AsyncGenerateTask(const std::string &input_text, std::string &gene_text);

    // Atomic flag used for cooperative stop control across threads.
    std::atomic<bool> is_stop_{false};
    ThreadSafeQueue<Word *> que_;                     // stream output queue
    std::future<void> async_stream_generate_future_;  // handle for GenerateStream task
    // Stores the result handle/text of the asynchronous generation task.
    std::future<int> async_future_;
    std::string async_result_;
    std::atomic<bool> is_processing_{false};          // true while an async task runs

    int max_gen_len_{100};                            // max tokens to generate
    PerfStat perf_stat_;                              // timing data (when recording enabled)
    mindspore::LLMConfig llm_cfg_;
    bool is_perf_record_{false};
    std::string tokenizer_file_;
    std::shared_ptr<mindspore::LLM> llm_model_;
    std::shared_ptr<mindspore::Tokenizer> tokenizer_;
    bool is_prefill_{true};                           // NOTE(review): purpose unclear from header — see .cpp
    bool is_build_{false};                            // set once Build() succeeds — TODO confirm
    std::vector<int> generate_tokens_;
    std::string generate_text;                        // NOTE(review): missing trailing underscore vs. other members
};
#endif // MINDSPORE_LITE_LLM_INFER_H
