#include "llm_infer.h"

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <future>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "lite_turbo/llm.h"
#include "lite_turbo/tokenizer.h"
#include "utils_ndk.h"

using namespace mindspore;

void LLMInfer::ParseLLMConfig(const char *configStr) {
    // Parse a "key:value;key:value;..." string of generation parameters into
    // max_gen_len_ and llm_cfg_. Unknown keys are ignored.
    LOGI("configStr is %{public}s", configStr);

    // Fixed model/runtime defaults are applied BEFORE parsing so that nothing
    // parsed below can be silently clobbered afterwards (the original set them
    // last, which would overwrite any future config key for these fields).
    llm_cfg_.max_length = 1024;
    llm_cfg_.chunk_size = 128;
    llm_cfg_.embedding_quant = true;
    llm_cfg_.device = "HIAI";
    llm_cfg_.vocab_size = 151936;
    llm_cfg_.hidden_size = 896;
    llm_cfg_.num_attention_heads = 14;
    llm_cfg_.num_key_value_heads = 2;
    llm_cfg_.backend = "HIAI";
    llm_cfg_.eos_id = 151643;

    // Duplicate the input because strtok mutates its argument.
    char *copy = strdup(configStr);
    if (copy == nullptr) {
        return;
    }
    for (char *pair = strtok(copy, ";"); pair != nullptr; pair = strtok(nullptr, ";")) {
        // Split on the first ':' with strchr to avoid nested strtok state.
        char *colon_pos = strchr(pair, ':');
        if (colon_pos == nullptr) {
            continue;
        }
        *colon_pos = '\0'; // terminate the key; value starts right after
        const char *key = pair;
        const char *value = colon_pos + 1;

        if (strcmp(key, "max_length") == 0) {
            // Caps the number of tokens generated per request (not the
            // model context length, which stays llm_cfg_.max_length).
            max_gen_len_ = atoi(value);
            LOGI("set max_length %{public}d", max_gen_len_);
        } else if (strcmp(key, "do_sample") == 0) {
            // "true" or "1" enable sampling; any other string disables it.
            llm_cfg_.do_sample = (strcmp(value, "true") == 0 || strcmp(value, "1") == 0);
            LOGI("set do_sample %{public}d", llm_cfg_.do_sample);
        } else if (strcmp(key, "temperature") == 0) {
            llm_cfg_.temperature = atof(value);
            LOGI("set temperature %{public}f", llm_cfg_.temperature);
        } else if (strcmp(key, "top_k") == 0) {
            llm_cfg_.top_k = atoi(value);
            LOGI("set top_k %{public}d", llm_cfg_.top_k);
        } else if (strcmp(key, "random_seed") == 0) {
            llm_cfg_.seed = atoi(value);
            LOGI("set seed %{public}d", llm_cfg_.seed);
        } else if (strcmp(key, "scale_gp_size") == 0) {
            llm_cfg_.scale_gp_size = atoi(value);
            // Only 32 and 128 are accepted group sizes; log the error but
            // keep the value, matching the original behavior.
            if (llm_cfg_.scale_gp_size != 32 && llm_cfg_.scale_gp_size != 128) {
                LOGE("llm_cfg_.scale_gp_size is %{public}d, not equal 32 or 128, error.",
                     llm_cfg_.scale_gp_size);
            }
            LOGI("set scale_gp_size %{public}d", llm_cfg_.scale_gp_size);
        }
    }
    free(copy);
}

const char *LLMInfer::GetPerfStatString() {
    // Serializes perf_stat_ into a "name:value;..." string. Only meaningful
    // when perf recording was enabled at Build() time.
    if (!is_perf_record_) {
        return NULL;
    }
    // The returned pointer refers to this static buffer: it remains valid
    // after return, but the function is NOT thread-safe and any subsequent
    // call overwrites the previous contents.
    static char stat_buf[512];
    snprintf(stat_buf, sizeof(stat_buf),
             "load_ms:%.6f;"
             "prefill_ms:%.6f;"
             "prefill_tokens:%u;"
             "prefill_sample_ms:%.6f;"
             "decoding_ms:%.6f;"
             "decoding_tokens:%d;"
             "decoding_sample_ms:%.6f;"
             "sample_ms:%.6f;"
             "sample_tokens:%d;"
             "total_ms:%.6f",
             perf_stat_.load_ms, perf_stat_.prefill_ms, perf_stat_.prefill_tokens,
             perf_stat_.prefill_sample_ms, perf_stat_.decoding_ms, perf_stat_.decoding_tokens,
             perf_stat_.decoding_sample_ms, perf_stat_.sample_ms, perf_stat_.sample_tokens,
             perf_stat_.total_ms);
    return stat_buf;
}

void LLMInfer::SetLLMModelPath(const char *model_file_path) {
    // Point the packer config at the model directory and register the fixed
    // asset file names shipped with the Qwen-0.5B package.
    auto &packer = llm_cfg_.ms_packer;
    packer.pack_path = std::string(model_file_path);
    packer.model_name = "Qwen_0_5B_unify_1_128.omc";
    packer.rope_cos_name = "cos_1024.bin";
    packer.rope_sin_name = "sin_1024.bin";
    packer.embedding_weight_name = "embedding_weight_4bit.bin";
    packer.attention_mask_name = "attention_mask_1024.bin";
    packer.tokenizer_name = "tokenizer.txt";
}

int LLMInfer::Build(const char *generate_params, const char *model_file_path, bool is_record_perf_data) {
    // Builds the LLM model and loads the tokenizer (asynchronously, joined
    // below). Returns 0 on success, -1 on failure. Idempotent once built.
    // NOTE: the original duplicated the whole build sequence for the
    // perf/non-perf cases; they are unified here — only the timestamp
    // recording is conditional.
    LOGI("start build model. is_perf_record: %{public}d", is_record_perf_data);
    is_perf_record_ = is_record_perf_data;
    ParseLLMConfig(generate_params);
    if (is_build_) {
        return 0;
    }
    SetLLMModelPath(model_file_path);
    LOGI("start load tokenizer.");
    // Tokenizer loads on a worker thread while the model builds below.
    Tokenizer::CreateTokenizerAsyncFromCfg(llm_cfg_);

    // Dump every LLMConfig member for debugging.
    LOGI("llm_cfg_.max_length: %{public}ld", llm_cfg_.max_length);
    LOGI("llm_cfg_.chunk_size: %{public}ld", llm_cfg_.chunk_size);
    LOGI("llm_cfg_.vocab_size: %{public}ld", llm_cfg_.vocab_size);
    LOGI("llm_cfg_.hidden_size: %{public}d", llm_cfg_.hidden_size);
    LOGI("llm_cfg_.num_attention_heads: %{public}d", llm_cfg_.num_attention_heads);
    LOGI("llm_cfg_.num_key_value_heads: %{public}d", llm_cfg_.num_key_value_heads);
    LOGI("llm_cfg_.eos_id: %{public}ld", llm_cfg_.eos_id);
    LOGI("llm_cfg_.model_path: %{public}s", llm_cfg_.model_path.c_str());
    LOGI("llm_cfg_.backend: %{public}s", llm_cfg_.backend.c_str());
    LOGI("llm_cfg_.device: %{public}s", llm_cfg_.device.c_str());
    LOGI("llm_cfg_.embedding_bin_path: %{public}s", llm_cfg_.embedding_bin_path.c_str());
    LOGI("llm_cfg_.sin_bin_path: %{public}s", llm_cfg_.sin_bin_path.c_str());
    LOGI("llm_cfg_.cos_bin_path: %{public}s", llm_cfg_.cos_bin_path.c_str());
    LOGI("llm_cfg_.om_weight_dir_p: %{public}s", llm_cfg_.om_weight_dir_p.c_str());
    LOGI("llm_cfg_.om_weight_dir_d: %{public}s", llm_cfg_.om_weight_dir_d.c_str());
    LOGI("llm_cfg_.embedding_quant: %{public}d", llm_cfg_.embedding_quant);
    LOGI("llm_cfg_.attention_mask_bin_path: %{public}s", llm_cfg_.attention_mask_bin_path.c_str());

    // Sampling configuration.
    LOGI("llm_cfg_.temperature: %{public}f", llm_cfg_.temperature);
    LOGI("llm_cfg_.top_k: %{public}u", llm_cfg_.top_k);
    LOGI("llm_cfg_.do_sample: %{public}d", llm_cfg_.do_sample);
    LOGI("llm_cfg_.seed: %{public}u", llm_cfg_.seed);

    // Nested MSPackerConfig.
    LOGI("llm_cfg_.ms_packer.pack_path: %{public}s", llm_cfg_.ms_packer.pack_path.c_str());
    LOGI("llm_cfg_.ms_packer.model_name: %{public}s", llm_cfg_.ms_packer.model_name.c_str());
    LOGI("llm_cfg_.ms_packer.rope_cos_name: %{public}s", llm_cfg_.ms_packer.rope_cos_name.c_str());
    LOGI("llm_cfg_.ms_packer.rope_sin_name: %{public}s", llm_cfg_.ms_packer.rope_sin_name.c_str());
    LOGI("llm_cfg_.ms_packer.embedding_weight_name: %{public}s", llm_cfg_.ms_packer.embedding_weight_name.c_str());
    LOGI("llm_cfg_.ms_packer.attention_mask_name: %{public}s", llm_cfg_.ms_packer.attention_mask_name.c_str());
    LOGI("llm_cfg_.ms_packer.tokenizer_name: %{public}s", llm_cfg_.ms_packer.tokenizer_name.c_str());

    auto start = std::chrono::high_resolution_clock::now();
    llm_model_ = std::make_shared<LLM>();
    LOGI("start build llm model");
    int build_ret = llm_model_->Build(llm_cfg_);
    // Join the async tokenizer load BEFORE judging the build result (same
    // order as the original) so the worker result is always consumed.
    auto async_tokenizer = Tokenizer::GetTokenizerAsyncResult();
    if (async_tokenizer != nullptr) {
        tokenizer_ = std::shared_ptr<Tokenizer>(async_tokenizer);
        LOGI("load tokenizer end.");
    } else {
        LOGI("load tokenizer failed.");
        return -1;
    }
    if (build_ret != 0) {
        LOGE("=========== model build failed =============\n");
        return -1;
    }
    if (is_perf_record_) {
        auto end = std::chrono::high_resolution_clock::now();
        // Elapsed build time in milliseconds.
        auto duration = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(end - start);
        perf_stat_.load_ms = duration.count();
    }
    LOGI("end build model");
    is_build_ = true;
    return 0;
}

int LLMInfer::GenerateWithPerfStat(const std::string &input_text, std::string &gene_text) {
    // Autoregressive generation with per-phase timings recorded into
    // perf_stat_. Returns 0 on success, -1 on failure.
    std::string prompt = input_text;
    // Guard against use before Build() succeeded.
    if (tokenizer_ == nullptr || llm_model_ == nullptr) {
        return -1;
    }
    std::vector<int> input_ids = tokenizer_->Encode(prompt);
    if (input_ids.empty()) {
        LOGE("input_ids size: is 0, generate failed!");
        return -1;
    }
    perf_stat_.prefill_tokens = input_ids.size();
    int output_ids = 0;
    is_prefill_ = true;
    // Remaining token budget within the model's fixed context window.
    // Explicit int casts avoid the signed/unsigned mix of long - size_t.
    int max_gen_token = static_cast<int>(llm_cfg_.max_length) - static_cast<int>(input_ids.size());
    if (max_gen_token <= 0) {
        // Casts fix the varargs mismatch: size_t / long passed to %d.
        LOGE("input_ids size: %{public}d is out of limit: %{public}d, generate failed!",
             static_cast<int>(input_ids.size()), static_cast<int>(llm_cfg_.max_length));
        return -1;
    }
    double prefill_cost = 0;
    double decode_cost = 0;
    std::string result;
    int i = 0;
    for (; i < max_gen_token && i < max_gen_len_; ++i) {
        auto begin = std::chrono::high_resolution_clock::now();
        int ret = llm_model_->Generate(input_ids, &output_ids, is_prefill_);
        auto end = std::chrono::high_resolution_clock::now();
        auto cost = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(end - begin).count();
        // The first step is the prefill pass; every later step is decoding.
        if (i == 0) {
            prefill_cost = cost;
            perf_stat_.prefill_ms = prefill_cost;
        } else {
            decode_cost += cost;
        }

        if (ret != 0) {
            LOGE("generate failed!");
            return -1;
        }
        if (tokenizer_->IsStop(output_ids)) {
            break;
        }
        std::string out = tokenizer_->Decode(output_ids);
        result += out;
        // Feed the generated token back for the next decode step.
        input_ids.push_back(output_ids);
        is_prefill_ = false;
    }
    gene_text = result;
    perf_stat_.decoding_ms = decode_cost;
    perf_stat_.decoding_tokens = std::min(max_gen_token, i);
    perf_stat_.total_ms = perf_stat_.load_ms + perf_stat_.prefill_ms + perf_stat_.decoding_ms;
    LOGI("prompt: %{public}s\n", prompt.c_str());
    LOGI("generate result: %{public}s\n", result.c_str());
    auto perf_string = GetPerfStatString();
    LOGI("perf_str is %{public}s", perf_string);
    return 0;
}

int LLMInfer::Generate(const std::string &input_text, std::string &gene_text) {
    // Plain autoregressive generation without perf accounting.
    // Returns 0 on success, -1 on failure.
    std::string prompt = input_text;
    // Guard against use before Build() succeeded (the original dereferenced
    // tokenizer_ unchecked; GenerateWithPerfStat already had this guard).
    if (tokenizer_ == nullptr || llm_model_ == nullptr) {
        return -1;
    }
    std::vector<int> input_ids = tokenizer_->Encode(prompt);
    if (input_ids.empty()) {
        LOGE("input_ids size: is 0, generate failed!");
        return -1;
    }
    int output_ids = 0;
    is_prefill_ = true;
    // Remaining token budget within the model's fixed context window.
    int max_gen_token = static_cast<int>(llm_cfg_.max_length) - static_cast<int>(input_ids.size());
    if (max_gen_token <= 0) {
        // Casts fix the varargs mismatch: size_t / long passed to %d.
        LOGE("input_ids size: %{public}d is out of limit: %{public}d, generate failed!",
             static_cast<int>(input_ids.size()), static_cast<int>(llm_cfg_.max_length));
        return -1;
    }
    std::string result;
    for (int i = 0; i < max_gen_token && i < max_gen_len_; ++i) {
        int ret = llm_model_->Generate(input_ids, &output_ids, is_prefill_);
        if (ret != 0) {
            LOGE("generate failed!");
            return -1;
        }

        if (tokenizer_->IsStop(output_ids)) {
            break;
        }
        // Feed the generated token back for the next decode step.
        input_ids.push_back(output_ids);
        std::string out = tokenizer_->Decode(output_ids);
        result += out;
        is_prefill_ = false;
    }
    gene_text = result;
    return 0;
}

void LLMInfer::GenerateStream(const std::string &input_text) {
    if (input_text == "") {
        LOGE("input_text is empty");
        return;
    }
    // 重置状态
    is_stop_.store(false);
    is_processing_.store(true);
    async_stream_generate_future_ = std::async(std::launch::async, [this, input_text]() -> void {
        std::vector<int> input_ids = tokenizer_->Encode(input_text);
        if (input_ids.size() <= 0) {
            LOGE("input_ids size: is 0, generate failed!");
            return;
        }
        int output_ids;
        is_prefill_ = true;
        int max_gen_token = llm_cfg_.max_length - input_ids.size();
        if (max_gen_token <= 0) {
            LOGE("input_ids size: %{public}d is out of limit: %{public}d, generate failed!", input_ids.size(),
                 llm_cfg_.max_length);
            return;
        }
        for (int i = 0; i < max_gen_token && i < max_gen_len_; ++i) {
            if (is_stop_.load()) {
                LOGI("Generation stopped by user.");
                break;
            }
            int ret = llm_model_->Generate(input_ids, &output_ids, is_prefill_);
            if (ret != 0) {
                LOGE("generate failed!");
                return;
            }

            if (tokenizer_->IsStop(output_ids)) {
                break;
            }
            input_ids.push_back(output_ids);
            if (i != 0) {
                std::string out = tokenizer_->Decode(output_ids);
                que_.enqueue(new Word{output_ids, out, false});
                // LOGI("enque %{public}s, token_id:%{public}d", out.c_str(), output_ids);
            }

            is_prefill_ = false;
        }
        que_.enqueue(new Word{output_ids, "", true});
        is_processing_.store(false);
        return;
    });
}

Word *LLMInfer::GetStreamOneWord() {
    // Non-blocking fetch of the next streamed word. Returns nullptr when no
    // word is available yet. Ownership of the returned Word transfers to the
    // caller (who must delete it).
    if (que_.empty()) {
        return nullptr;
    }
    Word *word = que_.dequeue();
    if (word->is_end) {
        // Terminal marker: join the producer thread and discard any words
        // still queued behind the end marker.
        async_stream_generate_future_.get();
        while (!que_.empty()) {
            delete que_.dequeue();
        }
        LOGI("Destroy async stream genreate thread.");
    }
    return word;
}

// 异步生成接口
int LLMInfer::GenerateAsync(const std::string &input_text) {
    if (is_processing_.load()) {
        LOGE("Already processing another async request!");
        return -1;
    }

    // 重置状态
    is_stop_.store(false);
    is_processing_.store(true);
    async_result_.clear();

    // 启动异步任务
    async_future_ = std::async(std::launch::async, [this, input_text]() {
        std::string result;
        int ret = this->AsyncGenerateTask(input_text, result);
        if (ret == 0) {
            async_result_ = std::move(result);
        }
        is_processing_.store(false);
        return ret;
    });

    return 0;
}

// 内部异步执行函数
int LLMInfer::AsyncGenerateTask(const std::string &input_text, std::string &gene_text) {
    std::string prompt = input_text;
    std::vector<int> input_ids = tokenizer_->Encode(prompt);
    if (input_ids.size() <= 0) {
        LOGE("input_ids size: is 0, generate failed!");
        return -1;
    }
    int output_ids;
    is_prefill_ = true;
    int max_gen_token = llm_cfg_.max_length - input_ids.size();
    if (max_gen_token <= 0) {
        LOGE("input_ids size: %{public}d is out of limit: %{public}d, generate failed!", input_ids.size(),
             llm_cfg_.max_length);
        return -1;
    }
    std::string result;

    for (int i = 0; i < max_gen_token && i < max_gen_len_; ++i) {
        // 检查停止信号
        if (is_stop_.load()) {
            LOGI("Generation stopped by user.");
            gene_text = result;
            return 0;
        }

        int ret = llm_model_->Generate(input_ids, &output_ids, is_prefill_);
        if (ret != 0) {
            LOGE("generate failed!");
            return -1;
        }

        if (tokenizer_->IsStop(output_ids)) {
            break;
        }

        std::string out = tokenizer_->Decode(output_ids);
        result += out;

        input_ids.push_back(output_ids);
        is_prefill_ = false;
    }

    gene_text = result;
    LOGI("AsyncGenerateTask end, result is: %{public}s", gene_text.c_str());
    return 0;
}

void LLMInfer::Destroy() {
    // Release the model, tokenizer and cached tokens; a later Build() may
    // rebuild from scratch.
    is_build_ = false;
    generate_tokens_.clear();
    tokenizer_.reset();
    llm_model_.reset();
}

// Request cooperative cancellation of any in-flight generation loop.
void LLMInfer::Stop() {
    is_stop_.store(true);
}

// Returns true once the async generation has finished (or none is running).
bool LLMInfer::IsEnd() const {
    // The worker clears is_processing_ as its last act, so a cleared flag
    // means the task is done (or was never started).
    if (!is_processing_.load()) {
        return true;
    }
    // Still processing: poll the future without blocking.
    if (!async_future_.valid()) {
        return false;
    }
    return async_future_.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
}

// Fetch the async generation result, blocking until the worker finishes if it
// is still running. Returns 0 with gene_text filled on success, -1 otherwise.
int LLMInfer::GetAsyncResult(std::string &gene_text) {
    // Fast path: the worker already finished and stored its result.
    // NOTE(review): an empty-but-successful result is indistinguishable from
    // "no result" here and reports -1 — confirm callers accept that.
    if (!is_processing_.load()) {
        if (!async_result_.empty()) {
            gene_text = async_result_;
            return 0;
        }
        return -1;
    }

    // BUGFIX: calling get() on an invalid future (result already consumed by
    // a prior call, or no task started) throws std::future_error; fail
    // gracefully instead.
    if (!async_future_.valid()) {
        is_processing_.store(false);
        return -1;
    }

    // Block until the async task completes.
    int ret = async_future_.get();
    if (ret == 0) {
        gene_text = async_result_;
    }

    is_processing_.store(false);
    return ret;
}