// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Command-line tool to quantize a GGUF model into a lower-precision format.

#include <llama.h>

#include <cctype>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include <kllm/core/kv_override.h>
#include <kllm/tools/quantize/quantize.h>
#include <kllm/tools/service_context.h>

struct quant_option {
    std::string name;
    llama_ftype ftype;
    std::string desc;
};

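// Supported quantization types. The size and perplexity-delta figures are
// reference measurements against Llama-3-8B / Mistral-7B baselines and will
// differ for other models.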
static const std::vector<struct quant_option> QUANT_OPTIONS = {
        {"Q4_0",     LLAMA_FTYPE_MOSTLY_Q4_0,     " 4.34G, +0.4685 ppl @ Llama-3-8B",},
        {"Q4_1",     LLAMA_FTYPE_MOSTLY_Q4_1,     " 4.78G, +0.4511 ppl @ Llama-3-8B",},
        {"Q5_0",     LLAMA_FTYPE_MOSTLY_Q5_0,     " 5.21G, +0.1316 ppl @ Llama-3-8B",},
        {"Q5_1",     LLAMA_FTYPE_MOSTLY_Q5_1,     " 5.65G, +0.1062 ppl @ Llama-3-8B",},
        {"IQ2_XXS",  LLAMA_FTYPE_MOSTLY_IQ2_XXS,  " 2.06 bpw quantization",},
        {"IQ2_XS",   LLAMA_FTYPE_MOSTLY_IQ2_XS,   " 2.31 bpw quantization",},
        {"IQ2_S",    LLAMA_FTYPE_MOSTLY_IQ2_S,    " 2.5  bpw quantization",},
        {"IQ2_M",    LLAMA_FTYPE_MOSTLY_IQ2_M,    " 2.7  bpw quantization",},
        {"IQ1_S",    LLAMA_FTYPE_MOSTLY_IQ1_S,    " 1.56 bpw quantization",},
        {"IQ1_M",    LLAMA_FTYPE_MOSTLY_IQ1_M,    " 1.75 bpw quantization",},
        {"TQ1_0",    LLAMA_FTYPE_MOSTLY_TQ1_0,    " 1.69 bpw ternarization",},
        {"TQ2_0",    LLAMA_FTYPE_MOSTLY_TQ2_0,    " 2.06 bpw ternarization",},
        {"Q2_K",     LLAMA_FTYPE_MOSTLY_Q2_K,     " 2.96G, +3.5199 ppl @ Llama-3-8B",},
        {"Q2_K_S",   LLAMA_FTYPE_MOSTLY_Q2_K_S,   " 2.96G, +3.1836 ppl @ Llama-3-8B",},
        {"IQ3_XXS",  LLAMA_FTYPE_MOSTLY_IQ3_XXS,  " 3.06 bpw quantization",},
        {"IQ3_S",    LLAMA_FTYPE_MOSTLY_IQ3_S,    " 3.44 bpw quantization",},
        {"IQ3_M",    LLAMA_FTYPE_MOSTLY_IQ3_M,    " 3.66 bpw quantization mix",},
        {"Q3_K",     LLAMA_FTYPE_MOSTLY_Q3_K_M,   "alias for Q3_K_M"},
        {"IQ3_XS",   LLAMA_FTYPE_MOSTLY_IQ3_XS,   " 3.3 bpw quantization",},
        {"Q3_K_S",   LLAMA_FTYPE_MOSTLY_Q3_K_S,   " 3.41G, +1.6321 ppl @ Llama-3-8B",},
        {"Q3_K_M",   LLAMA_FTYPE_MOSTLY_Q3_K_M,   " 3.74G, +0.6569 ppl @ Llama-3-8B",},
        {"Q3_K_L",   LLAMA_FTYPE_MOSTLY_Q3_K_L,   " 4.03G, +0.5562 ppl @ Llama-3-8B",},
        {"IQ4_NL",   LLAMA_FTYPE_MOSTLY_IQ4_NL,   " 4.50 bpw non-linear quantization",},
        {"IQ4_XS",   LLAMA_FTYPE_MOSTLY_IQ4_XS,   " 4.25 bpw non-linear quantization",},
        {"Q4_K",     LLAMA_FTYPE_MOSTLY_Q4_K_M,   "alias for Q4_K_M",},
        {"Q4_K_S",   LLAMA_FTYPE_MOSTLY_Q4_K_S,   " 4.37G, +0.2689 ppl @ Llama-3-8B",},
        {"Q4_K_M",   LLAMA_FTYPE_MOSTLY_Q4_K_M,   " 4.58G, +0.1754 ppl @ Llama-3-8B",},
        {"Q5_K",     LLAMA_FTYPE_MOSTLY_Q5_K_M,   "alias for Q5_K_M",},
        {"Q5_K_S",   LLAMA_FTYPE_MOSTLY_Q5_K_S,   " 5.21G, +0.1049 ppl @ Llama-3-8B",},
        {"Q5_K_M",   LLAMA_FTYPE_MOSTLY_Q5_K_M,   " 5.33G, +0.0569 ppl @ Llama-3-8B",},
        {"Q6_K",     LLAMA_FTYPE_MOSTLY_Q6_K,     " 6.14G, +0.0217 ppl @ Llama-3-8B",},
        {"Q8_0",     LLAMA_FTYPE_MOSTLY_Q8_0,     " 7.96G, +0.0026 ppl @ Llama-3-8B",},
        {"Q4_0_4_4", LLAMA_FTYPE_MOSTLY_Q4_0_4_4, " 4.34G, +0.4685 ppl @ Llama-3-8B",},
        {"Q4_0_4_8", LLAMA_FTYPE_MOSTLY_Q4_0_4_8, " 4.34G, +0.4685 ppl @ Llama-3-8B",},
        {"Q4_0_8_8", LLAMA_FTYPE_MOSTLY_Q4_0_8_8, " 4.34G, +0.4685 ppl @ Llama-3-8B",},
        {"F16",      LLAMA_FTYPE_MOSTLY_F16,      "14.00G, +0.0020 ppl @ Mistral-7B",},
        {"BF16",     LLAMA_FTYPE_MOSTLY_BF16,     "14.00G, -0.0050 ppl @ Mistral-7B",},
        {"F32",      LLAMA_FTYPE_ALL_F32,         "26.00G              @ 7B",},
        // Note: COPY must come after F32 so that the numeric ftype 0 resolves to F32, not COPY.
        {"COPY",     LLAMA_FTYPE_ALL_F32,         "only copy tensors, no quantizing",},
};

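// GGUF metadata keys recorded in the quantized model to describe the
// importance matrix (if any) that was used to produce it.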
static const char *const LLM_KV_QUANTIZE_IMATRIX_FILE = "quantize.imatrix.file";
static const char *const LLM_KV_QUANTIZE_IMATRIX_DATASET = "quantize.imatrix.dataset";
static const char *const LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES = "quantize.imatrix.entries_count";
static const char *const LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS = "quantize.imatrix.chunks_count";

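// Case-insensitive ASCII string equality; true only when both strings match
// over their entire length.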
static bool striequals(const char *a, const char *b) {
    while (*a && *b) {
        if (std::tolower((unsigned char) *a) != std::tolower((unsigned char) *b)) {
            return false;
        }
        a++;
        b++;
    }
    return *a == *b;
}

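// Resolve a quantization type given by name (case-insensitive) or by its
// numeric llama_ftype value; on success stores the enum in `ftype` and the
// canonical name in `ftype_str_out`.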
static bool try_parse_ftype(const std::string &ftype_str_in, llama_ftype &ftype, std::string &ftype_str_out) {
    std::string ftype_str;

    for (auto ch: ftype_str_in) {
        ftype_str.push_back((char) std::toupper((unsigned char) ch));
    }
    for (auto &it: QUANT_OPTIONS) {
        if (striequals(it.name.c_str(), ftype_str.c_str())) {
            ftype = it.ftype;
            ftype_str_out = it.name;
            return true;
        }
    }
    try {
        int ftype_int = std::stoi(ftype_str);
        for (auto &it: QUANT_OPTIONS) {
            if (it.ftype == ftype_int) {
                ftype = it.ftype;
                ftype_str_out = it.name;
                return true;
            }
        }
    }
    catch (...) {
        // stoi failed
    }
    return false;
}

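// Load an importance matrix written by the imatrix tool. The binary layout is
// a sequence of entries [int32 name_len][name][int32 ncall][int32 nval][nval floats],
// optionally followed by a trailer [int32 last_call][int32 dataset_len][dataset name].
// Each entry's values are divided by ncall to obtain averages. Returns
// last_call (the number of chunks), or 0 for files without the trailer.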
static int load_imatrix(const std::string &imatrix_file, std::string &imatrix_dataset,
                        std::unordered_map<std::string, std::vector<float>> &imatrix_data) {
    std::ifstream in(imatrix_file.c_str(), std::ios::binary);
    if (!in) {
        printf("%s: failed to open %s\n", __func__, imatrix_file.c_str());
        exit(1);
    }
    int n_entries;
    in.read((char *) &n_entries, sizeof(n_entries));
    if (in.fail() || n_entries < 1) {
        printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
        exit(1);
    }
    for (int i = 0; i < n_entries; ++i) {
        int len;
        in.read((char *) &len, sizeof(len));
        if (in.fail() || len < 1) {
            printf("%s: failed reading name length for entry %d from %s\n", __func__, i + 1, imatrix_file.c_str());
            exit(1);
        }
        std::vector<char> name_as_vec(len + 1);
        in.read((char *) name_as_vec.data(), len);
        if (in.fail()) {
            printf("%s: failed reading name for entry %d from %s\n", __func__, i + 1, imatrix_file.c_str());
            exit(1);
        }
        name_as_vec[len] = 0;
        std::string name{name_as_vec.data()};
        auto &e = imatrix_data[name];
        int ncall;
        in.read((char *) &ncall, sizeof(ncall));
        int nval;
        in.read((char *) &nval, sizeof(nval));
        if (in.fail() || nval < 1) {
            printf("%s: failed reading number of values for entry %d\n", __func__, i + 1);
            imatrix_data = {};
            exit(1);
        }
        e.resize(nval);
        in.read((char *) e.data(), nval * sizeof(float));
        if (in.fail()) {
            printf("%s: failed reading data for entry %d\n", __func__, i + 1);
            imatrix_data = {};
            exit(1);
        }
        if (ncall > 0) {
            for (auto &v: e) v /= ncall;
        }

        if (getenv("LLAMA_TRACE")) {
            printf("%s: loaded data (size = %6d, ncall = %6d) for '%s'\n", __func__, int(e.size()), ncall,
                   name.c_str());
        }
    }

    // latest imatrix version contains the dataset filename at the end of the file
    int m_last_call = 0;
    if (in.peek() != EOF) {
        in.read((char *) &m_last_call, sizeof(m_last_call));
        int dataset_len;
        in.read((char *) &dataset_len, sizeof(dataset_len));
        if (in.fail() || dataset_len < 0) {
            printf("%s: failed reading dataset info from %s\n", __func__, imatrix_file.c_str());
            exit(1);
        }
        std::vector<char> dataset_as_vec(dataset_len);
        in.read(dataset_as_vec.data(), dataset_len);
        imatrix_dataset.assign(dataset_as_vec.begin(), dataset_as_vec.end());
        printf("%s: imatrix dataset='%s'\n", __func__, imatrix_dataset.c_str());
    }
    printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__,
           int(imatrix_data.size()), imatrix_file.c_str(), m_last_call);
    return m_last_call;
}

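// Load the imatrix (when a file is given) and apply the --include-weights /
// --exclude-weights substring filters to its entries. Returns the chunk count
// from load_imatrix, or -1 if no imatrix file was provided.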
static int prepare_imatrix(const std::string &imatrix_file,
                           std::string &imatrix_dataset,
                           const std::vector<std::string> &included_weights,
                           const std::vector<std::string> &excluded_weights,
                           std::unordered_map<std::string, std::vector<float>> &imatrix_data) {
    int m_last_call = -1;
    if (!imatrix_file.empty()) {
        m_last_call = load_imatrix(imatrix_file, imatrix_dataset, imatrix_data);
    }
    if (imatrix_data.empty()) {
        return m_last_call;
    }
    if (!excluded_weights.empty()) {
        for (auto &name: excluded_weights) {
            for (auto it = imatrix_data.begin(); it != imatrix_data.end();) {
                auto pos = it->first.find(name);
                if (pos != std::string::npos) it = imatrix_data.erase(it);
                else ++it;
            }
        }
    }
    if (!included_weights.empty()) {
        std::unordered_map<std::string, std::vector<float>> tmp;
        for (auto &name: included_weights) {
            for (auto &e: imatrix_data) {
                auto pos = e.first.find(name);
                if (pos != std::string::npos) {
                    tmp.emplace(std::move(e));
                }
            }
        }
        imatrix_data = std::move(tmp);
    }
    if (!imatrix_data.empty()) {
        printf("%s: have %d importance matrix entries\n", __func__, int(imatrix_data.size()));
    }
    return m_last_call;
}

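// Map a ggml type name (e.g. "f16", "q8_0") to its ggml_type enum value;
// returns GGML_TYPE_COUNT if the name is unknown.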
static ggml_type parse_ggml_type(const char *arg) {
    for (int i = 0; i < GGML_TYPE_COUNT; ++i) {
        auto type = (ggml_type) i;
        const auto *name = ggml_type_name(type);
        if (name && striequals(name, arg)) {
            return type;
        }
    }
    fprintf(stderr, "%s: invalid ggml_type '%s'\n", __func__, arg);
    return GGML_TYPE_COUNT;
}

namespace kllm {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    std::vector<llama_model_kv_override> kv_overrides;
    std::vector<std::string> included_weights, excluded_weights;
    std::string imatrix_file;
    std::string input_file;
    std::string output_file;
    std::string f_type;

    static int run_quantize();

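    // Register the `quantize` subcommand, its positional arguments, and all
    // of its flags/options on the CLI app.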
    turbo::Status setup_quantize_cmd(turbo::cli::App *app) {
        auto quantize_cmd = app->add_subcommand("quantize", "quantize a GGUF model to a lower-precision type");
        std::stringstream ss;
        ss << "\nAllowed quantization types:\n";
        for (auto &it: QUANT_OPTIONS) {
            if (it.name != "COPY") {
                ss << "  " << it.ftype << "  or  ";
            } else {
                ss << "          ";
            }
            ss << it.name << " : " << it.desc << "\n";
        }

        quantize_cmd->add_option("input_file", input_file, "path to the input GGUF model")->required();

        quantize_cmd->add_option("output_file", output_file, "output file, or a quantization type (see f_type); when a type is given here, the output name is derived from the input path")->required();
        quantize_cmd->add_option("f_type", f_type, "quantization type for the output model\n" + ss.str());

        quantize_cmd->add_flag("--leave-output-tensor", [](int64_t) {
                                   params.quantize_output_tensor = false;
                               },
                               "Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing");

        quantize_cmd->add_option("--output-tensor-type", params.output_tensor_type,
                                 "ggml_type: use this ggml_type for the output.weight tensor");
        quantize_cmd->add_option_function<std::string>("--token-embedding-type",
                                                       [quantize_cmd](const ::std::string &value) {
                                                           params.token_embedding_type = parse_ggml_type(value.c_str());
                                                           if (params.token_embedding_type == GGML_TYPE_COUNT) {
                                                               std::cout << quantize_cmd->help() << std::endl;
                                                               exit(1);
                                                           }
                                                       }, "use this ggml_type for the token embeddings tensor");
        quantize_cmd->add_option_function<std::string>("--override-kv", [quantize_cmd](const std::string &value) {
            if (!kllm::string_parse_kv_override(value.c_str(), kv_overrides)) {
                std::cout << quantize_cmd->help() << std::endl;
                exit(1);
            }
        }, "KEY=TYPE:VALUE");
        quantize_cmd->add_flag("--allow-requantize", [](int64_t) {
                                   params.allow_requantize = true;
                               },
                               "Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit");

        quantize_cmd->add_flag("--pure", [](int64_t) {
            params.pure = true;
        }, "Disable k-quant mixtures and quantize all tensors to the same type");
        quantize_cmd->add_option("--imatrix", imatrix_file,
                                 "file_name: use data in file_name as importance matrix for quant optimizations");

        auto wo = quantize_cmd->add_option_group("weights");
        wo->add_option("--include-weights", included_weights,
                       "tensor_name: use importance matrix for this/these tensor(s)");

        wo->add_option("--exclude-weights", excluded_weights,
                       "tensor_name: don't use importance matrix for this/these tensor(s)");
        wo->require_option(0, 1);

        quantize_cmd->add_flag("--keep-split", [](int64_t) {
            params.keep_split = true;
        }, "will generate quantized model in the same shards as input");

        quantize_cmd->add_option("--n-thread", params.nthread, "number of threads to use for quantizing");

        quantize_cmd->callback(run_quantize);
        return turbo::OkStatus();
    }


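    // Entry point for the `quantize` subcommand: prepares imatrix data and KV
    // overrides, resolves the output path and target type, invokes
    // llama_model_quantize, and reports timing.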
    int run_quantize() {
        std::string imatrix_dataset;
        std::unordered_map<std::string, std::vector<float>> imatrix_data;
        int m_last_call = prepare_imatrix(imatrix_file, imatrix_dataset, included_weights, excluded_weights,
                                          imatrix_data);
        if (!imatrix_data.empty()) {
            params.imatrix = &imatrix_data;
            {
                llama_model_kv_override kvo;
                std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
                strncpy(kvo.val_str, imatrix_file.c_str(), sizeof(kvo.val_str) - 1);
                kvo.val_str[sizeof(kvo.val_str) - 1] = '\0';
                kv_overrides.emplace_back(std::move(kvo));
            }
            if (!imatrix_dataset.empty()) {
                llama_model_kv_override kvo;
                std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
                strncpy(kvo.val_str, imatrix_dataset.c_str(), sizeof(kvo.val_str) - 1);
                kvo.val_str[sizeof(kvo.val_str) - 1] = '\0';
                kv_overrides.emplace_back(std::move(kvo));
            }

            {
                llama_model_kv_override kvo;
                std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
                kvo.val_i64 = imatrix_data.size();
                kv_overrides.emplace_back(std::move(kvo));
            }

            if (m_last_call > 0) {
                llama_model_kv_override kvo;
                std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
                kvo.val_i64 = m_last_call;
                kv_overrides.emplace_back(std::move(kvo));
            }
        }
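        // Terminate the override list with an empty-key sentinel entry before
        // handing it to llama_model_quantize.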
        if (!kv_overrides.empty()) {
            kv_overrides.emplace_back();
            kv_overrides.back().key[0] = 0;
            params.kv_overrides = &kv_overrides;
        }

        llama_backend_init();

        std::string fname_out;

        std::string ftype_str;
        std::string suffix = ".gguf";
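        // Two invocation forms are supported: "quantize <in> <type>" derives the
        // output name from the input path, while "quantize <in> <out> <type>"
        // writes to <out> using the type given by f_type.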
        if (try_parse_ftype(output_file, params.ftype, ftype_str)) {
            std::string fpath;
            const size_t pos = input_file.find_last_of("/\\");
            if (pos != std::string::npos) {
                fpath = input_file.substr(0, pos + 1);
            }

            // export as [input path]/ggml-model-[ftype]; only add the extension when the output is not split
            fname_out = fpath + "ggml-model-" + ftype_str;
            if (!params.keep_split) {
                fname_out += suffix;
            }
            if (ftype_str == "COPY") {
                params.only_copy = true;
            }
            LOG(INFO) << NOPREFIX << "using type parsed from the output file: " << ftype_str << ", ignoring the assigned f_type: " << f_type;
        } else {
            fname_out = output_file;
            if (params.keep_split && fname_out.find(suffix) != std::string::npos) {
                fname_out = fname_out.substr(0, fname_out.length() - suffix.length());
            }
            if (!try_parse_ftype(f_type, params.ftype, ftype_str)) {
                LOG(ERROR) << __func__ << ": invalid ftype '" << f_type << "'";
                return 1;
            }

            if (ftype_str == "COPY") {
                params.only_copy = true;
            }
        }
        LOG(INFO) << NOPREFIX << "input file: " << input_file;
        LOG(INFO) << NOPREFIX << "output file: " << fname_out;
        LOG(INFO) << NOPREFIX << "type: " << ftype_str;

        if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS ||
             params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S ||
             params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
             params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
             params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) {
            fprintf(stderr,
                    "\n==========================================================================================================\n");
            fprintf(stderr,
                    "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
            fprintf(stderr,
                    "==========================================================================================================\n\n\n");
            return 1;
        }

        fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, input_file.c_str(), fname_out.c_str(),
                ftype_str.c_str());
        if (params.nthread > 0) {
            fprintf(stderr, " using %d threads", params.nthread);
        }
        fprintf(stderr, "\n");

        const int64_t t_main_start_us = llama_time_us();

        int64_t t_quantize_us = 0;

        // load the model
        {
            const int64_t t_start_us = llama_time_us();

            if (llama_model_quantize(input_file.c_str(), fname_out.c_str(), &params)) {
                fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, input_file.c_str());
                return 1;
            }

            t_quantize_us = llama_time_us() - t_start_us;
        }

        // report timing
        {
            const int64_t t_main_end_us = llama_time_us();

            printf("\n");
            printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0);
            printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0);
        }

        llama_backend_free();

        return 0;
    }
}  // namespace kllm