// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
//

#include <kllm/openai/metrics.h>
#include <turbo/log/logging.h>
#include <kllm/core/sampling.h>
#include <sstream>

namespace kllm {

    // Build the metrics document as JSON, grouped by Prometheus metric type
    // ("counter" vs "gauge"). Each entry is an object {name, help, value}.
    nlohmann::ordered_json format_aoi_metrics_response(const KaiResponse &response) {
        const auto &m = response.metrics();

        // Helper keeping every metric entry in the same {name, help, value}
        // shape. The value is passed as a pre-built json node so the original
        // C++ value type (integer vs floating point) is preserved in the output.
        auto make_metric = [](const char *name, const char *help,
                              nlohmann::ordered_json value) -> nlohmann::ordered_json {
            return nlohmann::ordered_json{
                    {"name",  name},
                    {"help",  help},
                    {"value", value},
            };
        };

        auto counters = nlohmann::ordered_json::array();
        counters.push_back(make_metric("prompt_tokens_total",
                                       "Number of prompt tokens processed.",
                                       m.n_prompt_tokens_processed_total()));
        counters.push_back(make_metric("prompt_seconds_total",
                                       "Prompt process time",
                                       (uint64_t) m.t_prompt_processing_total() / 1.e3));
        counters.push_back(make_metric("tokens_predicted_total",
                                       "Number of generation tokens processed.",
                                       (uint64_t) m.n_tokens_predicted_total()));
        counters.push_back(make_metric("tokens_predicted_seconds_total",
                                       "Predict process time",
                                       (uint64_t) m.t_tokens_generation_total() / 1.e3));
        counters.push_back(make_metric("n_decode_total",
                                       "Total number of llama_decode() calls",
                                       m.n_decode_total()));
        counters.push_back(make_metric("n_busy_slots_per_decode",
                                       "Average number of busy slots per llama_decode() call",
                                       (float) m.n_busy_slots_total() / (float) m.n_decode_total()));

        auto gauges = nlohmann::ordered_json::array();
        // Throughput gauges guard against a zero token count to avoid
        // dividing by a zero elapsed time on an idle server.
        gauges.push_back(make_metric("prompt_tokens_seconds",
                                     "Average prompt throughput in tokens/s.",
                                     m.n_prompt_tokens_processed()
                                         ? 1.e3 / m.t_prompt_processing() * m.n_prompt_tokens_processed()
                                         : 0.));
        gauges.push_back(make_metric("predicted_tokens_seconds",
                                     "Average generation throughput in tokens/s.",
                                     m.n_tokens_predicted()
                                         ? 1.e3 / m.t_tokens_generation() * m.n_tokens_predicted()
                                         : 0.));
        gauges.push_back(make_metric("kv_cache_usage_ratio",
                                     "KV-cache usage. 1 means 100 percent usage.",
                                     1. * m.kv_cache_used_cells() / m.n_ctx()));
        gauges.push_back(make_metric("kv_cache_tokens",
                                     "KV-cache tokens.",
                                     (uint64_t) m.kv_cache_tokens_count()));
        // NOTE(review): hard-coded to 1; the real counter appears to be
        // m.processing() (commented out upstream) — TODO confirm and wire up.
        gauges.push_back(make_metric("requests_processing",
                                     "Number of request processing.",
                                     1));
        gauges.push_back(make_metric("requests_deferred",
                                     "Number of request deferred.",
                                     (uint64_t) m.deferred()));

        return nlohmann::ordered_json{
                {"counter", counters},
                {"gauge",   gauges},
        };
    }

    void format_aoi_metrics_response(const KaiResponse &response, std::string &content) {
        nlohmann::ordered_json obj = format_aoi_metrics_response(response);
        content = obj.dump();
    }

    std::string format_aoi_prometheus_metrics_response(const KaiResponse &response) {
        nlohmann::ordered_json obj = format_aoi_metrics_response(response);
        std::stringstream prometheus;

        for (const auto & el : obj.items()) {
            const auto & type        = el.key();
            const auto & metrics_def = el.value();

            for (const auto & metric_def : metrics_def) {
                const std::string name = metric_def.at("name");
                const std::string help = metric_def.at("help");

                auto value = json_value(metric_def, "value", 0.);
                prometheus << "# HELP llamacpp:" << name << " " << help  << "\n"
                           << "# TYPE llamacpp:" << name << " " << type  << "\n"
                           << "llamacpp:"        << name << " " << value << "\n";
            }
        }
        return prometheus.str();
    }

    // Snapshot of a slot's generation/sampling configuration as a JSON object.
    // Key insertion order is significant (ordered_json preserves it).
    nlohmann::ordered_json get_formated_generation(const KaiSlotState &slot) {
        const auto &sp = slot.sample_param();

        // Human-readable names of the configured sampler chain.
        std::vector<std::string> sampler_names;
        sampler_names.reserve(sp.samplers().size());
        for (const auto &s : sp.samplers()) {
            sampler_names.emplace_back(common_sampler_type_to_str(static_cast<KaiSamplerType>(s)));
        }

        nlohmann::ordered_json out;
        out["id"]                    = slot.id();
        out["n_ctx"]                 = slot.n_ctx();
        out["n_predict"]             = slot.n_predict();      // Server configured n_predict
        out["model"]                 = slot.model_alias();
        out["seed"]                  = sp.seed();
        out["seed_cur"]              = slot.seed_cur();
        out["temperature"]           = sp.temp();
        out["dynatemp_range"]        = sp.dynatemp_range();
        out["dynatemp_exponent"]     = sp.dynatemp_exponent();
        out["top_k"]                 = sp.top_k();
        out["top_p"]                 = sp.top_p();
        out["min_p"]                 = sp.min_p();
        out["xtc_probability"]       = sp.xtc_probability();
        out["xtc_threshold"]         = sp.xtc_threshold();
        out["typical_p"]             = sp.typ_p();
        out["repeat_last_n"]         = sp.penalty_last_n();
        out["repeat_penalty"]        = sp.penalty_repeat();
        out["presence_penalty"]      = sp.penalty_present();
        out["frequency_penalty"]     = sp.penalty_freq();
        out["dry_multiplier"]        = sp.dry_multiplier();
        out["dry_base"]              = sp.dry_base();
        out["dry_allowed_length"]    = sp.dry_allowed_length();
        out["dry_penalty_last_n"]    = sp.dry_penalty_last_n();
        out["dry_sequence_breakers"] = sp.dry_sequence_breakers();
        out["mirostat"]              = sp.mirostat();
        out["mirostat_tau"]          = sp.mirostat_tau();
        out["mirostat_eta"]          = sp.mirostat_eta();
        out["penalize_nl"]           = sp.penalize_nl();
        out["stop"]                  = slot.antiprompt();
        out["max_tokens"]            = slot.max_tokens();     // User configured n_predict
        out["n_keep"]                = slot.n_keep();
        out["n_discard"]             = slot.n_discard();
        out["ignore_eos"]            = sp.ignore_eos();
        out["stream"]                = slot.stream();
        //out["logit_bias"]          = slot.sparams.logit_bias;
        out["n_probs"]               = sp.n_probs();
        out["min_keep"]              = sp.min_keep();
        out["grammar"]               = sp.grammar();
        out["samplers"]              = sampler_names;
        return out;
    }

    void format_aoi_slot_response(const KaiResponse &response, nlohmann::ordered_json &obj) {
        nlohmann::ordered_json data = nlohmann::ordered_json::array();
        for(auto &it : response.slot_state()) {
            nlohmann::ordered_json entity = get_formated_generation(it);
            data.push_back(entity);
        }
        obj["slots"] = data;
    }

    // Serialize the per-slot state into the caller-provided string.
    //
    // Bug fix: this overload previously forwarded to
    // format_aoi_metrics_response(), so callers asking for slot state got the
    // metrics document instead. Route through the slot-response JSON overload.
    void format_aoi_slot_response(const KaiResponse &response, std::string &content) {
        nlohmann::ordered_json obj;
        format_aoi_slot_response(response, obj);
        content = obj.dump();
    }

    // Per-slot timing summary for prompt processing and token generation.
    //
    // Robustness fix: the per-token and per-second ratios were computed with
    // unguarded divisions; with a zero token count or zero elapsed time this
    // is UB for integral operands, or inf/NaN for floating point (which
    // nlohmann::json serializes as null). Guard with ternaries, mirroring the
    // throughput gauges in format_aoi_metrics_response().
    nlohmann::ordered_json get_formated_timings(const KaiSlotState &slot) {
        const auto prompt_n  = slot.n_prompt_tokens_processed();
        const auto prompt_ms = slot.t_prompt_processing();
        const auto pred_n    = slot.n_decoded();
        const auto pred_ms   = slot.t_token_generation();

        return nlohmann::ordered_json {
                {"prompt_n",               prompt_n},
                {"prompt_ms",              prompt_ms},
                {"prompt_per_token_ms",    prompt_n  ? prompt_ms / prompt_n        : 0.},
                {"prompt_per_second",      prompt_ms ? 1e3 / prompt_ms * prompt_n  : 0.},

                {"predicted_n",            pred_n},
                {"predicted_ms",           pred_ms},
                {"predicted_per_token_ms", pred_n    ? pred_ms / pred_n            : 0.},
                {"predicted_per_second",   pred_ms   ? 1e3 / pred_ms * pred_n      : 0.},
        };
    }
}  // namespace kllm
