// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
//

#include <kllm/openai/completions.h>
#include <kllm/openai/props.h>
#include <kllm/openai/metrics.h>

namespace kllm {

    /// Serialize per-token probability data into a JSON array.
    ///
    /// Each element of the result has the shape:
    ///   { "content": "<token text>", "probs": [ { "tok_str": "...", "prob": <float> }, ... ] }
    ///
    /// @param ctx    context used to render token ids as display strings (non-owning)
    /// @param probs  one entry per generated token, each carrying its candidate probabilities
    /// @return       a JSON array with one object per entry in `probs`
    nlohmann::ordered_json probs_vector_to_json(const KMContext *ctx, const std::vector<completion_token_output> &probs) {
        auto result = nlohmann::ordered_json::array();

        for (const auto &token_entry : probs) {
            // Candidate probabilities for this position.
            auto candidates = nlohmann::ordered_json::array();
            for (const auto &candidate : token_entry.probs) {
                candidates.push_back(nlohmann::ordered_json{
                        {"tok_str", ctx->tokens_to_output_formatted_string(candidate.tok)},
                        {"prob",    candidate.prob},
                });
            }

            result.push_back(nlohmann::ordered_json{
                    {"content", ctx->tokens_to_output_formatted_string(token_entry.tok)},
                    {"probs",   candidates},
            });
        }

        return result;
    }

    void format_aoi_completions_response(const CompletionsResult &response, const KMContext *context,nlohmann::ordered_json &obj) {

        obj = nlohmann::ordered_json {
                {"content",    response.content()},
                {"stop",       response.stop()},
                {"id_slot",    response.slot().id()},
                {"multimodal", response.multimodal()},
                {"index",      response.slot().index()},
        };
        if(!response.completion_probabilities().empty()) {
            std::vector<completion_token_output> probs_output;
            for(auto &it : response.completion_probabilities()) {
                completion_token_output tt;
                tt.tok = it.tok();
                tt.text_to_send = it.text_to_send();
                for(auto &sit : it.probs()) {
                    completion_token_output::token_prob tp;
                    tp.tok = sit.tok();
                    tp.prob = sit.prob();
                    tt.probs.push_back(tp);
                }
            }
            obj["completion_probabilities"] = probs_vector_to_json(context, probs_output);
        }
        //
        obj["oaicompat_token_ctr"] = response.slot().n_decoded();
        obj["model"] = response.model();

        if(!response.stop()) {
            return;
        }
        obj["tokens_predicted"] = response.slot().n_decoded();
        obj["tokens_evaluated"] = response.slot().tokens_evaluated();
        obj["prompt"] = response.slot().prompt();
        obj["has_new_line"] = response.slot().has_new_line();
        obj["truncated"] = response.slot().truncated();
        obj["stopped_eos"] = response.slot().stopped_eos();
        obj["stopped_word"] = response.slot().stopped_word();
        obj["stopped_limit"] = response.slot().stopped_limit();
        obj["stopping_word"] = response.slot().stopping_word();
        obj["tokens_cached"] = response.slot().tokens_cached();
        obj["generation_settings"] = get_formated_generation(response.slot());
        obj["timings"] = get_formated_timings(response.slot());

    }
}  // namespace kllm

