// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
//
#include <kllm/openai/completions.h>
#include <kllm/openai/chat_completions.h>
#include <kllm/openai/props.h>
#include <kllm/openai/metrics.h>

namespace kllm {

    void format_aoi_chat_completions_final_response(const CompletionsResult &response,
                                    const KMContext *context, nlohmann::ordered_json &result,
                                    bool streaming, bool verbose) {
        auto &completion = response;
        bool stopped_word = completion.slot().stopped_word() != 0;
        bool stopped_eos = completion.slot().stopped_eos();
        int num_tokens_predicted = completion.slot().n_decoded();
        int num_prompt_tokens = completion.slot().tokens_evaluated();
        std::string content = completion.content();

        std::string finish_reason = "length";
        if (stopped_word || stopped_eos) {
            finish_reason = "stop";
        }

        nlohmann::ordered_json choices =
                streaming ? nlohmann::ordered_json::array({nlohmann::ordered_json{{"finish_reason", finish_reason},
                                              {"index",         0},
                                              {"delta",         nlohmann::ordered_json::object()}}})
                          : nlohmann::ordered_json::array({nlohmann::ordered_json{{"finish_reason", finish_reason},
                                              {"index",         0},
                                              {"message",       nlohmann::ordered_json{{"content", content},
                                                                     {"role",    "assistant"}}}}});

        std::time_t t = std::time(0);

        auto model = completion.model();
        if(model.empty()) {
            model = std::string(DEFAULT_OAICOMPAT_MODEL);
        }
        nlohmann::ordered_json res = nlohmann::ordered_json{
                {"choices", choices},
                {"created", t},
                {"model", model},
                {"object",  streaming ? "chat.completion.chunk" : "chat.completion"},
                {"usage",   nlohmann::ordered_json{
                        {"completion_tokens", num_tokens_predicted},
                        {"prompt_tokens",     num_prompt_tokens},
                        {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
                }},
                {"id",      completion.completion_id()}
        };

        // extra fields for debugging purposes
        /*
        if (verbose) {
            res["__verbose"] = response.Ser;
        }*/

        if(!completion.completion_probabilities().empty()) {
            std::vector<completion_token_output> probs_output;
            for(auto &it : completion.completion_probabilities()) {
                completion_token_output tt;
                tt.tok = it.tok();
                tt.text_to_send = it.text_to_send();
                for(auto &sit : it.probs()) {
                    completion_token_output::token_prob tp;
                    tp.tok = sit.tok();
                    tp.prob = sit.prob();
                    tt.probs.push_back(tp);
                }
            }
            res["completion_probabilities"] = probs_vector_to_json(context, probs_output);
        }
        result = std::move(res);
    }

    /// @brief Build one OpenAI-compatible streaming chunk ("chat.completion.chunk")
    ///        for an in-flight generation.
    /// @param response Partial generation result (delta content, slot state, model, id).
    /// @param context  Unused here; kept for signature parity with the final formatter.
    /// @return A single chunk object; a JSON *array* of two chunks for the very first
    ///         non-empty delta (role announcement + first content piece, to conform to
    ///         OpenAI behavior); or an empty object for ignorable trailing calls.
    nlohmann::ordered_json format_partial_response_oaicompat(const CompletionsResult &response,
                                                               const KMContext *context) {
        const bool is_first_chunk = response.slot().n_decoded() == 0;
        const std::string model_name = response.model();
        const std::string delta_text = response.content();

        // Map stop conditions onto the OpenAI finish_reason vocabulary; empty
        // means generation is still running.
        std::string finish_reason;
        if (response.slot().stopped_word() || response.slot().stopped_eos()) {
            finish_reason = "stop";
        }
        if (response.slot().stopped_limit()) {
            finish_reason = "length";
        }

        const std::time_t created_at = std::time(0);

        // One element of the "choices" array. Key insertion order matters for
        // ordered_json output, so it is fixed here in a single place.
        auto make_choices = [](nlohmann::ordered_json finish, nlohmann::ordered_json delta) {
            return nlohmann::ordered_json::array({nlohmann::ordered_json{
                    {"finish_reason", std::move(finish)},
                    {"index",         0},
                    {"delta",         std::move(delta)}}});
        };

        // Wrap a choices array in the full chunk envelope.
        auto make_chunk = [&](nlohmann::ordered_json choices) {
            return nlohmann::ordered_json{
                    {"choices", std::move(choices)},
                    {"created", created_at},
                    {"id",      response.completion_id()},
                    {"model",   model_name},
                    {"object",  "chat.completion.chunk"}};
        };

        nlohmann::ordered_json choices;
        if (!finish_reason.empty()) {
            // Terminal chunk: finish_reason set, empty delta.
            choices = make_choices(finish_reason, nlohmann::ordered_json::object());
        } else if (is_first_chunk) {
            if (delta_text.empty()) {
                // First chunk with no content yet: announce the assistant role only.
                choices = make_choices(nullptr, nlohmann::ordered_json{{"role", "assistant"}});
            } else {
                // We have to send this as two updates to conform to openai behavior:
                // a role announcement followed by the first piece of content.
                nlohmann::ordered_json role_chunk =
                        make_chunk(make_choices(nullptr, nlohmann::ordered_json{{"role", "assistant"}}));
                nlohmann::ordered_json content_chunk =
                        make_chunk(make_choices(nullptr, nlohmann::ordered_json{{"content", delta_text}}));
                return std::vector<nlohmann::ordered_json>({role_chunk, content_chunk});
            }
        } else if (delta_text.empty()) {
            // Some idiosyncrasy in task processing logic makes several trailing
            // calls with empty content; the callee ignores these empty objects.
            return nlohmann::ordered_json::object();
        } else {
            // Ordinary incremental content update.
            choices = make_choices(nullptr, nlohmann::ordered_json{{"content", delta_text}});
        }

        nlohmann::ordered_json ret = make_chunk(std::move(choices));
        if (!finish_reason.empty()) {
            // Usage statistics are only attached to the terminal chunk.
            const int completion_tokens = response.slot().n_decoded();
            const int prompt_tokens = response.slot().tokens_evaluated();
            ret.push_back({"usage", nlohmann::ordered_json{
                    {"completion_tokens", completion_tokens},
                    {"prompt_tokens",     prompt_tokens},
                    {"total_tokens",      completion_tokens + prompt_tokens}
            }});
        }

        return ret;
    }
}  // namespace kllm

