// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
//

#include <kllm/kai/infill.h>

namespace kllm {

    /// @brief Synchronous infill (fill-in-the-middle) completion.
    ///
    /// Builds a single-prompt task batch from the request's infill prompt,
    /// posts it to the inference queue, blocks until all results arrive,
    /// and copies the completions (or the error status) into @p res.
    ///
    /// @param req incoming request; only `infill_request().prompts()` is read.
    /// @param res response populated with model alias, completions, and status.
    void KaiInfill::infill(const KaiRequest &req, KaiResponse &res) {
        res.set_model_alias(_context->params.model_alias);

        // Wrap the raw prompt string in the prompts container expected by
        // create_tasks_inference.
        PromptsValue value;
        value.set_string_value(req.infill_request().prompts());
        KaiPrompts prompts;
        *prompts.mutable_values()->Add() = std::move(value);

        // Create the inference tasks, register them as waiting so their
        // results are retained, then post them for execution.
        std::vector<server_task> tasks = _context->create_tasks_inference(req, SERVER_TASK_INF_TYPE_INFILL, prompts);
        _context->queue_results.add_waiting_tasks(tasks);
        _context->queue_tasks.post(tasks);

        const auto task_ids = server_task::get_list_id(tasks);
        _context->receive_cmpl_results(task_ids, [&](std::vector<TaskResult> &results) {
            // One completion per task result; this single loop covers both the
            // single-result and multitask cases (the former if/else branches
            // were identical).
            for (const auto &r: results) {
                *res.mutable_completions()->Add() = r.completions;
            }
        }, [&](const turbo::Status &status) {
            // Propagate the failure code and message to the caller.
            res.mutable_status()->set_code(status.raw_code());
            res.mutable_status()->set_errmsg(std::string(status.message()));
        });

        // Unregister on both success and error paths so no stale waiters leak.
        _context->queue_results.remove_waiting_task_ids(task_ids);
    }

    /// @brief Streaming infill (fill-in-the-middle) completion.
    ///
    /// Builds and posts the same task batch as infill(), but delivers results
    /// incrementally: each partial result is appended to @p res and forwarded
    /// to @p func, which may return false to stop the stream.
    ///
    /// @param req incoming request; only `infill_request().prompts()` is read.
    /// @param res response accumulating model alias, completions, and status.
    /// @param func per-chunk callback; its return value controls continuation.
    /// @param on_complete invoked exactly once when the stream ends,
    ///        whether it finished normally or with an error.
    void KaiInfill::infill_stream(const KaiRequest &req, KaiResponse &res,
                                  const std::function<bool(const CompletionsResult &)> &func,
                                  const std::function<void()> &on_complete) {
        res.set_model_alias(_context->params.model_alias);

        // Wrap the raw prompt string in the prompts container expected by
        // create_tasks_inference.
        PromptsValue value;
        value.set_string_value(req.infill_request().prompts());
        KaiPrompts prompts;
        *prompts.mutable_values()->Add() = std::move(value);

        // Create, register, and post the inference tasks.
        std::vector<server_task> tasks = _context->create_tasks_inference(req, SERVER_TASK_INF_TYPE_INFILL, prompts);
        _context->queue_results.add_waiting_tasks(tasks);
        _context->queue_tasks.post(tasks);
        const auto task_ids = server_task::get_list_id(tasks);

        // Tracks whether the error path already fired on_complete, so the
        // normal-path call below cannot invoke it a second time (the original
        // code called on_complete twice when the stream ended in error).
        bool completed = false;

        auto context_call_back = [&](const TaskResult &result) -> bool {
            *res.mutable_completions()->Add() = result.completions;
            // The consumer decides whether streaming should continue.
            return func(result.completions);
        };
        auto context_err_back = [&](const turbo::Status &status) {
            res.mutable_status()->set_code(status.raw_code());
            res.mutable_status()->set_errmsg(std::string(status.message()));
            completed = true;
            on_complete();
        };

        this->_context->receive_cmpl_results_stream(task_ids, context_call_back, context_err_back);

        // Unregister on both success and error paths so no stale waiters leak.
        _context->queue_results.remove_waiting_task_ids(task_ids);
        if (!completed) {
            on_complete();
        }
    }
}  // namespace kllm
