// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <utility>
#include <vector>

#include <turbo/utility/status.h>
#include <turbo/functional/functional.h>
#include <nebula/future/thread_pool.h>
#include <nebula/util/vector.h>

namespace nebula::internal {

    // A parallelizer that takes a `turbo::Status(int)` function and calls it with
    // arguments between 0 and `num_tasks - 1`, on an arbitrary number of threads.

    template<class FUNCTION>
    turbo::Status ParallelFor(int num_tasks, FUNCTION &&func,
                              Executor *executor = internal::get_cpu_thread_pool()) {
        // Guard: a negative `num_tasks` would be converted to an enormous
        // size_t by the vector constructor below and throw, instead of
        // behaving like the (sensible) empty case. Treat <= 0 as a no-op,
        // consistent with the sequential path in OptionalParallelFor.
        if (num_tasks <= 0) {
            return turbo::OkStatus();
        }
        std::vector<Future<>> futures(num_tasks);

        // Submit all tasks up front so they may run concurrently.
        for (int i = 0; i < num_tasks; ++i) {
            TURBO_MOVE_OR_RAISE(futures[i], executor->submit(func, i));
        }
        // Wait on every future and fold the statuses together: all tasks are
        // drained even if an early one failed, so none is left running with
        // dangling references when we return.
        auto st = turbo::OkStatus();
        for (auto &fut: futures) {
            st &= fut.status();
        }
        return st;
    }

    template<class FUNCTION, typename T,
            typename R = typename turbo::call_traits::return_type<FUNCTION>::value_type>
    Future<std::vector<R>> ParallelForAsync(
            std::vector<T> inputs, FUNCTION &&func,
            Executor *executor = internal::get_cpu_thread_pool()) {
        // Schedule one task per input item; each task consumes its item by move.
        const size_t num_inputs = inputs.size();
        std::vector<Future<R>> futures(num_inputs);
        for (size_t idx = 0; idx < num_inputs; ++idx) {
            TURBO_MOVE_OR_RAISE(futures[idx],
                                executor->submit(func, idx, std::move(inputs[idx])));
        }
        // Once every task has completed, unwrap the per-task results into a
        // single vector (or propagate the first error).
        auto collect =
                [](const std::vector<turbo::Result<R>> &results) -> turbo::Result<std::vector<R>> {
                    return UnwrapOrRaise(results);
                };
        return All(std::move(futures)).Then(std::move(collect));
    }

    // A parallelizer that takes a `turbo::Status(int)` function and calls it with
    // arguments between 0 and `num_tasks - 1`, in sequence or in parallel,
    // depending on the input boolean.

    template<class FUNCTION>
    turbo::Status OptionalParallelFor(bool use_threads, int num_tasks, FUNCTION &&func,
                                      Executor *executor = internal::get_cpu_thread_pool()) {
        // Threaded mode simply delegates to ParallelFor.
        if (use_threads) {
            return ParallelFor(num_tasks, std::forward<FUNCTION>(func), executor);
        }
        // Sequential mode: run each task in order, stopping at the first error.
        for (int task = 0; task < num_tasks; ++task) {
            TURBO_RETURN_NOT_OK(func(task));
        }
        return turbo::OkStatus();
    }

    // A parallelizer that takes a `turbo::Result<R>(int index, T item)` function and
    // calls it with each item from the input vector, in sequence or in parallel,
    // depending on the input boolean.

    template<class FUNCTION, typename T,
            typename R = typename turbo::call_traits::return_type<FUNCTION>::value_type>
    Future<std::vector<R>> OptionalParallelForAsync(
            bool use_threads, std::vector<T> inputs, FUNCTION &&func,
            Executor *executor = internal::get_cpu_thread_pool()) {
        // Threaded mode delegates to ParallelForAsync, which moves each item
        // into its task.
        if (use_threads) {
            return ParallelForAsync(std::move(inputs), std::forward<FUNCTION>(func), executor);
        }
        // Sequential mode: call `func` on each item in order, stopping at the
        // first error.
        std::vector<R> result(inputs.size());
        for (size_t i = 0; i < inputs.size(); ++i) {
            // Move each input into `func`, matching the threaded path above;
            // previously this made an unnecessary copy (and would not compile
            // for move-only T). We own `inputs` by value, so moving is safe.
            TURBO_MOVE_OR_RAISE(result[i], func(i, std::move(inputs[i])));
        }
        return result;
    }

}  // namespace nebula::internal
