// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <nebula/core/array.h>
#include <nebula/core/buffer.h>
#include <nebula/compute/exec.h>
#include <nebula/compute/kernel.h>
#include <turbo/utility/status.h>
#include <turbo/base/macros.h>

namespace nebula::compute {

    class Function;

    /// \brief Default maximum span length: effectively "no limit", so the
    /// iterator only splits where chunk boundaries force it to.
    ///
    /// `inline constexpr` (C++17) gives a single external-linkage definition
    /// shared by all translation units, instead of one internal-linkage copy
    /// per TU as `static constexpr` would in a header.
    inline constexpr int64_t kDefaultMaxChunksize = std::numeric_limits<int64_t>::max();

    namespace detail {

        /// \brief Break std::vector<Datum> into a sequence of non-owning
        /// ExecSpan for kernel execution. The lifetime of the Datum vector
        /// must be longer than the lifetime of this object
        /// \brief Break std::vector<Datum> into a sequence of non-owning
        /// ExecSpan for kernel execution. The lifetime of the Datum vector
        /// must be longer than the lifetime of this object
        class TURBO_EXPORT ExecSpanIterator {
        public:
            ExecSpanIterator() = default;

            /// \brief Initialize iterator and do basic argument validation
            ///
            /// \param[in] batch the input ExecBatch
            /// \param[in] max_chunksize the maximum length of each produced
            /// ExecSpan; actual spans may be shorter depending on the chunk
            /// layout of any ChunkedArray arguments
            /// \param[in] promote_if_all_scalars if all of the values are scalars,
            /// return them in each ExecSpan as ArraySpan of length 1. This must be set
            /// to true for Scalar and Vector executors but false for Aggregators
            turbo::Status init(const ExecBatch &batch, int64_t max_chunksize = kDefaultMaxChunksize,
                               bool promote_if_all_scalars = true);

            /// \brief Compute the next span by updating the state of the
            /// previous span object. You must keep passing in the previous
            /// value for the results to be consistent. If you need to process
            /// in parallel, make a copy of the in-use ExecSpan while it's being
            /// used by another thread and pass it into Next. This function
            /// always populates at least one span. If you call this function
            /// with a blank ExecSpan after the first iteration, it will not
            /// work correctly (maybe we will change this later). Return false
            /// if the iteration is exhausted
            bool next(ExecSpan *span);

            /// \brief Total logical length of the input batch being iterated
            int64_t length() const { return length_; }

            /// \brief Absolute position reached so far within [0, length())
            int64_t position() const { return position_; }

            /// \brief True if every value passed to init() was a Scalar
            bool have_all_scalars() const { return have_all_scalars_; }

        private:
            ExecSpanIterator(const std::vector<Datum> &args, int64_t length, int64_t max_chunksize);

            // Compute the length of the next span (bounded by iteration_size
            // and the nearest chunk boundary) and point *span at the data.
            int64_t GetNextChunkSpan(int64_t iteration_size, ExecSpan *span);

            bool initialized_ = false;
            bool have_chunked_arrays_ = false;
            bool have_all_scalars_ = false;
            bool promote_if_all_scalars_ = true;
            // Non-owning pointer to the caller's argument vector; see the
            // lifetime requirement in the class comment above.
            const std::vector<Datum> *args_;
            // Per-argument index of the current chunk (only meaningful for
            // ChunkedArray arguments)
            std::vector<int> chunk_indexes_;
            // Per-argument relative position within the current chunk
            std::vector<int64_t> value_positions_;

            // Keep track of the array offset in the "active" array (e.g. the
            // array or the particular chunk of an array) in each slot, separate
            // from the relative position within each chunk (which is in
            // value_positions_)
            std::vector<int64_t> value_offsets_;
            int64_t position_ = 0;
            int64_t length_ = 0;
            int64_t max_chunksize_;
        };

        // "Push" / listener API like IPC reader so that consumers can receive
        // processed chunks as soon as they're available.

        /// \brief Listener interface invoked by executors as each output chunk
        /// becomes available, so consumers can process results incrementally
        /// rather than waiting for the whole computation to finish.
        class TURBO_EXPORT ExecListener {
        public:
            virtual ~ExecListener() = default;

            /// \brief Receive one result chunk. The default implementation
            /// rejects all results; subclasses override to consume them.
            virtual turbo::Status OnResult(Datum) { return turbo::unimplemented_error("OnResult"); }
        };

        /// \brief ExecListener that collects every result Datum into a vector,
        /// for callers that want all outputs materialized rather than streamed.
        class DatumAccumulator : public ExecListener {
        public:
            DatumAccumulator() = default;

            /// \brief Append one result to the accumulated list.
            ///
            /// \param[in] value the result Datum; taken by value as a sink
            /// parameter and moved into storage (the original copied here,
            /// making the pass-by-value pointless)
            turbo::Status OnResult(Datum value) override {
                values_.emplace_back(std::move(value));
                return turbo::OkStatus();
            }

            /// \brief Take ownership of all accumulated results.
            ///
            /// NOTE: moves the internal vector out, leaving this accumulator
            /// empty — call at most once per accumulation run.
            std::vector<Datum> values() { return std::move(values_); }

        private:
            std::vector<Datum> values_;
        };

        /// \brief Abstract driver that runs a kernel over an ExecBatch,
        /// streaming output chunks to an ExecListener. Concrete executors for
        /// scalar, vector, and scalar-aggregate kernels are obtained from the
        /// Make* factories below.
        class TURBO_EXPORT KernelExecutor {
        public:
            virtual ~KernelExecutor() = default;

            /// The Kernel's `init` method must be called and any KernelState set in the
            /// KernelContext *before* KernelExecutor::init is called. This is to facilitate
            /// the case where init may be expensive and does not need to be called again for
            /// each execution of the kernel, for example the same lookup table can be re-used
            /// for all scanned batches in a dataset filter.
            virtual turbo::Status init(KernelContext *, KernelInitArgs) = 0;

            // TODO(wesm): per ARROW-16819, adding ExecBatch variant so that a batch
            // length can be passed in for scalar functions; will have to return and
            // clean a bunch of things up
            /// \brief Run the kernel over `batch`, delivering each produced
            /// chunk to `listener` as it becomes available.
            virtual turbo::Status execute(const ExecBatch &batch, ExecListener *listener) = 0;

            /// \brief Combine the per-chunk `outputs` collected from execute()
            /// into a single result Datum appropriate for the input `args`.
            virtual Datum wrap_results(const std::vector<Datum> &args,
                                      const std::vector<Datum> &outputs) = 0;

            /// \brief Check the actual result type against the resolved output type
            // NOTE(review): PascalCase here is inconsistent with init/execute/
            // wrap_results; renaming would break callers, so it is left as-is.
            virtual turbo::Status CheckResultType(const Datum &out, const char *function_name) = 0;

            /// \brief Create an executor for scalar (elementwise) kernels.
            static std::unique_ptr<KernelExecutor> MakeScalar();

            /// \brief Create an executor for vector kernels.
            static std::unique_ptr<KernelExecutor> MakeVector();

            /// \brief Create an executor for scalar-aggregate kernels.
            static std::unique_ptr<KernelExecutor> MakeScalarAggregate();
        };

        /// \brief Infer the common batch length of `values`.
        ///
        /// \param[in] values the input Datum arguments
        /// \param[out] all_same set to indicate whether every array-like value
        /// shares one length — TODO(review): confirm exact semantics for
        /// all-scalar inputs against the implementation
        int64_t InferBatchLength(const std::vector<Datum> &values, bool *all_same);

        /// \brief Populate validity bitmap with the intersection of the nullity of the
        /// arguments. If a preallocated bitmap is not provided, then one will be
        /// allocated if needed (in some cases a bitmap can be zero-copied from the
        /// arguments). If any Scalar value is null, then the entire validity bitmap
        /// will be set to null.
        ///
        /// \param[in] ctx kernel execution context, for memory allocation etc.
        /// \param[in] batch the data batch
        /// \param[in] out the output ArrayData, must not be null
        TURBO_EXPORT
        turbo::Status propagate_nulls(KernelContext *ctx, const ExecSpan &batch, ArraySpan *out);

        /// \brief Non-allocating variant of propagate_nulls operating on
        /// ArraySpan output; the output span must already reference storage.
        TURBO_EXPORT
        void propagate_nulls_spans(const ExecSpan &batch, ArraySpan *out);

    }  // namespace detail
}  // namespace nebula::compute
