// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <vector>

#include <nebula/array/array_binary.h>
#include <nebula/array/array_nested.h>
#include <nebula/array/builder_primitive.h>
#include <nebula/core/buffer_builder.h>
#include <nebula/core/chunked_array.h>
#include <nebula/compute/api_vector.h>
#include <nebula/compute/function.h>
#include <nebula/compute/kernel.h>
#include <nebula/compute/kernels/codegen_internal.h>
#include <nebula/compute/kernels/vector_selection_internal.h>
#include <nebula/compute/registry.h>
#include <nebula/types/type.h>
#include <nebula/types/type_traits.h>
#include <nebula/bits/bit_block_counter.h>
#include <nebula/bits/bit_run_reader.h>
#include <nebula/bits/bit_util.h>
#include <nebula/util/fixed_width_internal.h>
#include <nebula/numeric/int_util.h>
#include <turbo/log/logging.h>
#include <nebula/util/ree_util.h>

namespace nebula {

    // Pull CheckIndexBounds from nebula::internal into the nebula namespace.
    // NOTE(review): presumably referenced unqualified by selection kernels
    // later in this file — confirm it is still needed here.
    using internal::CheckIndexBounds;
}

namespace nebula::compute::internal {

    /// \brief Build a binary VectorFunction named `name` from the given kernel
    /// list and add it to `registry`.
    ///
    /// Each SelectionKernelData contributes one kernel whose signature is
    /// (value_type, selection_type) -> type of the first argument. The kernel
    /// list is consumed (its elements are moved from and it is cleared).
    void register_selection_function(const std::string &name, FunctionDoc doc,
                                   VectorKernel base_kernel,
                                   std::vector<SelectionKernelData> &&kernels,
                                   const FunctionOptions *default_options,
                                   FunctionRegistry *registry) {
        auto func = std::make_shared<VectorFunction>(name, Arity::Binary(), std::move(doc),
                                                     default_options);
        for (auto &entry : kernels) {
            // The output type always mirrors the first (values) argument.
            base_kernel.signature = KernelSignature::create(
                    {std::move(entry.value_type), std::move(entry.selection_type)},
                    OutputType(FirstType));
            base_kernel.exec = entry.exec;
            KCHECK_OK(func->add_kernel(base_kernel));
        }
        // The elements were moved from above; drop them explicitly.
        kernels.clear();
        KCHECK_OK(registry->add_function(std::move(func)));
    }

    namespace {

        /// \brief Iterate over a REE filter, emitting ranges of a plain values array that
        /// would pass the filter.
        ///
        /// Differently from REExREE, and REExPlain filtering, PlainxREE filtering
        /// does not produce a REE output, but rather a plain output array. As such it's
        /// much simpler.
        ///
        /// \param filter_may_have_nulls Only pass false if you know the filter has no nulls.
        template<typename FilterRunEndType>
        void VisitPlainxREEFilterOutputSegmentsImpl(
                const ArraySpan &filter, bool filter_may_have_nulls,
                FilterOptions::NullSelectionBehavior null_selection,
                const EmitREEFilterSegment &emit_segment) {
            using FilterRunEndCType = typename FilterRunEndType::c_type;
            const ArraySpan &filter_values = nebula::ree_util::ValuesArray(filter);
            const int64_t filter_values_offset = filter_values.offset;
            const uint8_t *filter_is_valid = filter_values.buffers[0].data;
            const uint8_t *filter_selection = filter_values.buffers[1].data;
            // Refine the caller's hint: nulls are only possible if a validity
            // bitmap exists and the values array reports a non-zero null count.
            filter_may_have_nulls = filter_may_have_nulls && filter_is_valid != nullptr &&
                                    filter_values.null_count != 0;

            // Iterate run-by-run over the REE filter; each run maps to one
            // candidate output segment [logical_position, logical_position + run_length).
            const nebula::ree_util::RunEndEncodedArraySpan<FilterRunEndCType> filter_span(filter);
            auto it = filter_span.begin();
            if (filter_may_have_nulls) {
                if (null_selection == FilterOptions::EMIT_NULL) {
                    // EMIT_NULL: a null filter run is emitted (with valid=false) in
                    // addition to runs whose filter value is true.
                    while (!it.is_end(filter_span)) {
                        const int64_t i = filter_values_offset + it.index_into_array();
                        const bool valid = bit_util::get_bit(filter_is_valid, i);
                        const bool emit = !valid || bit_util::get_bit(filter_selection, i);
                        // Stop early if the callback returns false.
                        if (TURBO_UNLIKELY(
                                emit && !emit_segment(it.logical_position(), it.run_length(), valid))) {
                            break;
                        }
                        ++it;
                    }
                } else {  // DROP nulls
                    // DROP: only runs that are both valid and true are emitted.
                    while (!it.is_end(filter_span)) {
                        const int64_t i = filter_values_offset + it.index_into_array();
                        const bool emit =
                                bit_util::get_bit(filter_is_valid, i) && bit_util::get_bit(filter_selection, i);
                        if (TURBO_UNLIKELY(
                                emit && !emit_segment(it.logical_position(), it.run_length(), true))) {
                            break;
                        }
                        ++it;
                    }
                }
            } else {
                // No nulls: emit exactly the runs whose filter value is true.
                while (!it.is_end(filter_span)) {
                    const int64_t i = filter_values_offset + it.index_into_array();
                    const bool emit = bit_util::get_bit(filter_selection, i);
                    if (TURBO_UNLIKELY(
                            emit && !emit_segment(it.logical_position(), it.run_length(), true))) {
                        break;
                    }
                    ++it;
                }
            }
        }

    }  // namespace

    /// \brief Dispatch PlainxREE filter segment visitation on the filter's
    /// run-end integer width (int16/int32/int64).
    void VisitPlainxREEFilterOutputSegments(
            const ArraySpan &filter, bool filter_may_have_nulls,
            FilterOptions::NullSelectionBehavior null_selection,
            const EmitREEFilterSegment &emit_segment) {
        // An empty filter selects nothing; bail out before touching buffers.
        if (filter.length == 0) {
            return;
        }
        const auto &ree_type = turbo::checked_cast<const RunEndEncodedType &>(*filter.type);
        const auto run_end_id = ree_type.run_end_type()->id();
        if (run_end_id == Type::INT16) {
            VisitPlainxREEFilterOutputSegmentsImpl<Int16Type>(
                    filter, filter_may_have_nulls, null_selection, emit_segment);
        } else if (run_end_id == Type::INT32) {
            VisitPlainxREEFilterOutputSegmentsImpl<Int32Type>(
                    filter, filter_may_have_nulls, null_selection, emit_segment);
        } else {
            // Run ends are restricted to int16/int32/int64 by construction.
            DKCHECK(run_end_id == Type::INT64);
            VisitPlainxREEFilterOutputSegmentsImpl<Int64Type>(
                    filter, filter_may_have_nulls, null_selection, emit_segment);
        }
    }

    namespace {

// ----------------------------------------------------------------------
// Implement take for other data types where there is less performance
// sensitivity by visiting the selected indices.

// Use CRTP to dispatch to type-specific processing of take indices for each
// unsigned integer type.
        /// \brief CRTP base for take/filter implementations over a values array
        /// and a selection (indices or boolean/REE filter) array.
        ///
        /// Derived classes provide GenerateOutput<Adapter>() which receives the
        /// valid/null visitor pair through either FilterAdapter or TakeAdapter.
        template<typename Impl, typename ArrowType>
        struct Selection {
            using ValuesArrayType = typename TypeTraits<ArrowType>::ArrayType;

            // Forwards the generic value visitors to the VisitFilter template
            struct FilterAdapter {
                static constexpr bool is_take = false;

                Impl *impl;

                explicit FilterAdapter(Impl *impl) : impl(impl) {}

                template<typename ValidVisitor, typename NullVisitor>
                turbo::Status Generate(ValidVisitor &&visit_valid, NullVisitor &&visit_null) {
                    return impl->VisitFilter(std::forward<ValidVisitor>(visit_valid),
                                             std::forward<NullVisitor>(visit_null));
                }
            };

            // Forwards the generic value visitors to the take index visitor template
            template<typename IndexCType>
            struct TakeAdapter {
                static constexpr bool is_take = true;

                Impl *impl;

                explicit TakeAdapter(Impl *impl) : impl(impl) {}

                template<typename ValidVisitor, typename NullVisitor>
                turbo::Status Generate(ValidVisitor &&visit_valid, NullVisitor &&visit_null) {
                    return impl->template VisitTake<IndexCType>(std::forward<ValidVisitor>(visit_valid),
                                                                std::forward<NullVisitor>(visit_null));
                }
            };

            // Shared execution state for one kernel invocation.
            KernelContext *ctx;
            const ArraySpan &values;     // batch[0]: the array being selected from
            const ArraySpan &selection;  // batch[1]: take indices or filter bitmap/REE
            int64_t output_length;
            ArrayData *out;
            // Accumulates the output validity bitmap; also tracks output length.
            TypedBufferBuilder<bool> validity_builder;

            Selection(KernelContext *ctx, const ExecSpan &batch, int64_t output_length,
                      ExecResult *out)
                    : ctx(ctx),
                      values(batch[0].array),
                      selection(batch[1].array),
                      output_length(output_length),
                      out(out->array_data().get()),
                      validity_builder(ctx->memory_pool()) {}

            virtual ~Selection() = default;

            // Finalize the parts of the output common to all implementations:
            // buffer count, length, null count, and the validity bitmap (buffer 0).
            turbo::Status FinishCommon() {
                out->buffers.resize(values.num_buffers());
                out->length = validity_builder.length();
                out->null_count = validity_builder.false_count();
                return validity_builder.finish(&out->buffers[0]);
            }

            // Visit take indices of width IndexCType, calling visit_valid(index)
            // for slots that produce a valid output value and visit_null() for
            // slots that produce a null (null index, or index pointing at a null
            // value). Appends the corresponding bits to validity_builder.
            template<typename IndexCType, typename ValidVisitor, typename NullVisitor>
            turbo::Status VisitTake(ValidVisitor &&visit_valid, NullVisitor &&visit_null) {
                const auto indices_values = selection.get_values<IndexCType>(1);
                const uint8_t *is_valid = selection.buffers[0].data;
                nebula::internal::OptionalBitIndexer indices_is_valid(is_valid, selection.offset);
                nebula::internal::OptionalBitIndexer values_is_valid(values.buffers[0].data,
                                                                     values.offset);

                const bool values_have_nulls = values.may_have_nulls();
                // Scan the index validity bitmap in blocks so the all-valid case
                // can skip per-element null checks entirely.
                nebula::internal::OptionalBitBlockCounter bit_counter(is_valid, selection.offset,
                                                                      selection.length);
                int64_t position = 0;
                while (position < selection.length) {
                    BitBlockCount block = bit_counter.NextBlock();
                    const bool indices_have_nulls = block.popcount < block.length;
                    if (!indices_have_nulls && !values_have_nulls) {
                        // Fastest path, neither indices nor values have nulls
                        validity_builder.unsafe_append(block.length, true);
                        for (int64_t i = 0; i < block.length; ++i) {
                            TURBO_RETURN_NOT_OK(visit_valid(indices_values[position++]));
                        }
                    } else if (block.popcount > 0) {
                        // Since we have to branch on whether the indices are null or not, we
                        // combine the "non-null indices block but some values null" and
                        // "some-null indices block but values non-null" into a single loop.
                        for (int64_t i = 0; i < block.length; ++i) {
                            if ((!indices_have_nulls || indices_is_valid[position]) &&
                                values_is_valid[indices_values[position]]) {
                                validity_builder.unsafe_append(true);
                                TURBO_RETURN_NOT_OK(visit_valid(indices_values[position]));
                            } else {
                                validity_builder.unsafe_append(false);
                                TURBO_RETURN_NOT_OK(visit_null());
                            }
                            ++position;
                        }
                    } else {
                        // The whole block is null
                        validity_builder.unsafe_append(block.length, false);
                        for (int64_t i = 0; i < block.length; ++i) {
                            TURBO_RETURN_NOT_OK(visit_null());
                        }
                        position += block.length;
                    }
                }
                return turbo::OkStatus();
            }

            // We use the NullVisitor both for "selected" nulls as well as "emitted"
            // nulls coming from the filter when using FilterOptions::EMIT_NULL
            template<typename ValidVisitor, typename NullVisitor>
            turbo::Status VisitFilter(ValidVisitor &&visit_valid, NullVisitor &&visit_null) {
                const bool is_ree_filter = selection.type->id() == Type::RUN_END_ENCODED;
                const auto null_selection = FilterState::Get(ctx).null_selection_behavior;

                nebula::internal::OptionalBitIndexer values_is_valid(values.buffers[0].data,
                                                                     values.offset);

                // Append a known-valid value at `index`.
                auto AppendNotNull = [&](int64_t index) -> turbo::Status {
                    validity_builder.unsafe_append(true);
                    return visit_valid(index);
                };

                // Append a null output slot.
                auto append_null = [&]() -> turbo::Status {
                    validity_builder.unsafe_append(false);
                    return visit_null();
                };

                // Append the value at `index`, checking its validity bit.
                auto AppendMaybeNull = [&](int64_t index) -> turbo::Status {
                    if (values_is_valid[index]) {
                        return AppendNotNull(index);
                    } else {
                        return append_null();
                    }
                };

                if (is_ree_filter) {
                    // REE filter: walk emitted segments; a segment with
                    // filter_valid == false corresponds to EMIT_NULL nulls.
                    turbo::Status status;
                    VisitPlainxREEFilterOutputSegments(
                            selection, /*filter_may_have_nulls=*/true, null_selection,
                            [&](int64_t position, int64_t segment_length, bool filter_valid) {
                                if (filter_valid) {
                                    for (int64_t i = 0; i < segment_length; ++i) {
                                        status = AppendMaybeNull(position + i);
                                    }
                                } else {
                                    for (int64_t i = 0; i < segment_length; ++i) {
                                        status = append_null();
                                    }
                                }
                                // Stop visiting on the first non-OK status.
                                return status.ok();
                            });
                    return status;
                }

                const uint8_t *filter_data = selection.buffers[1].data;
                const uint8_t *filter_is_valid = selection.buffers[0].data;
                const int64_t filter_offset = selection.offset;
                // We use 3 block counters for fast scanning of the filter
                //
                // * values_valid_counter: for values null/not-null
                // * filter_valid_counter: for filter null/not-null
                // * filter_counter: for filter true/false
                nebula::internal::OptionalBitBlockCounter values_valid_counter(
                        values.buffers[0].data, values.offset, values.length);
                nebula::internal::OptionalBitBlockCounter filter_valid_counter(
                        filter_is_valid, filter_offset, selection.length);
                nebula::internal::BitBlockCounter filter_counter(filter_data, filter_offset,
                                                                 selection.length);

                int64_t in_position = 0;
                while (in_position < selection.length) {
                    // NOTE: the three counters advance in lockstep, one word per
                    // iteration, so the blocks below cover the same positions.
                    nebula::internal::BitBlockCount filter_valid_block = filter_valid_counter.NextWord();
                    nebula::internal::BitBlockCount values_valid_block = values_valid_counter.NextWord();
                    nebula::internal::BitBlockCount filter_block = filter_counter.NextWord();
                    if (filter_block.NoneSet() && null_selection == FilterOptions::DROP) {
                        // For this exceedingly common case in low-selectivity filters we can
                        // skip further analysis of the data and move on to the next block.
                        in_position += filter_block.length;
                    } else if (filter_valid_block.AllSet()) {
                        // Simpler path: no filter values are null
                        if (filter_block.AllSet()) {
                            // Fastest path: filter values are all true and not null
                            if (values_valid_block.AllSet()) {
                                // The values aren't null either
                                validity_builder.unsafe_append(filter_block.length, true);
                                for (int64_t i = 0; i < filter_block.length; ++i) {
                                    TURBO_RETURN_NOT_OK(visit_valid(in_position++));
                                }
                            } else {
                                // Some of the values in this block are null
                                for (int64_t i = 0; i < filter_block.length; ++i) {
                                    TURBO_RETURN_NOT_OK(AppendMaybeNull(in_position++));
                                }
                            }
                        } else {  // !filter_block.AllSet()
                            // Some of the filter values are false, but all not null
                            if (values_valid_block.AllSet()) {
                                // All the values are not-null, so we can skip null checking for
                                // them
                                for (int64_t i = 0; i < filter_block.length; ++i) {
                                    if (bit_util::get_bit(filter_data, filter_offset + in_position)) {
                                        TURBO_RETURN_NOT_OK(AppendNotNull(in_position));
                                    }
                                    ++in_position;
                                }
                            } else {
                                // Some of the values in the block are null, so we have to check
                                // each one
                                for (int64_t i = 0; i < filter_block.length; ++i) {
                                    if (bit_util::get_bit(filter_data, filter_offset + in_position)) {
                                        TURBO_RETURN_NOT_OK(AppendMaybeNull(in_position));
                                    }
                                    ++in_position;
                                }
                            }
                        }
                    } else {  // !filter_valid_block.AllSet()
                        // Some of the filter values are null, so we have to handle the DROP
                        // versus EMIT_NULL null selection behavior.
                        if (null_selection == FilterOptions::DROP) {
                            // Filter null values are treated as false.
                            for (int64_t i = 0; i < filter_block.length; ++i) {
                                if (bit_util::get_bit(filter_is_valid, filter_offset + in_position) &&
                                    bit_util::get_bit(filter_data, filter_offset + in_position)) {
                                    TURBO_RETURN_NOT_OK(AppendMaybeNull(in_position));
                                }
                                ++in_position;
                            }
                        } else {
                            // Filter null values are appended to output as null whether the
                            // value in the corresponding slot is valid or not
                            for (int64_t i = 0; i < filter_block.length; ++i) {
                                const bool filter_not_null =
                                        bit_util::get_bit(filter_is_valid, filter_offset + in_position);
                                if (filter_not_null &&
                                    bit_util::get_bit(filter_data, filter_offset + in_position)) {
                                    TURBO_RETURN_NOT_OK(AppendMaybeNull(in_position));
                                } else if (!filter_not_null) {
                                    // EMIT_NULL case
                                    TURBO_RETURN_NOT_OK(append_null());
                                }
                                ++in_position;
                            }
                        }
                    }
                }
                return turbo::OkStatus();
            }

            // Implementation-specific pre-allocation hook (default: no-op).
            virtual turbo::Status init() { return turbo::OkStatus(); }

            // Implementation specific finish logic
            virtual turbo::Status finish() = 0;

            // Entry point for take: dispatch GenerateOutput on the index width.
            turbo::Status ExecTake() {
                TURBO_RETURN_NOT_OK(this->validity_builder.Reserve(output_length));
                TURBO_RETURN_NOT_OK(init());
                int index_width = this->selection.type->byte_width();

                // CRTP dispatch here
                switch (index_width) {
                    case 1: {
                        turbo::Status s =
                                static_cast<Impl *>(this)->template GenerateOutput<TakeAdapter<uint8_t>>();
                        TURBO_RETURN_NOT_OK(s);
                    }
                        break;
                    case 2: {
                        turbo::Status s =
                                static_cast<Impl *>(this)->template GenerateOutput<TakeAdapter<uint16_t>>();
                        TURBO_RETURN_NOT_OK(s);
                    }
                        break;
                    case 4: {
                        turbo::Status s =
                                static_cast<Impl *>(this)->template GenerateOutput<TakeAdapter<uint32_t>>();
                        TURBO_RETURN_NOT_OK(s);
                    }
                        break;
                    case 8: {
                        turbo::Status s =
                                static_cast<Impl *>(this)->template GenerateOutput<TakeAdapter<uint64_t>>();
                        TURBO_RETURN_NOT_OK(s);
                    }
                        break;
                    default:
                        DKCHECK(false) << "Invalid index width";
                        break;
                }
                TURBO_RETURN_NOT_OK(this->FinishCommon());
                return finish();
            }

            // Entry point for filter.
            turbo::Status ExecFilter() {
                TURBO_RETURN_NOT_OK(this->validity_builder.Reserve(output_length));
                TURBO_RETURN_NOT_OK(init());
                // CRTP dispatch
                turbo::Status s = static_cast<Impl *>(this)->template GenerateOutput<FilterAdapter>();
                TURBO_RETURN_NOT_OK(s);
                TURBO_RETURN_NOT_OK(this->FinishCommon());
                return finish();
            }
        };

// Re-expose the dependent base-class members inside each Selection subclass so
// they can be used without `this->` / `Base::` qualification.
#define LIFT_BASE_MEMBERS()                               \
  using ValuesArrayType = typename Base::ValuesArrayType; \
  using Base::ctx;                                        \
  using Base::values;                                     \
  using Base::selection;                                  \
  using Base::output_length;                              \
  using Base::out;                                        \
  using Base::validity_builder

        // A no-op visitor for implementations that only need the side effects
        // performed by the Selection base (e.g. validity bookkeeping).
        inline turbo::Status VisitNoop() { return turbo::OkStatus(); }

// A selection implementation for 32-bit and 64-bit variable binary
// types. Common generated kernels are shared between Binary/String and
// LargeBinary/LargeString
        template<typename Type>
        struct VarBinarySelectionImpl : public Selection<VarBinarySelectionImpl<Type>, Type> {
            using offset_type = typename Type::offset_type;

            using Base = Selection<VarBinarySelectionImpl<Type>, Type>;
            LIFT_BASE_MEMBERS();

            // Output offsets (buffer 1) and character data (buffer 2).
            TypedBufferBuilder<offset_type> offset_builder;
            TypedBufferBuilder<uint8_t> data_builder;

            // Maximum cumulative data size representable by offset_type. Take may
            // repeat values, so the output can exceed the input data size and must
            // be checked against this limit.
            static constexpr int64_t kOffsetLimit = std::numeric_limits<offset_type>::max() - 1;

            VarBinarySelectionImpl(KernelContext *ctx, const ExecSpan &batch, int64_t output_length,
                                   ExecResult *out)
                    : Base(ctx, batch, output_length, out),
                      offset_builder(ctx->memory_pool()),
                      data_builder(ctx->memory_pool()) {}

            /// Visit the selection (via the Adapter), copying each selected value's
            /// bytes into data_builder and appending its start offset to
            /// offset_builder. Null slots append the current offset only.
            template<typename Adapter>
            turbo::Status GenerateOutput() {
                const auto raw_offsets = this->values.template get_values<offset_type>(1);
                const uint8_t *raw_data = this->values.buffers[2].data;

                // Presize the data builder with a rough estimate of the required data size
                if (this->values.length > 0) {
                    int64_t data_length = raw_offsets[this->values.length] - raw_offsets[0];
                    const double mean_value_length =
                            data_length / static_cast<double>(this->values.length);

                    // TODO: See if possible to reduce output_length for take/filter cases
                    // where there are nulls in the selection array
                    TURBO_RETURN_NOT_OK(
                            data_builder.Reserve(static_cast<int64_t>(mean_value_length * output_length)));
                }
                int64_t space_available = data_builder.capacity();

                offset_type offset = 0;
                Adapter adapter(this);
                TURBO_RETURN_NOT_OK(adapter.Generate(
                        [&](int64_t index) {
                            offset_builder.unsafe_append(offset);
                            offset_type val_offset = raw_offsets[index];
                            offset_type val_size = raw_offsets[index + 1] - val_offset;

                            // Use static property to prune this code from the filter path in
                            // optimized builds.
                            //
                            // BUGFIX: the `> kOffsetLimit` comparison must be *inside*
                            // TURBO_UNLIKELY(...). Previously the macro wrapped only the
                            // sum; since likelihood macros normalize their argument to a
                            // boolean, the guard compared 0/1 against kOffsetLimit and the
                            // overflow check could never trigger.
                            if (Adapter::is_take &&
                                TURBO_UNLIKELY(static_cast<int64_t>(offset) +
                                                       static_cast<int64_t>(val_size) >
                                               kOffsetLimit)) {
                                return turbo::invalid_argument_error("Take operation overflowed binary array capacity");
                            }
                            offset += val_size;
                            // Grow the data buffer on demand; Reserve guarantees at least
                            // val_size additional bytes of capacity.
                            if (TURBO_UNLIKELY(val_size > space_available)) {
                                TURBO_RETURN_NOT_OK(data_builder.Reserve(val_size));
                                space_available = data_builder.capacity() - data_builder.length();
                            }
                            data_builder.unsafe_append(raw_data + val_offset, val_size);
                            space_available -= val_size;
                            return turbo::OkStatus();
                        },
                        [&]() {
                            // Null output slot: keep the offsets monotonic.
                            offset_builder.unsafe_append(offset);
                            return turbo::OkStatus();
                        }));
                // Final "end" offset (N values need N + 1 offsets).
                offset_builder.unsafe_append(offset);
                return turbo::OkStatus();
            }

            // N output values require N + 1 offsets.
            turbo::Status init() override { return offset_builder.Reserve(output_length + 1); }

            turbo::Status finish() override {
                TURBO_RETURN_NOT_OK(offset_builder.finish(&out->buffers[1]));
                return data_builder.finish(&out->buffers[2]);
            }
        };

        // Selection for List/LargeList: gathers child-value indices for the
        // selected lists, then delegates the child selection to a recursive Take
        // in finish().
        template<typename Type>
        struct ListSelectionImpl : public Selection<ListSelectionImpl<Type>, Type> {
            using offset_type = typename Type::offset_type;

            using Base = Selection<ListSelectionImpl<Type>, Type>;
            LIFT_BASE_MEMBERS();

            // Output list offsets (buffer 1).
            TypedBufferBuilder<offset_type> offset_builder;
            // Indices into the child values array, one per selected child element.
            typename TypeTraits<Type>::OffsetBuilderType child_index_builder;

            ListSelectionImpl(KernelContext *ctx, const ExecSpan &batch, int64_t output_length,
                              ExecResult *out)
                    : Base(ctx, batch, output_length, out),
                      offset_builder(ctx->memory_pool()),
                      child_index_builder(ctx->memory_pool()) {}

            template<typename Adapter>
            turbo::Status GenerateOutput() {
                ValuesArrayType typed_values(this->values.to_array_data());

                // TODO presize child_index_builder with a similar heuristic as VarBinarySelectionImpl

                offset_type offset = 0;
                Adapter adapter(this);
                TURBO_RETURN_NOT_OK(adapter.Generate(
                        [&](int64_t index) {
                            // Selected list: record its start offset and enqueue the
                            // indices of every child element it covers.
                            offset_builder.unsafe_append(offset);
                            offset_type value_offset = typed_values.value_offset(index);
                            offset_type value_length = typed_values.value_length(index);
                            offset += value_length;
                            TURBO_RETURN_NOT_OK(child_index_builder.Reserve(value_length));
                            for (offset_type j = value_offset; j < value_offset + value_length; ++j) {
                                child_index_builder.unsafe_append(j);
                            }
                            return turbo::OkStatus();
                        },
                        [&]() {
                            // Null output slot: append the current offset only.
                            offset_builder.unsafe_append(offset);
                            return turbo::OkStatus();
                        }));
                // Final "end" offset.
                offset_builder.unsafe_append(offset);
                return turbo::OkStatus();
            }

            turbo::Status init() override {
                TURBO_RETURN_NOT_OK(offset_builder.Reserve(output_length + 1));
                return turbo::OkStatus();
            }

            turbo::Status finish() override {
                std::shared_ptr<Array> child_indices;
                TURBO_RETURN_NOT_OK(child_index_builder.finish(&child_indices));

                ValuesArrayType typed_values(this->values.to_array_data());

                // No need to boundscheck the child values indices
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Array> taken_child,
                                    Take(*typed_values.values(), *child_indices,
                                         TakeOptions::NoBoundsCheck(), ctx->exec_context()));
                TURBO_RETURN_NOT_OK(offset_builder.finish(&out->buffers[1]));
                out->child_data = {taken_child->data()};
                return turbo::OkStatus();
            }
        };

        // Take/Filter implementation for (large) list-view arrays. List-views
        // address the shared values child through per-slot (offset, size)
        // pairs, so selection only copies those pairs; the values child is
        // passed through untouched in finish().
        template<typename Type>
        struct ListViewSelectionImpl : public Selection<ListViewSelectionImpl<Type>, Type> {
            using offset_type = typename Type::offset_type;

            using Base = Selection<ListViewSelectionImpl<Type>, Type>;
            LIFT_BASE_MEMBERS();

            // Output offsets (buffer 1) and sizes (buffer 2).
            TypedBufferBuilder<offset_type> offsets_builder;
            TypedBufferBuilder<offset_type> sizes_builder;

            ListViewSelectionImpl(KernelContext *ctx, const ExecSpan &batch, int64_t output_length,
                                  ExecResult *out)
                    : Base(ctx, batch, output_length, out),
                      offsets_builder(ctx->memory_pool()),
                      sizes_builder(ctx->memory_pool()) {}

            template<typename Adapter>
            turbo::Status GenerateOutput() {
                // Raw input offsets (buffer 1) and sizes (buffer 2).
                auto *offsets = this->values.template get_values<offset_type>(1);
                auto *sizes = this->values.template get_values<offset_type>(2);

                // End offset of the most recently copied non-null slot; used
                // as the offset for null slots (see the null visitor below).
                offset_type null_list_view_offset = 0;
                Adapter adapter(this);
                TURBO_RETURN_NOT_OK(adapter.Generate(
                        [&](int64_t index) {
                            // Selected slot: copy its (offset, size) pair verbatim.
                            offset_type value_offset = offsets[index];
                            offset_type value_length = sizes[index];
                            offsets_builder.unsafe_append(value_offset);
                            sizes_builder.unsafe_append(value_length);
                            null_list_view_offset = value_offset + value_length;
                            return turbo::OkStatus();
                        },
                        [&]() {
                            // 0 could be appended here, but by adding the last offset, we keep
                            // the buffer compatible with how offsets behave in ListType as well.
                            // The invariant that `offsets[i] + sizes[i] <= values.length` is
                            // trivially maintained by having `sizes[i]` set to 0 here.
                            offsets_builder.unsafe_append(null_list_view_offset);
                            sizes_builder.unsafe_append(0);
                            return turbo::OkStatus();
                        }));
                return turbo::OkStatus();
            }

            turbo::Status init() override {
                // One (offset, size) pair per output slot; unlike ListType,
                // no trailing end-offset is needed.
                TURBO_RETURN_NOT_OK(offsets_builder.Reserve(output_length));
                return sizes_builder.Reserve(output_length);
            }

            turbo::Status finish() override {
                TURBO_RETURN_NOT_OK(offsets_builder.finish(&out->buffers[1]));
                TURBO_RETURN_NOT_OK(sizes_builder.finish(&out->buffers[2]));
                // The values child is shared as-is; only offsets/sizes changed.
                out->child_data = {this->values.child_data[0].to_array_data()};
                return turbo::OkStatus();
            }
        };

        // Take/Filter implementation for dense unions. The output union is
        // rebuilt child by child: every selected slot keeps its type code,
        // gets the next offset in the matching output child, and the index of
        // the original child value is recorded so finish() can gather each
        // output child with Take.
        struct DenseUnionSelectionImpl
                : public Selection<DenseUnionSelectionImpl, DenseUnionType> {
            using Base = Selection<DenseUnionSelectionImpl, DenseUnionType>;
            LIFT_BASE_MEMBERS();

            // Value offsets of the output union (buffer 2).
            TypedBufferBuilder<int32_t> value_offset_buffer_builder_;
            // Type codes of the output union (buffer 1).
            TypedBufferBuilder<int8_t> child_id_buffer_builder_;
            // Declared type codes of the union; indexed by child id.
            std::vector<int8_t> type_codes_;
            // Per-child indices into the ORIGINAL child values, consumed by
            // Take in finish().
            std::vector<Int32Builder> child_indices_builders_;

            DenseUnionSelectionImpl(KernelContext *ctx, const ExecSpan &batch,
                                    int64_t output_length, ExecResult *out)
                    : Base(ctx, batch, output_length, out),
                      value_offset_buffer_builder_(ctx->memory_pool()),
                      child_id_buffer_builder_(ctx->memory_pool()),
                      type_codes_(turbo::checked_cast<const UnionType &>(*this->values.type).type_codes()),
                      child_indices_builders_(type_codes_.size()) {
                for (auto &child_indices_builder: child_indices_builders_) {
                    // Rebind each default-constructed builder to this kernel's pool.
                    child_indices_builder = Int32Builder(ctx->memory_pool());
                }
            }

            template<typename Adapter>
            turbo::Status GenerateOutput() {
                DenseUnionArray typed_values(this->values.to_array_data());
                Adapter adapter(this);
                TURBO_RETURN_NOT_OK(adapter.Generate(
                        [&](int64_t index) {
                            int8_t child_id = typed_values.child_id(index);
                            child_id_buffer_builder_.unsafe_append(type_codes_[child_id]);
                            int32_t value_offset = typed_values.value_offset(index);
                            // The value lands at the current end of its output child.
                            value_offset_buffer_builder_.unsafe_append(
                                    static_cast<int32_t>(child_indices_builders_[child_id].length()));
                            // Per-child sizes are not known up front (unlike the
                            // row-level builders reserved in init()), so reserve
                            // before each unsafe_append.
                            TURBO_RETURN_NOT_OK(child_indices_builders_[child_id].Reserve(1));
                            child_indices_builders_[child_id].unsafe_append(value_offset);
                            return turbo::OkStatus();
                        },
                        [&]() {
                            // Null rows are routed (arbitrarily) to child 0 with a
                            // null child index; Take yields null for null indices.
                            int8_t child_id = 0;
                            child_id_buffer_builder_.unsafe_append(type_codes_[child_id]);
                            value_offset_buffer_builder_.unsafe_append(
                                    static_cast<int32_t>(child_indices_builders_[child_id].length()));
                            TURBO_RETURN_NOT_OK(child_indices_builders_[child_id].Reserve(1));
                            child_indices_builders_[child_id].unsafe_append_null();
                            return turbo::OkStatus();
                        }));
                return turbo::OkStatus();
            }

            turbo::Status init() override {
                // Row-level buffers hold exactly output_length entries.
                TURBO_RETURN_NOT_OK(child_id_buffer_builder_.Reserve(output_length));
                TURBO_RETURN_NOT_OK(value_offset_buffer_builder_.Reserve(output_length));
                return turbo::OkStatus();
            }

            turbo::Status finish() override {
                TURBO_MOVE_OR_RAISE(auto child_ids_buffer, child_id_buffer_builder_.finish());
                TURBO_MOVE_OR_RAISE(auto value_offsets_buffer,
                                    value_offset_buffer_builder_.finish());
                DenseUnionArray typed_values(this->values.to_array_data());
                auto num_fields = typed_values.num_fields();
                // One int8 code per row, so byte size equals row count.
                auto num_rows = child_ids_buffer->size();
                BufferVector buffers{nullptr, std::move(child_ids_buffer),
                                     std::move(value_offsets_buffer)};
                *out = ArrayData(typed_values.type(), num_rows, std::move(buffers), 0);
                for (auto i = 0; i < num_fields; i++) {
                    TURBO_MOVE_OR_RAISE(auto child_indices_array,
                                        child_indices_builders_[i].finish());
                    // NOTE(review): these indices are generated above and always
                    // in range; TakeOptions::NoBoundsCheck() and ctx->exec_context()
                    // could be passed here as StructSelectionImpl does — confirm.
                    TURBO_MOVE_OR_RAISE(std::shared_ptr<Array> child_array,
                                        Take(*typed_values.field(i), *child_indices_array));
                    out->child_data.push_back(child_array->data());
                }
                return turbo::OkStatus();
            }
        };

// We need a slightly different approach for SparseUnion. For Take, we can
// invoke Take on each child's data with boundschecking disabled. For
// Filter on the other hand, if we naively call Filter on each child, then the
// filter output length will have to be redundantly computed. Thus, for Filter
// we instead convert the filter to selection indices and then invoke take.

// SparseUnion selection implementation. ONLY used for Take
        // Take implementation for sparse unions. Only the type-codes buffer is
        // rebuilt per-row; each child is then gathered with the same selection
        // indices in finish().
        struct SparseUnionSelectionImpl
                : public Selection<SparseUnionSelectionImpl, SparseUnionType> {
            using Base = Selection<SparseUnionSelectionImpl, SparseUnionType>;
            LIFT_BASE_MEMBERS();

            // Type codes of the output union (buffer 1).
            TypedBufferBuilder<int8_t> child_id_buffer_builder_;
            // Type code emitted for null selection slots. Any declared code
            // works (the slot is null anyway), so the union's first is used.
            const int8_t type_code_for_null_;

            SparseUnionSelectionImpl(KernelContext *ctx, const ExecSpan &batch,
                                     int64_t output_length, ExecResult *out)
                    : Base(ctx, batch, output_length, out),
                      child_id_buffer_builder_(ctx->memory_pool()),
                      type_code_for_null_(
                              turbo::checked_cast<const UnionType &>(*this->values.type).type_codes()[0]) {}

            template<typename Adapter>
            turbo::Status GenerateOutput() {
                SparseUnionArray typed_values(this->values.to_array_data());
                Adapter adapter(this);
                TURBO_RETURN_NOT_OK(adapter.Generate(
                        [&](int64_t index) {
                            // Selected slot: carry its type code over.
                            child_id_buffer_builder_.unsafe_append(typed_values.type_code(index));
                            return turbo::OkStatus();
                        },
                        [&]() {
                            child_id_buffer_builder_.unsafe_append(type_code_for_null_);
                            return turbo::OkStatus();
                        }));
                return turbo::OkStatus();
            }

            turbo::Status init() override {
                TURBO_RETURN_NOT_OK(child_id_buffer_builder_.Reserve(output_length));
                return turbo::OkStatus();
            }

            turbo::Status finish() override {
                TURBO_MOVE_OR_RAISE(auto child_ids_buffer, child_id_buffer_builder_.finish());
                SparseUnionArray typed_values(this->values.to_array_data());
                auto num_fields = typed_values.num_fields();
                // One int8 code per row, so byte size equals row count.
                auto num_rows = child_ids_buffer->size();
                BufferVector buffers{nullptr, std::move(child_ids_buffer)};
                *out = ArrayData(typed_values.type(), num_rows, std::move(buffers), 0);
                out->child_data.reserve(num_fields);
                for (auto i = 0; i < num_fields; i++) {
                    // The selection indices were already bounds-checked (when
                    // requested) in TakeExec, so skip re-checking here, and use
                    // this kernel's exec context so child allocations come from
                    // the kernel's memory pool — matching StructSelectionImpl.
                    TURBO_MOVE_OR_RAISE(auto child_datum,
                                        Take(*typed_values.field(i), *this->selection.to_array_data(),
                                             TakeOptions::NoBoundsCheck(), ctx->exec_context()));
                    out->child_data.emplace_back(std::move(child_datum).array());
                }
                return turbo::OkStatus();
            }
        };

        // Take/Filter implementation for fixed-size lists. Every output slot
        // (null or not) contributes exactly list_size child indices so the
        // flat values child keeps the required length * list_size shape.
        struct FSLSelectionImpl : public Selection<FSLSelectionImpl, FixedSizeListType> {
            // Indices into the flattened values child; gathered with Take in finish().
            Int64Builder child_index_builder;

            using Base = Selection<FSLSelectionImpl, FixedSizeListType>;
            LIFT_BASE_MEMBERS();

            FSLSelectionImpl(KernelContext *ctx, const ExecSpan &batch, int64_t output_length,
                             ExecResult *out)
                    : Base(ctx, batch, output_length, out), child_index_builder(ctx->memory_pool()) {}

            template<typename Adapter>
            turbo::Status GenerateOutput() {
                ValuesArrayType typed_values(this->values.to_array_data());
                const int32_t list_size = typed_values.list_type()->list_size();
                // Fold in the array-level slice offset: child indices address
                // the unsliced values child.
                const int64_t base_offset = typed_values.offset();

                // We must take list_size elements even for null elements of
                // indices.
                TURBO_RETURN_NOT_OK(child_index_builder.Reserve(output_length * list_size));

                Adapter adapter(this);
                return adapter.Generate(
                        [&](int64_t index) {
                            // Emit the run of child indices covering this list slot.
                            int64_t offset = (base_offset + index) * list_size;
                            for (int64_t j = offset; j < offset + list_size; ++j) {
                                child_index_builder.unsafe_append(j);
                            }
                            return turbo::OkStatus();
                        },
                        [&]() { return child_index_builder.append_nulls(list_size); });
            }

            turbo::Status finish() override {
                std::shared_ptr<Array> child_indices;
                TURBO_RETURN_NOT_OK(child_index_builder.finish(&child_indices));

                ValuesArrayType typed_values(this->values.to_array_data());

                // No need to boundscheck the child values indices
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Array> taken_child,
                                    Take(*typed_values.values(), *child_indices,
                                         TakeOptions::NoBoundsCheck(), ctx->exec_context()));
                out->child_data = {taken_child->data()};
                return turbo::OkStatus();
            }
        };

// ----------------------------------------------------------------------
// Struct selection implementations

// We need a slightly different approach for StructType. For Take, we can
// invoke Take on each struct field's data with boundschecking disabled. For
// Filter on the other hand, if we naively call Filter on each field, then the
// filter output length will have to be redundantly computed. Thus, for Filter
// we instead convert the filter to selection indices and then invoke take.

// Struct selection implementation. ONLY used for Take
        struct StructSelectionImpl : public Selection<StructSelectionImpl, StructType> {
            using Base = Selection<StructSelectionImpl, StructType>;
            LIFT_BASE_MEMBERS();
            using Base::Base;

            // For structs the only per-slot work is the validity bitmap; the
            // field columns are materialized in finish() via per-field Takes.
            template<typename Adapter>
            turbo::Status GenerateOutput() {
                StructArray typed_values(this->values.to_array_data());
                Adapter adapter(this);
                auto on_valid = [&](int64_t) { return turbo::OkStatus(); };
                return adapter.Generate(std::move(on_valid), /*visit_null=*/VisitNoop);
            }

            turbo::Status finish() override {
                StructArray typed_values(this->values.to_array_data());
                const int num_fields = this->values.type->num_fields();

                // Gather each field with boundschecking disabled: the selection
                // indices were already validated before this kernel ran.
                out->child_data.resize(num_fields);
                for (int i = 0; i < num_fields; ++i) {
                    TURBO_MOVE_OR_RAISE(Datum taken_field,
                                        Take(Datum(typed_values.field(i)),
                                             Datum(this->selection.to_array_data()),
                                             TakeOptions::NoBoundsCheck(), ctx->exec_context()));
                    out->child_data[i] = taken_field.array();
                }
                return turbo::OkStatus();
            }
        };

#undef LIFT_BASE_MEMBERS

// ----------------------------------------------------------------------

        // Shared driver for the filter kernels in this file: computes the
        // output length implied by the filter's null-selection policy, then
        // delegates to the Impl's ExecFilter().
        template<typename Impl>
        turbo::Status FilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
            const int64_t output_length = GetFilterOutputSize(
                    batch[1].array, FilterState::Get(ctx).null_selection_behavior);
            Impl impl(ctx, batch, output_length, out);
            return impl.ExecFilter();
        }

    }  // namespace

    // Filter kernel entry point for list(T).
    turbo::Status ListFilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return FilterExec<ListSelectionImpl<ListType>>(ctx, batch, out);
    }

    // Filter kernel entry point for large_list(T).
    turbo::Status LargeListFilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return FilterExec<ListSelectionImpl<LargeListType>>(ctx, batch, out);
    }

    // Filter kernel entry point for list_view(T).
    turbo::Status ListViewFilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return FilterExec<ListViewSelectionImpl<ListViewType>>(ctx, batch, out);
    }

    // Filter kernel entry point for large_list_view(T).
    turbo::Status LargeListViewFilterExec(KernelContext *ctx, const ExecSpan &batch,
                                          ExecResult *out) {
        return FilterExec<ListViewSelectionImpl<LargeListViewType>>(ctx, batch, out);
    }

    // Filter kernel entry point for fixed_size_list(T).
    turbo::Status FSLFilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        const ArraySpan &values = batch[0].array;

        // A fixed-size list of a fixed-width type is itself fixed-width, so the
        // cheaper primitive filter can be used — except when the flattened byte
        // width is 0 (valid for FixedSizeList, but PrimitiveFilterExec might not
        // handle it correctly).
        const bool fixed_width_like = util::IsFixedWidthLike(
                values,
                /*force_null_count=*/true,
                /*exclude_bool_and_dictionary=*/true);
        if (fixed_width_like && util::FixedWidthInBytes(*values.type) > 0) {
            return PrimitiveFilterExec(ctx, batch, out);
        }
        return FilterExec<FSLSelectionImpl>(ctx, batch, out);
    }

    // Filter kernel entry point for dense unions.
    turbo::Status DenseUnionFilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return FilterExec<DenseUnionSelectionImpl>(ctx, batch, out);
    }

    // Filter kernel entry point for map(K, V); maps share the list layout.
    turbo::Status MapFilterExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return FilterExec<ListSelectionImpl<MapType>>(ctx, batch, out);
    }

    namespace {

        // Shared driver for the take kernels in this file: optionally validates
        // the indices against the values length, then delegates to ExecTake().
        template<typename Impl>
        turbo::Status TakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
            if (TakeState::Get(ctx).boundscheck) {
                TURBO_RETURN_NOT_OK(CheckIndexBounds(batch[1].array, batch[0].length()));
            }
            Impl impl(ctx, batch, /*output_length=*/batch[1].length(), out);
            return impl.ExecTake();
        }

    }  // namespace

    // Take kernel entry point for binary/string.
    turbo::Status VarBinaryTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<VarBinarySelectionImpl<BinaryType>>(ctx, batch, out);
    }

    // Take kernel entry point for large_binary/large_string.
    turbo::Status LargeVarBinaryTakeExec(KernelContext *ctx, const ExecSpan &batch,
                                         ExecResult *out) {
        return TakeExec<VarBinarySelectionImpl<LargeBinaryType>>(ctx, batch, out);
    }

    // Take kernel entry point for list(T).
    turbo::Status ListTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<ListSelectionImpl<ListType>>(ctx, batch, out);
    }

    // Take kernel entry point for large_list(T).
    turbo::Status LargeListTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<ListSelectionImpl<LargeListType>>(ctx, batch, out);
    }

    // Take kernel entry point for list_view(T).
    turbo::Status ListViewTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<ListViewSelectionImpl<ListViewType>>(ctx, batch, out);
    }

    // Take kernel entry point for large_list_view(T).
    turbo::Status LargeListViewTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<ListViewSelectionImpl<LargeListViewType>>(ctx, batch, out);
    }

    // Take kernel entry point for fixed_size_list(T).
    turbo::Status FSLTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        const ArraySpan &values = batch[0].array;

        // A fixed-size list of a fixed-width type can be taken as one
        // fixed-width block per slot, which is cheaper than the generic
        // child-index based FSLSelectionImpl path.
        const bool fixed_width_like = util::IsFixedWidthLike(
                values,
                /*force_null_count=*/true,
                /*exclude_bool_and_dictionary=*/true);
        if (fixed_width_like) {
            return FixedWidthTakeExec(ctx, batch, out);
        }
        return TakeExec<FSLSelectionImpl>(ctx, batch, out);
    }

    // Take kernel entry point for dense unions.
    turbo::Status DenseUnionTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<DenseUnionSelectionImpl>(ctx, batch, out);
    }

    // Take kernel entry point for sparse unions.
    turbo::Status SparseUnionTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<SparseUnionSelectionImpl>(ctx, batch, out);
    }

    // Take kernel entry point for structs.
    turbo::Status StructTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<StructSelectionImpl>(ctx, batch, out);
    }

    // Take kernel entry point for map(K, V); maps share the list layout.
    turbo::Status MapTakeExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
        return TakeExec<ListSelectionImpl<MapType>>(ctx, batch, out);
    }

}  // namespace nebula::compute::internal
