// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/compute/row/grouper.h>

#include <memory>
#include <mutex>
#include <type_traits>

#include <nebula/array/builder_primitive.h>

#include <nebula/compute/api_vector.h>
#include <nebula/compute/function.h>
#include <nebula/compute/kernels/row_encoder_internal.h>
#include <nebula/compute/key_hash_internal.h>
#include <nebula/compute/light_array_internal.h>
#include <nebula/compute/registry.h>
#include <nebula/compute/row/compare_internal.h>
#include <nebula/compute/row/grouper_internal.h>
#include <nebula/types/type.h>
#include <nebula/types/type_traits.h>
#include <nebula/bits/bitmap_ops.h>
#include <turbo/base/checked_cast.h>
#include <nebula/util/cpu_info.h>
#include <turbo/log/logging.h>

namespace nebula::compute {

    namespace {

        // Sentinel group id meaning "no group has been assigned / seen yet".
        constexpr uint32_t kNoGroupId = std::numeric_limits<uint32_t>::max();

        // Integral type used for group ids (uint32_t), derived from the sentinel above,
        // and the corresponding array type used when emitting group ids.
        using group_id_t = std::remove_const<decltype(kNoGroupId)>::type;
        using GroupIdType = CTypeTraits<group_id_t>::ArrowType;
        // Shared singleton instance of the group-id data type, reused by all groupers.
        auto g_group_id_type = std::make_shared<GroupIdType>();

        // Returns a pointer to the raw value bytes of `data`, advanced by `offset`
        // logical elements past the span's own offset.  Only valid for fixed-width
        // types (byte_width() > 0).
        inline const uint8_t *GetValuesAsBytes(const ArraySpan &data, int64_t offset = 0) {
            const int64_t byte_width = data.type->byte_width();
            DKCHECK_GT(byte_width, 0);
            return data.get_values<uint8_t>(1, (data.offset + offset) * byte_width);
        }

        // Validates the arguments of a GetNextSegment call: `offset` must lie in
        // [0, length] and `values` must match `key_types` both in count and in
        // element type.  Returns OkStatus on success, invalid_argument otherwise.
        template<typename Value>
        turbo::Status CheckForGetNextSegment(const std::vector<Value> &values, int64_t length,
                                             int64_t offset, const std::vector<TypeHolder> &key_types) {
            if (offset < 0 || offset > length) {
                return turbo::invalid_argument_error("invalid grouping segmenter offset: ", offset);
            }
            if (values.size() != key_types.size()) {
                return turbo::invalid_argument_error("expected batch size ", key_types.size(), " but got ",
                                                     values.size());
            }
            const size_t num_keys = key_types.size();
            for (size_t i = 0; i < num_keys; ++i) {
                const auto &actual = values[i];
                const auto &expected = key_types[i];
                if (*actual.type() != *expected.type) {
                    return turbo::invalid_argument_error("expected batch value ", i, " of type ", *expected.type,
                                                         " but got ", *actual.type());
                }
            }
            return turbo::OkStatus();
        }

        // Batch-level overload, enabled for ExecSpan and ExecBatch only: forwards
        // the batch's values and length to the vector-based checker above.
        template<typename Batch>
        enable_if_t<std::is_same<Batch, ExecSpan>::value || std::is_same<Batch, ExecBatch>::value,
                turbo::Status>
        CheckForGetNextSegment(const Batch &batch, int64_t offset,
                               const std::vector<TypeHolder> &key_types) {
            return CheckForGetNextSegment(batch.values, batch.length, offset, key_types);
        }

        // Common base for RowSegmenter implementations: stores the key types the
        // segmenter was created with and exposes them via key_types().
        struct BaseRowSegmenter : public RowSegmenter {
            explicit BaseRowSegmenter(const std::vector<TypeHolder> &key_types)
                    : key_types_(key_types) {}

            const std::vector<TypeHolder> &key_types() const override { return key_types_; }

            std::vector<TypeHolder> key_types_;  // segment-key types, fixed at construction
        };

        // Builds a Segment covering rows [offset, offset + length) of a batch of
        // `batch_length` rows.  The third field is set when the segment runs to the
        // end of the batch (offset + length >= batch_length).
        Segment MakeSegment(int64_t batch_length, int64_t offset, int64_t length, bool extends) {
            const bool reaches_batch_end = offset + length >= batch_length;
            return Segment{offset, length, reaches_batch_end, extends};
        }

        // Used by SimpleKeySegmenter::GetNextSegment to find the match-length of a value
        // within a fixed-width buffer: counts how many consecutive rows of
        // `array_bytes`, starting at row `offset` and stopping at row `length`, are
        // byte-for-byte equal to the `match_width`-byte value at `match_bytes`.
        int64_t GetMatchLength(const uint8_t *match_bytes, int64_t match_width,
                               const uint8_t *array_bytes, int64_t offset, int64_t length) {
            int64_t row = offset;
            const uint8_t *candidate = array_bytes + row * match_width;
            while (row < length &&
                   memcmp(match_bytes, candidate, static_cast<size_t>(match_width)) == 0) {
                ++row;
                candidate += match_width;
            }
            return row - offset;
        }

        // Callback deciding whether fresh grouping data extends the current segment;
        // the pointed-to data is implementation-defined (raw key bytes or a group id).
        using ExtendFunc = std::function<bool(const void *)>;
        constexpr bool kDefaultExtends = true;  // by default, the first segment extends
        constexpr bool kEmptyExtends = true;    // an empty segment extends too

        // Segmenter for the no-keys case: with no keys all rows belong together, so
        // each batch yields a single segment spanning everything from the offset to
        // the end of the batch.
        struct NoKeysSegmenter : public BaseRowSegmenter {
            static std::unique_ptr<RowSegmenter> create() {
                return std::make_unique<NoKeysSegmenter>();
            }

            NoKeysSegmenter() : BaseRowSegmenter({}) {}

            // Stateless, so there is nothing to reset.
            turbo::Status reset() override { return turbo::OkStatus(); }

            turbo::Result<Segment> GetNextSegment(const ExecSpan &batch, int64_t offset) override {
                TURBO_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, {}));
                const int64_t remaining = batch.length - offset;
                return MakeSegment(batch.length, offset, remaining, kDefaultExtends);
            }
        };

        // Segmenter for a single fixed-width key column.  Remembers the most
        // recently seen key value (save_key_data_) so consecutive batches sharing
        // the same key can be stitched into one logical segment ("extends").
        struct SimpleKeySegmenter : public BaseRowSegmenter {
            static turbo::Result<std::unique_ptr<RowSegmenter>> create(TypeHolder key_type) {
                return std::make_unique<SimpleKeySegmenter>(key_type);
            }

            explicit SimpleKeySegmenter(TypeHolder key_type)
                    : BaseRowSegmenter({key_type}),
                      key_type_(key_types_[0]),
                      save_key_data_(static_cast<size_t>(key_type_.type->byte_width())),
                      extend_was_called_(false) {}

            // Only fixed-width types are supported by this segmenter.
            turbo::Status CheckType(const DataType &type) {
                if (!is_fixed_width(type)) {
                    return turbo::invalid_argument_error("SimpleKeySegmenter does not support type ", type);
                }
                return turbo::OkStatus();
            }

            turbo::Status reset() override {
                extend_was_called_ = false;
                return turbo::OkStatus();
            }

            // Checks whether the given grouping data extends the current segment, i.e., is equal to
            // previously seen grouping data, which is updated with each invocation.
            // The very first call (after construction or reset) always extends.
            bool Extend(const void *data) {
                bool extends = !extend_was_called_
                               ? kDefaultExtends
                               : 0 == memcmp(save_key_data_.data(), data, save_key_data_.size());
                extend_was_called_ = true;
                memcpy(save_key_data_.data(), data, save_key_data_.size());
                return extends;
            }

            // Scalar key: every row carries the same value, so the segment spans all
            // remaining rows [offset, length).
            turbo::Result<Segment> GetNextSegment(const Scalar &scalar, int64_t offset, int64_t length) {
                TURBO_RETURN_NOT_OK(CheckType(*scalar.type));
                if (!scalar.is_valid) {
                    return turbo::invalid_argument_error("segmenting an invalid scalar");
                }
                auto data = turbo::checked_cast<const nebula::internal::PrimitiveScalarBase &>(scalar).data();
                bool extends = length > 0 ? Extend(data) : kEmptyExtends;
                // BUG FIX: the segment length is the number of remaining rows
                // (length - offset), not the whole batch length; passing `length`
                // made the segment overrun the batch whenever offset > 0, unlike the
                // array overload below which measures its match relative to offset.
                return MakeSegment(length, offset, length - offset, extends);
            }

            // Array key: the segment covers the run of rows starting at `offset`
            // whose key bytes equal the key at `offset`.
            turbo::Result<Segment> GetNextSegment(const DataType &array_type, const uint8_t *array_bytes,
                                                  int64_t offset, int64_t length) {
                TURBO_RETURN_NOT_OK(CheckType(array_type));
                DKCHECK_LE(offset, length);
                int64_t byte_width = array_type.byte_width();
                int64_t match_length = GetMatchLength(array_bytes + offset * byte_width, byte_width,
                                                      array_bytes, offset, length);
                bool extends = length > 0 ? Extend(array_bytes + offset * byte_width) : kEmptyExtends;
                return MakeSegment(length, offset, match_length, extends);
            }

            turbo::Result<Segment> GetNextSegment(const ExecSpan &batch, int64_t offset) override {
                TURBO_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, {key_type_}));
                if (offset == batch.length) {
                    return MakeSegment(batch.length, offset, 0, kEmptyExtends);
                }
                const auto &value = batch.values[0];
                if (value.is_scalar()) {
                    return GetNextSegment(*value.scalar, offset, batch.length);
                }
                DKCHECK(value.is_array());
                const auto &array = value.array;
                // Null keys are not handled by this fast path.
                if (array.get_null_count() > 0) {
                    return turbo::unimplemented_error("segmenting a nullable array");
                }
                return GetNextSegment(*array.type, GetValuesAsBytes(array), offset, batch.length);
            }

        private:
            TypeHolder key_type_;
            std::vector<uint8_t> save_key_data_;  // previously seen segment-key grouping data
            bool extend_was_called_;              // false until Extend() first runs
        };

        // Segmenter for any number of keys of arbitrary supported types.  Delegates
        // group-id computation to a Grouper and cuts a segment wherever the group
        // id changes; remembers the last group id so adjacent batches can extend.
        struct AnyKeysSegmenter : public BaseRowSegmenter {
            static turbo::Result<std::unique_ptr<RowSegmenter>> create(
                    const std::vector<TypeHolder> &key_types, ExecContext *ctx) {
                TURBO_MOVE_OR_RAISE(auto grouper, Grouper::create(key_types, ctx));  // check types
                return std::make_unique<AnyKeysSegmenter>(key_types, ctx, std::move(grouper));
            }

            AnyKeysSegmenter(const std::vector<TypeHolder> &key_types, ExecContext *ctx,
                             std::unique_ptr<Grouper> grouper)
                    : BaseRowSegmenter(key_types),
                      grouper_(std::move(grouper)),
                      save_group_id_(kNoGroupId) {}

            turbo::Status reset() override {
                TURBO_RETURN_NOT_OK(grouper_->reset());
                save_group_id_ = kNoGroupId;
                return turbo::OkStatus();
            }

            // Returns whether the group id pointed to by `data` equals the
            // previously saved one (the first call after reset always extends), and
            // saves it for the next call.
            bool Extend(const void *data) {
                auto group_id = *static_cast<const group_id_t *>(data);
                bool extends =
                        save_group_id_ == kNoGroupId ? kDefaultExtends : save_group_id_ == group_id;
                save_group_id_ = group_id;
                return extends;
            }

            // Runs the grouper on a single row.  This is used to determine the group id of the
            // first row of a new segment to see if it extends the previous segment.
            template<typename Batch>
            turbo::Result<group_id_t> MapGroupIdAt(const Batch &batch, int64_t offset) {
                TURBO_MOVE_OR_RAISE(auto datum, grouper_->consume(batch, offset,
                        /*length=*/1));
                if (!datum.is_array()) {
                    return turbo::invalid_argument_error("accessing unsupported datum kind ", datum.kind());
                }
                const std::shared_ptr<ArrayData> &data = datum.array();
                DKCHECK(data->get_null_count() == 0);
                DKCHECK_EQ(data->type->id(), GroupIdType::type_id);
                DKCHECK_EQ(1, data->length);
                const group_id_t *values = data->get_values<group_id_t>(1);
                return values[0];
            }

            turbo::Result<Segment> GetNextSegment(const ExecSpan &batch, int64_t offset) override {
                TURBO_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, key_types_));
                if (offset == batch.length) {
                    return MakeSegment(batch.length, offset, 0, kEmptyExtends);
                }
                // the group id must be computed prior to resetting the grouper, since it is compared
                // to save_group_id_, and after resetting the grouper produces incomparable group ids
                TURBO_MOVE_OR_RAISE(auto group_id, MapGroupIdAt(batch, offset));
                // Compares the pre-reset group id against save_group_id_, then saves
                // the post-reset id (passed in via `data`) for the next invocation.
                ExtendFunc bound_extend = [this, group_id](const void *data) {
                    bool extends = Extend(&group_id);
                    save_group_id_ = *static_cast<const group_id_t *>(data);
                    return extends;
                };
                // resetting drops grouper's group-ids, freeing-up memory for the next segment
                TURBO_RETURN_NOT_OK(grouper_->reset());
                // GH-34475: cache the grouper-consume result across invocations of GetNextSegment
                TURBO_MOVE_OR_RAISE(auto datum, grouper_->consume(batch, offset));
                if (datum.is_array()) {
                    // `data` is an array whose index-0 corresponds to index `offset` of `batch`
                    const std::shared_ptr<ArrayData> &data = datum.array();
                    DKCHECK_EQ(data->length, batch.length - offset);
                    DKCHECK(data->get_null_count() == 0);
                    DKCHECK_EQ(data->type->id(), GroupIdType::type_id);
                    const group_id_t *values = data->get_values<group_id_t>(1);
                    // The segment is the longest prefix sharing the first row's group id.
                    int64_t cursor;
                    for (cursor = 1; cursor < data->length; cursor++) {
                        if (values[0] != values[cursor]) break;
                    }
                    int64_t length = cursor;
                    bool extends = length > 0 ? bound_extend(values) : kEmptyExtends;
                    return MakeSegment(batch.length, offset, length, extends);
                } else {
                    return turbo::invalid_argument_error("segmenting unsupported datum kind ", datum.kind());
                }
            }

        private:
            std::unique_ptr<Grouper> grouper_;  // maps key rows to group ids
            group_id_t save_group_id_;          // last group id seen, or kNoGroupId
        };

        // Validates a Grouper::consume offset and replaces a negative (default)
        // length with "everything from the offset to the end of the batch".
        turbo::Status CheckAndCapLengthForConsume(int64_t batch_length, int64_t &consume_offset,
                                                  int64_t *consume_length) {
            if (consume_offset < 0) {
                return turbo::invalid_argument_error("invalid grouper consume offset: ", consume_offset);
            }
            if (*consume_length >= 0) {
                return turbo::OkStatus();
            }
            *consume_length = batch_length - consume_offset;
            return turbo::OkStatus();
        }

    }  // namespace

    // Factory for an AnyKeysSegmenter, bypassing the specialized-segmenter
    // selection performed by RowSegmenter::create below.
    turbo::Result<std::unique_ptr<RowSegmenter>> make_any_keys_segmenter(
            const std::vector<TypeHolder> &key_types, ExecContext *ctx) {
        return AnyKeysSegmenter::create(key_types, ctx);
    }

    // Picks the cheapest segmenter that can handle the given keys: no keys at all
    // uses NoKeysSegmenter; a single non-nullable fixed-width key uses
    // SimpleKeySegmenter; everything else falls back to AnyKeysSegmenter.
    turbo::Result<std::unique_ptr<RowSegmenter>> RowSegmenter::create(
            const std::vector<TypeHolder> &key_types, bool nullable_keys, ExecContext *ctx) {
        if (key_types.empty()) {
            return NoKeysSegmenter::create();
        }
        if (!nullable_keys && key_types.size() == 1) {
            const DataType *single_type = key_types[0].type;
            if (single_type != nullptr && is_fixed_width(*single_type)) {
                return SimpleKeySegmenter::create(key_types[0]);
            }
        }
        return AnyKeysSegmenter::create(key_types, ctx);
    }

    namespace {

        // Grouper for the degenerate no-keys case: every row belongs to the single
        // group with id 0.
        struct GrouperNoKeysImpl : Grouper {
            // Builds a uint32 array of `length` elements, all equal to `value`.
            turbo::Result<std::shared_ptr<Array>> MakeConstantGroupIdArray(int64_t length,
                                                                           group_id_t value) {
                std::unique_ptr<ArrayBuilder> a_builder;
                TURBO_RETURN_NOT_OK(MakeBuilder(default_memory_pool(), g_group_id_type, &a_builder));
                using GroupIdBuilder = typename TypeTraits<GroupIdType>::BuilderType;
                auto builder = turbo::checked_cast<GroupIdBuilder *>(a_builder.get());
                if (length != 0) {
                    TURBO_RETURN_NOT_OK(builder->resize(length));
                }
                for (int64_t i = 0; i < length; i++) {
                    builder->unsafe_append(value);
                }
                std::shared_ptr<Array> array;
                TURBO_RETURN_NOT_OK(builder->finish(&array));
                return array;
            }

            turbo::Status reset() override { return turbo::OkStatus(); }

            turbo::Result<Datum> consume(const ExecSpan &batch, int64_t offset, int64_t length) override {
                // BUG FIX: validate the offset and cap a negative (default) length to
                // the remaining rows, as GrouperImpl::consume does; previously a
                // negative length was passed straight to MakeConstantGroupIdArray.
                TURBO_RETURN_NOT_OK(CheckAndCapLengthForConsume(batch.length, offset, &length));
                TURBO_MOVE_OR_RAISE(auto array, MakeConstantGroupIdArray(length, 0));
                return Datum(array);
            }

            turbo::Result<ExecBatch> GetUniques() override {
                // BUG FIX: build the single-group-id array through the builder-based
                // helper above.  The previous code wrote through
                // GetMutableValues<uint32_t>(0) on a bare ArrayData — buffer slot 0 is
                // conventionally the validity bitmap, not the values buffer, and no
                // values buffer had been allocated for it.
                TURBO_MOVE_OR_RAISE(auto array, MakeConstantGroupIdArray(/*length=*/1, /*value=*/0));
                ExecBatch out({Datum(array)}, 1);
                return out;
            }

            // There is always exactly one group.
            uint32_t num_groups() const override { return 1; }
        };

        // Generic hash grouper: encodes each row's key columns into a contiguous
        // byte string and maps distinct byte strings to dense group ids through an
        // unordered_map.  Encoded keys are retained so GetUniques can decode them.
        struct GrouperImpl : public Grouper {
            // Builds a GrouperImpl with one KeyEncoder per key column; returns
            // unimplemented_error for key types without an encoder.
            static turbo::Result<std::unique_ptr<GrouperImpl>> create(
                    const std::vector<TypeHolder> &key_types, ExecContext *ctx) {
                auto impl = std::make_unique<GrouperImpl>();

                impl->encoders_.resize(key_types.size());
                impl->ctx_ = ctx;

                for (size_t i = 0; i < key_types.size(); ++i) {
                    // TODO(wesm): eliminate this probably unneeded shared_ptr copy
                    std::shared_ptr<DataType> key = key_types[i].get_shared_ptr();

                    if (key->id() == Type::BOOL) {
                        impl->encoders_[i] = std::make_unique<internal::BooleanKeyEncoder>();
                        continue;
                    }

                    if (key->id() == Type::DICTIONARY) {
                        impl->encoders_[i] =
                                std::make_unique<internal::DictionaryKeyEncoder>(key, ctx->memory_pool());
                        continue;
                    }

                    if (is_fixed_width(key->id())) {
                        impl->encoders_[i] = std::make_unique<internal::FixedWidthKeyEncoder>(key);
                        continue;
                    }

                    if (is_binary_like(key->id())) {
                        impl->encoders_[i] =
                                std::make_unique<internal::VarLengthKeyEncoder<BinaryType>>(key);
                        continue;
                    }

                    if (is_large_binary_like(key->id())) {
                        impl->encoders_[i] =
                                std::make_unique<internal::VarLengthKeyEncoder<LargeBinaryType>>(key);
                        continue;
                    }

                    if (key->id() == Type::NA) {
                        impl->encoders_[i] = std::make_unique<internal::NullKeyEncoder>();
                        continue;
                    }

                    return turbo::unimplemented_error("Keys of type ", *key);
                }

                return impl;
            }

            turbo::Status reset() override {
                map_.clear();
                // BUG FIX: restore the freshly-constructed state of offsets_, whose
                // member initializer is {0} (the start offset of group 0).  Clearing
                // it outright left GetUniques() reading offsets_ misaligned by one
                // group after a reset.
                offsets_.clear();
                offsets_.push_back(0);
                key_bytes_.clear();
                num_groups_ = 0;
                return turbo::OkStatus();
            }

            turbo::Result<Datum> consume(const ExecSpan &batch, int64_t offset, int64_t length) override {
                TURBO_RETURN_NOT_OK(CheckAndCapLengthForConsume(batch.length, offset, &length));
                // Normalize partial consumption to a full-batch consume of a slice.
                if (offset != 0 || length != batch.length) {
                    auto batch_slice = batch.to_exec_batch().slice(offset, length);
                    return consume(ExecSpan(batch_slice), 0, -1);
                }
                // Phase 1: per-row encoded-key lengths, accumulated by each encoder.
                std::vector<int32_t> offsets_batch(batch.length + 1);
                for (int i = 0; i < batch.num_values(); ++i) {
                    encoders_[i]->AddLength(batch[i], batch.length, offsets_batch.data());
                }

                // Phase 2: exclusive prefix-sum turns lengths into start offsets;
                // the final slot holds the total encoded size.
                int32_t total_length = 0;
                for (int64_t i = 0; i < batch.length; ++i) {
                    auto total_length_before = total_length;
                    total_length += offsets_batch[i];
                    offsets_batch[i] = total_length_before;
                }
                offsets_batch[batch.length] = total_length;

                // Phase 3: encode all key columns into one contiguous byte buffer,
                // each row writing at its own start offset.
                std::vector<uint8_t> key_bytes_batch(total_length);
                std::vector<uint8_t *> key_buf_ptrs(batch.length);
                for (int64_t i = 0; i < batch.length; ++i) {
                    key_buf_ptrs[i] = key_bytes_batch.data() + offsets_batch[i];
                }

                for (int i = 0; i < batch.num_values(); ++i) {
                    TURBO_RETURN_NOT_OK(encoders_[i]->Encode(batch[i], batch.length, key_buf_ptrs.data()));
                }

                // Phase 4: map each encoded key to a group id, assigning fresh ids
                // to keys never seen before.
                TypedBufferBuilder<uint32_t> group_ids_batch(ctx_->memory_pool());
                TURBO_RETURN_NOT_OK(group_ids_batch.resize(batch.length));

                for (int64_t i = 0; i < batch.length; ++i) {
                    int32_t key_length = offsets_batch[i + 1] - offsets_batch[i];
                    std::string key(
                            reinterpret_cast<const char *>(key_bytes_batch.data() + offsets_batch[i]),
                            key_length);

                    auto it_success = map_.emplace(key, num_groups_);
                    auto group_id = it_success.first->second;

                    if (it_success.second) {
                        // new key; update offsets and key_bytes
                        ++num_groups_;
                        // Skip if there are no keys
                        if (key_length > 0) {
                            auto next_key_offset = static_cast<int32_t>(key_bytes_.size());
                            key_bytes_.resize(next_key_offset + key_length);
                            offsets_.push_back(next_key_offset + key_length);
                            memcpy(key_bytes_.data() + next_key_offset, key.c_str(), key_length);
                        }
                    }

                    group_ids_batch.unsafe_append(group_id);
                }

                TURBO_MOVE_OR_RAISE(auto group_ids, group_ids_batch.finish());
                return Datum(UInt32Array(batch.length, std::move(group_ids)));
            }

            uint32_t num_groups() const override { return num_groups_; }

            // Decodes the retained per-group key bytes back into one value per key
            // column; the result batch has num_groups_ rows.
            turbo::Result<ExecBatch> GetUniques() override {
                ExecBatch out({}, num_groups_);

                std::vector<uint8_t *> key_buf_ptrs(num_groups_);
                for (int64_t i = 0; i < num_groups_; ++i) {
                    key_buf_ptrs[i] = key_bytes_.data() + offsets_[i];
                }

                out.values.resize(encoders_.size());
                for (size_t i = 0; i < encoders_.size(); ++i) {
                    TURBO_MOVE_OR_RAISE(
                            out.values[i],
                            encoders_[i]->Decode(key_buf_ptrs.data(), static_cast<int32_t>(num_groups_),
                                                 ctx_->memory_pool()));
                }

                return out;
            }

            ExecContext *ctx_;                               // execution context (memory pool)
            std::unordered_map<std::string, uint32_t> map_;  // encoded key bytes -> group id
            std::vector<int32_t> offsets_ = {0};             // start offset of each group's bytes in key_bytes_
            std::vector<uint8_t> key_bytes_;                 // concatenated encoded keys, one run per group
            uint32_t num_groups_ = 0;                        // number of distinct groups seen so far
            std::vector<std::unique_ptr<internal::KeyEncoder>> encoders_;  // one per key column
        };

        // Hash-table based grouper using columnar row encoding, vectorized
        // hashing (Hashing32) and a SwissTable map. Only usable for the key
        // type combinations accepted by CanUse(); Grouper::create falls back
        // to GrouperImpl otherwise.
        struct GrouperFastImpl : public Grouper {
            // Padding appended to allocations so SIMD kernels can safely read
            // slightly past the logical end of a buffer.
            static constexpr int kBitmapPaddingForSIMD = 64;  // bits
            static constexpr int kPaddingForSIMD = 32;        // bytes

            // True when this implementation supports the given keys: requires
            // a little-endian build, at least one key column, and no
            // large-binary-like key types.
            static bool CanUse(const std::vector<TypeHolder> &key_types) {
                if (key_types.size() == 0) {
                    return false;
                }
#if NEBULA_LITTLE_ENDIAN
                for (size_t i = 0; i < key_types.size(); ++i) {
                    if (is_large_binary_like(key_types[i].id())) {
                        return false;
                    }
                }
                return true;
#else
                return false;
#endif
            }

            // Builds a grouper for `keys`: derives per-column metadata,
            // initializes the row encoder, row tables and SwissTable, and
            // wires up the equality/append callbacks used while probing and
            // inserting keys.
            //
            // @param keys key column types (one entry per grouping column)
            // @param ctx  execution context providing the memory pool
            // @return the initialized grouper, or an error status
            static turbo::Result<std::unique_ptr<GrouperFastImpl>> create(
                    const std::vector<TypeHolder> &keys, ExecContext *ctx) {
                auto impl = std::make_unique<GrouperFastImpl>();
                impl->ctx_ = ctx;

                // Scratch stack for short-lived temporary vectors, sized at 64
                // bytes per mini-batch row — assumed to bound the largest
                // per-row temporary; TODO confirm against TempVectorStack use.
                TURBO_RETURN_NOT_OK(impl->temp_stack_.init(ctx->memory_pool(), 64 * minibatch_size_max_));
                impl->encode_ctx_.hardware_flags =
                        nebula::internal::CpuInfo::GetInstance()->hardware_flags();
                impl->encode_ctx_.stack = &impl->temp_stack_;

                auto num_columns = keys.size();
                impl->col_metadata_.resize(num_columns);
                impl->key_types_.resize(num_columns);
                impl->dictionaries_.resize(num_columns);
                for (size_t icol = 0; icol < num_columns; ++icol) {
                    const TypeHolder &key = keys[icol];
                    if (key.id() == Type::DICTIONARY) {
                        // Dictionary keys are grouped by their indices; the
                        // byte width comes from the (fixed-width) index type.
                        auto bit_width = turbo::checked_cast<const FixedWidthType &>(*key).bit_width();
                        DKCHECK(bit_width % 8 == 0);
                        impl->col_metadata_[icol] = KeyColumnMetadata(true, bit_width / 8);
                    } else if (key.id() == Type::BOOL) {
                        // A fixed length of 0 denotes a bit-packed column.
                        impl->col_metadata_[icol] = KeyColumnMetadata(true, 0);
                    } else if (is_fixed_width(key.id())) {
                        impl->col_metadata_[icol] = KeyColumnMetadata(
                                true, turbo::checked_cast<const FixedWidthType &>(*key).bit_width() / 8);
                    } else if (is_binary_like(key.id())) {
                        // Variable-length columns carry 32-bit offsets.
                        impl->col_metadata_[icol] = KeyColumnMetadata(false, sizeof(uint32_t));
                    } else if (key.id() == Type::NA) {
                        impl->col_metadata_[icol] = KeyColumnMetadata(true, 0, /*is_null_type_in=*/true);
                    } else {
                        return turbo::unimplemented_error("Keys of type ", *key);
                    }
                    impl->key_types_[icol] = key;
                }

                impl->encoder_.init(impl->col_metadata_,
                        /* row_alignment = */ sizeof(uint64_t),
                        /* string_alignment = */ sizeof(uint64_t));
                TURBO_RETURN_NOT_OK(impl->rows_.init(ctx->memory_pool(), impl->encoder_.row_metadata()));
                TURBO_RETURN_NOT_OK(
                        impl->rows_minibatch_.init(ctx->memory_pool(), impl->encoder_.row_metadata()));
                impl->minibatch_size_ = impl->minibatch_size_min_;
                // The callbacks below capture a raw pointer to the grouper, so
                // they must not outlive it.
                GrouperFastImpl *impl_ptr = impl.get();
                // Equality callback: compares candidate input rows against the
                // already-stored group rows, reporting mismatching selections.
                impl->map_equal_impl_ =
                        [impl_ptr](int num_keys_to_compare, const uint16_t *selection_may_be_null,
                                   const uint32_t *group_ids, uint32_t *out_num_keys_mismatch,
                                   uint16_t *out_selection_mismatch, void *) {
                            KeyCompare::CompareColumnsToRows(
                                    num_keys_to_compare, selection_may_be_null, group_ids,
                                    &impl_ptr->encode_ctx_, out_num_keys_mismatch, out_selection_mismatch,
                                    impl_ptr->encoder_.batch_all_cols(), impl_ptr->rows_,
                                    /* are_cols_in_encoding_order=*/true);
                        };
                // Append callback: row-encodes newly seen keys and appends
                // them to the table of unique rows.
                impl->map_append_impl_ = [impl_ptr](int num_keys, const uint16_t *selection, void *) {
                    TURBO_RETURN_NOT_OK(impl_ptr->encoder_.EncodeSelected(&impl_ptr->rows_minibatch_,
                                                                          num_keys, selection));
                    return impl_ptr->rows_.AppendSelectionFrom(impl_ptr->rows_minibatch_, num_keys,
                                                               nullptr);
                };
                TURBO_RETURN_NOT_OK(impl->map_.init(impl->encode_ctx_.hardware_flags, ctx->memory_pool()));
                impl->cols_.resize(num_columns);
                impl->minibatch_hashes_.resize(impl->minibatch_size_max_ +
                                               kPaddingForSIMD / sizeof(uint32_t));

                return impl;
            }

            // Clears all accumulated groups so the grouper can be reused;
            // keeps the column metadata and (by assumption) the dictionaries.
            turbo::Status reset() override {
                DKCHECK_EQ(temp_stack_.AllocatedSize(), 0);
                rows_.Clean();
                rows_minibatch_.Clean();
                map_.cleanup();
                TURBO_RETURN_NOT_OK(map_.init(encode_ctx_.hardware_flags, ctx_->memory_pool()));
                // TODO: It is now assumed that the dictionaries_ are identical to the first batch
                // throughout the grouper's lifespan so no resetting is needed. But if we want to
                // support different dictionaries for different batches, we need to reset the
                // dictionaries_ here.
                return turbo::OkStatus();
            }

            // Maps each row of `batch` (restricted to [offset, offset+length))
            // to a group id, assigning new ids to unseen key combinations.
            // Scalar values are broadcast to full arrays first.
            // @return a UInt32Array Datum with one group id per consumed row
            turbo::Result<Datum> consume(const ExecSpan &batch, int64_t offset, int64_t length) override {
                TURBO_RETURN_NOT_OK(CheckAndCapLengthForConsume(batch.length, offset, &length));
                if (offset != 0 || length != batch.length) {
                    // Recurse on a zero-offset slice so ConsumeImpl only ever
                    // sees full batches.
                    auto batch_slice = batch.to_exec_batch().slice(offset, length);
                    return consume(ExecSpan(batch_slice), 0, -1);
                }
                // ARROW-14027: broadcast scalar arguments for now
                for (int i = 0; i < batch.num_values(); i++) {
                    if (batch[i].is_scalar()) {
                        ExecBatch expanded = batch.to_exec_batch();
                        for (int j = i; j < expanded.num_values(); j++) {
                            if (expanded.values[j].is_scalar()) {
                                TURBO_MOVE_OR_RAISE(
                                        expanded.values[j],
                                        MakeArrayFromScalar(*expanded.values[j].scalar(), expanded.length,
                                                            ctx_->memory_pool()));
                            }
                        }
                        return ConsumeImpl(ExecSpan(expanded));
                    }
                }
                return ConsumeImpl(batch);
            }

            // Core grouping routine: validates dictionaries, wraps the input
            // columns as KeyColumnArrays and probes/inserts rows into the
            // SwissTable in mini-batches, writing one group id per input row.
            turbo::Result<Datum> ConsumeImpl(const ExecSpan &batch) {
                int64_t num_rows = batch.length;
                int num_columns = batch.num_values();
                // process dictionaries
                for (int icol = 0; icol < num_columns; ++icol) {
                    if (key_types_[icol].id() == Type::DICTIONARY) {
                        const ArraySpan &data = batch[icol].array;
                        auto dict = make_array(data.dictionary().to_array_data());
                        if (dictionaries_[icol]) {
                            if (!dictionaries_[icol]->equals(dict)) {
                                // TODO(bkietz) unify if necessary. For now, just error if any batch's
                                // dictionary differs from the first we saw for this key
                                return turbo::unimplemented_error("Unifying differing dictionaries");
                            }
                        } else {
                            dictionaries_[icol] = std::move(dict);
                        }
                    }
                }

                // Output buffer: one uint32 group id per input row.
                std::shared_ptr<nebula::Buffer> group_ids;
                TURBO_MOVE_OR_RAISE(
                        group_ids, allocate_buffer(sizeof(uint32_t) * num_rows, ctx_->memory_pool()));

                for (int icol = 0; icol < num_columns; ++icol) {
                    const uint8_t *non_nulls = nullptr;
                    const uint8_t *fixedlen = nullptr;
                    const uint8_t *varlen = nullptr;

                    // Skip if the key's type is NULL
                    if (key_types_[icol].id() != Type::NA) {
                        if (batch[icol].array.buffers[0].data != nullptr) {
                            non_nulls = batch[icol].array.buffers[0].data;
                        }
                        fixedlen = batch[icol].array.buffers[1].data;
                        if (!col_metadata_[icol].is_fixed_length) {
                            varlen = batch[icol].array.buffers[2].data;
                        }
                    }

                    int64_t offset = batch[icol].array.offset;

                    // Build a view over the whole underlying buffers, then
                    // slice off the leading `offset` rows.
                    auto col_base = KeyColumnArray(col_metadata_[icol], offset + num_rows, non_nulls,
                                                   fixedlen, varlen);

                    cols_[icol] = col_base.slice(offset, num_rows);
                }

                // Split into smaller mini-batches
                //
                for (uint32_t start_row = 0; start_row < num_rows;) {
                    uint32_t batch_size_next = std::min(static_cast<uint32_t>(minibatch_size_),
                                                        static_cast<uint32_t>(num_rows) - start_row);

                    // Encode
                    rows_minibatch_.Clean();
                    encoder_.PrepareEncodeSelected(start_row, batch_size_next, cols_);

                    // Compute hash
                    Hashing32::HashMultiColumn(encoder_.batch_all_cols(), &encode_ctx_,
                                               minibatch_hashes_.data());

                    // Map
                    auto match_bitvector =
                            util::TempVectorHolder<uint8_t>(&temp_stack_, (batch_size_next + 7) / 8);
                    {
                        // Cheap pre-filter before the exact lookup.
                        auto local_slots = util::TempVectorHolder<uint8_t>(&temp_stack_, batch_size_next);
                        map_.early_filter(batch_size_next, minibatch_hashes_.data(),
                                          match_bitvector.mutable_data(), local_slots.mutable_data());
                        map_.find(batch_size_next, minibatch_hashes_.data(),
                                  match_bitvector.mutable_data(), local_slots.mutable_data(),
                                  reinterpret_cast<uint32_t *>(group_ids->mutable_data()) + start_row,
                                  &temp_stack_, map_equal_impl_, nullptr);
                    }
                    // Rows with no existing match get new group ids appended.
                    auto ids = util::TempVectorHolder<uint16_t>(&temp_stack_, batch_size_next);
                    int num_ids;
                    util::bit_util::bits_to_indexes(0, encode_ctx_.hardware_flags, batch_size_next,
                                                    match_bitvector.mutable_data(), &num_ids,
                                                    ids.mutable_data());

                    TURBO_RETURN_NOT_OK(map_.map_new_keys(
                            num_ids, ids.mutable_data(), minibatch_hashes_.data(),
                            reinterpret_cast<uint32_t *>(group_ids->mutable_data()) + start_row,
                            &temp_stack_, map_equal_impl_, map_append_impl_, nullptr));

                    start_row += batch_size_next;

                    // Grow the mini-batch geometrically (up to the max) so
                    // large inputs amortize per-batch overhead.
                    if (minibatch_size_ * 2 <= minibatch_size_max_) {
                        minibatch_size_ *= 2;
                    }
                }

                return Datum(UInt32Array(batch.length, std::move(group_ids)));
            }

            // Number of distinct groups seen so far (== stored unique rows).
            uint32_t num_groups() const override { return static_cast<uint32_t>(rows_.length()); }

            // Make sure padded buffers end up with the right logical size

            // Allocates a validity bitmap with SIMD padding, sliced back to
            // the logical byte length covering `length` bits.
            turbo::Result<std::shared_ptr<Buffer>> AllocatePaddedBitmap(int64_t length) {
                TURBO_MOVE_OR_RAISE(
                        std::shared_ptr<Buffer> buf,
                        allocate_bitmap(length + kBitmapPaddingForSIMD, ctx_->memory_pool()));
                return SliceMutableBuffer(buf, 0, bit_util::BytesForBits(length));
            }

            // Allocates a byte buffer with SIMD padding, sliced back to `size`.
            turbo::Result<std::shared_ptr<Buffer>> AllocatePaddedBuffer(int64_t size) {
                TURBO_MOVE_OR_RAISE(
                        std::shared_ptr<Buffer> buf,
                        allocate_buffer(size + kBitmapPaddingForSIMD, ctx_->memory_pool()));
                return SliceMutableBuffer(buf, 0, size);
            }

            // Decodes the stored unique rows back into one array per key
            // column: allocates output buffers, decodes fixed-length parts in
            // mini-batches, then (if present) varying-length parts, and
            // finally re-attaches dictionaries to dictionary-typed columns.
            turbo::Result<ExecBatch> GetUniques() override {
                auto num_columns = static_cast<uint32_t>(col_metadata_.size());
                int64_t num_groups = rows_.length();

                std::vector<std::shared_ptr<Buffer>> non_null_bufs(num_columns);
                std::vector<std::shared_ptr<Buffer>> fixedlen_bufs(num_columns);
                std::vector<std::shared_ptr<Buffer>> varlen_bufs(num_columns);

                for (size_t i = 0; i < num_columns; ++i) {
                    if (col_metadata_[i].is_null_type) {
                        // Null-typed columns carry no buffers at all.
                        uint8_t *non_nulls = nullptr;
                        uint8_t *fixedlen = nullptr;
                        cols_[i] =
                                KeyColumnArray(col_metadata_[i], num_groups, non_nulls, fixedlen, nullptr);
                        continue;
                    }
                    TURBO_MOVE_OR_RAISE(non_null_bufs[i], AllocatePaddedBitmap(num_groups));
                    if (col_metadata_[i].is_fixed_length && !col_metadata_[i].is_null_type) {
                        if (col_metadata_[i].fixed_length == 0) {
                            // fixed_length == 0 means a bit-packed column.
                            TURBO_MOVE_OR_RAISE(fixedlen_bufs[i], AllocatePaddedBitmap(num_groups));
                        } else {
                            TURBO_MOVE_OR_RAISE(
                                    fixedlen_bufs[i],
                                    AllocatePaddedBuffer(num_groups * col_metadata_[i].fixed_length));
                        }
                    } else {
                        // Varlen columns: num_groups + 1 offsets of 32 bits.
                        TURBO_MOVE_OR_RAISE(fixedlen_bufs[i],
                                            AllocatePaddedBuffer((num_groups + 1) * sizeof(uint32_t)));
                    }
                    cols_[i] =
                            KeyColumnArray(col_metadata_[i], num_groups, non_null_bufs[i]->mutable_data(),
                                           fixedlen_bufs[i]->mutable_data(), nullptr);
                }

                // First pass: decode validity and fixed-length buffers (for
                // varlen columns this fills in the offsets).
                for (int64_t start_row = 0; start_row < num_groups;) {
                    int64_t batch_size_next =
                            std::min(num_groups - start_row, static_cast<int64_t>(minibatch_size_max_));
                    encoder_.DecodeFixedLengthBuffers(start_row, start_row, batch_size_next, rows_,
                                                      &cols_, encode_ctx_.hardware_flags, &temp_stack_);
                    start_row += batch_size_next;
                }

                if (!rows_.metadata().is_fixed_length) {
                    // Second pass: the last offset now gives each varlen
                    // column's total data size; allocate and decode the data.
                    for (size_t i = 0; i < num_columns; ++i) {
                        if (!col_metadata_[i].is_fixed_length) {
                            auto varlen_size =
                                    reinterpret_cast<const uint32_t *>(fixedlen_bufs[i]->data())[num_groups];
                            TURBO_MOVE_OR_RAISE(varlen_bufs[i], AllocatePaddedBuffer(varlen_size));
                            cols_[i] = KeyColumnArray(
                                    col_metadata_[i], num_groups, non_null_bufs[i]->mutable_data(),
                                    fixedlen_bufs[i]->mutable_data(), varlen_bufs[i]->mutable_data());
                        }
                    }

                    for (int64_t start_row = 0; start_row < num_groups;) {
                        int64_t batch_size_next =
                                std::min(num_groups - start_row, static_cast<int64_t>(minibatch_size_max_));
                        encoder_.DecodeVaryingLengthBuffers(start_row, start_row, batch_size_next, rows_,
                                                            &cols_, encode_ctx_.hardware_flags,
                                                            &temp_stack_);
                        start_row += batch_size_next;
                    }
                }

                // Wrap the decoded buffers into ArrayData output values.
                ExecBatch out({}, num_groups);
                out.values.resize(num_columns);
                for (size_t i = 0; i < num_columns; ++i) {
                    if (col_metadata_[i].is_null_type) {
                        out.values[i] = ArrayData::create(null(), num_groups, {nullptr}, num_groups);
                        continue;
                    }
                    auto valid_count = nebula::internal::CountSetBits(
                            non_null_bufs[i]->data(), /*offset=*/0, static_cast<int64_t>(num_groups));
                    int null_count = static_cast<int>(num_groups) - static_cast<int>(valid_count);

                    if (col_metadata_[i].is_fixed_length) {
                        out.values[i] = ArrayData::create(
                                key_types_[i].get_shared_ptr(), num_groups,
                                {std::move(non_null_bufs[i]), std::move(fixedlen_bufs[i])}, null_count);
                    } else {
                        out.values[i] =
                                ArrayData::create(key_types_[i].get_shared_ptr(), num_groups,
                                                  {std::move(non_null_bufs[i]), std::move(fixedlen_bufs[i]),
                                                   std::move(varlen_bufs[i])},
                                                  null_count);
                    }
                }

                // process dictionaries
                for (size_t icol = 0; icol < num_columns; ++icol) {
                    if (key_types_[icol].id() == Type::DICTIONARY) {
                        if (dictionaries_[icol]) {
                            out.values[icol].array()->dictionary = dictionaries_[icol]->data();
                        } else {
                            // No batch was consumed for this key; attach an
                            // empty dictionary of the right type.
                            TURBO_MOVE_OR_RAISE(auto dict,
                                                MakeArrayOfNull(key_types_[icol].get_shared_ptr(), 0));
                            out.values[icol].array()->dictionary = dict->data();
                        }
                    }
                }

                return out;
            }

            // Mini-batch size starts at the minimum and doubles per batch up
            // to the maximum (see ConsumeImpl).
            static constexpr int minibatch_size_max_ = nebula::util::MiniBatch::kMiniBatchLength;
            static constexpr int minibatch_size_min_ = 128;
            int minibatch_size_;

            ExecContext *ctx_;
            // Scratch storage for short-lived temporary vectors.
            nebula::util::TempVectorStack temp_stack_;
            LightContext encode_ctx_;

            std::vector<TypeHolder> key_types_;
            std::vector<KeyColumnMetadata> col_metadata_;
            std::vector<KeyColumnArray> cols_;
            std::vector<uint32_t> minibatch_hashes_;

            // First-seen dictionary per dictionary-typed key column; later
            // batches must match it exactly (see ConsumeImpl).
            std::vector<std::shared_ptr<Array>> dictionaries_;

            // Row-encoded unique keys: one row per group.
            RowTableImpl rows_;
            // Staging row table for the current mini-batch.
            RowTableImpl rows_minibatch_;
            RowTableEncoder encoder_;
            // Hash table mapping key hashes to group ids.
            SwissTable map_;
            SwissTable::EqualImpl map_equal_impl_;
            SwissTable::AppendImpl map_append_impl_;
        };

    }  // namespace

    turbo::Result<std::unique_ptr<Grouper>> Grouper::create(const std::vector<TypeHolder> &key_types,
                                                            ExecContext *ctx) {
        // Prefer the vectorized implementation when every key type supports
        // it; otherwise fall back to the generic row-encoding grouper.
        const bool fast_path = GrouperFastImpl::CanUse(key_types);
        if (!fast_path) {
            return GrouperImpl::create(key_types, ctx);
        }
        return GrouperFastImpl::create(key_types, ctx);
    }

    turbo::Result<std::shared_ptr<ListArray>> Grouper::ApplyGroupings(const ListArray &groupings,
                                                                      const Array &array,
                                                                      ExecContext *ctx) {
        // Reorder `array` so that rows belonging to the same group become
        // contiguous, then wrap the result in a list array that shares the
        // grouping's offsets.
        const auto &take_indices = groupings.data()->child_data[0];
        TURBO_MOVE_OR_RAISE(Datum grouped,
                            compute::Take(array, take_indices,
                                          TakeOptions::NoBoundsCheck(), ctx));

        return std::make_shared<ListArray>(list(array.type()), groupings.length(),
                                           groupings.value_offsets(), grouped.make_array());
    }

    turbo::Result<std::shared_ptr<ListArray>> Grouper::MakeGroupings(const UInt32Array &ids,
                                                                     uint32_t num_groups,
                                                                     ExecContext *ctx) {
        // Builds a list<int32> array mapping every group id to the row indices
        // belonging to it, via a stable counting sort over `ids`.
        if (ids.null_count() != 0) {
            return turbo::invalid_argument_error("MakeGroupings with null ids");
        }

        // Histogram: count how many rows fall into each group.
        TURBO_MOVE_OR_RAISE(auto offsets, allocate_buffer(sizeof(int32_t) * (num_groups + 1),
                                                          ctx->memory_pool()));
        auto *group_offsets = reinterpret_cast<int32_t *>(offsets->mutable_data());
        std::memset(group_offsets, 0, offsets->size());
        for (int row = 0; row < ids.length(); ++row) {
            DKCHECK_LT(ids.value(row), num_groups);
            ++group_offsets[ids.value(row)];
        }

        // Exclusive prefix sum turns the counts into list offsets.
        int32_t running_total = 0;
        for (uint32_t g = 0; g < num_groups; ++g) {
            const int32_t count = group_offsets[g];
            group_offsets[g] = running_total;
            running_total += count;
        }
        group_offsets[num_groups] = running_total;
        DKCHECK_EQ(ids.length(), running_total);

        // Scatter row indices using a scratch copy of the offsets: the copy
        // is consumed as a per-group write cursor, while the original buffer
        // stays intact as the output offsets.
        TURBO_MOVE_OR_RAISE(auto offsets_copy,
                            offsets->copy_slice(0, offsets->size(), ctx->memory_pool()));
        auto *cursors = reinterpret_cast<int32_t *>(offsets_copy->mutable_data());

        TURBO_MOVE_OR_RAISE(auto sort_indices, allocate_buffer(sizeof(int32_t) * ids.length(),
                                                               ctx->memory_pool()));
        auto *row_indices = reinterpret_cast<int32_t *>(sort_indices->mutable_data());
        for (int row = 0; row < ids.length(); ++row) {
            row_indices[cursors[ids.value(row)]++] = row;
        }

        return std::make_shared<ListArray>(
                list(int32()), num_groups, std::move(offsets),
                std::make_shared<Int32Array>(ids.length(), std::move(sort_indices)));
    }

}  // namespace nebula::compute
