// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <cmath>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

#include <nebula/array/builder_nested.h>
#include <nebula/array/builder_primitive.h>
#include <nebula/core/buffer_builder.h>
#include <nebula/compute/api_aggregate.h>
#include <nebula/compute/api_vector.h>
#include <nebula/compute/kernel.h>
#include <nebula/compute/kernels/aggregate_internal.h>
#include <nebula/compute/kernels/aggregate_var_std_internal.h>
#include <nebula/compute/kernels/common_internal.h>
#include <nebula/compute/kernels/row_encoder_internal.h>
#include <nebula/compute/kernels/util_internal.h>
#include <nebula/compute/row/grouper.h>
#include <nebula/core/record_batch.h>
#include <nebula/core/stl_allocator.h>
#include <nebula/types/type_traits.h>
#include <nebula/bits/bit_run_reader.h>
#include <nebula/bits/bitmap_ops.h>
#include <nebula/bits/bitmap_writer.h>
#include <turbo/base/checked_cast.h>
#include <nebula/util/cpu_info.h>
#include <turbo/numeric/int128.h>
#include <nebula/numeric/int_util_overflow.h>
#include <nebula/util/ree_util.h>
#include <turbo/utility/tdigest.h>
#include <nebula/core/visit_type_inline.h>

namespace nebula {

    // Hoist the bitmap writer out of nebula::internal so the kernels below
    // can name it without the internal:: qualifier.
    using internal::FirstTimeBitmapWriter;
}  // namespace nebula

namespace nebula::compute::internal {

    namespace {

        /// C++ abstract base class for the HashAggregateKernel interface.
        /// Implementations should be default constructible and perform initialization in
        /// init().
        struct GroupedAggregator : KernelState {
            /// Prepare the aggregator: read kernel options from `args` and set up
            /// any per-group state buffers.
            virtual turbo::Status init(ExecContext *, const KernelInitArgs &args) = 0;

            /// Grow per-group state so that group ids in [0, new_num_groups) are valid.
            virtual turbo::Status resize(int64_t new_num_groups) = 0;

            /// Accumulate one batch of values. The batch's last column is the
            /// uint32 group-id array produced by the grouper.
            virtual turbo::Status consume(const ExecSpan &batch) = 0;

            /// Fold `other`'s per-group state into this aggregator.
            /// `group_id_mapping` maps each of `other`'s group ids to the
            /// corresponding group id in `this`.
            virtual turbo::Status Merge(GroupedAggregator &&other, const ArrayData &group_id_mapping) = 0;

            /// Produce the final per-group results as a Datum (one slot per group).
            virtual turbo::Result<Datum> Finalize() = 0;

            /// Type of the array produced by Finalize().
            virtual std::shared_ptr<DataType> out_type() const = 0;
        };

        /// Kernel-init adapter: default-constructs an aggregator of type `Impl`,
        /// initializes it from the kernel arguments, and returns it as a generic
        /// KernelState.
        template<typename Impl>
        turbo::Result<std::unique_ptr<KernelState>> HashAggregateInit(KernelContext *ctx,
                                                                      const KernelInitArgs &args) {
            auto aggregator = std::make_unique<Impl>();
            TURBO_RETURN_NOT_OK(aggregator->init(ctx->exec_context(), args));
            // R build with openSUSE155 requires an explicit unique_ptr construction
            std::unique_ptr<KernelState> state(std::move(aggregator));
            return state;
        }

        /// Forward a resize request to the GroupedAggregator held in the kernel state.
        turbo::Status HashAggregateResize(KernelContext *ctx, int64_t num_groups) {
            auto *aggregator = turbo::checked_cast<GroupedAggregator *>(ctx->state());
            return aggregator->resize(num_groups);
        }

        /// Forward one batch to the GroupedAggregator held in the kernel state.
        turbo::Status HashAggregateConsume(KernelContext *ctx, const ExecSpan &batch) {
            auto *aggregator = turbo::checked_cast<GroupedAggregator *>(ctx->state());
            return aggregator->consume(batch);
        }

        /// Merge another kernel's aggregator state into this kernel's aggregator,
        /// translating group ids through `group_id_mapping`.
        turbo::Status HashAggregateMerge(KernelContext *ctx, KernelState &&other,
                                         const ArrayData &group_id_mapping) {
            auto *target = turbo::checked_cast<GroupedAggregator *>(ctx->state());
            return target->Merge(turbo::checked_cast<GroupedAggregator &&>(other),
                                 group_id_mapping);
        }

        /// Materialize the aggregator's final per-group results into *out.
        turbo::Status HashAggregateFinalize(KernelContext *ctx, Datum *out) {
            auto *aggregator = turbo::checked_cast<GroupedAggregator *>(ctx->state());
            return aggregator->Finalize().try_value(out);
        }

        /// Output-type resolver: defers to the aggregator instance, since the
        /// result type may depend on the input type seen at init time.
        turbo::Result<TypeHolder> ResolveGroupOutputType(KernelContext *ctx,
                                                         const std::vector<TypeHolder> &) {
            auto *aggregator = turbo::checked_cast<GroupedAggregator *>(ctx->state());
            return aggregator->out_type();
        }

        /// Assemble a HashAggregateKernel that wires the generic adapter callbacks
        /// (resize/consume/merge/finalize) to the given signature and init function.
        HashAggregateKernel MakeKernel(std::shared_ptr<KernelSignature> signature,
                                       KernelInit init, const bool ordered = false) {
            return HashAggregateKernel(std::move(signature), std::move(init),
                                       HashAggregateResize, HashAggregateConsume,
                                       HashAggregateMerge, HashAggregateFinalize, ordered);
        }

        /// Convenience overload: build the (argument, uint32 group ids) signature
        /// with an aggregator-resolved output type, then delegate.
        HashAggregateKernel MakeKernel(InputType argument_type, KernelInit init,
                                       const bool ordered = false) {
            auto signature =
                    KernelSignature::create({std::move(argument_type), InputType(Type::UINT32)},
                                            OutputType(ResolveGroupOutputType));
            return MakeKernel(std::move(signature), std::move(init), ordered);
        }

        /// Kernel for nullary aggregates (e.g. COUNT(*)): the only input column
        /// is the uint32 group-id array itself.
        HashAggregateKernel MakeUnaryKernel(KernelInit init) {
            auto signature = KernelSignature::create({InputType(Type::UINT32)},
                                                     OutputType(ResolveGroupOutputType));
            return MakeKernel(std::move(signature), std::move(init));
        }

        /// Build one kernel per input type via `make_kernel` and register each
        /// with `function`, stopping at the first failure.
        turbo::Status AddHashAggKernels(
                const std::vector<std::shared_ptr<DataType>> &types,
                turbo::Result<HashAggregateKernel> make_kernel(const std::shared_ptr<DataType> &),
                HashAggregateFunction *function) {
            for (const auto &type: types) {
                TURBO_MOVE_OR_RAISE(auto kernel, make_kernel(type));
                TURBO_RETURN_NOT_OK(function->add_kernel(std::move(kernel)));
            }
            return turbo::OkStatus();
        }

        // ----------------------------------------------------------------------
        // Helpers for more easily implementing hash aggregates

        /// Uniform accessors for per-group scalar state held in a
        /// TypedBufferBuilder; specialized below for bit-packed booleans.
        template<typename T>
        struct GroupedValueTraits {
            using CType = typename TypeTraits<T>::CType;

            static CType Get(const CType *values, uint32_t g) { return values[g]; }

            static void Set(CType *values, uint32_t g, CType v) { values[g] = v; }

            // Bulk-append `num_values` elements of `values` starting at element
            // index `offset` to `destination`.
            static turbo::Status AppendBuffers(TypedBufferBuilder<CType> *destination,
                                               const uint8_t *values, int64_t offset, int64_t num_values) {
                const auto *src = reinterpret_cast<const CType *>(values);
                return destination->append(src + offset, num_values);
            }
        };

        /// Boolean specialization: per-group values are stored as a packed
        /// bitmap, so element access goes through bit_util and appends copy
        /// bit ranges rather than whole bytes.
        template<>
        struct GroupedValueTraits<BooleanType> {
            static bool Get(const uint8_t *values, uint32_t g) {
                return bit_util::get_bit(values, g);
            }

            static void Set(uint8_t *values, uint32_t g, bool v) {
                bit_util::SetBitTo(values, g, v);
            }

            // Append `num_values` bits of `values`, starting at bit `offset`.
            static turbo::Status AppendBuffers(TypedBufferBuilder<bool> *destination,
                                               const uint8_t *values, int64_t offset, int64_t num_values) {
                // Reserve up front so the unchecked append below cannot overflow.
                TURBO_RETURN_NOT_OK(destination->Reserve(num_values));
                destination->unsafe_append(values, offset, num_values);
                return turbo::OkStatus();
            }
        };

        /// Visit (group id, value) pairs of `batch`, where batch[0] is the
        /// argument column (array or scalar) and batch[1] is the uint32
        /// group-id array. This overload is selected (via enable_if on the
        /// callable's return type) when `valid_func` returns void.
        /// `valid_func(g, value)` is invoked for valid slots, `null_func(g)`
        /// for null slots; values are passed as TypeTraits<Type>::CType.
        template<typename Type, typename ConsumeValue, typename ConsumeNull>
        typename turbo::call_traits::enable_if_return<ConsumeValue, void>::type
        VisitGroupedValues(const ExecSpan &batch, ConsumeValue &&valid_func,
                           ConsumeNull &&null_func) {
            // Group ids live in buffer index 1 of the group-id column.
            auto g = batch[1].array.get_values<uint32_t>(1);
            if (batch[0].is_array()) {
                VisitArrayValuesInline<Type>(
                        batch[0].array,
                        [&](typename TypeTraits<Type>::CType val) { valid_func(*g++, val); },
                        [&]() { null_func(*g++); });
                return;
            }
            // Scalar argument: broadcast the single value (or null) to every row.
            const Scalar &input = *batch[0].scalar;
            if (input.is_valid) {
                const auto val = UnboxScalar<Type>::Unbox(input);
                for (int64_t i = 0; i < batch.length; i++) {
                    valid_func(*g++, val);
                }
            } else {
                for (int64_t i = 0; i < batch.length; i++) {
                    null_func(*g++);
                }
            }
        }

        /// Status-returning overload of VisitGroupedValues: selected when
        /// `valid_func` returns turbo::Status; the first error aborts the
        /// visit. Unlike the void overload above, values are passed as
        /// GetViewType<Type>::T here.
        template<typename Type, typename ConsumeValue, typename ConsumeNull>
        typename turbo::call_traits::enable_if_return<ConsumeValue, turbo::Status>::type
        VisitGroupedValues(const ExecSpan &batch, ConsumeValue &&valid_func,
                           ConsumeNull &&null_func) {
            // Group ids live in buffer index 1 of the group-id column.
            auto g = batch[1].array.get_values<uint32_t>(1);
            if (batch[0].is_array()) {
                return VisitArrayValuesInline<Type>(
                        batch[0].array,
                        [&](typename GetViewType<Type>::T val) { return valid_func(*g++, val); },
                        [&]() { return null_func(*g++); });
            }
            // Scalar argument: broadcast the single value (or null) to every row.
            const Scalar &input = *batch[0].scalar;
            if (input.is_valid) {
                const auto val = UnboxScalar<Type>::Unbox(input);
                for (int64_t i = 0; i < batch.length; i++) {
                    TURBO_RETURN_NOT_OK(valid_func(*g++, val));
                }
            } else {
                for (int64_t i = 0; i < batch.length; i++) {
                    TURBO_RETURN_NOT_OK(null_func(*g++));
                }
            }
            return turbo::OkStatus();
        }

        /// Convenience wrapper: visit only the non-null values of `batch`;
        /// null slots are skipped (their groups' state is left untouched).
        template<typename Type, typename ConsumeValue>
        void VisitGroupedValuesNonNull(const ExecSpan &batch, ConsumeValue &&valid_func) {
            VisitGroupedValues<Type>(batch, std::forward<ConsumeValue>(valid_func),
                                     [](uint32_t) {});
        }

        // ----------------------------------------------------------------------
        // Count implementation

        // Nullary-count implementation -- COUNT(*).
        //
        // Keeps one int64 counter per group; every consumed row unconditionally
        // increments the counter of its group.
        struct GroupedCountAllImpl : public GroupedAggregator {
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                counts_ = BufferBuilder(ctx->memory_pool());
                return turbo::OkStatus();
            }

            // Zero-fill counters for any newly seen groups.
            turbo::Status resize(int64_t new_num_groups) override {
                const int64_t added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                return counts_.append(added_groups * sizeof(int64_t), 0);
            }

            // Add the other aggregator's counters into ours, translating its
            // group ids through group_id_mapping.
            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto *other = turbo::checked_cast<GroupedCountAllImpl *>(&raw_other);

                auto *counts = counts_.mutable_data_as<int64_t>();
                const auto *other_counts = other->counts_.data_as<int64_t>();

                const auto *mapping = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g) {
                    counts[mapping[other_g]] += other_counts[other_g];
                }
                return turbo::OkStatus();
            }

            // For COUNT(*) the batch's only column is the group-id array itself.
            turbo::Status consume(const ExecSpan &batch) override {
                auto *counts = counts_.mutable_data_as<int64_t>();
                const auto *group_ids = batch[0].array.get_values<uint32_t>(1);
                for (int64_t i = 0; i < batch.length; ++i) {
                    counts[group_ids[i]] += 1;
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                TURBO_MOVE_OR_RAISE(auto counts, counts_.finish());
                return std::make_shared<Int64Array>(num_groups_, std::move(counts));
            }

            std::shared_ptr<DataType> out_type() const override { return int64(); }

            int64_t num_groups_ = 0;
            BufferBuilder counts_;
        };

        /// COUNT implementation honoring CountOptions::mode: count all rows,
        /// only valid (non-null) rows, or only null rows, per group.
        struct GroupedCountImpl : public GroupedAggregator {
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                options_ = turbo::checked_cast<const CountOptions &>(*args.options);
                counts_ = BufferBuilder(ctx->memory_pool());
                return turbo::OkStatus();
            }

            // Zero-fill counters for any newly seen groups.
            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                return counts_.append(added_groups * sizeof(int64_t), 0);
            }

            // Add other's per-group counts into ours, translating its group ids
            // through group_id_mapping.
            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedCountImpl *>(&raw_other);

                auto *counts = counts_.mutable_data_as<int64_t>();
                const auto *other_counts = other->counts_.data_as<int64_t>();

                auto *g = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g, ++g) {
                    counts[*g] += other_counts[other_g];
                }
                return turbo::OkStatus();
            }

            template<bool count_valid>
            struct RunEndEncodedCountImpl {
                /// Count the number of valid or invalid values in a run-end-encoded array.
                ///
                /// \param[in] input the run-end-encoded array
                /// \param[out] counts the counts being accumulated
                /// \param[in] g the group ids of the values in the array
                template<typename RunEndCType>
                void DoCount(const ArraySpan &input, int64_t *counts, const uint32_t *g) {
                    ree_util::RunEndEncodedArraySpan<RunEndCType> ree_span(input);
                    // Validity lives on the physical values child of the REE array.
                    const auto *physical_validity = ree_util::ValuesArray(input).get_values<uint8_t>(0);
                    auto end = ree_span.end();
                    for (auto it = ree_span.begin(); it != end; ++it) {
                        const bool is_valid = bit_util::get_bit(physical_validity, it.index_into_array());
                        if (is_valid == count_valid) {
                            // Every logical element of this run contributes one count
                            // to its own group.
                            for (int64_t i = 0; i < it.run_length(); ++i, ++g) {
                                counts[*g] += 1;
                            }
                        } else {
                            // Skip this run but keep the group-id cursor in sync.
                            g += it.run_length();
                        }
                    }
                }

                // Dispatch on the run-end integer width (int16/int32/int64).
                void operator()(const ArraySpan &input, int64_t *counts, const uint32_t *g) {
                    auto ree_type = turbo::checked_cast<const RunEndEncodedType *>(input.type);
                    switch (ree_type->run_end_type()->id()) {
                        case Type::INT16:
                            DoCount<int16_t>(input, counts, g);
                            break;
                        case Type::INT32:
                            DoCount<int32_t>(input, counts, g);
                            break;
                        default:
                            DoCount<int64_t>(input, counts, g);
                            break;
                    }
                }
            };

            turbo::Status consume(const ExecSpan &batch) override {
                auto *counts = counts_.mutable_data_as<int64_t>();
                auto *g_begin = batch[1].array.get_values<uint32_t>(1);

                if (options_.mode == CountOptions::ALL) {
                    // Count every row regardless of validity.
                    for (int64_t i = 0; i < batch.length; ++i, ++g_begin) {
                        counts[*g_begin] += 1;
                    }
                } else if (batch[0].is_array()) {
                    const ArraySpan &input = batch[0].array;
                    if (options_.mode == CountOptions::ONLY_VALID) {  // ONLY_VALID
                        // NA-typed input has no valid values, so nothing to count.
                        if (input.type->id() != nebula::Type::NA) {
                            const uint8_t *bitmap = input.buffers[0].data;
                            if (bitmap) {
                                // Fast path: iterate over runs of set validity bits.
                                nebula::internal::VisitSetBitRunsVoid(
                                        bitmap, input.offset, input.length, [&](int64_t offset, int64_t length) {
                                            auto g = g_begin + offset;
                                            for (int64_t i = 0; i < length; ++i, ++g) {
                                                counts[*g] += 1;
                                            }
                                        });
                            } else {
                                // Array without validity bitmaps require special handling of nulls.
                                const bool all_valid = !input.may_have_logical_nulls();
                                if (all_valid) {
                                    for (int64_t i = 0; i < input.length; ++i, ++g_begin) {
                                        counts[*g_begin] += 1;
                                    }
                                } else {
                                    switch (input.type->id()) {
                                        case Type::RUN_END_ENCODED:
                                            RunEndEncodedCountImpl<true>{}(input, counts, g_begin);
                                            break;
                                        default:  // Generic and forward-compatible version.
                                            for (int64_t i = 0; i < input.length; ++i, ++g_begin) {
                                                counts[*g_begin] += input.is_valid(i);
                                            }
                                            break;
                                    }
                                }
                            }
                        }
                    } else {  // ONLY_NULL
                        if (input.type->id() == nebula::Type::NA) {
                            // Every slot of an NA-typed array is null.
                            for (int64_t i = 0; i < batch.length; ++i, ++g_begin) {
                                counts[*g_begin] += 1;
                            }
                        } else if (input.may_have_logical_nulls()) {
                            if (input.has_validity_bitmap()) {
                                // Count cleared validity bits directly.
                                auto end = input.offset + input.length;
                                for (int64_t i = input.offset; i < end; ++i, ++g_begin) {
                                    counts[*g_begin] += !bit_util::get_bit(input.buffers[0].data, i);
                                }
                            } else {
                                // Arrays without validity bitmaps require special handling of nulls.
                                switch (input.type->id()) {
                                    case Type::RUN_END_ENCODED:
                                        RunEndEncodedCountImpl<false>{}(input, counts, g_begin);
                                        break;
                                    default:  // Generic and forward-compatible version.
                                        for (int64_t i = 0; i < input.length; ++i, ++g_begin) {
                                            counts[*g_begin] += input.is_null(i);
                                        }
                                        break;
                                }
                            }
                        }
                    }
                } else {
                    // Scalar argument: its validity applies to every row.
                    const Scalar &input = *batch[0].scalar;
                    if (options_.mode == CountOptions::ONLY_VALID) {
                        for (int64_t i = 0; i < batch.length; ++i, ++g_begin) {
                            counts[*g_begin] += input.is_valid;
                        }
                    } else {  // ONLY_NULL
                        for (int64_t i = 0; i < batch.length; ++i, ++g_begin) {
                            counts[*g_begin] += !input.is_valid;
                        }
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                TURBO_MOVE_OR_RAISE(auto counts, counts_.finish());
                return std::make_shared<Int64Array>(num_groups_, std::move(counts));
            }

            std::shared_ptr<DataType> out_type() const override { return int64(); }

            int64_t num_groups_ = 0;
            CountOptions options_;
            BufferBuilder counts_;
        };

        // ----------------------------------------------------------------------
        // Sum/Mean/Product implementation

        /// CRTP base class for grouped reductions (sum/product/mean).
        ///
        /// The derived `Impl` provides:
        ///   - NullValue(out_type): the identity/default accumulator for a new group
        ///   - Reduce(out_type, acc, value): folds one value into the accumulator
        ///   - finish(...): materializes the values buffer (a default is below)
        ///
        /// Per group we track the running accumulator (`reduced_`), the number
        /// of values folded in (`counts_`, checked against options.min_count),
        /// and a "saw no nulls" bit (`no_nulls_`, applied when skip_nulls is false).
        template<typename Type, typename Impl,
                typename AccumulateType = typename FindAccumulatorType<Type>::Type>
        struct GroupedReducingAggregator : public GroupedAggregator {
            using AccType = AccumulateType;
            using CType = typename TypeTraits<AccType>::CType;
            using InputCType = typename TypeTraits<Type>::CType;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                pool_ = ctx->memory_pool();
                options_ = turbo::checked_cast<const ScalarAggregateOptions &>(*args.options);
                reduced_ = TypedBufferBuilder<CType>(pool_);
                counts_ = TypedBufferBuilder<int64_t>(pool_);
                no_nulls_ = TypedBufferBuilder<bool>(pool_);
                out_type_ = GetOutType(args.inputs[0].get_shared_ptr());
                return turbo::OkStatus();
            }

            // New groups start at the reduction identity with a zero count and
            // the no-nulls flag set.
            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                TURBO_RETURN_NOT_OK(reduced_.append(added_groups, Impl::NullValue(*out_type_)));
                TURBO_RETURN_NOT_OK(counts_.append(added_groups, 0));
                TURBO_RETURN_NOT_OK(no_nulls_.append(added_groups, true));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                CType *reduced = reduced_.mutable_data();
                int64_t *counts = counts_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();

                VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, InputCType value) {
                            reduced[g] = Impl::Reduce(*out_type_, reduced[g], value);
                            counts[g]++;
                        },
                        // A null value only clears the group's no-nulls flag.
                        [&](uint32_t g) { bit_util::SetBitTo(no_nulls, g, false); });
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other =
                        turbo::checked_cast<GroupedReducingAggregator<Type, Impl, AccType> *>(&raw_other);

                CType *reduced = reduced_.mutable_data();
                int64_t *counts = counts_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();

                const CType *other_reduced = other->reduced_.data();
                const int64_t *other_counts = other->counts_.data();
                const uint8_t *other_no_nulls = other->no_nulls_.data();

                // Fold each of other's groups into the group it maps to here;
                // counts add, accumulators reduce, no-nulls flags AND together.
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g, ++g) {
                    counts[*g] += other_counts[other_g];
                    reduced[*g] = Impl::Reduce(*out_type_, reduced[*g], other_reduced[other_g]);
                    bit_util::SetBitTo(
                            no_nulls, *g,
                            bit_util::get_bit(no_nulls, *g) && bit_util::get_bit(other_no_nulls, other_g));
                }
                return turbo::OkStatus();
            }

            // Generate the values/nulls buffers. Groups whose count falls short
            // of options.min_count are marked null in *null_bitmap, which is
            // allocated lazily on the first such group.
            static turbo::Result<std::shared_ptr<Buffer>> finish(MemoryPool *pool,
                                                                 const ScalarAggregateOptions &options,
                                                                 const int64_t *counts,
                                                                 TypedBufferBuilder<CType> *reduced,
                                                                 int64_t num_groups, int64_t *null_count,
                                                                 std::shared_ptr<Buffer> *null_bitmap) {
                for (int64_t i = 0; i < num_groups; ++i) {
                    if (counts[i] >= options.min_count) continue;

                    if ((*null_bitmap) == nullptr) {
                        TURBO_MOVE_OR_RAISE(*null_bitmap, allocate_bitmap(num_groups, pool));
                        bit_util::SetBitsTo((*null_bitmap)->mutable_data(), 0, num_groups, true);
                    }

                    (*null_count)++;
                    bit_util::SetBitTo((*null_bitmap)->mutable_data(), i, false);
                }
                return reduced->finish();
            }

            turbo::Result<Datum> Finalize() override {
                std::shared_ptr<Buffer> null_bitmap = nullptr;
                const int64_t *counts = counts_.data();
                int64_t null_count = 0;

                TURBO_MOVE_OR_RAISE(auto values,
                                    Impl::finish(pool_, options_, counts, &reduced_, num_groups_,
                                                 &null_count, &null_bitmap));

                if (!options_.skip_nulls) {
                    // Groups that saw any null are themselves null: intersect the
                    // min_count bitmap (when one was produced) with no_nulls_.
                    null_count = kUnknownNullCount;
                    if (null_bitmap) {
                        nebula::internal::BitmapAnd(null_bitmap->data(), /*left_offset=*/0,
                                                    no_nulls_.data(), /*right_offset=*/0, num_groups_,
                                /*out_offset=*/0, null_bitmap->mutable_data());
                    } else {
                        TURBO_MOVE_OR_RAISE(null_bitmap, no_nulls_.finish());
                    }
                }

                return ArrayData::create(out_type(), num_groups_,
                                       {std::move(null_bitmap), std::move(values)}, null_count);
            }

            std::shared_ptr<DataType> out_type() const override { return out_type_; }

            // Non-decimal input: output type is the accumulator type's singleton.
            template<typename T = Type>
            static enable_if_t<!is_decimal_type<T>::value, std::shared_ptr<DataType>> GetOutType(
                    const std::shared_ptr<DataType> &in_type) {
                return TypeTraits<AccType>::type_singleton();
            }

            // Decimal input: the output keeps the input type unchanged.
            template<typename T = Type>
            static enable_if_decimal<T, std::shared_ptr<DataType>> GetOutType(
                    const std::shared_ptr<DataType> &in_type) {
                return in_type;
            }

            int64_t num_groups_ = 0;
            ScalarAggregateOptions options_;
            TypedBufferBuilder<CType> reduced_;   // per-group accumulators
            TypedBufferBuilder<int64_t> counts_;  // per-group value counts
            TypedBufferBuilder<bool> no_nulls_;   // one bit per group: no null seen
            std::shared_ptr<DataType> out_type_;
            MemoryPool *pool_;
        };

        /// Shared base for aggregations over an all-null (NA-typed) input
        /// column. No state is accumulated; Finalize emits either per-group
        /// "empty" identity values or all nulls, depending on the options.
        struct GroupedNullImpl : public GroupedAggregator {
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                pool_ = ctx->memory_pool();
                options_ = turbo::checked_cast<const ScalarAggregateOptions &>(*args.options);
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                num_groups_ = new_num_groups;
                return turbo::OkStatus();
            }

            // Nothing to accumulate: every input value is null.
            turbo::Status consume(const ExecSpan &batch) override { return turbo::OkStatus(); }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                const bool emit_identity = options_.skip_nulls && options_.min_count == 0;
                if (!emit_identity) {
                    // Either nulls are not skipped or min_count can never be met:
                    // every group's result is null.
                    return MakeArrayOfNull(out_type(), num_groups_, pool_);
                }
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> data,
                                    allocate_buffer(num_groups_ * sizeof(int64_t), pool_));
                output_empty(data);
                return ArrayData::create(out_type(), num_groups_, {nullptr, std::move(data)});
            }

            /// Fill `data` with the aggregation's identity value for each group.
            virtual void output_empty(const std::shared_ptr<Buffer> &data) = 0;

            int64_t num_groups_;
            ScalarAggregateOptions options_;
            MemoryPool *pool_;
        };

        /// Type visitor that builds the HashAggregateKernel for a reducing
        /// aggregate given the concrete input type. `Impl` is the per-type
        /// aggregator template, `kFriendlyName` appears in error messages, and
        /// `NullImpl` handles NA-typed input.
        template<template<typename> class Impl, const char *kFriendlyName, class NullImpl>
        struct GroupedReducingFactory {
            // Generic case: instantiate Impl for the visited type.
            template<typename T, typename AccType = typename FindAccumulatorType<T>::Type>
            turbo::Status Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), HashAggregateInit<Impl<T>>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Decimal128Type &) {
                kernel =
                        MakeKernel(std::move(argument_type), HashAggregateInit<Impl<Decimal128Type>>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Decimal256Type &) {
                kernel =
                        MakeKernel(std::move(argument_type), HashAggregateInit<Impl<Decimal256Type>>);
                return turbo::OkStatus();
            }

            // All-null input uses the dedicated null implementation.
            turbo::Status Visit(const NullType &) {
                kernel = MakeKernel(std::move(argument_type), HashAggregateInit<NullImpl>);
                return turbo::OkStatus();
            }

            // Half-precision floats are not supported.
            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Computing ", kFriendlyName, " of type ", type);
            }

            // Fallback: any other type is unsupported.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Computing ", kFriendlyName, " of type ", type);
            }

            /// Build the kernel for `type` by dispatching through the visitor.
            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedReducingFactory<Impl, kFriendlyName, NullImpl> factory;
                factory.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &factory));
                return std::move(factory.kernel);
            }

            HashAggregateKernel kernel;
            InputType argument_type;
        };

        // ----------------------------------------------------------------------
        // Sum implementation

        /// Grouped SUM: reduces by addition, starting each group at zero.
        template<typename Type>
        struct GroupedSumImpl : public GroupedReducingAggregator<Type, GroupedSumImpl<Type>> {
            using Base = GroupedReducingAggregator<Type, GroupedSumImpl<Type>>;
            using CType = typename Base::CType;
            using InputCType = typename Base::InputCType;

            // Default value for a group
            static CType NullValue(const DataType &) { return CType(0); }

            // For numeric types, add in the unsigned domain via to_unsigned so
            // that overflow wraps instead of being signed-overflow UB.
            template<typename T = Type>
            static enable_if_number<T, CType> Reduce(const DataType &, const CType u,
                                                     const InputCType v) {
                return static_cast<CType>(to_unsigned(u) + to_unsigned(static_cast<CType>(v)));
            }

            static CType Reduce(const DataType &, const CType u, const CType v) {
                return static_cast<CType>(to_unsigned(u) + to_unsigned(v));
            }

            using Base::finish;
        };

        // Sum over an all-null column: output type is int64 and every group gets the
        // additive identity.
        struct GroupedSumNullImpl final : public GroupedNullImpl {
            std::shared_ptr<DataType> out_type() const override { return int64(); }

            void output_empty(const std::shared_ptr<Buffer> &data) override {
                auto *out = data->mutable_data_as<int64_t>();
                std::fill(out, out + num_groups_, int64_t{0});
            }
        };

        static constexpr const char kSumName[] = "sum";
        using GroupedSumFactory =
                GroupedReducingFactory<GroupedSumImpl, kSumName, GroupedSumNullImpl>;

        // ----------------------------------------------------------------------
        // Product implementation

        // Grouped product: folds each value into its group's accumulator using the
        // overflow-aware multiplication provided by MultiplyTraits.
        template<typename Type>
        struct GroupedProductImpl final
                : public GroupedReducingAggregator<Type, GroupedProductImpl<Type>> {
            using Base = GroupedReducingAggregator<Type, GroupedProductImpl<Type>>;
            using AccType = typename Base::AccType;
            using CType = typename Base::CType;
            using InputCType = typename Base::InputCType;

            // Multiplicative identity for a freshly created group (type-aware: the
            // decimal "one" depends on the output type's scale).
            static CType NullValue(const DataType &out_type) {
                return MultiplyTraits<AccType>::one(out_type);
            }

            // Numeric input: widen to the accumulator type before multiplying.
            template<typename U = Type>
            static enable_if_number<U, CType> Reduce(const DataType &out_type, const CType acc,
                                                     const InputCType val) {
                return MultiplyTraits<AccType>::Multiply(out_type, acc, static_cast<CType>(val));
            }

            // Accumulator-typed input (e.g. decimals).
            static CType Reduce(const DataType &out_type, const CType acc, const CType val) {
                return MultiplyTraits<AccType>::Multiply(out_type, acc, val);
            }

            using Base::finish;
        };

        // Product over an all-null column: output type is int64 and every group gets
        // the multiplicative identity.
        struct GroupedProductNullImpl final : public GroupedNullImpl {
            std::shared_ptr<DataType> out_type() const override { return int64(); }

            void output_empty(const std::shared_ptr<Buffer> &data) override {
                auto *out = data->mutable_data_as<int64_t>();
                std::fill(out, out + num_groups_, int64_t{1});
            }
        };

        static constexpr const char kProductName[] = "product";
        using GroupedProductFactory =
                GroupedReducingFactory<GroupedProductImpl, kProductName, GroupedProductNullImpl>;

        // ----------------------------------------------------------------------
        // Mean implementation

        // Accumulator selection for the grouped mean: plain numbers sum into a
        // double; everything else (decimals) keeps its own accumulator type.
        template<typename T>
        struct GroupedMeanAccType {
            using Type = std::conditional_t<is_number_type<T>::value, Fp64Type,
                    typename FindAccumulatorType<T>::Type>;
        };

        // Grouped mean: accumulates per-group sums in the accumulator type chosen by
        // GroupedMeanAccType (double for plain numbers, a decimal accumulator for
        // decimals), then divides by the per-group count in finish().
        template<typename Type>
        struct GroupedMeanImpl
                : public GroupedReducingAggregator<Type, GroupedMeanImpl<Type>,
                        typename GroupedMeanAccType<Type>::Type> {
            using Base = GroupedReducingAggregator<Type, GroupedMeanImpl<Type>,
                    typename GroupedMeanAccType<Type>::Type>;
            using CType = typename Base::CType;
            using InputCType = typename Base::InputCType;
            // Decimal means stay decimal (rounded); everything else becomes double.
            using MeanType =
                    typename std::conditional<is_decimal_type<Type>::value, CType, double>::type;

            // Additive identity for a freshly created group.
            static CType NullValue(const DataType &) { return CType(0); }

            // Numeric input: plain addition in the (double) accumulator domain.
            template<typename T = Type>
            static enable_if_number<T, CType> Reduce(const DataType &, const CType u,
                                                     const InputCType v) {
                return static_cast<CType>(u) + static_cast<CType>(v);
            }

            // Accumulator-typed input (decimals): add in the unsigned domain so
            // wraparound stays well-defined.
            static CType Reduce(const DataType &, const CType u, const CType v) {
                return static_cast<CType>(to_unsigned(u) + to_unsigned(v));
            }

            // Decimal mean: integer division, then round half away from zero using
            // the remainder's magnitude.
            template<typename T = Type>
            static enable_if_decimal<T, turbo::Result<MeanType>> DoMean(CType reduced, int64_t count) {
                static_assert(std::is_same<MeanType, CType>::value, "");
                CType quotient, remainder;
                TURBO_MOVE_OR_RAISE(std::tie(quotient, remainder), reduced.Divide(count));
                // Round the decimal result based on the remainder
                remainder.Abs();
                if (remainder * 2 >= count) {
                    if (reduced >= 0) {
                        quotient += 1;
                    } else {
                        quotient -= 1;
                    }
                }
                return quotient;
            }

            // Non-decimal mean: straightforward floating-point division.
            template<typename T = Type>
            static enable_if_t<!is_decimal_type<T>::value, turbo::Result<MeanType>> DoMean(CType reduced,
                                                                                           int64_t count) {
                return static_cast<MeanType>(reduced) / count;
            }

            // Converts per-group sums into means. Groups with fewer than
            // options.min_count values are emitted as null; the validity bitmap is
            // allocated lazily on the first null so all-valid outputs stay bitmap-free.
            static turbo::Result<std::shared_ptr<Buffer>> finish(MemoryPool *pool,
                                                                 const ScalarAggregateOptions &options,
                                                                 const int64_t *counts,
                                                                 TypedBufferBuilder<CType> *reduced_,
                                                                 int64_t num_groups, int64_t *null_count,
                                                                 std::shared_ptr<Buffer> *null_bitmap) {
                const CType *reduced = reduced_->data();
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> values,
                                    allocate_buffer(num_groups * sizeof(MeanType), pool));
                auto *means = values->mutable_data_as<MeanType>();
                for (int64_t i = 0; i < num_groups; ++i) {
                    if (counts[i] >= options.min_count) {
                        TURBO_MOVE_OR_RAISE(means[i], DoMean(reduced[i], counts[i]));
                        continue;
                    }
                    // Below min_count: zero the slot and mark the group null.
                    means[i] = MeanType(0);

                    if ((*null_bitmap) == nullptr) {
                        TURBO_MOVE_OR_RAISE(*null_bitmap, allocate_bitmap(num_groups, pool));
                        bit_util::SetBitsTo((*null_bitmap)->mutable_data(), 0, num_groups, true);
                    }

                    (*null_count)++;
                    bit_util::SetBitTo((*null_bitmap)->mutable_data(), i, false);
                }
                return values;
            }

            // Decimal input keeps its decimal output type; everything else is float64.
            std::shared_ptr<DataType> out_type() const override {
                if (is_decimal_type<Type>::value) return this->out_type_;
                return float64();
            }
        };

        // Mean over an all-null column: output type is float64, every slot zeroed.
        struct GroupedMeanNullImpl final : public GroupedNullImpl {
            std::shared_ptr<DataType> out_type() const override { return float64(); }

            void output_empty(const std::shared_ptr<Buffer> &data) override {
                auto *out = data->mutable_data_as<double>();
                std::fill(out, out + num_groups_, 0.0);
            }
        };

        static constexpr const char kMeanName[] = "mean";
        using GroupedMeanFactory =
                GroupedReducingFactory<GroupedMeanImpl, kMeanName, GroupedMeanNullImpl>;

        // Variance/Stdev implementation

        // Grouped variance/stddev. Maintains per-group (count, mean, m2) triples,
        // where m2 = sum((x - mean)^2); chunks are combined with the standard
        // parallel-variance merge (Chan/Golub/LeVeque). `result_type_` selects
        // whether Finalize() emits the variance or its square root.
        template<typename Type>
        struct GroupedVarStdImpl : public GroupedAggregator {
            using CType = typename TypeTraits<Type>::CType;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                options_ = *turbo::checked_cast<const VarianceOptions *>(args.options);
                // Decimal inputs carry a scale needed to convert values to double.
                if (is_decimal_type<Type>::value) {
                    const int32_t scale =
                            turbo::checked_cast<const DecimalType &>(*args.inputs[0].type).scale();
                    return InitInternal(ctx, scale, args.options);
                }
                return InitInternal(ctx, 0, args.options);
            }

            // Shared initializer; also used by ConsumeImpl to build scratch states
            // that are folded back in via Merge().
            turbo::Status InitInternal(ExecContext *ctx, int32_t decimal_scale,
                                       const FunctionOptions *options) {
                options_ = *turbo::checked_cast<const VarianceOptions *>(options);
                decimal_scale_ = decimal_scale;
                ctx_ = ctx;
                pool_ = ctx->memory_pool();
                counts_ = TypedBufferBuilder<int64_t>(pool_);
                means_ = TypedBufferBuilder<double>(pool_);
                m2s_ = TypedBufferBuilder<double>(pool_);
                no_nulls_ = TypedBufferBuilder<bool>(pool_);
                return turbo::OkStatus();
            }

            // Zero-initialize state for any newly seen groups.
            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                TURBO_RETURN_NOT_OK(counts_.append(added_groups, 0));
                TURBO_RETURN_NOT_OK(means_.append(added_groups, 0));
                TURBO_RETURN_NOT_OK(m2s_.append(added_groups, 0));
                TURBO_RETURN_NOT_OK(no_nulls_.append(added_groups, true));
                return turbo::OkStatus();
            }

            template<typename T>
            double ToDouble(T value) const {
                return static_cast<double>(value);
            }

            // Decimals are scaled down to their real value before conversion.
            double ToDouble(const Decimal128 &value) const {
                return value.ToDouble(decimal_scale_);
            }

            double ToDouble(const Decimal256 &value) const {
                return value.ToDouble(decimal_scale_);
            }

            turbo::Status consume(const ExecSpan &batch) override { return ConsumeImpl(batch); }

            // float/double/int64/decimal: calculate `m2` (sum((X-mean)^2)) with
            // `two pass algorithm` (see aggregate_var_std.cc)
            template<typename T = Type>
            enable_if_t<is_floating_type<T>::value || (sizeof(CType) > 4), turbo::Status> ConsumeImpl(
                    const ExecSpan &batch) {
                using SumType = typename internal::GetSumType<T>::SumType;

                // Accumulate into a scratch state, then fold it into *this via Merge
                // with an identity group-id mapping.
                GroupedVarStdImpl<Type> state;
                TURBO_RETURN_NOT_OK(state.InitInternal(ctx_, decimal_scale_, &options_));
                TURBO_RETURN_NOT_OK(state.resize(num_groups_));
                int64_t *counts = state.counts_.mutable_data();
                double *means = state.means_.mutable_data();
                double *m2s = state.m2s_.mutable_data();
                uint8_t *no_nulls = state.no_nulls_.mutable_data();

                // XXX this uses naive summation; we should switch to pairwise summation as was
                // done for the scalar aggregate kernel in ARROW-11567
                std::vector<SumType> sums(num_groups_);
                VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, typename TypeTraits<Type>::CType value) {
                            sums[g] += value;
                            counts[g]++;
                        },
                        [&](uint32_t g) { bit_util::ClearBit(no_nulls, g); });

                // Pass 1 result: per-group means. Groups with count == 0 divide by
                // zero and yield NaN, but Merge() skips zero-count groups below.
                for (int64_t i = 0; i < num_groups_; i++) {
                    means[i] = ToDouble(sums[i]) / counts[i];
                }

                // Pass 2: accumulate squared deviations from the mean.
                VisitGroupedValuesNonNull<Type>(
                        batch, [&](uint32_t g, typename TypeTraits<Type>::CType value) {
                            const double v = ToDouble(value);
                            m2s[g] += (v - means[g]) * (v - means[g]);
                        });

                // Identity mapping: scratch group i merges into our group i.
                TURBO_MOVE_OR_RAISE(auto mapping,
                                    allocate_buffer(num_groups_ * sizeof(uint32_t), pool_));
                for (uint32_t i = 0; static_cast<int64_t>(i) < num_groups_; i++) {
                    mapping->template mutable_data_as<uint32_t>()[i] = i;
                }
                ArrayData group_id_mapping(uint32(), num_groups_, {nullptr, std::move(mapping)},
                        /*null_count=*/0);
                return this->Merge(std::move(state), group_id_mapping);
            }

            // int32/16/8: textbook one pass algorithm with integer arithmetic (see
            // aggregate_var_std.cc)
            template<typename T = Type>
            enable_if_t<is_integer_type<T>::value && (sizeof(CType) <= 4), turbo::Status> ConsumeImpl(
                    const ExecSpan &batch) {
                // max number of elements that sum will not overflow int64 (2Gi int32 elements)
                // for uint32:    0 <= sum < 2^63 (int64 >= 0)
                // for int32: -2^62 <= sum < 2^62
                constexpr int64_t max_length = 1ULL << (63 - sizeof(CType) * 8);

                // batch[1] holds the per-row group ids produced by the grouper.
                const auto *g = batch[1].array.get_values<uint32_t>(1);
                if (batch[0].is_scalar() && !batch[0].scalar->is_valid) {
                    // A null scalar marks every referenced group as having seen a null.
                    uint8_t *no_nulls = no_nulls_.mutable_data();
                    for (int64_t i = 0; i < batch.length; i++) {
                        bit_util::ClearBit(no_nulls, g[i]);
                    }
                    return turbo::OkStatus();
                }

                std::vector<IntegerVarStd<Type>> var_std(num_groups_);

                // Identity mapping reused for every chunk's Merge below.
                TURBO_MOVE_OR_RAISE(auto mapping,
                                    allocate_buffer(num_groups_ * sizeof(uint32_t), pool_));
                for (uint32_t i = 0; static_cast<int64_t>(i) < num_groups_; i++) {
                    mapping->template mutable_data_as<uint32_t>()[i] = i;
                }
                ArrayData group_id_mapping(uint32(), num_groups_, {nullptr, std::move(mapping)},
                        /*null_count=*/0);

                for (int64_t start_index = 0; start_index < batch.length; start_index += max_length) {
                    // process in chunks that overflow will never happen

                    // reset state
                    var_std.clear();
                    var_std.resize(num_groups_);
                    GroupedVarStdImpl<Type> state;
                    TURBO_RETURN_NOT_OK(state.InitInternal(ctx_, decimal_scale_, &options_));
                    TURBO_RETURN_NOT_OK(state.resize(num_groups_));
                    int64_t *other_counts = state.counts_.mutable_data();
                    double *other_means = state.means_.mutable_data();
                    double *other_m2s = state.m2s_.mutable_data();
                    uint8_t *other_no_nulls = state.no_nulls_.mutable_data();

                    if (batch[0].is_array()) {
                        const ArraySpan &array = batch[0].array;
                        const CType *values = array.get_values<CType>(1);
                        auto visit_values = [&](int64_t pos, int64_t len) {
                            for (int64_t i = 0; i < len; ++i) {
                                const int64_t index = start_index + pos + i;
                                const auto value = values[index];
                                var_std[g[index]].ConsumeOne(value);
                            }
                        };

                        if (array.may_have_nulls()) {
                            // Walk validity in runs: valid runs feed values, null runs
                            // clear the corresponding groups' no-null flags.
                            nebula::internal::BitRunReader reader(
                                    array.buffers[0].data, array.offset + start_index,
                                    std::min(max_length, batch.length - start_index));
                            int64_t position = 0;
                            while (true) {
                                auto run = reader.NextRun();
                                if (run.length == 0) break;
                                if (run.set) {
                                    visit_values(position, run.length);
                                } else {
                                    for (int64_t i = 0; i < run.length; ++i) {
                                        bit_util::ClearBit(other_no_nulls, g[start_index + position + i]);
                                    }
                                }
                                position += run.length;
                            }
                        } else {
                            // NOTE(review): the null path above bounds the chunk with
                            // std::min(max_length, batch.length - start_index), but this
                            // branch visits array.length values on every chunk iteration.
                            // That looks like it would double-count if batch.length ever
                            // exceeded max_length (>= 2^31 rows) — confirm intent.
                            visit_values(0, array.length);
                        }
                    } else {
                        // Valid scalar: consume the same value once per referenced row.
                        const auto value = UnboxScalar<Type>::Unbox(*batch[0].scalar);
                        for (int64_t i = 0; i < std::min(max_length, batch.length - start_index); ++i) {
                            const int64_t index = start_index + i;
                            var_std[g[index]].ConsumeOne(value);
                        }
                    }

                    // Transfer the integer accumulators into the scratch state's
                    // (count, mean, m2) representation, then fold into *this.
                    for (int64_t i = 0; i < num_groups_; i++) {
                        if (var_std[i].count == 0) continue;

                        other_counts[i] = var_std[i].count;
                        other_means[i] = var_std[i].mean();
                        other_m2s[i] = var_std[i].m2();
                    }
                    TURBO_RETURN_NOT_OK(this->Merge(std::move(state), group_id_mapping));
                }
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                // Combine m2 from two chunks (see aggregate_var_std.cc)
                auto other = turbo::checked_cast<GroupedVarStdImpl *>(&raw_other);

                int64_t *counts = counts_.mutable_data();
                double *means = means_.mutable_data();
                double *m2s = m2s_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();

                const int64_t *other_counts = other->counts_.data();
                const double *other_means = other->means_.data();
                const double *other_m2s = other->m2s_.data();
                const uint8_t *other_no_nulls = other->no_nulls_.data();

                // g maps the other aggregator's group ids onto ours.
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g, ++g) {
                    if (!bit_util::get_bit(other_no_nulls, other_g)) {
                        bit_util::ClearBit(no_nulls, *g);
                    }
                    // Skipping empty groups also avoids merging the NaN means that
                    // the two-pass path produces for count == 0.
                    if (other_counts[other_g] == 0) continue;
                    MergeVarStd(counts[*g], means[*g], other_counts[other_g], other_means[other_g],
                                other_m2s[other_g], &counts[*g], &means[*g], &m2s[*g]);
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                std::shared_ptr<Buffer> null_bitmap;
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> values,
                                    allocate_buffer(num_groups_ * sizeof(double), pool_));
                int64_t null_count = 0;

                auto *results = values->mutable_data_as<double>();
                const int64_t *counts = counts_.data();
                const double *m2s = m2s_.data();
                for (int64_t i = 0; i < num_groups_; ++i) {
                    // A result needs more than `ddof` observations (denominator > 0)
                    // and at least min_count values; otherwise the group is null.
                    if (counts[i] > options_.ddof && counts[i] >= options_.min_count) {
                        const double variance = m2s[i] / (counts[i] - options_.ddof);
                        results[i] = result_type_ == VarOrStd::Var ? variance : std::sqrt(variance);
                        continue;
                    }

                    results[i] = 0;
                    // Lazily allocate the validity bitmap on the first null group.
                    if (null_bitmap == nullptr) {
                        TURBO_MOVE_OR_RAISE(null_bitmap, allocate_bitmap(num_groups_, pool_));
                        bit_util::SetBitsTo(null_bitmap->mutable_data(), 0, num_groups_, true);
                    }

                    null_count += 1;
                    bit_util::SetBitTo(null_bitmap->mutable_data(), i, false);
                }
                // skip_nulls=false: groups that saw any null are also forced to null
                // by ANDing in the no-nulls bitmap.
                if (!options_.skip_nulls) {
                    if (null_bitmap) {
                        nebula::internal::BitmapAnd(null_bitmap->data(), 0, no_nulls_.data(), 0,
                                                    num_groups_, 0, null_bitmap->mutable_data());
                    } else {
                        TURBO_MOVE_OR_RAISE(null_bitmap, no_nulls_.finish());
                    }
                    // The AND invalidated the running tally; let readers recompute.
                    null_count = kUnknownNullCount;
                }

                return ArrayData::create(float64(), num_groups_,
                                       {std::move(null_bitmap), std::move(values)}, null_count);
            }

            std::shared_ptr<DataType> out_type() const override { return float64(); }

            // Var vs Std selector, set by VarStdInit before init() runs.
            VarOrStd result_type_;
            int32_t decimal_scale_;
            VarianceOptions options_;
            int64_t num_groups_ = 0;
            // m2 = count * s2 = sum((X-mean)^2)
            TypedBufferBuilder<int64_t> counts_;
            TypedBufferBuilder<double> means_, m2s_;
            TypedBufferBuilder<bool> no_nulls_;
            ExecContext *ctx_;
            MemoryPool *pool_;
        };

        // Builds the grouped variance/stddev state, tags it with the desired result
        // flavor, then initializes it from the kernel arguments.
        template<typename T, VarOrStd result_type>
        turbo::Result<std::unique_ptr<KernelState>> VarStdInit(KernelContext *ctx,
                                                               const KernelInitArgs &args) {
            auto state = std::make_unique<GroupedVarStdImpl<T>>();
            state->result_type_ = result_type;
            TURBO_RETURN_NOT_OK(state->init(ctx->exec_context(), args));
            // R build with openSUSE155 requires an explicit unique_ptr construction
            return std::unique_ptr<KernelState>(std::move(state));
        }

        // Type-dispatching factory for the grouped variance/stddev kernels.
        template<VarOrStd result_type>
        struct GroupedVarStdFactory {
            // Integers, floats, and decimals all share the same implementation.
            template<typename T, typename Enable = enable_if_t<is_integer_type<T>::value ||
                                                               is_floating_type<T>::value ||
                                                               is_decimal_type<T>::value>>
            turbo::Status Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), VarStdInit<T, result_type>);
                return turbo::OkStatus();
            }

            // Half-floats are explicitly unsupported.
            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Computing variance/stddev of data of type ", type);
            }

            // Catch-all for every other type.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Computing variance/stddev of data of type ", type);
            }

            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedVarStdFactory self;
                self.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &self));
                return std::move(self.kernel);
            }

            HashAggregateKernel kernel;
            InputType argument_type;
        };

        // Grouped t-digest aggregation: maintains one TDigest sketch per group and
        // emits, for each group, a fixed_size_list<float64> holding the quantiles
        // requested in TDigestOptions::q.
        template<typename Type>
        struct GroupedTDigestImpl : public GroupedAggregator {
            using CType = typename TypeTraits<Type>::CType;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                options_ = *turbo::checked_cast<const TDigestOptions *>(args.options);
                // Decimal inputs need the scale to convert values to double.
                if (is_decimal_type<Type>::value) {
                    decimal_scale_ = turbo::checked_cast<const DecimalType &>(*args.inputs[0].type).scale();
                } else {
                    decimal_scale_ = 0;
                }
                ctx_ = ctx;
                pool_ = ctx->memory_pool();
                counts_ = TypedBufferBuilder<int64_t>(pool_);
                no_nulls_ = TypedBufferBuilder<bool>(pool_);
                return turbo::OkStatus();
            }

            // Create default state for any newly seen groups.
            turbo::Status resize(int64_t new_num_groups) override {
                const int64_t added_groups = new_num_groups - tdigests_.size();
                tdigests_.reserve(new_num_groups);
                for (int64_t i = 0; i < added_groups; i++) {
                    tdigests_.emplace_back(options_.delta, options_.buffer_size);
                }
                // BUG FIX: append only the groups added by this call. Previously this
                // appended `new_num_groups` entries on every resize, so the count and
                // validity buffers drifted out of sync with `tdigests_` after the
                // second call (cf. GroupedVarStdImpl::resize, which appends
                // `added_groups`).
                TURBO_RETURN_NOT_OK(counts_.append(added_groups, 0));
                TURBO_RETURN_NOT_OK(no_nulls_.append(added_groups, true));
                return turbo::OkStatus();
            }

            template<typename T>
            double ToDouble(T value) const {
                return static_cast<double>(value);
            }

            // Decimals are scaled down to their real value before conversion.
            double ToDouble(const Decimal128 &value) const {
                return value.ToDouble(decimal_scale_);
            }

            double ToDouble(const Decimal256 &value) const {
                return value.ToDouble(decimal_scale_);
            }

            turbo::Status consume(const ExecSpan &batch) override {
                int64_t *counts = counts_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();
                VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, CType value) {
                            // nan_add ignores NaN inputs rather than poisoning the sketch.
                            tdigests_[g].nan_add(ToDouble(value));
                            counts[g]++;
                        },
                        [&](uint32_t g) { bit_util::SetBitTo(no_nulls, g, false); });
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedTDigestImpl *>(&raw_other);

                int64_t *counts = counts_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();

                const int64_t *other_counts = other->counts_.data();
                // BUG FIX: read the *other* aggregator's validity bitmap. Previously
                // this aliased our own `no_nulls_` buffer, so nulls observed by
                // `other` were never propagated into the merged state.
                const uint8_t *other_no_nulls = other->no_nulls_.data();

                // g maps the other aggregator's group ids onto ours.
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g, ++g) {
                    tdigests_[*g].merge(other->tdigests_[other_g]);
                    counts[*g] += other_counts[other_g];
                    bit_util::SetBitTo(
                            no_nulls, *g,
                            bit_util::get_bit(no_nulls, *g) && bit_util::get_bit(other_no_nulls, other_g));
                }

                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                const int64_t slot_length = options_.q.size();
                const int64_t num_values = tdigests_.size() * slot_length;
                const int64_t *counts = counts_.data();
                std::shared_ptr<Buffer> null_bitmap;
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> values,
                                    allocate_buffer(num_values * sizeof(double), pool_));
                int64_t null_count = 0;

                auto *results = values->mutable_data_as<double>();
                for (int64_t i = 0; static_cast<size_t>(i) < tdigests_.size(); ++i) {
                    // A group yields quantiles only if it saw data, met min_count, and
                    // (unless skip_nulls) saw no nulls.
                    if (!tdigests_[i].is_empty() && counts[i] >= options_.min_count &&
                        (options_.skip_nulls || bit_util::get_bit(no_nulls_.data(), i))) {
                        for (int64_t j = 0; j < slot_length; j++) {
                            results[i * slot_length + j] = tdigests_[i].quantile(options_.q[j]);
                        }
                        continue;
                    }

                    // Lazily allocate the validity bitmap on the first null group.
                    if (!null_bitmap) {
                        TURBO_MOVE_OR_RAISE(null_bitmap, allocate_bitmap(num_values, pool_));
                        bit_util::SetBitsTo(null_bitmap->mutable_data(), 0, num_values, true);
                    }
                    null_count += slot_length;
                    bit_util::SetBitsTo(null_bitmap->mutable_data(), i * slot_length, slot_length,
                                        false);
                    std::fill(&results[i * slot_length], &results[(i + 1) * slot_length], 0.0);
                }

                auto child = ArrayData::create(float64(), num_values,
                                             {std::move(null_bitmap), std::move(values)}, null_count);
                return ArrayData::create(out_type(), tdigests_.size(), {nullptr}, {std::move(child)},
                        /*null_count=*/0);
            }

            std::shared_ptr<DataType> out_type() const override {
                return fixed_size_list(float64(), static_cast<int32_t>(options_.q.size()));
            }

            TDigestOptions options_;
            int32_t decimal_scale_;
            std::vector<turbo::TDigest> tdigests_;
            TypedBufferBuilder<int64_t> counts_;
            TypedBufferBuilder<bool> no_nulls_;
            ExecContext *ctx_;
            MemoryPool *pool_;
        };

        // Type-dispatching factory for the grouped t-digest kernel.
        struct GroupedTDigestFactory {
            // Plain numeric input.
            template<typename T>
            enable_if_number<T, turbo::Status> Visit(const T &) {
                kernel =
                        MakeKernel(std::move(argument_type), HashAggregateInit<GroupedTDigestImpl<T>>);
                return turbo::OkStatus();
            }

            // Decimal input (values are scaled to double inside the impl).
            template<typename T>
            enable_if_decimal<T, turbo::Status> Visit(const T &) {
                kernel =
                        MakeKernel(std::move(argument_type), HashAggregateInit<GroupedTDigestImpl<T>>);
                return turbo::OkStatus();
            }

            // Half-floats are explicitly unsupported.
            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Computing t-digest of data of type ", type);
            }

            // Catch-all for every other type.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Computing t-digest of data of type ", type);
            }

            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedTDigestFactory self;
                self.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &self));
                return std::move(self.kernel);
            }

            HashAggregateKernel kernel;
            InputType argument_type;
        };

        // approximate_median is implemented as a t-digest with the default q = {0.5},
        // with the fixed-size-list result unwrapped down to a plain float64 array.
        HashAggregateKernel MakeApproximateMedianKernel(HashAggregateFunction *tdigest_func) {
            HashAggregateKernel kernel;
            kernel.init = [tdigest_func](
                    KernelContext *ctx,
                    const KernelInitArgs &args) -> turbo::Result<std::unique_ptr<KernelState>> {
                // Resolve the concrete t-digest kernel for these input types and
                // initialize it with options translated from ScalarAggregateOptions.
                TURBO_MOVE_OR_RAISE(auto exact_kernel, tdigest_func->dispatch_exact(args.inputs));
                const auto &scalar_options =
                        turbo::checked_cast<const ScalarAggregateOptions &>(*args.options);
                TDigestOptions options;
                // Default q = 0.5
                options.min_count = scalar_options.min_count;
                options.skip_nulls = scalar_options.skip_nulls;
                KernelInitArgs new_args{exact_kernel, args.inputs, &options};
                return exact_kernel->init(ctx, new_args);
            };
            kernel.signature = KernelSignature::create({InputType::Any(), Type::UINT32}, float64());
            kernel.resize = HashAggregateResize;
            kernel.consume = HashAggregateConsume;
            kernel.merge = HashAggregateMerge;
            kernel.finalize = [](KernelContext *ctx, Datum *out) {
                // The t-digest kernel yields fixed_size_list<float64>[1]; strip the
                // list wrapper so callers receive bare float64 values.
                TURBO_MOVE_OR_RAISE(Datum temp,
                                    turbo::checked_cast<GroupedAggregator *>(ctx->state())->Finalize());
                *out = temp.array_as<FixedSizeListArray>()->values();
                return turbo::OkStatus();
            };
            return kernel;
        }

        // ----------------------------------------------------------------------
        // MinMax implementation

        // Identity elements for running min/max reductions: a freshly allocated
        // group slot is seeded with anti_min()/anti_max() so that the first real
        // value always wins the first comparison.
        template<typename CType>
        struct AntiExtrema {
            // Neutral element for min(): nothing compares greater.
            static constexpr CType anti_min() { return std::numeric_limits<CType>::max(); }

            // Neutral element for max(). Use lowest() rather than min(): for the
            // integral types that reach this primary template the two are
            // identical, but lowest() is the idiomatically correct "most
            // negative" value (min() would be the smallest *positive* value for
            // floating-point types, which instead use the infinity
            // specializations below).
            static constexpr CType anti_max() { return std::numeric_limits<CType>::lowest(); }
        };

        // Booleans: min starts at true (any false lowers it), max starts at
        // false (any true raises it).
        template<>
        struct AntiExtrema<bool> {
            static constexpr bool anti_min() { return true; }

            static constexpr bool anti_max() { return false; }
        };

        // Floats: +infinity is the identity for min(), -infinity for max(), so
        // every finite value replaces the seed.
        template<>
        struct AntiExtrema<float> {
            static constexpr float anti_min() { return std::numeric_limits<float>::infinity(); }

            static constexpr float anti_max() { return -std::numeric_limits<float>::infinity(); }
        };

        // Doubles: +/-infinity identities, mirroring the float specialization.
        template<>
        struct AntiExtrema<double> {
            static constexpr double anti_min() { return std::numeric_limits<double>::infinity(); }

            static constexpr double anti_max() { return -std::numeric_limits<double>::infinity(); }
        };

        // Decimal128: the decimal implementation's sentinel values serve as the
        // extreme seeds (presumably the largest/smallest representable values —
        // see BasicDecimal128).
        template<>
        struct AntiExtrema<Decimal128> {
            static constexpr Decimal128 anti_min() { return BasicDecimal128::GetMaxSentinel(); }

            static constexpr Decimal128 anti_max() { return BasicDecimal128::GetMinSentinel(); }
        };

        // Decimal256: same sentinel scheme as the Decimal128 specialization.
        template<>
        struct AntiExtrema<Decimal256> {
            static constexpr Decimal256 anti_min() { return BasicDecimal256::GetMaxSentinel(); }

            static constexpr Decimal256 anti_max() { return BasicDecimal256::GetMinSentinel(); }
        };

        // Grouped min/max for fixed-width types (integers, floats, booleans,
        // decimals). Each group keeps a running min and max plus two bitmaps:
        // has_values_ (the group saw at least one non-null value) and has_nulls_
        // (the group saw at least one null), which together determine output
        // validity in Finalize().
        template<typename Type, typename Enable = void>
        struct GroupedMinMaxImpl final : public GroupedAggregator {
            using CType = typename TypeTraits<Type>::CType;
            using GetSet = GroupedValueTraits<Type>;
            using ArrType =
                    typename std::conditional<is_boolean_type<Type>::value, uint8_t, CType>::type;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                options_ = *turbo::checked_cast<const ScalarAggregateOptions *>(args.options);
                // type_ initialized by MinMaxInit
                mins_ = TypedBufferBuilder<CType>(ctx->memory_pool());
                maxes_ = TypedBufferBuilder<CType>(ctx->memory_pool());
                has_values_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                has_nulls_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                // Seed newly added slots with the reduction identities so the
                // first value consumed for a group always wins the comparison.
                TURBO_RETURN_NOT_OK(mins_.append(added_groups, AntiExtrema<CType>::anti_min()));
                TURBO_RETURN_NOT_OK(maxes_.append(added_groups, AntiExtrema<CType>::anti_max()));
                TURBO_RETURN_NOT_OK(has_values_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(has_nulls_.append(added_groups, false));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                auto raw_mins = mins_.mutable_data();
                auto raw_maxes = maxes_.mutable_data();

                VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, CType val) {
                            GetSet::Set(raw_mins, g, std::min(GetSet::Get(raw_mins, g), val));
                            GetSet::Set(raw_maxes, g, std::max(GetSet::Get(raw_maxes, g), val));
                            bit_util::SetBit(has_values_.mutable_data(), g);
                        },
                        [&](uint32_t g) { bit_util::SetBit(has_nulls_.mutable_data(), g); });
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedMinMaxImpl *>(&raw_other);

                auto raw_mins = mins_.mutable_data();
                auto raw_maxes = maxes_.mutable_data();

                auto other_raw_mins = other->mins_.mutable_data();
                auto other_raw_maxes = other->maxes_.mutable_data();

                // group_id_mapping maps each of other's group ids to a group id
                // in this aggregator.
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < group_id_mapping.length;
                     ++other_g, ++g) {
                    GetSet::Set(
                            raw_mins, *g,
                            std::min(GetSet::Get(raw_mins, *g), GetSet::Get(other_raw_mins, other_g)));
                    GetSet::Set(
                            raw_maxes, *g,
                            std::max(GetSet::Get(raw_maxes, *g), GetSet::Get(other_raw_maxes, other_g)));

                    if (bit_util::get_bit(other->has_values_.data(), other_g)) {
                        bit_util::SetBit(has_values_.mutable_data(), *g);
                    }
                    if (bit_util::get_bit(other->has_nulls_.data(), other_g)) {
                        bit_util::SetBit(has_nulls_.mutable_data(), *g);
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // aggregation for group is valid if there was at least one value in that group
                TURBO_MOVE_OR_RAISE(auto null_bitmap, has_values_.finish());

                if (!options_.skip_nulls) {
                    // ... and there were no nulls in that group
                    TURBO_MOVE_OR_RAISE(auto has_nulls, has_nulls_.finish());
                    nebula::internal::BitmapAndNot(null_bitmap->data(), 0, has_nulls->data(), 0,
                                                   num_groups_, 0, null_bitmap->mutable_data());
                }

                // Both children share the same validity bitmap (copied into mins,
                // moved into maxes).
                auto mins = ArrayData::create(type_, num_groups_, {null_bitmap, nullptr});
                auto maxes = ArrayData::create(type_, num_groups_, {std::move(null_bitmap), nullptr});
                TURBO_MOVE_OR_RAISE(mins->buffers[1], mins_.finish());
                TURBO_MOVE_OR_RAISE(maxes->buffers[1], maxes_.finish());

                return ArrayData::create(out_type(), num_groups_, {nullptr},
                                       {std::move(mins), std::move(maxes)});
            }

            std::shared_ptr<DataType> out_type() const override {
                return STRUCT({field("min", type_), field("max", type_)});
            }

            // Explicit zero: resize() computes `new_num_groups - num_groups_`,
            // which must not read an indeterminate value if the state is
            // default- rather than value-initialized.
            int64_t num_groups_ = 0;
            TypedBufferBuilder<CType> mins_, maxes_;
            TypedBufferBuilder<bool> has_values_, has_nulls_;
            std::shared_ptr<DataType> type_;
            ScalarAggregateOptions options_;
        };

        // For binary-like types
        // In principle, FixedSizeBinary could use base implementation
        // Grouped min/max for binary-like types (variable-length binary/string
        // plus FixedSizeBinary). Values are held per group as optional strings
        // allocated from the kernel's memory pool; ordering is lexicographic.
        // In principle, FixedSizeBinary could use base implementation
        template<typename Type>
        struct GroupedMinMaxImpl<Type,
                enable_if_t<is_base_binary_type<Type>::value ||
                            std::is_same<Type, FixedSizeBinaryType>::value>>
        final : public GroupedAggregator {
            using Allocator = nebula::stl::allocator<char>;
            using StringType = std::basic_string<char, std::char_traits<char>, Allocator>;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                ctx_ = ctx;
                allocator_ = Allocator(ctx->memory_pool());
                options_ = *turbo::checked_cast<const ScalarAggregateOptions *>(args.options);
                // type_ initialized by MinMaxInit
                has_values_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                has_nulls_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                DKCHECK_GE(added_groups, 0);
                num_groups_ = new_num_groups;
                // New slots start as std::nullopt: no value observed yet.
                mins_.resize(new_num_groups);
                maxes_.resize(new_num_groups);
                TURBO_RETURN_NOT_OK(has_values_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(has_nulls_.append(added_groups, false));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                return VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, std::string_view val) {
                            // Copy the candidate into pool-allocated storage only
                            // when it improves the current extreme.
                            if (!mins_[g] || val < *mins_[g]) {
                                mins_[g].emplace(val.data(), val.size(), allocator_);
                            }
                            if (!maxes_[g] || val > *maxes_[g]) {
                                maxes_[g].emplace(val.data(), val.size(), allocator_);
                            }
                            bit_util::SetBit(has_values_.mutable_data(), g);
                            return turbo::OkStatus();
                        },
                        [&](uint32_t g) {
                            bit_util::SetBit(has_nulls_.mutable_data(), g);
                            return turbo::OkStatus();
                        });
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedMinMaxImpl *>(&raw_other);
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < group_id_mapping.length;
                     ++other_g, ++g) {
                    // Steal the other side's value when we have none, or when it
                    // is strictly better than ours.
                    if (!mins_[*g] ||
                        (mins_[*g] && other->mins_[other_g] && *mins_[*g] > *other->mins_[other_g])) {
                        mins_[*g] = std::move(other->mins_[other_g]);
                    }
                    if (!maxes_[*g] || (maxes_[*g] && other->maxes_[other_g] &&
                                        *maxes_[*g] < *other->maxes_[other_g])) {
                        maxes_[*g] = std::move(other->maxes_[other_g]);
                    }

                    if (bit_util::get_bit(other->has_values_.data(), other_g)) {
                        bit_util::SetBit(has_values_.mutable_data(), *g);
                    }
                    if (bit_util::get_bit(other->has_nulls_.data(), other_g)) {
                        bit_util::SetBit(has_nulls_.mutable_data(), *g);
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // aggregation for group is valid if there was at least one value in that group
                TURBO_MOVE_OR_RAISE(auto null_bitmap, has_values_.finish());

                if (!options_.skip_nulls) {
                    // ... and there were no nulls in that group
                    TURBO_MOVE_OR_RAISE(auto has_nulls, has_nulls_.finish());
                    nebula::internal::BitmapAndNot(null_bitmap->data(), 0, has_nulls->data(), 0,
                                                   num_groups_, 0, null_bitmap->mutable_data());
                }

                auto mins = ArrayData::create(type_, num_groups_, {null_bitmap, nullptr});
                auto maxes = ArrayData::create(type_, num_groups_, {std::move(null_bitmap), nullptr});
                TURBO_RETURN_NOT_OK(MakeOffsetsValues(mins.get(), mins_));
                TURBO_RETURN_NOT_OK(MakeOffsetsValues(maxes.get(), maxes_));
                return ArrayData::create(out_type(), num_groups_, {nullptr},
                                       {std::move(mins), std::move(maxes)});
            }

            // Variable-length variant: build offsets + values buffers from the
            // per-group optional strings. Null groups contribute zero length.
            template<typename T = Type>
            enable_if_base_binary<T, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                using offset_type = typename T::offset_type;
                TURBO_MOVE_OR_RAISE(
                        auto raw_offsets,
                        allocate_buffer((1 + values.size()) * sizeof(offset_type), ctx_->memory_pool()));
                auto *offsets = raw_offsets->mutable_data_as<offset_type>();
                offsets[0] = 0;
                offsets++;
                const uint8_t *null_bitmap = array->buffers[0]->data();
                offset_type total_length = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                        DKCHECK(value.has_value());
                        if (value->size() >
                            static_cast<size_t>(std::numeric_limits<offset_type>::max()) ||
                            nebula::internal::AddWithOverflow(
                                    total_length, static_cast<offset_type>(value->size()), &total_length)) {
                            // BUG FIX: the message previously began with
                            // "turbo::Result is too large..." — a mechanical
                            // rename had leaked into the string literal.
                            return turbo::invalid_argument_error("Result is too large to fit in ", *array->type,
                                                                 " cast to large_ variant of type");
                        }
                    }
                    offsets[i] = total_length;
                }
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                        DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), value->size());
                        offset += value->size();
                    }
                }
                array->buffers[1] = std::move(raw_offsets);
                array->buffers.push_back(std::move(data));
                return turbo::OkStatus();
            }

            // Fixed-width variant: one slot_width-sized cell per group, zeroed
            // for null groups.
            template<typename T = Type>
            enable_if_same<T, FixedSizeBinaryType, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                const uint8_t *null_bitmap = array->buffers[0]->data();
                const int32_t slot_width =
                        turbo::checked_cast<const FixedSizeBinaryType &>(*array->type).byte_width();
                int64_t total_length = values.size() * slot_width;
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                        DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), slot_width);
                    } else {
                        std::memset(data->mutable_data() + offset, 0x00, slot_width);
                    }
                    offset += slot_width;
                }
                array->buffers[1] = std::move(data);
                return turbo::OkStatus();
            }

            std::shared_ptr<DataType> out_type() const override {
                return STRUCT({field("min", type_), field("max", type_)});
            }

            ExecContext *ctx_;
            Allocator allocator_;
            // Explicit zero: resize() computes `new_num_groups - num_groups_`,
            // which must not read an indeterminate value.
            int64_t num_groups_ = 0;
            std::vector<std::optional<StringType>> mins_, maxes_;
            TypedBufferBuilder<bool> has_values_, has_nulls_;
            std::shared_ptr<DataType> type_;
            ScalarAggregateOptions options_;
        };

        // Grouped min/max for the null type: both extremes of any group are
        // null, so the only state required is the group count.
        struct GroupedNullMinMaxImpl final : public GroupedAggregator {
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override { return turbo::OkStatus(); }

            turbo::Status resize(int64_t new_num_groups) override {
                num_groups_ = new_num_groups;
                return turbo::OkStatus();
            }

            // Nothing to accumulate: all inputs are null.
            turbo::Status consume(const ExecSpan &batch) override { return turbo::OkStatus(); }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // Emit struct<min: null, max: null> with all-null children.
                return ArrayData::create(
                        out_type(), num_groups_, {nullptr},
                        {
                                ArrayData::create(null(), num_groups_, {nullptr}, num_groups_),
                                ArrayData::create(null(), num_groups_, {nullptr}, num_groups_),
                        });
            }

            std::shared_ptr<DataType> out_type() const override {
                return STRUCT({field("min", null()), field("max", null())});
            }

            // Explicit zero so Finalize() is well-defined even before the first
            // resize() call.
            int64_t num_groups_ = 0;
        };

        // Shared init for grouped min/max kernels: construct the typed
        // aggregator state, then record the concrete input type on it (the
        // aggregator's init() itself leaves type_ untouched).
        template<typename T>
        turbo::Result<std::unique_ptr<KernelState>> MinMaxInit(KernelContext *ctx,
                                                               const KernelInitArgs &args) {
            TURBO_MOVE_OR_RAISE(auto state, HashAggregateInit<GroupedMinMaxImpl<T>>(ctx, args));
            auto *typed_state = static_cast<GroupedMinMaxImpl<T> *>(state.get());
            typed_state->type_ = args.inputs[0].get_shared_ptr();
            return state;
        }

        // Adapts the combined grouped min/max kernel into a kernel that returns
        // only the requested half ("min" or "max") of the resulting struct.
        template<MinOrMax min_or_max>
        HashAggregateKernel MakeMinOrMaxKernel(HashAggregateFunction *min_max_func) {
            HashAggregateKernel kernel;
            kernel.init = [min_max_func](
                    KernelContext *ctx,
                    const KernelInitArgs &args) -> turbo::Result<std::unique_ptr<KernelState>> {
                TURBO_MOVE_OR_RAISE(auto kernel, min_max_func->dispatch_exact(args.inputs));
                // Forward args.inputs directly (as MakeApproximateMedianKernel
                // does): the previous copy into a local vector only referenced a
                // shorter-lived object without changing behavior.
                KernelInitArgs new_args{kernel, args.inputs, args.options};
                return kernel->init(ctx, new_args);
            };
            kernel.signature =
                    KernelSignature::create({InputType::Any(), Type::UINT32}, OutputType(FirstType));
            kernel.resize = HashAggregateResize;
            kernel.consume = HashAggregateConsume;
            kernel.merge = HashAggregateMerge;
            kernel.finalize = [](KernelContext *ctx, Datum *out) {
                TURBO_MOVE_OR_RAISE(Datum temp,
                                    turbo::checked_cast<GroupedAggregator *>(ctx->state())->Finalize());
                // Select child 0 ("min") or child 1 ("max") per the enum value.
                *out = temp.array_as<StructArray>()->field(static_cast<uint8_t>(min_or_max));
                return turbo::OkStatus();
            };
            return kernel;
        }

        // Type visitor that selects the grouped min/max kernel implementation
        // for a given input type. Each Visit overload instantiates
        // GroupedMinMaxImpl for the appropriate (physical) type; unsupported
        // types fall through to the unimplemented-error overloads.
        struct GroupedMinMaxFactory {
            template<typename T>
            enable_if_physical_integer<T, turbo::Status> Visit(const T &) {
                // Integer-like types share kernels keyed on their physical
                // storage type.
                using PhysicalType = typename T::PhysicalType;
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<PhysicalType>);
                return turbo::OkStatus();
            }

            // MSVC2015 apparently doesn't compile this properly if we use
            // enable_if_floating_point
            turbo::Status Visit(const Fp32Type &) {
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<Fp32Type>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Fp64Type &) {
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<Fp64Type>);
                return turbo::OkStatus();
            }

            template<typename T>
            enable_if_decimal<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<T>);
                return turbo::OkStatus();
            }

            // Variable-length binary and string types use the binary-like
            // GroupedMinMaxImpl specialization.
            template<typename T>
            enable_if_base_binary<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<T>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const FixedSizeBinaryType &) {
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<FixedSizeBinaryType>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const BooleanType &) {
                kernel = MakeKernel(std::move(argument_type), MinMaxInit<BooleanType>);
                return turbo::OkStatus();
            }

            // All-null input: min and max are trivially null for every group.
            turbo::Status Visit(const NullType &) {
                kernel =
                        MakeKernel(std::move(argument_type), HashAggregateInit<GroupedNullMinMaxImpl>);
                return turbo::OkStatus();
            }

            // Explicit overload so half-float does not match a more general
            // template above.
            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Computing min/max of data of type ", type);
            }

            // Catch-all for every other type.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Computing min/max of data of type ", type);
            }

            // Entry point: visit `type` and return the kernel the matching
            // Visit overload produced.
            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedMinMaxFactory factory;
                factory.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &factory));
                return std::move(factory.kernel);
            }

            HashAggregateKernel kernel;
            InputType argument_type;
        };

        // ----------------------------------------------------------------------
        // FirstLast implementation

        // Grouped first/last for fixed-width types. Per group it tracks the
        // first and last non-null values plus whether the first/last element
        // seen (nulls included) was null, so Finalize() can honor skip_nulls.
        // has_values_: group saw a non-null value; has_any_values_: group saw
        // any element at all (null or not).
        template<typename Type, typename Enable = void>
        struct GroupedFirstLastImpl final : public GroupedAggregator {
            using CType = typename TypeTraits<Type>::CType;
            using GetSet = GroupedValueTraits<Type>;
            using ArrType =
                    typename std::conditional<is_boolean_type<Type>::value, uint8_t, CType>::type;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                options_ = *turbo::checked_cast<const ScalarAggregateOptions *>(args.options);

                // First and last non-null values
                firsts_ = TypedBufferBuilder<CType>(ctx->memory_pool());
                lasts_ = TypedBufferBuilder<CType>(ctx->memory_pool());

                // Whether the first/last element is null
                first_is_nulls_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                last_is_nulls_ = TypedBufferBuilder<bool>(ctx->memory_pool());

                has_values_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                has_any_values_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                // Reusing AntiExtrema as uninitialized value here because it doesn't
                // matter what the value is. We never output the uninitialized
                // first/last value.
                TURBO_RETURN_NOT_OK(firsts_.append(added_groups, AntiExtrema<CType>::anti_min()));
                TURBO_RETURN_NOT_OK(lasts_.append(added_groups, AntiExtrema<CType>::anti_max()));
                TURBO_RETURN_NOT_OK(has_values_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(first_is_nulls_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(last_is_nulls_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(has_any_values_.append(added_groups, false));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                auto raw_firsts = firsts_.mutable_data();
                auto raw_lasts = lasts_.mutable_data();
                auto raw_has_values = has_values_.mutable_data();
                auto raw_has_any_values = has_any_values_.mutable_data();
                auto raw_first_is_nulls = first_is_nulls_.mutable_data();
                auto raw_last_is_nulls = last_is_nulls_.mutable_data();

                VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, CType val) {
                            if (!bit_util::get_bit(raw_has_values, g)) {
                                GetSet::Set(raw_firsts, g, val);
                                bit_util::SetBit(raw_has_values, g);
                                bit_util::SetBit(raw_has_any_values, g);
                            }
                            // No need to update first_is_nulls here: once it is
                            // set to true it never changes.
                            bit_util::SetBitTo(raw_last_is_nulls, g, false);
                            GetSet::Set(raw_lasts, g, val);
                            DKCHECK(bit_util::get_bit(raw_has_values, g));
                        },
                        [&](uint32_t g) {
                            // We update first_is_null to true if this is called
                            // before we see any non-null values
                            if (!bit_util::get_bit(raw_has_values, g)) {
                                bit_util::SetBit(raw_first_is_nulls, g);
                                bit_util::SetBit(raw_has_any_values, g);
                            }
                            bit_util::SetBit(raw_last_is_nulls, g);
                        });
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                // The merge is asymmetric. "first" from this state gets pick over "first" from other
                // state. "last" from other state gets pick over from this state. This is so that when
                // using with segmented aggregation, we still get the correct "first" and "last"
                // value for the entire segment.
                auto other = turbo::checked_cast<GroupedFirstLastImpl *>(&raw_other);

                auto raw_firsts = firsts_.mutable_data();
                auto raw_lasts = lasts_.mutable_data();
                auto raw_has_values = has_values_.mutable_data();
                auto raw_has_any_values = has_any_values_.mutable_data();
                auto raw_first_is_nulls = first_is_nulls_.mutable_data();
                auto raw_last_is_nulls = last_is_nulls_.mutable_data();

                auto other_raw_firsts = other->firsts_.mutable_data();
                auto other_raw_lasts = other->lasts_.mutable_data();
                auto other_raw_has_values = other->has_values_.mutable_data();
                // BUG FIX: previously read other->has_values_ here, which failed
                // to propagate has_any_values_ for groups in which `other` had
                // seen only nulls, corrupting first_is_nulls in later merges.
                auto other_raw_has_any_values = other->has_any_values_.mutable_data();
                auto other_raw_last_is_nulls = other->last_is_nulls_.mutable_data();

                auto g = group_id_mapping.get_values<uint32_t>(1);

                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < group_id_mapping.length;
                     ++other_g, ++g) {
                    if (!bit_util::get_bit(raw_has_values, *g)) {
                        if (bit_util::get_bit(other_raw_has_values, other_g)) {
                            GetSet::Set(raw_firsts, *g, GetSet::Get(other_raw_firsts, other_g));
                        }
                    }
                    if (bit_util::get_bit(other_raw_has_values, other_g)) {
                        GetSet::Set(raw_lasts, *g, GetSet::Get(other_raw_lasts, other_g));
                    }
                    // If the current state doesn't have any values (null or non-null),
                    // then we take the "first_is_null" from rhs
                    if (!bit_util::get_bit(raw_has_any_values, *g)) {
                        bit_util::SetBitTo(raw_first_is_nulls, *g,
                                           bit_util::get_bit(other->first_is_nulls_.data(), other_g));
                    }
                    if (bit_util::get_bit(other_raw_last_is_nulls, other_g)) {
                        bit_util::SetBit(raw_last_is_nulls, *g);
                    }

                    if (bit_util::get_bit(other_raw_has_values, other_g)) {
                        bit_util::SetBit(raw_has_values, *g);
                    }

                    if (bit_util::get_bit(other_raw_has_any_values, other_g)) {
                        bit_util::SetBit(raw_has_any_values, *g);
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // We initialize the null bitmap with first_is_nulls and last_is_nulls
                // then update it depending on has_values
                TURBO_MOVE_OR_RAISE(auto first_null_bitmap, first_is_nulls_.finish());
                TURBO_MOVE_OR_RAISE(auto last_null_bitmap, last_is_nulls_.finish());
                TURBO_MOVE_OR_RAISE(auto has_values, has_values_.finish());

                auto raw_first_null_bitmap = first_null_bitmap->mutable_data();
                auto raw_last_null_bitmap = last_null_bitmap->mutable_data();
                auto raw_has_values = has_values->data();

                if (options_.skip_nulls) {
                    // Output is valid exactly when the group saw a non-null value.
                    for (int64_t i = 0; i < num_groups_; i++) {
                        const bool has_value = bit_util::get_bit(has_values->data(), i);
                        bit_util::SetBitTo(raw_first_null_bitmap, i, has_value);
                        bit_util::SetBitTo(raw_last_null_bitmap, i, has_value);
                    }
                } else {
                    for (int64_t i = 0; i < num_groups_; i++) {
                        // If first is null, we set the mask to false to output null
                        if (bit_util::get_bit(raw_first_null_bitmap, i)) {
                            bit_util::SetBitTo(raw_first_null_bitmap, i, false);
                        } else {
                            bit_util::SetBitTo(raw_first_null_bitmap, i,
                                               bit_util::get_bit(raw_has_values, i));
                        }
                    }
                    for (int64_t i = 0; i < num_groups_; i++) {
                        // If last is null, we set the mask to false to output null
                        if (bit_util::get_bit(raw_last_null_bitmap, i)) {
                            bit_util::SetBitTo(raw_last_null_bitmap, i, false);
                        } else {
                            bit_util::SetBitTo(raw_last_null_bitmap, i,
                                               bit_util::get_bit(raw_has_values, i));
                        }
                    }
                }

                auto firsts =
                        ArrayData::create(type_, num_groups_, {std::move(first_null_bitmap), nullptr});
                auto lasts =
                        ArrayData::create(type_, num_groups_, {std::move(last_null_bitmap), nullptr});
                TURBO_MOVE_OR_RAISE(firsts->buffers[1], firsts_.finish());
                TURBO_MOVE_OR_RAISE(lasts->buffers[1], lasts_.finish());

                return ArrayData::create(out_type(), num_groups_, {nullptr},
                                       {std::move(firsts), std::move(lasts)});
            }

            std::shared_ptr<DataType> out_type() const override {
                return STRUCT({field("first", type_), field("last", type_)});
            }

            // Explicit zero: resize() computes `new_num_groups - num_groups_`,
            // which must not read an indeterminate value.
            int64_t num_groups_ = 0;
            TypedBufferBuilder<CType> firsts_, lasts_;
            // has_values is true if there is non-null values
            // has_any_values is true if there is either null or non-null values
            TypedBufferBuilder<bool> has_values_, has_any_values_, first_is_nulls_, last_is_nulls_;
            std::shared_ptr<DataType> type_;
            ScalarAggregateOptions options_;
        };

        // Specialization of GroupedFirstLastImpl for var-length binary/string and
        // FixedSizeBinary inputs.  Each group's first/last value is copied into a
        // pool-allocated string (std::optional<StringType>) so it survives across
        // consumed batches; offsets/data buffers are rebuilt at finalize time.
        template<typename Type>
        struct GroupedFirstLastImpl<Type,
                enable_if_t<is_base_binary_type<Type>::value ||
                            std::is_same<Type, FixedSizeBinaryType>::value>>
        final : public GroupedAggregator {
            using Allocator = nebula::stl::allocator<char>;
            using StringType = std::basic_string<char, std::char_traits<char>, Allocator>;

            // Capture the exec context and kernel options; all flag-bitmap builders
            // draw from the context's memory pool.
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                ctx_ = ctx;
                allocator_ = Allocator(ctx->memory_pool());
                options_ = *turbo::checked_cast<const ScalarAggregateOptions *>(args.options);
                // type_ initialized by FirstLastInit
                // Whether the first/last element is null
                first_is_nulls_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                last_is_nulls_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                has_values_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                has_any_values_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                return turbo::OkStatus();
            }

            // Grow all per-group state; freshly added groups start with empty
            // optionals and all-false flag bits.
            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                        DKCHECK_GE(added_groups, 0);
                num_groups_ = new_num_groups;
                firsts_.resize(new_num_groups);
                lasts_.resize(new_num_groups);
                TURBO_RETURN_NOT_OK(has_values_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(has_any_values_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(first_is_nulls_.append(added_groups, false));
                TURBO_RETURN_NOT_OK(last_is_nulls_.append(added_groups, false));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                auto raw_has_values = has_values_.mutable_data();
                auto raw_has_any_values = has_any_values_.mutable_data();
                auto raw_first_is_nulls = first_is_nulls_.mutable_data();
                auto raw_last_is_nulls = last_is_nulls_.mutable_data();

                return VisitGroupedValues<Type>(
                        batch,
                        // Non-null value: it becomes the group's first value exactly
                        // once, and always replaces the running last value.
                        [&](uint32_t g, std::string_view val) {
                            if (!firsts_[g]) {
                                firsts_[g].emplace(val.data(), val.size(), allocator_);
                                bit_util::SetBit(raw_has_values, g);
                                bit_util::SetBit(raw_has_any_values, g);
                            }
                            bit_util::SetBitTo(raw_last_is_nulls, g, false);
                            lasts_[g].emplace(val.data(), val.size(), allocator_);
                            return turbo::OkStatus();
                        },
                        // Null value: if no non-null value has been seen yet, the
                        // group's "first" is (so far) null; "last" stays null until
                        // a later non-null value clears the bit above.
                        [&](uint32_t g) {
                            if (!bit_util::get_bit(raw_has_values, g)) {
                                bit_util::SetBit(raw_first_is_nulls, g);
                                bit_util::SetBit(raw_has_any_values, g);
                            }
                            bit_util::SetBit(raw_last_is_nulls, g);
                            return turbo::OkStatus();
                        });
            }

            // Ordered merge: `raw_other` holds state for rows that come after ours,
            // so our first wins and the other side's last wins.
            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedFirstLastImpl *>(&raw_other);
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < group_id_mapping.length;
                     ++other_g, ++g) {
                    if (!firsts_[*g]) {
                        firsts_[*g] = std::move(other->firsts_[other_g]);
                    }
                    // NOTE(review): other's last unconditionally replaces ours —
                    // presumably every mapped group has been consumed on the other
                    // side; confirm an untouched group (nullopt last) cannot reach
                    // this point.
                    lasts_[*g] = std::move(other->lasts_[other_g]);

                    // Only adopt other's first_is_null flag if we saw nothing at all
                    // for this group.
                    if (!bit_util::get_bit(has_any_values_.data(), *g)) {
                        bit_util::SetBitTo(first_is_nulls_.mutable_data(), *g,
                                           bit_util::get_bit(other->first_is_nulls_.data(), other_g));
                    }
                    // NOTE(review): last_is_nulls is OR-ed in but never cleared here;
                    // if our last was null and other's last is a value, the stale bit
                    // persists — verify against the overwrite of lasts_ above.
                    if (bit_util::get_bit(other->last_is_nulls_.data(), other_g)) {
                        bit_util::SetBit(last_is_nulls_.mutable_data(), *g);
                    }
                    if (bit_util::get_bit(other->has_values_.data(), other_g)) {
                        bit_util::SetBit(has_values_.mutable_data(), *g);
                    }
                    if (bit_util::get_bit(other->has_any_values_.data(), other_g)) {
                        bit_util::SetBit(has_any_values_.mutable_data(), *g);
                    }
                }
                return turbo::OkStatus();
            }

            // Build the {first,last} struct array.  The *_null_bitmap buffers are
            // rewritten in place to become the validity bitmaps of the children.
            turbo::Result<Datum> Finalize() override {
                TURBO_MOVE_OR_RAISE(auto first_null_bitmap, first_is_nulls_.finish());
                TURBO_MOVE_OR_RAISE(auto last_null_bitmap, last_is_nulls_.finish());
                TURBO_MOVE_OR_RAISE(auto has_values, has_values_.finish());

                if (!options_.skip_nulls) {
                    // A first/last that is literally null yields a null slot; other
                    // slots are valid only when the group saw a non-null value.
                    for (int i = 0; i < num_groups_; i++) {
                        const bool first_is_null = bit_util::get_bit(first_null_bitmap->data(), i);
                        const bool has_value = bit_util::get_bit(has_values->data(), i);
                        if (first_is_null) {
                            bit_util::SetBitTo(first_null_bitmap->mutable_data(), i, false);
                        } else {
                            bit_util::SetBitTo(first_null_bitmap->mutable_data(), i, has_value);
                        }
                    }

                    for (int i = 0; i < num_groups_; i++) {
                        const bool last_is_null = bit_util::get_bit(last_null_bitmap->data(), i);
                        const bool has_value = bit_util::get_bit(has_values->data(), i);
                        if (last_is_null) {
                            bit_util::SetBitTo(last_null_bitmap->mutable_data(), i, false);
                        } else {
                            bit_util::SetBitTo(last_null_bitmap->mutable_data(), i, has_value);
                        }
                    }
                } else {
                    // skip_nulls: validity is simply "group saw a non-null value".
                    for (int i = 0; i < num_groups_; i++) {
                        const bool has_value = bit_util::get_bit(has_values->data(), i);
                        bit_util::SetBitTo(first_null_bitmap->mutable_data(), i, has_value);
                        bit_util::SetBitTo(last_null_bitmap->mutable_data(), i, has_value);
                    }
                }

                auto firsts =
                        ArrayData::create(type_, num_groups_, {std::move(first_null_bitmap), nullptr});
                auto lasts =
                        ArrayData::create(type_, num_groups_, {std::move(last_null_bitmap), nullptr});
                TURBO_RETURN_NOT_OK(MakeOffsetsValues(firsts.get(), firsts_));
                TURBO_RETURN_NOT_OK(MakeOffsetsValues(lasts.get(), lasts_));
                return ArrayData::create(out_type(), num_groups_, {nullptr},
                                       {std::move(firsts), std::move(lasts)});
            }

            // Materialize offsets + data buffers for a var-length binary child from
            // the collected per-group strings.  Null slots contribute zero length.
            template<typename T = Type>
            enable_if_base_binary<T, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                using offset_type = typename T::offset_type;
                TURBO_MOVE_OR_RAISE(
                        auto raw_offsets,
                        allocate_buffer((1 + values.size()) * sizeof(offset_type), ctx_->memory_pool()));
                auto *offsets = raw_offsets->mutable_data_as<offset_type>();
                offsets[0] = 0;
                offsets++;
                const uint8_t *null_bitmap = array->buffers[0]->data();
                offset_type total_length = 0;
                // First pass: compute cumulative offsets, guarding against offset
                // overflow (e.g. 32-bit offset types).
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                                DKCHECK(value.has_value());
                        if (value->size() >
                            static_cast<size_t>(std::numeric_limits<offset_type>::max()) ||
                            nebula::internal::AddWithOverflow(
                                    total_length, static_cast<offset_type>(value->size()), &total_length)) {
                            return turbo::invalid_argument_error("turbo::Result is too large to fit in ", *array->type,
                                                                 " cast to large_ variant of type");
                        }
                    }
                    offsets[i] = total_length;
                }
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                // Second pass: copy the value bytes contiguously in group order.
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                                DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), value->size());
                        offset += value->size();
                    }
                }
                array->buffers[1] = std::move(raw_offsets);
                array->buffers.push_back(std::move(data));
                return turbo::OkStatus();
            }

            // FixedSizeBinary variant: a single data buffer holding slot_width
            // bytes per group; null slots are zero-filled.
            template<typename T = Type>
            enable_if_same<T, FixedSizeBinaryType, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                const uint8_t *null_bitmap = array->buffers[0]->data();
                const int32_t slot_width =
                        turbo::checked_cast<const FixedSizeBinaryType &>(*array->type).byte_width();
                int64_t total_length = values.size() * slot_width;
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                                DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), slot_width);
                    } else {
                        std::memset(data->mutable_data() + offset, 0x00, slot_width);
                    }
                    offset += slot_width;
                }
                array->buffers[1] = std::move(data);
                return turbo::OkStatus();
            }

            std::shared_ptr<DataType> out_type() const override {
                return STRUCT({field("first", type_), field("last", type_)});
            }

            ExecContext *ctx_;
            Allocator allocator_;
            // NOTE(review): not zero-initialized (cf. GroupedBooleanAggregator's
            // num_groups_) — presumably resize() runs before any read; verify.
            int64_t num_groups_;
            // Collected first/last value per group; nullopt until a value is seen.
            std::vector<std::optional<StringType>> firsts_, lasts_;
            // has_values is true if there is non-null values
            // has_any_values is true if there is either null or non-null values
            TypedBufferBuilder<bool> has_values_, has_any_values_, first_is_nulls_, last_is_nulls_;
            std::shared_ptr<DataType> type_;
            ScalarAggregateOptions options_;
        };

        // Create the grouped first/last aggregator state, then record the concrete
        // input type on it (GroupedFirstLastImpl::init leaves type_ untouched).
        template<typename T>
        turbo::Result<std::unique_ptr<KernelState>> FirstLastInit(KernelContext *ctx,
                                                                  const KernelInitArgs &args) {
            TURBO_MOVE_OR_RAISE(auto state, HashAggregateInit<GroupedFirstLastImpl<T>>(ctx, args));
            auto *first_last = static_cast<GroupedFirstLastImpl<T> *>(state.get());
            first_last->type_ = args.inputs[0].get_shared_ptr();
            return state;
        }

        // Wrap the grouped first_last kernel so a standalone "first" or "last"
        // hash aggregate can reuse its state and project out one struct field at
        // finalize time.
        template<FirstOrLast first_or_last>
        HashAggregateKernel MakeFirstOrLastKernel(HashAggregateFunction *first_last_func) {
            HashAggregateKernel kernel;
            kernel.init = [first_last_func](
                    KernelContext *ctx,
                    const KernelInitArgs &args) -> turbo::Result<std::unique_ptr<KernelState>> {
                // Delegate state creation to the first_last kernel matching these
                // exact input types.
                // NOTE(review): `inputs` is a local copy referenced by `new_args`;
                // this is safe only while init() consumes new_args before the
                // lambda returns — confirm KernelInitArgs does not retain it.
                std::vector<TypeHolder> inputs = args.inputs;
                TURBO_MOVE_OR_RAISE(auto kernel, first_last_func->dispatch_exact(args.inputs));
                KernelInitArgs new_args{kernel, inputs, args.options};
                return kernel->init(ctx, new_args);
            };

            kernel.signature =
                    KernelSignature::create({InputType::Any(), Type::UINT32}, OutputType(FirstType));
            kernel.resize = HashAggregateResize;
            kernel.consume = HashAggregateConsume;
            kernel.merge = HashAggregateMerge;
            // Finalize the shared {first,last} struct and keep only the field
            // selected by the template parameter.
            kernel.finalize = [](KernelContext *ctx, Datum *out) {
                TURBO_MOVE_OR_RAISE(Datum temp,
                                    turbo::checked_cast<GroupedAggregator *>(ctx->state())->Finalize());
                *out = temp.array_as<StructArray>()->field(static_cast<uint8_t>(first_or_last));
                return turbo::OkStatus();
            };
            // first/last are order-sensitive aggregates.
            kernel.ordered = true;
            return kernel;
        }

        // Type-dispatch factory: builds the grouped first/last kernel for one
        // concrete argument type via visit_type_inline.
        struct GroupedFirstLastFactory {
            // Integer-backed types aggregate on their physical (storage) type.
            template<typename T>
            enable_if_physical_integer<T, turbo::Status> Visit(const T &) {
                using PhysicalType = typename T::PhysicalType;
                kernel = MakeKernel(std::move(argument_type), FirstLastInit<PhysicalType>,
                        /*ordered*/ true);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Fp32Type &) {
                kernel =
                        MakeKernel(std::move(argument_type), FirstLastInit<Fp32Type>, /*ordered*/ true);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Fp64Type &) {
                kernel =
                        MakeKernel(std::move(argument_type), FirstLastInit<Fp64Type>, /*ordered*/ true);
                return turbo::OkStatus();
            }

            // NOTE(review): unlike the numeric Visits above, the binary/boolean
            // Visits below do not pass the /*ordered*/ flag to MakeKernel —
            // confirm the default matches the intended ordering requirement.
            template<typename T>
            enable_if_base_binary<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), FirstLastInit<T>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const FixedSizeBinaryType &) {
                kernel = MakeKernel(std::move(argument_type), FirstLastInit<FixedSizeBinaryType>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const BooleanType &) {
                kernel = MakeKernel(std::move(argument_type), FirstLastInit<BooleanType>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Computing first/last of data of type ", type);
            }

            // Fallback for all other types: not supported.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Computing first/last of data of type ", type);
            }

            // Entry point: dispatch on `type` and return the resulting kernel.
            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedFirstLastFactory factory;
                factory.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &factory));
                return factory.kernel;
            }

            HashAggregateKernel kernel;
            InputType argument_type;
        };

        // ----------------------------------------------------------------------
        // Any/All implementation

        // CRTP base for grouped boolean reductions (any/all).  Impl supplies:
        //   NullValue()         — seed bit for a freshly added group,
        //   UpdateGroupWith()   — fold one boolean observation into a group,
        //   AdjustForMinCount() — fold the reduced bits into no_nulls for the
        //                         skip_nulls=false output path.
        template<typename Impl>
        struct GroupedBooleanAggregator : public GroupedAggregator {
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                options_ = turbo::checked_cast<const ScalarAggregateOptions &>(*args.options);
                pool_ = ctx->memory_pool();
                reduced_ = TypedBufferBuilder<bool>(pool_);
                no_nulls_ = TypedBufferBuilder<bool>(pool_);
                counts_ = TypedBufferBuilder<int64_t>(pool_);
                return turbo::OkStatus();
            }

            // New groups start at Impl's identity value, with no nulls observed
            // and a zero non-null count.
            turbo::Status resize(int64_t new_num_groups) override {
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                TURBO_RETURN_NOT_OK(reduced_.append(added_groups, Impl::NullValue()));
                TURBO_RETURN_NOT_OK(no_nulls_.append(added_groups, true));
                return counts_.append(added_groups, 0);
            }

            turbo::Status consume(const ExecSpan &batch) override {
                uint8_t *reduced = reduced_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();
                int64_t *counts = counts_.mutable_data();
                // batch[1] carries the per-row group ids.
                auto g = batch[1].array.get_values<uint32_t>(1);

                if (batch[0].is_array()) {
                    const ArraySpan &input = batch[0].array;
                    // buffers[1] holds the boolean values; buffers[0] the validity.
                    const uint8_t *bitmap = input.buffers[1].data;
                    if (input.may_have_nulls()) {
                        // Walk validity: valid slots fold their value into the
                        // group; null slots clear the group's no_nulls bit.
                        nebula::internal::VisitBitBlocksVoid(
                                input.buffers[0].data, input.offset, input.length,
                                [&](int64_t position) {
                                    counts[*g]++;
                                    Impl::UpdateGroupWith(reduced, *g, bit_util::get_bit(bitmap, position));
                                    g++;
                                },
                                [&] { bit_util::SetBitTo(no_nulls, *g++, false); });
                    } else {
                        // No nulls: walk the value bitmap directly — a set bit is
                        // a true observation, a clear bit a false one.
                        nebula::internal::VisitBitBlocksVoid(
                                bitmap, input.offset, input.length,
                                [&](int64_t) {
                                    Impl::UpdateGroupWith(reduced, *g, true);
                                    counts[*g++]++;
                                },
                                [&]() {
                                    Impl::UpdateGroupWith(reduced, *g, false);
                                    counts[*g++]++;
                                });
                    }
                } else {
                    // Scalar input: broadcast one value (or one null) to every row.
                    const Scalar &input = *batch[0].scalar;
                    if (input.is_valid) {
                        const bool value = UnboxScalar<BooleanType>::Unbox(input);
                        for (int64_t i = 0; i < batch.length; i++) {
                            Impl::UpdateGroupWith(reduced, *g, value);
                            counts[*g++]++;
                        }
                    } else {
                        for (int64_t i = 0; i < batch.length; i++) {
                            bit_util::SetBitTo(no_nulls, *g++, false);
                        }
                    }
                }
                return turbo::OkStatus();
            }

            // Fold other's per-group state into ours through the id mapping:
            // counts add, reduced bits fold via Impl, no_nulls bits AND together.
            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedBooleanAggregator<Impl> *>(&raw_other);

                uint8_t *reduced = reduced_.mutable_data();
                uint8_t *no_nulls = no_nulls_.mutable_data();
                int64_t *counts = counts_.mutable_data();

                const uint8_t *other_reduced = other->reduced_.mutable_data();
                const uint8_t *other_no_nulls = other->no_nulls_.mutable_data();
                const int64_t *other_counts = other->counts_.mutable_data();

                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g, ++g) {
                    counts[*g] += other_counts[other_g];
                    Impl::UpdateGroupWith(reduced, *g, bit_util::get_bit(other_reduced, other_g));
                    bit_util::SetBitTo(
                            no_nulls, *g,
                            bit_util::get_bit(no_nulls, *g) && bit_util::get_bit(other_no_nulls, other_g));
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                std::shared_ptr<Buffer> null_bitmap;
                const int64_t *counts = counts_.data();
                int64_t null_count = 0;

                // Groups with fewer than min_count non-null values become null.
                // The bitmap is allocated lazily, only if some group falls short.
                for (int64_t i = 0; i < num_groups_; ++i) {
                    if (counts[i] >= options_.min_count) continue;

                    if (null_bitmap == nullptr) {
                        TURBO_MOVE_OR_RAISE(null_bitmap, allocate_bitmap(num_groups_, pool_));
                        bit_util::SetBitsTo(null_bitmap->mutable_data(), 0, num_groups_, true);
                    }

                    null_count += 1;
                    bit_util::SetBitTo(null_bitmap->mutable_data(), i, false);
                }

                TURBO_MOVE_OR_RAISE(auto reduced, reduced_.finish());
                if (!options_.skip_nulls) {
                    // Groups that saw nulls are null, except where Impl decides the
                    // reduced value already determines the result.
                    null_count = kUnknownNullCount;
                    TURBO_MOVE_OR_RAISE(auto no_nulls, no_nulls_.finish());
                    Impl::AdjustForMinCount(no_nulls->mutable_data(), reduced->data(), num_groups_);
                    if (null_bitmap) {
                        nebula::internal::BitmapAnd(null_bitmap->data(), /*left_offset=*/0,
                                                    no_nulls->data(), /*right_offset=*/0, num_groups_,
                                /*out_offset=*/0, null_bitmap->mutable_data());
                    } else {
                        null_bitmap = std::move(no_nulls);
                    }
                }

                return ArrayData::create(out_type(), num_groups_,
                                       {std::move(null_bitmap), std::move(reduced)}, null_count);
            }

            std::shared_ptr<DataType> out_type() const override { return boolean(); }

            int64_t num_groups_ = 0;
            ScalarAggregateOptions options_;
            // reduced_: one result bit per group; no_nulls_: false once a null seen.
            TypedBufferBuilder<bool> reduced_, no_nulls_;
            // Number of non-null observations per group (for min_count).
            TypedBufferBuilder<int64_t> counts_;
            MemoryPool *pool_;
        };

        // "any" reduction over grouped booleans: a group's bit flips to true as
        // soon as one true observation arrives, and never flips back.
        struct GroupedAnyImpl : public GroupedBooleanAggregator<GroupedAnyImpl> {
            // Seed for a freshly added group: no true seen yet.
            static bool NullValue() { return false; }

            // Fold one observation into group g's bit (guard clauses keep the
            // write conditional, exactly as before).
            static void UpdateGroupWith(uint8_t *seen, uint32_t g, bool value) {
                if (!value) return;
                if (bit_util::get_bit(seen, g)) return;
                bit_util::SetBit(seen, g);
            }

            // For skip_nulls=false output: a group already known true stays valid
            // even when it also contained nulls (no_nulls |= seen).
            static void AdjustForMinCount(uint8_t *no_nulls, const uint8_t *seen,
                                          int64_t num_groups) {
                nebula::internal::BitmapOr(no_nulls, /*left_offset=*/0, seen,
                                           /*right_offset=*/0, num_groups,
                                           /*out_offset=*/0, no_nulls);
            }
        };

        // "all" reduction over grouped booleans: a group starts vacuously true and
        // flips to false permanently on the first false observation.
        struct GroupedAllImpl : public GroupedBooleanAggregator<GroupedAllImpl> {
            static bool NullValue() { return true; }

            static void UpdateGroupWith(uint8_t *seen, uint32_t g, bool value) {
                if (value) return;  // only a false observation changes the group
                bit_util::ClearBit(seen, g);
            }

            // For skip_nulls=false output: a group already known false stays valid
            // even when it also contained nulls (no_nulls |= ~seen).
            static void AdjustForMinCount(uint8_t *no_nulls, const uint8_t *seen,
                                          int64_t num_groups) {
                nebula::internal::BitmapOrNot(no_nulls, /*left_offset=*/0, seen,
                                              /*right_offset=*/0, num_groups,
                                              /*out_offset=*/0, no_nulls);
            }
        };

        // ----------------------------------------------------------------------
        // CountDistinct/Distinct implementation

        // Counts distinct values per group (all / only valid / only null,
        // per CountOptions) by delegating de-duplication to a Grouper keyed on
        // (value, group_id) pairs.
        struct GroupedCountDistinctImpl : public GroupedAggregator {
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &args) override {
                ctx_ = ctx;
                pool_ = ctx->memory_pool();
                options_ = turbo::checked_cast<const CountOptions &>(*args.options);
                return turbo::OkStatus();
            }

            // No per-group buffers to grow: grouper_ holds all accumulated state.
            turbo::Status resize(int64_t new_num_groups) override {
                num_groups_ = new_num_groups;
                return turbo::OkStatus();
            }

            // Feed (value, group_id) pairs to the grouper; its returned group ids
            // are discarded — only the unique pairs it accumulates matter.
            turbo::Status consume(const ExecSpan &batch) override {
                TURBO_MOVE_OR_RAISE(std::ignore, grouper_->consume(batch));
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedCountDistinctImpl *>(&raw_other);

                // Get (value, group_id) pairs, then translate the group IDs and consume them
                // ourselves
                TURBO_MOVE_OR_RAISE(ExecBatch uniques, other->grouper_->GetUniques());
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> remapped_g,
                                    allocate_buffer(uniques.length * sizeof(uint32_t), pool_));

                const auto *g_mapping = group_id_mapping.buffers[1]->data_as<uint32_t>();
                const auto *other_g = uniques[1].array()->buffers[1]->data_as<uint32_t>();
                auto *g = remapped_g->mutable_data_as<uint32_t>();

                for (int64_t i = 0; i < uniques.length; i++) {
                    g[i] = g_mapping[other_g[i]];
                }

                // Swap in the remapped group-id buffer and re-consume the uniques.
                ExecSpan uniques_span(uniques);
                uniques_span.values[1].array.SetBuffer(1, remapped_g);
                return consume(uniques_span);
            }

            turbo::Result<Datum> Finalize() override {
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> values,
                                    allocate_buffer(num_groups_ * sizeof(int64_t), pool_));
                auto *counts = values->mutable_data_as<int64_t>();
                std::fill(counts, counts + num_groups_, 0);

                // Each unique (value, group) pair contributes at most one to its
                // group's count, depending on the mode and the pair's validity.
                TURBO_MOVE_OR_RAISE(auto uniques, grouper_->GetUniques());
                auto *g = uniques[1].array()->get_values<uint32_t>(1);
                const auto &items = *uniques[0].array();
                const auto *valid = items.get_values<uint8_t>(0, 0);
                if (options_.mode == CountOptions::ALL ||
                    (options_.mode == CountOptions::ONLY_VALID && !valid)) {
                    // No validity bitmap (or ALL mode): every unique pair counts.
                    for (int64_t i = 0; i < uniques.length; i++) {
                        counts[g[i]]++;
                    }
                } else if (options_.mode == CountOptions::ONLY_VALID) {
                    for (int64_t i = 0; i < uniques.length; i++) {
                        counts[g[i]] += bit_util::get_bit(valid, items.offset + i);
                    }
                } else if (valid) {  // ONLY_NULL
                    for (int64_t i = 0; i < uniques.length; i++) {
                        counts[g[i]] += !bit_util::get_bit(valid, items.offset + i);
                    }
                }

                return ArrayData::create(int64(), num_groups_, {nullptr, std::move(values)},
                        /*null_count=*/0);
            }

            std::shared_ptr<DataType> out_type() const override { return int64(); }

            ExecContext *ctx_;
            MemoryPool *pool_;
            int64_t num_groups_;
            CountOptions options_;
            // De-duplicates (value, group_id) pairs across consumed batches.
            std::unique_ptr<Grouper> grouper_;
            // Input value type; read by the GroupedDistinctImpl subclass.
            std::shared_ptr<DataType> out_type_;
        };

        // Emits the distinct values themselves (as list<value_type>) rather than a
        // count, reusing the parent's grouper state and options.
        struct GroupedDistinctImpl : public GroupedCountDistinctImpl {
            turbo::Result<Datum> Finalize() override {
                // Group the unique (value, group_id) pairs into one list slot per
                // output group.
                TURBO_MOVE_OR_RAISE(auto uniques, grouper_->GetUniques());
                TURBO_MOVE_OR_RAISE(auto groupings, grouper_->MakeGroupings(
                        *uniques[1].array_as<UInt32Array>(),
                        static_cast<uint32_t>(num_groups_), ctx_));
                TURBO_MOVE_OR_RAISE(
                        auto list, grouper_->ApplyGroupings(*groupings, *uniques[0].make_array(), ctx_));
                const auto &values = list->values();
                        DKCHECK_EQ(values->offset(), 0);
                auto *offsets = list->value_offsets()->mutable_data_as<int32_t>();
                if (options_.mode == CountOptions::ALL ||
                    (options_.mode == CountOptions::ONLY_VALID && values->null_count() == 0)) {
                    // Nothing to strip: return the list as-is.
                    return list;
                } else if (options_.mode == CountOptions::ONLY_VALID) {
                    // Shrink each slot by its null count (values are unique per
                    // group, so at most one null per slot — see DKCHECK), then drop
                    // the null child entries with a filter.
                    int32_t prev_offset = offsets[0];
                    for (int64_t i = 0; i < list->length(); i++) {
                        const int32_t slot_length = offsets[i + 1] - prev_offset;
                        const int64_t null_count =
                                slot_length - nebula::internal::CountSetBits(values->null_bitmap()->data(),
                                                                             prev_offset, slot_length);
                                DKCHECK_LE(null_count, 1);
                        const int32_t offset = null_count > 0 ? slot_length - 1 : slot_length;
                        prev_offset = offsets[i + 1];
                        offsets[i + 1] = offsets[i] + offset;
                    }
                    // The values' validity bitmap doubles as the keep-filter.
                    auto filter =
                            std::make_shared<BooleanArray>(values->length(), values->null_bitmap());
                    TURBO_MOVE_OR_RAISE(
                            auto new_values,
                            Filter(std::move(values), filter, FilterOptions(FilterOptions::DROP), ctx_));
                    return std::make_shared<ListArray>(list->type(), list->length(),
                                                       list->value_offsets(), new_values.make_array());
                }
                // ONLY_NULL
                if (values->null_count() == 0) {
                    // No nulls anywhere: every slot becomes empty.
                    std::fill(offsets + 1, offsets + list->length() + 1, offsets[0]);
                } else {
                    // Each slot keeps length 1 iff the group contained a null.
                    int32_t prev_offset = offsets[0];
                    for (int64_t i = 0; i < list->length(); i++) {
                        const int32_t slot_length = offsets[i + 1] - prev_offset;
                        const int64_t null_count =
                                slot_length - nebula::internal::CountSetBits(values->null_bitmap()->data(),
                                                                             prev_offset, slot_length);
                        const int32_t offset = null_count > 0 ? 1 : 0;
                        prev_offset = offsets[i + 1];
                        offsets[i + 1] = offsets[i] + offset;
                    }
                }
                // Replace the child with an all-null array of the total kept length.
                TURBO_MOVE_OR_RAISE(
                        auto new_values,
                        MakeArrayOfNull(out_type_,
                                        list->length() > 0 ? offsets[list->length()] - offsets[0] : 0,
                                        pool_));
                return std::make_shared<ListArray>(list->type(), list->length(),
                                                   list->value_offsets(), std::move(new_values));
            }

            std::shared_ptr<DataType> out_type() const override { return list(out_type_); }
        };

        // Create the aggregator state, then wire up what HashAggregateInit cannot
        // know about: the input value type and a Grouper keyed on the inputs.
        template<typename Impl>
        turbo::Result<std::unique_ptr<KernelState>> GroupedDistinctInit(KernelContext *ctx,
                                                                        const KernelInitArgs &args) {
            TURBO_MOVE_OR_RAISE(auto state, HashAggregateInit<Impl>(ctx, args));
            Impl *distinct = static_cast<Impl *>(state.get());
            distinct->out_type_ = args.inputs[0].get_shared_ptr();
            TURBO_MOVE_OR_RAISE(distinct->grouper_, Grouper::create(args.inputs, ctx->exec_context()));
            return state;
        }

        // ----------------------------------------------------------------------
        // One implementation

        template<typename Type, typename Enable = void>
        struct GroupedOneImpl final : public GroupedAggregator {
            // "one" aggregator for fixed-width primitive types: remembers the
            // first non-null value observed per group. Validity is tracked in
            // has_one_ (doubles as the output null bitmap in Finalize()).
            using CType = typename TypeTraits<Type>::CType;
            using GetSet = GroupedValueTraits<Type>;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override {
                // out_type_ initialized by GroupedOneInit
                ones_ = TypedBufferBuilder<CType>(ctx->memory_pool());
                has_one_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                // num_groups_ is zero-initialized at declaration, so the delta is
                // well-defined on the first call as well.
                auto added_groups = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                TURBO_RETURN_NOT_OK(ones_.append(added_groups, static_cast<CType>(0)));
                TURBO_RETURN_NOT_OK(has_one_.append(added_groups, false));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                auto raw_ones = ones_.mutable_data();

                return VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, CType val) -> turbo::Status {
                            // Only the first value per group is kept; later ones
                            // are ignored.
                            if (!bit_util::get_bit(has_one_.data(), g)) {
                                GetSet::Set(raw_ones, g, val);
                                bit_util::SetBit(has_one_.mutable_data(), g);
                            }
                            return turbo::OkStatus();
                        },
                        // Null slots contribute nothing.
                        [&](uint32_t g) -> turbo::Status { return turbo::OkStatus(); });
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedOneImpl *>(&raw_other);

                auto raw_ones = ones_.mutable_data();
                auto other_raw_ones = other->ones_.mutable_data();

                // group_id_mapping maps the other aggregator's group ids onto ours;
                // adopt the other's value only where we have none yet.
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < group_id_mapping.length;
                     ++other_g, ++g) {
                    if (!bit_util::get_bit(has_one_.data(), *g)) {
                        if (bit_util::get_bit(other->has_one_.data(), other_g)) {
                            GetSet::Set(raw_ones, *g, GetSet::Get(other_raw_ones, other_g));
                            bit_util::SetBit(has_one_.mutable_data(), *g);
                        }
                    }
                }

                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // has_one_ becomes the null bitmap: groups that never saw a value
                // are null in the output.
                TURBO_MOVE_OR_RAISE(auto null_bitmap, has_one_.finish());
                TURBO_MOVE_OR_RAISE(auto data, ones_.finish());
                return ArrayData::create(out_type_, num_groups_,
                                       {std::move(null_bitmap), std::move(data)});
            }

            std::shared_ptr<DataType> out_type() const override { return out_type_; }

            // Zero-initialized: resize() subtracts it before the first assignment.
            int64_t num_groups_ = 0;
            TypedBufferBuilder<CType> ones_;
            TypedBufferBuilder<bool> has_one_;
            std::shared_ptr<DataType> out_type_;
        };

        struct GroupedNullOneImpl : public GroupedAggregator {
            // "one" aggregator for null-typed input: every group's result is null,
            // so only the group count needs tracking.
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override { return turbo::OkStatus(); }

            turbo::Status resize(int64_t new_num_groups) override {
                num_groups_ = new_num_groups;
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override { return turbo::OkStatus(); }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // All-null array of length num_groups_ (null_count == length).
                return ArrayData::create(null(), num_groups_, {nullptr}, num_groups_);
            }

            std::shared_ptr<DataType> out_type() const override { return null(); }

            // Zero-initialized so Finalize() is well-defined even if resize() was
            // never called (consistent with GroupedNullListImpl).
            int64_t num_groups_ = 0;
        };

        template<typename Type>
        struct GroupedOneImpl<Type, enable_if_t<is_base_binary_type<Type>::value ||
                                                std::is_same<Type, FixedSizeBinaryType>::value>>
        final : public GroupedAggregator {
            // "one" aggregator for var-width binary/string and fixed-size binary:
            // the first non-null value per group is copied into a pool-allocated
            // string; Finalize() re-assembles them into the output layout.
            using Allocator = nebula::stl::allocator<char>;
            using StringType = std::basic_string<char, std::char_traits<char>, Allocator>;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override {
                ctx_ = ctx;
                allocator_ = Allocator(ctx->memory_pool());
                // out_type_ initialized by GroupedOneInit
                has_one_ = TypedBufferBuilder<bool>(ctx->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                // num_groups_ is zero-initialized at declaration, so the delta is
                // well-defined on the first call as well.
                auto added_groups = new_num_groups - num_groups_;
                DKCHECK_GE(added_groups, 0);
                num_groups_ = new_num_groups;
                ones_.resize(new_num_groups);
                TURBO_RETURN_NOT_OK(has_one_.append(added_groups, false));
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                return VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t g, std::string_view val) -> turbo::Status {
                            // Keep only the first value seen for this group.
                            if (!bit_util::get_bit(has_one_.data(), g)) {
                                ones_[g].emplace(val.data(), val.size(), allocator_);
                                bit_util::SetBit(has_one_.mutable_data(), g);
                            }
                            return turbo::OkStatus();
                        },
                        // Null slots contribute nothing.
                        [&](uint32_t g) -> turbo::Status { return turbo::OkStatus(); });
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedOneImpl *>(&raw_other);
                // Remap the other's group ids onto ours and adopt its value where
                // we have none yet; moving is safe since `other` is consumed.
                auto g = group_id_mapping.get_values<uint32_t>(1);
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < group_id_mapping.length;
                     ++other_g, ++g) {
                    if (!bit_util::get_bit(has_one_.data(), *g)) {
                        if (bit_util::get_bit(other->has_one_.data(), other_g)) {
                            ones_[*g] = std::move(other->ones_[other_g]);
                            bit_util::SetBit(has_one_.mutable_data(), *g);
                        }
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // has_one_ becomes the output null bitmap; the data buffer(s) are
                // filled in by the type-appropriate MakeOffsetsValues overload.
                TURBO_MOVE_OR_RAISE(auto null_bitmap, has_one_.finish());
                auto ones =
                        ArrayData::create(out_type(), num_groups_, {std::move(null_bitmap), nullptr});
                TURBO_RETURN_NOT_OK(MakeOffsetsValues(ones.get(), ones_));
                return ones;
            }

            // Var-width layout: build the offsets buffer, then copy the string
            // payloads into a single values buffer.
            template<typename T = Type>
            enable_if_base_binary<T, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                using offset_type = typename T::offset_type;
                TURBO_MOVE_OR_RAISE(
                        auto raw_offsets,
                        allocate_buffer((1 + values.size()) * sizeof(offset_type), ctx_->memory_pool()));
                auto *offsets = raw_offsets->mutable_data_as<offset_type>();
                offsets[0] = 0;
                offsets++;
                const uint8_t *null_bitmap = array->buffers[0]->data();
                offset_type total_length = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                        DKCHECK(value.has_value());
                        // Guard against offset_type overflow of the cumulative length.
                        if (value->size() >
                            static_cast<size_t>(std::numeric_limits<offset_type>::max()) ||
                            nebula::internal::AddWithOverflow(
                                    total_length, static_cast<offset_type>(value->size()), &total_length)) {
                            return turbo::invalid_argument_error("turbo::Result is too large to fit in ", *array->type,
                                                                 " cast to large_ variant of type");
                        }
                    }
                    // Null slots keep the running offset (zero-length slot).
                    offsets[i] = total_length;
                }
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                        DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), value->size());
                        offset += value->size();
                    }
                }
                array->buffers[1] = std::move(raw_offsets);
                array->buffers.push_back(std::move(data));
                return turbo::OkStatus();
            }

            // Fixed-width layout: one slot_width-sized slot per group; null slots
            // are zero-filled.
            template<typename T = Type>
            enable_if_same<T, FixedSizeBinaryType, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                const uint8_t *null_bitmap = array->buffers[0]->data();
                const int32_t slot_width =
                        turbo::checked_cast<const FixedSizeBinaryType &>(*array->type).byte_width();
                int64_t total_length = values.size() * slot_width;
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                        DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), slot_width);
                    } else {
                        std::memset(data->mutable_data() + offset, 0x00, slot_width);
                    }
                    offset += slot_width;
                }
                array->buffers[1] = std::move(data);
                return turbo::OkStatus();
            }

            std::shared_ptr<DataType> out_type() const override { return out_type_; }

            ExecContext *ctx_;
            Allocator allocator_;
            // Zero-initialized: resize() subtracts it before the first assignment.
            int64_t num_groups_ = 0;
            std::vector<std::optional<StringType>> ones_;
            TypedBufferBuilder<bool> has_one_;
            std::shared_ptr<DataType> out_type_;
        };

        template<typename T>
        turbo::Result<std::unique_ptr<KernelState>> GroupedOneInit(KernelContext *ctx,
                                                                   const KernelInitArgs &args) {
            // Construct the aggregator and record the input value type: "one"
            // echoes back a value of the same type it consumes.
            TURBO_MOVE_OR_RAISE(auto state, HashAggregateInit<GroupedOneImpl<T>>(ctx, args));
            static_cast<GroupedOneImpl<T> *>(state.get())->out_type_ =
                    args.inputs[0].get_shared_ptr();
            return state;
        }

        struct GroupedOneFactory {
            // Type visitor that instantiates the "one" hash-aggregate kernel for
            // a given input type. Exactly one Visit overload is selected per
            // type; unsupported types hit the unimplemented_error overloads.
            template<typename T>
            enable_if_physical_integer<T, turbo::Status> Visit(const T &) {
                // Integer-like types share the kernel of their physical storage type.
                using PhysicalType = typename T::PhysicalType;
                kernel = MakeKernel(std::move(argument_type), GroupedOneInit<PhysicalType>);
                return turbo::OkStatus();
            }

            template<typename T>
            enable_if_floating_point<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), GroupedOneInit<T>);
                return turbo::OkStatus();
            }

            template<typename T>
            enable_if_decimal<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), GroupedOneInit<T>);
                return turbo::OkStatus();
            }

            template<typename T>
            enable_if_base_binary<T, turbo::Status> Visit(const T &) {
                // Selects the binary/string specialization of GroupedOneImpl.
                kernel = MakeKernel(std::move(argument_type), GroupedOneInit<T>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const FixedSizeBinaryType &) {
                kernel = MakeKernel(std::move(argument_type), GroupedOneInit<FixedSizeBinaryType>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const BooleanType &) {
                kernel = MakeKernel(std::move(argument_type), GroupedOneInit<BooleanType>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const NullType &) {
                // Null input needs no value storage at all.
                kernel = MakeKernel(std::move(argument_type), HashAggregateInit<GroupedNullOneImpl>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Outputting one of data of type ", type);
            }

            // Catch-all for any type without a dedicated overload above.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Outputting one of data of type ", type);
            }

            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedOneFactory factory;
                factory.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &factory));
                return std::move(factory.kernel);
            }

            HashAggregateKernel kernel;
            InputType argument_type;
        };

        // ----------------------------------------------------------------------
        // List implementation

        template<typename Type, typename Enable = void>
        struct GroupedListImpl final : public GroupedAggregator {
            // "list" aggregator for fixed-width primitive types: every consumed
            // value is appended (with its group id) to flat buffers, and
            // Finalize() regroups them into one list per group.
            using CType = typename TypeTraits<Type>::CType;
            using GetSet = GroupedValueTraits<Type>;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override {
                ctx_ = ctx;
                has_nulls_ = false;
                // out_type_ initialized by GroupedListInit
                values_ = TypedBufferBuilder<CType>(ctx_->memory_pool());
                groups_ = TypedBufferBuilder<uint32_t>(ctx_->memory_pool());
                values_bitmap_ = TypedBufferBuilder<bool>(ctx_->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                // No per-group storage; only the group count is recorded for
                // Finalize().
                num_groups_ = new_num_groups;
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                // batch[0]: values to collect; batch[1]: per-row group ids.
                const ArraySpan &values_array_data = batch[0].array;
                const ArraySpan &groups_array_data = batch[1].array;

                int64_t num_values = values_array_data.length;
                const auto *groups = groups_array_data.get_values<uint32_t>(1, 0);
                        DKCHECK_EQ(groups_array_data.offset, 0);
                TURBO_RETURN_NOT_OK(groups_.append(groups, num_values));

                int64_t offset = values_array_data.offset;
                const uint8_t *values = values_array_data.buffers[1].data;
                TURBO_RETURN_NOT_OK(GetSet::AppendBuffers(&values_, values, offset, num_values));

                // Validity bitmap is materialized lazily: the first time a null
                // appears, backfill all earlier rows as valid, then track the
                // real bitmap from here on.
                if (batch[0].null_count() > 0) {
                    if (!has_nulls_) {
                        has_nulls_ = true;
                        TURBO_RETURN_NOT_OK(values_bitmap_.append(num_args_, true));
                    }
                    const uint8_t *values_bitmap = values_array_data.buffers[0].data;
                    TURBO_RETURN_NOT_OK(GroupedValueTraits<BooleanType>::AppendBuffers(
                            &values_bitmap_, values_bitmap, offset, num_values));
                } else if (has_nulls_) {
                    // Bitmap already started: this all-valid batch appends trues.
                    TURBO_RETURN_NOT_OK(values_bitmap_.append(num_values, true));
                }
                num_args_ += num_values;
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedListImpl *>(&raw_other);
                const auto *other_raw_groups = other->groups_.data();
                const auto *g = group_id_mapping.get_values<uint32_t>(1);

                // Remap each of the other's stored group ids into our id space.
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < other->num_args_;
                     ++other_g) {
                    TURBO_RETURN_NOT_OK(groups_.append(g[other_raw_groups[other_g]]));
                }

                const auto *values = reinterpret_cast<const uint8_t *>(other->values_.data());
                TURBO_RETURN_NOT_OK(GetSet::AppendBuffers(&values_, values, 0, other->num_args_));

                // Same lazy-bitmap protocol as consume(), applied to the other
                // side's accumulated rows.
                if (other->has_nulls_) {
                    if (!has_nulls_) {
                        has_nulls_ = true;
                        TURBO_RETURN_NOT_OK(values_bitmap_.append(num_args_, true));
                    }
                    const uint8_t *values_bitmap = other->values_bitmap_.data();
                    TURBO_RETURN_NOT_OK(GroupedValueTraits<BooleanType>::AppendBuffers(
                            &values_bitmap_, values_bitmap, 0, other->num_args_));
                } else if (has_nulls_) {
                    TURBO_RETURN_NOT_OK(values_bitmap_.append(other->num_args_, true));
                }
                num_args_ += other->num_args_;
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                TURBO_MOVE_OR_RAISE(auto values_buffer, values_.finish());
                TURBO_MOVE_OR_RAISE(auto groups_buffer, groups_.finish());
                TURBO_MOVE_OR_RAISE(auto null_bitmap_buffer, values_bitmap_.finish());

                // Compute per-group row sets from the collected group ids, then
                // gather the flat values into one list per group.
                auto groups = UInt32Array(num_args_, groups_buffer);
                TURBO_MOVE_OR_RAISE(
                        auto groupings,
                        Grouper::MakeGroupings(groups, static_cast<uint32_t>(num_groups_), ctx_));

                auto values_array_data = ArrayData::create(
                        out_type_, num_args_,
                        {has_nulls_ ? std::move(null_bitmap_buffer) : nullptr, std::move(values_buffer)});
                auto values = make_array(values_array_data);
                return Grouper::ApplyGroupings(*groupings, *values);
            }

            std::shared_ptr<DataType> out_type() const override { return list(out_type_); }

            ExecContext *ctx_;
            // num_args_ counts all rows consumed/merged so far.
            int64_t num_groups_, num_args_ = 0;
            // True once any null has been seen; values_bitmap_ is only valid then.
            bool has_nulls_ = false;
            TypedBufferBuilder<CType> values_;
            TypedBufferBuilder<uint32_t> groups_;
            TypedBufferBuilder<bool> values_bitmap_;
            std::shared_ptr<DataType> out_type_;
        };

        template<typename Type>
        struct GroupedListImpl<Type, enable_if_t<is_base_binary_type<Type>::value ||
                                                 std::is_same<Type, FixedSizeBinaryType>::value>>
        final : public GroupedAggregator {
            // "list" aggregator for var-width binary/string and fixed-size binary:
            // each consumed value is copied into a pool-allocated string, and
            // Finalize() rebuilds the proper Arrow-style buffers before grouping.
            using Allocator = nebula::stl::allocator<char>;
            using StringType = std::basic_string<char, std::char_traits<char>, Allocator>;
            using GetSet = GroupedValueTraits<Type>;

            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override {
                ctx_ = ctx;
                allocator_ = Allocator(ctx_->memory_pool());
                // out_type_ initialized by GroupedListInit
                groups_ = TypedBufferBuilder<uint32_t>(ctx_->memory_pool());
                values_bitmap_ = TypedBufferBuilder<bool>(ctx_->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                // Only the group count is needed for Finalize().
                num_groups_ = new_num_groups;
                return turbo::OkStatus();
            }

            turbo::Status consume(const ExecSpan &batch) override {
                // batch[0]: values to collect; batch[1]: per-row group ids.
                const ArraySpan &values_array_data = batch[0].array;
                int64_t num_values = values_array_data.length;
                int64_t offset = values_array_data.offset;

                const ArraySpan &groups_array_data = batch[1].array;
                const uint32_t *groups = groups_array_data.get_values<uint32_t>(1, 0);
                        DKCHECK_EQ(groups_array_data.offset, 0);
                TURBO_RETURN_NOT_OK(groups_.append(groups, num_values));

                // Unlike the primitive specialization, the validity bitmap is
                // built eagerly for every row.
                if (batch[0].null_count() == 0) {
                    TURBO_RETURN_NOT_OK(values_bitmap_.append(num_values, true));
                } else {
                    const uint8_t *values_bitmap = values_array_data.buffers[0].data;
                    TURBO_RETURN_NOT_OK(GroupedValueTraits<BooleanType>::AppendBuffers(
                            &values_bitmap_, values_bitmap, offset, num_values));
                }
                num_args_ += num_values;
                return VisitGroupedValues<Type>(
                        batch,
                        [&](uint32_t group, std::string_view val) -> turbo::Status {
                            values_.emplace_back(StringType(val.data(), val.size(), allocator_));
                            return turbo::OkStatus();
                        },
                        [&](uint32_t group) -> turbo::Status {
                            // Placeholder for a null slot; its content is never
                            // read because the validity bit is unset.
                            values_.emplace_back("");
                            return turbo::OkStatus();
                        });
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedListImpl *>(&raw_other);
                const auto *other_raw_groups = other->groups_.data();
                const auto *g = group_id_mapping.get_values<uint32_t>(1);

                // Remap each of the other's stored group ids into our id space.
                for (uint32_t other_g = 0; static_cast<int64_t>(other_g) < other->num_args_;
                     ++other_g) {
                    TURBO_RETURN_NOT_OK(groups_.append(g[other_raw_groups[other_g]]));
                }

                values_.insert(values_.end(), other->values_.begin(), other->values_.end());

                const uint8_t *values_bitmap = other->values_bitmap_.data();
                TURBO_RETURN_NOT_OK(GroupedValueTraits<BooleanType>::AppendBuffers(
                        &values_bitmap_, values_bitmap, 0, other->num_args_));
                num_args_ += other->num_args_;
                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                TURBO_MOVE_OR_RAISE(auto groups_buffer, groups_.finish());
                TURBO_MOVE_OR_RAISE(auto null_bitmap_buffer, values_bitmap_.finish());

                // Compute per-group row sets, materialize the value buffers, then
                // gather the flat values into one list per group.
                auto groups = UInt32Array(num_args_, groups_buffer);
                TURBO_MOVE_OR_RAISE(
                        auto groupings,
                        Grouper::MakeGroupings(groups, static_cast<uint32_t>(num_groups_), ctx_));

                auto values_array_data =
                        ArrayData::create(out_type_, num_args_, {std::move(null_bitmap_buffer), nullptr});
                TURBO_RETURN_NOT_OK(MakeOffsetsValues(values_array_data.get(), values_));
                auto values = make_array(values_array_data);
                return Grouper::ApplyGroupings(*groupings, *values);
            }

            // Var-width layout: build the offsets buffer, then copy the string
            // payloads into a single values buffer.
            template<typename T = Type>
            enable_if_base_binary<T, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                using offset_type = typename T::offset_type;
                TURBO_MOVE_OR_RAISE(
                        auto raw_offsets,
                        allocate_buffer((1 + values.size()) * sizeof(offset_type), ctx_->memory_pool()));
                auto *offsets = raw_offsets->mutable_data_as<offset_type>();
                offsets[0] = 0;
                offsets++;
                const uint8_t *null_bitmap = array->buffers[0]->data();
                offset_type total_length = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                                DKCHECK(value.has_value());
                        // Guard against offset_type overflow of the cumulative length.
                        if (value->size() >
                            static_cast<size_t>(std::numeric_limits<offset_type>::max()) ||
                            nebula::internal::AddWithOverflow(
                                    total_length, static_cast<offset_type>(value->size()), &total_length)) {
                            return turbo::invalid_argument_error("turbo::Result is too large to fit in ", *array->type,
                                                                 " cast to large_ variant of type");
                        }
                    }
                    // Null slots keep the running offset (zero-length slot).
                    offsets[i] = total_length;
                }
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                                DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), value->size());
                        offset += value->size();
                    }
                }
                array->buffers[1] = std::move(raw_offsets);
                array->buffers.push_back(std::move(data));
                return turbo::OkStatus();
            }

            // Fixed-width layout: one slot_width-sized slot per row; null slots
            // are zero-filled.
            template<typename T = Type>
            enable_if_same<T, FixedSizeBinaryType, turbo::Status> MakeOffsetsValues(
                    ArrayData *array, const std::vector<std::optional<StringType>> &values) {
                const uint8_t *null_bitmap = array->buffers[0]->data();
                const int32_t slot_width =
                        turbo::checked_cast<const FixedSizeBinaryType &>(*array->type).byte_width();
                int64_t total_length = values.size() * slot_width;
                TURBO_MOVE_OR_RAISE(auto data, allocate_buffer(total_length, ctx_->memory_pool()));
                int64_t offset = 0;
                for (size_t i = 0; i < values.size(); i++) {
                    if (bit_util::get_bit(null_bitmap, i)) {
                        const std::optional<StringType> &value = values[i];
                                DKCHECK(value.has_value());
                        std::memcpy(data->mutable_data() + offset, value->data(), slot_width);
                    } else {
                        std::memset(data->mutable_data() + offset, 0x00, slot_width);
                    }
                    offset += slot_width;
                }
                array->buffers[1] = std::move(data);
                return turbo::OkStatus();
            }

            std::shared_ptr<DataType> out_type() const override { return list(out_type_); }

            ExecContext *ctx_;
            Allocator allocator_;
            // num_args_ counts all rows consumed/merged so far.
            int64_t num_groups_, num_args_ = 0;
            std::vector<std::optional<StringType>> values_;
            TypedBufferBuilder<uint32_t> groups_;
            TypedBufferBuilder<bool> values_bitmap_;
            std::shared_ptr<DataType> out_type_;
        };

        struct GroupedNullListImpl : public GroupedAggregator {
            // "list" aggregator for null-typed input: only per-group element
            // counts are tracked, since every collected value is null.
            turbo::Status init(ExecContext *ctx, const KernelInitArgs &) override {
                ctx_ = ctx;
                counts_ = TypedBufferBuilder<int64_t>(ctx_->memory_pool());
                return turbo::OkStatus();
            }

            turbo::Status resize(int64_t new_num_groups) override {
                const int64_t delta = new_num_groups - num_groups_;
                num_groups_ = new_num_groups;
                // New groups start with an empty (zero-length) list.
                return counts_.append(delta, 0);
            }

            turbo::Status consume(const ExecSpan &batch) override {
                // Bump the element count for each row's group.
                int64_t *slot_counts = counts_.mutable_data();
                const auto *group_ids = batch[1].array.get_values<uint32_t>(1);
                for (int64_t row = 0; row < batch.length; ++row) {
                    slot_counts[group_ids[row]] += 1;
                }
                return turbo::OkStatus();
            }

            turbo::Status Merge(GroupedAggregator &&raw_other,
                                const ArrayData &group_id_mapping) override {
                auto other = turbo::checked_cast<GroupedNullListImpl *>(&raw_other);

                int64_t *slot_counts = counts_.mutable_data();
                const int64_t *other_counts = other->counts_.data();

                // Fold the other's counts into ours via the group-id remapping.
                const auto *mapping = group_id_mapping.get_values<uint32_t>(1);
                for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g) {
                    slot_counts[mapping[other_g]] += other_counts[other_g];
                }

                return turbo::OkStatus();
            }

            turbo::Result<Datum> Finalize() override {
                // Emit list<null>: one list per group holding counts_[g] nulls.
                std::unique_ptr<ArrayBuilder> builder;
                TURBO_RETURN_NOT_OK(MakeBuilder(ctx_->memory_pool(), list(null()), &builder));
                auto list_builder = turbo::checked_cast<ListBuilder *>(builder.get());
                auto value_builder = turbo::checked_cast<NullBuilder *>(list_builder->value_builder());
                const int64_t *slot_counts = counts_.data();

                for (int64_t group = 0; group < num_groups_; ++group) {
                    TURBO_RETURN_NOT_OK(list_builder->append(true));
                    TURBO_RETURN_NOT_OK(value_builder->append_nulls(slot_counts[group]));
                }
                return list_builder->finish();
            }

            std::shared_ptr<DataType> out_type() const override { return list(null()); }

            ExecContext *ctx_;
            int64_t num_groups_ = 0;
            TypedBufferBuilder<int64_t> counts_;
        };

        template<typename T>
        turbo::Result<std::unique_ptr<KernelState>> GroupedListInit(KernelContext *ctx,
                                                                    const KernelInitArgs &args) {
            // Construct the aggregator and record the input value type; the
            // kernel's output is list<input type>.
            TURBO_MOVE_OR_RAISE(auto state, HashAggregateInit<GroupedListImpl<T>>(ctx, args));
            static_cast<GroupedListImpl<T> *>(state.get())->out_type_ =
                    args.inputs[0].get_shared_ptr();
            return state;
        }

        // Type visitor that builds the "hash_list" kernel for a given input type.
        // Each Visit overload instantiates GroupedListInit with an appropriate
        // representation of the visited logical type.
        struct GroupedListFactory {
            // Physical integers: dispatch on the storage type (T::PhysicalType)
            // so logical types sharing a representation share an instantiation.
            template<typename T>
            enable_if_physical_integer<T, turbo::Status> Visit(const T &) {
                using PhysicalType = typename T::PhysicalType;
                kernel = MakeKernel(std::move(argument_type), GroupedListInit<PhysicalType>);
                return turbo::OkStatus();
            }

            // Floating-point types use their logical type directly.
            template<typename T>
            enable_if_floating_point<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), GroupedListInit<T>);
                return turbo::OkStatus();
            }

            // Decimal types use their logical type directly.
            template<typename T>
            enable_if_decimal<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), GroupedListInit<T>);
                return turbo::OkStatus();
            }

            // Variable-length binary / string types.
            template<typename T>
            enable_if_base_binary<T, turbo::Status> Visit(const T &) {
                kernel = MakeKernel(std::move(argument_type), GroupedListInit<T>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const FixedSizeBinaryType &) {
                kernel = MakeKernel(std::move(argument_type), GroupedListInit<FixedSizeBinaryType>);
                return turbo::OkStatus();
            }

            turbo::Status Visit(const BooleanType &) {
                kernel = MakeKernel(std::move(argument_type), GroupedListInit<BooleanType>);
                return turbo::OkStatus();
            }

            // Null arrays carry no values; only per-group counts are tracked.
            turbo::Status Visit(const NullType &) {
                kernel = MakeKernel(std::move(argument_type), HashAggregateInit<GroupedNullListImpl>);
                return turbo::OkStatus();
            }

            // Half-float grouped lists are not implemented.
            turbo::Status Visit(const Fp16Type &type) {
                return turbo::unimplemented_error("Outputting list of data of type ", type);
            }

            // Fallback: all remaining types are unsupported.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Outputting list of data of type ", type);
            }

            // Build the kernel for `type`, or return an error if unsupported.
            static turbo::Result<HashAggregateKernel> create(const std::shared_ptr<DataType> &type) {
                GroupedListFactory factory;
                factory.argument_type = type->id();
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, &factory));
                return std::move(factory.kernel);
            }

            HashAggregateKernel kernel;  // result slot filled by the Visit overloads
            InputType argument_type;     // input matcher consumed by MakeKernel
        };
    }  // namespace

    namespace {
        // FunctionDoc entries for the hash aggregate functions registered in
        // register_hash_aggregate_basic() below. Each entry lists: summary,
        // description, argument names, and (optionally) the options class name.
        const FunctionDoc hash_count_doc{
                "Count the number of null / non-null values in each group",
                ("By default, only non-null values are counted.\n"
                 "This can be changed through ScalarAggregateOptions."),
                {"array", "group_id_array"},
                "CountOptions"};

        const FunctionDoc hash_count_all_doc{"Count the number of rows in each group",
                                             ("Not caring about the values of any column."),
                                             {"group_id_array"}};

        const FunctionDoc hash_sum_doc{"Sum values in each group",
                                       ("Null values are ignored."),
                                       {"array", "group_id_array"},
                                       "ScalarAggregateOptions"};

        const FunctionDoc hash_product_doc{
                "Compute the product of values in each group",
                ("Null values are ignored.\n"
                 "On integer overflow, the result will wrap around as if the calculation\n"
                 "was done with unsigned integers."),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        const FunctionDoc hash_mean_doc{
                "Compute the mean of values in each group",
                ("Null values are ignored.\n"
                 "For integers and floats, NaN is returned if min_count = 0 and\n"
                 "there are no values. For decimals, null is returned instead."),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        const FunctionDoc hash_stddev_doc{
                "Compute the standard deviation of values in each group",
                ("The number of degrees of freedom can be controlled using VarianceOptions.\n"
                 "By default (`ddof` = 0), the population standard deviation is calculated.\n"
                 "Nulls are ignored.  If there are not enough non-null values in the array\n"
                 "to satisfy `ddof`, null is returned."),
                {"array", "group_id_array"}};

        const FunctionDoc hash_variance_doc{
                "Compute the variance of values in each group",
                ("The number of degrees of freedom can be controlled using VarianceOptions.\n"
                 "By default (`ddof` = 0), the population variance is calculated.\n"
                 "Nulls are ignored.  If there are not enough non-null values in the array\n"
                 "to satisfy `ddof`, null is returned."),
                {"array", "group_id_array"}};

        const FunctionDoc hash_tdigest_doc{
                "Compute approximate quantiles of values in each group",
                ("The T-Digest algorithm is used for a fast approximation.\n"
                 "By default, the 0.5 quantile (i.e. median) is returned.\n"
                 "Nulls and NaNs are ignored.\n"
                 "Nulls are returned if there are no valid data points."),
                {"array", "group_id_array"},
                "TDigestOptions"};

        const FunctionDoc hash_approximate_median_doc{
                "Compute approximate medians of values in each group",
                ("The T-Digest algorithm is used for a fast approximation.\n"
                 "Nulls and NaNs are ignored.\n"
                 "Nulls are returned if there are no valid data points."),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        const FunctionDoc hash_first_last_doc{
                "Compute the first and last of values in each group",
                ("Null values are ignored by default.\n"
                 "If skip_nulls = false, then this will return the first and last values\n"
                 "regardless if it is null"),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        // Doc for hash_first; the description was copy-pasted from
        // hash_first_last and wrongly mentioned "first and last values".
        const FunctionDoc hash_first_doc{
                "Compute the first value in each group",
                ("Null values are ignored by default.\n"
                 "If skip_nulls = false, then this will return the first value\n"
                 "regardless if it is null"),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        // Doc for hash_last; the summary and description were copy-pasted from
        // hash_first and wrongly said "first" instead of "last".
        const FunctionDoc hash_last_doc{
                "Compute the last value in each group",
                ("Null values are ignored by default.\n"
                 "If skip_nulls = false, then this will return the last value\n"
                 "regardless if it is null"),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        const FunctionDoc hash_min_max_doc{
                "Compute the minimum and maximum of values in each group",
                ("Null values are ignored by default.\n"
                 "This can be changed through ScalarAggregateOptions."),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        // Shared documentation for both hash_min and hash_max.
        const FunctionDoc hash_min_or_max_doc{
                "Compute the minimum or maximum of values in each group",
                ("Null values are ignored by default.\n"
                 "This can be changed through ScalarAggregateOptions."),
                {"array", "group_id_array"},
                "ScalarAggregateOptions"};

        const FunctionDoc hash_any_doc{"Whether any element in each group evaluates to true",
                                       ("Null values are ignored."),
                                       {"array", "group_id_array"},
                                       "ScalarAggregateOptions"};

        const FunctionDoc hash_all_doc{"Whether all elements in each group evaluate to true",
                                       ("Null values are ignored."),
                                       {"array", "group_id_array"},
                                       "ScalarAggregateOptions"};

        const FunctionDoc hash_count_distinct_doc{
                "Count the distinct values in each group",
                ("Whether nulls/values are counted is controlled by CountOptions.\n"
                 "NaNs and signed zeroes are not normalized."),
                {"array", "group_id_array"},
                "CountOptions"};

        const FunctionDoc hash_distinct_doc{
                "Keep the distinct values in each group",
                ("Whether nulls/values are kept is controlled by CountOptions.\n"
                 "NaNs and signed zeroes are not normalized."),
                {"array", "group_id_array"},
                "CountOptions"};

        const FunctionDoc hash_one_doc{"Get one value from each group",
                                       ("Null values are also returned."),
                                       {"array", "group_id_array"}};

        const FunctionDoc hash_list_doc{"List all values in each group",
                                        ("Null values are also returned."),
                                        {"array", "group_id_array"}};
    }  // namespace

    // Register the basic hash (grouped) aggregate functions with `registry`.
    // Fixes vs. previous revision: the "hash_count_all" block used an
    // inconsistent `auto status = ...; KCHECK_OK(status);` pattern (now the
    // same single-expression KCHECK_OK as every other block), and the
    // mis-indented KCHECK_OK lines are normalized.
    void register_hash_aggregate_basic(FunctionRegistry *registry) {
        // Shared default option instances; static so their addresses remain
        // valid for the lifetime of the registered functions.
        static auto default_count_options = CountOptions::defaults();
        static auto default_scalar_aggregate_options = ScalarAggregateOptions::defaults();
        static auto default_tdigest_options = TDigestOptions::defaults();
        static auto default_variance_options = VarianceOptions::defaults();

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_count", Arity::Binary(), hash_count_doc, &default_count_options);
            KCHECK_OK(func->add_kernel(
                    MakeKernel(InputType::Any(), HashAggregateInit<GroupedCountImpl>)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>("hash_count_all", Arity::Unary(),
                                                                hash_count_all_doc, nullptr);
            KCHECK_OK(func->add_kernel(MakeUnaryKernel(HashAggregateInit<GroupedCountAllImpl>)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_sum", Arity::Binary(), hash_sum_doc, &default_scalar_aggregate_options);
            KCHECK_OK(AddHashAggKernels({boolean()}, GroupedSumFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(signed_int_types(), GroupedSumFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(unsigned_int_types(), GroupedSumFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(floating_point_types(), GroupedSumFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({decimal128(1, 1), decimal256(1, 1)},
                                        GroupedSumFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels({null()}, GroupedSumFactory::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_product", Arity::Binary(), hash_product_doc,
                    &default_scalar_aggregate_options);
            KCHECK_OK(AddHashAggKernels({boolean()}, GroupedProductFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(signed_int_types(), GroupedProductFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(unsigned_int_types(), GroupedProductFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(floating_point_types(), GroupedProductFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({decimal128(1, 1), decimal256(1, 1)},
                                        GroupedProductFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels({null()}, GroupedProductFactory::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_mean", Arity::Binary(), hash_mean_doc, &default_scalar_aggregate_options);
            KCHECK_OK(AddHashAggKernels({boolean()}, GroupedMeanFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(signed_int_types(), GroupedMeanFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(unsigned_int_types(), GroupedMeanFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(floating_point_types(), GroupedMeanFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({decimal128(1, 1), decimal256(1, 1)},
                                        GroupedMeanFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels({null()}, GroupedMeanFactory::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_stddev", Arity::Binary(), hash_stddev_doc, &default_variance_options);
            KCHECK_OK(AddHashAggKernels(signed_int_types(),
                                        GroupedVarStdFactory<VarOrStd::Std>::create, func.get()));
            KCHECK_OK(AddHashAggKernels(unsigned_int_types(),
                                        GroupedVarStdFactory<VarOrStd::Std>::create, func.get()));
            KCHECK_OK(AddHashAggKernels(floating_point_types(),
                                        GroupedVarStdFactory<VarOrStd::Std>::create, func.get()));
            KCHECK_OK(AddHashAggKernels({decimal128(1, 1), decimal256(1, 1)},
                                        GroupedVarStdFactory<VarOrStd::Std>::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_variance", Arity::Binary(), hash_variance_doc, &default_variance_options);
            KCHECK_OK(AddHashAggKernels(signed_int_types(),
                                        GroupedVarStdFactory<VarOrStd::Var>::create, func.get()));
            KCHECK_OK(AddHashAggKernels(unsigned_int_types(),
                                        GroupedVarStdFactory<VarOrStd::Var>::create, func.get()));
            KCHECK_OK(AddHashAggKernels(floating_point_types(),
                                        GroupedVarStdFactory<VarOrStd::Var>::create, func.get()));
            KCHECK_OK(AddHashAggKernels({decimal128(1, 1), decimal256(1, 1)},
                                        GroupedVarStdFactory<VarOrStd::Var>::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        // hash_approximate_median reuses the hash_tdigest kernels, so keep a
        // raw handle to the registered function.
        HashAggregateFunction *tdigest_func = nullptr;
        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_tdigest", Arity::Binary(), hash_tdigest_doc, &default_tdigest_options);
            KCHECK_OK(
                    AddHashAggKernels(signed_int_types(), GroupedTDigestFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(unsigned_int_types(), GroupedTDigestFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(floating_point_types(), GroupedTDigestFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({decimal128(1, 1), decimal256(1, 1)},
                                        GroupedTDigestFactory::create, func.get()));
            tdigest_func = func.get();
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_approximate_median", Arity::Binary(), hash_approximate_median_doc,
                    &default_scalar_aggregate_options);
            KCHECK_OK(func->add_kernel(MakeApproximateMedianKernel(tdigest_func)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        // hash_first / hash_last reuse the hash_first_last kernels.
        HashAggregateFunction *first_last_func = nullptr;
        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_first_last", Arity::Binary(), hash_first_last_doc,
                    &default_scalar_aggregate_options);
            KCHECK_OK(
                    AddHashAggKernels(numeric_types(), GroupedFirstLastFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(temporal_types(), GroupedFirstLastFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(base_binary_types(), GroupedFirstLastFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels({boolean(), fixed_size_binary(1)},
                                        GroupedFirstLastFactory::create, func.get()));
            first_last_func = func.get();
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_first", Arity::Binary(), hash_first_doc, &default_scalar_aggregate_options);
            KCHECK_OK(
                    func->add_kernel(MakeFirstOrLastKernel<FirstOrLast::First>(first_last_func)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_last", Arity::Binary(), hash_last_doc, &default_scalar_aggregate_options);
            KCHECK_OK(func->add_kernel(MakeFirstOrLastKernel<FirstOrLast::Last>(first_last_func)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        // hash_min / hash_max reuse the hash_min_max kernels.
        HashAggregateFunction *min_max_func = nullptr;
        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_min_max", Arity::Binary(), hash_min_max_doc,
                    &default_scalar_aggregate_options);
            KCHECK_OK(AddHashAggKernels(numeric_types(), GroupedMinMaxFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(temporal_types(), GroupedMinMaxFactory::create, func.get()));
            KCHECK_OK(
                    AddHashAggKernels(base_binary_types(), GroupedMinMaxFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({null(), boolean(), decimal128(1, 1), decimal256(1, 1),
                                         month_interval(), fixed_size_binary(1)},
                                        GroupedMinMaxFactory::create, func.get()));
            min_max_func = func.get();
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_min", Arity::Binary(), hash_min_or_max_doc,
                    &default_scalar_aggregate_options);
            KCHECK_OK(func->add_kernel(MakeMinOrMaxKernel<MinOrMax::Min>(min_max_func)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_max", Arity::Binary(), hash_min_or_max_doc,
                    &default_scalar_aggregate_options);
            KCHECK_OK(func->add_kernel(MakeMinOrMaxKernel<MinOrMax::Max>(min_max_func)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_any", Arity::Binary(), hash_any_doc, &default_scalar_aggregate_options);
            KCHECK_OK(func->add_kernel(MakeKernel(boolean(), HashAggregateInit<GroupedAnyImpl>)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_all", Arity::Binary(), hash_all_doc, &default_scalar_aggregate_options);
            KCHECK_OK(func->add_kernel(MakeKernel(boolean(), HashAggregateInit<GroupedAllImpl>)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_count_distinct", Arity::Binary(), hash_count_distinct_doc,
                    &default_count_options);
            KCHECK_OK(func->add_kernel(
                    MakeKernel(InputType::Any(), GroupedDistinctInit<GroupedCountDistinctImpl>)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>(
                    "hash_distinct", Arity::Binary(), hash_distinct_doc, &default_count_options);
            KCHECK_OK(func->add_kernel(
                    MakeKernel(InputType::Any(), GroupedDistinctInit<GroupedDistinctImpl>)));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>("hash_one", Arity::Binary(),
                                                                hash_one_doc);
            KCHECK_OK(AddHashAggKernels(numeric_types(), GroupedOneFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(temporal_types(), GroupedOneFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(base_binary_types(), GroupedOneFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({null(), boolean(), decimal128(1, 1), decimal256(1, 1),
                                         month_interval(), fixed_size_binary(1)},
                                        GroupedOneFactory::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }

        {
            auto func = std::make_shared<HashAggregateFunction>("hash_list", Arity::Binary(),
                                                                hash_list_doc);
            KCHECK_OK(AddHashAggKernels(numeric_types(), GroupedListFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(temporal_types(), GroupedListFactory::create, func.get()));
            KCHECK_OK(AddHashAggKernels(base_binary_types(), GroupedListFactory::create, func.get()));
            // Type parameters are ignored
            KCHECK_OK(AddHashAggKernels({null(), boolean(), decimal128(1, 1), decimal256(1, 1),
                                         month_interval(), fixed_size_binary(1)},
                                        GroupedListFactory::create, func.get()));
            KCHECK_OK(registry->add_function(std::move(func)));
        }
    }

}  // namespace nebula
