// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/compute/api_aggregate.h>
#include <nebula/compute/kernels/aggregate_basic_internal.h>
#include <nebula/compute/kernels/aggregate_internal.h>
#include <nebula/compute/kernels/common_internal.h>
#include <nebula/compute/kernels/util_internal.h>
#include <nebula/util/cpu_info.h>
#include <nebula/util/hashing.h>

#include <memory>

namespace nebula::compute::internal {

    namespace {

        // Trampoline: forward a batch to the ScalarAggregator held as kernel state.
        turbo::Status AggregateConsume(KernelContext *ctx, const ExecSpan &batch) {
            auto *aggregator = turbo::checked_cast<ScalarAggregator *>(ctx->state());
            return aggregator->consume(ctx, batch);
        }

        // Trampoline: fold the source state into the destination aggregator.
        turbo::Status AggregateMerge(KernelContext *ctx, KernelState &&src, KernelState *dst) {
            auto *dst_aggregator = turbo::checked_cast<ScalarAggregator *>(dst);
            return dst_aggregator->MergeFrom(ctx, std::move(src));
        }

        // Trampoline: ask the ScalarAggregator state to produce its final Datum.
        turbo::Status AggregateFinalize(KernelContext *ctx, Datum *out) {
            auto *aggregator = turbo::checked_cast<ScalarAggregator *>(ctx->state());
            return aggregator->Finalize(ctx, out);
        }

    }  // namespace

    // Registers a scalar aggregate kernel on `func` using the shared
    // consume/merge/finalize trampolines defined above.
    void add_agg_kernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
                      ScalarAggregateFunction *func, SimdLevel::type simd_level,
                      const bool ordered) {
        ScalarAggregateKernel kernel(std::move(sig), std::move(init), AggregateConsume,
                                     AggregateMerge, AggregateFinalize, ordered);
        kernel.simd_level = simd_level;  // record which SIMD variant this kernel is
        KCHECK_OK(func->add_kernel(std::move(kernel)));
    }

    // Registers a scalar aggregate kernel with a caller-supplied finalizer in
    // place of the default AggregateFinalize trampoline.
    void add_agg_kernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
                      ScalarAggregateFinalize finalize, ScalarAggregateFunction *func,
                      SimdLevel::type simd_level, const bool ordered) {
        ScalarAggregateKernel kernel(std::move(sig), std::move(init), AggregateConsume,
                                     AggregateMerge, std::move(finalize), ordered);
        kernel.simd_level = simd_level;  // record which SIMD variant this kernel is
        KCHECK_OK(func->add_kernel(std::move(kernel)));
    }

    namespace {

        // ----------------------------------------------------------------------
        // Count implementations

        // Counts every input row, valid and null alike.
        struct CountAllImpl : public ScalarAggregator {
            turbo::Status consume(KernelContext *, const ExecSpan &batch) override {
                count += batch.length;
                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                count += turbo::checked_cast<const CountAllImpl &>(src).count;
                return turbo::OkStatus();
            }

            turbo::Status Finalize(KernelContext *ctx, Datum *out) override {
                const auto &final_state = turbo::checked_cast<const CountAllImpl &>(*ctx->state());
                *out = Datum(final_state.count);
                return turbo::OkStatus();
            }

            // Total number of rows consumed so far.
            int64_t count = 0;
        };

        // Counts valid and null rows separately; which tally is reported is
        // decided by CountOptions::mode at finalize time.
        struct CountImpl : public ScalarAggregator {
            explicit CountImpl(CountOptions options) : options(std::move(options)) {}

            turbo::Status consume(KernelContext *, const ExecSpan &batch) override {
                if (options.mode == CountOptions::ALL) {
                    // ALL mode never needs the null count, so skip computing it.
                    non_nulls += batch.length;
                    return turbo::OkStatus();
                }
                if (batch[0].is_array()) {
                    const ArraySpan &input = batch[0].array;
                    const int64_t null_count = input.get_null_count();
                    nulls += null_count;
                    non_nulls += input.length - null_count;
                } else {
                    // A scalar stands for batch.length identical rows.
                    const Scalar &input = *batch[0].scalar;
                    if (input.is_valid) {
                        non_nulls += batch.length;
                    } else {
                        nulls += batch.length;
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                const auto &other_state = turbo::checked_cast<const CountImpl &>(src);
                non_nulls += other_state.non_nulls;
                nulls += other_state.nulls;
                return turbo::OkStatus();
            }

            turbo::Status Finalize(KernelContext *ctx, Datum *out) override {
                const auto &state = turbo::checked_cast<const CountImpl &>(*ctx->state());
                switch (state.options.mode) {
                    case CountOptions::ONLY_VALID:
                    case CountOptions::ALL:
                        // ALL is equivalent since we don't count the null/non-null
                        // separately to avoid potentially computing null count
                        *out = Datum(state.non_nulls);
                        break;
                    case CountOptions::ONLY_NULL:
                        *out = Datum(state.nulls);
                        break;
                    default:
                                DKCHECK(false) << "unreachable";
                }
                return turbo::OkStatus();
            }

            CountOptions options;
            int64_t non_nulls = 0;  // valid rows seen (all rows in ALL mode)
            int64_t nulls = 0;      // null rows seen (untouched in ALL mode)
        };

        // count_all needs no options: the state is just a row counter.
        turbo::Result<std::unique_ptr<KernelState>> CountAllInit(KernelContext *,
                                                                 const KernelInitArgs &) {
            return std::make_unique<CountAllImpl>();
        }

        // Builds a CountImpl from the CountOptions carried in the init args.
        turbo::Result<std::unique_ptr<KernelState>> CountInit(KernelContext *,
                                                              const KernelInitArgs &args) {
            const auto &count_options = static_cast<const CountOptions &>(*args.options);
            return std::make_unique<CountImpl>(count_options);
        }

        // ----------------------------------------------------------------------
        // Distinct Count implementation

        // Counts distinct non-null values by memoizing every value seen in a
        // hash (memo) table; the presence of nulls is tracked as a flag so the
        // single "null" distinct value can be added/reported per CountOptions.
        template<typename Type, typename VisitorArgType>
        struct CountDistinctImpl : public ScalarAggregator {
            using MemoTable = typename nebula::internal::HashTraits<Type>::MemoTableType;

            explicit CountDistinctImpl(MemoryPool *memory_pool, CountOptions options)
                    : options(std::move(options)), memo_table_(new MemoTable(memory_pool, 0)) {}

            turbo::Status consume(KernelContext *, const ExecSpan &batch) override {
                if (batch[0].is_array()) {
                    const ArraySpan &arr = batch[0].array;
                    // Accumulate rather than overwrite: consume() may be called once
                    // per batch on the same state, and a null observed in an earlier
                    // batch must not be forgotten (matches how MergeFrom combines it).
                    this->has_nulls = this->has_nulls || arr.get_null_count() > 0;

                    auto visit_null = []() { return turbo::OkStatus(); };
                    auto visit_value = [&](VisitorArgType arg) {
                        int32_t y;
                        return memo_table_->GetOrInsert(arg, &y);
                    };
                    TURBO_RETURN_NOT_OK(VisitArraySpanInline<Type>(arr, visit_value, visit_null));
                } else {
                    const Scalar &input = *batch[0].scalar;
                    // Same accumulation rule for the scalar path.
                    this->has_nulls = this->has_nulls || !input.is_valid;

                    if (input.is_valid) {
                        int32_t unused;
                        TURBO_RETURN_NOT_OK(memo_table_->GetOrInsert(UnboxScalar<Type>::Unbox(input), &unused));
                    }
                }

                // The memo table deduplicates, so its size is the running distinct count.
                this->non_nulls = memo_table_->size();
                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                const auto &other_state = turbo::checked_cast<const CountDistinctImpl &>(src);
                TURBO_RETURN_NOT_OK(this->memo_table_->MergeTable(*(other_state.memo_table_)));
                this->non_nulls = this->memo_table_->size();
                this->has_nulls = this->has_nulls || other_state.has_nulls;
                return turbo::OkStatus();
            }

            turbo::Status Finalize(KernelContext *ctx, Datum *out) override {
                const auto &state = turbo::checked_cast<const CountDistinctImpl &>(*ctx->state());
                // Null contributes at most one distinct value.
                const int64_t nulls = state.has_nulls ? 1 : 0;
                switch (state.options.mode) {
                    case CountOptions::ONLY_VALID:
                        *out = Datum(state.non_nulls);
                        break;
                    case CountOptions::ALL:
                        *out = Datum(state.non_nulls + nulls);
                        break;
                    case CountOptions::ONLY_NULL:
                        *out = Datum(nulls);
                        break;
                    default:
                                DKCHECK(false) << "unreachable";
                }
                return turbo::OkStatus();
            }

            const CountOptions options;
            int64_t non_nulls = 0;   // distinct non-null values seen so far
            bool has_nulls = false;  // whether any null has ever been observed
            std::unique_ptr<MemoTable> memo_table_;
        };

        // Builds a distinct-count state whose memo table allocates from the
        // kernel context's memory pool.
        template<typename Type, typename VisitorArgType>
        turbo::Result<std::unique_ptr<KernelState>> CountDistinctInit(KernelContext *ctx,
                                                                      const KernelInitArgs &args) {
            const auto &count_options = static_cast<const CountOptions &>(*args.options);
            return std::make_unique<CountDistinctImpl<Type, VisitorArgType>>(ctx->memory_pool(),
                                                                             count_options);
        }

        // Registers one count_distinct kernel: input of `type`, int64 count out.
        template<typename Type, typename VisitorArgType = typename Type::c_type>
        void AddCountDistinctKernel(InputType type, ScalarAggregateFunction *func) {
            auto sig = KernelSignature::create({type}, int64());
            add_agg_kernel(std::move(sig), CountDistinctInit<Type, VisitorArgType>, func);
        }

        // Registers a count_distinct kernel for every supported physical type.
        // Concrete types use their singleton; parameterized types (time,
        // timestamp, duration, binary-like, fixed-size-binary/decimal) are
        // registered via type-id or shape matchers instead.
        void AddCountDistinctKernels(ScalarAggregateFunction *func) {
            // Boolean
            AddCountDistinctKernel<BooleanType>(boolean(), func);
            // Number
            AddCountDistinctKernel<Int8Type>(int8(), func);
            AddCountDistinctKernel<Int16Type>(int16(), func);
            AddCountDistinctKernel<Int32Type>(int32(), func);
            AddCountDistinctKernel<Int64Type>(int64(), func);
            AddCountDistinctKernel<UInt8Type>(uint8(), func);
            AddCountDistinctKernel<UInt16Type>(uint16(), func);
            AddCountDistinctKernel<UInt32Type>(uint32(), func);
            AddCountDistinctKernel<UInt64Type>(uint64(), func);
            AddCountDistinctKernel<Fp16Type>(float16(), func);
            AddCountDistinctKernel<Fp32Type>(float32(), func);
            AddCountDistinctKernel<Fp64Type>(float64(), func);
            // Date
            AddCountDistinctKernel<Date32Type>(date32(), func);
            AddCountDistinctKernel<Date64Type>(date64(), func);
            // Time
            AddCountDistinctKernel<Time32Type>(match::SameTypeId(Type::TIME32), func);
            AddCountDistinctKernel<Time64Type>(match::SameTypeId(Type::TIME64), func);
            // Timestamp & Duration
            AddCountDistinctKernel<TimestampType>(match::SameTypeId(Type::TIMESTAMP), func);
            AddCountDistinctKernel<DurationType>(match::SameTypeId(Type::DURATION), func);
            // Interval
            AddCountDistinctKernel<MonthIntervalType>(month_interval(), func);
            AddCountDistinctKernel<DayTimeIntervalType>(day_time_interval(), func);
            AddCountDistinctKernel<MonthDayNanoIntervalType>(month_day_nano_interval(), func);
            // Binary & String — hashed as string_view rather than a c_type
            AddCountDistinctKernel<BinaryType, std::string_view>(match::BinaryLike(), func);
            AddCountDistinctKernel<LargeBinaryType, std::string_view>(match::LargeBinaryLike(),
                                                                      func);
            // Fixed binary & Decimal
            AddCountDistinctKernel<FixedSizeBinaryType, std::string_view>(
                    match::FixedSizeBinaryLike(), func);
        }

        // ----------------------------------------------------------------------
        // Sum implementation

        // Sum aggregator pinned to the scalar (non-SIMD) code path; inherits
        // all behavior from SumImpl with SimdLevel::NONE.
        template<typename ArrowType>
        struct SumImplDefault : public SumImpl<ArrowType, SimdLevel::NONE> {
            using SumImpl<ArrowType, SimdLevel::NONE>::SumImpl;
        };

        // Mean aggregator pinned to the scalar (non-SIMD) code path; inherits
        // all behavior from MeanImpl with SimdLevel::NONE.
        template<typename ArrowType>
        struct MeanImplDefault : public MeanImpl<ArrowType, SimdLevel::NONE> {
            using MeanImpl<ArrowType, SimdLevel::NONE>::MeanImpl;
        };

        // Type-dispatches to the appropriate non-SIMD sum state for the input.
        turbo::Result<std::unique_ptr<KernelState>> SumInit(KernelContext *ctx,
                                                            const KernelInitArgs &args) {
            const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
            SumLikeInit<SumImplDefault> visitor(ctx, args.inputs[0].get_shared_ptr(), options);
            return visitor.create();
        }

        // Type-dispatches to the appropriate non-SIMD mean state for the input.
        turbo::Result<std::unique_ptr<KernelState>> MeanInit(KernelContext *ctx,
                                                             const KernelInitArgs &args) {
            const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
            MeanKernelInit<MeanImplDefault> visitor(ctx, args.inputs[0].get_shared_ptr(), options);
            return visitor.create();
        }

        // ----------------------------------------------------------------------
        // Product implementation

        using nebula::compute::internal::to_unsigned;

        // Aggregates the product of all values. Accumulation happens in a wider
        // type (AccType, chosen by FindAccumulatorType); MultiplyTraits supplies
        // the identity element and the multiply so parameterized types (e.g.
        // decimals) are handled by the traits class rather than raw `*`.
        template<typename ArrowType>
        struct ProductImpl : public ScalarAggregator {
            using ThisType = ProductImpl<ArrowType>;
            using AccType = typename FindAccumulatorType<ArrowType>::Type;
            using ProductType = typename TypeTraits<AccType>::CType;
            using OutputType = typename TypeTraits<AccType>::ScalarType;

            // Starts from the multiplicative identity of the output type.
            explicit ProductImpl(std::shared_ptr<DataType> out_type,
                                 const ScalarAggregateOptions &options)
                    : out_type(out_type),
                      options(options),
                      count(0),
                      product(MultiplyTraits<AccType>::one(*out_type)),
                      nulls_observed(false) {}

            turbo::Status consume(KernelContext *, const ExecSpan &batch) override {
                if (batch[0].is_array()) {
                    const ArraySpan &data = batch[0].array;
                    this->count += data.length - data.get_null_count();
                    // Accumulated with ||: once a null is observed it stays observed.
                    this->nulls_observed = this->nulls_observed || data.get_null_count();

                    if (!options.skip_nulls && this->nulls_observed) {
                        // Short-circuit
                        return turbo::OkStatus();
                    }

                    internal::VisitArrayValuesInline<ArrowType>(
                            data,
                            [&](typename TypeTraits<ArrowType>::CType value) {
                                this->product =
                                        MultiplyTraits<AccType>::Multiply(*out_type, this->product, value);
                            },
                            [] {});
                } else {
                    // Scalar input: a valid scalar contributes its value once per row,
                    // i.e. batch.length times.
                    const Scalar &data = *batch[0].scalar;
                    this->count += data.is_valid * batch.length;
                    this->nulls_observed = this->nulls_observed || !data.is_valid;
                    if (data.is_valid) {
                        for (int64_t i = 0; i < batch.length; i++) {
                            auto value = internal::UnboxScalar<ArrowType>::Unbox(data);
                            this->product =
                                    MultiplyTraits<AccType>::Multiply(*out_type, this->product, value);
                        }
                    }
                }
                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                const auto &other = turbo::checked_cast<const ThisType &>(src);
                this->count += other.count;
                // Products of disjoint partitions combine by multiplication.
                this->product =
                        MultiplyTraits<AccType>::Multiply(*out_type, this->product, other.product);
                this->nulls_observed = this->nulls_observed || other.nulls_observed;
                return turbo::OkStatus();
            }

            // Emits a null scalar when nulls were seen (and skip_nulls is off) or
            // when fewer than options.min_count valid values contributed.
            turbo::Status Finalize(KernelContext *, Datum *out) override {
                if ((!options.skip_nulls && this->nulls_observed) ||
                    (this->count < options.min_count)) {
                    out->value = std::make_shared<OutputType>(out_type);
                } else {
                    out->value = std::make_shared<OutputType>(this->product, out_type);
                }
                return turbo::OkStatus();
            }

            std::shared_ptr<DataType> out_type;
            ScalarAggregateOptions options;
            size_t count;           // number of valid values consumed
            ProductType product;    // running product in accumulator precision
            bool nulls_observed;    // whether any null was ever seen
        };

        // Product over a NullType input: every row is null, so the only output
        // ever produced for the "empty" case is the multiplicative identity.
        struct NullProductImpl : public NullImpl<Int64Type> {
            explicit NullProductImpl(const ScalarAggregateOptions &opts)
                    : NullImpl<Int64Type>(opts) {}

            std::shared_ptr<Scalar> output_empty() override {
                // Empty product == 1.
                auto identity = std::make_shared<Int64Scalar>(1);
                return identity;
            }
        };

        // Type visitor that constructs the right ProductImpl for the input type.
        // Unsupported types (including half-float) report unimplemented.
        struct ProductInit {
            std::unique_ptr<KernelState> state;   // filled in by the Visit overloads
            KernelContext *ctx;
            std::shared_ptr<DataType> type;       // the input type being dispatched on
            const ScalarAggregateOptions &options;

            ProductInit(KernelContext *ctx, std::shared_ptr<DataType> type,
                        const ScalarAggregateOptions &options)
                    : ctx(ctx), type(type), options(options) {}

            // Fallback for any type without a dedicated overload below.
            turbo::Status Visit(const DataType &) {
                return turbo::unimplemented_error("No product implemented");
            }

            // Half-float is explicitly unsupported.
            turbo::Status Visit(const Fp16Type &) {
                return turbo::unimplemented_error("No product implemented");
            }

            turbo::Status Visit(const BooleanType &) {
                // Booleans accumulate in their (wider) accumulator type.
                auto ty = TypeTraits<typename ProductImpl<BooleanType>::AccType>::type_singleton();
                state.reset(new ProductImpl<BooleanType>(ty, options));
                return turbo::OkStatus();
            }

            // Numeric types accumulate in the accumulator type's singleton.
            template<typename Type>
            enable_if_number<Type, turbo::Status> Visit(const Type &) {
                auto ty = TypeTraits<typename ProductImpl<Type>::AccType>::type_singleton();
                state.reset(new ProductImpl<Type>(ty, options));
                return turbo::OkStatus();
            }

            // Decimals keep the concrete input type (it carries precision/scale).
            template<typename Type>
            enable_if_decimal<Type, turbo::Status> Visit(const Type &) {
                state.reset(new ProductImpl<Type>(type, options));
                return turbo::OkStatus();
            }

            turbo::Status Visit(const NullType &) {
                state.reset(new NullProductImpl(options));
                return turbo::OkStatus();
            }

            turbo::Result<std::unique_ptr<KernelState>> create() {
                TURBO_RETURN_NOT_OK(visit_type_inline(*type, this));
                return std::move(state);
            }

            // KernelInit entry point registered with the product function.
            static turbo::Result<std::unique_ptr<KernelState>> init(KernelContext *ctx,
                                                                    const KernelInitArgs &args) {
                ProductInit visitor(ctx, args.inputs[0].get_shared_ptr(),
                                    static_cast<const ScalarAggregateOptions &>(*args.options));
                return visitor.create();
            }
        };

        // ----------------------------------------------------------------------
        // FirstLast implementation

        // Resolves the declared output type, then lets the visitor construct the
        // type-specific first/last state.
        turbo::Result<std::unique_ptr<KernelState>> FirstLastInit(KernelContext *ctx,
                                                                  const KernelInitArgs &args) {
            TURBO_MOVE_OR_RAISE(TypeHolder out_type,
                                args.kernel->signature->out_type().resolve(ctx, args.inputs));
            const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
            FirstLastInitState visitor(ctx, *args.inputs[0], out_type.get_shared_ptr(), options);
            return visitor.create();
        }

        // For "first" and "last" functions: override finalize and return the actual value
        template<FirstOrLast first_or_last>
        void AddFirstOrLastAggKernel(ScalarAggregateFunction *func,
                                     ScalarAggregateFunction *first_last_func) {
            auto sig = KernelSignature::create({InputType::Any()}, FirstType);
            auto init = [first_last_func](
                    KernelContext *ctx,
                    const KernelInitArgs &args) -> turbo::Result<std::unique_ptr<KernelState>> {
                TURBO_MOVE_OR_RAISE(auto kernel, first_last_func->dispatch_exact(args.inputs));
                KernelInitArgs new_args{kernel, args.inputs, args.options};
                return kernel->init(ctx, new_args);
            };

            auto finalize = [](KernelContext *ctx, Datum *out) -> turbo::Status {
                Datum temp;
                TURBO_RETURN_NOT_OK(turbo::checked_cast<ScalarAggregator *>(ctx->state())->Finalize(ctx, &temp));
                const auto &result = temp.scalar_as<StructScalar>();
                        DKCHECK(result.is_valid);
                *out = result.value[static_cast<uint8_t>(first_or_last)];
                return turbo::OkStatus();
            };

            add_agg_kernel(std::move(sig), std::move(init), std::move(finalize), func,
                         SimdLevel::NONE, /*ordered=*/true);
        }

        // ----------------------------------------------------------------------
        // MinMax implementation

        // Resolves the declared output type, then lets the visitor construct the
        // type-specific min/max state (non-SIMD path).
        turbo::Result<std::unique_ptr<KernelState>> MinMaxInit(KernelContext *ctx,
                                                               const KernelInitArgs &args) {
            TURBO_MOVE_OR_RAISE(TypeHolder out_type,
                                args.kernel->signature->out_type().resolve(ctx, args.inputs));
            const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
            MinMaxInitState<SimdLevel::NONE> visitor(ctx, *args.inputs[0],
                                                     out_type.get_shared_ptr(), options);
            return visitor.create();
        }

        // For "min" and "max" functions: override finalize and return the actual value
        template<MinOrMax min_or_max>
        void AddMinOrMaxAggKernel(ScalarAggregateFunction *func,
                                  ScalarAggregateFunction *min_max_func) {
            auto sig = KernelSignature::create({InputType::Any()}, FirstType);
            auto init = [min_max_func](
                    KernelContext *ctx,
                    const KernelInitArgs &args) -> turbo::Result<std::unique_ptr<KernelState>> {
                TURBO_MOVE_OR_RAISE(auto kernel, min_max_func->dispatch_exact(args.inputs));
                KernelInitArgs new_args{kernel, args.inputs, args.options};
                return kernel->init(ctx, new_args);
            };

            auto finalize = [](KernelContext *ctx, Datum *out) -> turbo::Status {
                Datum temp;
                TURBO_RETURN_NOT_OK(turbo::checked_cast<ScalarAggregator *>(ctx->state())->Finalize(ctx, &temp));
                const auto &result = temp.scalar_as<StructScalar>();
                        DKCHECK(result.is_valid);
                *out = result.value[static_cast<uint8_t>(min_or_max)];
                return turbo::OkStatus();
            };

            // Note SIMD level is always NONE, but the convenience kernel will
            // dispatch to an appropriate implementation
            add_agg_kernel(std::move(sig), std::move(init), std::move(finalize), func);
        }

        // ----------------------------------------------------------------------
        // Any implementation

        // "any" aggregate: true iff at least one valid input value is true,
        // subject to skip_nulls / min_count from ScalarAggregateOptions.
        struct BooleanAnyImpl : public ScalarAggregator {
            explicit BooleanAnyImpl(ScalarAggregateOptions options) : options(std::move(options)) {}

            turbo::Status consume(KernelContext *, const ExecSpan &batch) override {
                // short-circuit if seen a True already
                if (this->any == true && this->count >= options.min_count) {
                    return turbo::OkStatus();
                }
                if (batch[0].is_scalar()) {
                    const Scalar &scalar = *batch[0].scalar;
                    // Accumulate across batches instead of overwriting: a null or a
                    // true observed by an earlier consume() call on this state must
                    // not be erased by a later batch (the short-circuit above only
                    // fires once min_count has also been reached).
                    this->has_nulls = this->has_nulls || !scalar.is_valid;
                    this->any = this->any ||
                                (scalar.is_valid &&
                                 turbo::checked_cast<const BooleanScalar &>(scalar).value);
                    this->count += scalar.is_valid;
                    return turbo::OkStatus();
                }
                const ArraySpan &data = batch[0].array;
                this->has_nulls = this->has_nulls || data.get_null_count() > 0;
                this->count += data.length - data.get_null_count();
                // Scan validity AND values blockwise; stop at the first set bit.
                nebula::internal::OptionalBinaryBitBlockCounter counter(
                        data.buffers[0].data, data.offset, data.buffers[1].data, data.offset,
                        data.length);
                int64_t position = 0;
                while (position < data.length) {
                    const auto block = counter.NextAndBlock();
                    if (block.popcount > 0) {
                        this->any = true;
                        break;
                    }
                    position += block.length;
                }
                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                const auto &other = turbo::checked_cast<const BooleanAnyImpl &>(src);
                this->any |= other.any;
                this->has_nulls |= other.has_nulls;
                this->count += other.count;
                return turbo::OkStatus();
            }

            // Null result when the outcome is indeterminate (nulls present, not
            // skipped, and no true seen) or too few valid values were observed.
            turbo::Status Finalize(KernelContext *ctx, Datum *out) override {
                if ((!options.skip_nulls && !this->any && this->has_nulls) ||
                    this->count < options.min_count) {
                    out->value = std::make_shared<BooleanScalar>();
                } else {
                    out->value = std::make_shared<BooleanScalar>(this->any);
                }
                return turbo::OkStatus();
            }

            bool any = false;        // whether any valid true has been seen
            bool has_nulls = false;  // whether any null has been seen
            int64_t count = 0;       // number of valid values seen
            ScalarAggregateOptions options;
        };

        // Builds the "any" state; only ScalarAggregateOptions (skip_nulls /
        // min_count) is needed. (Removed a dead local copy of the options that
        // was constructed and never used.)
        turbo::Result<std::unique_ptr<KernelState>> AnyInit(KernelContext *, const KernelInitArgs &args) {
            return std::make_unique<BooleanAnyImpl>(
                    static_cast<const ScalarAggregateOptions &>(*args.options));
        }

        // ----------------------------------------------------------------------
        // All implementation

        // "all" aggregate: true iff every valid input value is true, subject to
        // skip_nulls / min_count from ScalarAggregateOptions.
        struct BooleanAllImpl : public ScalarAggregator {
            explicit BooleanAllImpl(ScalarAggregateOptions options) : options(std::move(options)) {}

            turbo::Status consume(KernelContext *, const ExecSpan &batch) override {
                // short-circuit if seen a false already
                if (this->all == false && this->count >= options.min_count) {
                    return turbo::OkStatus();
                }
                // short-circuit if seen a null already
                if (!options.skip_nulls && this->has_nulls) {
                    return turbo::OkStatus();
                }
                if (batch[0].is_scalar()) {
                    const Scalar &scalar = *batch[0].scalar;
                    // Accumulate across batches instead of overwriting: a false or a
                    // null observed by an earlier consume() call on this state must
                    // not be erased by a later batch (the short-circuits above only
                    // cover a subset of option combinations).
                    this->has_nulls = this->has_nulls || !scalar.is_valid;
                    this->count += scalar.is_valid;
                    this->all = this->all &&
                                (!scalar.is_valid ||
                                 turbo::checked_cast<const BooleanScalar &>(scalar).value);
                    return turbo::OkStatus();
                }
                const ArraySpan &data = batch[0].array;
                this->has_nulls = this->has_nulls || data.get_null_count() > 0;
                this->count += data.length - data.get_null_count();
                // Scan values OR NOT validity blockwise; a block that is not fully
                // set contains a valid false.
                nebula::internal::OptionalBinaryBitBlockCounter counter(
                        data.buffers[1].data, data.offset, data.buffers[0].data, data.offset,
                        data.length);
                int64_t position = 0;
                while (position < data.length) {
                    const auto block = counter.NextOrNotBlock();
                    if (!block.AllSet()) {
                        this->all = false;
                        break;
                    }
                    position += block.length;
                }

                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                const auto &other = turbo::checked_cast<const BooleanAllImpl &>(src);
                this->all &= other.all;
                this->has_nulls |= other.has_nulls;
                this->count += other.count;
                return turbo::OkStatus();
            }

            // Null result when the outcome is indeterminate (nulls present, not
            // skipped, and no false seen) or too few valid values were observed.
            turbo::Status Finalize(KernelContext *, Datum *out) override {
                if ((!options.skip_nulls && this->all && this->has_nulls) ||
                    this->count < options.min_count) {
                    out->value = std::make_shared<BooleanScalar>();
                } else {
                    out->value = std::make_shared<BooleanScalar>(this->all);
                }
                return turbo::OkStatus();
            }

            bool all = true;         // whether every valid value so far was true
            bool has_nulls = false;  // whether any null has been seen
            int64_t count = 0;       // number of valid values seen
            ScalarAggregateOptions options;
        };

        // Builds the "all" state; mirrors AnyInit — only the options are needed.
        turbo::Result<std::unique_ptr<KernelState>> AllInit(KernelContext *, const KernelInitArgs &args) {
            const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
            return std::make_unique<BooleanAllImpl>(options);
        }

        // ----------------------------------------------------------------------
        // Index implementation

        // "index" aggregate: position of the first occurrence of
        // options.value within the input, or -1 if absent.
        template<typename ArgType>
        struct IndexImpl : public ScalarAggregator {
            using ArgValue = typename internal::GetViewType<ArgType>::T;

            // raw_state, when non-null, seeds this state from a prior one so the
            // running position survives re-initialization.
            explicit IndexImpl(IndexOptions options, KernelState *raw_state)
                    : options(std::move(options)), seen(0), index(-1) {
                if (auto state = static_cast<IndexImpl<ArgType> *>(raw_state)) {
                    seen = state->seen;
                    index = state->index;
                }
            }

            turbo::Status consume(KernelContext *ctx, const ExecSpan &batch) override {
                // short-circuit
                if (index >= 0 || !options.value->is_valid) {
                    return turbo::OkStatus();
                }

                const ArgValue desired = internal::UnboxScalar<ArgType>::Unbox(*options.value);

                if (batch[0].is_scalar()) {
                    // NOTE(review): `seen` is assigned, not accumulated, per consume;
                    // cross-batch positioning appears to rely on MergeFrom — confirm
                    // for states that consume multiple batches.
                    seen = batch.length;
                    if (batch[0].scalar->is_valid) {
                        const ArgValue v = internal::UnboxScalar<ArgType>::Unbox(*batch[0].scalar);
                        if (v == desired) {
                            index = 0;
                            // Cancelled is used as a "stop early, match found" signal;
                            // presumably the executor treats it as benign — confirm.
                            return turbo::cancelled_error("Found");
                        }
                    }
                    return turbo::OkStatus();
                }

                const ArraySpan &input = batch[0].array;
                seen = input.length;
                int64_t i = 0;

                // The visitor's Cancelled status only breaks the scan; it is
                // deliberately discarded here and OkStatus is returned instead.
                TURBO_UNUSED(internal::VisitArrayValuesInline<ArgType>(
                        input,
                        [&](ArgValue v) -> turbo::Status {
                            if (v == desired) {
                                index = i;
                                return turbo::cancelled_error("Found");
                            } else {
                                ++i;
                                return turbo::OkStatus();
                            }
                        },
                        [&]() -> turbo::Status {
                            // Nulls never match but still advance the position.
                            ++i;
                            return turbo::OkStatus();
                        }));

                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&src) override {
                const auto &other = turbo::checked_cast<const IndexImpl &>(src);
                // Offset the other partition's hit by the rows this state has seen.
                if (index < 0 && other.index >= 0) {
                    index = seen + other.index;
                }
                seen += other.seen;
                return turbo::OkStatus();
            }

            turbo::Status Finalize(KernelContext *, Datum *out) override {
                out->value = std::make_shared<Int64Scalar>(index >= 0 ? index : -1);
                return turbo::OkStatus();
            }

            const IndexOptions options;
            int64_t seen = 0;    // rows examined by this state
            int64_t index = -1;  // position of first match, -1 while unfound
        };

        // Specialization for null-typed input: a null array contains no
        // values, so nothing can ever match. Every operation is a no-op and
        // Finalize always yields -1 ("not found").
        template<>
        struct IndexImpl<NullType> : public ScalarAggregator {
            explicit IndexImpl(IndexOptions, KernelState *) {}

            turbo::Status consume(KernelContext *, const ExecSpan &) override {
                return turbo::OkStatus();
            }

            turbo::Status MergeFrom(KernelContext *, KernelState &&) override {
                return turbo::OkStatus();
            }

            turbo::Status Finalize(KernelContext *, Datum *out) override {
                out->value = std::make_shared<Int64Scalar>(-1);
                return turbo::OkStatus();
            }
        };

        // Type-dispatching factory for IndexImpl kernel states. Visits the
        // input DataType and instantiates the matching IndexImpl<T>; types
        // without a Visit overload fall through to the unimplemented-error
        // catch-all.
        struct IndexInit {
            std::unique_ptr<KernelState> state;
            KernelContext *ctx;
            const IndexOptions &options;
            const DataType &type;

            IndexInit(KernelContext *ctx, const IndexOptions &options, const DataType &type)
                    : ctx(ctx), options(options), type(type) {}

            // Catch-all for types the index kernel does not support.
            turbo::Status Visit(const DataType &type) {
                return turbo::unimplemented_error("Index kernel not implemented for ", type.to_string());
            }

            turbo::Status Visit(const NullType &) {
                state.reset(new IndexImpl<NullType>(options, ctx->state()));
                return turbo::OkStatus();
            }

            turbo::Status Visit(const BooleanType &) {
                state.reset(new IndexImpl<BooleanType>(options, ctx->state()));
                return turbo::OkStatus();
            }

            // Numeric types.
            template<typename Type>
            enable_if_number<Type, turbo::Status> Visit(const Type &) {
                state.reset(new IndexImpl<Type>(options, ctx->state()));
                return turbo::OkStatus();
            }

            // Binary / string (and their large variants).
            template<typename Type>
            enable_if_base_binary<Type, turbo::Status> Visit(const Type &) {
                state.reset(new IndexImpl<Type>(options, ctx->state()));
                return turbo::OkStatus();
            }

            turbo::Status Visit(const FixedSizeBinaryType &) {
                state.reset(new IndexImpl<FixedSizeBinaryType>(options, ctx->state()));
                return turbo::OkStatus();
            }

            // Decimal types.
            template<typename Type>
            enable_if_decimal<Type, turbo::Status> Visit(const Type &) {
                state.reset(new IndexImpl<Type>(options, ctx->state()));
                return turbo::OkStatus();
            }

            // Temporal types: date, time, timestamp.
            template<typename Type>
            enable_if_date<Type, turbo::Status> Visit(const Type &) {
                state.reset(new IndexImpl<Type>(options, ctx->state()));
                return turbo::OkStatus();
            }

            template<typename Type>
            enable_if_time<Type, turbo::Status> Visit(const Type &) {
                state.reset(new IndexImpl<Type>(options, ctx->state()));
                return turbo::OkStatus();
            }

            template<typename Type>
            enable_if_timestamp<Type, turbo::Status> Visit(const Type &) {
                state.reset(new IndexImpl<Type>(options, ctx->state()));
                return turbo::OkStatus();
            }

            // Runs the dispatch and releases the constructed state.
            turbo::Result<std::unique_ptr<KernelState>> create() {
                TURBO_RETURN_NOT_OK(visit_type_inline(type, this));
                return std::move(state);
            }

            // KernelInit entry point: validates IndexOptions (present, has a
            // value, value type matches the input type) before dispatching.
            static turbo::Result<std::unique_ptr<KernelState>> init(KernelContext *ctx,
                                                                    const KernelInitArgs &args) {
                if (!args.options) {
                    return turbo::invalid_argument_error("Must provide IndexOptions for index kernel");
                }
                const auto &options = static_cast<const IndexOptions &>(*args.options);
                if (!options.value) {
                    return turbo::invalid_argument_error("Must provide IndexOptions.value for index kernel");
                } else if (!options.value->type->equals(*args.inputs[0].type)) {
                    return turbo::failed_precondition_error("Expected IndexOptions.value to be of type ",
                                                            *args.inputs[0].type, ", but got ", *options.value->type);
                }
                IndexInit visitor(ctx, options, *args.inputs[0].type);
                return visitor.create();
            }
        };

    }  // namespace

    // Registers one array[InT] -> scalar[OutT] aggregate kernel on `func`
    // for every input type in `types`, all at the given SIMD level.
    void add_basic_agg_kernels(KernelInit init,
                            const std::vector<std::shared_ptr<DataType>> &types,
                            std::shared_ptr<DataType> out_ty, ScalarAggregateFunction *func,
                            SimdLevel::type simd_level) {
        for (const auto &input_type: types) {
            add_agg_kernel(KernelSignature::create({input_type->id()}, out_ty), init, func,
                         simd_level);
        }
    }

    // Registers a scalar-level aggregate kernel (SIMD disabled) on `func`
    // for every input type in `types`.
    void add_scalar_agg_kernels(KernelInit init,
                             const std::vector<std::shared_ptr<DataType>> &types,
                             std::shared_ptr<DataType> out_ty,
                             ScalarAggregateFunction *func) {
        for (const auto &input_type: types) {
            add_agg_kernel(KernelSignature::create({input_type->id()}, out_ty), init, func,
                         SimdLevel::NONE);
        }
    }

    // Convenience wrapper: registers both the array-input kernels (at the
    // requested SIMD level) and the scalar-input kernels (SIMD disabled).
    void add_array_scalar_agg_kernels(KernelInit init,
                                  const std::vector<std::shared_ptr<DataType>> &types,
                                  std::shared_ptr<DataType> out_ty,
                                  ScalarAggregateFunction *func,
                                  SimdLevel::type simd_level = SimdLevel::NONE) {
        add_basic_agg_kernels(init, types, out_ty, func, simd_level);
        add_scalar_agg_kernels(init, types, out_ty, func);
    }

    namespace {

        // Output type resolver for min_max: T -> struct<min: T, max: T>.
        turbo::Result<TypeHolder> min_max_type(KernelContext *, const std::vector<TypeHolder> &types) {
            const auto value_type = types.front().get_shared_ptr();
            return STRUCT({field("min", value_type), field("max", value_type)});
        }

    }  // namespace

    // Output type resolver for first_last: T -> struct<first: T, last: T>.
    turbo::Result<TypeHolder> first_last_type(KernelContext *, const std::vector<TypeHolder> &types) {
        const auto value_type = types.front().get_shared_ptr();
        return STRUCT({field("first", value_type), field("last", value_type)});
    }

    // Registers one first_last kernel on `func` for the given type id; the
    // output type is computed by first_last_type.
    void add_first_last_kernel(KernelInit init, internal::detail::GetTypeId get_id,
                            ScalarAggregateFunction *func, SimdLevel::type simd_level) {
        auto signature = KernelSignature::create({InputType(get_id.id)}, first_last_type);
        add_agg_kernel(std::move(signature), init, func, simd_level);
    }

    // Registers first_last kernels (SIMD disabled) for every type in `types`.
    void add_first_last_kernels(KernelInit init,
                             const std::vector<std::shared_ptr<DataType>> &types,
                             ScalarAggregateFunction *func) {
        for (const auto &input_type: types) {
            add_first_last_kernel(init, input_type, func, SimdLevel::NONE);
        }
    }

    // Registers one min_max kernel on `func` for the given type id; the
    // output type is computed by min_max_type.
    void add_min_max_kernel(KernelInit init, internal::detail::GetTypeId get_id,
                         ScalarAggregateFunction *func, SimdLevel::type simd_level) {
        auto signature = KernelSignature::create({InputType(get_id.id)}, min_max_type);
        add_agg_kernel(std::move(signature), init, func, simd_level);
    }

    // Registers min_max kernels at the given SIMD level for every type in
    // `types`.
    void add_min_max_kernels(KernelInit init,
                          const std::vector<std::shared_ptr<DataType>> &types,
                          ScalarAggregateFunction *func, SimdLevel::type simd_level) {
        for (const auto &input_type: types) {
            add_min_max_kernel(init, input_type, func, simd_level);
        }
    }

    namespace {

        // User-facing documentation strings for the functions registered by
        // register_scalar_aggregate_basic below.

        const FunctionDoc count_all_doc{
                "Count the number of rows", "This version of count takes no arguments.", {}};

        const FunctionDoc count_doc{"Count the number of null / non-null values",
                                    ("By default, only non-null values are counted.\n"
                                     "This can be changed through CountOptions."),
                                    {"array"},
                                    "CountOptions"};

        const FunctionDoc count_distinct_doc{"Count the number of unique values",
                                             ("By default, only non-null values are counted.\n"
                                              "This can be changed through CountOptions."),
                                             {"array"},
                                             "CountOptions"};

        const FunctionDoc sum_doc{
                "Compute the sum of a numeric array",
                ("Null values are ignored by default. Minimum count of non-null\n"
                 "values can be set and null is returned if too few are present.\n"
                 "This can be changed through ScalarAggregateOptions."),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc product_doc{
                "Compute the product of values in a numeric array",
                ("Null values are ignored by default. Minimum count of non-null\n"
                 "values can be set and null is returned if too few are present.\n"
                 "This can be changed through ScalarAggregateOptions."),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc mean_doc{
                "Compute the mean of a numeric array",
                ("Null values are ignored by default. Minimum count of non-null\n"
                 "values can be set and null is returned if too few are present.\n"
                 "This can be changed through ScalarAggregateOptions.\n"
                 "The result is a double for integer and floating point arguments,\n"
                 "and a decimal with the same bit-width/precision/scale for decimal arguments.\n"
                 "For integers and floats, NaN is returned if min_count = 0 and\n"
                 "there are no values. For decimals, null is returned instead."),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc first_last_doc{
                "Compute the first and last values of an array",
                ("Null values are ignored by default.\n"
                 "If skip_nulls = false, then this will return the first and last values\n"
                 "regardless if it is null"),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc first_doc{
                "Compute the first value in each group",
                ("Null values are ignored by default.\n"
                 "If skip_nulls = false, then this will return the first and last values\n"
                 "regardless if it is null"),
                {"array"},
                "ScalarAggregateOptions"};

        // Fixed copy-paste bug: the summary previously read "Compute the
        // first value in each group" for the "last" function.
        const FunctionDoc last_doc{
                "Compute the last value in each group",
                ("Null values are ignored by default.\n"
                 "If skip_nulls = false, then this will return the first and last values\n"
                 "regardless if it is null"),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc min_max_doc{"Compute the minimum and maximum values of a numeric array",
                                      ("Null values are ignored by default.\n"
                                       "This can be changed through ScalarAggregateOptions."),
                                      {"array"},
                                      "ScalarAggregateOptions"};

        const FunctionDoc min_or_max_doc{
                "Compute the minimum or maximum values of a numeric array",
                ("Null values are ignored by default.\n"
                 "This can be changed through ScalarAggregateOptions."),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc any_doc{
                "Test whether any element in a boolean array evaluates to true",
                ("Null values are ignored by default.\n"
                 "If the `skip_nulls` option is set to false, then Kleene logic is used.\n"
                 "See \"kleene_or\" for more details on Kleene logic."),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc all_doc{
                "Test whether all elements in a boolean array evaluate to true",
                ("Null values are ignored by default.\n"
                 "If the `skip_nulls` option is set to false, then Kleene logic is used.\n"
                 "See \"kleene_and\" for more details on Kleene logic."),
                {"array"},
                "ScalarAggregateOptions"};

        const FunctionDoc index_doc{"Find the index of the first occurrence of a given value",
                                    ("-1 is returned if the value is not found in the array.\n"
                                     "The search value is specified in IndexOptions."),
                                    {"array"},
                                    "IndexOptions",
                /*options_required=*/true};

    }  // namespace

    // Registers the basic scalar aggregate functions with `registry`:
    // count_all, count, count_distinct, sum, mean, first_last, first, last,
    // min_max, min, max, product, any, all, and index. SIMD (AVX2/AVX512)
    // kernel variants are added when compiled in and supported at runtime.
    //
    // Fixed: KCHECK_OK lines and the AVX512 blocks were mis-indented,
    // obscuring the registration flow; indentation is now consistent.
    void register_scalar_aggregate_basic(FunctionRegistry *registry) {
        static auto default_scalar_aggregate_options = ScalarAggregateOptions::defaults();
        static auto default_count_options = CountOptions::defaults();

        auto func = std::make_shared<ScalarAggregateFunction>("count_all", Arity::Nullary(),
                                                              count_all_doc, nullptr);

        // Takes no input (counts all rows), outputs int64 scalar
        add_agg_kernel(KernelSignature::create({}, int64()), CountAllInit, func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>("count", Arity::Unary(), count_doc,
                                                         &default_count_options);

        // Takes any input, outputs int64 scalar
        InputType any_input;
        add_agg_kernel(KernelSignature::create({any_input}, int64()), CountInit, func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>(
                "count_distinct", Arity::Unary(), count_distinct_doc, &default_count_options);
        // Takes any input, outputs int64 scalar
        AddCountDistinctKernels(func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>("sum", Arity::Unary(), sum_doc,
                                                         &default_scalar_aggregate_options);
        add_array_scalar_agg_kernels(SumInit, {boolean()}, uint64(), func.get());
        // Decimal sums keep the input type (FirstType resolver).
        add_agg_kernel(KernelSignature::create({Type::DECIMAL128}, FirstType), SumInit, func.get(),
                     SimdLevel::NONE);
        add_agg_kernel(KernelSignature::create({Type::DECIMAL256}, FirstType), SumInit, func.get(),
                     SimdLevel::NONE);
        add_array_scalar_agg_kernels(SumInit, signed_int_types(), int64(), func.get());
        add_array_scalar_agg_kernels(SumInit, unsigned_int_types(), uint64(), func.get());
        add_array_scalar_agg_kernels(SumInit, floating_point_types(), float64(), func.get());
        add_array_scalar_agg_kernels(SumInit, {null()}, int64(), func.get());
        // Add the SIMD variants for sum
#if NEBULA_HAVE_RUNTIME_AVX2_SUPPORTED || defined(NEBULA_HAVE_RUNTIME_AVX512)
        auto cpu_info = nebula::internal::CpuInfo::GetInstance();
#endif
#if NEBULA_HAVE_RUNTIME_AVX2_SUPPORTED
        if (cpu_info->is_supported(nebula::internal::CpuInfo::AVX2)) {
            AddSumAvx2AggKernels(func.get());
        }
#endif
#if defined(NEBULA_HAVE_RUNTIME_AVX512)
        if (cpu_info->is_supported(nebula::internal::CpuInfo::AVX512)) {
            AddSumAvx512AggKernels(func.get());
        }
#endif
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>("mean", Arity::Unary(), mean_doc,
                                                         &default_scalar_aggregate_options);
        add_array_scalar_agg_kernels(MeanInit, {boolean()}, float64(), func.get());
        add_array_scalar_agg_kernels(MeanInit, numeric_types(), float64(), func.get());
        // Decimal means keep the input type (FirstType resolver).
        add_agg_kernel(KernelSignature::create({Type::DECIMAL128}, FirstType), MeanInit, func.get(),
                     SimdLevel::NONE);
        add_agg_kernel(KernelSignature::create({Type::DECIMAL256}, FirstType), MeanInit, func.get(),
                     SimdLevel::NONE);
        add_array_scalar_agg_kernels(MeanInit, {null()}, float64(), func.get());
        // Add the SIMD variants for mean
#if NEBULA_HAVE_RUNTIME_AVX2_SUPPORTED
        if (cpu_info->is_supported(nebula::internal::CpuInfo::AVX2)) {
            AddMeanAvx2AggKernels(func.get());
        }
#endif
#if defined(NEBULA_HAVE_RUNTIME_AVX512)
        if (cpu_info->is_supported(nebula::internal::CpuInfo::AVX512)) {
            AddMeanAvx512AggKernels(func.get());
        }
#endif
        KCHECK_OK(registry->add_function(std::move(func)));

        // Add first last function
        func = std::make_shared<ScalarAggregateFunction>(
                "first_last", Arity::Unary(), first_last_doc, &default_scalar_aggregate_options);
        // Keep a non-owning pointer: "first"/"last" delegate to this function.
        auto first_last_func = func.get();

        add_first_last_kernels(FirstLastInit, {boolean(), fixed_size_binary(1)}, func.get());
        add_first_last_kernels(FirstLastInit, numeric_types(), func.get());
        add_first_last_kernels(FirstLastInit, base_binary_types(), func.get());
        add_first_last_kernels(FirstLastInit, temporal_types(), func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        // Add first/last as convenience functions
        func = std::make_shared<ScalarAggregateFunction>("first", Arity::Unary(), first_doc,
                                                         &default_scalar_aggregate_options);
        AddFirstOrLastAggKernel<FirstOrLast::First>(func.get(), first_last_func);
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>("last", Arity::Unary(), last_doc,
                                                         &default_scalar_aggregate_options);
        AddFirstOrLastAggKernel<FirstOrLast::Last>(func.get(), first_last_func);
        KCHECK_OK(registry->add_function(std::move(func)));

        // Add min max function
        func = std::make_shared<ScalarAggregateFunction>("min_max", Arity::Unary(), min_max_doc,
                                                         &default_scalar_aggregate_options);
        add_min_max_kernels(MinMaxInit, {null(), boolean()}, func.get());
        add_min_max_kernels(MinMaxInit, numeric_types(), func.get());
        add_min_max_kernels(MinMaxInit, temporal_types(), func.get());
        add_min_max_kernels(MinMaxInit, base_binary_types(), func.get());
        add_min_max_kernel(MinMaxInit, Type::FIXED_SIZE_BINARY, func.get());
        add_min_max_kernel(MinMaxInit, Type::INTERVAL_MONTHS, func.get());
        add_min_max_kernel(MinMaxInit, Type::DECIMAL128, func.get());
        add_min_max_kernel(MinMaxInit, Type::DECIMAL256, func.get());
        // Add the SIMD variants for min max
#if NEBULA_HAVE_RUNTIME_AVX2_SUPPORTED
        if (cpu_info->is_supported(nebula::internal::CpuInfo::AVX2)) {
            AddMinMaxAvx2AggKernels(func.get());
        }
#endif
#if defined(NEBULA_HAVE_RUNTIME_AVX512)
        if (cpu_info->is_supported(nebula::internal::CpuInfo::AVX512)) {
            AddMinMaxAvx512AggKernels(func.get());
        }
#endif

        // Keep a non-owning pointer: "min"/"max" delegate to this function.
        auto min_max_func = func.get();
        KCHECK_OK(registry->add_function(std::move(func)));

        // Add min/max as convenience functions
        func = std::make_shared<ScalarAggregateFunction>("min", Arity::Unary(), min_or_max_doc,
                                                         &default_scalar_aggregate_options);
        AddMinOrMaxAggKernel<MinOrMax::Min>(func.get(), min_max_func);
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>("max", Arity::Unary(), min_or_max_doc,
                                                         &default_scalar_aggregate_options);
        AddMinOrMaxAggKernel<MinOrMax::Max>(func.get(), min_max_func);
        KCHECK_OK(registry->add_function(std::move(func)));

        func = std::make_shared<ScalarAggregateFunction>("product", Arity::Unary(), product_doc,
                                                         &default_scalar_aggregate_options);
        add_array_scalar_agg_kernels(ProductInit::init, {boolean()}, uint64(), func.get());
        add_array_scalar_agg_kernels(ProductInit::init, signed_int_types(), int64(), func.get());
        add_array_scalar_agg_kernels(ProductInit::init, unsigned_int_types(), uint64(), func.get());
        add_array_scalar_agg_kernels(ProductInit::init, floating_point_types(), float64(),
                                 func.get());
        // Decimal products keep the input type (FirstType resolver).
        add_agg_kernel(KernelSignature::create({Type::DECIMAL128}, FirstType), ProductInit::init,
                     func.get(), SimdLevel::NONE);
        add_agg_kernel(KernelSignature::create({Type::DECIMAL256}, FirstType), ProductInit::init,
                     func.get(), SimdLevel::NONE);
        add_array_scalar_agg_kernels(ProductInit::init, {null()}, int64(), func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        // any
        func = std::make_shared<ScalarAggregateFunction>("any", Arity::Unary(), any_doc,
                                                         &default_scalar_aggregate_options);
        add_array_scalar_agg_kernels(AnyInit, {boolean()}, boolean(), func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        // all
        func = std::make_shared<ScalarAggregateFunction>("all", Arity::Unary(), all_doc,
                                                         &default_scalar_aggregate_options);
        add_array_scalar_agg_kernels(AllInit, {boolean()}, boolean(), func.get());
        KCHECK_OK(registry->add_function(std::move(func)));

        // index
        func = std::make_shared<ScalarAggregateFunction>("index", Arity::Unary(), index_doc);
        add_basic_agg_kernels(IndexInit::init, base_binary_types(), int64(), func.get());
        add_basic_agg_kernels(IndexInit::init, primitive_types(), int64(), func.get());
        add_basic_agg_kernels(IndexInit::init, temporal_types(), int64(), func.get());
        add_basic_agg_kernels(IndexInit::init,
                           {fixed_size_binary(1), decimal128(1, 0), decimal256(1, 0), null()},
                           int64(), func.get());
        KCHECK_OK(registry->add_function(std::move(func)));
    }

}  // namespace nebula::compute::internal
