// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/compute/kernels/aggregate_basic_internal.h>

namespace nebula::compute::internal {

// ----------------------------------------------------------------------
// Sum implementation

    // AVX512 specialization of the scalar "sum" aggregate implementation.
    // The Base alias keeps the inherited-constructor declaration readable.
    template<typename ArrowType>
    struct SumImplAvx512 : public SumImpl<ArrowType, SimdLevel::AVX512> {
        using Base = SumImpl<ArrowType, SimdLevel::AVX512>;
        using Base::Base;
    };

    // AVX512 specialization of the scalar "mean" aggregate implementation.
    // The Base alias keeps the inherited-constructor declaration readable.
    template<typename ArrowType>
    struct MeanImplAvx512 : public MeanImpl<ArrowType, SimdLevel::AVX512> {
        using Base = MeanImpl<ArrowType, SimdLevel::AVX512>;
        using Base::Base;
    };

    // Kernel-init entry point for the AVX512 sum kernel: resolves the input
    // type and builds the matching SumImplAvx512 state object.
    turbo::Result<std::unique_ptr<KernelState>> SumInitAvx512(KernelContext *ctx,
                                                              const KernelInitArgs &args) {
        const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
        SumLikeInit<SumImplAvx512> init(ctx, args.inputs[0].get_shared_ptr(), options);
        return init.create();
    }

    // Kernel-init entry point for the AVX512 mean kernel: resolves the input
    // type and builds the matching MeanImplAvx512 state object.
    turbo::Result<std::unique_ptr<KernelState>> MeanInitAvx512(KernelContext *ctx,
                                                               const KernelInitArgs &args) {
        const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
        SumLikeInit<MeanImplAvx512> init(ctx, args.inputs[0].get_shared_ptr(), options);
        return init.create();
    }

// ----------------------------------------------------------------------
// MinMax implementation

    // Kernel-init entry point for the AVX512 min/max kernel. The output type
    // must be resolved from the kernel signature first (it can depend on the
    // inputs); resolution failure propagates via TURBO_MOVE_OR_RAISE.
    turbo::Result<std::unique_ptr<KernelState>> MinMaxInitAvx512(KernelContext *ctx,
                                                                 const KernelInitArgs &args) {
        TURBO_MOVE_OR_RAISE(TypeHolder out_type,
                            args.kernel->signature->out_type().resolve(ctx, args.inputs));
        const auto &options = static_cast<const ScalarAggregateOptions &>(*args.options);
        MinMaxInitState<SimdLevel::AVX512> init(ctx, *args.inputs[0],
                                                out_type.get_shared_ptr(), options);
        return init.create();
    }

    // Registers the AVX512 sum kernels on `func`, one family per input class:
    // signed ints accumulate into int64, unsigned into uint64, floats into
    // float64.
    void AddSumAvx512AggKernels(ScalarAggregateFunction *func) {
        const auto add = [func](const auto &in_types, const auto &out_type) {
            add_basic_agg_kernels(SumInitAvx512, in_types, out_type, func,
                                  SimdLevel::AVX512);
        };
        add(signed_int_types(), int64());
        add(unsigned_int_types(), uint64());
        add(floating_point_types(), float64());
    }

    // Registers the AVX512 mean kernels on `func`; every numeric input type
    // produces a float64 result.
    void AddMeanAvx512AggKernels(ScalarAggregateFunction *func) {
        constexpr auto kSimd = SimdLevel::AVX512;
        add_basic_agg_kernels(MeanInitAvx512, numeric_types(), float64(), func, kSimd);
    }

    // Registers the AVX512 min/max kernels on `func` for 32/64-bit integer,
    // temporal, binary-like, and month-interval inputs.
    void AddMinMaxAvx512AggKernels(ScalarAggregateFunction *func) {
        // Enable 32/64 int types for avx512 variants, no advantage on 8/16 int.
        add_min_max_kernels(MinMaxInitAvx512, {int32(), uint32(), int64(), uint64()}, func,
                            SimdLevel::AVX512);
        add_min_max_kernels(MinMaxInitAvx512, temporal_types(), func, SimdLevel::AVX512);
        // Fix: these registrations previously passed SimdLevel::AVX2 even
        // though they install the AVX512 init function — every kernel in this
        // AVX512 translation unit must be registered at SimdLevel::AVX512,
        // both for consistency and to avoid colliding with the real AVX2
        // registrations at dispatch time.
        add_min_max_kernels(MinMaxInitAvx512, base_binary_types(), func, SimdLevel::AVX512);
        add_min_max_kernel(MinMaxInitAvx512, Type::FIXED_SIZE_BINARY, func,
                           SimdLevel::AVX512);
        add_min_max_kernel(MinMaxInitAvx512, Type::INTERVAL_MONTHS, func,
                           SimdLevel::AVX512);
    }

}  // namespace nebula::compute::internal
