// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/compute/kernels/aggregate_basic_internal.h>

namespace nebula::compute::internal {

// ----------------------------------------------------------------------
// Sum implementation

    // Sum aggregate state specialized for SimdLevel::AVX2.  A distinct derived
    // struct (rather than an alias template) is used so SumLikeInit receives a
    // unary class template to instantiate per Arrow type.
    // NOTE(review): presumably this translation unit is compiled with AVX2
    // codegen flags so the shared SumImpl body auto-vectorizes — confirm in
    // the build configuration.
    template<typename ArrowType>
    struct SumImplAvx2 : public SumImpl<ArrowType, SimdLevel::AVX2> {
        using SumImpl<ArrowType, SimdLevel::AVX2>::SumImpl;
    };

    // Mean aggregate state specialized for SimdLevel::AVX2; mirrors
    // SumImplAvx2 — a distinct derived struct so SumLikeInit gets a unary
    // class template, with constructors inherited from the generic MeanImpl.
    template<typename ArrowType>
    struct MeanImplAvx2 : public MeanImpl<ArrowType, SimdLevel::AVX2> {
        using MeanImpl<ArrowType, SimdLevel::AVX2>::MeanImpl;
    };

    // Kernel-init entry point for the AVX2 sum kernels: dispatches on the
    // first input's type and returns the matching SumImplAvx2 state.
    // Returns an error Result if the input type is unsupported.
    turbo::Result<std::unique_ptr<KernelState>> SumInitAvx2(KernelContext *ctx,
                                                            const KernelInitArgs &args) {
        const auto &agg_options =
                static_cast<const ScalarAggregateOptions &>(*args.options);
        SumLikeInit<SumImplAvx2> init(ctx, args.inputs[0].get_shared_ptr(), agg_options);
        return init.create();
    }

    // Kernel-init entry point for the AVX2 mean kernels; identical shape to
    // SumInitAvx2 but instantiates MeanImplAvx2 state instead.
    turbo::Result<std::unique_ptr<KernelState>> MeanInitAvx2(KernelContext *ctx,
                                                             const KernelInitArgs &args) {
        const auto &agg_options =
                static_cast<const ScalarAggregateOptions &>(*args.options);
        SumLikeInit<MeanImplAvx2> init(ctx, args.inputs[0].get_shared_ptr(), agg_options);
        return init.create();
    }

// ----------------------------------------------------------------------
// MinMax implementation

    // Kernel-init entry point for the AVX2 min/max kernels.  Unlike sum/mean,
    // the output type must be resolved from the kernel signature first (the
    // min/max result is a struct parameterized by the input type).
    // Propagates any resolution failure via TURBO_MOVE_OR_RAISE.
    turbo::Result<std::unique_ptr<KernelState>> MinMaxInitAvx2(KernelContext *ctx,
                                                               const KernelInitArgs &args) {
        TURBO_MOVE_OR_RAISE(TypeHolder resolved_out,
                            args.kernel->signature->out_type().resolve(ctx, args.inputs));
        const auto &agg_options =
                static_cast<const ScalarAggregateOptions &>(*args.options);
        MinMaxInitState<SimdLevel::AVX2> init(ctx, *args.inputs[0],
                                              resolved_out.get_shared_ptr(), agg_options);
        return init.create();
    }

    // Registers the AVX2 sum kernels on `func`: one kernel per input-type
    // family, each accumulating into the widest type of its family (signed ->
    // int64, unsigned -> uint64, floating -> float64).
    void AddSumAvx2AggKernels(ScalarAggregateFunction *func) {
        const auto register_family = [func](const auto &in_types, const auto &out_type) {
            add_basic_agg_kernels(SumInitAvx2, in_types, out_type, func, SimdLevel::AVX2);
        };
        register_family(signed_int_types(), int64());
        register_family(unsigned_int_types(), uint64());
        register_family(floating_point_types(), float64());
    }

    // Registers the AVX2 mean kernels on `func`.  Mean always produces
    // float64, regardless of the numeric input type.
    void AddMeanAvx2AggKernels(ScalarAggregateFunction *func) {
        add_basic_agg_kernels(MeanInitAvx2, numeric_types(), float64(), func,
                              SimdLevel::AVX2);
    }

    // Registers the AVX2 min/max kernels on `func`.  Integer, temporal, and
    // binary-like types are enabled; float/double are deliberately omitted
    // because they rely on fmin/fmax for NaN handling, which prevents
    // auto-vectorization.
    void AddMinMaxAvx2AggKernels(ScalarAggregateFunction *func) {
        constexpr auto kSimd = SimdLevel::AVX2;
        add_min_max_kernels(MinMaxInitAvx2, int_types(), func, kSimd);
        add_min_max_kernels(MinMaxInitAvx2, temporal_types(), func, kSimd);
        add_min_max_kernels(MinMaxInitAvx2, base_binary_types(), func, kSimd);
        // Parameterized types are registered individually by type id.
        add_min_max_kernel(MinMaxInitAvx2, Type::FIXED_SIZE_BINARY, func, kSimd);
        add_min_max_kernel(MinMaxInitAvx2, Type::INTERVAL_MONTHS, func, kSimd);
    }

}  // namespace nebula::compute::internal
