// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <pollux/functions/sparksql/aggregates/collect_list_aggregate.h>

#include <pollux/exec/simple_aggregate_adapter.h>
#include <pollux/functions/lib/aggregates/value_list.h>

using namespace kumo::pollux::aggregate;
using namespace kumo::pollux::exec;

namespace kumo::pollux::functions::aggregate::sparksql {
    namespace {
        // Spark-compatible collect_list, written against the
        // SimpleAggregateAdapter contract: gathers every non-null input value
        // of a group into an array.
        class CollectListAggregate {
        public:
            using InputType = Row<Generic<T1> >;

            using IntermediateType = Array<Generic<T1> >;

            using OutputType = Array<Generic<T1> >;

            /// In Spark, when all inputs of a group are null the result is an
            /// empty array rather than null. To emit that empty array,
            /// writeIntermediateResult/writeFinalResult must still be invoked
            /// for null groups, which requires disabling the default-null
            /// behavior of the adapter.
            static constexpr bool default_null_behavior_ = false;

            // Converts one raw input value straight into intermediate (array)
            // form. Returns false for a null input so the intermediate stays
            // null.
            static bool toIntermediate(
                exec::out_type<Array<Generic<T1> > > &out,
                exec::optional_arg_type<Generic<T1> > in) {
                if (!in.has_value()) {
                    return false;
                }
                out.add_item().copy_from(in.value());
                return true;
            }

            struct AccumulatorType {
                // Values collected so far for one group.
                ValueList elements_;

                explicit AccumulatorType(
                    HashStringAllocator * /*allocator*/,
                    CollectListAggregate * /*fn*/)
                    : elements_{} {
                }

                // Accumulator size grows with the number of collected values.
                static constexpr bool is_fixed_size_ = false;

                // Appends a single raw input value; nulls are skipped.
                bool addInput(
                    HashStringAllocator *allocator,
                    exec::optional_arg_type<Generic<T1> > data) {
                    if (!data.has_value()) {
                        return false;
                    }
                    elements_.appendValue(data, allocator);
                    return true;
                }

                // Merges another group's intermediate array into this
                // accumulator, preserving element order.
                bool combine(
                    HashStringAllocator *allocator,
                    exec::optional_arg_type<IntermediateType> other) {
                    if (!other.has_value()) {
                        return false;
                    }
                    for (auto item: other.value()) {
                        elements_.appendValue(item, allocator);
                    }
                    return true;
                }

                // Emits the intermediate array. A null group yields an empty
                // array, matching Spark's semantics (see
                // default_null_behavior_ above).
                bool writeIntermediateResult(
                    bool /*nonNullGroup*/,
                    exec::out_type<IntermediateType> &out) {
                    copyValueListToArrayWriter(out, elements_);
                    return true;
                }

                // Emits the final array. A null group yields an empty array,
                // matching Spark's semantics.
                bool writeFinalResult(
                    bool /*nonNullGroup*/,
                    exec::out_type<OutputType> &out) {
                    copyValueListToArrayWriter(out, elements_);
                    return true;
                }

                // Releases the memory held by the collected values.
                void destroy(HashStringAllocator *allocator) {
                    elements_.free(allocator);
                }
            };
        };

        // Registers the collect_list aggregate under `name` with the single
        // generic signature (E) -> array(E), where the intermediate type is
        // also array(E). Optionally registers companion functions and/or
        // overwrites an existing registration.
        AggregateRegistrationResult registerCollectList(
            const std::string &name,
            bool withCompanionFunctions,
            bool overwrite) {
            std::vector<std::shared_ptr<exec::AggregateFunctionSignature> > signatures{
                exec::AggregateFunctionSignatureBuilder()
                .typeVariable("E")
                .returnType("array(E)")
                .intermediateType("array(E)")
                .argumentType("E")
                .build()
            };
            return exec::registerAggregateFunction(
                name,
                std::move(signatures),
                [name](
            core::AggregationNode::Step step,
            const std::vector<TypePtr> &argTypes,
            const TypePtr &resultType,
            const core::QueryConfig & /*config*/)
            -> std::unique_ptr<exec::Aggregate> {
                    // CHECK_EQ enforces exactly one argument; the message must
                    // say so (the previous "at most one" wording would have
                    // misled a caller who supplied zero arguments).
                    POLLUX_CHECK_EQ(
                        argTypes.size(), 1, "{} takes exactly one argument", name);
                    return std::make_unique<SimpleAggregateAdapter<CollectListAggregate> >(
                        step, argTypes, resultType);
                },
                withCompanionFunctions,
                overwrite);
        }
    } // namespace

    // Public entry point: registers collect_list with the given function-name
    // prefix (e.g. an empty prefix yields "collect_list").
    void registerCollectListAggregate(
        const std::string &prefix,
        bool withCompanionFunctions,
        bool overwrite) {
        const std::string functionName = prefix + "collect_list";
        registerCollectList(functionName, withCompanionFunctions, overwrite);
    }
} // namespace kumo::pollux::functions::aggregate::sparksql
