// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <type_traits>

#include <nebula/array/array_binary.h>
#include <nebula/array/array_dict.h>
#include <nebula/array/array_nested.h>
#include <nebula/array/builder_primitive.h>
#include <nebula/array/concatenate.h>
#include <nebula/core/buffer_builder.h>
#include <nebula/core/chunked_array.h>
#include <nebula/compute/api_vector.h>
#include <nebula/compute/kernels/common_internal.h>
#include <nebula/compute/kernels/util_internal.h>
#include <nebula/compute/kernels/vector_selection_filter_internal.h>
#include <nebula/compute/kernels/vector_selection_take_internal.h>
#include <nebula/core/extension_type.h>
#include <nebula/core/record_batch.h>

#include <nebula/core/table.h>
#include <nebula/types/type.h>
#include <nebula/bits/bit_block_counter.h>
#include <nebula/bits/bit_run_reader.h>
#include <nebula/bits/bit_util.h>
#include <nebula/bits/bitmap_ops.h>
#include <nebula/bits/bitmap_reader.h>
#include <nebula/numeric/int_util.h>

namespace nebula {

    // Pull frequently-used bit-manipulation helpers from nebula::internal into
    // the nebula namespace so the selection kernels below can refer to them
    // unqualified.
    using internal::BinaryBitBlockCounter;
    using internal::BitBlockCount;
    using internal::BitBlockCounter;
    using internal::CheckIndexBounds;
    using internal::CopyBitmap;
    using internal::CountSetBits;
    using internal::OptionalBitBlockCounter;
    using internal::OptionalBitIndexer;
}

namespace nebula::compute::internal {

    namespace {

        // Kernel-state wrappers exposing FilterOptions / TakeOptions to the
        // selection kernels through the standard OptionsWrapper mechanism.
        using FilterState = OptionsWrapper<FilterOptions>;
        using TakeState = OptionsWrapper<TakeOptions>;

// ----------------------------------------------------------------------
// DropNull Implementation

        // Build a boolean selection filter that is true exactly where `values`
        // is valid: the validity bitmap of `values` is reused directly as the
        // data buffer of a BooleanArray that itself has no validity bitmap.
        std::shared_ptr<nebula::BooleanArray> MakeDropNullFilter(const Array &values) {
            const auto &validity_buffer = values.null_bitmap();
            return std::make_shared<BooleanArray>(values.length(), validity_buffer,
                                                  /*null_bitmap=*/nullptr, /*null_count=*/0,
                                                  values.offset());
        }

        // Drop the null entries of a single array by filtering it against a
        // mask derived from its own validity bitmap.
        //
        // Fast paths: no nulls (input returned untouched), all-null (empty
        // array of the same type), and the null type itself.
        turbo::Result<Datum> DropNullArray(const std::shared_ptr<Array> &values, ExecContext *ctx) {
            const int64_t num_nulls = values->null_count();
            if (num_nulls == 0) {
                // Nothing to drop; avoid a copy.
                return values;
            }
            if (num_nulls == values->length()) {
                return MakeEmptyArray(values->type(), ctx->memory_pool());
            }
            if (values->type()->id() == Type::type::NA) {
                return std::make_shared<NullArray>(0);
            }
            Datum drop_null_filter{MakeDropNullFilter(*values)};
            return Filter(values, drop_null_filter, FilterOptions::defaults(), ctx);
        }

        // Drop nulls from a chunked array, chunk by chunk.  Chunks that become
        // empty after filtering are omitted from the result.
        turbo::Result<Datum> DropNullChunkedArray(const std::shared_ptr<ChunkedArray> &values,
                                                  ExecContext *ctx) {
            if (values->null_count() == 0) {
                // No nulls anywhere; return the input untouched.
                return values;
            }
            if (values->null_count() == values->length()) {
                return ChunkedArray::make_empty(values->type(), ctx->memory_pool());
            }
            std::vector<std::shared_ptr<Array>> surviving_chunks;
            for (const auto &chunk: values->chunks()) {
                TURBO_MOVE_OR_RAISE(auto filtered, DropNullArray(chunk, ctx));
                if (filtered.length() > 0) {
                    surviving_chunks.push_back(filtered.make_array());
                }
            }
            return std::make_shared<ChunkedArray>(std::move(surviving_chunks));
        }

        // Drop every row of `batch` that contains at least one null: AND all
        // column validity bitmaps together and use the result as a boolean
        // filter over the whole batch.
        turbo::Result<Datum> DropNullRecordBatch(const std::shared_ptr<RecordBatch> &batch,
                                                 ExecContext *ctx) {
            // Compute an upper bound of the final null count
            int64_t null_count = 0;
            for (const auto &column: batch->columns()) {
                null_count += column->null_count();
            }
            if (null_count == 0) {
                // No column carries nulls, so every row survives.
                return batch;
            }
            // Start from an all-true bitmap, then AND in each column's validity.
            TURBO_MOVE_OR_RAISE(auto dst,
                                allocate_empty_bitmap(batch->num_rows(), ctx->memory_pool()));
            bit_util::SetBitsTo(dst->mutable_data(), 0, batch->num_rows(), true);
            for (const auto &column: batch->columns()) {
                if (column->type()->id() == Type::type::NA) {
                    // A null-typed column is entirely null: no row can survive.
                    bit_util::SetBitsTo(dst->mutable_data(), 0, batch->num_rows(), false);
                    break;
                }
                if (column->null_bitmap_data()) {
                    // In-place AND: dst serves as both right-hand input and output.
                    // Column bitmaps are read at the column's own offset; dst is at 0.
                    ::nebula::internal::BitmapAnd(column->null_bitmap_data(), column->offset(),
                                                  dst->data(), 0, column->length(), 0,
                                                  dst->mutable_data());
                }
            }
            auto drop_null_filter = std::make_shared<BooleanArray>(batch->num_rows(), dst);
            if (drop_null_filter->true_count() == 0) {
                // Everything was filtered out; return an empty batch, same schema.
                return RecordBatch::make_empty(batch->schema(), ctx->memory_pool());
            }
            return Filter(Datum(batch), Datum(drop_null_filter), FilterOptions::defaults(), ctx);
        }

        // Drop from `table` every row containing at least one null, processing
        // the table batch-by-batch via TableBatchReader.
        turbo::Result<Datum> DropNullTable(const std::shared_ptr<Table> &table, ExecContext *ctx) {
            if (table->num_rows() == 0) {
                return table;
            }
            // Upper bound on the null count across all column chunks; when it
            // is zero the input can be returned untouched.
            int64_t total_nulls = 0;
            for (const auto &column: table->columns()) {
                for (const auto &chunk: column->chunks()) {
                    total_nulls += chunk->null_count();
                }
            }
            if (total_nulls == 0) {
                return table;
            }

            nebula::RecordBatchVector surviving_batches;
            TableBatchReader reader(*table);
            for (;;) {
                TURBO_MOVE_OR_RAISE(auto batch, reader.next());
                if (batch == nullptr) {
                    // Reader exhausted.
                    break;
                }
                TURBO_MOVE_OR_RAISE(auto filtered, DropNullRecordBatch(batch, ctx));
                if (filtered.length() > 0) {
                    surviving_batches.push_back(filtered.record_batch());
                }
            }

            return Table::from_record_batches(table->schema(), surviving_batches);
        }

        // User-facing documentation for the `drop_null` meta function
        // registered below.
        const FunctionDoc drop_null_doc(
                "Drop nulls from the input",
                ("The output is populated with values from the input (Array, ChunkedArray,\n"
                 "RecordBatch, or Table) without the null values.\n"
                 "For the RecordBatch and Table cases, `drop_null` drops the full row if\n"
                 "there is any null."),
                {"input"});

        // Meta function that dispatches drop_null to the implementation
        // matching the shape of the input Datum.
        class DropNullMetaFunction : public MetaFunction {
        public:
            DropNullMetaFunction() : MetaFunction("drop_null", Arity::Unary(), drop_null_doc) {}

            // Dispatch on the Datum kind; unsupported kinds yield an
            // unimplemented error.
            turbo::Result<Datum> ExecuteImpl(const std::vector<Datum> &args,
                                             const FunctionOptions *options,
                                             ExecContext *ctx) const override {
                const Datum &input = args[0];
                switch (input.kind()) {
                    case Datum::ARRAY:
                        return DropNullArray(input.make_array(), ctx);
                    case Datum::CHUNKED_ARRAY:
                        return DropNullChunkedArray(input.chunked_array(), ctx);
                    case Datum::RECORD_BATCH:
                        return DropNullRecordBatch(input.record_batch(), ctx);
                    case Datum::TABLE:
                        return DropNullTable(input.table(), ctx);
                    default:
                        return turbo::unimplemented_error(
                                "Unsupported types for drop_null operation: "
                                "values=",
                                args[0].to_string());
                }
            }
        };

// ----------------------------------------------------------------------

        // Documentation for the low-level "array_filter" kernel function.
        const FunctionDoc array_filter_doc(
                "Filter with a boolean selection filter",
                ("The output is populated with values from the input `array` at positions\n"
                 "where the selection filter is non-zero.  Nulls in the selection filter\n"
                 "are handled based on FilterOptions."),
                {"array", "selection_filter"}, "FilterOptions");

        // Documentation for the low-level "array_take" kernel function.
        const FunctionDoc array_take_doc(
                "Select values from an array based on indices from another array",
                ("The output is populated with values from the input array at positions\n"
                 "given by `indices`.  Nulls in `indices` emit null in the output."),
                {"array", "indices"}, "TakeOptions");

        // Documentation for the "indices_nonzero" vector function.
        const FunctionDoc indices_nonzero_doc(
                "Return the indices of the values in the array that are non-zero",
                ("For each input value, check if it's zero, false or null. Emit the index\n"
                 "of the value in the array if it's none of the those."),
                {"values"});

        // Type visitor that appends to `builder` the logical index of every
        // element across `arrays` that is neither null nor equal to the type's
        // zero value.  The caller must have reserved enough builder capacity
        // (unsafe_append is used).
        struct NonZeroVisitor {
            UInt64Builder *builder;
            const std::vector<ArraySpan> &arrays;

            NonZeroVisitor(UInt64Builder *builder, const std::vector<ArraySpan> &arrays)
                    : builder(builder), arrays(arrays) {}

            // Fallback for unsupported input types.
            turbo::Status Visit(const DataType &type) { return turbo::unimplemented_error(type.to_string()); }

            // Supported inputs: decimals, primitive C types and booleans.
            template<typename Type>
            enable_if_t<is_decimal_type<Type>::value || is_primitive_ctype<Type>::value ||
                        is_boolean_type<Type>::value,
                    turbo::Status>
            Visit(const Type &) {
                using CType = typename GetOutputType<Type>::T;
                const CType zero_value{};
                // Running logical index across all chunks.
                uint64_t position = 0;

                for (const ArraySpan &span: arrays) {
                    VisitArrayValuesInline<Type>(
                            span,
                            [&](CType value) {
                                // Emit the index only for non-zero values; the
                                // position advances either way.
                                if (value != zero_value) {
                                    builder->unsafe_append(position);
                                }
                                ++position;
                            },
                            // Nulls advance the position without emitting it.
                            [&]() { ++position; });
                }
                return turbo::OkStatus();
            }
        };

        // Collect the indices of all non-zero, non-null elements of `arrays`
        // (which together hold `total_length` elements) into a uint64 array.
        turbo::Status DoNonZero(const std::vector<ArraySpan> &arrays, int64_t total_length,
                                std::shared_ptr<ArrayData> *out) {
            UInt64Builder index_builder;
            // Reserve the worst case (every element non-zero) so the visitor
            // can use unsafe_append.
            TURBO_RETURN_NOT_OK(index_builder.Reserve(total_length));

            NonZeroVisitor collector(&index_builder, arrays);
            TURBO_RETURN_NOT_OK(visit_type_inline(*arrays[0].type, &collector));
            return index_builder.finish_internal(out);
        }

        // Single-span kernel for "indices_nonzero".
        turbo::Status IndicesNonZeroExec(KernelContext *ctx, const ExecSpan &batch, ExecResult *out) {
            std::shared_ptr<ArrayData> nonzero_indices;
            TURBO_RETURN_NOT_OK(DoNonZero({batch[0].array}, batch.length, &nonzero_indices));
            out->value = std::move(nonzero_indices);
            return turbo::OkStatus();
        }

        // Chunked-input kernel for "indices_nonzero": flattens the chunks into
        // ArraySpans and delegates to DoNonZero so indices run continuously
        // across chunk boundaries.
        turbo::Status IndicesNonZeroExecChunked(KernelContext *ctx, const ExecBatch &batch, Datum *out) {
            const ChunkedArray &arr = *batch[0].chunked_array();
            std::vector<ArraySpan> arrays;
            // The chunk count is known up front: reserve once instead of
            // reallocating while collecting, and construct the spans in place.
            arrays.reserve(arr.num_chunks());
            for (int i = 0; i < arr.num_chunks(); ++i) {
                arrays.emplace_back(*arr.chunk(i)->data());
            }
            std::shared_ptr<ArrayData> result;
            TURBO_RETURN_NOT_OK(DoNonZero(arrays, arr.length(), &result));
            out->value = std::move(result);
            return turbo::OkStatus();
        }

        // Build the VectorFunction implementing "indices_nonzero" for all
        // numeric, boolean and decimal input types.  The function always
        // produces a uint64 index array.
        std::shared_ptr<VectorFunction> makeIndices_non_zero_function(std::string name,
                                                                   FunctionDoc doc) {
            // `name` is taken by value as a sink parameter: move it (like `doc`)
            // instead of copying it into the function object.
            auto func = std::make_shared<VectorFunction>(std::move(name), Arity::Unary(), std::move(doc));

            // Shared kernel configuration: output has no nulls, the kernel
            // allocates its own output, and chunked inputs must be handled in
            // a single call (indices span chunk boundaries).
            VectorKernel kernel;
            kernel.null_handling = NullHandling::OUTPUT_NOT_NULL;
            kernel.mem_allocation = MemAllocation::NO_PREALLOCATE;
            kernel.output_chunked = false;
            kernel.exec = IndicesNonZeroExec;
            kernel.exec_chunked = IndicesNonZeroExecChunked;
            kernel.can_execute_chunkwise = false;

            // Register the same kernel once per concrete input type.
            auto AddKernels = [&](const std::vector<std::shared_ptr<DataType>> &types) {
                for (const std::shared_ptr<DataType> &ty: types) {
                    kernel.signature = KernelSignature::create({ty}, uint64());
                    KCHECK_OK(func->add_kernel(kernel));
                }
            };

            AddKernels(numeric_types());
            AddKernels({boolean()});

            // Decimals are matched by type id rather than a concrete type.
            for (const auto &ty: {Type::DECIMAL128, Type::DECIMAL256}) {
                kernel.signature = KernelSignature::create({ty}, uint64());
                KCHECK_OK(func->add_kernel(kernel));
            }

            return func;
        }

    }  // namespace

    // Register all vector-selection functions (array_filter, filter,
    // array_take, take, drop_null, indices_nonzero) with `registry`.
    void register_vector_selection(FunctionRegistry *registry) {
        // --- Filter ---
        std::vector<SelectionKernelData> filter_kernels;
        PopulateFilterKernels(&filter_kernels);

        VectorKernel base_filter_kernel;
        base_filter_kernel.init = FilterState::init;
        register_selection_function("array_filter", array_filter_doc, base_filter_kernel,
                                  std::move(filter_kernels), GetDefaultFilterOptions(),
                                  registry);

        KCHECK_OK(registry->add_function(MakeFilterMetaFunction()));

        // --- Take ---
        std::vector<SelectionKernelData> take_kernels;
        PopulateTakeKernels(&take_kernels);

        VectorKernel base_take_kernel;
        base_take_kernel.init = TakeState::init;
        // Take indices may reference any chunk, so chunkwise execution is off.
        base_take_kernel.can_execute_chunkwise = false;
        register_selection_function("array_take", array_take_doc, base_take_kernel,
                                  std::move(take_kernels), GetDefaultTakeOptions(), registry);

        KCHECK_OK(registry->add_function(MakeTakeMetaFunction()));

        // --- DropNull ---
        KCHECK_OK(registry->add_function(std::make_shared<DropNullMetaFunction>()));

        // --- IndicesNonZero ---
        KCHECK_OK(registry->add_function(
                makeIndices_non_zero_function("indices_nonzero", indices_nonzero_doc)));
    }


}  // namespace nebula::compute::internal
