// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>

#include <nebula/core/array.h>
#include <nebula/compute/api_vector.h>
#include <nebula/compute/kernels/chunked_internal.h>
#include <nebula/core/table.h>
#include <nebula/types/type.h>
#include <nebula/types/type_traits.h>

namespace nebula::compute::internal {

// Visit all physical types for which sorting is implemented.
//
// Expands VISIT(TypeClass) once per sortable physical type.  Used to stamp
// out per-type code (e.g. the ColumnComparatorFactory visitor below)
// without repeating the type list at each call site.  NullType is not part
// of this list; callers that support it add it separately.
#define VISIT_SORTABLE_PHYSICAL_TYPES(VISIT) \
  VISIT(BooleanType)                         \
  VISIT(Int8Type)                            \
  VISIT(Int16Type)                           \
  VISIT(Int32Type)                           \
  VISIT(Int64Type)                           \
  VISIT(UInt8Type)                           \
  VISIT(UInt16Type)                          \
  VISIT(UInt32Type)                          \
  VISIT(UInt64Type)                          \
  VISIT(Fp32Type)                           \
  VISIT(Fp64Type)                          \
  VISIT(BinaryType)                          \
  VISIT(LargeBinaryType)                     \
  VISIT(FixedSizeBinaryType)                 \
  VISIT(Decimal128Type)                      \
  VISIT(Decimal256Type)

// NOTE: std::partition is usually faster than std::stable_partition.

    struct NonStablePartitioner {
        // Reorder [indices_begin, indices_end) so that all elements
        // satisfying `pred` come first.  Relative order within each group
        // is NOT preserved.  Returns a pointer to the first element of the
        // second (pred-false) group.
        template<typename Predicate>
        uint64_t *operator()(uint64_t *indices_begin, uint64_t *indices_end, Predicate &&pred) {
            return std::partition(indices_begin, indices_end, std::forward<Predicate>(pred));
        }
    };

    struct StablePartitioner {
        // Reorder [indices_begin, indices_end) so that all elements
        // satisfying `pred` come first, preserving the relative order
        // within each group.  Returns a pointer to the first element of
        // the second (pred-false) group.
        template<typename Predicate>
        uint64_t *operator()(uint64_t *indices_begin, uint64_t *indices_end, Predicate &&pred) {
            return std::stable_partition(indices_begin, indices_end,
                                         std::forward<Predicate>(pred));
        }
    };

    // Trait describing whether a type has "null-like" values in addition to
    // real (validity-bitmap) nulls.  The primary template says no.
    template<typename TypeClass, typename Enable = void>
    struct NullTraits {
        using has_null_like_values = std::false_type;
    };

    // Floating-point types have NaN, which is treated like a null when
    // sorting (see ValueComparator and PartitionNullLikes below).
    template<typename TypeClass>
    struct NullTraits<TypeClass, enable_if_physical_floating_point<TypeClass>> {
        using has_null_like_values = std::true_type;
    };

    // Convenience alias: std::true_type / std::false_type per TypeClass.
    template<typename TypeClass>
    using has_null_like_values = typename NullTraits<TypeClass>::has_null_like_values;

// Compare two values, taking NaNs into account

    template<typename Type, typename Enable = void>
    struct ValueComparator;

    // Specialization for types without null-like values: a plain three-way
    // comparison, with the sign flipped for descending order.
    // `null_placement` is unused because no value of such a type sorts like
    // a null.
    template<typename Type>
    struct ValueComparator<Type, enable_if_t<!has_null_like_values<Type>::value>> {
        template<typename Value>
        static int compare(const Value &left, const Value &right, SortOrder order,
                           NullPlacement null_placement) {
            int result = 0;
            if (!(left == right)) {
                result = (left > right) ? 1 : -1;
            }
            return order == SortOrder::Descending ? -result : result;
        }
    };

    // Specialization for floating-point (null-like) types: NaNs compare
    // equal to each other and are ordered relative to every other value
    // according to `null_placement` only — the sort order does not flip
    // NaN placement.
    template<typename Type>
    struct ValueComparator<Type, enable_if_t<has_null_like_values<Type>::value>> {
        template<typename Value>
        static int compare(const Value &left, const Value &right, SortOrder order,
                           NullPlacement null_placement) {
            const bool left_is_nan = std::isnan(left);
            const bool right_is_nan = std::isnan(right);
            if (left_is_nan || right_is_nan) {
                if (left_is_nan && right_is_nan) {
                    return 0;
                }
                // Exactly one side is NaN: it goes first when nulls are at
                // the start, last otherwise.
                const int nan_first = left_is_nan ? -1 : 1;
                return null_placement == NullPlacement::AtStart ? nan_first : -nan_first;
            }
            int result = 0;
            if (!(left == right)) {
                result = (left > right) ? 1 : -1;
            }
            return order == SortOrder::Descending ? -result : result;
        }
    };

    // Three-way comparison entry point: dispatches to the ValueComparator
    // specialization for `Type` (NaN-aware for floating-point types).
    // Returns <0, 0 or >0.
    template<typename Type, typename Value>
    int CompareTypeValues(const Value &left, const Value &right, SortOrder order,
                          NullPlacement null_placement) {
        return ValueComparator<Type>::compare(left, right, order, null_placement);
    }

    // Describes how a range of indices has been split into a contiguous
    // null region and a contiguous non-null region (either may be empty).
    // Depending on NullPlacement, the null region sits before or after the
    // non-null one; the two regions together cover the whole range.
    struct NullPartitionResult {
        uint64_t *non_nulls_begin;
        uint64_t *non_nulls_end;
        uint64_t *nulls_begin;
        uint64_t *nulls_end;

        // First element of the combined (null + non-null) range.
        [[nodiscard]] uint64_t *overall_begin() const {
            return std::min(nulls_begin, non_nulls_begin);
        }

        // One past the last element of the combined range.
        [[nodiscard]] uint64_t *overall_end() const {
            return std::max(nulls_end, non_nulls_end);
        }

        [[nodiscard]] int64_t non_null_count() const { return non_nulls_end - non_nulls_begin; }

        [[nodiscard]] int64_t null_count() const { return nulls_end - nulls_begin; }

        // The whole range is non-null; the (empty) null region is anchored
        // at the side given by `null_placement`.
        static NullPartitionResult NoNulls(uint64_t *indices_begin, uint64_t *indices_end,
                                           NullPlacement null_placement) {
            return null_placement == NullPlacement::AtStart
                           ? NullPartitionResult{indices_begin, indices_end,
                                                 indices_begin, indices_begin}
                           : NullPartitionResult{indices_begin, indices_end,
                                                 indices_end, indices_end};
        }

        // The whole range is null; the (empty) non-null region is anchored
        // at the opposite side.
        static NullPartitionResult NullsOnly(uint64_t *indices_begin, uint64_t *indices_end,
                                             NullPlacement null_placement) {
            return null_placement == NullPlacement::AtStart
                           ? NullPartitionResult{indices_end, indices_end,
                                                 indices_begin, indices_end}
                           : NullPartitionResult{indices_begin, indices_begin,
                                                 indices_begin, indices_end};
        }

        // Non-nulls in [indices_begin, midpoint), nulls in [midpoint, indices_end).
        static NullPartitionResult NullsAtEnd(uint64_t *indices_begin, uint64_t *indices_end,
                                              uint64_t *midpoint) {
            DKCHECK_GE(midpoint, indices_begin);
            DKCHECK_LE(midpoint, indices_end);
            return {indices_begin, midpoint, midpoint, indices_end};
        }

        // Nulls in [indices_begin, midpoint), non-nulls in [midpoint, indices_end).
        static NullPartitionResult NullsAtStart(uint64_t *indices_begin, uint64_t *indices_end,
                                                uint64_t *midpoint) {
            DKCHECK_GE(midpoint, indices_begin);
            DKCHECK_LE(midpoint, indices_end);
            return {midpoint, indices_end, indices_begin, midpoint};
        }
    };

    // Move nulls (not null-like values) to end of array.
    //
    // `offset` is used when this is called on a chunk of a chunked array
    template<typename Partitioner>
    NullPartitionResult PartitionNullsOnly(uint64_t *indices_begin, uint64_t *indices_end,
                                           const Array &values, int64_t offset,
                                           NullPlacement null_placement) {
        // Fast path: nothing to move if the array holds no nulls.
        if (values.null_count() == 0) {
            return NullPartitionResult::NoNulls(indices_begin, indices_end, null_placement);
        }
        Partitioner partition;
        const auto index_is_null = [&values, offset](uint64_t ind) {
            return values.is_null(ind - offset);
        };
        if (null_placement == NullPlacement::AtStart) {
            uint64_t *boundary = partition(indices_begin, indices_end, index_is_null);
            return NullPartitionResult::NullsAtStart(indices_begin, indices_end, boundary);
        }
        uint64_t *boundary = partition(indices_begin, indices_end,
                                       [&](uint64_t ind) { return !index_is_null(ind); });
        return NullPartitionResult::NullsAtEnd(indices_begin, indices_end, boundary);
    }

    // Move non-null null-like values to end of array.
    //
    // `offset` is used when this is called on a chunk of a chunked array
    //
    // Overload for types without null-like values (no NaN): there is
    // nothing to partition, so the whole range counts as non-null.
    template<typename ArrayType, typename Partitioner>
    enable_if_t<!has_null_like_values<typename ArrayType::TypeClass>::value,
            NullPartitionResult>
    PartitionNullLikes(uint64_t *indices_begin, uint64_t *indices_end,
                       const ArrayType &values, int64_t offset,
                       NullPlacement null_placement) {
        return NullPartitionResult::NoNulls(indices_begin, indices_end, null_placement);
    }

    // Overload for floating-point types: group NaN values (which sort like
    // nulls) on the side given by `null_placement`.
    template<typename ArrayType, typename Partitioner>
    enable_if_t<has_null_like_values<typename ArrayType::TypeClass>::value,
            NullPartitionResult>
    PartitionNullLikes(uint64_t *indices_begin, uint64_t *indices_end,
                       const ArrayType &values, int64_t offset,
                       NullPlacement null_placement) {
        Partitioner partition;
        const auto value_is_nan = [&values, offset](uint64_t ind) {
            return std::isnan(values.get_view(ind - offset));
        };
        if (null_placement == NullPlacement::AtStart) {
            uint64_t *boundary = partition(indices_begin, indices_end, value_is_nan);
            return NullPartitionResult::NullsAtStart(indices_begin, indices_end, boundary);
        }
        uint64_t *boundary = partition(indices_begin, indices_end,
                                       [&](uint64_t ind) { return !value_is_nan(ind); });
        return NullPartitionResult::NullsAtEnd(indices_begin, indices_end, boundary);
    }

    // Move nulls to end of array.
    //
    // `offset` is used when this is called on a chunk of a chunked array
    template<typename ArrayType, typename Partitioner>
    NullPartitionResult PartitionNulls(uint64_t *indices_begin, uint64_t *indices_end,
                                       const ArrayType &values, int64_t offset,
                                       NullPlacement null_placement) {
        // Partition real nulls at start (resp. end), then null-like values
        // (NaN) just inside them, and return the union of both null regions.
        const NullPartitionResult nulls = PartitionNullsOnly<Partitioner>(
                indices_begin, indices_end, values, offset, null_placement);
        const NullPartitionResult null_likes = PartitionNullLikes<ArrayType, Partitioner>(
                nulls.non_nulls_begin, nulls.non_nulls_end, values, offset, null_placement);
        return NullPartitionResult{null_likes.non_nulls_begin, null_likes.non_nulls_end,
                                   std::min(null_likes.nulls_begin, nulls.nulls_begin),
                                   std::max(null_likes.nulls_end, nulls.nulls_end)};
    }

    //
    // Null partitioning on chunked arrays
    //

    // Chunked-array variant: indices are resolved to (chunk, position)
    // through `resolver`.  `null_count` allows skipping the pass entirely.
    template<typename Partitioner>
    NullPartitionResult PartitionNullsOnly(uint64_t *indices_begin, uint64_t *indices_end,
                                           const ChunkedArrayResolver &resolver,
                                           int64_t null_count, NullPlacement null_placement) {
        if (null_count == 0) {
            return NullPartitionResult::NoNulls(indices_begin, indices_end, null_placement);
        }
        Partitioner partition;
        const auto index_is_null = [&](uint64_t ind) {
            return resolver.resolve(ind).is_null();
        };
        if (null_placement == NullPlacement::AtStart) {
            uint64_t *boundary = partition(indices_begin, indices_end, index_is_null);
            return NullPartitionResult::NullsAtStart(indices_begin, indices_end, boundary);
        }
        uint64_t *boundary = partition(indices_begin, indices_end,
                                       [&](uint64_t ind) { return !index_is_null(ind); });
        return NullPartitionResult::NullsAtEnd(indices_begin, indices_end, boundary);
    }

    // Chunked-array overload for types without null-like values (no NaN):
    // nothing to partition, so the whole range counts as non-null.
    template<typename ArrayType, typename Partitioner>
    enable_if_t<!has_null_like_values<typename ArrayType::TypeClass>::value,
            NullPartitionResult>
    PartitionNullLikes(uint64_t *indices_begin, uint64_t *indices_end,
                       const ChunkedArrayResolver &resolver, NullPlacement null_placement) {
        return NullPartitionResult::NoNulls(indices_begin, indices_end, null_placement);
    }

    // Chunked-array overload for floating-point types: group NaN values on
    // the side given by `null_placement`.
    template<typename ArrayType, typename Partitioner,
            typename TypeClass = typename ArrayType::TypeClass>
    enable_if_t<has_null_like_values<TypeClass>::value, NullPartitionResult>
    PartitionNullLikes(uint64_t *indices_begin, uint64_t *indices_end,
                       const ChunkedArrayResolver &resolver, NullPlacement null_placement) {
        Partitioner partition;
        const auto value_is_nan = [&](uint64_t ind) {
            return std::isnan(resolver.resolve(ind).value<TypeClass>());
        };
        if (null_placement == NullPlacement::AtStart) {
            uint64_t *boundary = partition(indices_begin, indices_end, value_is_nan);
            return NullPartitionResult::NullsAtStart(indices_begin, indices_end, boundary);
        }
        uint64_t *boundary = partition(indices_begin, indices_end,
                                       [&](uint64_t ind) { return !value_is_nan(ind); });
        return NullPartitionResult::NullsAtEnd(indices_begin, indices_end, boundary);
    }

    // Chunked-array variant of PartitionNulls: real nulls first, then
    // null-like values (NaN), returning the union of both null regions.
    template<typename ArrayType, typename Partitioner>
    NullPartitionResult PartitionNulls(uint64_t *indices_begin, uint64_t *indices_end,
                                       const ChunkedArrayResolver &resolver,
                                       int64_t null_count, NullPlacement null_placement) {
        const NullPartitionResult nulls = PartitionNullsOnly<Partitioner>(
                indices_begin, indices_end, resolver, null_count, null_placement);
        const NullPartitionResult null_likes = PartitionNullLikes<ArrayType, Partitioner>(
                nulls.non_nulls_begin, nulls.non_nulls_end, resolver, null_placement);
        return NullPartitionResult{null_likes.non_nulls_begin, null_likes.non_nulls_end,
                                   std::min(null_likes.nulls_begin, nulls.nulls_begin),
                                   std::max(null_likes.nulls_end, nulls.nulls_end)};
    }

    // Machinery for merging two adjacent index partitions (each already
    // split into null / non-null regions) into one.  The element-wise merge
    // steps are injected as callables so the same layout bookkeeping can be
    // reused for different value types and orderings.
    struct MergeImpl {
        // Merges the null runs [nulls_begin, nulls_middle) and
        // [nulls_middle, nulls_end), using temp_indices as scratch space.
        using MergeNullsFunc = std::function<void(uint64_t *nulls_begin, uint64_t *nulls_middle,
                                                  uint64_t *nulls_end, uint64_t *temp_indices,
                                                  int64_t null_count)>;

        // Merges the non-null runs [range_begin, range_middle) and
        // [range_middle, range_end), using temp_indices as scratch space.
        using MergeNonNullsFunc =
                std::function<void(uint64_t *range_begin, uint64_t *range_middle,
                                   uint64_t *range_end, uint64_t *temp_indices)>;

        MergeImpl(NullPlacement null_placement, MergeNullsFunc &&merge_nulls,
                  MergeNonNullsFunc &&merge_non_nulls)
                : null_placement_(null_placement),
                  merge_nulls_(std::move(merge_nulls)),
                  merge_non_nulls_(std::move(merge_non_nulls)) {}

        // Allocates the scratch buffer handed to the merge callables.
        // NOTE(review): the size is computed with sizeof(int64_t) but the
        // buffer is accessed as uint64_t — same width, but the types should
        // match; confirm intent.
        turbo::Status init(ExecContext *ctx, int64_t temp_indices_length) {
            TURBO_MOVE_OR_RAISE(
                    temp_buffer_,
                    allocate_buffer(sizeof(int64_t) * temp_indices_length, ctx->memory_pool()));
            temp_indices_ = reinterpret_cast<uint64_t *>(temp_buffer_->mutable_data());
            return turbo::OkStatus();
        }

        // Merges two adjacent partitions, dispatching on null placement.
        // `left` and `right` must be contiguous in memory (checked in the
        // per-placement implementations).
        NullPartitionResult Merge(const NullPartitionResult &left,
                                  const NullPartitionResult &right, int64_t null_count) const {
            if (null_placement_ == NullPlacement::AtStart) {
                return MergeNullsAtStart(left, right, null_count);
            } else {
                return MergeNullsAtEnd(left, right, null_count);
            }
        }

        NullPartitionResult MergeNullsAtStart(const NullPartitionResult &left,
                                              const NullPartitionResult &right,
                                              int64_t null_count) const {
            // Input layout:
            // [left nulls .... left non-nulls .... right nulls .... right non-nulls]
            DKCHECK_EQ(left.nulls_end, left.non_nulls_begin);
            DKCHECK_EQ(left.non_nulls_end, right.nulls_begin);
            DKCHECK_EQ(right.nulls_end, right.non_nulls_begin);

            // Mutate the input, stably, to obtain the following layout:
            // [left nulls .... right nulls .... left non-nulls .... right non-nulls]
            std::rotate(left.non_nulls_begin, right.nulls_begin, right.nulls_end);

            const auto p = NullPartitionResult::NullsAtStart(
                    left.nulls_begin, right.non_nulls_end,
                    left.nulls_begin + left.null_count() + right.null_count());

            // If the type has null-like values (such as NaN), ensure those plus regular
            // nulls are partitioned in the right order.  Note this assumes that all
            // null-like values (e.g. NaN) are ordered equally.
            if (p.null_count()) {
                merge_nulls_(p.nulls_begin, p.nulls_begin + left.null_count(), p.nulls_end,
                             temp_indices_, null_count);
            }

            // Merge the non-null values into temp area
            DKCHECK_EQ(right.non_nulls_begin - p.non_nulls_begin, left.non_null_count());
            DKCHECK_EQ(p.non_nulls_end - right.non_nulls_begin, right.non_null_count());
            if (p.non_null_count()) {
                merge_non_nulls_(p.non_nulls_begin, right.non_nulls_begin, p.non_nulls_end,
                                 temp_indices_);
            }
            return p;
        }

        NullPartitionResult MergeNullsAtEnd(const NullPartitionResult &left,
                                            const NullPartitionResult &right,
                                            int64_t null_count) const {
            // Input layout:
            // [left non-nulls .... left nulls .... right non-nulls .... right nulls]
            DKCHECK_EQ(left.non_nulls_end, left.nulls_begin);
            DKCHECK_EQ(left.nulls_end, right.non_nulls_begin);
            DKCHECK_EQ(right.non_nulls_end, right.nulls_begin);

            // Mutate the input, stably, to obtain the following layout:
            // [left non-nulls .... right non-nulls .... left nulls .... right nulls]
            std::rotate(left.nulls_begin, right.non_nulls_begin, right.non_nulls_end);

            const auto p = NullPartitionResult::NullsAtEnd(
                    left.non_nulls_begin, right.nulls_end,
                    left.non_nulls_begin + left.non_null_count() + right.non_null_count());

            // If the type has null-like values (such as NaN), ensure those plus regular
            // nulls are partitioned in the right order.  Note this assumes that all
            // null-like values (e.g. NaN) are ordered equally.
            if (p.null_count()) {
                merge_nulls_(p.nulls_begin, p.nulls_begin + left.null_count(), p.nulls_end,
                             temp_indices_, null_count);
            }

            // Merge the non-null values into temp area
            DKCHECK_EQ(left.non_nulls_end - p.non_nulls_begin, left.non_null_count());
            DKCHECK_EQ(p.non_nulls_end - left.non_nulls_end, right.non_null_count());
            if (p.non_null_count()) {
                merge_non_nulls_(p.non_nulls_begin, left.non_nulls_end, p.non_nulls_end,
                                 temp_indices_);
            }
            return p;
        }

    private:
        NullPlacement null_placement_;
        MergeNullsFunc merge_nulls_;
        MergeNonNullsFunc merge_non_nulls_;
        // Scratch buffer owned here; temp_indices_ aliases its storage.
        std::unique_ptr<Buffer> temp_buffer_;
        uint64_t *temp_indices_ = nullptr;
    };

// TODO make this usable if indices are non trivial on input
// (see ConcreteRecordBatchColumnSorter)
// `offset` is used when this is called on a chunk of a chunked array
//
// Sorts [indices_begin, indices_end) over a single Array and returns the
// resulting null/non-null partition.
    using ArraySortFunc = std::function<turbo::Result<NullPartitionResult>(
            uint64_t *indices_begin, uint64_t *indices_end, const Array &values, int64_t offset,
            const ArraySortOptions &options, ExecContext *ctx)>;

    // Returns the sort function for `type`, or an error if the type is not
    // sortable.
    turbo::Result<ArraySortFunc> GetArraySorter(const DataType &type);

    // Sorts indices over a chunked array as a whole.
    turbo::Result<NullPartitionResult> SortChunkedArray(ExecContext *ctx, uint64_t *indices_begin,
                                                        uint64_t *indices_end,
                                                        const ChunkedArray &chunked_array,
                                                        SortOrder sort_order,
                                                        NullPlacement null_placement);

    // Overload taking pre-computed physical type and chunks (see
    // GetPhysicalChunks below).
    turbo::Result<NullPartitionResult> SortChunkedArray(
            ExecContext *ctx, uint64_t *indices_begin, uint64_t *indices_end,
            const std::shared_ptr<DataType> &physical_type, const ArrayVector &physical_chunks,
            SortOrder sort_order, NullPlacement null_placement);

    // Sorts indices over a StructArray.
    turbo::Result<NullPartitionResult> SortStructArray(ExecContext *ctx, uint64_t *indices_begin,
                                                       uint64_t *indices_end,
                                                       const StructArray &array,
                                                       SortOrder sort_order,
                                                       NullPlacement null_placement);

// ----------------------------------------------------------------------
// Helpers for Sort/SelectK/Rank implementations

    // A sort key resolved against a schema: the field's path, the requested
    // sort order, and the field's (unowned) type.
    struct SortField {
        // Defaulted constructor relies on the member initializers below so
        // that a default-constructed SortField holds deterministic values
        // rather than indeterminate ones (the previous default constructor
        // left `order` and `type` uninitialized).
        SortField() = default;

        SortField(FieldPath path, SortOrder order, const DataType *type)
                : path(std::move(path)), order(order), type(type) {}

        SortField(int index, SortOrder order, const DataType *type)
                : SortField(FieldPath({index}), order, type) {}

        // A nested key addresses a child field (path deeper than one level).
        [[nodiscard]] bool is_nested() const { return path.indices().size() > 1; }

        FieldPath path;
        SortOrder order = SortOrder::Ascending;
        const DataType *type = nullptr;
    };

    // Rejects nested field references: sort keys must address top-level
    // columns.
    inline turbo::Status CheckNonNested(const FieldRef &ref) {
        if (!ref.is_nested()) {
            return turbo::OkStatus();
        }
        return turbo::unavailable_error("Nested keys not supported for SortKeys");
    }

    // Decorates an error status with a sort-key-specific prefix; successful
    // results pass through untouched.
    template<typename T>
    turbo::Result<T> PrependInvalidColumn(turbo::Result<T> res) {
        if (!res.ok()) {
            return res.status().with_message("Invalid sort key column: ", res.status().message());
        }
        return res;
    }

    // Return the field indices of the sort keys, deduplicating them along
    // the way.  Errors if a key does not resolve against `schema`.
    turbo::Result<std::vector<SortField>> find_sort_keys(const Schema &schema,
                                                         const std::vector<SortKey> &sort_keys);

    // Resolves `sort_keys` against `schema` (deduplicating via
    // find_sort_keys), then builds one ResolvedSortKey per resolved field
    // through `factory`.
    template<typename ResolvedSortKey, typename ResolvedSortKeyFactory>
    turbo::Result<std::vector<ResolvedSortKey>> ResolveSortKeys(
            const Schema &schema, const std::vector<SortKey> &sort_keys,
            ResolvedSortKeyFactory &&factory) {
        TURBO_MOVE_OR_RAISE(const auto fields, find_sort_keys(schema, sort_keys));
        std::vector<ResolvedSortKey> keys;
        keys.reserve(fields.size());
        for (const auto &field: fields) {
            TURBO_MOVE_OR_RAISE(auto key, factory(field));
            keys.push_back(std::move(key));
        }
        return keys;
    }

    // Resolves sort keys directly against a Table or RecordBatch, producing
    // ResolvedSortKeys that reference the object's columns (flattened for
    // nested keys).
    template<typename ResolvedSortKey, typename TableOrBatch>
    turbo::Result<std::vector<ResolvedSortKey>> ResolveSortKeys(
            const TableOrBatch &table_or_batch, const std::vector<SortKey> &sort_keys) {
        auto factory = [&](const SortField &f) -> turbo::Result<ResolvedSortKey> {
            if (!f.is_nested()) {
                return ResolvedSortKey{table_or_batch.column(f.path[0]), f.order};
            }
            // TODO: Some room for improvement here, as we potentially duplicate some of the
            // null-flattening work for nested sort keys. For instance, given two keys with
            // paths [0,0,0,0] and [0,0,0,1], we shouldn't need to flatten the first three
            // components more than once.
            TURBO_MOVE_OR_RAISE(auto child, f.path.get_flattened(table_or_batch));
            return ResolvedSortKey{std::move(child), f.order};
        };
        return ResolveSortKeys<ResolvedSortKey>(*table_or_batch.schema(), sort_keys, factory);
    }

// Returns an error status if no column matching `ref` is found, or if the
// FieldRef is a nested reference.
    inline turbo::Result<std::shared_ptr<ChunkedArray>> GetColumn(const Table &table,
                                                                  const FieldRef &ref) {
        TURBO_RETURN_NOT_OK(CheckNonNested(ref));
        TURBO_MOVE_OR_RAISE(auto path, ref.find_one(*table.schema()));
        return table.column(path[0]);
    }

    // RecordBatch variant of GetColumn(): rejects nested references, then
    // resolves `ref` to a single column of `batch`.
    inline turbo::Result<std::shared_ptr<Array>> GetColumn(const RecordBatch &batch,
                                                           const FieldRef &ref) {
        TURBO_RETURN_NOT_OK(CheckNonNested(ref));
        return ref.get_one(batch);
    }

// We could try to reproduce the concrete Array classes' facilities
// (such as cached raw values pointer) in a separate hierarchy of
// physical accessors, but doing so ends up too cumbersome.
// Instead, we simply create the desired concrete Array objects.
    inline std::shared_ptr<Array> GetPhysicalArray(
            const Array &array, const std::shared_ptr<DataType> &physical_type) {
        // Copy the array's data object, retag it with the physical type,
        // and rebuild a concrete Array from it.
        auto data = array.data()->copy();
        data->type = physical_type;
        return make_array(std::move(data));
    }

    // Retypes every chunk in `chunks` to its physical representation.
    inline ArrayVector GetPhysicalChunks(const ArrayVector &chunks,
                                         const std::shared_ptr<DataType> &physical_type) {
        ArrayVector physical;
        physical.reserve(chunks.size());
        for (const auto &chunk: chunks) {
            physical.push_back(GetPhysicalArray(*chunk, physical_type));
        }
        return physical;
    }

    // Convenience overload operating on a ChunkedArray's chunk vector.
    inline ArrayVector GetPhysicalChunks(const ChunkedArray &chunked_array,
                                         const std::shared_ptr<DataType> &physical_type) {
        return GetPhysicalChunks(chunked_array.chunks(), physical_type);
    }

// Compare two records in a single column (either from a batch or table)
    template<typename ResolvedSortKey>
    struct ColumnComparator {
        using Location = typename ResolvedSortKey::LocationType;

        ColumnComparator(const ResolvedSortKey &sort_key, NullPlacement null_placement)
                : sort_key_(sort_key), null_placement_(null_placement) {}

        virtual ~ColumnComparator() = default;

        // Three-way comparison of the records at `left` and `right`:
        // negative, zero or positive, like CompareTypeValues().
        virtual int compare(const Location &left, const Location &right) const = 0;

        ResolvedSortKey sort_key_;
        NullPlacement null_placement_;
    };

    // Concrete per-type column comparator: nulls are ordered according to
    // null_placement_; everything else delegates to CompareTypeValues()
    // for the physical Type.
    template<typename ResolvedSortKey, typename Type>
    struct ConcreteColumnComparator : public ColumnComparator<ResolvedSortKey> {
        using Location = typename ResolvedSortKey::LocationType;

        using ColumnComparator<ResolvedSortKey>::ColumnComparator;

        int compare(const Location &left, const Location &right) const override {
            const auto &key = this->sort_key_;

            const auto lhs = key.GetChunk(left);
            const auto rhs = key.GetChunk(right);
            // Only probe validity when the column actually contains nulls.
            if (key.null_count > 0) {
                const bool lhs_null = lhs.is_null();
                const bool rhs_null = rhs.is_null();
                if (lhs_null || rhs_null) {
                    if (lhs_null && rhs_null) {
                        return 0;
                    }
                    const int null_first = lhs_null ? -1 : 1;
                    return this->null_placement_ == NullPlacement::AtStart ? null_first
                                                                           : -null_first;
                }
            }
            return CompareTypeValues<Type>(lhs.template value<Type>(),
                                           rhs.template value<Type>(), key.order,
                                           this->null_placement_);
        }
    };

    // Specialization for all-null columns: every pair of records compares
    // equal.
    template<typename ResolvedSortKey>
    struct ConcreteColumnComparator<ResolvedSortKey, NullType>
            : public ColumnComparator<ResolvedSortKey> {
        using Location = typename ResolvedSortKey::LocationType;

        using ColumnComparator<ResolvedSortKey>::ColumnComparator;

        int compare(const Location &left, const Location &right) const override { return 0; }
    };

// Compare two records in the same RecordBatch or Table
// (indexing is handled through ResolvedSortKey)
    template<typename ResolvedSortKey>
    class MultipleKeyComparator {
    public:
        using Location = typename ResolvedSortKey::LocationType;

        // Builds one type-specialized ColumnComparator per sort key.
        // Any failure from comparator construction is folded into status_
        // via Status::operator&=; callers must check status() before using
        // compare() / equals().
        MultipleKeyComparator(const std::vector<ResolvedSortKey> &sort_keys,
                              NullPlacement null_placement)
                : sort_keys_(sort_keys), null_placement_(null_placement) {
            status_ &= MakeComparators();
        }

        turbo::Status status() const { return status_; }

        // Returns true if the left-th value should be ordered before the
        // right-th value, false otherwise. The start_sort_key_index-th
        // sort key and subsequent sort keys are used for comparison.
        bool compare(const Location &left, const Location &right, size_t start_sort_key_index) {
            return CompareInternal(left, right, start_sort_key_index) < 0;
        }

        // Returns true if the two records compare equal on every sort key
        // from start_sort_key_index onward.
        bool equals(const Location &left, const Location &right, size_t start_sort_key_index) {
            return CompareInternal(left, right, start_sort_key_index) == 0;
        }

    private:
        // Type visitor that instantiates the ConcreteColumnComparator
        // matching a sort key's physical type; unsupported types yield an
        // error status from the DataType fallback overload below.
        struct ColumnComparatorFactory {
            // Generate one Visit overload per sortable physical type, each
            // dispatching to VisitGeneric<TYPE>.
#define VISIT(TYPE) \
  turbo::Status Visit(const TYPE& type) { return VisitGeneric(type); }

            VISIT_SORTABLE_PHYSICAL_TYPES(VISIT)

            VISIT(NullType)

#undef VISIT

            // Fallback for every DataType not covered by the macro list.
            turbo::Status Visit(const DataType &type) {
                return turbo::failed_precondition_error("Unsupported type for batch or table sorting: ",
                                                        type.to_string());
            }

            template<typename Type>
            turbo::Status VisitGeneric(const Type &type) {
                // Brace-init routes to the constructor inherited from
                // ColumnComparator (sort key + null placement).
                res.reset(
                        new ConcreteColumnComparator <ResolvedSortKey, Type>{sort_key, null_placement});
                return turbo::OkStatus();
            }

            // NOTE: this struct is aggregate-initialized in MakeComparators()
            // as {sort_key, null_placement_, nullptr}, so the declaration
            // order of these three members must match that initializer order.
            const ResolvedSortKey &sort_key;
            NullPlacement null_placement;
            std::unique_ptr<ColumnComparator<ResolvedSortKey>> res;
        };

        // Creates one comparator per sort key, in sort-key order; returns
        // the first error encountered (fail-fast on unsupported types).
        turbo::Status MakeComparators() {
            column_comparators_.reserve(sort_keys_.size());

            for (const auto &sort_key: sort_keys_) {
                ColumnComparatorFactory factory{sort_key, null_placement_, nullptr};
                TURBO_RETURN_NOT_OK(visit_type_inline(*sort_key.type, &factory));
                column_comparators_.push_back(std::move(factory.res));
            }
            return turbo::OkStatus();
        }

        // Compare two records in the same table and return -1, 0 or 1.
        //
        // -1: The left is less than the right.
        // 0: The left equals to the right.
        // 1: The left is greater than the right.
        //
        // This supports null and NaN. Null is processed in this and NaN
        // is processed in CompareTypeValue().
        //
        // Keys are compared lexicographically: the first sort key (from
        // start_sort_key_index onward) that compares non-equal decides.
        int CompareInternal(const Location &left, const Location &right,
                            size_t start_sort_key_index) {
            const auto num_sort_keys = sort_keys_.size();
            for (size_t i = start_sort_key_index; i < num_sort_keys; ++i) {
                const int r = column_comparators_[i]->compare(left, right);
                if (r != 0) {
                    return r;
                }
            }
            return 0;
        }

        // Borrowed reference: the caller must keep the sort-key vector alive
        // for this comparator's lifetime.
        const std::vector<ResolvedSortKey> &sort_keys_;
        const NullPlacement null_placement_;
        // One comparator per sort key, parallel to sort_keys_.
        std::vector<std::unique_ptr<ColumnComparator<ResolvedSortKey>>> column_comparators_;
        turbo::Status status_;
    };

    // A sort key resolved against a single record-batch column: records are
    // addressed by a plain row index (int64_t).
    struct ResolvedRecordBatchSortKey {
        // NOTE(init order): members are initialized in declaration order, so
        // `owned_array` (the physical view of the input) must be declared
        // before the `array` reference member that binds to *owned_array.
        // Also, inside the `type` and `null_count` initializers, `array`
        // names the constructor PARAMETER (the shared_ptr), which shadows
        // the member of the same name — so null_count comes from the input
        // array, not the physical view.
        ResolvedRecordBatchSortKey(const std::shared_ptr<Array> &array, SortOrder order)
                : type(GetPhysicalType(array->type())),
                  owned_array(GetPhysicalArray(*array, type)),
                  array(*owned_array),
                  order(order),
                  null_count(array->null_count()) {}

        using LocationType = int64_t;

        // A record batch is a single "chunk": the location is the row index
        // into the one physical array.
        ResolvedChunk GetChunk(int64_t index) const { return {&array, index}; }

        const std::shared_ptr<DataType> type;  // physical type of the column
        std::shared_ptr<Array> owned_array;    // keeps the physical array alive
        const Array &array;                    // view bound to *owned_array
        SortOrder order;
        int64_t null_count;                    // null count of the input array
    };

    // A sort key resolved against a chunked table column: records are
    // addressed by a (chunk index, index-in-chunk) ChunkLocation.
    struct ResolvedTableSortKey {
        // NOTE(init order): `chunks` stores raw pointers into `owned_chunks`,
        // so `owned_chunks` must be declared (and thus initialized) before
        // `chunks` below.
        ResolvedTableSortKey(const std::shared_ptr<DataType> &type, ArrayVector chunks,
                             SortOrder order, int64_t null_count)
                : type(GetPhysicalType(type)),
                  owned_chunks(std::move(chunks)),
                  chunks(get_array_pointers(owned_chunks)),
                  order(order),
                  null_count(null_count) {}

        using LocationType = ::nebula::internal::ChunkLocation;

        // Resolve a chunk location to the concrete array + in-chunk index.
        ResolvedChunk GetChunk(::nebula::internal::ChunkLocation loc) const {
            return {chunks[loc.chunk_index], loc.index_in_chunk};
        }

        // Make a vector of ResolvedSortKeys for the sort keys and the given table.
        // `batches` must be a chunking of `table`.
        static turbo::Result<std::vector<ResolvedTableSortKey>> create(
                const Table &table, const RecordBatchVector &batches,
                const std::vector<SortKey> &sort_keys) {
            auto factory = [&](const SortField &f) -> turbo::Result<ResolvedTableSortKey> {
                // We must expose a homogenous chunking for all ResolvedSortKey,
                // so we can't simply access the column from the table directly.
                ArrayVector chunks;
                chunks.reserve(batches.size());
                int64_t null_count = 0;
                for (const auto &batch: batches) {
                    // Extract this sort field's column from the batch (the
                    // field path handles nested columns).
                    TURBO_MOVE_OR_RAISE(auto child, f.path.get_flattened(*batch));
                    null_count += child->null_count();
                    chunks.push_back(std::move(child));
                }

                return ResolvedTableSortKey(f.type->get_shared_ptr(), std::move(chunks), f.order,
                                            null_count);
            };

            return ::nebula::compute::internal::ResolveSortKeys<ResolvedTableSortKey>(
                    *table.schema(), sort_keys, factory);
        }

        std::shared_ptr<DataType> type;      // physical type of the column
        ArrayVector owned_chunks;            // owning handles for the chunks
        std::vector<const Array *> chunks;   // raw views into owned_chunks
        SortOrder order;
        int64_t null_count;                  // total nulls summed over chunks
    };

    // Allocate an uninitialized, writable uint64 ArrayData of `length`
    // elements from `memory_pool`. The validity-bitmap slot is left null and
    // null_count is set to 0 (i.e. no nulls).
    inline turbo::Result<std::shared_ptr<ArrayData>> MakeMutableUInt64Array(
            int64_t length, MemoryPool *memory_pool) {
        const auto byte_width = static_cast<int64_t>(sizeof(uint64_t));
        TURBO_MOVE_OR_RAISE(auto values, allocate_buffer(length * byte_width, memory_pool));
        // Buffer layout: {validity bitmap (absent), values}.
        return ArrayData::create(uint64(), length, {nullptr, std::move(values)},
                                 /*null_count=*/0);
    }

}  // namespace nebula::compute::internal
