// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/array/concatenate.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include <nebula/core/array.h>
#include <nebula/array/builder_primitive.h>
#include <nebula/array/builder_run_end.h>
#include <nebula/array/data.h>
#include <nebula/array/util.h>
#include <nebula/core/buffer.h>

#include <turbo/utility/status.h>
#include <nebula/types/type.h>
#include <nebula/types/type_fwd.h>
#include <nebula/bits/bit_block_counter.h>
#include <nebula/bits/bit_run_reader.h>
#include <nebula/bits/bit_util.h>
#include <nebula/bits/bitmap_ops.h>
#include <turbo/base/checked_cast.h>
#include <nebula/numeric/int_util.h>
#include <nebula/numeric/int_util_overflow.h>
#include <nebula/util/list_util.h>
#include <turbo/log/logging.h>
#include <nebula/util/ree_util.h>
#include <nebula/util/slice_util_internal.h>
#include <nebula/core/visit_data_inline.h>
#include <nebula/core/visit_type_inline.h>

namespace nebula {

    using internal::SafeSignedAdd;

    namespace {
        /// offset, length pair for representing a Range of a buffer or array
        struct Range {
            // offset < 0 (the default) marks a Range that has not been assigned yet.
            int64_t offset = -1, length = 0;

            Range() = default;

            Range(int64_t o, int64_t l) : offset(o), length(l) {}
        };

        /// non-owning view into a range of bits
        struct Bitmap {
            Bitmap() = default;

            Bitmap(const uint8_t *d, Range r) : data(d), range(r) {}

            // A null buffer yields data == nullptr, i.e. AllSet() below.
            explicit Bitmap(const std::shared_ptr<Buffer> &buffer, Range r)
                    : Bitmap(buffer ? buffer->data() : nullptr, r) {}

            const uint8_t *data = nullptr;
            Range range;

            // An absent bitmap means every bit is treated as set (all-valid);
            // see ConcatenateBitmaps, which emits set bits for this case.
            bool AllSet() const { return data == nullptr; }
        };

        // Allocate a buffer and concatenate bitmaps into it.
        turbo::Status ConcatenateBitmaps(const std::vector<Bitmap> &bitmaps, MemoryPool *pool,
                                         std::shared_ptr<Buffer> *out) {
            int64_t out_length = 0;
            for (const auto &bitmap: bitmaps) {
                if (internal::AddWithOverflow(out_length, bitmap.range.length, &out_length)) {
                    return turbo::invalid_argument_error("Length overflow when concatenating arrays");
                }
            }
            TURBO_MOVE_OR_RAISE(*out, allocate_bitmap(out_length, pool));
            uint8_t *dst = (*out)->mutable_data();

            int64_t bitmap_offset = 0;
            for (auto bitmap: bitmaps) {
                if (bitmap.AllSet()) {
                    bit_util::SetBitsTo(dst, bitmap_offset, bitmap.range.length, true);
                } else {
                    internal::CopyBitmap(bitmap.data, bitmap.range.offset, bitmap.range.length, dst,
                                         bitmap_offset);
                }
                bitmap_offset += bitmap.range.length;
            }

            return turbo::OkStatus();
        }

        // Total number of bytes held across all buffers in the vector.
        int64_t SumBufferSizesInBytes(const BufferVector &buffers) {
            int64_t total_bytes = 0;
            for (const auto &buf: buffers) total_bytes += buf->size();
            return total_bytes;
        }

        // write offsets in src into dst, adjusting them such that first_offset
        // will be the first offset written. On return, *values_range holds the
        // range of the values buffer spanned by the offsets in src.
        // (Forward declaration; defined below, used by ConcatenateOffsets.)
        template<typename Offset>
        turbo::Status PutOffsets(BufferSpan src, Offset first_offset, Offset *dst,
                                 Range *values_range);

        // concatenate buffers holding offsets into a single buffer of offsets,
        // also computing the ranges of values spanned by each buffer of offsets.
        template<typename Offset>
        turbo::Status ConcatenateOffsets(const BufferVector &buffers, MemoryPool *pool,
                                         std::shared_ptr<Buffer> *out,
                                         std::vector<Range> *values_ranges) {
            values_ranges->resize(buffers.size());

            // allocate output buffer
            // (one extra Offset slot is reserved for the final "end" offset written
            // after the loop)
            const int64_t out_size_in_bytes = SumBufferSizesInBytes(buffers);
            TURBO_MOVE_OR_RAISE(*out, allocate_buffer(sizeof(Offset) + out_size_in_bytes, pool));
            auto *out_data = (*out)->mutable_data_as<Offset>();

            int64_t elements_length = 0;  // number of offsets written so far
            Offset values_length = 0;     // total values length spanned so far
            for (size_t i = 0; i < buffers.size(); ++i) {
                // the first offset from buffers[i] will be adjusted to values_length
                // (the cumulative length of values spanned by offsets in previous buffers)
                TURBO_RETURN_NOT_OK(PutOffsets<Offset>(*buffers[i], values_length,
                                                       out_data + elements_length, &(*values_ranges)[i]));
                elements_length += buffers[i]->size() / sizeof(Offset);
                // PutOffsets has already verified first_offset + length fits in Offset,
                // so this accumulation cannot overflow.
                values_length += static_cast<Offset>((*values_ranges)[i].length);
            }

            // the final element in out_data is the length of all values spanned by the offsets
            out_data[out_size_in_bytes / sizeof(Offset)] = values_length;
            return turbo::OkStatus();
        }

        template<typename Offset>
        turbo::Status PutOffsets(BufferSpan src, Offset first_offset, Offset *dst,
                                 Range *values_range) {
            if (src.size() == 0) {
                // It's allowed to have an empty offsets buffer for a 0-length array
                // (see Array::Validate)
                values_range->offset = 0;
                values_range->length = 0;
                return turbo::OkStatus();
            }

            // Get the range of offsets to transfer from src
            auto src_begin = span_data_as<Offset>(src);
            auto src_end = reinterpret_cast<const Offset *>(src.data() + src.size());

            // Compute the range of values which is spanned by this range of offsets.
            // NOTE(review): *src_end deliberately reads one element past the span; this
            // assumes src was sliced to exclude each array's final "end" offset, which
            // is still present in the underlying allocation — confirm against the
            // Buffers() slicing logic before changing.
            values_range->offset = src_begin[0];
            values_range->length = *src_end - values_range->offset;
            if (first_offset > std::numeric_limits<Offset>::max() - values_range->length) {
                return turbo::invalid_argument_error("offset overflow while concatenating arrays");
            }

            // write offsets into dst, ensuring that the first offset written is
            // first_offset
            auto displacement = first_offset - src_begin[0];
            // NOTE: concatenate can be called during IPC reads to append delta dictionaries.
            // Avoid UB on non-validated input by doing the addition in the unsigned domain.
            // (the result can later be validated using Array::validate_full)
            std::transform(src_begin, src_end, dst, [displacement](Offset offset) {
                return SafeSignedAdd(offset, displacement);
            });
            return turbo::OkStatus();
        }

        // Write list-view offsets from src into dst, applying displacement to every
        // valid non-empty entry and zeroing the sizes of null entries.
        // (Forward declaration; defined below, used by ConcatenateListViewOffsets.)
        template<typename offset_type>
        turbo::Status PutListViewOffsets(const ArrayData &input, offset_type *sizes, BufferSpan src,
                                         offset_type displacement, offset_type *dst);

        // concatenate buffers holding list-view offsets into a single buffer of offsets
        //
        // value_ranges contains the relevant ranges of values in the child array actually
        // referenced to by the views. Most commonly, these ranges will start from 0,
        // but when that is not the case, we need to adjust the displacement of offsets.
        // The concatenated child array does not contain values from the beginning
        // if they are not referenced to by any view.
        //
        // The child arrays and the sizes buffer are used to ensure we can trust the offsets in
        // offset_buffers to be within the valid range.
        //
        // This function also mutates sizes so that null list-view entries have size 0.
        //
        // \param[in] in The child arrays
        // \param[in,out] sizes The concatenated sizes buffer
        template<typename offset_type>
        turbo::Status ConcatenateListViewOffsets(const ArrayDataVector &in, offset_type *sizes,
                                                 const BufferVector &offset_buffers,
                                                 const std::vector<Range> &value_ranges,
                                                 MemoryPool *pool, std::shared_ptr<Buffer> *out) {
                    DKCHECK_EQ(offset_buffers.size(), value_ranges.size());

            // Allocate resulting offsets buffer and initialize it with zeros
            // (PutListViewOffsets relies on null entries staying zeroed).
            const int64_t out_size_in_bytes = SumBufferSizesInBytes(offset_buffers);
            TURBO_MOVE_OR_RAISE(*out, allocate_buffer(out_size_in_bytes, pool));
            memset((*out)->mutable_data(), 0, static_cast<size_t>((*out)->size()));

            auto *out_offsets = (*out)->mutable_data_as<offset_type>();

            int64_t num_child_values = 0;  // values already emitted by earlier inputs
            int64_t elements_length = 0;   // list-view entries already emitted
            for (size_t i = 0; i < offset_buffers.size(); ++i) {
                // Shift this input's offsets so they point past the values of earlier
                // inputs, compensating for the start of the range actually used.
                const auto displacement =
                        static_cast<offset_type>(num_child_values - value_ranges[i].offset);
                TURBO_RETURN_NOT_OK(PutListViewOffsets(*in[i], /*sizes=*/sizes + elements_length,
                        /*src=*/*offset_buffers[i], displacement,
                        /*dst=*/out_offsets + elements_length));
                elements_length += offset_buffers[i]->size() / sizeof(offset_type);
                num_child_values += value_ranges[i].length;
                if (num_child_values > std::numeric_limits<offset_type>::max()) {
                    return turbo::invalid_argument_error("offset overflow while concatenating arrays");
                }
            }
                    DKCHECK_EQ(elements_length,
                                     static_cast<int64_t>(out_size_in_bytes / sizeof(offset_type)));

            return turbo::OkStatus();
        }

        template<typename offset_type>
        turbo::Status PutListViewOffsets(const ArrayData &input, offset_type *sizes, BufferSpan src,
                                         offset_type displacement, offset_type *dst) {
            if (src.size() == 0) {
                return turbo::OkStatus();
            }
            const auto &validity_buffer = input.buffers[0];
            if (validity_buffer) {
                // Ensure that it is safe to access all the bits in the validity bitmap of input.
                TURBO_RETURN_NOT_OK(internal::CheckSliceParams(/*size=*/8 * validity_buffer->size(),
                                                                        input.offset, input.length, "buffer"));
            }

            const auto offsets = span_data_as<offset_type>(src);
                    DKCHECK_EQ(static_cast<int64_t>(src.size() / sizeof(offset_type)), input.length);

            // Displace the offset of a single valid entry; empty entries keep the
            // zero that the destination buffer was initialized with.
            auto visit_not_null = [&](int64_t position) {
                if (sizes[position] > 0) {
                    // NOTE: concatenate can be called during IPC reads to append delta
                    // dictionaries. Avoid UB on non-validated input by doing the addition in the
                    // unsigned domain. (the result can later be validated using
                    // Array::validate_full)
                    const auto displaced_offset = SafeSignedAdd(offsets[position], displacement);
                    // displaced_offset>=0 is guaranteed by RangeOfValuesUsed returning the
                    // smallest offset of valid and non-empty list-views.
                            DKCHECK_GE(displaced_offset, 0);
                    dst[position] = displaced_offset;
                } else {
                    // Do nothing to leave the dst[position] as 0.
                }
            };

            // Iterate the validity bitmap in blocks to fast-path all-set and
            // none-set runs.
            const auto *validity = validity_buffer ? validity_buffer->data_as<uint8_t>() : nullptr;
            internal::OptionalBitBlockCounter bit_counter(validity, input.offset, input.length);
            int64_t position = 0;
            while (position < input.length) {
                internal::BitBlockCount block = bit_counter.NextBlock();
                if (block.AllSet()) {
                    for (int64_t i = 0; i < block.length; ++i, ++position) {
                        visit_not_null(position);
                    }
                } else if (block.NoneSet()) {
                    // NOTE: we don't have to do anything for the null entries regarding the
                    // offsets as the buffer is initialized to 0 when it is allocated.

                    // Zero-out the sizes of the null entries to ensure these sizes are not
                    // greater than the new values length of the concatenated array.
                    memset(sizes + position, 0, block.length * sizeof(offset_type));
                    position += block.length;
                } else {
                    // Mixed block: test each bit individually.
                    for (int64_t i = 0; i < block.length; ++i, ++position) {
                        if (bit_util::get_bit(validity, input.offset + position)) {
                            visit_not_null(position);
                        } else {
                            // Zero-out the size at position.
                            sizes[position] = 0;
                        }
                    }
                }
            }
            return turbo::OkStatus();
        }

        class ConcatenateImpl {
        public:
            // Pre-computes the shape of the concatenated output: summed length,
            // propagated null count, and empty buffer/child slots mirroring in_[0].
            // Requires in to be non-empty (in_[0] is dereferenced unconditionally).
            ConcatenateImpl(const ArrayDataVector &in, MemoryPool *pool)
                    : in_(in), pool_(pool), out_(std::make_shared<ArrayData>()) {
                out_->type = in_[0]->type;
                for (const auto &in_array: in_) {
                    // Input lengths may not be validated yet; add without signed-overflow UB.
                    out_->length = SafeSignedAdd(out_->length, in_array->length);
                    if (out_->null_count == kUnknownNullCount ||
                        in_array->null_count == kUnknownNullCount) {
                        // Any unknown input null count makes the total unknown.
                        out_->null_count = kUnknownNullCount;
                        continue;
                    }
                    out_->null_count =
                            SafeSignedAdd(out_->null_count.load(), in_array->null_count.load());
                }
                out_->buffers.resize(in_[0]->buffers.size());
                out_->child_data.resize(in_[0]->child_data.size());
                for (auto &data: out_->child_data) {
                    data = std::make_shared<ArrayData>();
                }
            }

            // Runs the concatenation: the validity bitmap first (when the type can
            // carry one and nulls are possible), then the type-specific buffers via
            // the Visit overloads. Rvalue-qualified: consumes *this (out_ is moved).
            turbo::Status concatenate(std::shared_ptr<ArrayData> *out) &&{
                if (out_->null_count != 0 && internal::may_have_validity_bitmap(out_->type->id())) {
                    TURBO_RETURN_NOT_OK(ConcatenateBitmaps(Bitmaps(0), pool_, &out_->buffers[0]));
                }
                TURBO_RETURN_NOT_OK(visit_type_inline(*out_->type, this));
                *out = std::move(out_);
                return turbo::OkStatus();
            }

            // Null arrays carry no buffers, so there is nothing to concatenate.
            turbo::Status Visit(const NullType &) { return turbo::OkStatus(); }

            // Boolean values are bit-packed, so buffer 1 is concatenated as a bitmap.
            turbo::Status Visit(const BooleanType &) {
                return ConcatenateBitmaps(Bitmaps(1), pool_, &out_->buffers[1]);
            }

            turbo::Status Visit(const FixedWidthType &fixed) {
                // Handles numbers, decimal128, decimal256, fixed_size_binary:
                // values are a plain byte-wise concatenation of buffer 1.
                TURBO_MOVE_OR_RAISE(auto buffers, Buffers(1, fixed));
                return concatenate_buffers(buffers, pool_).try_value(&out_->buffers[1]);
            }

            // Variable-size binary/utf8: rebase the int32 offsets (buffer 1), then
            // concatenate only the value ranges those offsets actually span (buffer 2).
            turbo::Status Visit(const BinaryType &) {
                std::vector<Range> value_ranges;
                TURBO_MOVE_OR_RAISE(auto index_buffers, Buffers(1, sizeof(int32_t)));
                TURBO_RETURN_NOT_OK(ConcatenateOffsets<int32_t>(index_buffers, pool_, &out_->buffers[1],
                                                                &value_ranges));
                TURBO_MOVE_OR_RAISE(auto value_buffers, Buffers(2, value_ranges));
                return concatenate_buffers(value_buffers, pool_).try_value(&out_->buffers[2]);
            }

            // Same as the BinaryType overload, but with 64-bit offsets.
            turbo::Status Visit(const LargeBinaryType &) {
                std::vector<Range> value_ranges;
                TURBO_MOVE_OR_RAISE(auto index_buffers, Buffers(1, sizeof(int64_t)));
                TURBO_RETURN_NOT_OK(ConcatenateOffsets<int64_t>(index_buffers, pool_, &out_->buffers[1],
                                                                &value_ranges));
                TURBO_MOVE_OR_RAISE(auto value_buffers, Buffers(2, value_ranges));
                return concatenate_buffers(value_buffers, pool_).try_value(&out_->buffers[2]);
            }

            turbo::Status Visit(const BinaryViewType &type) {
                // Layout: buffers[0] validity, buffers[1] views, buffers[2..] variadic
                // data buffers. Keep the first two slots and append every input's data
                // buffers, which are shared rather than copied.
                out_->buffers.resize(2);

                for (const auto &in_data: in_) {
                    for (const auto &buf: turbo::span(in_data->buffers.data(), in_data->buffers.size()).subspan(2)) {
                        out_->buffers.push_back(buf);
                    }
                }

                TURBO_MOVE_OR_RAISE(auto view_buffers, Buffers(1, BinaryViewType::kSize));
                TURBO_MOVE_OR_RAISE(auto view_buffer, concatenate_buffers(view_buffers, pool_));

                auto *views = view_buffer->mutable_data_as<BinaryViewType::c_type>();
                size_t preceding_buffer_count = 0;

                // Rebase buffer_index of every non-inline view from the second input
                // onward by the number of data buffers contributed by earlier inputs.
                // The first input's views keep their indices, so start at its length.
                int64_t i = in_[0]->length;
                for (size_t in_index = 1; in_index < in_.size(); ++in_index) {
                    preceding_buffer_count += in_[in_index - 1]->buffers.size() - 2;

                    for (int64_t end_i = i + in_[in_index]->length; i < end_i; ++i) {
                        if (views[i].is_inline()) continue;
                        views[i].ref.buffer_index = SafeSignedAdd(
                                views[i].ref.buffer_index, static_cast<int32_t>(preceding_buffer_count));
                    }
                }

                if (out_->buffers[0] != nullptr) {
                    // NOTE(review): this starts at in_[0]->length too, so null views of
                    // the first input are left untouched — presumably already sanitized
                    // upstream; confirm that invariant before relying on it.
                    i = in_[0]->length;
                    VisitNullBitmapInline(
                            out_->buffers[0]->data(), i, out_->length - i, out_->null_count, [&] { ++i; },
                            [&] {
                                views[i++] = {};  // overwrite views under null bits with an empty view
                            });
                }

                out_->buffers[1] = std::move(view_buffer);
                return turbo::OkStatus();
            }

            // List: rebase the int32 offsets (buffer 1), then recursively concatenate
            // the referenced ranges of the values child array.
            turbo::Status Visit(const ListType &) {
                std::vector<Range> value_ranges;
                TURBO_MOVE_OR_RAISE(auto index_buffers, Buffers(1, sizeof(int32_t)));
                TURBO_RETURN_NOT_OK(ConcatenateOffsets<int32_t>(index_buffers, pool_, &out_->buffers[1],
                                                                &value_ranges));
                TURBO_MOVE_OR_RAISE(auto child_data, ChildData(0, value_ranges));
                return ConcatenateImpl(child_data, pool_).concatenate(&out_->child_data[0]);
            }

            // Same as the ListType overload, but with 64-bit offsets.
            turbo::Status Visit(const LargeListType &) {
                std::vector<Range> value_ranges;
                TURBO_MOVE_OR_RAISE(auto index_buffers, Buffers(1, sizeof(int64_t)));
                TURBO_RETURN_NOT_OK(ConcatenateOffsets<int64_t>(index_buffers, pool_, &out_->buffers[1],
                                                                &value_ranges));
                TURBO_MOVE_OR_RAISE(auto child_data, ChildData(0, value_ranges));
                return ConcatenateImpl(child_data, pool_).concatenate(&out_->child_data[0]);
            }

            // ListView / LargeListView: buffers are [validity, offsets, sizes] plus
            // one values child. Only the value ranges actually referenced by views
            // are concatenated, and offsets are displaced accordingly.
            template<typename T>
            enable_if_list_view<T, turbo::Status> Visit(const T &type) {
                using offset_type = typename T::offset_type;
                out_->buffers.resize(3);
                out_->child_data.resize(1);

                // Calculate the ranges of values that each list-view array uses
                std::vector<Range> value_ranges;
                value_ranges.reserve(in_.size());
                for (const auto &input: in_) {
                    ArraySpan input_span(*input);
                    Range range;
                    TURBO_MOVE_OR_RAISE(std::tie(range.offset, range.length),
                                        list_util::internal::RangeOfValuesUsed(input_span));
                    value_ranges.push_back(range);
                }

                // concatenate the values
                TURBO_MOVE_OR_RAISE(ArrayDataVector value_data, ChildData(0, value_ranges));
                TURBO_RETURN_NOT_OK(ConcatenateImpl(value_data, pool_).concatenate(&out_->child_data[0]));
                out_->child_data[0]->type = type.get_value_type();

                // concatenate the sizes first
                // (ConcatenateListViewOffsets below mutates the sizes buffer in place,
                // zeroing the sizes of null entries)
                TURBO_MOVE_OR_RAISE(auto size_buffers, Buffers(2, sizeof(offset_type)));
                TURBO_RETURN_NOT_OK(concatenate_buffers(size_buffers, pool_).try_value(&out_->buffers[2]));

                // concatenate the offsets
                TURBO_MOVE_OR_RAISE(auto offset_buffers, Buffers(1, sizeof(offset_type)));
                TURBO_RETURN_NOT_OK(ConcatenateListViewOffsets<offset_type>(
                        in_, /*sizes=*/out_->buffers[2]->mutable_data_as<offset_type>(), offset_buffers,
                        value_ranges, pool_, &out_->buffers[1]));

                return turbo::OkStatus();
            }

            // Fixed-size list: no offsets buffer; only the values child needs
            // concatenating. ChildData is given the list size — presumably to scale
            // each input's slice by list_size(); confirm against ChildData's definition.
            turbo::Status Visit(const FixedSizeListType &fixed_size_list) {
                TURBO_MOVE_OR_RAISE(auto child_data, ChildData(0, fixed_size_list.list_size()));
                return ConcatenateImpl(child_data, pool_).concatenate(&out_->child_data[0]);
            }

            // Structs hold no value buffers of their own: recursively concatenate
            // each field's child arrays.
            turbo::Status Visit(const StructType &s) {
                const int num_fields = s.num_fields();
                for (int field = 0; field < num_fields; ++field) {
                    TURBO_MOVE_OR_RAISE(auto field_data, ChildData(field));
                    TURBO_RETURN_NOT_OK(
                            ConcatenateImpl(field_data, pool_).concatenate(&out_->child_data[field]));
                }
                return turbo::OkStatus();
            }

            // Builds a dictionary unified across all inputs and stores it in
            // out_->dictionary. Returns, per input, a transposition buffer mapping
            // that input's dictionary indices to indices in the unified dictionary.
            turbo::Result<BufferVector> UnifyDictionaries(const DictionaryType &d) {
                BufferVector new_index_lookup;
                TURBO_MOVE_OR_RAISE(auto unifier, DictionaryUnifier::create(d.get_value_type()));
                new_index_lookup.resize(in_.size());
                for (size_t i = 0; i < in_.size(); i++) {
                    // Bind by const reference: the original copied the shared_ptr,
                    // paying an atomic refcount round-trip per input for nothing.
                    const auto &item = in_[i];
                    auto dictionary_array = make_array(item->dictionary);
                    TURBO_RETURN_NOT_OK(unifier->unify(*dictionary_array, &new_index_lookup[i]));
                }
                std::shared_ptr<Array> out_dictionary;
                TURBO_RETURN_NOT_OK(unifier->get_result_with_index_type(d.index_type(), &out_dictionary));
                out_->dictionary = out_dictionary->data();
                return new_index_lookup;
            }

            // transpose and concatenate dictionary indices
            turbo::Result<std::shared_ptr<Buffer>> ConcatenateDictionaryIndices(
                    const DataType &index_type, const BufferVector &index_transpositions) {
                const auto index_width =
                        turbo::checked_cast<const FixedWidthType &>(index_type).bit_width() / 8;
                int64_t out_length = 0;
                for (const auto &data: in_) {
                    out_length += data->length;
                }
                TURBO_MOVE_OR_RAISE(auto out, allocate_buffer(out_length * index_width, pool_));
                uint8_t *out_data = out->mutable_data();
                for (size_t i = 0; i < in_.size(); i++) {
                    const auto &data = in_[i];
                    auto transpose_map =
                            reinterpret_cast<const int32_t *>(index_transpositions[i]->data());
                    const uint8_t *src = data->get_values<uint8_t>(1, 0);
                    if (!data->buffers[0]) {
                        TURBO_RETURN_NOT_OK(internal::TransposeInts(index_type, index_type,
                                /*src=*/data->get_values<uint8_t>(1, 0),
                                /*dest=*/out_data,
                                /*src_offset=*/data->offset,
                                /*dest_offset=*/0, /*length=*/data->length,
                                                                    transpose_map));
                    } else {
                        internal::BitRunReader reader(data->buffers[0]->data(), data->offset,
                                                      data->length);
                        int64_t position = 0;
                        while (true) {
                            internal::BitRun run = reader.NextRun();
                            if (run.length == 0) break;

                            if (run.set) {
                                TURBO_RETURN_NOT_OK(internal::TransposeInts(index_type, index_type, src,
                                        /*dest=*/out_data,
                                        /*src_offset=*/data->offset + position,
                                        /*dest_offset=*/position, run.length,
                                                                            transpose_map));
                            } else {
                                std::fill(out_data + (position * index_width),
                                          out_data + (position + run.length) * index_width, 0x00);
                            }

                            position += run.length;
                        }
                    }
                    out_data += data->length * index_width;
                }
                // R build with openSUSE155 requires an explicit shared_ptr construction
                return std::shared_ptr<Buffer>(std::move(out));
            }

            turbo::Status Visit(const DictionaryType &d) {
                auto fixed = turbo::checked_cast<const FixedWidthType *>(d.index_type().get());

                // Two cases: all the dictionaries are the same, or unification is
                // required
                bool dictionaries_same = true;
                std::shared_ptr<Array> dictionary0 = make_array(in_[0]->dictionary);
                for (size_t i = 1; i < in_.size(); ++i) {
                    if (!make_array(in_[i]->dictionary)->equals(dictionary0)) {
                        dictionaries_same = false;
                        break;
                    }
                }

                TURBO_MOVE_OR_RAISE(auto index_buffers, Buffers(1, *fixed));
                if (dictionaries_same) {
                    // Fast path: reuse the shared dictionary and concatenate the raw
                    // index buffers unchanged.
                    out_->dictionary = in_[0]->dictionary;
                    return concatenate_buffers(index_buffers, pool_).try_value(&out_->buffers[1]);
                } else {
                    // Slow path: unify the dictionaries, then transpose every input's
                    // indices into the unified index space.
                    TURBO_MOVE_OR_RAISE(auto index_lookup, UnifyDictionaries(d));
                    TURBO_MOVE_OR_RAISE(out_->buffers[1],
                                        ConcatenateDictionaryIndices(*fixed, index_lookup));
                    return turbo::OkStatus();
                }
            }

            turbo::Status Visit(const UnionType &u) {
                // This implementation assumes that all input arrays are valid union arrays
                // with same number of variants.

                // concatenate the type buffers.
                TURBO_MOVE_OR_RAISE(auto type_buffers, Buffers(1, sizeof(int8_t)));
                TURBO_RETURN_NOT_OK(concatenate_buffers(type_buffers, pool_).try_value(&out_->buffers[1]));

                // concatenate the child data. For sparse unions the child data is sliced
                // based on the offset and length of the array data. For dense unions the
                // child data is not sliced because this makes constructing the concatenated
                // offsets buffer more simple. We could however choose to modify this and
                // slice the child arrays and reflect this in the concatenated offsets
                // buffer.
                switch (u.mode()) {
                    case UnionMode::SPARSE: {
                        for (int i = 0; i < u.num_fields(); i++) {
                            TURBO_MOVE_OR_RAISE(auto child_data, ChildData(i));
                            TURBO_RETURN_NOT_OK(
                                    ConcatenateImpl(child_data, pool_).concatenate(&out_->child_data[i]));
                        }
                        break;
                    }
                    case UnionMode::DENSE: {
                        for (int i = 0; i < u.num_fields(); i++) {
                            // Children are concatenated whole (unsliced); see comment above.
                            ArrayDataVector child_data(in_.size());
                            for (size_t j = 0; j < in_.size(); j++) {
                                child_data[j] = in_[j]->child_data[i];
                            }
                            TURBO_RETURN_NOT_OK(
                                    ConcatenateImpl(child_data, pool_).concatenate(&out_->child_data[i]));
                        }
                        break;
                    }
                }

                // concatenate offsets buffers for dense union arrays.
                if (u.mode() == UnionMode::DENSE) {
                    // The number of offset values is equal to the number of type_ids in the
                    // concatenated type buffers.
                    TypedBufferBuilder<int32_t> builder;
                    TURBO_RETURN_NOT_OK(builder.Reserve(out_->length));

                    // Initialize a vector for child array lengths. These are updated during
                    // iteration over the input arrays to track the concatenated child array
                    // lengths. These lengths are used as offsets for the concatenated offsets
                    // buffer.
                    std::vector<int32_t> offset_map(u.num_fields());

                    // Iterate over all input arrays.
                    for (size_t i = 0; i < in_.size(); i++) {
                        // Get sliced type ids and offsets.
                        auto type_ids = in_[i]->get_values<int8_t>(1);
                        auto offset_values = in_[i]->get_values<int32_t>(2);

                        // Iterate over all elements in the type buffer and append the updated
                        // offset to the concatenated offsets buffer.
                        for (auto j = 0; j < in_[i]->length; j++) {
                            int32_t offset;
                            // Each entry's offset is displaced by the accumulated length of
                            // its variant's child arrays from earlier inputs.
                            if (internal::AddWithOverflow(offset_map[u.child_ids()[type_ids[j]]],
                                                          offset_values[j], &offset)) {
                                return turbo::invalid_argument_error("Offset value overflow when concatenating arrays");
                            }
                            TURBO_RETURN_NOT_OK(builder.append(offset));
                        }

                        // Increment the offsets in the offset map for the next iteration.
                        for (int j = 0; j < u.num_fields(); j++) {
                            int64_t length;
                            if (internal::AddWithOverflow(static_cast<int64_t>(offset_map[j]),
                                                          in_[i]->child_data[j]->length, &length)) {
                                return turbo::invalid_argument_error("Offset value overflow when concatenating arrays");
                            }
                            // Make sure we can safely downcast to int32_t.
                            if (length > std::numeric_limits<int32_t>::max()) {
                                return turbo::invalid_argument_error("Length overflow when concatenating arrays");
                            }
                            offset_map[j] = static_cast<int32_t>(length);
                        }
                    }

                    TURBO_MOVE_OR_RAISE(out_->buffers[2], builder.finish());
                }

                return turbo::OkStatus();
            }

            turbo::Status Visit(const RunEndEncodedType &type) {
                // Sum the physical (run) lengths of all inputs, guarding against
                // int64_t overflow since inputs may be non-validated.
                int64_t total_runs = 0;
                for (const auto &array_data: in_) {
                    const int64_t runs = ree_util::FindPhysicalLength(ArraySpan(*array_data));
                    if (internal::AddWithOverflow(total_runs, runs, &total_runs)) {
                        return turbo::invalid_argument_error("Length overflow when concatenating arrays");
                    }
                }
                // Build the concatenated result by appending each input slice into a
                // run-end-encoded builder pre-sized for the total number of runs.
                TURBO_MOVE_OR_RAISE(auto builder, MakeBuilder(in_[0]->type, pool_));
                auto &ree_builder = turbo::checked_cast<RunEndEncodedBuilder &>(*builder);
                TURBO_RETURN_NOT_OK(ree_builder.ReservePhysical(total_runs));
                for (const auto &array_data: in_) {
                    TURBO_RETURN_NOT_OK(
                            builder->append_array_slice(ArraySpan(*array_data), 0, array_data->length));
                }
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Array> result, builder->finish());
                out_ = result->data();
                return turbo::OkStatus();
            }

            turbo::Status Visit(const ExtensionType &e) {
                // Concatenate the underlying storage arrays, then restore the
                // extension type on the result.
                ArrayDataVector storage(in_.size());
                for (size_t i = 0; i < in_.size(); ++i) {
                    auto copy = in_[i]->copy();
                    copy->type = e.storage_type();
                    storage[i] = std::move(copy);
                }
                std::shared_ptr<ArrayData> concatenated;
                TURBO_RETURN_NOT_OK(ConcatenateImpl(storage, pool_).concatenate(&concatenated));
                concatenated->type = in_[0]->type;
                out_ = std::move(concatenated);
                return turbo::OkStatus();
            }

        private:
            // NOTE: concatenate() can be called during IPC reads to append delta dictionaries
            // on non-validated input.  Therefore, the input-checking SliceBufferSafe and
            // ArrayData::slice_safe are used below.

            // Gather the index-th buffer of each input into a vector.
            // Bytes are sliced with that input's offset and length.
            // Note that BufferVector will not contain the buffer of in_[i] if it's
            // nullptr.
            turbo::Result<BufferVector> Buffers(size_t index) {
                BufferVector out;
                out.reserve(in_.size());
                for (size_t i = 0; i < in_.size(); ++i) {
                    const auto &buf = in_[i]->buffers[index];
                    if (buf == nullptr) {
                        continue;  // absent buffers are skipped, not represented
                    }
                    // SliceBufferSafe validates the range (inputs may be non-validated).
                    TURBO_MOVE_OR_RAISE(auto slice,
                                        SliceBufferSafe(buf, in_[i]->offset, in_[i]->length));
                    out.push_back(std::move(slice));
                }
                return out;
            }

            // Gather the index-th buffer of each input into a vector.
            // Bytes are sliced with the explicitly passed ranges.
            // Note that BufferVector will not contain the buffer of in_[i] if it's
            // nullptr.
            turbo::Result<BufferVector> Buffers(size_t index, const std::vector<Range> &ranges) {
                DKCHECK_EQ(in_.size(), ranges.size());
                BufferVector out;
                out.reserve(in_.size());
                for (size_t i = 0; i < in_.size(); ++i) {
                    const auto &buf = in_[i]->buffers[index];
                    if (buf == nullptr) {
                        // A missing buffer must correspond to an empty range.
                        DKCHECK_EQ(ranges[i].length, 0);
                        continue;
                    }
                    // SliceBufferSafe validates the range (inputs may be non-validated).
                    TURBO_MOVE_OR_RAISE(auto slice,
                                        SliceBufferSafe(buf, ranges[i].offset, ranges[i].length));
                    out.push_back(std::move(slice));
                }
                return out;
            }

            // Gather the index-th buffer of each input into a vector.
            // Buffers are assumed to contain elements of the given byte_width,
            // those elements are sliced with that input's offset and length.
            // Note that BufferVector will not contain the buffer of in_[i] if it's
            // nullptr.
            turbo::Result<BufferVector> Buffers(size_t index, int byte_width) {
                BufferVector buffers;
                buffers.reserve(in_.size());
                for (const auto &array_data: in_) {
                    const auto &buffer = array_data->buffers[index];
                    if (buffer != nullptr) {
                        // Convert the element offset/length to byte units with an
                        // explicit overflow check: inputs may be non-validated, and
                        // signed int64_t overflow would otherwise be undefined behavior.
                        int64_t byte_offset = 0;
                        int64_t byte_length = 0;
                        if (internal::MultiplyWithOverflow(array_data->offset,
                                                           static_cast<int64_t>(byte_width),
                                                           &byte_offset) ||
                            internal::MultiplyWithOverflow(array_data->length,
                                                           static_cast<int64_t>(byte_width),
                                                           &byte_length)) {
                            return turbo::invalid_argument_error(
                                    "Offset value overflow when concatenating arrays");
                        }
                        TURBO_MOVE_OR_RAISE(auto sliced_buffer,
                                            SliceBufferSafe(buffer, byte_offset, byte_length));
                        buffers.push_back(std::move(sliced_buffer));
                    }
                }
                return buffers;
            }

            // Gather the index-th buffer of each input into a vector.
            // Buffers are assumed to contain elements of fixed.bit_width(),
            // those elements are sliced with that input's offset and length.
            // Note that BufferVector will not contain the buffer of in_[i] if it's
            // nullptr.
            turbo::Result<BufferVector> Buffers(size_t index, const FixedWidthType &fixed) {
                // Only whole-byte widths are supported by this overload.
                const int bit_width = fixed.bit_width();
                DKCHECK_EQ(bit_width % 8, 0);
                return Buffers(index, bit_width / 8);
            }

            // Gather the index-th buffer of each input as a Bitmap
            // into a vector of Bitmaps.
            std::vector<Bitmap> Bitmaps(size_t index) {
                std::vector<Bitmap> bitmaps(in_.size());
                for (size_t i = 0; i < in_.size(); ++i) {
                    Range range(in_[i]->offset, in_[i]->length);
                    bitmaps[i] = Bitmap(in_[i]->buffers[index], range);
                }
                return bitmaps;
            }

            // Gather the index-th child_data of each input into a vector.
            // Elements are sliced with that input's offset and length.
            turbo::Result<ArrayDataVector> ChildData(size_t index) {
                ArrayDataVector result(in_.size());
                for (size_t i = 0; i < in_.size(); ++i) {
                    const auto &array_data = in_[i];
                    // slice_safe validates the range (inputs may be non-validated).
                    TURBO_MOVE_OR_RAISE(result[i],
                                        array_data->child_data[index]->slice_safe(
                                                array_data->offset, array_data->length));
                }
                return result;
            }

            // Gather the index-th child_data of each input into a vector.
            // Elements are sliced with that input's offset and length multiplied by multiplier.
            turbo::Result<ArrayDataVector> ChildData(size_t index, size_t multiplier) {
                ArrayDataVector child_data(in_.size());
                // Use a signed multiplier: the original expression mixed int64_t
                // offset/length with a size_t factor, silently converting the signed
                // operand to unsigned and letting any overflow wrap.
                const auto factor = static_cast<int64_t>(multiplier);
                for (size_t i = 0; i < in_.size(); ++i) {
                    int64_t offset = 0;
                    int64_t length = 0;
                    // Check the multiplications explicitly; inputs may be non-validated.
                    if (internal::MultiplyWithOverflow(in_[i]->offset, factor, &offset) ||
                        internal::MultiplyWithOverflow(in_[i]->length, factor, &length)) {
                        return turbo::invalid_argument_error(
                                "Offset value overflow when concatenating arrays");
                    }
                    TURBO_MOVE_OR_RAISE(child_data[i],
                                        in_[i]->child_data[index]->slice_safe(offset, length));
                }
                return child_data;
            }

            // Gather the index-th child_data of each input into a vector.
            // Elements are sliced with the explicitly passed ranges.
            turbo::Result<ArrayDataVector> ChildData(size_t index, const std::vector<Range> &ranges) {
                DKCHECK_EQ(in_.size(), ranges.size());
                ArrayDataVector result(in_.size());
                for (size_t i = 0; i < in_.size(); ++i) {
                    const Range &r = ranges[i];
                    // slice_safe validates the range (inputs may be non-validated).
                    TURBO_MOVE_OR_RAISE(result[i],
                                        in_[i]->child_data[index]->slice_safe(r.offset, r.length));
                }
                return result;
            }

            const ArrayDataVector &in_;
            MemoryPool *pool_;
            std::shared_ptr<ArrayData> out_;
        };

    }  // namespace

    /// \brief Concatenate a vector of identically-typed arrays into a single array.
    ///
    /// \param arrays the arrays to concatenate; must be non-empty and all share one type
    /// \param pool memory pool used to allocate the output buffers
    /// \return the concatenated array, or an invalid-argument error when the input
    ///         is empty, the input types differ, or an offset/length overflow occurs
    turbo::Result<std::shared_ptr<Array>> concatenate(const ArrayVector &arrays, MemoryPool *pool) {
        if (arrays.empty()) {
            return turbo::invalid_argument_error("Must pass at least one array");
        }

        // Gather the ArrayData of the inputs, verifying every type against the
        // first (dereferenced once, outside the loop).
        const auto &expected_type = *arrays[0]->type();
        ArrayDataVector data(arrays.size());
        for (size_t i = 0; i < arrays.size(); ++i) {
            if (!arrays[i]->type()->equals(expected_type)) {
                return turbo::invalid_argument_error("arrays to be concatenated must be identically typed, but ",
                                                     expected_type, " and ", *arrays[i]->type(),
                                                     " were encountered.");
            }
            data[i] = arrays[i]->data();
        }

        std::shared_ptr<ArrayData> out_data;
        TURBO_RETURN_NOT_OK(ConcatenateImpl(data, pool).concatenate(&out_data));
        return make_array(std::move(out_data));
    }

}  // namespace nebula
