// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/ipc/reader.h>

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include <flatbuffers/flatbuffers.h>  // IWYU pragma: export

#include <nebula/core/array.h>
#include <nebula/core/buffer.h>
#include <nebula/core/extension_type.h>
#include <nebula/io/caching.h>
#include <nebula/io/interfaces.h>
#include <nebula/io/memory.h>
#include <nebula/ipc/message.h>
#include <nebula/ipc/metadata_internal.h>
#include <nebula/ipc/reader_internal.h>
#include <nebula/ipc/writer.h>
#include <nebula/core/record_batch.h>
#include <nebula/core/sparse_tensor.h>
#include <turbo/utility/status.h>
#include <nebula/core/table.h>
#include <nebula/types/type.h>
#include <nebula/types/type_traits.h>
#include <nebula/bits/bit_util.h>
#include <nebula/bits/bitmap_ops.h>
#include <turbo/base/checked_cast.h>
#include <nebula/compression/compression.h>
#include <nebula/util/endian.h>
#include <nebula/util/key_value_metadata.h>
#include <turbo/log/logging.h>
#include <nebula/future/parallel.h>
#include <nebula/util/string.h>
#include <nebula/future/thread_pool.h>
#include <turbo/base/ubsan.h>
#include <nebula/util/vector.h>
#include <nebula/core/visit_type_inline.h>

#include <nebula/fbs/File_generated.h>  // IWYU pragma: export
#include <nebula/fbs/Message_generated.h>
#include <nebula/fbs/Schema_generated.h>
#include <nebula/fbs/SparseTensor_generated.h>

namespace nebula {

    namespace flatbuf = tech::kumo::nebula::flatbuf;

    namespace ipc {

        using internal::FileBlock;
        using internal::kArrowMagicBytes;

        namespace {

            // How a dictionary batch relates to dictionaries already seen:
            // a brand-new dictionary, an appended (delta) batch, or a
            // replacement of an existing dictionary.
            enum class DictionaryKind {
                New, Delta, Replacement
            };

            /// Build the io_error returned when an IPC message's type does not
            /// match what the caller expected.
            turbo::Status InvalidMessageType(MessageType expected, MessageType actual) {
                return turbo::io_error("Expected IPC message of type ", FormatMessageType(expected),
                                       " but got ", FormatMessageType(actual));
            }

// Return InvalidMessageType(...) from the enclosing function unless `actual`
// is the expected message type.
#define CHECK_MESSAGE_TYPE(expected, actual)           \
      do {                                                 \
        if ((actual) != (expected)) {                      \
          return InvalidMessageType((expected), (actual)); \
        }                                                  \
      } while (0)

// Return an io_error unless the message carries a non-null body.
#define CHECK_HAS_BODY(message)                                       \
      do {                                                                \
        if ((message).body() == nullptr) {                                \
          return turbo::io_error("Expected body in IPC message of type ", \
                                 FormatMessageType((message).type()));    \
        }                                                                 \
      } while (0)

// Return an io_error if the message unexpectedly declares a non-empty body.
#define CHECK_HAS_NO_BODY(message)                                      \
      do {                                                                  \
        if ((message).body_length() != 0) {                                 \
          return turbo::io_error("Unexpected body in IPC message of type ", \
                                 FormatMessageType((message).type()));      \
        }                                                                   \
      } while (0)

            // ----------------------------------------------------------------------
            // Record batch read path

            /// \brief Structure to keep common arguments to be passed
            /// \brief Structure to keep common arguments to be passed
            struct IpcReadContext {
                IpcReadContext(DictionaryMemo *memo, const IpcReadOptions &option, bool swap,
                               MetadataVersion version = MetadataVersion::V5,
                               CompressionType kind = CompressionType::UNCOMPRESSED)
                        : dictionary_memo(memo),
                          options(option),
                          metadata_version(version),
                          compression(kind),
                          swap_endian(swap) {}

                /// Dictionary state accumulated while reading; not owned here.
                DictionaryMemo *dictionary_memo;

                /// Reader options held by reference — the caller's options object
                /// must outlive this context.
                const IpcReadOptions &options;

                /// Metadata version of the stream being read (defaults to V5).
                MetadataVersion metadata_version;

                /// Body-buffer compression declared by the current batch
                /// (UNCOMPRESSED when the batch carries none).
                CompressionType compression;

                /// \brief load_record_batch() or load_record_batch_subset() swaps endianness of elements
                /// if this flag is true
                const bool swap_endian;
            };

            /// A collection of ranges to read and pointers to set to those ranges when they are
            /// available.  This allows the ArrayLoader to utilize a two pass cache-then-read
            /// strategy with a ReadRangeCache
            class BatchDataReadRequest {
            public:
                const std::vector<io::ReadRange> &ranges_to_read() const { return ranges_to_read_; }

                void RequestRange(int64_t offset, int64_t length, std::shared_ptr<Buffer> *out) {
                    ranges_to_read_.push_back({offset, length});
                    destinations_.push_back(out);
                }

                void FulfillRequest(const std::vector<std::shared_ptr<Buffer>> &buffers) {
                    for (std::size_t i = 0; i < buffers.size(); i++) {
                        *destinations_[i] = buffers[i];
                    }
                }

            private:
                std::vector<io::ReadRange> ranges_to_read_;
                std::vector<std::shared_ptr<Buffer> *> destinations_;
            };

            /// The field_index and buffer_index are incremented based on how much of the
            /// batch is "consumed" (through nested data reconstruction, for example)
            class ArrayLoader {
            public:
                /// Loader that reads buffer bytes directly from `file`.
                explicit ArrayLoader(const flatbuf::RecordBatch *metadata,
                                     MetadataVersion metadata_version, const IpcReadOptions &options,
                                     io::RandomAccessFile *file)
                        : metadata_(metadata),
                          metadata_version_(metadata_version),
                          file_(file),
                          file_offset_(0),
                          max_recursion_depth_(options.max_recursion_depth) {}

                /// Loader that performs no I/O itself: reads are queued into
                /// read_request() with offsets shifted by `file_offset`, to be
                /// fulfilled later by the caller.
                explicit ArrayLoader(const flatbuf::RecordBatch *metadata,
                                     MetadataVersion metadata_version, const IpcReadOptions &options,
                                     int64_t file_offset)
                        : metadata_(metadata),
                          metadata_version_(metadata_version),
                          file_(nullptr),
                          file_offset_(file_offset),
                          max_recursion_depth_(options.max_recursion_depth) {}

                /// Read (or queue) `length` bytes at `offset` into *out.
                /// A no-op while skip_io_ is set (see skip_field()).
                turbo::Status read_buffer(int64_t offset, int64_t length, std::shared_ptr<Buffer> *out) {
                    if (skip_io_) {
                        return turbo::OkStatus();
                    }
                    if (offset < 0) {
                        return turbo::invalid_argument_error("Negative offset for reading buffer ", buffer_index_);
                    }
                    if (length < 0) {
                        return turbo::invalid_argument_error("Negative length for reading buffer ", buffer_index_);
                    }
                    // Buffers must start on an 8-byte aligned offset; reject
                    // malformed metadata early.
                    if (!bit_util::IsMultipleOf8(offset)) {
                        return turbo::invalid_argument_error("Buffer ", buffer_index_,
                                                             " did not start on 8-byte aligned offset: ", offset);
                    }
                    if (file_) {
                        return file_->read_at(offset, length).try_value(out);
                    } else {
                        // Deferred mode: remember where the bytes live; *out is
                        // filled later via BatchDataReadRequest::FulfillRequest.
                        read_request_.RequestRange(offset + file_offset_, length, out);
                        return turbo::OkStatus();
                    }
                }

                /// Dispatch to the Visit overload matching `type`.
                turbo::Status load_type(const DataType &type) {
                            DKCHECK_NE(out_, nullptr);
                    return visit_type_inline(type, this);
                }

                /// Load `field` into `out`, consuming field nodes and buffers from
                /// the batch metadata as it recurses.
                turbo::Status Load(const Field *field, ArrayData *out) {
                    if (max_recursion_depth_ <= 0) {
                        return turbo::invalid_argument_error("Max recursion depth reached");
                    }

                    field_ = field;
                    out_ = out;
                    out_->type = field_->type();
                    return load_type(*field_->type());
                }

                /// Advance field_index_/buffer_index_ past `field` without doing
                /// any I/O, so subsequent fields line up with the metadata.
                turbo::Status skip_field(const Field *field) {
                    ArrayData dummy;
                    skip_io_ = true;
                    turbo::Status status = Load(field, &dummy);
                    skip_io_ = false;
                    // GH-37851: reset state. Load will set `out_` to `&dummy`, which would
                    // be a dangling pointer.
                    out_ = nullptr;
                    return status;
                }

                /// Fetch the buffer at `buffer_index` from the batch metadata;
                /// zero-length buffers become fresh empty allocations.
                turbo::Status get_buffer(int buffer_index, std::shared_ptr<Buffer> *out) {
                    auto buffers = metadata_->buffers();
                    CHECK_FLATBUFFERS_NOT_NULL(buffers, "RecordBatch.buffers");
                    if (buffer_index >= static_cast<int>(buffers->size())) {
                        return turbo::io_error("buffer_index out of range.");
                    }
                    const flatbuf::Buffer *buffer = buffers->Get(buffer_index);
                    if (buffer->length() == 0) {
                        // Should never return a null buffer here.
                        // (zero-sized buffer allocations are cheap)
                        return allocate_buffer(0).try_value(out);
                    } else {
                        return read_buffer(buffer->offset(), buffer->length(), out);
                    }
                }

                /// Number of variadic data buffers for the i-th view column,
                /// validated to be representable as a non-negative int32.
                turbo::Result<size_t> get_variadic_count(int i) {
                    auto *variadic_counts = metadata_->variadic_buffer_counts();
                    CHECK_FLATBUFFERS_NOT_NULL(variadic_counts, "RecordBatch.variadic_buffer_counts");
                    if (i >= static_cast<int>(variadic_counts->size())) {
                        return turbo::io_error("variadic_count_index out of range.");
                    }
                    int64_t count = variadic_counts->Get(i);
                    if (count < 0 || count > std::numeric_limits<int32_t>::max()) {
                        return turbo::io_error(
                                "variadic_count must be representable as a positive int32_t, got ", count, ".");
                    }
                    return static_cast<size_t>(count);
                }

                /// Copy length/null_count from the field node at `field_index`
                /// into *out (offset is always reset to 0).
                turbo::Status get_field_metadata(int field_index, ArrayData *out) {
                    auto nodes = metadata_->nodes();
                    // NOTE(review): the error label says "Table.nodes" though these
                    // are RecordBatch nodes — confirm whether this is intentional.
                    CHECK_FLATBUFFERS_NOT_NULL(nodes, "Table.nodes");
                    // pop off a field
                    if (field_index >= static_cast<int>(nodes->size())) {
                        return turbo::invalid_argument_error("Ran out of field metadata, likely malformed");
                    }
                    const flatbuf::FieldNode *node = nodes->Get(field_index);

                    out->length = node->length();
                    out->null_count = node->null_count();
                    out->offset = 0;
                    return turbo::OkStatus();
                }

                /// Shared prologue for most types: consume a field node and, for
                /// types that carry one, the validity bitmap (buffers[0]).
                turbo::Status load_common(Type::type type_id) {
                            DKCHECK_NE(out_, nullptr);
                    // This only contains the length and null count, which we need to figure
                    // out what to do with the buffers. For example, if null_count == 0, then
                    // we can skip that buffer without reading from shared memory
                    TURBO_RETURN_NOT_OK(get_field_metadata(field_index_++, out_));

                    if (internal::has_validity_bitmap(type_id, metadata_version_)) {
                        // Extract null_bitmap which is common to all arrays except for unions
                        // and nulls.
                        if (out_->null_count != 0) {
                            TURBO_RETURN_NOT_OK(get_buffer(buffer_index_, &out_->buffers[0]));
                        }
                        buffer_index_++;
                    }
                    return turbo::OkStatus();
                }

                /// Fixed-width layout: validity bitmap + one data buffer.
                template<typename TYPE>
                turbo::Status load_primitive(Type::type type_id) {
                            DKCHECK_NE(out_, nullptr);
                    out_->buffers.resize(2);

                    TURBO_RETURN_NOT_OK(load_common(type_id));
                    if (out_->length > 0) {
                        TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[1]));
                    } else {
                        // Empty array: still consume the buffer slot, but install a
                        // zero-length placeholder instead of reading.
                        buffer_index_++;
                        out_->buffers[1].reset(new Buffer(nullptr, 0));
                    }
                    return turbo::OkStatus();
                }

                /// Variable-size binary layout: validity + offsets + data.
                turbo::Status load_binary(Type::type type_id) {
                            DKCHECK_NE(out_, nullptr);
                    out_->buffers.resize(3);

                    TURBO_RETURN_NOT_OK(load_common(type_id));
                    TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[1]));
                    return get_buffer(buffer_index_++, &out_->buffers[2]);
                }

                /// List layout: validity + offsets, then exactly one child field.
                template<typename TYPE>
                turbo::Status load_list(const TYPE &type) {
                            DKCHECK_NE(out_, nullptr);
                    out_->buffers.resize(2);

                    TURBO_RETURN_NOT_OK(load_common(type.id()));
                    TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[1]));

                    const int num_children = type.num_fields();
                    if (num_children != 1) {
                        return turbo::invalid_argument_error("Wrong number of children: ", num_children);
                    }

                    return load_children(type.fields());
                }

                /// List-view layout: validity + offsets + sizes, then one child.
                template<typename TYPE>
                turbo::Status load_list_view(const TYPE &type) {
                    out_->buffers.resize(3);

                    TURBO_RETURN_NOT_OK(load_common(type.id()));
                    TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[1]));
                    TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[2]));

                    const int num_children = type.num_fields();
                    if (num_children != 1) {
                        return turbo::invalid_argument_error("Wrong number of children: ", num_children);
                    }

                    return load_children(type.fields());
                }

                /// Recursively load each child field, charging one recursion level
                /// per child while it is being loaded.
                turbo::Status load_children(const std::vector<std::shared_ptr<Field>> &child_fields) {
                            DKCHECK_NE(out_, nullptr);
                    ArrayData *parent = out_;

                    parent->child_data.resize(child_fields.size());
                    for (int i = 0; i < static_cast<int>(child_fields.size()); ++i) {
                        parent->child_data[i] = std::make_shared<ArrayData>();
                        --max_recursion_depth_;
                        TURBO_RETURN_NOT_OK(Load(child_fields[i].get(), parent->child_data[i].get()));
                        ++max_recursion_depth_;
                    }
                    // Load() repointed out_ at each child; restore the parent.
                    out_ = parent;
                    return turbo::OkStatus();
                }

                turbo::Status Visit(const NullType &type) {
                    out_->buffers.resize(1);

                    // ARROW-6379: NullType has no buffers in the IPC payload
                    return get_field_metadata(field_index_++, out_);
                }

                /// Fixed-width types other than fixed-size binary and dictionary.
                template<typename T>
                enable_if_t<std::is_base_of<FixedWidthType, T>::value &&
                            !std::is_base_of<FixedSizeBinaryType, T>::value &&
                            !std::is_base_of<DictionaryType, T>::value,
                        turbo::Status>
                Visit(const T &type) {
                    return load_primitive<T>(type.id());
                }

                template<typename T>
                enable_if_base_binary<T, turbo::Status> Visit(const T &type) {
                    return load_binary(type.id());
                }

                /// Binary/string view layout: validity + views buffer + a
                /// metadata-declared number of variadic data buffers.
                turbo::Status Visit(const BinaryViewType &type) {
                    out_->buffers.resize(2);

                    TURBO_RETURN_NOT_OK(load_common(type.id()));
                    TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[1]));

                    TURBO_MOVE_OR_RAISE(auto data_buffer_count,
                                        get_variadic_count(variadic_count_index_++));
                    out_->buffers.resize(data_buffer_count + 2);
                    for (size_t i = 0; i < data_buffer_count; ++i) {
                        TURBO_RETURN_NOT_OK(get_buffer(buffer_index_++, &out_->buffers[i + 2]));
                    }
                    return turbo::OkStatus();
                }

                turbo::Status Visit(const FixedSizeBinaryType &type) {
                    out_->buffers.resize(2);
                    TURBO_RETURN_NOT_OK(load_common(type.id()));
                    return get_buffer(buffer_index_++, &out_->buffers[1]);
                }

                template<typename T>
                enable_if_var_size_list<T, turbo::Status> Visit(const T &type) {
                    return load_list(type);
                }

                template<typename T>
                enable_if_list_view<T, turbo::Status> Visit(const T &type) {
                    return load_list_view(type);
                }

                /// Map is a list layout whose child data gets extra validation.
                turbo::Status Visit(const MapType &type) {
                    TURBO_RETURN_NOT_OK(load_list(type));
                    return MapArray::ValidateChildData(out_->child_data);
                }

                /// Fixed-size list: validity only, then exactly one child.
                turbo::Status Visit(const FixedSizeListType &type) {
                    out_->buffers.resize(1);

                    TURBO_RETURN_NOT_OK(load_common(type.id()));

                    const int num_children = type.num_fields();
                    if (num_children != 1) {
                        return turbo::invalid_argument_error("Wrong number of children: ", num_children);
                    }

                    return load_children(type.fields());
                }

                turbo::Status Visit(const StructType &type) {
                    out_->buffers.resize(1);
                    TURBO_RETURN_NOT_OK(load_common(type.id()));
                    return load_children(type.fields());
                }

                /// Union: type-ids buffer, plus an offsets buffer for dense mode.
                turbo::Status Visit(const UnionType &type) {
                    int n_buffers = type.mode() == UnionMode::SPARSE ? 2 : 3;
                    out_->buffers.resize(n_buffers);

                    TURBO_RETURN_NOT_OK(load_common(type.id()));

                    // With metadata V4, we can get a validity bitmap.
                    // Trying to fix up union data to do without the top-level validity bitmap
                    // is hairy:
                    // - type ids must be rewritten to all have valid values (even for former
                    //   null slots)
                    // - sparse union children must have their validity bitmaps rewritten
                    //   by ANDing the top-level validity bitmap
                    // - dense union children must be rewritten (at least one of them)
                    //   to insert the required null slots that were formerly omitted
                    // So instead we bail out.
                    if (out_->null_count != 0 && out_->buffers[0] != nullptr) {
                        return turbo::invalid_argument_error(
                                "Cannot read pre-1.0.0 Union array with top-level validity bitmap");
                    }
                    out_->buffers[0] = nullptr;
                    out_->null_count = 0;

                    if (out_->length > 0) {
                        TURBO_RETURN_NOT_OK(get_buffer(buffer_index_, &out_->buffers[1]));
                        if (type.mode() == UnionMode::DENSE) {
                            TURBO_RETURN_NOT_OK(get_buffer(buffer_index_ + 1, &out_->buffers[2]));
                        }
                    }
                    buffer_index_ += n_buffers - 1;
                    return load_children(type.fields());
                }

                turbo::Status Visit(const DictionaryType &type) {
                    // out_->dictionary will be filled later in ResolveDictionaries()
                    return load_type(*type.index_type());
                }

                turbo::Status Visit(const RunEndEncodedType &type) {
                    out_->buffers.resize(1);
                    TURBO_RETURN_NOT_OK(load_common(type.id()));
                    return load_children(type.fields());
                }

                /// Extension arrays are loaded as their underlying storage type.
                turbo::Status Visit(const ExtensionType &type) { return load_type(*type.storage_type()); }

                /// Ranges queued by the deferred-I/O constructor; the caller reads
                /// them and hands the buffers back via FulfillRequest.
                BatchDataReadRequest &read_request() { return read_request_; }

            private:
                const flatbuf::RecordBatch *metadata_;
                const MetadataVersion metadata_version_;
                // Non-null in direct-read mode; null in deferred (read_request_) mode.
                io::RandomAccessFile *file_;
                // Added to every queued range offset in deferred mode.
                int64_t file_offset_;
                int max_recursion_depth_;
                // Cursors into metadata_->buffers() / nodes() / variadic counts.
                int buffer_index_ = 0;
                int field_index_ = 0;
                // When true, read_buffer() does nothing (used by skip_field()).
                bool skip_io_ = false;
                int variadic_count_index_ = 0;

                BatchDataReadRequest read_request_;
                const Field *field_ = nullptr;
                ArrayData *out_ = nullptr;
            };

            /// \brief Decompress one IPC body buffer.
            ///
            /// The on-wire layout is an int64 little-endian uncompressed-size
            /// prefix followed by the compressed payload; a prefix of -1 means
            /// the payload was stored uncompressed.
            ///
            /// \param buf raw (prefixed) body buffer; null or empty buffers are
            ///   returned unchanged
            /// \param options supplies the memory pool for the output allocation
            /// \param codec codec matching the batch's compression metadata
            /// \return the decompressed buffer, or an error on malformed input
            turbo::Result<std::shared_ptr<Buffer>> decompress_buffer(const std::shared_ptr<Buffer> &buf,
                                                                    const IpcReadOptions &options,
                                                                    Codec *codec) {
                if (buf == nullptr || buf->size() == 0) {
                    return buf;
                }

                // Every compressed buffer carries at least the 8-byte size prefix.
                if (buf->size() < 8) {
                    return turbo::invalid_argument_error(
                            "Likely corrupted message, compressed buffers "
                            "are larger than 8 bytes by construction");
                }

                const uint8_t *data = buf->data();
                int64_t compressed_size = buf->size() - sizeof(int64_t);
                int64_t uncompressed_size = bit_util::FromLittleEndian(turbo::safe_load_as<int64_t>(data));

                // A prefix of -1 signals the body was written uncompressed.
                if (uncompressed_size == -1) {
                    return SliceBuffer(buf, sizeof(int64_t), compressed_size);
                }

                // The prefix comes from untrusted message bytes: any negative
                // value other than the -1 sentinel must not reach the allocator.
                if (uncompressed_size < 0) {
                    return turbo::invalid_argument_error(
                            "Likely corrupted message, negative uncompressed size ",
                            uncompressed_size);
                }

                TURBO_MOVE_OR_RAISE(auto uncompressed,
                                    allocate_buffer(uncompressed_size, options.memory_pool));

                TURBO_MOVE_OR_RAISE(
                        int64_t actual_decompressed,
                        codec->Decompress(compressed_size, data + sizeof(int64_t), uncompressed_size,
                                          uncompressed->mutable_data()));
                if (actual_decompressed != uncompressed_size) {
                    return turbo::invalid_argument_error("Failed to fully decompress buffer, expected ",
                                                         uncompressed_size, " bytes but decompressed ",
                                                         actual_decompressed);
                }

                // R build with openSUSE155 requires an explicit shared_ptr construction
                return std::shared_ptr<Buffer>(std::move(uncompressed));
            }

            /// Decompress, in place, every buffer reachable from the given
            /// top-level columns (children included), optionally in parallel.
            turbo::Status decompress_buffers(CompressionType compression, const IpcReadOptions &options,
                                            ArrayDataVector *fields) {
                // Collect a pointer to every buffer slot in the column tree so
                // the work can be dispatched over one flat list.
                std::vector<std::shared_ptr<Buffer> *> flat_buffers;
                struct Collector {
                    std::vector<std::shared_ptr<Buffer> *> *sink;

                    void Walk(const ArrayDataVector &columns) const {
                        for (const auto &column: columns) {
                            for (auto &buffer: column->buffers) {
                                sink->push_back(&buffer);
                            }
                            Walk(column->child_data);
                        }
                    }
                };
                Collector{&flat_buffers}.Walk(*fields);

                TURBO_MOVE_OR_RAISE(auto codec, Codec::create(compression));

                return ::nebula::internal::OptionalParallelFor(
                        options.use_threads, static_cast<int>(flat_buffers.size()), [&](int i) {
                            TURBO_MOVE_OR_RAISE(*flat_buffers[i],
                                                decompress_buffer(*flat_buffers[i], options, codec.get()));
                            return turbo::OkStatus();
                        });
            }

            /// Load a record batch's columns from `metadata`, optionally keeping
            /// only the fields selected by *inclusion_mask (indexed in the
            /// original schema's field order; a null mask means "all fields").
            /// Skipped fields are still walked via skip_field() so the loader's
            /// field/buffer cursors stay in sync with the metadata.
            turbo::Result<std::shared_ptr<RecordBatch>> load_record_batch_subset(
                    const flatbuf::RecordBatch *metadata, const std::shared_ptr<Schema> &schema,
                    const std::vector<bool> *inclusion_mask, const IpcReadContext &context,
                    io::RandomAccessFile *file) {
                ArrayLoader loader(metadata, context.metadata_version, context.options, file);

                ArrayDataVector columns(schema->num_fields());
                ArrayDataVector filtered_columns;
                FieldVector filtered_fields;
                std::shared_ptr<Schema> filtered_schema;

                for (int i = 0; i < schema->num_fields(); ++i) {
                    const Field &field = *schema->field(i);
                    if (!inclusion_mask || (*inclusion_mask)[i]) {
                        // read field
                        auto column = std::make_shared<ArrayData>();
                        TURBO_RETURN_NOT_OK(loader.Load(&field, column.get()));
                        if (metadata->length() != column->length) {
                            return turbo::io_error("Array length did not match record batch length");
                        }
                        columns[i] = std::move(column);
                        if (inclusion_mask) {
                            filtered_columns.push_back(columns[i]);
                            filtered_fields.push_back(schema->field(i));
                        }
                    } else {
                        // Skip field. This logic must be executed to advance the state of the
                        // loader to the next field
                        TURBO_RETURN_NOT_OK(loader.skip_field(&field));
                    }
                }

                // Dictionary resolution needs to happen on the unfiltered columns,
                // because fields are mapped structurally (by path in the original schema).
                TURBO_RETURN_NOT_OK(ResolveDictionaries(columns, *context.dictionary_memo,
                                                        context.options.memory_pool));

                if (inclusion_mask) {
                    filtered_schema = ::nebula::schema(std::move(filtered_fields), schema->metadata());
                    columns.clear();
                } else {
                    filtered_schema = schema;
                    filtered_columns = std::move(columns);
                }
                // Only the retained columns' buffers are decompressed.
                if (context.compression != CompressionType::UNCOMPRESSED) {
                    TURBO_RETURN_NOT_OK(
                            decompress_buffers(context.compression, context.options, &filtered_columns));
                }

                // swap endian in a set of ArrayData if necessary (swap_endian == true)
                if (context.swap_endian) {
                    for (auto &filtered_column: filtered_columns) {
                        TURBO_MOVE_OR_RAISE(filtered_column,
                                            nebula::internal::SwapEndianArrayData(filtered_column));
                    }
                }
                return RecordBatch::create(std::move(filtered_schema), metadata->length(),
                                           std::move(filtered_columns));
            }

            /// Read a record batch, treating an empty inclusion mask as
            /// "read every column".
            turbo::Result<std::shared_ptr<RecordBatch>> load_record_batch(
                    const flatbuf::RecordBatch *metadata, const std::shared_ptr<Schema> &schema,
                    const std::vector<bool> &inclusion_mask, const IpcReadContext &context,
                    io::RandomAccessFile *file) {
                const std::vector<bool> *mask =
                        inclusion_mask.empty() ? nullptr : &inclusion_mask;
                return load_record_batch_subset(metadata, schema, mask, context, file);
            }

            // ----------------------------------------------------------------------
            // Array loading

            /// Translate a RecordBatch's BodyCompression metadata into a
            /// CompressionType; *out defaults to UNCOMPRESSED when absent.
            turbo::Status get_compression(const flatbuf::RecordBatch *batch, CompressionType *out) {
                *out = CompressionType::UNCOMPRESSED;
                const flatbuf::BodyCompression *compression = batch->compression();
                if (compression == nullptr) {
                    return turbo::OkStatus();
                }
                // Forward compatibility: only per-buffer compression is understood.
                if (compression->method() != flatbuf::BodyCompressionMethod::BUFFER) {
                    return turbo::invalid_argument_error("This library only supports BUFFER compression method");
                }
                switch (compression->codec()) {
                    case flatbuf::CompressionType::LZ4_FRAME:
                        *out = CompressionType::LZ4_FRAME;
                        break;
                    case flatbuf::CompressionType::ZSTD:
                        *out = CompressionType::ZSTD;
                        break;
                    default:
                        return turbo::invalid_argument_error("Unsupported codec in RecordBatch::compression metadata");
                }
                return turbo::OkStatus();
            }

            /// Legacy path: read the compression type from the message's
            /// "ARROW:experimental_compression" custom-metadata key, if present.
            turbo::Status get_compression_experimental(const flatbuf::Message *message,
                                                     CompressionType *out) {
                *out = CompressionType::UNCOMPRESSED;
                if (message->custom_metadata() == nullptr) {
                    return turbo::OkStatus();
                }
                // TODO: Ensure this deserialization only ever happens once
                std::shared_ptr<KeyValueMetadata> metadata;
                TURBO_RETURN_NOT_OK(internal::GetKeyValueMetadata(message->custom_metadata(), &metadata));
                const int key_index = metadata->FindKey("ARROW:experimental_compression");
                if (key_index != -1) {
                    // Nebula 0.17 stored string in upper case, internal utils now require lower case
                    const auto codec_name = nebula::internal::AsciiToLower(metadata->value(key_index));
                    TURBO_MOVE_OR_RAISE(*out, Codec::GetCompressionType(codec_name));
                }
                return internal::CheckCompressionSupported(*out);
            }

            // Read one complete message (metadata + body) from the stream's current
            // position, failing if the stream is already exhausted.
            turbo::Status read_contiguous_payload(io::InputStream *file, std::unique_ptr<Message> *message) {
                TURBO_MOVE_OR_RAISE(*message, ReadMessage(file));
                if (!*message) {
                    return turbo::invalid_argument_error("Unable to read metadata at offset");
                }
                return turbo::OkStatus();
            }

            // Decode a RecordBatch message: verify the flatbuffer, resolve the
            // effective compression and metadata version into `context`, then load
            // the batch's arrays (optionally a column subset) from `file`.
            // Returns the batch paired with any message-level custom metadata.
            turbo::Result<RecordBatchWithMetadata> read_record_batch_internal(BufferSpan metadata, const std::shared_ptr<Schema> &schema,
                    const std::vector<bool> &inclusion_mask, IpcReadContext &context,
                    io::RandomAccessFile *file) {
                const flatbuf::Message *message = nullptr;
                TURBO_RETURN_NOT_OK(internal::VerifyMessage(metadata.data(), metadata.size(), &message));
                auto batch = message->header_as_RecordBatch();
                if (batch == nullptr) {
                    return turbo::io_error(
                            "Header-type of flatbuffer-encoded Message is not RecordBatch.");
                }

                CompressionType compression;
                TURBO_RETURN_NOT_OK(get_compression(batch, &compression));
                if (context.compression == CompressionType::UNCOMPRESSED &&
                    message->version() == flatbuf::MetadataVersion::V4) {
                    // Possibly obtain codec information from experimental serialization format
                    // in 0.17.x
                    TURBO_RETURN_NOT_OK(get_compression_experimental(message, &compression));
                }
                // Persist what we learned on the (mutable) context for the load step.
                context.compression = compression;
                context.metadata_version = internal::GetMetadataVersion(message->version());

                // Message-level custom metadata, if any, rides along with the batch.
                std::shared_ptr<KeyValueMetadata> custom_metadata;
                if (message->custom_metadata() != nullptr) {
                    TURBO_RETURN_NOT_OK(
                            internal::GetKeyValueMetadata(message->custom_metadata(), &custom_metadata));
                }
                TURBO_MOVE_OR_RAISE(auto record_batch,
                                    load_record_batch(batch, schema, inclusion_mask, context, file));
                return RecordBatchWithMetadata{record_batch, custom_metadata};
            }

            // If we are selecting only certain fields, populate an inclusion mask for fast lookups.
            // Additionally, drop deselected fields from the reader's schema.
            //
            // An empty `included_indices` means "select everything": the mask is left
            // empty and `out_schema` aliases `full_schema`. Duplicate indices are
            // applied once; out-of-bounds indices are an error (the original comment
            // claiming they are ignored was stale).
            turbo::Status GetInclusionMaskAndOutSchema(const std::shared_ptr<Schema> &full_schema,
                                                       const std::vector<int> &included_indices,
                                                       std::vector<bool> *inclusion_mask,
                                                       std::shared_ptr<Schema> *out_schema) {
                inclusion_mask->clear();
                if (included_indices.empty()) {
                    *out_schema = full_schema;
                    return turbo::OkStatus();
                }

                inclusion_mask->resize(full_schema->num_fields(), false);

                // Sort so the output schema preserves the full schema's field order
                // regardless of the order in which indices were requested.
                auto included_indices_sorted = included_indices;
                std::sort(included_indices_sorted.begin(), included_indices_sorted.end());

                FieldVector included_fields;
                included_fields.reserve(included_indices_sorted.size());
                for (int i: included_indices_sorted) {
                    if (i < 0 || i >= full_schema->num_fields()) {
                        return turbo::invalid_argument_error("Out of bounds field index: ", i);
                    }

                    // Bounds were just validated, so operator[] is safe; skip
                    // duplicates so each field appears at most once.
                    if ((*inclusion_mask)[i]) continue;

                    (*inclusion_mask)[i] = true;
                    included_fields.push_back(full_schema->field(i));
                }

                *out_schema = schema(std::move(included_fields), full_schema->endianness(),
                                     full_schema->metadata());
                return turbo::OkStatus();
            }

            // Decode a schema flatbuffer and derive the reader state that depends on
            // it: the (possibly column-filtered) output schema, the field inclusion
            // mask, and whether batch data must be byte-swapped to native endianness.
            turbo::Status UnpackSchemaMessage(const void *opaque_schema, const IpcReadOptions &options,
                                              DictionaryMemo *dictionary_memo,
                                              std::shared_ptr<Schema> *schema,
                                              std::shared_ptr<Schema> *out_schema,
                                              std::vector<bool> *field_inclusion_mask, bool *swap_endian) {
                TURBO_RETURN_NOT_OK(internal::get_schema(opaque_schema, dictionary_memo, schema));

                // Populate the inclusion mask up front (if a column subset was
                // requested) so later per-batch lookups are cheap.
                TURBO_RETURN_NOT_OK(GetInclusionMaskAndOutSchema(*schema, options.included_fields,
                                                                 field_inclusion_mask, out_schema));
                const bool needs_swap =
                        options.ensure_native_endian && !(*out_schema)->is_native_endian();
                *swap_endian = needs_swap;
                if (needs_swap) {
                    // Present native-endian schemas; ArrayData itself is swapped later.
                    *schema = (*schema)->with_endianness(Endianness::Native);
                    *out_schema = (*out_schema)->with_endianness(Endianness::Native);
                }
                return turbo::OkStatus();
            }

            // Overload taking a decoded Message: validates that it is a bodiless
            // SCHEMA message, then delegates to the flatbuffer-pointer overload above.
            turbo::Status UnpackSchemaMessage(const Message &message, const IpcReadOptions &options,
                                              DictionaryMemo *dictionary_memo,
                                              std::shared_ptr<Schema> *schema,
                                              std::shared_ptr<Schema> *out_schema,
                                              std::vector<bool> *field_inclusion_mask, bool *swap_endian) {
                CHECK_MESSAGE_TYPE(MessageType::SCHEMA, message.type());
                // Schema messages carry no body payload.
                CHECK_HAS_NO_BODY(message);

                return UnpackSchemaMessage(message.header(), options, dictionary_memo, schema,
                                           out_schema, field_inclusion_mask, swap_endian);
            }

            // Read one DictionaryBatch message and register its contents in the
            // context's DictionaryMemo, either as a brand-new dictionary, a
            // replacement, or a delta appended to an existing one. On success,
            // *kind (if non-null) reports which of the three happened.
            turbo::Status ReadDictionary(BufferSpan metadata, const IpcReadContext &context,
                                         DictionaryKind *kind, io::RandomAccessFile *file) {
                const flatbuf::Message *message = nullptr;
                TURBO_RETURN_NOT_OK(internal::VerifyMessage(metadata.data(), metadata.size(), &message));
                const auto dictionary_batch = message->header_as_DictionaryBatch();
                if (dictionary_batch == nullptr) {
                    return turbo::io_error(
                            "Header-type of flatbuffer-encoded Message is not DictionaryBatch.");
                }

                // The dictionary is embedded in a record batch with a single column
                const auto batch_meta = dictionary_batch->data();

                CHECK_FLATBUFFERS_NOT_NULL(batch_meta, "DictionaryBatch.data");

                CompressionType compression;
                TURBO_RETURN_NOT_OK(get_compression(batch_meta, &compression));
                if (compression == CompressionType::UNCOMPRESSED &&
                    message->version() == flatbuf::MetadataVersion::V4) {
                    // Possibly obtain codec information from experimental serialization format
                    // in 0.17.x
                    TURBO_RETURN_NOT_OK(get_compression_experimental(message, &compression));
                }

                const int64_t id = dictionary_batch->id();

                // Look up the dictionary value type, which must have been added to the
                // DictionaryMemo already prior to invoking this function
                TURBO_MOVE_OR_RAISE(auto value_type, context.dictionary_memo->GetDictionaryType(id));

                // Load the dictionary data from the dictionary batch
                ArrayLoader loader(batch_meta, internal::GetMetadataVersion(message->version()),
                                   context.options, file);
                auto dict_data = std::make_shared<ArrayData>();
                // A placeholder field is enough to drive the loader for the single column.
                const Field dummy_field("", value_type);
                TURBO_RETURN_NOT_OK(loader.Load(&dummy_field, dict_data.get()));

                if (compression != CompressionType::UNCOMPRESSED) {
                    ArrayDataVector dict_fields{dict_data};
                    TURBO_RETURN_NOT_OK(decompress_buffers(compression, context.options, &dict_fields));
                }

                // swap endian in dict_data if necessary (swap_endian == true)
                if (context.swap_endian) {
                    TURBO_MOVE_OR_RAISE(dict_data, ::nebula::internal::SwapEndianArrayData(
                            dict_data, context.options.memory_pool));
                }

                if (dictionary_batch->is_delta()) {
                    if (kind != nullptr) {
                        *kind = DictionaryKind::Delta;
                    }
                    return context.dictionary_memo->AddDictionaryDelta(id, dict_data);
                }
                // Non-delta: insert or overwrite; `inserted` distinguishes the two.
                TURBO_MOVE_OR_RAISE(bool inserted,
                                    context.dictionary_memo->AddOrReplaceDictionary(id, dict_data));
                if (kind != nullptr) {
                    *kind = inserted ? DictionaryKind::New : DictionaryKind::Replacement;
                }
                return turbo::OkStatus();
            }

            // Convenience overload: open a reader over the message body and delegate
            // to the BufferSpan-based dictionary reader above.
            turbo::Status ReadDictionary(const Message &message, const IpcReadContext &context,
                                         DictionaryKind *kind) {
                // Only invoke this method if we already know we have a dictionary message
                DKCHECK(message.type() == MessageType::DICTIONARY_BATCH);
                CHECK_HAS_BODY(message);
                TURBO_MOVE_OR_RAISE(auto body_reader, Buffer::get_reader(message.body()));
                return ReadDictionary(*message.metadata(), context, kind, body_reader.get());
            }

        }  // namespace

        // Read a record batch given its flatbuffer metadata and a file to pull the
        // body buffers from. Column selection comes from options.included_fields.
        turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
                BufferSpan metadata, const std::shared_ptr<Schema> &schema,
                const DictionaryMemo *dictionary_memo, const IpcReadOptions &options,
                io::RandomAccessFile *file) {
            // An empty mask means "load every column".
            std::vector<bool> inclusion_mask;
            std::shared_ptr<Schema> out_schema;
            IpcReadContext context(const_cast<DictionaryMemo *>(dictionary_memo), options, false);
            TURBO_RETURN_NOT_OK(GetInclusionMaskAndOutSchema(schema, context.options.included_fields,
                                                             &inclusion_mask, &out_schema));
            TURBO_MOVE_OR_RAISE(
                    auto batch_and_custom_metadata,
                    read_record_batch_internal(metadata, schema, inclusion_mask, context, file));
            // This overload drops any message-level custom metadata.
            return batch_and_custom_metadata.batch;
        }

        // Read a record batch from a stream positioned at the start of a message.
        turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
                const std::shared_ptr<Schema> &schema, const DictionaryMemo *dictionary_memo,
                const IpcReadOptions &options, io::InputStream *file) {
            std::unique_ptr<Message> message;
            TURBO_RETURN_NOT_OK(read_contiguous_payload(file, &message));
            CHECK_HAS_BODY(*message);
            TURBO_MOVE_OR_RAISE(auto body_reader, Buffer::get_reader(message->body()));
            return read_record_batch(*message->metadata(), schema, dictionary_memo, options,
                                     body_reader.get());
        }

        // Read a record batch from an already-decoded Message after validating
        // that it is a RECORD_BATCH message with a body.
        turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
                const Message &message, const std::shared_ptr<Schema> &schema,
                const DictionaryMemo *dictionary_memo, const IpcReadOptions &options) {
            CHECK_MESSAGE_TYPE(MessageType::RECORD_BATCH, message.type());
            CHECK_HAS_BODY(message);
            TURBO_MOVE_OR_RAISE(auto body_reader, Buffer::get_reader(message.body()));
            return read_record_batch(*message.metadata(), schema, dictionary_memo, options,
                                     body_reader.get());
        }

        // Streaming format decoder.
        //
        // Drives a small state machine over the IPC stream layout:
        //   SCHEMA -> INITIAL_DICTIONARIES (if the schema references dictionaries)
        //          -> RECORD_BATCHES -> EOS.
        // Decoded schemas/batches are forwarded to the wrapped Listener.
        class StreamDecoderInternal : public MessageDecoderListener {
        public:
            enum State {
                SCHEMA,                // waiting for the leading schema message
                INITIAL_DICTIONARIES,  // waiting for the first batch of dictionaries
                RECORD_BATCHES,        // steady state: batches and dictionary updates
                EOS,                   // end of stream reached
            };

            explicit StreamDecoderInternal(std::shared_ptr<Listener> listener,
                                           IpcReadOptions options)
                    : listener_(std::move(listener)),
                      options_(std::move(options)),
                      state_(State::SCHEMA),
                      field_inclusion_mask_(),
                      num_required_initial_dictionaries_(0),
                      num_read_initial_dictionaries_(0),
                      dictionary_memo_(),
                      schema_(nullptr),
                      filtered_schema_(nullptr),
                      stats_(),
                      swap_endian_(false) {}

            // Dispatch a decoded message to the handler for the current state.
            // Messages arriving after EOS are silently ignored.
            turbo::Status OnMessageDecoded(std::unique_ptr<Message> message) override {
                ++stats_.num_messages;
                switch (state_) {
                    case State::SCHEMA:
                        TURBO_RETURN_NOT_OK(OnSchemaMessageDecoded(std::move(message)));
                        break;
                    case State::INITIAL_DICTIONARIES:
                        TURBO_RETURN_NOT_OK(OnInitialDictionaryMessageDecoded(std::move(message)));
                        break;
                    case State::RECORD_BATCHES:
                        TURBO_RETURN_NOT_OK(OnRecordBatchMessageDecoded(std::move(message)));
                        break;
                    case State::EOS:
                        break;
                }
                return turbo::OkStatus();
            }

            // End-of-stream: transition to EOS and notify the listener.
            turbo::Status on_eos() override {
                state_ = State::EOS;
                return listener_->on_eos();
            }

            std::shared_ptr<Listener> listener() const { return listener_; }

            Listener *raw_listener() const { return listener_.get(); }

            IpcReadOptions options() const { return options_; }

            State state() const { return state_; }

            // The schema after column selection (filtered), not the full one.
            std::shared_ptr<Schema> schema() const { return filtered_schema_; }

            ReadStats stats() const { return stats_; }

            int num_required_initial_dictionaries() const {
                return num_required_initial_dictionaries_;
            }

            int num_read_initial_dictionaries() const { return num_read_initial_dictionaries_; }

        private:
            // Handle the leading schema message and decide the next state based on
            // how many dictionaries the schema requires.
            turbo::Status OnSchemaMessageDecoded(std::unique_ptr<Message> message) {
                TURBO_RETURN_NOT_OK(UnpackSchemaMessage(*message, options_, &dictionary_memo_, &schema_,
                                                        &filtered_schema_, &field_inclusion_mask_,
                                                        &swap_endian_));

                num_required_initial_dictionaries_ = dictionary_memo_.fields().num_dicts();
                num_read_initial_dictionaries_ = 0;
                if (num_required_initial_dictionaries_ == 0) {
                    state_ = State::RECORD_BATCHES;
                    TURBO_RETURN_NOT_OK(listener_->on_schema_decoded(schema_, filtered_schema_));
                } else {
                    state_ = State::INITIAL_DICTIONARIES;
                }
                return turbo::OkStatus();
            }

            // Consume one of the dictionaries that must precede the record batches.
            // The listener is only told about the schema once all have arrived.
            turbo::Status OnInitialDictionaryMessageDecoded(std::unique_ptr<Message> message) {
                if (message->type() != MessageType::DICTIONARY_BATCH) {
                    return turbo::invalid_argument_error("IPC stream did not have the expected number (",
                                                         num_required_initial_dictionaries_,
                                                         ") of dictionaries at the start of the stream");
                }
                TURBO_RETURN_NOT_OK(ReadDictionary(*message));
                num_read_initial_dictionaries_++;
                if (num_read_initial_dictionaries_ == num_required_initial_dictionaries_) {
                    state_ = State::RECORD_BATCHES;
                    TURBO_RETURN_NOT_OK(listener_->on_schema_decoded(schema_, filtered_schema_));
                }
                return turbo::OkStatus();
            }

            // Steady state: record batches, interleaved with dictionary
            // deltas/replacements, may arrive in any order.
            turbo::Status OnRecordBatchMessageDecoded(std::unique_ptr<Message> message) {
                if (message->type() == MessageType::DICTIONARY_BATCH) {
                    return ReadDictionary(*message);
                } else {
                    CHECK_HAS_BODY(*message);
                    TURBO_MOVE_OR_RAISE(auto reader, Buffer::get_reader(message->body()));
                    IpcReadContext context(&dictionary_memo_, options_, swap_endian_);
                    TURBO_MOVE_OR_RAISE(
                            auto batch_with_metadata,
                            read_record_batch_internal(*message->metadata(), schema_, field_inclusion_mask_,
                                                    context, reader.get()));
                    ++stats_.num_record_batches;
                    return listener_->on_record_batch_with_metadata_decoded(batch_with_metadata);
                }
            }

            // read dictionary from dictionary batch, updating the read statistics
            // according to whether it was new, a delta, or a replacement
            turbo::Status ReadDictionary(const Message &message) {
                DictionaryKind kind;
                IpcReadContext context(&dictionary_memo_, options_, swap_endian_);
                TURBO_RETURN_NOT_OK(::nebula::ipc::ReadDictionary(message, context, &kind));
                ++stats_.num_dictionary_batches;
                switch (kind) {
                    case DictionaryKind::New:
                        break;
                    case DictionaryKind::Delta:
                        ++stats_.num_dictionary_deltas;
                        break;
                    case DictionaryKind::Replacement:
                        ++stats_.num_replaced_dictionaries;
                        break;
                }
                return turbo::OkStatus();
            }

            std::shared_ptr<Listener> listener_;
            const IpcReadOptions options_;
            State state_;
            // Empty when all columns are selected; else one flag per schema field.
            std::vector<bool> field_inclusion_mask_;
            int num_required_initial_dictionaries_;
            int num_read_initial_dictionaries_;
            DictionaryMemo dictionary_memo_;
            std::shared_ptr<Schema> schema_;           // full schema from the stream
            std::shared_ptr<Schema> filtered_schema_;  // schema after column selection
            ReadStats stats_;
            bool swap_endian_;  // true when batch data needs endian conversion
        };

        // ----------------------------------------------------------------------
        // RecordBatchStreamReader implementation

        // Pull-style stream reader implemented on top of the push-style decoder:
        // messages are pumped from a MessageReader into StreamDecoderInternal,
        // whose CollectListener accumulates batches that read_next() then pops.
        class RecordBatchStreamReaderImpl : public RecordBatchStreamReader,
                                            public StreamDecoderInternal {
        public:
            RecordBatchStreamReaderImpl(std::unique_ptr<MessageReader> message_reader,
                                        const IpcReadOptions &options)
                    : RecordBatchStreamReader(),
                      StreamDecoderInternal(std::make_shared<CollectListener>(), options),
                      message_reader_(std::move(message_reader)) {}

            // Consume the leading schema message so errors surface at open time.
            turbo::Status init() {
                // read schema
                TURBO_MOVE_OR_RAISE(auto message, message_reader_->ReadNextMessage());
                if (!message) {
                    return turbo::invalid_argument_error("Tried reading schema message, was null or length 0");
                }
                return OnMessageDecoded(std::move(message));
            }

            // Batch-only variant; custom metadata is discarded.
            turbo::Status read_next(std::shared_ptr<RecordBatch> *batch) override {
                TURBO_MOVE_OR_RAISE(auto batch_with_metadata, read_next());
                *batch = std::move(batch_with_metadata.batch);
                return turbo::OkStatus();
            }

            // Pump messages until a record batch is available or the stream ends.
            // Returns {nullptr, nullptr} on a clean end of stream.
            turbo::Result<RecordBatchWithMetadata> read_next() override {
                auto collect_listener = turbo::checked_cast<CollectListener *>(raw_listener());
                while (collect_listener->num_record_batches() == 0 &&
                       state() != StreamDecoderInternal::State::EOS) {
                    TURBO_MOVE_OR_RAISE(auto message, message_reader_->ReadNextMessage());
                    if (!message) {  // End of stream
                        if (state() == StreamDecoderInternal::State::INITIAL_DICTIONARIES) {
                            if (num_read_initial_dictionaries() == 0) {
                                // ARROW-6006: If we fail to find any dictionaries in the
                                // stream, then it may be that the stream has a schema
                                // but no actual data. In such case we communicate that
                                // we were unable to find the dictionaries (but there was
                                // no failure otherwise), so the caller can decide what
                                // to do
                                return RecordBatchWithMetadata{nullptr, nullptr};
                            } else {
                                // ARROW-6126, the stream terminated before receiving the
                                // expected number of dictionaries
                                return turbo::invalid_argument_error(
                                        "IPC stream ended without reading the "
                                        "expected number (",
                                        num_required_initial_dictionaries(), ") of dictionaries");
                            }
                        } else {
                            return RecordBatchWithMetadata{nullptr, nullptr};
                        }
                    }
                    TURBO_RETURN_NOT_OK(OnMessageDecoded(std::move(message)));
                }
                return collect_listener->PopRecordBatchWithMetadata();
            }

            // Disambiguate: expose the decoder's (filtered) schema.
            std::shared_ptr<Schema> schema() const override {
                return StreamDecoderInternal::schema();
            }

            ReadStats stats() const override { return StreamDecoderInternal::stats(); }

        private:
            std::unique_ptr<MessageReader> message_reader_;
        };

        // ----------------------------------------------------------------------
        // Stream reader constructors

        // Open a stream reader over an arbitrary MessageReader, eagerly reading
        // the schema so failures surface here rather than on the first read_next().
        turbo::Result<std::shared_ptr<RecordBatchStreamReader>> RecordBatchStreamReader::open(
                std::unique_ptr<MessageReader> message_reader, const IpcReadOptions &options) {
            // Private ctor, hence constructing the Impl type directly.
            auto reader =
                    std::make_shared<RecordBatchStreamReaderImpl>(std::move(message_reader), options);
            TURBO_RETURN_NOT_OK(reader->init());
            return reader;
        }

        // Convenience overload: wrap a non-owned input stream in a MessageReader.
        turbo::Result<std::shared_ptr<RecordBatchStreamReader>> RecordBatchStreamReader::open(
                io::InputStream *stream, const IpcReadOptions &options) {
            auto message_reader = MessageReader::open(stream);
            return open(std::move(message_reader), options);
        }

        // Convenience overload: shared ownership of the underlying input stream.
        turbo::Result<std::shared_ptr<RecordBatchStreamReader>> RecordBatchStreamReader::open(
                const std::shared_ptr<io::InputStream> &stream, const IpcReadOptions &options) {
            auto message_reader = MessageReader::open(stream);
            return open(std::move(message_reader), options);
        }

        // ----------------------------------------------------------------------
        // Reader implementation

        // Common functions used in both the random-access file reader and the
        // asynchronous generator
        // Convert a footer Block flatbuffer into the in-memory FileBlock struct.
        static inline FileBlock FileBlockFromFlatbuffer(const flatbuf::Block *block) {
            FileBlock result{block->offset(), block->meta_data_length(), block->body_length()};
            return result;
        }

        // IPC file blocks must start and size on 8-byte boundaries.
        turbo::Status CheckAligned(const FileBlock &block) {
            const bool aligned = bit_util::IsMultipleOf8(block.offset) &&
                                 bit_util::IsMultipleOf8(block.metadata_length) &&
                                 bit_util::IsMultipleOf8(block.body_length);
            if (aligned) {
                return turbo::OkStatus();
            }
            return turbo::invalid_argument_error("Unaligned block in IPC file");
        }

        // Read a complete message from a footer-described block, validating the
        // block's 8-byte alignment first. `fields_loader`, when provided, lets the
        // caller load only a subset of the body.
        static turbo::Result<std::unique_ptr<Message>> ReadMessageFromBlock(
                const FileBlock &block, io::RandomAccessFile *file,
                const FieldsLoaderFunction &fields_loader) {
            TURBO_RETURN_NOT_OK(CheckAligned(block));

            // TODO(wesm): this breaks integration tests, see ARROW-3256
            // DKCHECK_EQ((*out)->body_length(), block.body_length);

            return ReadMessage(block.offset, block.metadata_length, file, fields_loader);
        }

        // Asynchronous counterpart of ReadMessageFromBlock.
        //
        // Reuses CheckAligned instead of duplicating the alignment test inline,
        // keeping the validation (and its error message) consistent with the
        // synchronous path. A failed Status converts into a finished failed
        // Future, exactly as the previous inline error return did.
        static Future<std::shared_ptr<Message>> ReadMessageFromBlockAsync(
                const FileBlock &block, io::RandomAccessFile *file, const io::IOContext &io_context) {
            auto aligned = CheckAligned(block);
            if (!aligned.ok()) {
                return aligned;
            }

            // TODO(wesm): this breaks integration tests, see ARROW-3256
            // DKCHECK_EQ((*out)->body_length(), block.body_length);

            return ReadMessageAsync(block.offset, block.metadata_length, block.body_length, file,
                                    io_context);
        }

        class RecordBatchFileReaderImpl;

        /// A generator of record batches.
        ///
        /// All batches are yielded in order.
        ///
        /// Used when every column is being read: blocks are fetched through an
        /// optional ReadRangeCache and decoded, optionally on `executor`.
        /// Definitions of the declared methods are outside this view.
        class TURBO_EXPORT WholeIpcFileRecordBatchGenerator {
        public:
            using Item = std::shared_ptr<RecordBatch>;

            explicit WholeIpcFileRecordBatchGenerator(
                    std::shared_ptr<RecordBatchFileReaderImpl> state,
                    std::shared_ptr<io::internal::ReadRangeCache> cached_source,
                    const io::IOContext &io_context, nebula::internal::Executor *executor)
                    : state_(std::move(state)),
                      cached_source_(std::move(cached_source)),
                      io_context_(io_context),
                      executor_(executor),
                      index_(0) {}

            // Produce the next record batch (generator call operator).
            Future<Item> operator()();

            // Asynchronously fetch the message for one footer block.
            Future<std::shared_ptr<Message>> ReadBlock(const FileBlock &block);

            // Load all dictionary messages into the reader state.
            static turbo::Status ReadDictionaries(
                    RecordBatchFileReaderImpl *state,
                    std::vector<std::shared_ptr<Message>> dictionary_messages);

            // Decode a single record-batch message using the reader state.
            static turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
                    RecordBatchFileReaderImpl *state, Message *message);

        private:
            std::shared_ptr<RecordBatchFileReaderImpl> state_;
            std::shared_ptr<io::internal::ReadRangeCache> cached_source_;
            io::IOContext io_context_;
            nebula::internal::Executor *executor_;  // may run decode work off-thread
            int index_;  // next record batch to yield
            // Odd Future type, but this lets us use All() easily
            Future<> read_dictionaries_;
        };

        /// A generator of record batches for use when reading
        /// a subset of columns from the file.
        ///
        /// All batches are yielded in order.
        ///
        /// The definition of operator() is outside this view.
        class TURBO_EXPORT SelectiveIpcFileRecordBatchGenerator {
        public:
            using Item = std::shared_ptr<RecordBatch>;

            explicit SelectiveIpcFileRecordBatchGenerator(
                    std::shared_ptr<RecordBatchFileReaderImpl> state)
                    : state_(std::move(state)), index_(0) {}

            // Produce the next record batch (generator call operator).
            Future<Item> operator()();

        private:
            std::shared_ptr<RecordBatchFileReaderImpl> state_;
            int index_;  // next record batch to yield
        };

        class RecordBatchFileReaderImpl : public RecordBatchFileReader {
        public:
            // Construct with empty state; file_/footer_ are presumably populated by
            // a later open/initialization step — confirm against the out-of-view code.
            RecordBatchFileReaderImpl() : file_(nullptr), footer_offset_(0), footer_(nullptr) {}

            // Number of record-batch blocks recorded in the file footer.
            int num_record_batches() const override {
                const auto *batches = footer_->record_batches();
                return static_cast<int>(internal::FlatBuffersVectorSize(batches));
            }

            // Metadata version declared by the file footer.
            MetadataVersion version() const override {
                const auto fb_version = footer_->version();
                return internal::GetMetadataVersion(fb_version);
            }

            // Walk the schema in order, loading the fields selected by
            // `inclusion_mask` (all fields when the mask is null) and explicitly
            // skipping the rest so the loader stays positioned on the next field.
            // Each loaded column's length is validated against the batch length.
            static turbo::Status LoadFieldsSubset(const flatbuf::RecordBatch *metadata,
                                                  const IpcReadOptions &options,
                                                  io::RandomAccessFile *file,
                                                  const std::shared_ptr<Schema> &schema,
                                                  const std::vector<bool> *inclusion_mask,
                                                  MetadataVersion metadata_version = MetadataVersion::V5) {
                ArrayLoader loader(metadata, metadata_version, options, file);
                const int num_fields = schema->num_fields();
                for (int field_index = 0; field_index < num_fields; ++field_index) {
                    const Field &field = *schema->field(field_index);
                    const bool selected = !inclusion_mask || (*inclusion_mask)[field_index];
                    if (!selected) {
                        // Skip field. This logic must be executed to advance the state
                        // of the loader to the next field.
                        TURBO_RETURN_NOT_OK(loader.skip_field(&field));
                        continue;
                    }
                    // read field
                    ArrayData column;
                    TURBO_RETURN_NOT_OK(loader.Load(&field, &column));
                    if (metadata->length() != column.length) {
                        return turbo::io_error("Array length did not match record batch length");
                    }
                }
                return turbo::OkStatus();
            }

            // Asynchronously read batch `i`; only supported when its metadata was
            // previously cached by a pre-buffering call.
            Future<std::shared_ptr<RecordBatch>> ReadRecordBatchAsync(int i) {
                DKCHECK_GE(i, 0);
                DKCHECK_LT(i, num_record_batches());

                const auto it = cached_metadata_.find(i);
                if (it == cached_metadata_.end()) {
                    return turbo::invalid_argument_error(
                            "Asynchronous record batch reading is only supported after a call to "
                            "pre_buffer_metadata or PreBufferBatches");
                }
                return ReadCachedRecordBatch(i, it->second);
            }

            // Synchronous read of batch `i`, discarding any attached custom metadata.
            turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(int i) override {
                TURBO_MOVE_OR_RAISE(auto with_metadata, read_record_batch_with_custom_metadata(i));
                return with_metadata.batch;
            }

            // Read record batch i together with the message-level custom metadata.
            // Takes the pre-buffered (cached) path when pre_buffer_metadata was called
            // for this index, otherwise reads the block synchronously from the file.
            turbo::Result<RecordBatchWithMetadata> read_record_batch_with_custom_metadata(int i) override {
                        DKCHECK_GE(i, 0);
                        DKCHECK_LT(i, num_record_batches());

                // Fast path: reuse the metadata message future created by
                // DoPreBufferMetadata.
                auto cached_metadata = cached_metadata_.find(i);
                if (cached_metadata != cached_metadata_.end()) {
                    // NOTE(review): .result() blocks this thread until the cached
                    // asynchronous read completes.
                    auto result = ReadCachedRecordBatch(i, cached_metadata->second).result();
                    TURBO_MOVE_OR_RAISE(auto batch, result);
                    TURBO_MOVE_OR_RAISE(auto message_obj, cached_metadata->second.result());
                    TURBO_MOVE_OR_RAISE(auto message, GetFlatbufMessage(message_obj));
                    std::shared_ptr<KeyValueMetadata> custom_metadata;
                    if (message->custom_metadata() != nullptr) {
                        TURBO_RETURN_NOT_OK(
                                internal::GetKeyValueMetadata(message->custom_metadata(), &custom_metadata));
                    }
                    return RecordBatchWithMetadata{std::move(batch), std::move(custom_metadata)};
                }

                // Slow path: all dictionaries must be resolved before any batch can be
                // decoded.
                TURBO_RETURN_NOT_OK(WaitForDictionaryReadFinished());

                // When only a subset of fields is selected, hand the message reader a
                // loader that reads just the buffers of the included fields.
                FieldsLoaderFunction fields_loader = {};
                if (!field_inclusion_mask_.empty()) {
                    auto &schema = schema_;
                    auto &inclusion_mask = field_inclusion_mask_;
                    auto &read_options = options_;
                    // The lambda captures by value (copies), so it does not depend on
                    // `this` staying alive.
                    fields_loader = [schema, inclusion_mask, read_options](const void *metadata,
                                                                           io::RandomAccessFile *file) {
                        return LoadFieldsSubset(static_cast<const flatbuf::RecordBatch *>(metadata),
                                                read_options, file, schema, &inclusion_mask);
                    };
                }
                TURBO_MOVE_OR_RAISE(auto message,
                                    ReadMessageFromBlock(GetRecordBatchBlock(i), fields_loader));

                CHECK_HAS_BODY(*message);
                TURBO_MOVE_OR_RAISE(auto reader, Buffer::get_reader(message->body()));
                IpcReadContext context(&dictionary_memo_, options_, swap_endian_);
                TURBO_MOVE_OR_RAISE(
                        auto batch_with_metadata,
                        read_record_batch_internal(*message->metadata(), schema_, field_inclusion_mask_,
                                                context, reader.get()));
                stats_.num_record_batches.fetch_add(1, std::memory_order_relaxed);
                return batch_with_metadata;
            }

            turbo::Result<int64_t> count_rows() override {
                // Sum the declared row counts from each record batch header; batch
                // bodies are never deserialized.
                int64_t num_rows = 0;
                const int n = num_record_batches();
                for (int block_index = 0; block_index < n; ++block_index) {
                    TURBO_MOVE_OR_RAISE(auto outer_message,
                                        ReadMessageFromBlock(GetRecordBatchBlock(block_index)));
                    auto metadata = outer_message->metadata();
                    const flatbuf::Message *message = nullptr;
                    TURBO_RETURN_NOT_OK(
                            internal::VerifyMessage(metadata->data(), metadata->size(), &message));
                    const auto *batch = message->header_as_RecordBatch();
                    if (batch == nullptr) {
                        return turbo::io_error(
                                "Header-type of flatbuffer-encoded Message is not RecordBatch.");
                    }
                    num_rows += batch->length();
                }
                return num_rows;
            }

            turbo::Status open(const std::shared_ptr<io::RandomAccessFile> &file, int64_t footer_offset,
                               const IpcReadOptions &options) {
                // Construct the range cache up front so the non-owning overload below
                // reuses it instead of building one around a raw pointer.
                metadata_cache_ = std::make_shared<io::internal::ReadRangeCache>(
                        file, file->io_context(), options.pre_buffer_cache_options);
                // Retain shared ownership of the file for the reader's lifetime.
                owned_file_ = file;
                return open(file.get(), footer_offset, options);
            }

            turbo::Status open(io::RandomAccessFile *file, int64_t footer_offset,
                               const IpcReadOptions &options) {
                file_ = file;
                options_ = options;
                footer_offset_ = footer_offset;
                // The owning overload of open may already have installed a cache built
                // around its shared_ptr; only build one here if it has not.
                if (metadata_cache_ == nullptr) {
                    metadata_cache_ = std::make_shared<io::internal::ReadRangeCache>(
                            file, file->io_context(), options.pre_buffer_cache_options);
                }
                TURBO_RETURN_NOT_OK(ReadFooter());

                // Decode the schema message and record any observed dictionaries.
                TURBO_RETURN_NOT_OK(UnpackSchemaMessage(footer_->schema(), options, &dictionary_memo_,
                                                        &schema_, &out_schema_, &field_inclusion_mask_,
                                                        &swap_endian_));
                stats_.num_messages.fetch_add(1, std::memory_order_relaxed);
                return turbo::OkStatus();
            }

            Future<> open_async(const std::shared_ptr<io::RandomAccessFile> &file,
                               int64_t footer_offset, const IpcReadOptions &options) {
                // Construct the range cache up front so the non-owning overload below
                // reuses it instead of building one around a raw pointer.
                metadata_cache_ = std::make_shared<io::internal::ReadRangeCache>(
                        file, file->io_context(), options.pre_buffer_cache_options);
                // Retain shared ownership of the file for the reader's lifetime.
                owned_file_ = file;
                return open_async(file.get(), footer_offset, options);
            }

            Future<> open_async(io::RandomAccessFile *file, int64_t footer_offset,
                               const IpcReadOptions &options) {
                file_ = file;
                options_ = options;
                footer_offset_ = footer_offset;
                // The owning overload of open_async may already have installed a cache
                // built around its shared_ptr; only build one here if it has not.
                if (metadata_cache_ == nullptr) {
                    metadata_cache_ = std::make_shared<io::internal::ReadRangeCache>(
                            file, file->io_context(), options.pre_buffer_cache_options);
                }
                auto executor = ::nebula::internal::get_cpu_thread_pool();
                // Capture a shared_ptr to ourselves so the impl outlives the continuation.
                auto impl = std::dynamic_pointer_cast<RecordBatchFileReaderImpl>(shared_from_this());
                return ReadFooterAsync(executor).Then([impl, options]() -> turbo::Status {
                    // Decode the schema message and record any observed dictionaries.
                    TURBO_RETURN_NOT_OK(UnpackSchemaMessage(
                            impl->footer_->schema(), options, &impl->dictionary_memo_, &impl->schema_,
                            &impl->out_schema_, &impl->field_inclusion_mask_, &impl->swap_endian_));
                    impl->stats_.num_messages.fetch_add(1, std::memory_order_relaxed);
                    return turbo::OkStatus();
                });
            }

            std::shared_ptr<Schema> schema() const override {
                // Expose the projected schema (deselected fields dropped).
                return out_schema_;
            }

            std::shared_ptr<const KeyValueMetadata> metadata() const override {
                // Footer-level custom metadata (may be null if the footer carried none).
                return metadata_;
            }

            ReadStats stats() const override {
                // Snapshot of the atomic counters.
                return stats_.poll();
            }

            turbo::Result<AsyncGenerator<std::shared_ptr<RecordBatch>>> get_record_batch_generator(
                    const bool coalesce, const io::IOContext &io_context,
                    const io::CacheOptions cache_options,
                    nebula::internal::Executor *executor) override {
                auto self = std::dynamic_pointer_cast<RecordBatchFileReaderImpl>(shared_from_this());
                // Prebuffering causes us to use a lot of futures which, at the moment,
                // can only slow things down when we are doing zero-copy in-memory reads.
                //
                // Prebuffering's read patterns are also slightly worse than the alternative
                // when doing whole-file reads because the logic is not in place to recognize
                // we can just read the entire file up-front
                const bool partial_projection =
                        options_.included_fields.size() != 0 &&
                        options_.included_fields.size() != schema_->fields().size();
                if (partial_projection && !file_->supports_zero_copy()) {
                    TURBO_RETURN_NOT_OK(self->pre_buffer_metadata({}));
                    return SelectiveIpcFileRecordBatchGenerator(std::move(self));
                }

                std::shared_ptr<io::internal::ReadRangeCache> cached_source;
                if (coalesce && !file_->supports_zero_copy()) {
                    if (!owned_file_) return turbo::invalid_argument_error("Cannot coalesce without an owned file");
                    // Since the user is asking for all fields then we can cache the entire
                    // file (up to the footer)
                    cached_source = std::make_shared<io::internal::ReadRangeCache>(file_, io_context,
                                                                                   cache_options);
                    TURBO_RETURN_NOT_OK(cached_source->Cache({{0, footer_offset_}}));
                }
                return WholeIpcFileRecordBatchGenerator(std::move(self), std::move(cached_source),
                                                        io_context, executor);
            }

            // Queue cache reads for the metadata of the requested record batches (plus
            // all dictionary blocks on first use) and register one metadata-message
            // future per index in cached_metadata_.
            turbo::Status DoPreBufferMetadata(const std::vector<int> &indices) {
                TURBO_RETURN_NOT_OK(CacheMetadata(indices));
                // Kick off the (idempotent) asynchronous dictionary load.
                EnsureDictionaryReadStarted();
                Future<> all_metadata_ready = WaitForMetadatas(indices);
                for (int index: indices) {
                    // NOTE(review): the continuation captures `this` raw; this assumes
                    // the reader outlives the stored futures — confirm with callers.
                    Future<std::shared_ptr<Message>> metadata_loaded =
                            all_metadata_ready.Then([this, index]() -> turbo::Result<std::shared_ptr<Message>> {
                                stats_.num_messages.fetch_add(1, std::memory_order_relaxed);
                                FileBlock block = GetRecordBatchBlock(index);
                                // Only the metadata portion of the block was cached.
                                TURBO_MOVE_OR_RAISE(
                                        std::shared_ptr<Buffer> metadata,
                                        metadata_cache_->read({block.offset, block.metadata_length}));
                                return ReadMessage(std::move(metadata), nullptr);
                            });
                    cached_metadata_.emplace(index, metadata_loaded);
                }
                return turbo::OkStatus();
            }

            std::vector<int> AllIndices() const {
                // 0, 1, ..., num_record_batches() - 1
                std::vector<int> indices(num_record_batches());
                std::iota(indices.begin(), indices.end(), 0);
                return indices;
            }

            turbo::Status pre_buffer_metadata(const std::vector<int> &indices) override {
                // An empty index list is shorthand for "all record batches".
                return indices.empty() ? DoPreBufferMetadata(AllIndices())
                                       : DoPreBufferMetadata(indices);
            }

        private:
            friend class WholeIpcFileRecordBatchGenerator;

            // Thread-safe counters mirroring ReadStats; updated with relaxed atomics.
            struct AtomicReadStats {
                std::atomic<int64_t> num_messages{0};
                std::atomic<int64_t> num_record_batches{0};
                std::atomic<int64_t> num_dictionary_batches{0};
                std::atomic<int64_t> num_dictionary_deltas{0};
                std::atomic<int64_t> num_replaced_dictionaries{0};

                /// \brief Capture a copy of the current counters
                ReadStats poll() const {
                    constexpr auto relaxed = std::memory_order_relaxed;
                    ReadStats snapshot;
                    snapshot.num_messages = num_messages.load(relaxed);
                    snapshot.num_record_batches = num_record_batches.load(relaxed);
                    snapshot.num_dictionary_batches = num_dictionary_batches.load(relaxed);
                    snapshot.num_dictionary_deltas = num_dictionary_deltas.load(relaxed);
                    snapshot.num_replaced_dictionaries = num_replaced_dictionaries.load(relaxed);
                    return snapshot;
                }
            };

            FileBlock GetRecordBatchBlock(int i) const {
                // Translate the footer's flatbuffer descriptor for record batch i.
                const auto *fb_block = footer_->record_batches()->Get(i);
                return FileBlockFromFlatbuffer(fb_block);
            }

            FileBlock GetDictionaryBlock(int i) const {
                // Translate the footer's flatbuffer descriptor for dictionary i.
                const auto *fb_block = footer_->dictionaries()->Get(i);
                return FileBlockFromFlatbuffer(fb_block);
            }

            turbo::Result<std::unique_ptr<Message>> ReadMessageFromBlock(
                    const FileBlock &block, const FieldsLoaderFunction &fields_loader = {}) {
                // Delegate to the free function, then account for the message in stats.
                TURBO_MOVE_OR_RAISE(auto msg,
                                    nebula::ipc::ReadMessageFromBlock(block, file_, fields_loader));
                stats_.num_messages.fetch_add(1, std::memory_order_relaxed);
                return msg;
            }

            turbo::Status ReadDictionaries() {
                // Load every dictionary batch recorded in the footer, in file order.
                IpcReadContext context(&dictionary_memo_, options_, swap_endian_);
                const int n = num_dictionaries();
                for (int dict_index = 0; dict_index < n; ++dict_index) {
                    TURBO_MOVE_OR_RAISE(auto message,
                                        ReadMessageFromBlock(GetDictionaryBlock(dict_index)));
                    TURBO_RETURN_NOT_OK(ReadOneDictionary(message.get(), context));
                    stats_.num_dictionary_batches.fetch_add(1, std::memory_order_relaxed);
                }
                return turbo::OkStatus();
            }

            turbo::Status ReadOneDictionary(Message *message, const IpcReadContext &context) {
                CHECK_HAS_BODY(*message);
                TURBO_MOVE_OR_RAISE(auto body_reader, Buffer::get_reader(message->body()));
                DictionaryKind kind;
                TURBO_RETURN_NOT_OK(
                        ReadDictionary(*message->metadata(), context, &kind, body_reader.get()));
                switch (kind) {
                    case DictionaryKind::Replacement:
                        return turbo::invalid_argument_error("Unsupported dictionary replacement in IPC file");
                    case DictionaryKind::Delta:
                        stats_.num_dictionary_deltas.fetch_add(1, std::memory_order_relaxed);
                        break;
                    default:
                        break;
                }
                return turbo::OkStatus();
            }

            void AddDictionaryRanges(std::vector<io::ReadRange> *ranges) const {
                // Queue one read per dictionary block, covering metadata + body.
                const int n = num_dictionaries();
                for (int dict_index = 0; dict_index < n; ++dict_index) {
                    const FileBlock block = GetDictionaryBlock(dict_index);
                    ranges->push_back({block.offset, block.metadata_length + block.body_length});
                }
            }

            void AddMetadataRanges(const std::vector<int> &indices,
                                   std::vector<io::ReadRange> *ranges) {
                for (int index: indices) {
                    FileBlock block = GetRecordBatchBlock(static_cast<int>(index));
                    ranges->push_back({block.offset, block.metadata_length});
                }
            }

            turbo::Status CacheMetadata(const std::vector<int> &indices) {
                std::vector<io::ReadRange> to_cache;
                // Dictionary blocks only need to be fetched before their first read.
                if (!read_dictionaries_) {
                    AddDictionaryRanges(&to_cache);
                }
                AddMetadataRanges(indices, &to_cache);
                return metadata_cache_->Cache(std::move(to_cache));
            }

            void EnsureDictionaryReadStarted() {
                if (!dictionary_load_finished_.is_valid()) {
                    read_dictionaries_ = true;
                    std::vector<io::ReadRange> ranges;
                    AddDictionaryRanges(&ranges);
                    dictionary_load_finished_ =
                            metadata_cache_->WaitFor(std::move(ranges)).Then([this] {
                                return ReadDictionaries();
                            });
                }
            }

            turbo::Status WaitForDictionaryReadFinished() {
                if (!read_dictionaries_) {
                    // No asynchronous load was ever started; read synchronously now.
                    TURBO_RETURN_NOT_OK(ReadDictionaries());
                    read_dictionaries_ = true;
                    return turbo::OkStatus();
                }
                // Block on the in-flight asynchronous load if there is one; otherwise
                // the dictionaries were previously loaded synchronously.
                return dictionary_load_finished_.is_valid()
                               ? dictionary_load_finished_.status()
                               : turbo::OkStatus();
            }

            Future<> WaitForMetadatas(const std::vector<int> &indices) {
                // Completes once every requested metadata range is resident in cache.
                std::vector<io::ReadRange> metadata_ranges;
                AddMetadataRanges(indices, &metadata_ranges);
                return metadata_cache_->WaitFor(std::move(metadata_ranges));
            }

            /// \brief Build the IpcReadContext for a record batch message, resolving
            /// its compression codec and metadata version.
            turbo::Result<IpcReadContext> GetIpcReadContext(const flatbuf::Message *message,
                                                            const flatbuf::RecordBatch *batch) {
                IpcReadContext context(&dictionary_memo_, options_, swap_endian_);
                CompressionType compression;
                TURBO_RETURN_NOT_OK(get_compression(batch, &compression));
                // Fix: the guard previously tested `context.compression`, which at this
                // point still holds its freshly-constructed default, so it did not
                // reflect the codec just read from the batch. Test the local instead.
                if (compression == CompressionType::UNCOMPRESSED &&
                    message->version() == flatbuf::MetadataVersion::V4) {
                    // Possibly obtain codec information from experimental serialization format
                    // in 0.17.x
                    TURBO_RETURN_NOT_OK(get_compression_experimental(message, &compression));
                }
                context.compression = compression;
                context.metadata_version = internal::GetMetadataVersion(message->version());
                return context;
            }

            turbo::Result<const flatbuf::RecordBatch *> GetBatchFromMessage(
                    const flatbuf::Message *message) {
                // The flatbuffer header union must actually hold a RecordBatch.
                if (const auto *batch = message->header_as_RecordBatch()) {
                    return batch;
                }
                return turbo::io_error(
                        "Header-type of flatbuffer-encoded Message is not RecordBatch.");
            }

            turbo::Result<const flatbuf::Message *> GetFlatbufMessage(
                    const std::shared_ptr<Message> &message) {
                // Verify and decode the flatbuffer held in the message metadata buffer.
                const BufferSpan metadata = *message->metadata();
                const flatbuf::Message *decoded = nullptr;
                TURBO_RETURN_NOT_OK(
                        internal::VerifyMessage(metadata.data(), metadata.size(), &decoded));
                return decoded;
            }

            // Holds everything needed to materialize one pre-buffered record batch:
            // the load plan (loader), a private range cache for the body reads, and
            // the (possibly column-filtered) output schema and columns.
            struct CachedRecordBatchReadContext {
                CachedRecordBatchReadContext(std::shared_ptr<Schema> sch,
                                             const flatbuf::RecordBatch *batch,
                                             IpcReadContext context, io::RandomAccessFile *file,
                                             std::shared_ptr<io::RandomAccessFile> owned_file,
                                             int64_t block_data_offset)
                        : schema(std::move(sch)),
                          context(std::move(context)),
                          file(file),
                          owned_file(std::move(owned_file)),
                          // NOTE(review): `context` below names the constructor
                          // parameter, which was moved from just above; this relies on
                          // the moved-from IpcReadContext still exposing valid
                          // metadata_version/options — confirm IpcReadContext's move
                          // semantics.
                          loader(batch, context.metadata_version, context.options, block_data_offset),
                          columns(schema->num_fields()),
                          cache(file, file->io_context(), io::CacheOptions::lazy_defaults()),
                          length(batch->length()) {}

                // Walk the schema once, loading included fields (which records their
                // buffer ranges in loader's read request) and skipping the rest, and
                // derive the filtered output schema.
                turbo::Status CalculateLoadRequest() {
                    std::shared_ptr<Schema> out_schema;
                    TURBO_RETURN_NOT_OK(GetInclusionMaskAndOutSchema(schema, context.options.included_fields,
                                                                     &inclusion_mask, &out_schema));

                    for (int i = 0; i < schema->num_fields(); ++i) {
                        const Field &field = *schema->field(i);
                        if (inclusion_mask.size() == 0 || inclusion_mask[i]) {
                            // read field
                            auto column = std::make_shared<ArrayData>();
                            TURBO_RETURN_NOT_OK(loader.Load(&field, column.get()));
                            if (length != column->length) {
                                return turbo::io_error("Array length did not match record batch length");
                            }
                            columns[i] = std::move(column);
                            if (inclusion_mask.size() > 0) {
                                filtered_columns.push_back(columns[i]);
                                filtered_fields.push_back(schema->field(i));
                            }
                        } else {
                            // Skip field. This logic must be executed to advance the state of the
                            // loader to the next field
                            TURBO_RETURN_NOT_OK(loader.skip_field(&field));
                        }
                    }
                    if (inclusion_mask.size() > 0) {
                        filtered_schema = ::nebula::schema(std::move(filtered_fields), schema->metadata());
                    } else {
                        filtered_schema = schema;
                    }
                    return turbo::OkStatus();
                }

                // Issue the planned body reads; the returned future completes once all
                // ranges are resident in this context's private cache.
                Future<> read_async() {
                    TURBO_RETURN_NOT_OK(cache.Cache(loader.read_request().ranges_to_read()));
                    return cache.WaitFor(loader.read_request().ranges_to_read());
                }

                // Fulfill the loader's buffer request from the cache, then resolve
                // dictionaries, decompress, and byte-swap as needed before assembling
                // the batch. Must run after read_async's future completes.
                turbo::Result<std::shared_ptr<RecordBatch>> CreateRecordBatch() {
                    std::vector<std::shared_ptr<Buffer>> buffers;
                    for (const auto &range_to_read: loader.read_request().ranges_to_read()) {
                        TURBO_MOVE_OR_RAISE(auto buffer, cache.read(range_to_read));
                        buffers.push_back(std::move(buffer));
                    }
                    loader.read_request().FulfillRequest(buffers);

                    // Dictionary resolution needs to happen on the unfiltered columns,
                    // because fields are mapped structurally (by path in the original schema).
                    TURBO_RETURN_NOT_OK(ResolveDictionaries(columns, *context.dictionary_memo,
                                                            context.options.memory_pool));
                    if (inclusion_mask.size() > 0) {
                        columns.clear();
                    } else {
                        filtered_columns = std::move(columns);
                    }

                    if (context.compression != CompressionType::UNCOMPRESSED) {
                        TURBO_RETURN_NOT_OK(
                                decompress_buffers(context.compression, context.options, &filtered_columns));
                    }

                    // swap endian in a set of ArrayData if necessary (swap_endian == true)
                    if (context.swap_endian) {
                        for (int i = 0; i < static_cast<int>(filtered_columns.size()); ++i) {
                            TURBO_MOVE_OR_RAISE(filtered_columns[i],
                                                nebula::internal::SwapEndianArrayData(
                                                        filtered_columns[i], context.options.memory_pool));
                        }
                    }
                    return RecordBatch::create(std::move(filtered_schema), length,
                                               std::move(filtered_columns));
                }

                std::shared_ptr<Schema> schema;
                IpcReadContext context;
                io::RandomAccessFile *file;
                // Keeps the file alive while reads are outstanding (may be null).
                std::shared_ptr<io::RandomAccessFile> owned_file;

                ArrayLoader loader;
                // One slot per field of the full schema; cleared after filtering.
                ArrayDataVector columns;
                io::internal::ReadRangeCache cache;
                // Row count declared by the batch header.
                int64_t length;
                ArrayDataVector filtered_columns;
                FieldVector filtered_fields;
                std::shared_ptr<Schema> filtered_schema;
                // Empty means "include every field".
                std::vector<bool> inclusion_mask;
            };

            // Chain: wait for dictionaries -> wait for the cached metadata message ->
            // plan and issue the body reads -> assemble the RecordBatch.
            Future<std::shared_ptr<RecordBatch>> ReadCachedRecordBatch(
                    int index, Future<std::shared_ptr<Message>> message_fut) {
                // Counted at request time, not at completion time.
                stats_.num_record_batches.fetch_add(1, std::memory_order_relaxed);
                return dictionary_load_finished_.Then([message_fut] { return message_fut; })
                        .Then([this, index](const std::shared_ptr<Message> &message_obj)
                                      -> Future<std::shared_ptr<RecordBatch>> {
                            FileBlock block = GetRecordBatchBlock(index);
                            TURBO_MOVE_OR_RAISE(auto message, GetFlatbufMessage(message_obj));
                            TURBO_MOVE_OR_RAISE(auto batch, GetBatchFromMessage(message));
                            TURBO_MOVE_OR_RAISE(auto context, GetIpcReadContext(message, batch));

                            // The batch body begins right after the metadata in the block.
                            auto read_context = std::make_shared<CachedRecordBatchReadContext>(
                                    schema_, batch, std::move(context), file_, owned_file_,
                                    block.offset + static_cast<int64_t>(block.metadata_length));
                            TURBO_RETURN_NOT_OK(read_context->CalculateLoadRequest());
                            // The shared_ptr capture keeps read_context alive until the
                            // final continuation runs.
                            return read_context->read_async().Then(
                                    [read_context] { return read_context->CreateRecordBatch(); });
                        });
            }

            turbo::Status ReadFooter() {
                // Synchronous wrapper: run the async footer read inline and wait for it.
                return ReadFooterAsync(/*executor=*/nullptr).status();
            }

            // Read and verify the file footer in two async stages: first the trailing
            // "footer length + magic" suffix, then the footer flatbuffer itself.
            // `executor` may be null, in which case continuations run inline.
            Future<> ReadFooterAsync(nebula::internal::Executor *executor) {
                const int32_t magic_size = static_cast<int>(strlen(kArrowMagicBytes));

                // Minimum layout: leading magic + trailing (footer length + magic).
                if (footer_offset_ <= magic_size * 2 + 4) {
                    return turbo::invalid_argument_error("File is too small: ", footer_offset_);
                }

                int file_end_size = static_cast<int>(magic_size + sizeof(int32_t));
                // Keep the impl alive across the continuations.
                auto self = std::dynamic_pointer_cast<RecordBatchFileReaderImpl>(shared_from_this());
                auto read_magic = file_->read_async(footer_offset_ - file_end_size, file_end_size);
                if (executor) read_magic = executor->transfer(std::move(read_magic));
                return read_magic
                        .Then([=](const std::shared_ptr<Buffer> &buffer)
                                      -> Future<std::shared_ptr<Buffer>> {
                            const int64_t expected_footer_size = magic_size + sizeof(int32_t);
                            if (buffer->size() < expected_footer_size) {
                                return turbo::invalid_argument_error("Unable to read ", expected_footer_size,
                                                                     "from end of file");
                            }

                            // The suffix is: int32 footer length, then the magic bytes.
                            if (memcmp(buffer->data() + sizeof(int32_t), kArrowMagicBytes, magic_size)) {
                                return turbo::invalid_argument_error("Not an Nebula file");
                            }

                            int32_t footer_length = bit_util::FromLittleEndian(
                                    *reinterpret_cast<const int32_t *>(buffer->data()));

                            if (footer_length <= 0 ||
                                footer_length > self->footer_offset_ - magic_size * 2 - 4) {
                                return turbo::invalid_argument_error("File is smaller than indicated metadata size");
                            }

                            // Now read the footer
                            auto read_footer = self->file_->read_async(
                                    self->footer_offset_ - footer_length - file_end_size, footer_length);
                            if (executor) read_footer = executor->transfer(std::move(read_footer));
                            return read_footer;
                        })
                        .Then([=](const std::shared_ptr<Buffer> &buffer) -> turbo::Status {
                            // footer_ points into footer_buffer_, so the buffer must be
                            // retained for the reader's lifetime.
                            self->footer_buffer_ = buffer;
                            const auto data = self->footer_buffer_->data();
                            const auto size = self->footer_buffer_->size();
                            if (!internal::VerifyFlatbuffers<flatbuf::Footer>(data, size)) {
                                return turbo::io_error("Verification of flatbuffer-encoded Footer failed.");
                            }
                            self->footer_ = flatbuf::GetFooter(data);

                            auto fb_metadata = self->footer_->custom_metadata();
                            if (fb_metadata != nullptr) {
                                std::shared_ptr<KeyValueMetadata> md;
                                TURBO_RETURN_NOT_OK(internal::GetKeyValueMetadata(fb_metadata, &md));
                                self->metadata_ = std::move(md);  // const-ify
                            }
                            return turbo::OkStatus();
                        });
            }

            int num_dictionaries() const {
                // Null-safe size of the footer's dictionary block vector.
                const auto count = internal::FlatBuffersVectorSize(footer_->dictionaries());
                return static_cast<int>(count);
            }

            // Non-owning handle to the underlying file; owned_file_ below keeps it
            // alive when the reader was opened from a shared_ptr.
            io::RandomAccessFile *file_;
            IpcReadOptions options_;
            // Per-field flags of schema_: true if the field is part of the projection.
            std::vector<bool> field_inclusion_mask_;

            std::shared_ptr<io::RandomAccessFile> owned_file_;

            // The location where the Nebula file layout ends. May be the end of the file
            // or some other location if embedded in a larger file.
            int64_t footer_offset_;

            // Footer metadata
            std::shared_ptr<Buffer> footer_buffer_;
            // Points into footer_buffer_; valid only while that buffer is retained.
            const flatbuf::Footer *footer_;
            std::shared_ptr<const KeyValueMetadata> metadata_;

            // Set once dictionaries were read synchronously or an async load started.
            bool read_dictionaries_ = false;
            DictionaryMemo dictionary_memo_;

            // Reconstructed schema, including any read dictionaries
            std::shared_ptr<Schema> schema_;
            // Schema with deselected fields dropped
            std::shared_ptr<Schema> out_schema_;

            AtomicReadStats stats_;
            std::shared_ptr<io::internal::ReadRangeCache> metadata_cache_;
            std::unordered_set<int> cached_data_blocks_;
            // Completion of the asynchronous dictionary load, if one was started.
            Future<> dictionary_load_finished_;
            // batch index -> pre-buffered metadata message (see DoPreBufferMetadata).
            std::unordered_map<int, Future<std::shared_ptr<Message>>> cached_metadata_;
            std::unordered_map<int, Future<>> cached_data_requests_;

            // Whether values must be byte-swapped to the native endianness on read.
            bool swap_endian_;
        };

        turbo::Result<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open(
                io::RandomAccessFile *file, const IpcReadOptions &options) {
            // Without an explicit footer offset, assume the footer ends at EOF.
            TURBO_MOVE_OR_RAISE(int64_t eof_offset, file->get_size());
            return open(file, eof_offset, options);
        }

        turbo::Result<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open(
                io::RandomAccessFile *file, int64_t footer_offset, const IpcReadOptions &options) {
            // Construct the impl and open it synchronously against the raw file.
            auto reader = std::make_shared<RecordBatchFileReaderImpl>();
            TURBO_RETURN_NOT_OK(reader->open(file, footer_offset, options));
            return reader;
        }

        turbo::Result<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open(
                const std::shared_ptr<io::RandomAccessFile> &file, const IpcReadOptions &options) {
            // Without an explicit footer offset, assume the footer ends at EOF.
            TURBO_MOVE_OR_RAISE(int64_t eof_offset, file->get_size());
            return open(file, eof_offset, options);
        }

        turbo::Result<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open(
                const std::shared_ptr<io::RandomAccessFile> &file, int64_t footer_offset,
                const IpcReadOptions &options) {
            // Construct the impl and open it synchronously; the impl keeps a shared
            // reference to the file.
            auto reader = std::make_shared<RecordBatchFileReaderImpl>();
            TURBO_RETURN_NOT_OK(reader->open(file, footer_offset, options));
            return reader;
        }

        /// \brief Asynchronously open an IPC file reader; the footer offset is
        /// taken to be the file size.
        Future<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open_async(
                const std::shared_ptr<io::RandomAccessFile> &file, const IpcReadOptions &options) {
            TURBO_MOVE_OR_RAISE(int64_t footer_offset, file->get_size());
            // `file` is a const reference, so the previous std::move() here was a
            // no-op that only obscured intent; pass the shared_ptr directly.
            return open_async(file, footer_offset, options);
        }

        /// \brief Asynchronously open an IPC file reader over a borrowed file
        /// pointer; the footer offset is taken to be the file size.
        Future<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open_async(
                io::RandomAccessFile *file, const IpcReadOptions &options) {
            TURBO_MOVE_OR_RAISE(const int64_t file_end, file->get_size());
            return open_async(file, file_end, options);
        }

        /// \brief Asynchronously open an IPC file reader (shared file ownership,
        /// explicit footer offset).
        ///
        /// The impl is kept alive by the continuation's capture until the open
        /// completes, at which point it is handed to the caller.
        Future<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open_async(
                const std::shared_ptr<io::RandomAccessFile> &file, int64_t footer_offset,
                const IpcReadOptions &options) {
            auto impl = std::make_shared<RecordBatchFileReaderImpl>();
            auto opened = impl->open_async(file, footer_offset, options);
            return opened.Then(
                    [impl]() -> turbo::Result<std::shared_ptr<RecordBatchFileReader>> {
                        return impl;
                    });
        }

        /// \brief Asynchronously open an IPC file reader (borrowed file pointer,
        /// explicit footer offset).
        Future<std::shared_ptr<RecordBatchFileReader>> RecordBatchFileReader::open_async(
                io::RandomAccessFile *file, int64_t footer_offset, const IpcReadOptions &options) {
            auto impl = std::make_shared<RecordBatchFileReaderImpl>();
            auto opened = impl->open_async(file, footer_offset, options);
            return opened.Then(
                    [impl]() -> turbo::Result<std::shared_ptr<RecordBatchFileReader>> {
                        return impl;
                    });
        }

        /// \brief Materialize every record batch in the file, in file order.
        turbo::Result<RecordBatchVector> RecordBatchFileReader::to_record_batches() {
            const auto total = num_record_batches();
            RecordBatchVector out;
            out.reserve(total);
            for (int index = 0; index < total; ++index) {
                TURBO_MOVE_OR_RAISE(auto next, read_record_batch(index));
                out.push_back(std::move(next));
            }
            return out;
        }

        /// \brief Read the whole file eagerly and stitch the batches into a Table.
        turbo::Result<std::shared_ptr<Table>> RecordBatchFileReader::to_table() {
            TURBO_MOVE_OR_RAISE(auto all_batches, to_record_batches());
            return Table::from_record_batches(schema(), std::move(all_batches));
        }

        // Async generator call: each invocation consumes the next batch index and
        // starts an asynchronous read of that batch.
        Future<SelectiveIpcFileRecordBatchGenerator::Item>
        SelectiveIpcFileRecordBatchGenerator::operator()() {
            int index = index_++;
            if (index >= state_->num_record_batches()) {
                // Past the last batch: signal end-of-iteration to the consumer.
                return turbo::iteration_end<SelectiveIpcFileRecordBatchGenerator::Item>();
            }
            return state_->ReadRecordBatchAsync(index);
        }

        // Async generator call for whole-file reads. On first pull it kicks off a
        // single shared read of all dictionary batches; every subsequent batch
        // decode is chained after that dictionary load completes.
        Future<WholeIpcFileRecordBatchGenerator::Item>
        WholeIpcFileRecordBatchGenerator::operator()() {
            auto state = state_;
            if (!read_dictionaries_.is_valid()) {
                // Issue one read per dictionary block and gather them into a single
                // future; the result is cached in read_dictionaries_ so this setup
                // happens at most once per generator.
                std::vector<Future<std::shared_ptr<Message>>> messages(state->num_dictionaries());
                for (int i = 0; i < state->num_dictionaries(); i++) {
                    auto block = FileBlockFromFlatbuffer(state->footer_->dictionaries()->Get(i));
                    messages[i] = ReadBlock(block);
                }
                auto read_messages = All(std::move(messages));
                if (executor_) read_messages = executor_->transfer(read_messages);
                read_dictionaries_ = read_messages.Then(
                        [=](const std::vector<turbo::Result<std::shared_ptr<Message>>> &maybe_messages)
                                -> turbo::Status {
                            TURBO_MOVE_OR_RAISE(auto messages,
                                                nebula::internal::UnwrapOrRaise(maybe_messages));
                            return ReadDictionaries(state.get(), std::move(messages));
                        });
            }
            if (index_ >= state_->num_record_batches()) {
                return Future<Item>::make_finished(turbo::IterationTraits<Item>::end());
            }
            // Start reading the next record-batch block now, but gate its decode on
            // the dictionary load finishing (decoding consults the dictionary memo).
            auto block = FileBlockFromFlatbuffer(state->footer_->record_batches()->Get(index_++));
            auto read_message = ReadBlock(block);
            auto read_messages = read_dictionaries_.Then([read_message]() { return read_message; });
            // Force transfer. This may be wasteful in some cases, but ensures we get off the
            // I/O threads as soon as possible, and ensures we don't decode record batches
            // synchronously in the case that the message read has already finished.
            if (executor_) {
                auto executor = executor_;
                return read_messages.Then(
                        [=](const std::shared_ptr<Message> &message) -> Future<Item> {
                            return DeferNotOk(executor->submit(
                                    [=]() { return read_record_batch(state.get(), message.get()); }));
                        });
            }
            return read_messages.Then([=](const std::shared_ptr<Message> &message) -> turbo::Result<Item> {
                return read_record_batch(state.get(), message.get());
            });
        }

        // Read one IPC file block as a Message, preferring the pre-fetch cache when
        // one was configured and falling back to a direct asynchronous file read.
        Future<std::shared_ptr<Message>> WholeIpcFileRecordBatchGenerator::ReadBlock(
                const FileBlock &block) {
            if (cached_source_) {
                auto cached_source = cached_source_;
                // A block's on-disk extent is its metadata immediately followed by
                // its body.
                io::ReadRange range{block.offset, block.metadata_length + block.body_length};
                auto pool = state_->options_.memory_pool;
                return cached_source->WaitFor({range}).Then(
                        [cached_source, pool, range]() -> turbo::Result<std::shared_ptr<Message>> {
                            TURBO_MOVE_OR_RAISE(auto buffer, cached_source->read(range));
                            io::BufferReader stream(std::move(buffer));
                            return ReadMessage(&stream, pool);
                        });
            } else {
                return ReadMessageFromBlockAsync(block, state_->file_, io_context_);
            }
        }

        // Decode every dictionary message into the reader's dictionary memo so
        // later record-batch decodes can resolve dictionary-encoded fields.
        turbo::Status WholeIpcFileRecordBatchGenerator::ReadDictionaries(
                RecordBatchFileReaderImpl *state,
                std::vector<std::shared_ptr<Message>> dictionary_messages) {
            IpcReadContext context(&state->dictionary_memo_, state->options_, state->swap_endian_);
            for (size_t i = 0; i < dictionary_messages.size(); ++i) {
                TURBO_RETURN_NOT_OK(
                        state->ReadOneDictionary(dictionary_messages[i].get(), context));
            }
            return turbo::OkStatus();
        }

        // Decode a single record-batch message against the reader's schema,
        // dictionary memo, and field-inclusion mask.
        turbo::Result<std::shared_ptr<RecordBatch>> WholeIpcFileRecordBatchGenerator::read_record_batch(
                RecordBatchFileReaderImpl *state, Message *message) {
            CHECK_HAS_BODY(*message);
            TURBO_MOVE_OR_RAISE(auto reader, Buffer::get_reader(message->body()));
            IpcReadContext context(&state->dictionary_memo_, state->options_, state->swap_endian_);
            TURBO_MOVE_OR_RAISE(
                    auto batch_with_metadata,
                    read_record_batch_internal(*message->metadata(), state->schema_,
                                            state->field_inclusion_mask_, context, reader.get()));
            // Any custom metadata attached to the batch is discarded here; only the
            // batch itself is surfaced.
            return batch_with_metadata.batch;
        }

        // Default end-of-stream callback: a no-op that reports success.
        turbo::Status Listener::on_eos() { return turbo::OkStatus(); }

        // Default schema callback: ignores the decoded schema and succeeds.
        turbo::Status Listener::on_schema_decoded(std::shared_ptr<Schema> schema) { return turbo::OkStatus(); }

        // Default two-argument schema callback: drops the filtered (field-selected)
        // schema and defers to the single-argument hook.
        turbo::Status Listener::on_schema_decoded(std::shared_ptr<Schema> schema,
                                                std::shared_ptr<Schema> filtered_schema) {
            return on_schema_decoded(std::move(schema));
        }

        // Default record-batch callback: subclasses are expected to override one of
        // the record-batch hooks, so the default reports "not implemented".
        turbo::Status Listener::on_record_batch_decoded(std::shared_ptr<RecordBatch> record_batch) {
            return turbo::unimplemented_error("on_record_batch_decoded() callback isn't implemented");
        }

        // Default metadata-aware callback: discards the custom metadata and
        // forwards just the batch to the plain record-batch hook.
        turbo::Status Listener::on_record_batch_with_metadata_decoded(
                RecordBatchWithMetadata record_batch_with_metadata) {
            return on_record_batch_decoded(std::move(record_batch_with_metadata.batch));
        }

        // Pimpl for StreamDecoder: couples a MessageDecoder to the listener-driven
        // StreamDecoderInternal machinery.
        class StreamDecoder::StreamDecoderImpl : public StreamDecoderInternal {
        public:
            // The MessageDecoder wants a shared_ptr to its listener (this object).
            // A shared_ptr with a no-op deleter is used so the decoder can refer to
            // us without ever owning — or deleting — this instance.
            explicit StreamDecoderImpl(std::shared_ptr<Listener> listener, IpcReadOptions options)
                    : StreamDecoderInternal(std::move(listener), options),
                      message_decoder_(std::shared_ptr<StreamDecoderImpl>(this, [](void *) {}),
                                       options.memory_pool) {}

            // Feed `size` raw bytes starting at `data` into the message decoder.
            turbo::Status consume(const uint8_t *data, int64_t size) {
                return message_decoder_.consume(data, size);
            }

            // Feed a whole buffer into the message decoder.
            turbo::Status consume(std::shared_ptr<Buffer> buffer) {
                return message_decoder_.consume(std::move(buffer));
            }

            // Number of bytes the decoder needs before it can make progress.
            int64_t next_required_size() const { return message_decoder_.next_required_size(); }

            // Non-owning view of the underlying decoder (used to inspect its state).
            const MessageDecoder *message_decoder() const { return &message_decoder_; }

        private:
            MessageDecoder message_decoder_;
        };

        // Both parameters are by-value sinks; move them into the impl so the
        // options struct is not copied a second time.
        StreamDecoder::StreamDecoder(std::shared_ptr<Listener> listener, IpcReadOptions options) {
            impl_ = std::make_unique<StreamDecoderImpl>(std::move(listener), std::move(options));
        }

        // Out-of-line (defaulted) destructor: required because StreamDecoderImpl is
        // an incomplete type where the unique_ptr member is declared (pimpl idiom).
        StreamDecoder::~StreamDecoder() = default;

        // Feed raw bytes into the decoder. While enough input remains, hand over
        // exact "next required size" slices; any trailing partial chunk is passed
        // through at the end for the decoder to accumulate.
        turbo::Status StreamDecoder::consume(const uint8_t *data, int64_t size) {
            for (;;) {
                const int64_t needed = impl_->next_required_size();
                if (needed == 0 || size < needed) {
                    break;
                }
                TURBO_RETURN_NOT_OK(impl_->consume(data, needed));
                data += needed;
                size -= needed;
            }
            if (size == 0) {
                return turbo::OkStatus();
            }
            return impl_->consume(data, size);
        }

        // Feed a whole buffer into the decoder. When the buffer spans multiple
        // decoder steps it is carved into exact-sized slices; the slicing strategy
        // depends on whether the decoder will retain a reference to the data.
        turbo::Status StreamDecoder::consume(std::shared_ptr<Buffer> buffer) {
            if (buffer->size() == 0) {
                return turbo::OkStatus();
            }
            // Fast path: the whole buffer fits within the decoder's next
            // requirement, so it can be handed over in one piece.
            if (impl_->next_required_size() == 0 || buffer->size() <= impl_->next_required_size()) {
                return impl_->consume(std::move(buffer));
            } else {
                int64_t offset = 0;
                while (true) {
                    const auto next_required_size = impl_->next_required_size();
                    if (next_required_size == 0) {
                        break;
                    }
                    if (buffer->size() - offset <= next_required_size) {
                        break;
                    }
                    if (buffer->is_cpu()) {
                        switch (impl_->message_decoder()->state()) {
                            case MessageDecoder::State::INITIAL:
                            case MessageDecoder::State::METADATA_LENGTH:
                                // We don't need to pass a sliced buffer because
                                // MessageDecoder doesn't keep reference of the given
                                // buffer on these states.
                                TURBO_RETURN_NOT_OK(
                                        impl_->consume(buffer->data() + offset, next_required_size));
                                break;
                            default:
                                TURBO_RETURN_NOT_OK(
                                        impl_->consume(SliceBuffer(buffer, offset, next_required_size)));
                                break;
                        }
                    } else {
                        // Non-CPU memory: always pass a sliced buffer so the decoder
                        // manages the device buffer's lifetime itself.
                        TURBO_RETURN_NOT_OK(
                                impl_->consume(SliceBuffer(buffer, offset, next_required_size)));
                    }
                    offset += next_required_size;
                }
                // Hand over whatever remains: nothing, the untouched whole buffer,
                // or the tail slice past `offset`.
                if (buffer->size() - offset == 0) {
                    return turbo::OkStatus();
                } else if (offset == 0) {
                    return impl_->consume(std::move(buffer));
                } else {
                    return impl_->consume(SliceBuffer(std::move(buffer), offset));
                }
            }
        }

        // Discard all decoding progress and rebuild the impl from scratch, carrying
        // over the original listener and options so a fresh stream can be decoded.
        turbo::Status StreamDecoder::reset() {
            auto fresh = std::make_unique<StreamDecoderImpl>(impl_->listener(), impl_->options());
            impl_ = std::move(fresh);
            return turbo::OkStatus();
        }

        // Forwards to the impl's decoded schema accessor.
        std::shared_ptr<Schema> StreamDecoder::schema() const { return impl_->schema(); }

        // Bytes the decoder needs before it can make further progress.
        int64_t StreamDecoder::next_required_size() const { return impl_->next_required_size(); }

        // Snapshot of the impl's read statistics.
        ReadStats StreamDecoder::stats() const { return impl_->stats(); }

        /// \brief Read the schema message that must begin an IPC stream.
        ///
        /// Fails if the stream is empty or if the first message is not a schema.
        turbo::Result<std::shared_ptr<Schema>> read_schema(io::InputStream *stream,
                                                          DictionaryMemo *dictionary_memo) {
            auto message_reader = MessageReader::open(stream);
            TURBO_MOVE_OR_RAISE(std::unique_ptr<Message> message, message_reader->ReadNextMessage());
            if (message == nullptr) {
                return turbo::invalid_argument_error("Tried reading schema message, was null or length 0");
            }
            CHECK_MESSAGE_TYPE(MessageType::SCHEMA, message->type());
            return read_schema(*message, dictionary_memo);
        }

        /// \brief Decode the schema flatbuffer held in a message header.
        turbo::Result<std::shared_ptr<Schema>> read_schema(const Message &message,
                                                          DictionaryMemo *dictionary_memo) {
            std::shared_ptr<Schema> schema;
            TURBO_RETURN_NOT_OK(internal::get_schema(message.header(), dictionary_memo, &schema));
            return schema;
        }

        /// \brief Read one contiguous metadata+body payload from the stream and
        /// decode it as a Tensor.
        turbo::Result<std::shared_ptr<Tensor>> read_tensor(io::InputStream *file) {
            std::unique_ptr<Message> payload_message;
            TURBO_RETURN_NOT_OK(read_contiguous_payload(file, &payload_message));
            return read_tensor(*payload_message);
        }

        /// \brief Decode a Tensor from an IPC message: metadata comes from the
        /// header, and the message body backs the tensor's data without copying.
        turbo::Result<std::shared_ptr<Tensor>> read_tensor(const Message &message) {
            CHECK_HAS_BODY(message);
            std::shared_ptr<DataType> value_type;
            std::vector<int64_t> shape;
            std::vector<int64_t> strides;
            std::vector<std::string> dim_names;
            TURBO_RETURN_NOT_OK(internal::GetTensorMetadata(*message.metadata(), &value_type, &shape,
                                                            &strides, &dim_names));
            return Tensor::create(value_type, message.body(), shape, strides, dim_names);
        }

        namespace {

            // Reconstruct a COO sparse index: a (non_zero_length x ndim) coordinate
            // tensor whose data is read from the file at the flatbuffer-specified
            // extent.
            turbo::Result<std::shared_ptr<SparseIndex>> ReadSparseCOOIndex(
                    const flatbuf::SparseTensor *sparse_tensor, const std::vector<int64_t> &shape,
                    int64_t non_zero_length, io::RandomAccessFile *file) {
                auto *sparse_index = sparse_tensor->sparse_index_as_SparseTensorIndexCOO();
                const auto ndim = static_cast<int64_t>(shape.size());

                std::shared_ptr<DataType> indices_type;
                TURBO_RETURN_NOT_OK(internal::GetSparseCOOIndexMetadata(sparse_index, &indices_type));
                const int64_t indices_elsize = indices_type->byte_width();

                auto *indices_buffer = sparse_index->indices_buffer();
                TURBO_MOVE_OR_RAISE(auto indices_data,
                                    file->read_at(indices_buffer->offset(), indices_buffer->length()));
                // NOTE(review): unlike the CSX path, the indices buffer length is not
                // validated here against non_zero_length * ndim * elsize — confirm
                // whether upstream metadata verification covers this.
                std::vector<int64_t> indices_shape({non_zero_length, ndim});
                auto *indices_strides = sparse_index->indices_strides();
                std::vector<int64_t> strides(2);
                if (indices_strides && indices_strides->size() > 0) {
                    // Explicit strides must describe exactly the two index axes.
                    if (indices_strides->size() != 2) {
                        return turbo::invalid_argument_error("Wrong size for indices_strides in SparseCOOIndex");
                    }
                    strides[0] = indices_strides->Get(0);
                    strides[1] = indices_strides->Get(1);
                } else {
                    // Row-major by default
                    strides[0] = indices_elsize * ndim;
                    strides[1] = indices_elsize;
                }
                return SparseCOOIndex::create(
                        std::make_shared<Tensor>(indices_type, indices_data, indices_shape, strides),
                        sparse_index->is_canonical());
            }

            // Reconstruct a CSR or CSC matrix index — the compressed axis recorded
            // in the flatbuffer decides which — from indptr/indices buffers on file.
            turbo::Result<std::shared_ptr<SparseIndex>> ReadSparseCSXIndex(
                    const flatbuf::SparseTensor *sparse_tensor, const std::vector<int64_t> &shape,
                    int64_t non_zero_length, io::RandomAccessFile *file) {
                // CSR/CSC only make sense for 2-D tensors (matrices).
                if (shape.size() != 2) {
                    return turbo::invalid_argument_error("Invalid shape length for a sparse matrix");
                }

                auto *sparse_index = sparse_tensor->sparse_index_as_SparseMatrixIndexCSX();

                std::shared_ptr<DataType> indptr_type, indices_type;
                TURBO_RETURN_NOT_OK(
                        internal::GetSparseCSXIndexMetadata(sparse_index, &indptr_type, &indices_type));
                const int indptr_byte_width = indptr_type->byte_width();

                auto *indptr_buffer = sparse_index->indptr_buffer();
                TURBO_MOVE_OR_RAISE(auto indptr_data,
                                    file->read_at(indptr_buffer->offset(), indptr_buffer->length()));

                auto *indices_buffer = sparse_index->indices_buffer();
                TURBO_MOVE_OR_RAISE(auto indices_data,
                                    file->read_at(indices_buffer->offset(), indices_buffer->length()));

                // The indices tensor holds one entry per non-zero value; reject
                // files whose buffer is too small to back it.
                std::vector<int64_t> indices_shape({non_zero_length});
                const auto indices_minimum_bytes = indices_shape[0] * indices_type->byte_width();
                if (indices_minimum_bytes > indices_buffer->length()) {
                    return turbo::invalid_argument_error("shape is inconsistent to the size of indices buffer");
                }

                switch (sparse_index->compressed_axis()) {
                    case flatbuf::SparseMatrixCompressedAxis::Row: {
                        // CSR: indptr has one entry per row plus a terminator.
                        std::vector<int64_t> indptr_shape({shape[0] + 1});
                        const int64_t indptr_minimum_bytes = indptr_shape[0] * indptr_byte_width;
                        if (indptr_minimum_bytes > indptr_buffer->length()) {
                            return turbo::invalid_argument_error("shape is inconsistent to the size of indptr buffer");
                        }
                        return std::make_shared<SparseCSRIndex>(
                                std::make_shared<Tensor>(indptr_type, indptr_data, indptr_shape),
                                std::make_shared<Tensor>(indices_type, indices_data, indices_shape));
                    }
                    case flatbuf::SparseMatrixCompressedAxis::Column: {
                        // CSC: indptr has one entry per column plus a terminator.
                        std::vector<int64_t> indptr_shape({shape[1] + 1});
                        const int64_t indptr_minimum_bytes = indptr_shape[0] * indptr_byte_width;
                        if (indptr_minimum_bytes > indptr_buffer->length()) {
                            return turbo::invalid_argument_error("shape is inconsistent to the size of indptr buffer");
                        }
                        return std::make_shared<SparseCSCIndex>(
                                std::make_shared<Tensor>(indptr_type, indptr_data, indptr_shape),
                                std::make_shared<Tensor>(indices_type, indices_data, indices_shape));
                    }
                    default:
                        return turbo::invalid_argument_error("Invalid value of SparseMatrixCompressedAxis");
                }
            }

            // Reconstruct a CSF sparse index: (ndim - 1) indptr buffers and ndim
            // indices buffers, each read from the file at the flatbuffer-specified
            // extent.
            turbo::Result<std::shared_ptr<SparseIndex>> ReadSparseCSFIndex(
                    const flatbuf::SparseTensor *sparse_tensor, const std::vector<int64_t> &shape,
                    io::RandomAccessFile *file) {
                auto *sparse_index = sparse_tensor->sparse_index_as_SparseTensorIndexCSF();
                const auto ndim = static_cast<int64_t>(shape.size());
                auto *indptr_buffers = sparse_index->indptr_buffers();
                auto *indices_buffers = sparse_index->indices_buffers();

                // Guard against malformed files: the loops below index fixed-size
                // vectors by the flatbuffer's buffer counts, so a mismatch would
                // otherwise write out of bounds (and ndim == 0 would make the
                // indptr vector size underflow).
                if (static_cast<int64_t>(indptr_buffers->size()) != ndim - 1 ||
                    static_cast<int64_t>(indices_buffers->size()) != ndim) {
                    return turbo::invalid_argument_error(
                            "Inconsistent buffer count in SparseTensorIndexCSF");
                }

                std::vector<std::shared_ptr<Buffer>> indptr_data(ndim - 1);
                std::vector<std::shared_ptr<Buffer>> indices_data(ndim);

                std::shared_ptr<DataType> indptr_type, indices_type;
                std::vector<int64_t> axis_order, indices_size;

                TURBO_RETURN_NOT_OK(internal::GetSparseCSFIndexMetadata(
                        sparse_index, &axis_order, &indices_size, &indptr_type, &indices_type));
                for (int i = 0; i < static_cast<int>(indptr_buffers->size()); ++i) {
                    TURBO_MOVE_OR_RAISE(indptr_data[i], file->read_at(indptr_buffers->Get(i)->offset(),
                                                                     indptr_buffers->Get(i)->length()));
                }
                for (int i = 0; i < static_cast<int>(indices_buffers->size()); ++i) {
                    TURBO_MOVE_OR_RAISE(indices_data[i],
                                        file->read_at(indices_buffers->Get(i)->offset(),
                                                     indices_buffers->Get(i)->length()));
                }

                return SparseCSFIndex::create(indptr_type, indices_type, indices_size, axis_order,
                                              indptr_data, indices_data);
            }

            // Thin adapter to the COO tensor factory; matches the signature of its
            // CSR/CSC siblings, so `non_zero_length` is accepted but unused here.
            turbo::Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCOOIndex(
                    const std::shared_ptr<DataType> &type, const std::vector<int64_t> &shape,
                    const std::vector<std::string> &dim_names,
                    const std::shared_ptr<SparseCOOIndex> &sparse_index, int64_t non_zero_length,
                    const std::shared_ptr<Buffer> &data) {
                return SparseCOOTensor::create(sparse_index, type, data, shape, dim_names);
            }

            // Thin adapter to the CSR matrix factory; `non_zero_length` is accepted
            // for signature symmetry but unused.
            turbo::Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCSRIndex(
                    const std::shared_ptr<DataType> &type, const std::vector<int64_t> &shape,
                    const std::vector<std::string> &dim_names,
                    const std::shared_ptr<SparseCSRIndex> &sparse_index, int64_t non_zero_length,
                    const std::shared_ptr<Buffer> &data) {
                return SparseCSRMatrix::create(sparse_index, type, data, shape, dim_names);
            }

            // Thin adapter to the CSC matrix factory; `non_zero_length` is accepted
            // for signature symmetry but unused.
            turbo::Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCSCIndex(
                    const std::shared_ptr<DataType> &type, const std::vector<int64_t> &shape,
                    const std::vector<std::string> &dim_names,
                    const std::shared_ptr<SparseCSCIndex> &sparse_index, int64_t non_zero_length,
                    const std::shared_ptr<Buffer> &data) {
                return SparseCSCMatrix::create(sparse_index, type, data, shape, dim_names);
            }

            // Thin adapter to the CSF tensor factory (this one takes no
            // non_zero_length parameter).
            turbo::Result<std::shared_ptr<SparseTensor>> MakeSparseTensorWithSparseCSFIndex(
                    const std::shared_ptr<DataType> &type, const std::vector<int64_t> &shape,
                    const std::vector<std::string> &dim_names,
                    const std::shared_ptr<SparseCSFIndex> &sparse_index,
                    const std::shared_ptr<Buffer> &data) {
                return SparseCSFTensor::create(sparse_index, type, data, shape, dim_names);
            }

            // Decode sparse-tensor metadata from a message's flatbuffer and also
            // hand back the raw flatbuffer header and data-buffer descriptor so
            // callers can read the index/data extents themselves.
            turbo::Status ReadSparseTensorMetadata(BufferSpan metadata,
                                                   std::shared_ptr<DataType> *out_type,
                                                   std::vector<int64_t> *out_shape,
                                                   std::vector<std::string> *out_dim_names,
                                                   int64_t *out_non_zero_length,
                                                   SparseTensorFormat::type *out_format_id,
                                                   const flatbuf::SparseTensor **out_fb_sparse_tensor,
                                                   const flatbuf::Buffer **out_buffer) {
                // High-level metadata: value type, shape, dim names, nnz, format.
                TURBO_RETURN_NOT_OK(internal::GetSparseTensorMetadata(
                        metadata, out_type, out_shape, out_dim_names, out_non_zero_length, out_format_id));

                // Verify the flatbuffer again to obtain the raw Message pointer.
                const flatbuf::Message *message = nullptr;
                TURBO_RETURN_NOT_OK(internal::VerifyMessage(metadata.data(), metadata.size(), &message));

                auto sparse_tensor = message->header_as_SparseTensor();
                if (sparse_tensor == nullptr) {
                    return turbo::io_error(
                            "Header-type of flatbuffer-encoded Message is not SparseTensor.");
                }
                *out_fb_sparse_tensor = sparse_tensor;

                // The data buffer must start on an 8-byte boundary.
                auto buffer = sparse_tensor->data();
                if (!bit_util::IsMultipleOf8(buffer->offset())) {
                    return turbo::invalid_argument_error(
                            "Buffer of sparse index data did not start on 8-byte aligned offset: ",
                            buffer->offset());
                }
                *out_buffer = buffer;

                return turbo::OkStatus();
            }

        }  // namespace

        namespace internal {

            namespace {

                // Number of body buffers an IPC sparse-tensor payload carries for
                // each index format: the index buffer(s) plus one data buffer.
                turbo::Result<size_t> GetSparseTensorBodyBufferCount(SparseTensorFormat::type format_id,
                                                                     const size_t ndim) {
                    switch (format_id) {
                        case SparseTensorFormat::COO:
                            return 2;  // indices + data
                        case SparseTensorFormat::CSR:
                        case SparseTensorFormat::CSC:
                            return 3;  // indptr + indices + data
                        case SparseTensorFormat::CSF:
                            return 2 * ndim;  // (ndim - 1) indptr + ndim indices + data
                        default:
                            return turbo::invalid_argument_error("Unrecognized sparse tensor format");
                    }
                }

                // Validate that the payload carries exactly as many body buffers as
                // the sparse format requires for a tensor of this dimensionality.
                turbo::Status CheckSparseTensorBodyBufferCount(const IpcPayload &payload,
                                                               SparseTensorFormat::type sparse_tensor_format_id,
                                                               const size_t ndim) {
                    TURBO_MOVE_OR_RAISE(
                            const size_t expected,
                            GetSparseTensorBodyBufferCount(sparse_tensor_format_id, ndim));
                    if (payload.body_buffers.size() == expected) {
                        return turbo::OkStatus();
                    }
                    return turbo::invalid_argument_error("Invalid body buffer count for a sparse tensor");
                }

            }  // namespace

            // Compute how many body buffers this sparse-tensor message will carry.
            // Only the format id and shape are needed; the other metadata outputs
            // are skipped via null pointers.
            turbo::Result<size_t> read_sparse_tensor_body_buffer_count(BufferSpan metadata) {
                SparseTensorFormat::type format_id{};
                std::vector<int64_t> shape;
                TURBO_RETURN_NOT_OK(internal::GetSparseTensorMetadata(metadata, nullptr, &shape, nullptr,
                                                                      nullptr, &format_id));
                return GetSparseTensorBodyBufferCount(format_id, shape.size());
            }

            // Reassemble a SparseTensor from an IPC payload: decode the metadata,
            // verify the body-buffer count for the format, then build the
            // format-specific index directly from the payload's body buffers
            // (the final body buffer is always the value data).
            turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor_payload(const IpcPayload &payload) {
                std::shared_ptr<DataType> type;
                std::vector<int64_t> shape;
                std::vector<std::string> dim_names;
                int64_t non_zero_length;
                SparseTensorFormat::type sparse_tensor_format_id;
                const flatbuf::SparseTensor *sparse_tensor;
                const flatbuf::Buffer *buffer;

                TURBO_RETURN_NOT_OK(ReadSparseTensorMetadata(*payload.metadata, &type, &shape, &dim_names,
                                                             &non_zero_length, &sparse_tensor_format_id,
                                                             &sparse_tensor, &buffer));

                // Guarantees the body_buffers indexing below stays in bounds.
                TURBO_RETURN_NOT_OK(CheckSparseTensorBodyBufferCount(payload, sparse_tensor_format_id,
                                                                     static_cast<size_t>(shape.size())));

                switch (sparse_tensor_format_id) {
                    case SparseTensorFormat::COO: {
                        // Buffers: [0] coordinates, [1] data.
                        std::shared_ptr<SparseCOOIndex> sparse_index;
                        std::shared_ptr<DataType> indices_type;
                        TURBO_RETURN_NOT_OK(internal::GetSparseCOOIndexMetadata(
                                sparse_tensor->sparse_index_as_SparseTensorIndexCOO(), &indices_type));
                        TURBO_MOVE_OR_RAISE(sparse_index,
                                            SparseCOOIndex::create(indices_type, shape, non_zero_length,
                                                                   payload.body_buffers[0]));
                        return MakeSparseTensorWithSparseCOOIndex(type, shape, dim_names, sparse_index,
                                                                  non_zero_length, payload.body_buffers[1]);
                    }
                    case SparseTensorFormat::CSR: {
                        // Buffers: [0] indptr, [1] indices, [2] data.
                        std::shared_ptr<SparseCSRIndex> sparse_index;
                        std::shared_ptr<DataType> indptr_type;
                        std::shared_ptr<DataType> indices_type;
                        TURBO_RETURN_NOT_OK(internal::GetSparseCSXIndexMetadata(
                                sparse_tensor->sparse_index_as_SparseMatrixIndexCSX(), &indptr_type,
                                &indices_type));
                        KCHECK_EQ(indptr_type, indices_type);
                        TURBO_MOVE_OR_RAISE(
                                sparse_index,
                                SparseCSRIndex::create(indices_type, shape, non_zero_length,
                                                       payload.body_buffers[0], payload.body_buffers[1]));
                        return MakeSparseTensorWithSparseCSRIndex(type, shape, dim_names, sparse_index,
                                                                  non_zero_length, payload.body_buffers[2]);
                    }
                    case SparseTensorFormat::CSC: {
                        // Buffers: [0] indptr, [1] indices, [2] data.
                        std::shared_ptr<SparseCSCIndex> sparse_index;
                        std::shared_ptr<DataType> indptr_type;
                        std::shared_ptr<DataType> indices_type;
                        TURBO_RETURN_NOT_OK(internal::GetSparseCSXIndexMetadata(
                                sparse_tensor->sparse_index_as_SparseMatrixIndexCSX(), &indptr_type,
                                &indices_type));
                        KCHECK_EQ(indptr_type, indices_type);
                        TURBO_MOVE_OR_RAISE(
                                sparse_index,
                                SparseCSCIndex::create(indices_type, shape, non_zero_length,
                                                       payload.body_buffers[0], payload.body_buffers[1]));
                        return MakeSparseTensorWithSparseCSCIndex(type, shape, dim_names, sparse_index,
                                                                  non_zero_length, payload.body_buffers[2]);
                    }
                    case SparseTensorFormat::CSF: {
                        // Buffers: [0 .. ndim-2] indptr, [ndim-1 .. 2*ndim-2]
                        // indices, [2*ndim-1] data.
                        std::shared_ptr<SparseCSFIndex> sparse_index;
                        std::shared_ptr<DataType> indptr_type, indices_type;
                        std::vector<int64_t> axis_order, indices_size;

                        TURBO_RETURN_NOT_OK(internal::GetSparseCSFIndexMetadata(
                                sparse_tensor->sparse_index_as_SparseTensorIndexCSF(), &axis_order,
                                &indices_size, &indptr_type, &indices_type));
                        KCHECK_EQ(indptr_type, indices_type);

                        const int64_t ndim = shape.size();
                        std::vector<std::shared_ptr<Buffer>> indptr_data(ndim - 1);
                        std::vector<std::shared_ptr<Buffer>> indices_data(ndim);

                        for (int64_t i = 0; i < ndim - 1; ++i) {
                            indptr_data[i] = payload.body_buffers[i];
                        }
                        for (int64_t i = 0; i < ndim; ++i) {
                            indices_data[i] = payload.body_buffers[i + ndim - 1];
                        }

                        TURBO_MOVE_OR_RAISE(sparse_index,
                                            SparseCSFIndex::create(indptr_type, indices_type, indices_size,
                                                                   axis_order, indptr_data, indices_data));
                        return MakeSparseTensorWithSparseCSFIndex(type, shape, dim_names, sparse_index,
                                                                  payload.body_buffers[2 * ndim - 1]);
                    }
                    default:
                        return turbo::invalid_argument_error("Unsupported sparse index format");
                }
            }

        }  // namespace internal

        turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor(BufferSpan metadata,
                                                                      io::RandomAccessFile *file) {
            // Decode the flatbuffer metadata describing the sparse tensor.
            std::shared_ptr<DataType> value_type;
            std::vector<int64_t> tensor_shape;
            std::vector<std::string> axis_names;
            int64_t nnz;
            SparseTensorFormat::type format_id;
            const flatbuf::SparseTensor *fb_sparse_tensor;
            const flatbuf::Buffer *fb_data_buffer;

            TURBO_RETURN_NOT_OK(ReadSparseTensorMetadata(metadata, &value_type, &tensor_shape,
                                                         &axis_names, &nnz, &format_id,
                                                         &fb_sparse_tensor, &fb_data_buffer));

            // Fetch the value buffer from the location the metadata points at.
            TURBO_MOVE_OR_RAISE(auto values,
                                file->read_at(fb_data_buffer->offset(), fb_data_buffer->length()));

            // Dispatch on the on-wire index layout; each branch reads the matching
            // index structure and assembles the final SparseTensor.
            switch (format_id) {
                case SparseTensorFormat::COO: {
                    TURBO_MOVE_OR_RAISE(std::shared_ptr<SparseIndex> index,
                                        ReadSparseCOOIndex(fb_sparse_tensor, tensor_shape, nnz, file));
                    return MakeSparseTensorWithSparseCOOIndex(
                            value_type, tensor_shape, axis_names,
                            turbo::checked_pointer_cast<SparseCOOIndex>(index), nnz, values);
                }
                case SparseTensorFormat::CSR: {
                    TURBO_MOVE_OR_RAISE(std::shared_ptr<SparseIndex> index,
                                        ReadSparseCSXIndex(fb_sparse_tensor, tensor_shape, nnz, file));
                    return MakeSparseTensorWithSparseCSRIndex(
                            value_type, tensor_shape, axis_names,
                            turbo::checked_pointer_cast<SparseCSRIndex>(index), nnz, values);
                }
                case SparseTensorFormat::CSC: {
                    TURBO_MOVE_OR_RAISE(std::shared_ptr<SparseIndex> index,
                                        ReadSparseCSXIndex(fb_sparse_tensor, tensor_shape, nnz, file));
                    return MakeSparseTensorWithSparseCSCIndex(
                            value_type, tensor_shape, axis_names,
                            turbo::checked_pointer_cast<SparseCSCIndex>(index), nnz, values);
                }
                case SparseTensorFormat::CSF: {
                    TURBO_MOVE_OR_RAISE(std::shared_ptr<SparseIndex> index,
                                        ReadSparseCSFIndex(fb_sparse_tensor, tensor_shape, file));
                    return MakeSparseTensorWithSparseCSFIndex(
                            value_type, tensor_shape, axis_names,
                            turbo::checked_pointer_cast<SparseCSFIndex>(index), values);
                }
                default:
                    return turbo::invalid_argument_error("Unsupported sparse index format");
            }
        }

        turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor(const Message &message) {
            // A sparse tensor message carries its buffers in the message body.
            CHECK_HAS_BODY(message);
            TURBO_MOVE_OR_RAISE(auto body_reader, Buffer::get_reader(message.body()));
            return read_sparse_tensor(*message.metadata(), body_reader.get());
        }

        turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor(io::InputStream *file) {
            // Pull one complete IPC message off the stream, then check that it is a
            // sparse tensor message with a body before decoding it.
            std::unique_ptr<Message> message;
            TURBO_RETURN_NOT_OK(read_contiguous_payload(file, &message));
            CHECK_MESSAGE_TYPE(MessageType::SPARSE_TENSOR, message->type());
            CHECK_HAS_BODY(*message);
            TURBO_MOVE_OR_RAISE(auto body_reader, Buffer::get_reader(message->body()));
            return read_sparse_tensor(*message->metadata(), body_reader.get());
        }

        ///////////////////////////////////////////////////////////////////////////
        // Helpers for fuzzing

        namespace internal {
            namespace {

                turbo::Status ValidateFuzzBatch(const RecordBatch &batch) {
                    // Validate first; only a fully valid batch is expected to be
                    // printable without crashing.
                    const turbo::Status status = batch.validate_full();
                    if (!status.ok()) {
                        return status;
                    }
                    batch.to_string();
                    return status;
                }

            }  // namespace

            turbo::Status FuzzIpcStream(const uint8_t *data, int64_t size) {
                // Wrap the fuzz input in an in-memory reader and decode it as an
                // IPC record batch stream, validating every batch that comes out.
                auto input = std::make_shared<Buffer>(data, size);
                io::BufferReader input_reader(input);

                std::shared_ptr<RecordBatchReader> stream_reader;
                TURBO_MOVE_OR_RAISE(stream_reader, RecordBatchStreamReader::open(&input_reader));

                turbo::Status overall;
                for (;;) {
                    std::shared_ptr<nebula::RecordBatch> batch;
                    TURBO_RETURN_NOT_OK(stream_reader->read_next(&batch));
                    if (!batch) {
                        break;  // end of stream
                    }
                    // Accumulate validation failures instead of stopping early.
                    overall &= ValidateFuzzBatch(*batch);
                }
                return overall;
            }

            turbo::Status FuzzIpcFile(const uint8_t *data, int64_t size) {
                // Wrap the fuzz input in an in-memory reader and decode it as an
                // IPC file, validating every record batch it contains.
                auto input = std::make_shared<Buffer>(data, size);
                io::BufferReader input_reader(input);

                std::shared_ptr<RecordBatchFileReader> file_reader;
                TURBO_MOVE_OR_RAISE(file_reader, RecordBatchFileReader::open(&input_reader));

                turbo::Status overall;
                for (int i = 0, n = file_reader->num_record_batches(); i < n; ++i) {
                    TURBO_MOVE_OR_RAISE(auto batch, file_reader->read_record_batch(i));
                    // Accumulate validation failures instead of stopping early.
                    overall &= ValidateFuzzBatch(*batch);
                }
                return overall;
            }

            turbo::Status FuzzIpcTensorStream(const uint8_t *data, int64_t size) {
                // Decode the fuzz input as a stream of tensor messages, validating
                // each tensor until the stream is exhausted.
                auto input = std::make_shared<Buffer>(data, size);
                io::BufferReader input_reader(input);

                for (;;) {
                    std::shared_ptr<Tensor> tensor;
                    TURBO_MOVE_OR_RAISE(tensor, read_tensor(&input_reader));
                    if (!tensor) {
                        break;  // end of stream
                    }
                    TURBO_RETURN_NOT_OK(tensor->validate());
                }
                return turbo::OkStatus();
            }

            turbo::Result<int64_t> IoRecordedRandomAccessFile::get_size() { return file_size_; }

            // Records the range [position, position + nbytes) capped at the simulated
            // EOF and returns the number of bytes a real read would have produced.
            // `out` is intentionally left untouched: this file only captures the
            // ranges that would have been read so they can be replayed later.
            turbo::Result<int64_t> IoRecordedRandomAccessFile::read_at(int64_t position, int64_t nbytes,
                                                                      void *out) {
                // Clamp at zero so a read starting at or past EOF records an empty
                // range instead of a negative length (the unclamped expression
                // went negative when position > file_size_).
                const int64_t num_bytes_read =
                        std::max<int64_t>(0, std::min(file_size_, position + nbytes) - position);

                if (!read_ranges_.empty() &&
                    position == read_ranges_.back().offset + read_ranges_.back().length) {
                    // Coalesce a read that continues exactly where the previous one ended.
                    read_ranges_.back().length += num_bytes_read;
                } else {
                    // No real IO is performed; the range is only saved for replaying later.
                    read_ranges_.emplace_back(io::ReadRange{position, num_bytes_read});
                }
                return num_bytes_read;
            }

            // Buffer-returning variant: delegates to the recording overload so the
            // range is captured.  No bytes are materialized by this class, so the
            // returned Buffer pointer is null; callers only replay the ranges.
            turbo::Result<std::shared_ptr<Buffer>> IoRecordedRandomAccessFile::read_at(int64_t position,
                                                                                      int64_t nbytes) {
                std::shared_ptr<Buffer> out;
                auto result = read_at(position, nbytes, &out);
                if (!result.ok()) {
                    // Previously the inner status was silently discarded; surface it.
                    return result.status();
                }
                return out;
            }

            // Marks the file closed; there is no underlying resource to release.
            turbo::Status IoRecordedRandomAccessFile::close() {
                closed_ = true;
                return turbo::OkStatus();
            }

            turbo::Status IoRecordedRandomAccessFile::abort() { return turbo::OkStatus(); }

            turbo::Result<int64_t> IoRecordedRandomAccessFile::tell() const { return position_; }

            bool IoRecordedRandomAccessFile::closed() const { return closed_; }

            // Moves the logical cursor; always succeeds since no real file is involved.
            // NOTE(review): "Seek" is PascalCase while sibling methods use snake_case;
            // renaming would require changing the declaring header.
            turbo::Status IoRecordedRandomAccessFile::Seek(int64_t position) {
                position_ = position;
                return turbo::OkStatus();
            }

            // Records a read at the current cursor, then advances the cursor by the
            // number of bytes the simulated read produced.
            turbo::Result<int64_t> IoRecordedRandomAccessFile::read(int64_t nbytes, void *out) {
                TURBO_MOVE_OR_RAISE(int64_t num_read, read_at(position_, nbytes, out));
                position_ += num_read;
                return num_read;
            }

            // Buffer-returning variant: records the range at the current cursor and
            // advances the cursor.  The returned Buffer pointer is null because this
            // class materializes no data.
            turbo::Result<std::shared_ptr<Buffer>> IoRecordedRandomAccessFile::read(int64_t nbytes) {
                TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> buffer, read_at(position_, nbytes));
                // Advance by what a real read would have consumed, clamped at zero so
                // a cursor at or past EOF cannot move backwards (the unclamped
                // expression went negative when position_ > file_size_).
                const int64_t num_bytes_read =
                        std::max<int64_t>(0, std::min(file_size_, position_ + nbytes) - position_);
                position_ += num_bytes_read;
                return buffer;
            }

            // IO context supplied at construction; exposed for interface conformance.
            const io::IOContext &IoRecordedRandomAccessFile::io_context() const {
                return io_context_;
            }

            // Ranges recorded (and possibly coalesced) by read_at(), in call order,
            // for replaying the IO pattern later.
            const std::vector<io::ReadRange> &IoRecordedRandomAccessFile::GetReadRanges() const {
                return read_ranges_;
            }

        }  // namespace internal
    }  // namespace ipc
}  // namespace nebula
