// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <numeric>
#include <ostream>
#include <string>
#include <unordered_set>

#include <flatbuffers/flatbuffers.h>
#include <kmock/kmock-matchers.h>
#include <ktest/ktest.h>

#include <nebula/core/array.h>
#include <nebula/array/builder_primitive.h>
#include <nebula/core/buffer_builder.h>
#include <nebula/io/file.h>
#include <nebula/io/memory.h>
#include <tests/io/test_common.h>
#include <nebula/ipc/message.h>
#include <nebula/ipc/metadata_internal.h>
#include <nebula/ipc/reader.h>
#include <nebula/ipc/reader_internal.h>
#include <tests/ipc/test_common.h>
#include <nebula/ipc/writer.h>
#include <nebula/core/record_batch.h>
#include <turbo/utility/status.h>
#include <nebula/core/table.h>
#include <nebula/testing/extension_type.h>
#include <nebula/testing/future_util.h>
#include <tests/config.h>
#include <nebula/testing/ktest_util.h>
#include <nebula/testing/random.h>
#include <nebula/testing/util.h>
#include <nebula/types/type_fwd.h>
#include <nebula/bits/bit_util.h>
#include <turbo/base/checked_cast.h>
#include <nebula/util/key_value_metadata.h>
#include <turbo/base/ubsan.h>
#include <turbo/files/scoped_temp_dir.h>
#include <nebula/fbs/Message_generated.h>  // IWYU pragma: keep

namespace nebula {


    namespace ipc {

        using internal::FieldPosition;
        using internal::IoRecordedRandomAccessFile;
        using MetadataVector = std::vector<std::shared_ptr<KeyValueMetadata>>;

        namespace test {

            // All IPC metadata versions currently supported by the writer; the
            // parameterized TestMessage suite runs once per entry.
            const std::vector<MetadataVersion> kMetadataVersions = {MetadataVersion::V4,
                                                                    MetadataVersion::V5};

            // Parameterized fixture: each test runs once per supported MetadataVersion.
            class TestMessage : public ::testing::TestWithParam<MetadataVersion> {
            public:
                // Configure write options for the metadata version under test.
                // `override` added: SetUp overrides the virtual from ::testing::Test.
                void SetUp() override {
                    version_ = GetParam();
                    fb_version_ = internal::MetadataVersionToFlatbuffer(version_);
                    options_ = IpcWriteOptions::defaults();
                    options_.metadata_version = version_;
                }

            protected:
                MetadataVersion version_;              // version supplied by the test parameter
                flatbuf::MetadataVersion fb_version_;  // flatbuffer equivalent of version_
                IpcWriteOptions options_;              // defaults, with metadata_version = version_
            };

            TEST(TestMessage, Equals) {
                const std::string metadata = "foo";
                const std::string body = "bar";

                auto metadata_buf_a = std::make_shared<Buffer>(metadata);
                auto metadata_buf_b = std::make_shared<Buffer>(metadata);
                auto body_buf_a = std::make_shared<Buffer>(body);
                auto body_buf_b = std::make_shared<Buffer>(body);

                Message with_body_a(metadata_buf_a, body_buf_a);
                Message with_body_b(metadata_buf_b, body_buf_b);
                Message bodiless_a(metadata_buf_a, nullptr);
                Message bodiless_b(metadata_buf_b, nullptr);

                // Equal metadata with equal bodies (or both bodies absent) compare equal.
                ASSERT_TRUE(with_body_a.equals(with_body_b));
                ASSERT_TRUE(bodiless_a.equals(bodiless_b));

                // Presence vs. absence of a body breaks equality, in both directions.
                ASSERT_FALSE(with_body_a.equals(bodiless_a));
                ASSERT_FALSE(bodiless_a.equals(with_body_a));

                // Same metadata as with_body_a, but a different body payload.
                Message body_mismatch(metadata_buf_b, metadata_buf_a);
                ASSERT_FALSE(with_body_a.equals(body_mismatch));
                ASSERT_FALSE(body_mismatch.equals(with_body_a));
            }

            TEST_P(TestMessage, serialize_to) {
                const int64_t body_length = 64;

                // Build a minimal RecordBatch message header at the parametrized version.
                flatbuffers::FlatBufferBuilder fbb;
                fbb.Finish(flatbuf::CreateMessage(fbb, fb_version_, flatbuf::MessageHeader::RecordBatch,
                                                  0 /* header */, body_length));

                std::shared_ptr<Buffer> metadata;
                ASSERT_OK_AND_ASSIGN(metadata, internal::WriteFlatbufferBuilder(fbb));

                std::string body = "abcdef";

                ASSERT_OK_AND_ASSIGN(std::unique_ptr<Message> message,
                                     Message::open(metadata, std::make_shared<Buffer>(body)));

                auto CheckWithAlignment = [&](int32_t alignment) {
                    options_.alignment = alignment;
                    const int32_t prefix_size = 8;  // 4-byte continuation marker + 4-byte length
                    int64_t output_length = 0;
                    ASSERT_OK_AND_ASSIGN(auto stream, io::BufferOutputStream::create(1 << 10));
                    ASSERT_OK(message->serialize_to(stream.get(), options_, &output_length));
                    // Metadata (with its prefix) is padded to the alignment; body follows.
                    ASSERT_EQ(bit_util::RoundUp(metadata->size() + prefix_size, alignment) + body_length,
                              output_length);
                    ASSERT_OK_AND_EQ(output_length, stream->tell());
                    ASSERT_OK_AND_ASSIGN(auto buffer, stream->finish());
                    // Check whether length is written in little endian.
                    // safe_load_as avoids the unaligned uint32_t dereference (UB) the
                    // previous C-style cast performed; same helper as used elsewhere here.
                    auto buffer_ptr = buffer->data();
                    ASSERT_EQ(output_length - body_length - prefix_size,
                              bit_util::FromLittleEndian(turbo::safe_load_as<uint32_t>(buffer_ptr + 4)));
                };

                CheckWithAlignment(8);
                CheckWithAlignment(64);
            }

            TEST_P(TestMessage, SerializeCustomMetadata) {
                // null, empty, and populated custom metadata must all roundtrip.
                std::vector<std::shared_ptr<KeyValueMetadata>> cases = {
                        nullptr, key_value_metadata({}, {}),
                        key_value_metadata({"foo", "bar"}, {"fizz", "buzz"})};
                // const& avoids copying the shared_ptr (atomic refcount) each iteration.
                for (const auto &metadata: cases) {
                    std::shared_ptr<Buffer> serialized;
                    ASSERT_OK(internal::WriteRecordBatchMessage(
                            /*length=*/0, /*body_length=*/0, metadata,
                            /*nodes=*/{},
                            /*buffers=*/{}, /*variadic_counts=*/{}, options_, &serialized));
                    ASSERT_OK_AND_ASSIGN(std::unique_ptr<Message> message,
                                         Message::open(serialized, /*body=*/nullptr));

                    if (metadata) {
                        ASSERT_TRUE(message->custom_metadata()->equals(*metadata));
                    } else {
                        // No metadata supplied: reader must report none rather than empty.
                        ASSERT_EQ(nullptr, message->custom_metadata());
                    }
                }
            }

            void BuffersOverlapEquals(const Buffer &left, const Buffer &right) {
                ASSERT_GT(left.size(), 0);
                ASSERT_GT(right.size(), 0);
                ASSERT_TRUE(left.equals(right, std::min(left.size(), right.size())));
            }

            TEST_P(TestMessage, LegacyIpcBackwardsCompatibility) {
                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK(MakeIntBatchSized(36, &batch));

                // Serialize `batch` as an IPC payload using the current options_, then
                // read the message straight back out of the serialized bytes.
                auto RoundtripWithOptions = [&](std::shared_ptr<Buffer> *out_serialized,
                                                std::unique_ptr<Message> *out) {
                    IpcPayload payload;
                    ASSERT_OK(get_record_batch_payload(*batch, options_, &payload));

                    ASSERT_OK_AND_ASSIGN(auto stream, io::BufferOutputStream::create(1 << 20));

                    int32_t metadata_length = -1;
                    ASSERT_OK(write_ipc_payload(payload, options_, stream.get(), &metadata_length));

                    ASSERT_OK_AND_ASSIGN(*out_serialized, stream->finish());
                    io::BufferReader io_reader(*out_serialized);
                    ASSERT_OK(ReadMessage(&io_reader).try_value(out));
                };

                std::shared_ptr<Buffer> serialized, legacy_serialized;
                std::unique_ptr<Message> message, legacy_message;

                RoundtripWithOptions(&serialized, &message);

                // First 4 bytes 0xFFFFFFFF Continuation marker
                ASSERT_EQ(-1, turbo::safe_load_as<int32_t>(serialized->data()));

                // Switch to the legacy (pre-continuation-marker) wire format and redo.
                options_.write_legacy_ipc_format = true;
                RoundtripWithOptions(&legacy_serialized, &legacy_message);

                // Check that the continuation marker is not written
                ASSERT_NE(-1, turbo::safe_load_as<int32_t>(legacy_serialized->data()));

                // Have to use the smaller size to exclude padding
                BuffersOverlapEquals(*legacy_message->metadata(), *message->metadata());
                ASSERT_TRUE(legacy_message->body()->equals(*message->body()));
            }

            TEST(TestMessage, Verify) {
                // Metadata that is not a valid flatbuffer must fail verification.
                // Keep the strings as named locals so the Buffers' backing memory
                // outlives the Message, matching the original lifetime.
                const std::string metadata = "invalid";
                const std::string body = "abcdef";

                Message message(std::make_shared<Buffer>(metadata),
                                std::make_shared<Buffer>(body));
                ASSERT_FALSE(message.Verify());
            }

            // Instantiate the parameterized TestMessage suite over every supported
            // metadata version.
            INSTANTIATE_TEST_SUITE_P(TestMessage, TestMessage,
                                     ::testing::ValuesIn(kMetadataVersions));

            // Fixture for schema (de)serialization roundtrip tests.
            // The previous empty `void SetUp() {}` was a no-op that merely shadowed the
            // base virtual without `override`; removed in favor of the default.
            class TestSchemaMetadata : public ::testing::Test {
            public:
                // Serialize `schema`, read it back, and assert the roundtrip is lossless
                // (including schema- and field-level key-value metadata).
                void CheckSchemaRoundtrip(const Schema &schema) {
                    ASSERT_OK_AND_ASSIGN(std::shared_ptr<Buffer> buffer, serialize_schema(schema));

                    io::BufferReader reader(buffer);
                    DictionaryMemo in_memo;
                    ASSERT_OK_AND_ASSIGN(auto actual_schema, read_schema(&reader, &in_memo));
                    AssertSchemaEqual(schema, *actual_schema, /* check_metadata= */ true);
                }
            };

            TEST_F(TestSchemaMetadata, PrimitiveFields) {
                // Every primitive width, plus a couple of non-nullable fields.
                std::vector<std::shared_ptr<Field>> fields = {
                        field("f0", int8()),
                        field("f1", int16(), false),
                        field("f2", int32()),
                        field("f3", int64()),
                        field("f4", uint8()),
                        field("f5", uint16()),
                        field("f6", uint32()),
                        field("f7", uint64()),
                        field("f8", float32()),
                        field("f9", float64(), false),
                        field("f10", boolean()),
                };
                CheckSchemaRoundtrip(Schema(fields));
            }

            TEST_F(TestSchemaMetadata, BinaryFields) {
                // Variable-length, view, and fixed-size binary/string flavors.
                std::vector<std::shared_ptr<Field>> fields = {
                        field("f0", utf8()),
                        field("f1", binary()),
                        field("f2", large_utf8()),
                        field("f3", large_binary()),
                        field("f4", utf8_view()),
                        field("f5", binary_view()),
                        field("f6", fixed_size_binary(3)),
                        field("f7", fixed_size_binary(33)),
                };
                CheckSchemaRoundtrip(Schema(fields));
            }

            TEST_F(TestSchemaMetadata, PrimitiveFieldsWithKeyValueMetadata) {
                // Field-level key-value metadata must survive the roundtrip.
                auto int_field = field("f1", std::make_shared<Int64Type>(), false,
                                       key_value_metadata({"k1"}, {"v1"}));
                auto str_field = field("f2", std::make_shared<StringType>(), true,
                                       key_value_metadata({"k2"}, {"v2"}));
                CheckSchemaRoundtrip(Schema({int_field, str_field}));
            }

            TEST_F(TestSchemaMetadata, NestedFields) {
                // List nesting plus a three-member struct.
                auto struct_type = STRUCT({
                        field("k1", int32()),
                        field("k2", int32()),
                        field("k3", int32()),
                });
                CheckSchemaRoundtrip(Schema({
                        field("f0", list(int32())),
                        field("f1", struct_type),
                }));
            }

// Verify that nullable=false is well-preserved for child fields of map type.
            TEST_F(TestSchemaMetadata, MapField) {
                // Both map children are declared non-nullable; the roundtrip must keep that.
                auto key_field = field("key", int32(), false);
                auto value_field = field("value", int32(), false);
                auto map_field = field("f0", std::make_shared<MapType>(key_field, value_field));
                CheckSchemaRoundtrip(Schema({map_field}));
            }

// Verify that key value metadata is well-preserved for child fields of nested type.
            TEST_F(TestSchemaMetadata, NestedFieldsWithKeyValueMetadata) {
                // The same key-value metadata is attached at every level of nesting.
                auto kv = key_value_metadata({"foo"}, {"bar"});
                auto inner_field = field("inner", int32(), false, kv);
                auto list_field = field("f0", list(inner_field), false, kv);
                auto struct_field = field("f1", STRUCT({inner_field}), false, kv);
                auto map_field = field("f2",
                                       std::make_shared<MapType>(field("key", int32(), false, kv),
                                                                 field("value", int32(), false, kv)),
                                       false, kv);
                CheckSchemaRoundtrip(Schema({list_field, struct_field, map_field}));
            }

            TEST_F(TestSchemaMetadata, DictionaryFields) {
                {
                    // An ordered dictionary, used both directly and inside a list.
                    auto ordered_dict = dictionary(int8(), int32(), /*ordered=*/true);
                    CheckSchemaRoundtrip(Schema({
                            field("f0", ordered_dict),
                            field("f1", list(ordered_dict)),
                    }));
                }
                {
                    // A dictionary whose value type is itself nested.
                    auto list_valued_dict = dictionary(int8(), list(int32()));
                    CheckSchemaRoundtrip(Schema({field("f0", list_valued_dict)}));
                }
            }

            TEST_F(TestSchemaMetadata, NestedDictionaryFields) {
                {
                    // A dictionary nested inside another dictionary's value type.
                    auto inner_dict_type = dictionary(int8(), int32(), /*ordered=*/true);
                    auto outer_dict_type = dictionary(int16(), list(inner_dict_type));
                    CheckSchemaRoundtrip(Schema({field("f0", outer_dict_type)}));
                }
                {
                    // Dictionaries of assorted index/value types nested inside structs,
                    // themselves wrapped in dictionaries.
                    auto dict_type1 = dictionary(int8(), utf8(), /*ordered=*/true);
                    auto dict_type2 = dictionary(int32(), fixed_size_binary(24));
                    auto dict_type3 = dictionary(int32(), binary());
                    auto dict_type4 = dictionary(int8(), decimal(19, 7));

                    auto struct_type1 = STRUCT({field("s1", dict_type1), field("s2", dict_type2)});
                    auto struct_type2 = STRUCT({field("s3", dict_type3), field("s4", dict_type4)});

                    CheckSchemaRoundtrip(Schema({field("f1", dictionary(int32(), struct_type1)),
                                                 field("f2", dictionary(int32(), struct_type2))}));
                }
            }

            TEST_F(TestSchemaMetadata, KeyValueMetadata) {
                // Metadata can live both on individual fields and on the schema itself.
                auto field_metadata = key_value_metadata({{"key", "value"}});
                auto schema_metadata = key_value_metadata({{"foo",  "bar"},
                                                           {"bizz", "buzz"}});

                auto plain_field = field("f0", std::make_shared<Int8Type>());
                auto annotated_field =
                        field("f1", std::make_shared<Int16Type>(), false, field_metadata);

                CheckSchemaRoundtrip(Schema({plain_field, annotated_field}, schema_metadata));
            }

            TEST_F(TestSchemaMetadata, MetadataVersionForwardCompatibility) {
                // ARROW-9399: reading a schema written with a future (unknown) metadata
                // version must fail cleanly, not crash.
                const std::string root = nebula::test_data_dir;

                // schema_v6.ipc was generated with the currently nonexistent
                // MetadataVersion::V6.
                // Plain concatenation replaces the former std::stringstream, which was
                // needless overhead for joining two strings.
                const std::string schema_v6_path = root + "/forward-compatibility/schema_v6.ipc";

                ASSERT_OK_AND_ASSIGN(auto schema_v6_file, io::ReadableFile::open(schema_v6_path));

                DictionaryMemo placeholder_memo;
                ASSERT_RAISES(turbo::StatusCode::kInvalidArgument,
                              read_schema(schema_v6_file.get(), &placeholder_memo));
            }

            // Record-batch generator functions covering every supported type category;
            // the parameterized roundtrip suites are instantiated once per entry.
            const std::vector<test::MakeRecordBatch *> kBatchCases = {
                    &MakeIntRecordBatch,
                    &MakeListRecordBatch,
                    &MakeListViewRecordBatch,
                    &MakeFixedSizeListRecordBatch,
                    &MakeNonNullRecordBatch,
                    &MakeZeroLengthRecordBatch,
                    &MakeDeeplyNestedList,
                    &MakeDeeplyNestedListView,
                    &MakeStringTypesRecordBatchWithNulls,
                    &MakeStruct,
                    &MakeUnion,
                    &MakeDictionary,
                    &MakeNestedDictionary,
                    &MakeMap,
                    &MakeMapOfDictionary,
                    &MakeDates,
                    &MakeTimestamps,
                    &MakeTimes,
                    &MakeFWBinary,
                    &MakeNull,
                    &MakeDecimal,
                    &MakeBooleanBatch,
                    &MakeFloatBatch,
                    &MakeIntervals,
                    &MakeUuid,
                    &MakeComplex128,
                    &MakeDictExtension};

            // Monotonic counter used to give each memory-mapped test file a unique name.
            static int g_file_number = 0;

            // Keeps the extension types (uuid, dict-extension, complex128) registered
            // for the fixture's lifetime so extension arrays roundtrip as extension
            // types rather than their storage type.
            class ExtensionTypesMixin {
            public:
                // Register the extension types required to ensure roundtripping
                ExtensionTypesMixin() : ext_guard_({uuid(), dict_extension_type(), complex128()}) {}

            protected:
                ExtensionTypeGuard ext_guard_;  // RAII guard — presumably unregisters on destruction
            };

            // Shared fixture for IPC roundtrip tests: serializes record batches to
            // in-memory buffers (stream format) and to a memory-mapped file (file
            // format), reads them back, and compares against the input.
            class IpcTestFixture : public io::MemoryMapFixture, public ExtensionTypesMixin {
            public:
                void SetUp() {
                    options_ = IpcWriteOptions::defaults();
                    ASSERT_OK_AND_ASSIGN(temp_dir_, turbo::ScopedTempDir::create("ipc-test-"));
                }

                // Resolve `file` inside this fixture's scoped temporary directory.
                std::string TempFile(std::string_view file) {
                    return temp_dir_->path()/(std::string(file));
                }

                // Serialize `schema` and read it back into *result; the caller asserts
                // equality with the original.
                void DoSchemaRoundTrip(const Schema &schema, std::shared_ptr<Schema> *result) {
                    ASSERT_OK_AND_ASSIGN(std::shared_ptr<Buffer> serialized_schema,
                                         serialize_schema(schema, options_.memory_pool));

                    DictionaryMemo in_memo;
                    io::BufferReader buf_reader(serialized_schema);
                    ASSERT_OK_AND_ASSIGN(*result, read_schema(&buf_reader, &in_memo));
                }

                // Roundtrip a single batch through an in-memory buffer.
                turbo::Result<std::shared_ptr<RecordBatch>> DoStandardRoundTrip(
                        const RecordBatch &batch, const IpcWriteOptions &options,
                        DictionaryMemo *dictionary_memo,
                        const IpcReadOptions &read_options = IpcReadOptions::defaults()) {
                    TURBO_MOVE_OR_RAISE(std::shared_ptr<Buffer> serialized_batch,
                                           serialize_record_batch(batch, options));

                    io::BufferReader buf_reader(serialized_batch);
                    return read_record_batch(batch.schema(), dictionary_memo, read_options, &buf_reader);
                }

                // Roundtrip a batch through the memory-mapped file using the file
                // format with allow_64bit enabled.
                turbo::Result<std::shared_ptr<RecordBatch>> DoLargeRoundTrip(const RecordBatch &batch,
                                                                      bool zero_data) {
                    // Optionally clear the mapped region so stale bytes from a previous
                    // write cannot mask serialization bugs.
                    if (zero_data) {
                        TURBO_RETURN_NOT_OK(ZeroMemoryMap(mmap_.get()));
                    }
                    TURBO_RETURN_NOT_OK(mmap_->Seek(0));

                    auto options = options_;
                    options.allow_64bit = true;

                    TURBO_MOVE_OR_RAISE(auto file_writer,
                                           make_file_writer(mmap_, batch.schema(), options));
                    TURBO_RETURN_NOT_OK(file_writer->write_record_batch(batch));
                    TURBO_RETURN_NOT_OK(file_writer->close());

                    // Position after close() marks the end of the written file.
                    TURBO_MOVE_OR_RAISE(int64_t offset, mmap_->tell());

                    std::shared_ptr<RecordBatchFileReader> file_reader;
                    TURBO_MOVE_OR_RAISE(file_reader, RecordBatchFileReader::open(mmap_.get(), offset));

                    return file_reader->read_record_batch(0);
                }

                // Assert `result` is fully valid and column-for-column equal to `expected`.
                void CheckReadResult(const RecordBatch &result, const RecordBatch &expected) {
                    ASSERT_OK(result.validate_full());
                    EXPECT_EQ(expected.num_rows(), result.num_rows());

                    ASSERT_TRUE(expected.schema()->equals(*result.schema()));
                    ASSERT_EQ(expected.num_columns(), result.num_columns())
                                                << expected.schema()->to_string() << " result: "
                                                << result.schema()->to_string();

                    CompareBatchColumnsDetailed(result, expected);
                }

                // Roundtrip `batch` through schema serialization, the in-memory stream
                // path, and the large-file path, checking the result each time.
                void CheckRoundtrip(const RecordBatch &batch,
                                    IpcWriteOptions options = IpcWriteOptions::defaults(),
                                    IpcReadOptions read_options = IpcReadOptions::defaults(),
                                    int64_t buffer_size = 1 << 20) {
                    // g_file_number makes each mapped file name unique across tests.
                    std::stringstream ss;
                    ss << "test-write-row-batch-" << g_file_number++;
                    ASSERT_OK_AND_ASSIGN(
                            mmap_, io::MemoryMapFixture::InitMemoryMap(buffer_size, TempFile(ss.str())));

                    std::shared_ptr<Schema> schema_result;
                    DoSchemaRoundTrip(*batch.schema(), &schema_result);
                    ASSERT_TRUE(batch.schema()->equals(*schema_result));

                    // Dictionaries are collected up front so the reader can resolve ids.
                    DictionaryMemo dictionary_memo;
                    ASSERT_OK(::nebula::ipc::internal::CollectDictionaries(batch, &dictionary_memo));

                    ASSERT_OK_AND_ASSIGN(
                            auto result, DoStandardRoundTrip(batch, options, &dictionary_memo, read_options));
                    CheckReadResult(*result, batch);

                    ASSERT_OK_AND_ASSIGN(result, DoLargeRoundTrip(batch, /*zero_data=*/true));
                    CheckReadResult(*result, batch);
                }

                // Convenience overload: wrap a single array into a one-column batch.
                void CheckRoundtrip(const std::shared_ptr<Array> &array,
                                    IpcWriteOptions options = IpcWriteOptions::defaults(),
                                    int64_t buffer_size = 1 << 20) {
                    auto f0 = nebula::field("f0", array->type());
                    std::vector<std::shared_ptr<Field>> fields = {f0};
                    auto schema = std::make_shared<Schema>(fields);

                    auto batch = RecordBatch::create(schema, array->length(), {array});
                    CheckRoundtrip(*batch, options, IpcReadOptions::defaults(), buffer_size);
                }

            protected:
                std::shared_ptr<io::MemoryMappedFile> mmap_;  // target of file-format roundtrips
                IpcWriteOptions options_;                     // reset to defaults in SetUp()
                std::unique_ptr<turbo::ScopedTempDir> temp_dir_;
            };

            TEST(MetadataVersion, ForwardsCompatCheck) {
                // Verify UBSAN is ok with casting out of range metadata version.
                const auto out_of_range = static_cast<flatbuf::MetadataVersion>(72);
                EXPECT_LT(flatbuf::MetadataVersion::MAX, out_of_range);
            }

            // Fixture for write-path tests; setup/teardown simply delegate to
            // IpcTestFixture (whose TearDown comes from a base class outside this
            // file — presumably io::MemoryMapFixture; confirm in tests/io/test_common.h).
            class TestWriteRecordBatch : public ::testing::Test, public IpcTestFixture {
            public:
                void SetUp() override { IpcTestFixture::SetUp(); }

                void TearDown() override { IpcTestFixture::TearDown(); }
            };

            // Parameterized roundtrip fixture: the parameter is a batch-generator
            // function from kBatchCases.
            class TestIpcRoundTrip : public ::testing::TestWithParam<MakeRecordBatch *>,
                                     public IpcTestFixture {
            public:
                void SetUp() override { IpcTestFixture::SetUp(); }

                void TearDown() override { IpcTestFixture::TearDown(); }

                // Write one int batch and assert the written message header carries
                // `expected_version`.
                void TestMetadataVersion(MetadataVersion expected_version) {
                    std::shared_ptr<RecordBatch> batch;
                    ASSERT_OK(MakeIntRecordBatch(&batch));

                    mmap_.reset();  // Ditch previous mmap view, to avoid errors on Windows
                    ASSERT_OK_AND_ASSIGN(mmap_,
                                         io::MemoryMapFixture::InitMemoryMap(1 << 16, "test-metadata"));

                    int32_t metadata_length;
                    int64_t body_length;
                    const int64_t buffer_offset = 0;
                    ASSERT_OK(write_record_batch(*batch, buffer_offset, mmap_.get(), &metadata_length,
                                               &body_length, options_));

                    // Re-read just the message header written at offset 0.
                    ASSERT_OK_AND_ASSIGN(std::unique_ptr<Message> message,
                                         ReadMessage(0, metadata_length, mmap_.get()));
                    ASSERT_EQ(expected_version, message->metadata_version());
                }
            };

            TEST_P(TestIpcRoundTrip, RoundTrip) {
                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK((*GetParam())(&batch));  // NOLINT clang-tidy gtest issue

                // Exercise the roundtrip under every supported metadata version.
                for (const auto metadata_version: kMetadataVersions) {
                    options_.metadata_version = metadata_version;
                    CheckRoundtrip(*batch);
                }
            }

            // With default write options, batches are written with metadata V5.
            TEST_F(TestIpcRoundTrip, DefaultMetadataVersion) {
                TestMetadataVersion(MetadataVersion::V5);
            }

            TEST_F(TestIpcRoundTrip, SpecificMetadataVersion) {
                // An explicitly requested version must be honored in the written header.
                for (const auto version: {MetadataVersion::V4, MetadataVersion::V5}) {
                    options_.metadata_version = version;
                    TestMetadataVersion(version);
                }
            }

            TEST(TestReadMessage, CorruptedSmallInput) {
                // Fewer bytes than a message prefix: must be rejected as invalid.
                std::string data = "abc";
                auto truncated_reader = io::BufferReader::from_string(data);
                ASSERT_RAISES(turbo::StatusCode::kInvalidArgument,
                              ReadMessage(truncated_reader.get()));

                // An empty stream is an unsignaled end-of-stream: null message, no error.
                auto empty_reader = io::BufferReader::from_string("");
                ASSERT_OK_AND_ASSIGN(auto message, ReadMessage(empty_reader.get()));
                ASSERT_EQ(nullptr, message);
            }

            TEST(TestMetadata, GetMetadataVersion) {
                // Each flatbuffer version maps onto the corresponding public enum;
                // MIN and MAX alias V1 and V5 respectively.
                auto check = [](flatbuf::MetadataVersion fb_version, MetadataVersion expected) {
                    ASSERT_EQ(expected, ipc::internal::GetMetadataVersion(fb_version));
                };
                check(flatbuf::MetadataVersion::V1, MetadataVersion::V1);
                check(flatbuf::MetadataVersion::V2, MetadataVersion::V2);
                check(flatbuf::MetadataVersion::V3, MetadataVersion::V3);
                check(flatbuf::MetadataVersion::V4, MetadataVersion::V4);
                check(flatbuf::MetadataVersion::V5, MetadataVersion::V5);
                check(flatbuf::MetadataVersion::MIN, MetadataVersion::V1);
                check(flatbuf::MetadataVersion::MAX, MetadataVersion::V5);
            }

            TEST_P(TestIpcRoundTrip, SliceRoundTrip) {
                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK((*GetParam())(&batch));  // NOLINT clang-tidy gtest issue

                // Slicing from row 2 needs at least 2 rows; skip degenerate batches.
                if (batch->num_rows() < 2) {
                    return;
                }

                CheckRoundtrip(*batch->slice(2, 10));
            }

            TEST_P(TestIpcRoundTrip, ZeroLengthArrays) {
                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK((*GetParam())(&batch));  // NOLINT clang-tidy gtest issue

                // Derive a zero-row slice from whatever batch the generator produced.
                std::shared_ptr<RecordBatch> zero_length_batch;
                if (batch->num_rows() > 2) {
                    zero_length_batch = batch->slice(2, 0);
                } else {
                    zero_length_batch = batch->slice(0, 0);
                }

                CheckRoundtrip(*zero_length_batch);

                // ARROW-544: check binary array
                // A zero-length binary array still carries one value-offsets entry (0).
                ASSERT_OK_AND_ASSIGN(auto value_offsets,
                                     allocate_buffer(sizeof(int32_t), options_.memory_pool));
                *reinterpret_cast<int32_t *>(value_offsets->mutable_data()) = 0;

                std::shared_ptr<Array> bin_array = std::make_shared<BinaryArray>(
                        0, std::move(value_offsets), std::make_shared<Buffer>(nullptr, 0),
                        std::make_shared<Buffer>(nullptr, 0));

                // null value_offsets
                std::shared_ptr<Array> bin_array2 = std::make_shared<BinaryArray>(0, nullptr, nullptr);

                CheckRoundtrip(bin_array);
                CheckRoundtrip(bin_array2);
            }

            TEST_F(TestIpcRoundTrip, SparseUnionOfStructsWithReusedBuffers) {
                // Build three struct arrays (m01, m12, m20) that all reuse the
                // child arrays of one storage struct, wrap them in a sparse
                // union, round-trip through IPC, and verify the values survive
                // while the reused buffers come back as distinct copies.
                auto storage_type = STRUCT({
                                                    field("i", int32()),
                                                    field("f", float32()),
                                                    field("s", utf8()),
                                            });
                auto storage = turbo::checked_pointer_cast<StructArray>(assert_array_from_json(storage_type,
                                                                               R"([
    {"i": 0, "f": 0.0, "s": "a"},
    {"i": 1, "f": 0.5, "s": "b"},
    {"i": 2, "f": 1.5, "s": "c"},
    {"i": 3, "f": 3.0, "s": "d"}
  ])"));

                // Each mXY shares two of storage's child arrays: m01 = {i, f}.
                ASSERT_OK_AND_ASSIGN(
                        auto m01, StructArray::create({storage->field(0), storage->field(1)},
                                                    {storage_type->field(0), storage_type->field(1)}));

                // m12 = {f, s}
                ASSERT_OK_AND_ASSIGN(
                        auto m12, StructArray::create({storage->field(1), storage->field(2)},
                                                    {storage_type->field(1), storage_type->field(2)}));

                // m20 = {s, i}
                ASSERT_OK_AND_ASSIGN(
                        auto m20, StructArray::create({storage->field(2), storage->field(0)},
                                                    {storage_type->field(2), storage_type->field(0)}));

                auto ids = assert_array_from_json(int8(), "[1, 12, 20, 1]");

                // Type codes are {1, 12, 20}; note `01` is an octal literal
                // but still equals 1.
                ASSERT_OK_AND_ASSIGN(
                        auto sparse,
                        SparseUnionArray::create(*ids, {m01, m12, m20}, {"m01", "m12", "m20"}, {01, 12, 20}));

                auto expected = assert_array_from_json(sparse_union(
                                                      {
                                                              field("m01", m01->type()),
                                                              field("m12", m12->type()),
                                                              field("m20", m20->type()),
                                                      },
                                                      {01, 12, 20}),
                                              R"([
    [1,  {"i": 0,   "f": 0.0}],
    [12, {"f": 0.5, "s": "b"}],
    [20, {"s": "c", "i": 2  }],
    [1,  {"i": 3,   "f": 3.0}]
  ])");

                AssertArraysEqual(*expected, *sparse, /*verbose=*/true);

                DictionaryMemo ignored;
                ASSERT_OK_AND_ASSIGN(
                        auto roundtripped_batch,
                        DoStandardRoundTrip(*RecordBatch::create(schema({field("", sparse->type())}),
                                                               sparse->length(), {sparse}),
                                            IpcWriteOptions::defaults(), &ignored));

                auto roundtripped =
                        turbo::checked_pointer_cast<SparseUnionArray>(roundtripped_batch->column(0));
                AssertArraysEqual(*expected, *roundtripped, /*verbose=*/true);

                auto roundtripped_m01 = turbo::checked_pointer_cast<StructArray>(roundtripped->field(0));
                auto roundtripped_m12 = turbo::checked_pointer_cast<StructArray>(roundtripped->field(1));
                auto roundtripped_m20 = turbo::checked_pointer_cast<StructArray>(roundtripped->field(2));

                // The IPC writer does not take advantage of reusable buffers

                // "i" was shared between m01 and m20 before the round trip.
                ASSERT_NE(roundtripped_m01->field(0)->data()->buffers,
                          roundtripped_m20->field(1)->data()->buffers);

                // "f" was shared between m01 and m12.
                ASSERT_NE(roundtripped_m01->field(1)->data()->buffers,
                          roundtripped_m12->field(0)->data()->buffers);

                // "s" was shared between m12 and m20.
                ASSERT_NE(roundtripped_m12->field(1)->data()->buffers,
                          roundtripped_m20->field(0)->data()->buffers);
            }

            TEST_F(TestWriteRecordBatch, WriteWithCompression) {
                random::RandomArrayGenerator rg(/*seed=*/0);

                // Generate both regular and dictionary encoded because the dictionary batch
                // gets compressed also

                int64_t length = 500;

                int dict_size = 50;
                std::shared_ptr<Array> dict = rg.String(dict_size, /*min_length=*/5, /*max_length=*/5,
                        /*null_probability=*/0);
                std::shared_ptr<Array> indices = rg.Int32(length, /*min=*/0, /*max=*/dict_size - 1,
                        /*null_probability=*/0.1);

                auto dict_type = dictionary(int32(), utf8());
                auto dict_field = field("f1", dict_type);
                ASSERT_OK_AND_ASSIGN(auto dict_array,
                                     DictionaryArray::from_arrays(dict_type, indices, dict));

                auto schema = ::nebula::schema({field("f0", utf8()), dict_field});
                auto batch =
                        RecordBatch::create(schema, length, {rg.String(500, 0, 10, 0.1), dict_array});

                // Round-trip with every available supported codec, both with
                // the default (threaded) settings and single-threaded.
                std::vector<CompressionType> codecs = {CompressionType::LZ4_FRAME, CompressionType::ZSTD};
                for (auto codec: codecs) {
                    if (!Codec::IsAvailable(codec)) {
                        continue;
                    }
                    IpcWriteOptions write_options = IpcWriteOptions::defaults();
                    ASSERT_OK_AND_ASSIGN(write_options.codec, Codec::create(codec));
                    CheckRoundtrip(*batch, write_options);

                    // Check non-parallel read and write
                    IpcReadOptions read_options = IpcReadOptions::defaults();
                    write_options.use_threads = false;
                    read_options.use_threads = false;
                    CheckRoundtrip(*batch, write_options, read_options);
                }

                // Codecs not supported for IPC body compression must be
                // rejected at serialization time with InvalidArgument.
                std::vector<CompressionType> disallowed_codecs = {
                        CompressionType::BROTLI, CompressionType::BZ2, CompressionType::LZ4, CompressionType::GZIP,
                        CompressionType::SNAPPY};
                for (auto codec: disallowed_codecs) {
                    if (!Codec::IsAvailable(codec)) {
                        continue;
                    }
                    IpcWriteOptions write_options = IpcWriteOptions::defaults();
                    ASSERT_OK_AND_ASSIGN(write_options.codec, Codec::create(codec));
                    ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, serialize_record_batch(*batch, write_options));
                }
            }

            TEST_F(TestWriteRecordBatch, WriteWithCompressionAndMinSavings) {
                // A small batch that's known to be compressible
                auto batch = assert_record_batch_from_json(schema({field("n", int64())}), R"([
    {"n":0},{"n":1},{"n":2},{"n":3},{"n":4},
    {"n":5},{"n":6},{"n":7},{"n":8},{"n":9}])");

                // Compressed body buffers carry the uncompressed length as a
                // little-endian int64 prefix; -1 means the content was left
                // uncompressed (asserted further below).
                auto prefixed_size = [](const Buffer &buffer) -> int64_t {
                    if (buffer.size() < int64_t(sizeof(int64_t))) return 0;
                    return bit_util::FromLittleEndian(turbo::safe_load_as<int64_t>(buffer.data()));
                };
                // Size of the buffer contents after the int64 length prefix.
                auto content_size = [](const Buffer &buffer) -> int64_t {
                    return buffer.size() - sizeof(int64_t);
                };

                for (auto codec: {CompressionType::LZ4_FRAME, CompressionType::ZSTD}) {
                    if (!Codec::IsAvailable(codec)) {
                        continue;
                    }

                    auto write_options = IpcWriteOptions::defaults();
                    ASSERT_OK_AND_ASSIGN(write_options.codec, Codec::create(codec));
                    auto read_options = IpcReadOptions::defaults();

                    IpcPayload payload;
                    ASSERT_OK(get_record_batch_payload(*batch, write_options, &payload));
                    ASSERT_EQ(payload.body_buffers.size(), 2);
                    // Compute the savings when body buffers are compressed unconditionally. We also
                    // validate that our test batch is indeed compressible.
                    const int64_t uncompressed_size = prefixed_size(*payload.body_buffers[1]);
                    const int64_t compressed_size = content_size(*payload.body_buffers[1]);
                    ASSERT_LT(compressed_size, uncompressed_size);
                    ASSERT_GT(compressed_size, 0);
                    const double expected_savings =
                            1.0 - static_cast<double>(compressed_size) / uncompressed_size;

                    // Using the known savings % as the minimum should yield the same outcome.
                    write_options.min_space_savings = expected_savings;
                    payload = IpcPayload();
                    ASSERT_OK(get_record_batch_payload(*batch, write_options, &payload));
                    ASSERT_EQ(payload.body_buffers.size(), 2);
                    ASSERT_EQ(prefixed_size(*payload.body_buffers[1]), uncompressed_size);
                    ASSERT_EQ(content_size(*payload.body_buffers[1]), compressed_size);
                    CheckRoundtrip(*batch, write_options, read_options);

                    // Slightly bump the threshold. The body buffer should now be prefixed with -1 and its
                    // content left uncompressed.
                    write_options.min_space_savings = std::nextafter(expected_savings, 1.0);
                    payload = IpcPayload();
                    ASSERT_OK(get_record_batch_payload(*batch, write_options, &payload));
                    ASSERT_EQ(payload.body_buffers.size(), 2);
                    ASSERT_EQ(prefixed_size(*payload.body_buffers[1]), -1);
                    ASSERT_EQ(content_size(*payload.body_buffers[1]), uncompressed_size);
                    CheckRoundtrip(*batch, write_options, read_options);

                    // Values just outside [0, 1] must be rejected.
                    for (double out_of_range: {std::nextafter(1.0, 2.0), std::nextafter(0.0, -1.0)}) {
                        write_options.min_space_savings = out_of_range;
                        EXPECT_RAISES_WITH_MESSAGE_THAT(
                                turbo::StatusCode::kInvalidArgument, ::testing::StartsWith("min_space_savings not in range [0,1]"),
                                serialize_record_batch(*batch, write_options));
                    }
                }
            }

            TEST_F(TestWriteRecordBatch, SliceTruncatesBinaryOffsets) {
                // ARROW-6046: serializing a 5-row slice of a string column must
                // truncate the value-offsets buffer to the slice (5 + 1 offsets).
                std::shared_ptr<Array> strings;
                ASSERT_OK(MakeRandomStringArray(500, false, default_memory_pool(), &strings));

                auto full_batch = RecordBatch::create(
                        ::nebula::schema({field("f0", strings->type())}), strings->length(),
                        {strings});
                auto sliced = full_batch->slice(0, 5);

                ASSERT_OK_AND_ASSIGN(
                        mmap_, io::MemoryMapFixture::InitMemoryMap(/*buffer_size=*/1 << 20,
                                                                   TempFile("test-truncate-offsets")));
                DictionaryMemo memo;
                ASSERT_OK_AND_ASSIGN(
                        auto roundtripped,
                        DoStandardRoundTrip(*sliced, IpcWriteOptions::defaults(), &memo));
                // Five rows -> six int32 offsets after truncation.
                ASSERT_EQ(6 * sizeof(int32_t), roundtripped->column(0)->data()->buffers[1]->size());
            }

            TEST_F(TestWriteRecordBatch, SliceTruncatesBuffers) {
                // For every layout below, a 5-row slice must serialize strictly
                // fewer bytes than the full batch, and still round-trip.
                auto CheckArray = [this](const std::shared_ptr<Array> &array) {
                    auto f0 = field("f0", array->type());
                    auto schema = ::nebula::schema({f0});
                    auto batch = RecordBatch::create(schema, array->length(), {array});
                    auto sliced_batch = batch->slice(0, 5);

                    int64_t full_size;
                    int64_t sliced_size;

                    ASSERT_OK(get_record_batch_size(*batch, &full_size));
                    ASSERT_OK(get_record_batch_size(*sliced_batch, &sliced_size));
                    ASSERT_TRUE(sliced_size < full_size) << sliced_size << " " << full_size;

                    // make sure we can write and read it
                    this->CheckRoundtrip(*sliced_batch);
                };

                std::shared_ptr<Array> a0, a1;
                auto pool = default_memory_pool();

                // Integer
                ASSERT_OK(MakeRandomInt32Array(500, false, pool, &a0));
                CheckArray(a0);

                // String / Binary
                // (use ASSERT_OK for consistency with the other maker calls)
                ASSERT_OK(MakeRandomStringArray(500, false, pool, &a0));
                CheckArray(a0);

                // Boolean
                ASSERT_OK(MakeRandomBooleanArray(10000, false, &a0));
                CheckArray(a0);

                // List
                ASSERT_OK(MakeRandomInt32Array(500, false, pool, &a0));
                ASSERT_OK(MakeRandomListArray(a0, 200, false, pool, &a1));
                CheckArray(a1);

                // Struct
                auto struct_type = STRUCT({field("f0", a0->type())});
                std::vector<std::shared_ptr<Array>> struct_children = {a0};
                a1 = std::make_shared<StructArray>(struct_type, a0->length(), struct_children);
                CheckArray(a1);

                // Sparse Union (single child, so all type ids are 0)
                auto union_type = sparse_union({field("f0", a0->type())}, {0});
                std::vector<int32_t> type_ids(a0->length());
                std::shared_ptr<Buffer> ids_buffer;
                ASSERT_OK(CopyBufferFromVector(type_ids, default_memory_pool(), &ids_buffer));
                a1 = std::make_shared<SparseUnionArray>(union_type, a0->length(), struct_children,
                                                        ids_buffer);
                CheckArray(a1);

                // Dense union: offsets are the identity mapping 0..length-1;
                // ids_buffer built above is reused here.
                auto dense_union_type = dense_union({field("f0", a0->type())}, {0});
                std::vector<int32_t> type_offsets;
                for (int32_t i = 0; i < a0->length(); ++i) {
                    type_offsets.push_back(i);
                }
                std::shared_ptr<Buffer> offsets_buffer;
                ASSERT_OK(CopyBufferFromVector(type_offsets, default_memory_pool(), &offsets_buffer));
                a1 = std::make_shared<DenseUnionArray>(dense_union_type, a0->length(), struct_children,
                                                       ids_buffer, offsets_buffer);
                CheckArray(a1);
            }

            TEST_F(TestWriteRecordBatch, RoundtripPreservesBufferSizes) {
                // ARROW-7975: buffer sizes must be preserved by an IPC round trip.
                random::RandomArrayGenerator gen(/*seed=*/0);

                const int64_t num_rows = 15;
                auto strings = gen.String(num_rows, 0, 10, 0.1);
                auto batch = RecordBatch::create(::nebula::schema({field("f0", utf8())}), num_rows,
                                               {strings});

                ASSERT_OK_AND_ASSIGN(
                        mmap_, io::MemoryMapFixture::InitMemoryMap(
                        /*buffer_size=*/1 << 20, TempFile("test-roundtrip-buffer-sizes")));
                DictionaryMemo memo;
                ASSERT_OK_AND_ASSIGN(
                        auto roundtripped,
                        DoStandardRoundTrip(*batch, IpcWriteOptions::defaults(), &memo));

                // Sanity check: the validity bitmap for 15 values is 2 bytes.
                ASSERT_EQ(2, strings->data()->buffers[0]->size());

                // Every buffer of the column must come back the same size.
                const auto &original_buffers = strings->data()->buffers;
                for (size_t i = 0; i < original_buffers.size(); ++i) {
                    ASSERT_EQ(original_buffers[i]->size(),
                              roundtripped->column(0)->data()->buffers[i]->size());
                }
            }

            // The three ways of measuring a serialized record batch must agree:
            // actually writing it to a mock stream, sizing its IPC payload, and
            // asking get_record_batch_size() directly.
            void TestGetRecordBatchSize(const IpcWriteOptions &options,
                                        std::shared_ptr<RecordBatch> batch) {
                io::MockOutputStream mock;
                int32_t written_metadata_length = -1;
                int64_t written_body_length = -1;
                ASSERT_OK(write_record_batch(*batch, 0, &mock, &written_metadata_length,
                                           &written_body_length, options));

                ipc::IpcPayload payload;
                ASSERT_OK(get_record_batch_payload(*batch, options, &payload));
                const int64_t payload_size = get_payload_size(payload, options);

                int64_t reported_size = -1;
                ASSERT_OK(get_record_batch_size(*batch, options, &reported_size));
                ASSERT_EQ(mock.GetExtentBytesWritten(), reported_size);
                ASSERT_EQ(mock.GetExtentBytesWritten(), payload_size);
            }

            TEST_F(TestWriteRecordBatch, IntegerGetRecordBatchSize) {
                // Check reported-vs-actual serialized size (see
                // TestGetRecordBatchSize) over a variety of batch layouts.
                std::shared_ptr<RecordBatch> batch;

                ASSERT_OK(MakeIntRecordBatch(&batch));
                TestGetRecordBatchSize(options_, batch);

                ASSERT_OK(MakeListRecordBatch(&batch));
                TestGetRecordBatchSize(options_, batch);

                ASSERT_OK(MakeListViewRecordBatch(&batch));
                TestGetRecordBatchSize(options_, batch);

                ASSERT_OK(MakeZeroLengthRecordBatch(&batch));
                TestGetRecordBatchSize(options_, batch);

                ASSERT_OK(MakeNonNullRecordBatch(&batch));
                TestGetRecordBatchSize(options_, batch);

                ASSERT_OK(MakeDeeplyNestedList(&batch));
                TestGetRecordBatchSize(options_, batch);

                ASSERT_OK(MakeDeeplyNestedListView(&batch));
                TestGetRecordBatchSize(options_, batch);
            }

            // Fixture for exercising writer/reader recursion-depth limits with
            // deeply nested list<...<int32>> columns written to a memory map.
            class RecursionLimits : public ::testing::Test, public io::MemoryMapFixture {
            public:
                void SetUp() {
                    pool_ = default_memory_pool();
                    ASSERT_OK_AND_ASSIGN(temp_dir_, turbo::ScopedTempDir::create("ipc-recursion-limits-test-"));
                }

                // Path of `file` inside this test's temporary directory.
                std::string TempFile(std::string_view file) {
                    return temp_dir_->path()/(std::string(file));
                }

                void TearDown() { io::MemoryMapFixture::TearDown(); }

                // Build a batch whose single column is nested `recursion_level`
                // lists deep and write it into a fresh memory map. When
                // `override_level` is true, the writer's max_recursion_depth is
                // raised to recursion_level + 1 so the write can succeed.
                turbo::Status WriteToMmap(int recursion_level, bool override_level, int32_t *metadata_length,
                                   int64_t *body_length, std::shared_ptr<RecordBatch> *batch,
                                   std::shared_ptr<Schema> *schema) {
                    const int batch_length = 5;
                    auto type = int32();
                    std::shared_ptr<Array> array;
                    const bool include_nulls = true;
                    TURBO_RETURN_NOT_OK(MakeRandomInt32Array(1000, include_nulls, pool_, &array));
                    for (int i = 0; i < recursion_level; ++i) {
                        type = list(type);
                        TURBO_RETURN_NOT_OK(
                                MakeRandomListArray(array, batch_length, include_nulls, pool_, &array));
                    }

                    auto f0 = field("f0", type);

                    *schema = ::nebula::schema({f0});

                    *batch = RecordBatch::create(*schema, batch_length, {array});

                    std::stringstream ss;
                    // g_file_number is a counter defined elsewhere in this file;
                    // it keeps each memory-mapped file name unique.
                    ss << "test-write-past-max-recursion-" << g_file_number++;
                    const int memory_map_size = 1 << 20;
                    TURBO_MOVE_OR_RAISE(
                            mmap_, io::MemoryMapFixture::InitMemoryMap(memory_map_size, TempFile(ss.str())));

                    auto options = IpcWriteOptions::defaults();
                    if (override_level) {
                        options.max_recursion_depth = recursion_level + 1;
                    }
                    return write_record_batch(**batch, 0, mmap_.get(), metadata_length, body_length,
                                            options);
                }

            protected:
                std::shared_ptr<io::MemoryMappedFile> mmap_;
                std::unique_ptr<turbo::ScopedTempDir> temp_dir_;
                MemoryPool *pool_;
            };

            TEST_F(RecursionLimits, WriteLimit) {
#ifdef __EMSCRIPTEN__
                KTEST_SKIP() << "This crashes the Emscripten runtime.";
#endif
                // Writing a batch nested past the writer's default recursion
                // limit must fail with InvalidArgument.
                int32_t metadata_length = -1;
                int64_t body_length = -1;
                std::shared_ptr<Schema> schema;
                std::shared_ptr<RecordBatch> batch;
                const int too_deep = (1 << 8) + 1;
                ASSERT_RAISES(turbo::StatusCode::kInvalidArgument,
                              WriteToMmap(too_deep, /*override_level=*/false, &metadata_length,
                                          &body_length, &batch, &schema));
            }

            TEST_F(RecursionLimits, ReadLimit) {
                // A batch nested 64 levels deep is writable when the writer's
                // limit is raised, but reading it back with default read
                // options must fail with InvalidArgument.
                int32_t metadata_length = -1;
                int64_t body_length = -1;
                std::shared_ptr<Schema> schema;

                const int recursion_depth = 64;

                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK(WriteToMmap(recursion_depth, true, &metadata_length, &body_length, &batch,
                                      &schema));

                ASSERT_OK_AND_ASSIGN(std::unique_ptr<Message> message,
                                     ReadMessage(0, metadata_length, mmap_.get()));

                io::BufferReader reader(message->body());

                DictionaryMemo empty_memo;
                ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, read_record_batch(*message->metadata(), schema, &empty_memo,
                                                       IpcReadOptions::defaults(), &reader));
            }

// Test fails with a structured exception on Windows + Debug
#if !defined(_WIN32) || defined(NDEBUG)
            TEST_F(RecursionLimits, StressLimit) {
#ifdef __EMSCRIPTEN__
                KTEST_SKIP() << "This crashes the Emscripten runtime.";
#endif

                // Write and read back a deeply nested batch with both the
                // writer's and reader's recursion limits raised just above the
                // nesting depth; the round-tripped batch must compare equal.
                auto CheckDepth = [this](int recursion_depth, bool *it_works) {
                    int32_t metadata_length = -1;
                    int64_t body_length = -1;
                    std::shared_ptr<Schema> schema;
                    std::shared_ptr<RecordBatch> batch;
                    ASSERT_OK(WriteToMmap(recursion_depth, true, &metadata_length, &body_length, &batch,
                                          &schema));

                    ASSERT_OK_AND_ASSIGN(std::unique_ptr<Message> message,
                                         ReadMessage(0, metadata_length, mmap_.get()));

                    DictionaryMemo empty_memo;

                    auto options = IpcReadOptions::defaults();
                    options.max_recursion_depth = recursion_depth + 1;
                    io::BufferReader reader(message->body());
                    std::shared_ptr<RecordBatch> result;
                    ASSERT_OK_AND_ASSIGN(result, read_record_batch(*message->metadata(), schema,
                                                                 &empty_memo, options, &reader));
                    *it_works = result->equals(*batch);
                };

                bool it_works = false;
                CheckDepth(100, &it_works);
                ASSERT_TRUE(it_works);

// Mitigate Valgrind's slowness
#if !defined(NEBULA_VALGRIND)
                CheckDepth(500, &it_works);
                ASSERT_TRUE(it_works);
#endif
            }

#endif  // !defined(_WIN32) || defined(NDEBUG)

            // Writer/reader helper driving the IPC *file* format: batches are
            // written via make_file_writer() into an in-memory buffer and read
            // back with RecordBatchFileReader.
            struct FileWriterHelper {
                static constexpr bool kIsFileFormat = true;

                // ReadBatches() is virtual and overridden (e.g. by
                // FileGeneratorWriterHelper below), so destruction through a
                // base pointer requires a virtual destructor.
                virtual ~FileWriterHelper() = default;

                // Create a fresh in-memory sink and a file writer for `schema`.
                turbo::Status init(const std::shared_ptr<Schema> &schema, const IpcWriteOptions &options,
                            const std::shared_ptr<const KeyValueMetadata> &metadata = nullptr) {
                    num_batches_written_ = 0;

                    TURBO_MOVE_OR_RAISE(buffer_, allocate_resizable_buffer(0));
                    sink_.reset(new io::BufferOutputStream(buffer_));
                    TURBO_MOVE_OR_RAISE(writer_,
                                           make_file_writer(sink_.get(), schema, options, metadata));
                    return turbo::OkStatus();
                }

                // Write one batch, with optional per-batch custom metadata.
                turbo::Status WriteBatch(const std::shared_ptr<RecordBatch> &batch,
                                  const std::shared_ptr<const KeyValueMetadata> &metadata = nullptr) {
                    TURBO_RETURN_NOT_OK(writer_->write_record_batch(*batch, metadata));
                    num_batches_written_++;
                    return turbo::OkStatus();
                }

                // Write all `batches` at once as a table.
                turbo::Status write_table(const RecordBatchVector &batches) {
                    num_batches_written_ += static_cast<int>(batches.size());
                    TURBO_MOVE_OR_RAISE(auto table, Table::from_record_batches(batches));
                    return writer_->write_table(*table);
                }

                // Close the writer and the sink, remembering the end-of-file
                // offset used by the read methods below.
                turbo::Status finish(WriteStats *out_stats = nullptr) {
                    TURBO_RETURN_NOT_OK(writer_->close());
                    if (out_stats) {
                        *out_stats = writer_->stats();
                    }
                    TURBO_RETURN_NOT_OK(sink_->close());
                    // Current offset into stream is the end of the file
                    return sink_->tell().try_value(&footer_offset_);
                }

                // Read every written batch back, checking the batch count and
                // optionally collecting stats and per-batch custom metadata.
                virtual turbo::Status ReadBatches(const IpcReadOptions &options,
                                           RecordBatchVector *out_batches,
                                           ReadStats *out_stats = nullptr,
                                           MetadataVector *out_metadata_list = nullptr) {
                    auto buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    TURBO_MOVE_OR_RAISE(auto reader, RecordBatchFileReader::open(
                            buf_reader.get(), footer_offset_, options));

                    EXPECT_EQ(num_batches_written_, reader->num_record_batches());
                    for (int i = 0; i < num_batches_written_; ++i) {
                        TURBO_MOVE_OR_RAISE(auto chunk_with_metadata,
                                               reader->read_record_batch_with_custom_metadata(i));
                        auto chunk = chunk_with_metadata.batch;
                        out_batches->push_back(chunk);
                        if (out_metadata_list) {
                            auto metadata = chunk_with_metadata.custom_metadata;
                            out_metadata_list->push_back(metadata);
                        }
                    }
                    if (out_stats) {
                        *out_stats = reader->stats();
                    }
                    return turbo::OkStatus();
                }

                turbo::Status read_schema(std::shared_ptr<Schema> *out) {
                    return read_schema(ipc::IpcReadOptions::defaults(), out);
                }

                turbo::Status read_schema(const IpcReadOptions &read_options, std::shared_ptr<Schema> *out) {
                    auto buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    TURBO_MOVE_OR_RAISE(
                            auto reader,
                            RecordBatchFileReader::open(buf_reader.get(), footer_offset_, read_options));

                    *out = reader->schema();
                    return turbo::OkStatus();
                }

                // Footer-level (file-wide) custom metadata, if any.
                turbo::Result<std::shared_ptr<const KeyValueMetadata>> ReadFooterMetadata() {
                    auto buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    TURBO_MOVE_OR_RAISE(auto reader,
                                           RecordBatchFileReader::open(buf_reader.get(), footer_offset_));
                    return reader->metadata();
                }

                // All written batches concatenated into one Table.
                turbo::Result<std::shared_ptr<Table>> ReadAll() {
                    auto buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    TURBO_MOVE_OR_RAISE(auto reader,
                                           RecordBatchFileReader::open(buf_reader.get(), footer_offset_));
                    return reader->to_table();
                }

                std::shared_ptr<ResizableBuffer> buffer_;
                std::unique_ptr<io::BufferOutputStream> sink_;
                std::shared_ptr<RecordBatchWriter> writer_;
                int num_batches_written_;   // set by init(), counted by the write methods
                int64_t footer_offset_;     // set by finish()
            };

// Helper class since coalescing will not happen if the file is zero copy
            class NoZeroCopyBufferReader : public io::BufferReader {
                using BufferReader::BufferReader;

                // Report no zero-copy support so the IPC reader takes the
                // copying (coalescing) read path.
                bool supports_zero_copy() const override { return false; }
            };

            // Variant of FileWriterHelper that reads batches back through the
            // async record-batch generator; kCoalesce selects a non-zero-copy
            // reader so the coalescing read path is exercised.
            template<bool kCoalesce>
            struct FileGeneratorWriterHelper : public FileWriterHelper {
                turbo::Status ReadBatches(const IpcReadOptions &options, RecordBatchVector *out_batches,
                                   ReadStats *out_stats = nullptr,
                                   MetadataVector *out_metadata_list = nullptr) override {
                    std::shared_ptr<io::RandomAccessFile> buf_reader;
                    if (kCoalesce) {
                        // Use a non-zero-copy enabled BufferReader so we can test paths properly
                        buf_reader = std::make_shared<NoZeroCopyBufferReader>(buffer_);
                    } else {
                        buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    }
                    AsyncGenerator<std::shared_ptr<RecordBatch>> generator;

                    {
                        auto fut = RecordBatchFileReader::open_async(buf_reader, footer_offset_, options);
                        // Do NOT assert OK since some tests check whether this fails properly
                        EXPECT_FINISHES(fut);
                        TURBO_MOVE_OR_RAISE(auto reader, fut.result());
                        EXPECT_EQ(num_batches_written_, reader->num_record_batches());
                        // Generator will keep reader alive internally
                        TURBO_MOVE_OR_RAISE(generator, reader->get_record_batch_generator(kCoalesce));
                    }

                    // Generator is async-reentrant: issue all reads before awaiting any
                    std::vector<Future<std::shared_ptr<RecordBatch>>> futures;
                    for (int i = 0; i < num_batches_written_; ++i) {
                        futures.push_back(generator());
                    }
                    // One extra pull must yield the end-of-stream marker (nullptr)
                    auto fut = generator();
                    EXPECT_FINISHES_OK_AND_EQ(nullptr, fut);
                    for (auto &future: futures) {
                        EXPECT_FINISHES_OK_AND_ASSIGN(auto batch, future);
                        out_batches->push_back(batch);
                    }

                    // The generator doesn't track stats.
                    EXPECT_EQ(nullptr, out_stats);

                    return turbo::OkStatus();
                }
            };

            struct StreamWriterHelper {
                static constexpr bool kIsFileFormat = false;

                turbo::Status init(const std::shared_ptr<Schema> &schema, const IpcWriteOptions &options) {
                    TURBO_MOVE_OR_RAISE(buffer_, allocate_resizable_buffer(0));
                    sink_.reset(new io::BufferOutputStream(buffer_));
                    TURBO_MOVE_OR_RAISE(writer_, make_stream_writer(sink_.get(), schema, options));
                    return turbo::OkStatus();
                }

                turbo::Status WriteBatch(const std::shared_ptr<RecordBatch> &batch,
                                  const std::shared_ptr<const KeyValueMetadata> &metadata = nullptr) {
                    TURBO_RETURN_NOT_OK(writer_->write_record_batch(*batch, metadata));
                    return turbo::OkStatus();
                }

                turbo::Status write_table(const RecordBatchVector &batches) {
                    TURBO_MOVE_OR_RAISE(auto table, Table::from_record_batches(batches));
                    return writer_->write_table(*table);
                }

                turbo::Status finish(WriteStats *out_stats = nullptr) {
                    TURBO_RETURN_NOT_OK(writer_->close());
                    if (out_stats) {
                        *out_stats = writer_->stats();
                    }
                    return sink_->close();
                }

                virtual turbo::Status ReadBatches(const IpcReadOptions &options,
                                           RecordBatchVector *out_batches,
                                           ReadStats *out_stats = nullptr,
                                           MetadataVector *out_metadata_list = nullptr) {
                    // Re-open the captured buffer as an IPC stream and read it back.
                    auto source = std::make_shared<io::BufferReader>(buffer_);
                    TURBO_MOVE_OR_RAISE(auto reader, RecordBatchStreamReader::open(source, options));
                    if (out_metadata_list == nullptr) {
                        // Caller does not want per-batch metadata; bulk-read everything.
                        TURBO_MOVE_OR_RAISE(*out_batches, reader->to_record_batches());
                    } else {
                        // Pull batches one at a time so custom metadata can be collected
                        // alongside each batch.
                        for (;;) {
                            TURBO_MOVE_OR_RAISE(auto next, reader->read_next());
                            if (next.batch == nullptr) {
                                break;
                            }
                            out_batches->push_back(next.batch);
                            out_metadata_list->push_back(next.custom_metadata);
                        }
                    }
                    if (out_stats != nullptr) {
                        *out_stats = reader->stats();
                    }
                    return turbo::OkStatus();
                }

                turbo::Status read_schema(std::shared_ptr<Schema> *out) {
                    // Convenience overload: delegate to the option-taking variant.
                    const auto default_options = ipc::IpcReadOptions::defaults();
                    return read_schema(default_options, out);
                }

                // Read back only the schema of the written stream.
                //
                // Opens a fresh BufferReader over the captured buffer and returns the
                // schema reported by the stream reader.
                virtual turbo::Status read_schema(const IpcReadOptions &read_options,
                                          std::shared_ptr<Schema> *out) {
                    auto buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    // Pass the shared_ptr (not buf_reader.get()) so the reader shares
                    // ownership of the stream, matching ReadBatches() above and
                    // removing any lifetime hazard should the reader outlive this scope.
                    TURBO_MOVE_OR_RAISE(auto reader,
                                           RecordBatchStreamReader::open(buf_reader, read_options));
                    *out = reader->schema();
                    return turbo::OkStatus();
                }

                // In-memory destination written by the writer and re-read by the
                // Read* helpers above.
                std::shared_ptr<ResizableBuffer> buffer_;
                // Output stream wrapping buffer_; closed by finish().
                std::unique_ptr<io::BufferOutputStream> sink_;
                // The IPC record batch writer under test.
                std::shared_ptr<RecordBatchWriter> writer_;
            };

            // A CollectListener that copies each decoded batch onto default CPU
            // memory before collecting it.
            class CopyCollectListener : public CollectListener {
            public:
                CopyCollectListener() = default;

                turbo::Status on_record_batch_with_metadata_decoded(
                        RecordBatchWithMetadata record_batch_with_metadata) override {
                    // Replace the batch with a copy made through the default CPU
                    // memory manager, then delegate collection to the base listener.
                    TURBO_MOVE_OR_RAISE(
                            record_batch_with_metadata.batch,
                            record_batch_with_metadata.batch->copy_to(default_cpu_memory_manager()));
                    return CollectListener::on_record_batch_with_metadata_decoded(record_batch_with_metadata);
                }
            };

            // Variant of StreamWriterHelper that reads back through a StreamDecoder
            // instead of a RecordBatchStreamReader. Subclasses choose how the raw
            // bytes are fed to the decoder via do_consume().
            struct StreamDecoderWriterHelper : public StreamWriterHelper {
                // Note: out_metadata_list is accepted for interface compatibility but
                // is not populated by this implementation.
                turbo::Status ReadBatches(const IpcReadOptions &options, RecordBatchVector *out_batches,
                                   ReadStats *out_stats = nullptr,
                                   MetadataVector *out_metadata_list = nullptr) override {
                    auto collector = std::make_shared<CopyCollectListener>();
                    StreamDecoder stream_decoder(collector, options);
                    TURBO_RETURN_NOT_OK(do_consume(&stream_decoder));
                    *out_batches = collector->record_batches();
                    if (out_stats != nullptr) {
                        *out_stats = stream_decoder.stats();
                    }
                    return turbo::OkStatus();
                }

                turbo::Status read_schema(const IpcReadOptions &read_options,
                                  std::shared_ptr<Schema> *out) override {
                    auto collector = std::make_shared<CollectListener>();
                    StreamDecoder stream_decoder(collector, read_options);
                    TURBO_RETURN_NOT_OK(do_consume(&stream_decoder));
                    *out = collector->schema();
                    return turbo::OkStatus();
                }

                // Feed the captured stream bytes into the decoder; strategy differs
                // per subclass (whole buffer, byte-at-a-time, two chunks, ...).
                virtual turbo::Status do_consume(StreamDecoder *decoder) = 0;
            };

            struct StreamDecoderDataWriterHelper : public StreamDecoderWriterHelper {
                turbo::Status do_consume(StreamDecoder *decoder) override {
                    // Feed the decoder from a transient copy; the bytes are only valid
                    // for the duration of this call, so the decoder must not keep
                    // pointers into them.
                    TURBO_MOVE_OR_RAISE(auto scratch,
                                           Buffer::copy(buffer_, nebula::default_cpu_memory_manager()));
                    return decoder->consume(scratch->data(), scratch->size());
                }
            };

            struct StreamDecoderBufferWriterHelper : public StreamDecoderWriterHelper {
                // Hand the entire captured buffer to the decoder in a single call.
                turbo::Status do_consume(StreamDecoder *decoder) override {
                    return decoder->consume(buffer_);
                }
            };

            struct StreamDecoderSmallChunksWriterHelper : public StreamDecoderWriterHelper {
                turbo::Status do_consume(StreamDecoder *decoder) override {
                    // Feed the stream one byte at a time, each through a freshly
                    // allocated single-byte copy that dies at the end of the iteration,
                    // so the decoder cannot rely on the input staying alive.
                    // NOTE(review): the loop bound is size() - 1, so the stream's final
                    // byte is never fed to the decoder — confirm this is intentional.
                    const int64_t limit = buffer_->size() - 1;
                    for (int64_t pos = 0; pos < limit; ++pos) {
                        TURBO_MOVE_OR_RAISE(auto one_byte, buffer_->copy_slice(pos, 1));
                        TURBO_RETURN_NOT_OK(decoder->consume(one_byte->data(), one_byte->size()));
                    }
                    return turbo::OkStatus();
                }
            };

            struct StreamDecoderLargeChunksWriterHelper : public StreamDecoderWriterHelper {
                turbo::Status do_consume(StreamDecoder *decoder) override {
                    // Split the stream into two uneven pieces: the first byte alone,
                    // then everything that remains.
                    TURBO_RETURN_NOT_OK(decoder->consume(SliceBuffer(buffer_, 0, 1)));
                    return decoder->consume(SliceBuffer(buffer_, 1));
                }
            };

// Parameterized mixin with tests for stream / file writer

            template<class WriterHelperType>
            class ReaderWriterMixin : public ExtensionTypesMixin {
            public:
                using WriterHelper = WriterHelperType;

                // Check simple RecordBatch roundtripping
                void TestRoundTrip(test::MakeRecordBatch param, const IpcWriteOptions &options) {
                    std::shared_ptr<RecordBatch> batch1;
                    std::shared_ptr<RecordBatch> batch2;
                    ASSERT_OK(param(&batch1));  // NOLINT clang-tidy gtest issue
                    ASSERT_OK(param(&batch2));  // NOLINT clang-tidy gtest issue

                    RecordBatchVector in_batches = {batch1, batch2};
                    RecordBatchVector out_batches;

                    WriterHelper writer_helper;
                    ASSERT_OK(RoundTripHelper(writer_helper, in_batches, options,
                                              IpcReadOptions::defaults(), &out_batches));
                    ASSERT_EQ(out_batches.size(), in_batches.size());

                    // Compare batches
                    for (size_t i = 0; i < in_batches.size(); ++i) {
                        CompareBatch(*in_batches[i], *out_batches[i]);
                    }
                }

                void TestZeroLengthRoundTrip(test::MakeRecordBatch param,
                                             const IpcWriteOptions &options) {
                    std::shared_ptr<RecordBatch> batch1;
                    std::shared_ptr<RecordBatch> batch2;
                    ASSERT_OK(param(&batch1));  // NOLINT clang-tidy gtest issue
                    ASSERT_OK(param(&batch2));  // NOLINT clang-tidy gtest issue
                    batch1 = batch1->slice(0, 0);
                    batch2 = batch2->slice(0, 0);

                    RecordBatchVector in_batches = {batch1, batch2};
                    RecordBatchVector out_batches;

                    WriterHelper writer_helper;
                    ASSERT_OK(RoundTripHelper(writer_helper, in_batches, options,
                                              IpcReadOptions::defaults(), &out_batches));
                    ASSERT_EQ(out_batches.size(), in_batches.size());

                    // Compare batches
                    for (size_t i = 0; i < in_batches.size(); ++i) {
                        CompareBatch(*in_batches[i], *out_batches[i]);
                    }
                }

                void TestRoundTripWithOptions(test::MakeRecordBatch make_record_batch) {
                    TestRoundTrip(make_record_batch, IpcWriteOptions::defaults());
                    TestZeroLengthRoundTrip(make_record_batch, IpcWriteOptions::defaults());

                    IpcWriteOptions options;
                    options.write_legacy_ipc_format = true;
                    TestRoundTrip(make_record_batch, options);
                    TestZeroLengthRoundTrip(make_record_batch, options);
                }

                void TestDictionaryRoundtrip() {
                    std::shared_ptr<RecordBatch> batch;
                    ASSERT_OK(MakeDictionary(&batch));

                    WriterHelper writer_helper;
                    RecordBatchVector out_batches;
                    ASSERT_OK(RoundTripHelper(writer_helper, {batch}, IpcWriteOptions::defaults(),
                                              IpcReadOptions::defaults(), &out_batches));
                    ASSERT_EQ(out_batches.size(), 1);

                    // TODO(wesm): This was broken in ARROW-3144. I'm not sure how to
                    // restore the deduplication logic yet because dictionaries are
                    // corresponded to the Schema using Field pointers rather than
                    // DataType as before

                    // CheckDictionariesDeduplicated(*out_batches[0]);
                }

                void TestReadSubsetOfFields() {
                    // Part of ARROW-7979
                    auto a0 = assert_array_from_json(utf8(), "[\"a0\", null]");
                    auto a1 = assert_array_from_json(utf8(), "[\"a1\", null]");
                    auto a2 = assert_array_from_json(utf8(), "[\"a2\", null]");
                    auto a3 = assert_array_from_json(utf8(), "[\"a3\", null]");

                    auto my_schema = schema({field("a0", utf8()), field("a1", utf8()),
                                             field("a2", utf8()), field("a3", utf8())},
                                            key_value_metadata({"key1"}, {"value1"}));
                    auto batch = RecordBatch::create(my_schema, a0->length(), {a0, a1, a2, a3});

                    IpcReadOptions options = IpcReadOptions::defaults();

                    options.included_fields = {1, 3};

                    {
                        WriterHelper writer_helper;
                        RecordBatchVector out_batches;
                        std::shared_ptr<Schema> out_schema;
                        ASSERT_OK(RoundTripHelper(writer_helper, {batch}, IpcWriteOptions::defaults(),
                                                  options, &out_batches, &out_schema));

                        auto ex_schema = schema({field("a1", utf8()), field("a3", utf8())},
                                                key_value_metadata({"key1"}, {"value1"}));
                        AssertSchemaEqual(*ex_schema, *out_schema);

                        auto ex_batch = RecordBatch::create(ex_schema, a0->length(), {a1, a3});
                        AssertBatchesEqual(*ex_batch, *out_batches[0], /*check_metadata=*/true);
                    }

                    // Duplicated or unordered indices are normalized when reading
                    options.included_fields = {3, 1, 1};

                    {
                        WriterHelper writer_helper;
                        RecordBatchVector out_batches;
                        std::shared_ptr<Schema> out_schema;
                        ASSERT_OK(RoundTripHelper(writer_helper, {batch}, IpcWriteOptions::defaults(),
                                                  options, &out_batches, &out_schema));

                        auto ex_schema = schema({field("a1", utf8()), field("a3", utf8())},
                                                key_value_metadata({"key1"}, {"value1"}));
                        AssertSchemaEqual(*ex_schema, *out_schema);

                        auto ex_batch = RecordBatch::create(ex_schema, a0->length(), {a1, a3});
                        AssertBatchesEqual(*ex_batch, *out_batches[0], /*check_metadata=*/true);
                    }

                    // Out of bounds cases
                    options.included_fields = {1, 3, 5};
                    {
                        WriterHelper writer_helper;
                        RecordBatchVector out_batches;
                        ASSERT_RAISES(turbo::StatusCode::kInvalidArgument,
                                      RoundTripHelper(writer_helper, {batch}, IpcWriteOptions::defaults(),
                                                      options, &out_batches));
                    }
                    options.included_fields = {1, 3, -1};
                    {
                        WriterHelper writer_helper;
                        RecordBatchVector out_batches;
                        ASSERT_RAISES(turbo::StatusCode::kInvalidArgument,
                                      RoundTripHelper(writer_helper, {batch}, IpcWriteOptions::defaults(),
                                                      options, &out_batches));
                    }
                }

                void TestWriteAfterClose() {
                    // Part of GH-35095.
                    std::shared_ptr<RecordBatch> batch_ints;
                    ASSERT_OK(MakeIntRecordBatch(&batch_ints));

                    auto schema = batch_ints->schema();

                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(schema, IpcWriteOptions::defaults()));
                    ASSERT_OK(writer_helper.WriteBatch(batch_ints));
                    ASSERT_OK(writer_helper.finish());

                    // write after close raises status
                    ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, writer_helper.WriteBatch(batch_ints));
                }

                void TestWriteDifferentSchema() {
                    // Test writing batches with a different schema than the RecordBatchWriter
                    // was initialized with.
                    std::shared_ptr<RecordBatch> batch_ints, batch_bools;
                    ASSERT_OK(MakeIntRecordBatch(&batch_ints));
                    ASSERT_OK(MakeBooleanBatch(&batch_bools));

                    std::shared_ptr<Schema> schema = batch_bools->schema();
                    ASSERT_FALSE(schema->has_metadata());
                    schema = schema->with_metadata(key_value_metadata({"some_key"}, {"some_value"}));

                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(schema, IpcWriteOptions::defaults()));
                    // Writing a record batch with a different schema
                    ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, writer_helper.WriteBatch(batch_ints));
                    // Writing a record batch with the same schema (except metadata)
                    ASSERT_OK(writer_helper.WriteBatch(batch_bools));
                    ASSERT_OK(writer_helper.finish());

                    // The single successful batch can be read again
                    RecordBatchVector out_batches;
                    ASSERT_OK(writer_helper.ReadBatches(IpcReadOptions::defaults(), &out_batches));
                    ASSERT_EQ(out_batches.size(), 1);
                    CompareBatch(*out_batches[0], *batch_bools, false /* compare_metadata */);
                    // Metadata from the RecordBatchWriter initialization schema was kept
                    ASSERT_TRUE(out_batches[0]->schema()->equals(*schema));
                }

                void TestWriteBatchWithMetadata() {
                    std::shared_ptr<RecordBatch> batch;
                    ASSERT_OK(MakeIntRecordBatch(&batch));

                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(batch->schema(), IpcWriteOptions::defaults()));

                    auto metadata = key_value_metadata({"some_key"}, {"some_value"});
                    ASSERT_OK(writer_helper.WriteBatch(batch, metadata));
                    ASSERT_OK(writer_helper.finish());

                    RecordBatchVector out_batches;
                    MetadataVector out_metadata_list;
                    ASSERT_OK(writer_helper.ReadBatches(IpcReadOptions::defaults(), &out_batches, nullptr,
                                                        &out_metadata_list));
                    ASSERT_EQ(out_batches.size(), 1);
                    ASSERT_EQ(out_metadata_list.size(), 1);
                    CompareBatch(*out_batches[0], *batch, false /* compare_metadata */);
                    ASSERT_TRUE(out_metadata_list[0]->equals(*metadata));
                }

                // write multiple batches and each of them with different metadata
                void TestWriteDifferentMetadata() {
                    std::shared_ptr<RecordBatch> batch_0;
                    std::shared_ptr<RecordBatch> batch_1;
                    auto metadata_0 = key_value_metadata({"some_key"}, {"0"});
                    auto metadata_1 = key_value_metadata({"some_key"}, {"1"});
                    ASSERT_OK(MakeIntRecordBatch(&batch_0));
                    ASSERT_OK(MakeIntRecordBatch(&batch_1));

                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(batch_0->schema(), IpcWriteOptions::defaults()));

                    ASSERT_OK(writer_helper.WriteBatch(batch_0, metadata_0));

                    // write a batch with different metadata
                    ASSERT_OK(writer_helper.WriteBatch(batch_1, metadata_1));
                    ASSERT_OK(writer_helper.finish());

                    RecordBatchVector out_batches;
                    MetadataVector out_metadata_list;
                    ASSERT_OK(writer_helper.ReadBatches(IpcReadOptions::defaults(), &out_batches, nullptr,
                                                        &out_metadata_list));
                    ASSERT_EQ(out_batches.size(), 2);
                    ASSERT_EQ(out_metadata_list.size(), 2);
                    CompareBatch(*out_batches[0], *batch_0, true /* compare_metadata */);
                    CompareBatch(*out_batches[1], *batch_1, true /* compare_metadata */);
                    ASSERT_TRUE(out_metadata_list[0]->equals(*metadata_0));
                    ASSERT_TRUE(out_metadata_list[1]->equals(*metadata_1));
                }

                void TestWriteNoRecordBatches() {
                    // Test writing no batches.
                    auto schema = nebula::schema({field("a", int32())});

                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(schema, IpcWriteOptions::defaults()));
                    ASSERT_OK(writer_helper.finish());

                    RecordBatchVector out_batches;
                    ASSERT_OK(writer_helper.ReadBatches(IpcReadOptions::defaults(), &out_batches));
                    ASSERT_EQ(out_batches.size(), 0);

                    std::shared_ptr<Schema> actual_schema;
                    ASSERT_OK(writer_helper.read_schema(&actual_schema));
                    AssertSchemaEqual(*actual_schema, *schema);
                }

            private:
                turbo::Status RoundTripHelper(WriterHelper &writer_helper, const RecordBatchVector &in_batches,
                                       const IpcWriteOptions &write_options,
                                       const IpcReadOptions &read_options,
                                       RecordBatchVector *out_batches,
                                       std::shared_ptr<Schema> *out_schema = nullptr) {
                    TURBO_RETURN_NOT_OK(writer_helper.init(in_batches[0]->schema(), write_options));
                    for (const auto &batch: in_batches) {
                        TURBO_RETURN_NOT_OK(writer_helper.WriteBatch(batch));
                    }
                    TURBO_RETURN_NOT_OK(writer_helper.finish());
                    TURBO_RETURN_NOT_OK(writer_helper.ReadBatches(read_options, out_batches));
                    if (out_schema) {
                        TURBO_RETURN_NOT_OK(writer_helper.read_schema(read_options, out_schema));
                    }
                    for (const auto &batch: *out_batches) {
                        TURBO_RETURN_NOT_OK(batch->validate_full());
                    }
                    return turbo::OkStatus();
                }

                void CheckBatchDictionaries(const RecordBatch &batch) {
                    // Check that dictionaries that should be the same are the same
                    auto schema = batch.schema();

                    const auto &b0 = turbo::checked_cast<const DictionaryArray &>(*batch.column(0));
                    const auto &b1 = turbo::checked_cast<const DictionaryArray &>(*batch.column(1));

                    ASSERT_EQ(b0.dictionary().get(), b1.dictionary().get());

                    // Same dictionary used for list values
                    const auto &b3 = turbo::checked_cast<const ListArray &>(*batch.column(3));
                    const auto &b3_value = turbo::checked_cast<const DictionaryArray &>(*b3.values());
                    ASSERT_EQ(b0.dictionary().get(), b3_value.dictionary().get());
                }
            };  // namespace test

            // File-format round-trip fixture, parameterized by batch factory.
            class TestFileFormat : public ReaderWriterMixin<FileWriterHelper>,
                                   public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // File-format fixture using the generator-based helper (no coalescing).
            class TestFileFormatGenerator
                    : public ReaderWriterMixin<FileGeneratorWriterHelper</*kCoalesce=*/false>>,
                      public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // File-format fixture using the generator-based helper with coalescing.
            class TestFileFormatGeneratorCoalesced
                    : public ReaderWriterMixin<FileGeneratorWriterHelper</*kCoalesce=*/true>>,
                      public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // Stream-format round-trip fixture.
            class TestStreamFormat : public ReaderWriterMixin<StreamWriterHelper>,
                                     public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // StreamDecoder fixture: bytes fed via consume(data, size).
            class TestStreamDecoderData : public ReaderWriterMixin<StreamDecoderDataWriterHelper>,
                                          public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // StreamDecoder fixture: bytes fed as a single Buffer.
            class TestStreamDecoderBuffer : public ReaderWriterMixin<StreamDecoderBufferWriterHelper>,
                                            public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // StreamDecoder fixture: bytes fed one at a time.
            class TestStreamDecoderSmallChunks
                    : public ReaderWriterMixin<StreamDecoderSmallChunksWriterHelper>,
                      public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // StreamDecoder fixture: bytes fed as two uneven slices.
            class TestStreamDecoderLargeChunks
                    : public ReaderWriterMixin<StreamDecoderLargeChunksWriterHelper>,
                      public ::testing::TestWithParam<MakeRecordBatch *> {
            };

            // Build a 6-row batch with two dictionary columns that share the same
            // dictionary values but use different index types (int32 vs int8).
            turbo::Status MakeDictionaryBatch(std::shared_ptr<RecordBatch> *out) {
                auto dict_values = assert_array_from_json(utf8(), "[\"foo\", \"bar\", \"baz\"]");

                auto type_a = nebula::dictionary(int32(), utf8());
                auto type_b = nebula::dictionary(int8(), utf8());

                auto idx_a = assert_array_from_json(int32(), "[1, 2, null, 0, 2, 0]");
                auto idx_b = assert_array_from_json(int8(), "[0, 0, 2, 2, 1, 1]");

                auto col_a = std::make_shared<DictionaryArray>(type_a, idx_a, dict_values);
                auto col_b = std::make_shared<DictionaryArray>(type_b, idx_b, dict_values);

                // construct batch
                auto batch_schema = ::nebula::schema({field("dict1", type_a), field("dict2", type_b)});

                *out = RecordBatch::create(batch_schema, 6, {col_a, col_b});
                return turbo::OkStatus();
            }

// A utility that supports reading/writing record batches,
// and manually specifying dictionaries.
            class DictionaryBatchHelper {
            public:
                explicit DictionaryBatchHelper(const Schema &schema) : schema_(schema) {
                    buffer_ = *allocate_resizable_buffer(0);
                    sink_.reset(new io::BufferOutputStream(buffer_));
                    payload_writer_ = *internal::make_payload_stream_writer(sink_.get());
                }

                turbo::Status Start() {
                    TURBO_RETURN_NOT_OK(payload_writer_->start());

                    // write schema
                    IpcPayload payload;
                    DictionaryFieldMapper mapper(schema_);
                    TURBO_RETURN_NOT_OK(
                            get_schema_payload(schema_, IpcWriteOptions::defaults(), mapper, &payload));
                    return payload_writer_->write_payload(payload);
                }

                turbo::Status WriteDictionary(int64_t dictionary_id, const std::shared_ptr<Array> &dictionary,
                                       bool is_delta) {
                    IpcPayload payload;
                    TURBO_RETURN_NOT_OK(get_dictionary_payload(dictionary_id, is_delta, dictionary,
                                                       IpcWriteOptions::defaults(), &payload));
                    TURBO_RETURN_NOT_OK(payload_writer_->write_payload(payload));
                    return turbo::OkStatus();
                }

                turbo::Status WriteBatchPayload(const RecordBatch &batch) {
                    // write record batch payload only
                    IpcPayload payload;
                    TURBO_RETURN_NOT_OK(get_record_batch_payload(batch, IpcWriteOptions::defaults(), &payload));
                    return payload_writer_->write_payload(payload);
                }

                turbo::Status close() {
                    TURBO_RETURN_NOT_OK(payload_writer_->close());
                    return sink_->close();
                }

                turbo::Status ReadBatch(std::shared_ptr<RecordBatch> *out_batch) {
                    auto buf_reader = std::make_shared<io::BufferReader>(buffer_);
                    std::shared_ptr<RecordBatchReader> reader;
                    TURBO_MOVE_OR_RAISE(
                            reader, RecordBatchStreamReader::open(buf_reader, IpcReadOptions::defaults()));
                    return reader->read_next(out_batch);
                }

                std::unique_ptr<internal::IpcPayloadWriter> payload_writer_;
                const Schema &schema_;
                std::shared_ptr<ResizableBuffer> buffer_;
                std::unique_ptr<io::BufferOutputStream> sink_;
            };

            // Delta dictionaries: each id gets a base dictionary followed by a delta;
            // the concatenation must reproduce the full ["foo","bar","baz"] dictionary.
            TEST(TestDictionaryBatch, DictionaryDelta) {
                std::shared_ptr<RecordBatch> in_batch;
                std::shared_ptr<RecordBatch> out_batch;
                ASSERT_OK(MakeDictionaryBatch(&in_batch));

                auto dict1 = assert_array_from_json(utf8(), "[\"foo\", \"bar\"]");
                auto dict2 = assert_array_from_json(utf8(), "[\"baz\"]");

                DictionaryBatchHelper helper(*in_batch->schema());
                ASSERT_OK(helper.Start());

                // Base then delta for dictionary id 0 ...
                ASSERT_OK(helper.WriteDictionary(0L, dict1, /*is_delta=*/false));
                ASSERT_OK(helper.WriteDictionary(0L, dict2, /*is_delta=*/true));

                // ... and likewise for id 1.
                ASSERT_OK(helper.WriteDictionary(1L, dict1, /*is_delta=*/false));
                ASSERT_OK(helper.WriteDictionary(1L, dict2, /*is_delta=*/true));

                ASSERT_OK(helper.WriteBatchPayload(*in_batch));
                ASSERT_OK(helper.close());

                ASSERT_OK(helper.ReadBatch(&out_batch));

                ASSERT_BATCHES_EQUAL(*in_batch, *out_batch);
            }

            // A delta dictionary whose id has no prior base dictionary in the stream
            // must make reading fail with kNotFound.
            TEST(TestDictionaryBatch, DictionaryDeltaWithUnknownId) {
                std::shared_ptr<RecordBatch> in_batch;
                std::shared_ptr<RecordBatch> out_batch;
                ASSERT_OK(MakeDictionaryBatch(&in_batch));

                auto dict1 = assert_array_from_json(utf8(), "[\"foo\", \"bar\"]");
                auto dict2 = assert_array_from_json(utf8(), "[\"baz\"]");

                DictionaryBatchHelper helper(*in_batch->schema());
                ASSERT_OK(helper.Start());

                ASSERT_OK(helper.WriteDictionary(0L, dict1, /*is_delta=*/false));
                ASSERT_OK(helper.WriteDictionary(0L, dict2, /*is_delta=*/true));

                /* This delta dictionary does not have a base dictionary previously in stream */
                ASSERT_OK(helper.WriteDictionary(1L, dict2, /*is_delta=*/true));

                ASSERT_OK(helper.WriteBatchPayload(*in_batch));
                ASSERT_OK(helper.close());

                ASSERT_RAISES(turbo::StatusCode::kNotFound, helper.ReadBatch(&out_batch));
            }

            // Replacement dictionaries: writing a second non-delta dictionary under the
            // same id overwrites the first; the final dictionaries must win.
            TEST(TestDictionaryBatch, DictionaryReplacement) {
                std::shared_ptr<RecordBatch> in_batch;
                std::shared_ptr<RecordBatch> out_batch;
                ASSERT_OK(MakeDictionaryBatch(&in_batch));

                auto dict = assert_array_from_json(utf8(), "[\"foo\", \"bar\", \"baz\"]");
                auto dict1 = assert_array_from_json(utf8(), "[\"foo1\", \"bar1\", \"baz1\"]");
                auto dict2 = assert_array_from_json(utf8(), "[\"foo2\", \"bar2\", \"baz2\"]");

                DictionaryBatchHelper helper(*in_batch->schema());
                ASSERT_OK(helper.Start());

                // the old dictionaries will be overwritten by
                // the new dictionaries with the same ids.
                ASSERT_OK(helper.WriteDictionary(0L, dict1, /*is_delta=*/false));
                ASSERT_OK(helper.WriteDictionary(0L, dict, /*is_delta=*/false));

                ASSERT_OK(helper.WriteDictionary(1L, dict2, /*is_delta=*/false));
                ASSERT_OK(helper.WriteDictionary(1L, dict, /*is_delta=*/false));

                ASSERT_OK(helper.WriteBatchPayload(*in_batch));
                ASSERT_OK(helper.close());

                ASSERT_OK(helper.ReadBatch(&out_batch));

                ASSERT_BATCHES_EQUAL(*in_batch, *out_batch);
            }

            // Round-trip test bodies for every transport fixture; each simply runs the
            // mixin's TestRoundTripWithOptions against the parameterized batch factory.
            TEST_P(TestFileFormat, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            TEST_P(TestFileFormatGenerator, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            TEST_P(TestFileFormatGeneratorCoalesced, RoundTrip) {
                TestRoundTripWithOptions(*GetParam());
            }

            TEST_P(TestStreamFormat, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            TEST_P(TestStreamDecoderData, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            TEST_P(TestStreamDecoderBuffer, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            TEST_P(TestStreamDecoderSmallChunks, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            TEST_P(TestStreamDecoderLargeChunks, RoundTrip) { TestRoundTripWithOptions(*GetParam()); }

            // Instantiate each suite over the shared kBatchCases batch factories.
            INSTANTIATE_TEST_SUITE_P(GenericIpcRoundTripTests, TestIpcRoundTrip,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(FileRoundTripTests, TestFileFormat,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(FileRoundTripTests, TestFileFormatGenerator,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(FileRoundTripTests, TestFileFormatGeneratorCoalesced,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(StreamRoundTripTests, TestStreamFormat,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(StreamDecoderDataRoundTripTests, TestStreamDecoderData,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(StreamDecoderBufferRoundTripTests, TestStreamDecoderBuffer,
                                     ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(StreamDecoderSmallChunksRoundTripTests,
                                     TestStreamDecoderSmallChunks, ::testing::ValuesIn(kBatchCases));

            INSTANTIATE_TEST_SUITE_P(StreamDecoderLargeChunksRoundTripTests,
                                     TestStreamDecoderLargeChunks, ::testing::ValuesIn(kBatchCases));

            // Custom metadata passed to the file writer at init must round-trip
            // through the file footer.
            TEST(TestIpcFileFormat, FooterMetaData) {
                // ARROW-6837
                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK(MakeIntRecordBatch(&batch));

                auto metadata = key_value_metadata({"ARROW:example", "ARROW:example2"},
                                                   {"something something", "something something2"});

                FileWriterHelper helper;
                ASSERT_OK(helper.init(batch->schema(), IpcWriteOptions::defaults(), metadata));
                ASSERT_OK(helper.WriteBatch(batch));
                ASSERT_OK(helper.finish());

                ASSERT_OK_AND_ASSIGN(auto out_metadata, helper.ReadFooterMetadata());
                ASSERT_TRUE(out_metadata->equals(*metadata));
            }

            TEST(TestIpcFileFormat, ReaderToTable) {
                // Writing the same batch twice and reading the whole file back must
                // yield a table equal to concatenating the two batches directly.
                std::shared_ptr<RecordBatch> record_batch;
                ASSERT_OK(MakeIntRecordBatch(&record_batch));

                FileWriterHelper writer_helper;
                ASSERT_OK(writer_helper.init(record_batch->schema(), IpcWriteOptions::defaults()));
                for (int copy = 0; copy < 2; ++copy) {
                    ASSERT_OK(writer_helper.WriteBatch(record_batch));
                }
                ASSERT_OK(writer_helper.finish());

                ASSERT_OK_AND_ASSIGN(auto actual_table, writer_helper.ReadAll());
                ASSERT_OK_AND_ASSIGN(auto expected_table,
                                     Table::from_record_batches({record_batch, record_batch}));
                ASSERT_TABLES_EQUAL(*expected_table, *actual_table);
            }

            TEST_F(TestWriteRecordBatch, RawAndSerializedSizes) {
                // ARROW-8823: WriteStats must record total raw and serialized record
                // batch body sizes. Uncompressed, serialized >= raw (padding);
                // LZ4-compressed, serialized <= raw.
                FileWriterHelper helper;
                IpcWriteOptions options_uncompressed = IpcWriteOptions::defaults();

                // Loop-invariant: check codec availability and build the compressed
                // write options once instead of once per batch.
                const bool lz4_available = Codec::IsAvailable(CompressionType::LZ4_FRAME);
                IpcWriteOptions options_compressed = IpcWriteOptions::defaults();
                if (lz4_available) {
                    ASSERT_OK_AND_ASSIGN(options_compressed.codec,
                                         Codec::create(CompressionType::LZ4_FRAME));
                }

                std::vector<std::shared_ptr<RecordBatch>> batches(3);
                // empty record batch
                ASSERT_OK(MakeIntBatchSized(0, &batches[0]));
                // record batch with int values
                ASSERT_OK(MakeIntBatchSized(2000, &batches[1], 100));

                // record batch with DictionaryArray
                random::RandomArrayGenerator rg(/*seed=*/0);
                int64_t length = 500;
                int dict_size = 50;
                std::shared_ptr<Array> dict =
                        rg.String(dict_size, /*min_length=*/5, /*max_length=*/5, /*null_probability=*/0);
                std::shared_ptr<Array> indices =
                        rg.Int32(length, /*min=*/0, /*max=*/dict_size - 1, /*null_probability=*/0.1);
                auto dict_type = dictionary(int32(), utf8());
                auto dict_field = field("f1", dict_type);
                ASSERT_OK_AND_ASSIGN(auto dict_array,
                                     DictionaryArray::from_arrays(dict_type, indices, dict));

                auto schema = ::nebula::schema({field("f0", utf8()), dict_field});
                batches[2] =
                        RecordBatch::create(schema, length, {rg.String(500, 0, 10, 0.1), dict_array});

                for (const auto &batch : batches) {
                    // without compression
                    ASSERT_OK(helper.init(batch->schema(), options_uncompressed));
                    ASSERT_OK(helper.WriteBatch(batch));
                    ASSERT_OK(helper.finish());
                    // padding can make serialized data slightly larger than the raw data size
                    // when no compression is used
                    ASSERT_LE(helper.writer_->stats().total_raw_body_size,
                              helper.writer_->stats().total_serialized_body_size);

                    if (!lz4_available) {
                        continue;
                    }

                    // with compression
                    ASSERT_OK(helper.init(batch->schema(), options_compressed));
                    ASSERT_OK(helper.WriteBatch(batch));
                    ASSERT_OK(helper.finish());
                    ASSERT_GE(helper.writer_->stats().total_raw_body_size,
                              helper.writer_->stats().total_serialized_body_size);
                }
            }

// The following test deliberately leaves its buffer contents uninitialized,
// so it is compiled out under Valgrind and AddressSanitizer builds.

#if !(defined(NEBULA_VALGRIND) || defined(ADDRESS_SANITIZER))
            TEST_F(TestIpcRoundTrip, LargeRecordBatch) {
                // Round-trip a boolean batch whose row count exceeds INT32_MAX to
                // exercise 64-bit length handling in the IPC writer and reader.
                const int64_t length = static_cast<int64_t>(std::numeric_limits<int32_t>::max()) + 1;

                // Reserve + advance without writing: the buffer contents stay
                // uninitialized on purpose (this test is guarded against
                // Valgrind/ASAN builds for that reason).
                TypedBufferBuilder<bool> data_builder;
                ASSERT_OK(data_builder.Reserve(length));
                ASSERT_OK(data_builder.advance(length));
                ASSERT_EQ(data_builder.length(), length);
                ASSERT_OK_AND_ASSIGN(auto data, data_builder.finish());

                auto array = std::make_shared<BooleanArray>(length, data, nullptr, /*null_count=*/0);

                auto f0 = nebula::field("f0", array->type());
                std::vector<std::shared_ptr<Field>> fields = {f0};
                auto schema = std::make_shared<Schema>(fields);

                auto batch = RecordBatch::create(schema, length, {array});

                std::string path = "test-write-large-record_batch";

                // 512 MB
                constexpr int64_t kBufferSize = 1 << 29;
                ASSERT_OK_AND_ASSIGN(mmap_, io::MemoryMapFixture::InitMemoryMap(kBufferSize, path));

                ASSERT_OK_AND_ASSIGN(auto result, DoLargeRoundTrip(*batch, false));
                CheckReadResult(*result, *batch);

                ASSERT_EQ(length, result->num_rows());
            }

#endif

            // Dictionary round-trip coverage for every format/reader variant; the
            // shared logic lives in each fixture's TestDictionaryRoundtrip().
            TEST_F(TestStreamFormat, DictionaryRoundTrip) { TestDictionaryRoundtrip(); }

            TEST_F(TestFileFormat, DictionaryRoundTrip) { TestDictionaryRoundtrip(); }

            TEST_F(TestFileFormatGenerator, DictionaryRoundTrip) { TestDictionaryRoundtrip(); }

            TEST_F(TestFileFormatGeneratorCoalesced, DictionaryRoundTrip) {
                TestDictionaryRoundtrip();
            }

            // Writer edge-case coverage shared between file and stream formats:
            // writing after close, mismatched schemas, and per-batch custom metadata.
            TEST_F(TestFileFormat, WriteAfterClose) { TestWriteAfterClose(); }

            TEST_F(TestStreamFormat, WriteAfterClose) { TestWriteAfterClose(); }

            TEST_F(TestStreamFormat, DifferentSchema) { TestWriteDifferentSchema(); }

            TEST_F(TestFileFormat, BatchWithMetadata) { TestWriteBatchWithMetadata(); }

            TEST_F(TestStreamFormat, BatchWithMetadata) { TestWriteBatchWithMetadata(); }

            TEST_F(TestFileFormat, DifferentMetadataBatches) { TestWriteDifferentMetadata(); }

            TEST_F(TestStreamFormat, DifferentMetadataBatches) { TestWriteDifferentMetadata(); }

            TEST_F(TestFileFormat, DifferentSchema) { TestWriteDifferentSchema(); }

            TEST_F(TestFileFormatGenerator, DifferentSchema) { TestWriteDifferentSchema(); }

            TEST_F(TestFileFormatGeneratorCoalesced, DifferentSchema) { TestWriteDifferentSchema(); }

            // An output with zero record batches must still round-trip cleanly.
            TEST_F(TestStreamFormat, NoRecordBatches) { TestWriteNoRecordBatches(); }

            TEST_F(TestFileFormat, NoRecordBatches) { TestWriteNoRecordBatches(); }

            TEST_F(TestFileFormatGenerator, NoRecordBatches) { TestWriteNoRecordBatches(); }

            TEST_F(TestFileFormatGeneratorCoalesced, NoRecordBatches) { TestWriteNoRecordBatches(); }

            // Readers must support projecting a subset of the schema's fields.
            TEST_F(TestStreamFormat, ReadFieldSubset) { TestReadSubsetOfFields(); }

            TEST_F(TestFileFormat, ReadFieldSubset) { TestReadSubsetOfFields(); }

            TEST_F(TestFileFormatGenerator, ReadFieldSubset) { TestReadSubsetOfFields(); }

            TEST_F(TestFileFormatGeneratorCoalesced, ReadFieldSubset) { TestReadSubsetOfFields(); }

            TEST_F(TestFileFormatGeneratorCoalesced, Errors) {
                // Requesting a coalescing batch generator over a reader that does
                // not own its file must fail with an invalid-argument error.
                std::shared_ptr<RecordBatch> record_batch;
                ASSERT_OK(MakeIntRecordBatch(&record_batch));

                FileWriterHelper writer_helper;
                ASSERT_OK(writer_helper.init(record_batch->schema(), IpcWriteOptions::defaults()));
                ASSERT_OK(writer_helper.WriteBatch(record_batch));
                ASSERT_OK(writer_helper.finish());

                auto non_owning_source = std::make_shared<NoZeroCopyBufferReader>(writer_helper.buffer_);
                ASSERT_OK_AND_ASSIGN(auto file_reader,
                                     RecordBatchFileReader::open(non_owning_source.get()));

                ASSERT_RAISES_WITH_MESSAGE(turbo::StatusCode::kInvalidArgument, "Cannot coalesce without an owned file",
                                           file_reader->get_record_batch_generator(/*coalesce=*/true));
            }

            TEST(TestRecordBatchStreamReader, EmptyStreamWithDictionaries) {
                // ARROW-6006: a stream whose schema has a dictionary field but which
                // contains zero batches must open and then report end-of-stream.
                auto dict_field = nebula::field("f0", nebula::dictionary(nebula::int8(), nebula::utf8()));
                auto stream_schema = nebula::schema({dict_field});

                ASSERT_OK_AND_ASSIGN(auto sink, io::BufferOutputStream::create(0));

                ASSERT_OK_AND_ASSIGN(auto stream_writer, make_stream_writer(sink, stream_schema));
                ASSERT_OK(stream_writer->close());

                ASSERT_OK_AND_ASSIGN(auto payload, sink->finish());
                io::BufferReader payload_reader(payload);
                std::shared_ptr<RecordBatchReader> stream_reader;
                ASSERT_OK_AND_ASSIGN(stream_reader, RecordBatchStreamReader::open(&payload_reader));

                // read_next() yields a null batch: the stream is empty.
                std::shared_ptr<RecordBatch> next_batch;
                ASSERT_OK(stream_reader->read_next(&next_batch));
                ASSERT_EQ(nullptr, next_batch);
            }

// Delimit IPC stream messages and reassemble with the indicated messages
// included. This way we can remove messages from an IPC stream to test
// different failure modes or other difficult-to-test behaviors
            void SpliceMessages(std::shared_ptr<Buffer> stream,
                                const std::vector<int> &included_indices,
                                std::shared_ptr<Buffer> *spliced_stream) {
                // Re-emit only the messages whose position in `stream` appears in
                // `included_indices`, preserving the original message order. Used to
                // drop selected messages from an IPC stream to provoke error paths.
                ASSERT_OK_AND_ASSIGN(auto out, io::BufferOutputStream::create(0));

                io::BufferReader buffer_reader(stream);
                std::unique_ptr<MessageReader> reader = MessageReader::open(&buffer_reader);

                int next_index = 0;
                while (true) {
                    std::unique_ptr<Message> message;
                    ASSERT_OK_AND_ASSIGN(message, reader->ReadNextMessage());
                    if (!message) {
                        break;  // end of stream
                    }

                    // Index counts every message read, including dropped ones.
                    const int current_index = next_index++;
                    const bool keep = std::find(included_indices.begin(), included_indices.end(),
                                                current_index) != included_indices.end();
                    if (!keep) {
                        continue;
                    }

                    // Rebuild an IPC payload from the parsed message and write it out.
                    IpcWriteOptions options;
                    IpcPayload payload;
                    payload.type = message->type();
                    payload.metadata = message->metadata();
                    payload.body_buffers.push_back(message->body());
                    payload.body_length = message->body()->size();
                    int32_t unused_metadata_length = -1;
                    ASSERT_OK(ipc::write_ipc_payload(payload, options, out.get(), &unused_metadata_length));
                }
                ASSERT_OK_AND_ASSIGN(*spliced_stream, out->finish());
            }

            TEST(TestRecordBatchStreamReader, NotEnoughDictionaries) {
                // ARROW-6126: streams missing expected dictionary messages must fail
                // with a descriptive error rather than crashing or mis-reading.
                std::shared_ptr<RecordBatch> batch;
                ASSERT_OK(MakeDictionaryFlat(&batch));

                ASSERT_OK_AND_ASSIGN(auto out, io::BufferOutputStream::create(0));
                ASSERT_OK_AND_ASSIGN(auto writer, make_stream_writer(out, batch->schema()));
                ASSERT_OK(writer->write_record_batch(*batch));
                ASSERT_OK(writer->close());

                // Now let's mangle the stream a little bit and make sure we return the right
                // error
                ASSERT_OK_AND_ASSIGN(auto buffer, out->finish());

                // Opens a stream reader on `stream` and attempts one batch read.
                auto read = [](std::shared_ptr<Buffer> stream) -> turbo::Status {
                    io::BufferReader reader(stream);
                    TURBO_MOVE_OR_RAISE(auto ipc_reader, RecordBatchStreamReader::open(&reader));
                    std::shared_ptr<RecordBatch> batch;
                    return ipc_reader->read_next(&batch);
                };

                // Stream terminates before reading all dictionaries
                // (keep only schema + one dictionary message; the batch expects 3).
                std::shared_ptr<Buffer> truncated_stream;
                SpliceMessages(buffer, {0, 1}, &truncated_stream);
                ASSERT_RAISES_WITH_MESSAGE(turbo::StatusCode::kInvalidArgument,
                                           "IPC stream ended without "
                                           "reading the expected number (3) of dictionaries",
                                           read(truncated_stream));

                // One of the dictionaries is missing, then we see a record batch
                // (message 3 dropped, message 4 is the record batch).
                SpliceMessages(buffer, {0, 1, 2, 4}, &truncated_stream);
                ASSERT_RAISES_WITH_MESSAGE(turbo::StatusCode::kInvalidArgument,
                                           "IPC stream did not have "
                                           "the expected number (3) of dictionaries "
                                           "at the start of the stream",
                                           read(truncated_stream));
            }

            TEST(TestRecordBatchStreamReader, MalformedInput) {
                // Opening a stream reader on empty or garbage bytes must fail with
                // an invalid-argument error instead of crashing.
                const std::string empty_str = "";
                auto empty = std::make_shared<Buffer>(empty_str);
                io::BufferReader empty_reader(empty);
                ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, RecordBatchStreamReader::open(&empty_reader));

                const std::string garbage_str = "12345678";
                auto garbage = std::make_shared<Buffer>(garbage_str);
                io::BufferReader garbage_reader(garbage);
                ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, RecordBatchStreamReader::open(&garbage_reader));
            }

            // CollectListener variant that resets its decoder whenever the stream
            // reaches end-of-stream, allowing one decoder to consume input that
            // contains several concatenated IPC streams.
            class EndlessCollectListener : public CollectListener {
            public:
                EndlessCollectListener() = default;

                // Must be called before decoding starts; the listener does not own
                // the decoder.
                void SetDecoder(StreamDecoder *decoder) { decoder_ = decoder; }

                // On EOS, rearm the decoder so it can accept another stream.
                turbo::Status on_eos() override { return decoder_->reset(); }

            private:
                StreamDecoder *decoder_ = nullptr;
            };

            TEST(TestStreamDecoder, reset) {
                // After reset() (triggered on EOS by EndlessCollectListener), the
                // decoder must be able to consume further complete streams.
                auto collect_listener = std::make_shared<EndlessCollectListener>();
                StreamDecoder decoder(collect_listener);
                collect_listener->SetDecoder(&decoder);

                std::shared_ptr<RecordBatch> record_batch;
                ASSERT_OK(MakeIntRecordBatch(&record_batch));
                StreamWriterHelper writer_helper;
                ASSERT_OK(writer_helper.init(record_batch->schema(), IpcWriteOptions::defaults()));
                ASSERT_OK(writer_helper.WriteBatch(record_batch));
                ASSERT_OK(writer_helper.finish());

                // Two back-to-back copies of the same single-batch stream.
                ASSERT_OK_AND_ASSIGN(auto doubled_buffer,
                                     concatenate_buffers({writer_helper.buffer_,
                                                          writer_helper.buffer_}));
                // Consume via Buffer: both streams decode, yielding two batches.
                ASSERT_OK(decoder.consume(doubled_buffer));
                ASSERT_EQ(2, collect_listener->num_record_batches());

                // Consume the same bytes again via raw pointer: two more batches.
                ASSERT_OK(decoder.consume(doubled_buffer->data(), doubled_buffer->size()));
                ASSERT_EQ(4, collect_listener->num_record_batches());
            }

            TEST(TestStreamDecoder, NextRequiredSize) {
                // Feeding a single byte must shrink next_required_size() by exactly one.
                auto listener = std::make_shared<CollectListener>();
                StreamDecoder decoder(listener);
                const auto initial_required = decoder.next_required_size();
                const uint8_t one_byte[1] = {0};
                ASSERT_OK(decoder.consume(one_byte, 1));
                ASSERT_EQ(initial_required - 1, decoder.next_required_size());
            }

            template<typename WriterHelperType>
            class TestDictionaryReplacement : public ::testing::Test {
            public:
                using WriterHelper = WriterHelperType;

                // Two batches that share the very same dictionary array must emit a
                // single dictionary batch and no replacements or deltas.
                void TestSameDictPointer() {
                    auto dict_type = dictionary(int8(), utf8());
                    auto shared_values = assert_array_from_json(utf8(), R"(["foo", "bar", "quux"])");
                    auto first_batch = MakeBatch(dict_type,
                                                 assert_array_from_json(int8(), "[0, 2, null, 1]"),
                                                 shared_values);
                    auto second_batch = MakeBatch(dict_type,
                                                  assert_array_from_json(int8(), "[1, 0, 0]"),
                                                  shared_values);
                    CheckRoundtrip({first_batch, second_batch});

                    EXPECT_EQ(read_stats_.num_messages, 4);  // including schema message
                    EXPECT_EQ(read_stats_.num_record_batches, 2);
                    EXPECT_EQ(read_stats_.num_dictionary_batches, 1);
                    EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                    EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                }

                // Two distinct dictionary arrays with equal contents must still be
                // written as a single dictionary batch (no replacement needed).
                void TestSameDictValues() {
                    auto dict_type = dictionary(int8(), utf8());
                    // create two separate dictionaries, but with the same contents
                    auto first_batch =
                            MakeBatch(assert_array_from_json(dict_type, R"(["foo", "foo", "bar", null])"));
                    auto second_batch =
                            MakeBatch(assert_array_from_json(dict_type, R"(["foo", "bar", "foo"])"));
                    CheckRoundtrip({first_batch, second_batch});

                    EXPECT_EQ(read_stats_.num_messages, 4);  // including schema message
                    EXPECT_EQ(read_stats_.num_record_batches, 2);
                    EXPECT_EQ(read_stats_.num_dictionary_batches, 1);
                    EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                    EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                }

                // Exercises dictionary replacement vs. delta emission across four
                // batches whose dictionaries grow (deltas) and then diverge
                // (replacement). NOTE: this method mutates write_options_ between
                // phases, so the three round trips below are order-dependent.
                void TestDeltaDict() {
                    auto type = dictionary(int8(), utf8());
                    auto batch1 = MakeBatch(assert_array_from_json(type, R"(["foo", "foo", "bar", null])"));
                    // Potential delta
                    auto batch2 = MakeBatch(assert_array_from_json(type, R"(["foo", "bar", "quux", "foo"])"));
                    // Potential delta
                    auto batch3 =
                            MakeBatch(assert_array_from_json(type, R"(["foo", "bar", "quux", "zzz", "foo"])"));
                    auto batch4 = MakeBatch(assert_array_from_json(type, R"(["bar", null, "quux", "foo"])"));
                    RecordBatchVector batches{batch1, batch2, batch3, batch4};
                    RecordBatchVector only_deltas{batch1, batch2, batch3};

                    // Emit replacements (the file format rejects them: writing fails
                    // at the second batch).
                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingFails(batches, 1);
                    } else {
                        CheckRoundtrip(batches);
                        EXPECT_EQ(read_stats_.num_messages, 9);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 4);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 4);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 3);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    }

                    // Emit deltas
                    write_options_.emit_dictionary_deltas = true;
                    if (WriterHelper::kIsFileFormat) {
                        // batch4 is incompatible with the previous batches and would emit
                        // a replacement
                        CheckWritingFails(batches, 3);
                    } else {
                        CheckRoundtrip(batches);
                        EXPECT_EQ(read_stats_.num_messages, 9);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 4);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 4);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 2);
                    }

                    // Pure-delta sequence round-trips in both formats; the file
                    // format reads back the expanded (concatenated) dictionary.
                    CheckRoundtrip(only_deltas,
                            /*expect_expanded_dictionary=*/WriterHelper::kIsFileFormat);
                    EXPECT_EQ(read_stats_.num_messages, 7);  // including schema message
                    EXPECT_EQ(read_stats_.num_record_batches, 3);
                    EXPECT_EQ(read_stats_.num_dictionary_batches, 3);
                    EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                    EXPECT_EQ(read_stats_.num_dictionary_deltas, 2);

                    // IPC file format: write_table should unify dicts
                    RecordBatchVector actual;
                    write_options_.unify_dictionaries = true;
                    ASSERT_OK(RoundTripTable(batches, &actual));
                    if (WriterHelper::kIsFileFormat) {
                        EXPECT_EQ(read_stats_.num_messages, 6);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 4);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 1);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                        CheckBatchesLogical(batches, actual);
                    } else {
                        EXPECT_EQ(read_stats_.num_messages, 9);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 4);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 4);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 2);
                        CheckBatches(batches, actual);
                    }
                }

                // Nested dictionaries with equal values: round-trips without
                // replacement, but dictionary unification for the file format is
                // unimplemented for nested dictionaries and must fail explicitly.
                void TestSameDictValuesNested() {
                    auto batches = SameValuesNestedDictBatches();
                    CheckRoundtrip(batches);

                    EXPECT_EQ(read_stats_.num_messages, 5);  // including schema message
                    EXPECT_EQ(read_stats_.num_record_batches, 2);
                    EXPECT_EQ(read_stats_.num_dictionary_batches, 2);
                    EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                    EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);

                    write_options_.unify_dictionaries = true;
                    CheckRoundtrip(batches);
                    if (WriterHelper::kIsFileFormat) {
                        // This fails because unification of nested dictionaries is not supported.
                        // However, perhaps this should work because the dictionaries are simply equal.
                        CheckWritingTableFails(batches, turbo::StatusCode::kUnimplemented);
                    } else {
                        CheckRoundtripTable(batches);
                    }
                }

                // Batches with differing dictionaries: the stream format emits
                // replacements; the file format rejects per-batch writes but can
                // unify dictionaries when writing a whole table.
                void TestDifferentDictValues() {
                    if (WriterHelper::kIsFileFormat) {
                        // File format forbids replacements: fails at the second batch.
                        CheckWritingFails(DifferentOrderDictBatches(), 1);
                        CheckWritingFails(DifferentValuesDictBatches(), 1);
                    } else {
                        CheckRoundtrip(DifferentOrderDictBatches());

                        EXPECT_EQ(read_stats_.num_messages, 5);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 2);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);

                        CheckRoundtrip(DifferentValuesDictBatches());

                        EXPECT_EQ(read_stats_.num_messages, 5);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 2);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    }

                    // Same, but single-shot table write
                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingTableFails(DifferentOrderDictBatches());
                        CheckWritingTableFails(DifferentValuesDictBatches());

                        write_options_.unify_dictionaries = true;
                        // Will unify dictionaries
                        CheckRoundtripTable(DifferentOrderDictBatches());

                        EXPECT_EQ(read_stats_.num_messages, 4);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 1);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);

                        CheckRoundtripTable(DifferentValuesDictBatches());

                        EXPECT_EQ(read_stats_.num_messages, 4);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 1);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 0);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    } else {
                        CheckRoundtripTable(DifferentOrderDictBatches());

                        EXPECT_EQ(read_stats_.num_messages, 5);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 2);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);

                        CheckRoundtripTable(DifferentValuesDictBatches());

                        EXPECT_EQ(read_stats_.num_messages, 5);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 2);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    }
                }

                // Differing nested dictionaries: the file format rejects them in
                // every mode (replacement forbidden, nested unification
                // unimplemented); the stream format emits replacements.
                void TestDifferentDictValuesNested() {
                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingFails(DifferentValuesNestedDictBatches1(), 1);
                        CheckWritingFails(DifferentValuesNestedDictBatches2(), 1);
                        CheckWritingTableFails(DifferentValuesNestedDictBatches1());
                        CheckWritingTableFails(DifferentValuesNestedDictBatches2());

                        write_options_.unify_dictionaries = true;
                        CheckWritingFails(DifferentValuesNestedDictBatches1(), 1);
                        CheckWritingFails(DifferentValuesNestedDictBatches2(), 1);
                        CheckWritingTableFails(DifferentValuesNestedDictBatches1(),
                                               turbo::StatusCode::kUnimplemented);
                        CheckWritingTableFails(DifferentValuesNestedDictBatches2(),
                                               turbo::StatusCode::kUnimplemented);
                        return;
                    }
                    CheckRoundtrip(DifferentValuesNestedDictBatches1());

                    EXPECT_EQ(read_stats_.num_messages, 7);  // including schema message
                    EXPECT_EQ(read_stats_.num_record_batches, 2);
                    // Both inner and outer dict were replaced
                    EXPECT_EQ(read_stats_.num_dictionary_batches, 4);
                    EXPECT_EQ(read_stats_.num_replaced_dictionaries, 2);
                    EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);

                    CheckRoundtrip(DifferentValuesNestedDictBatches2());

                    EXPECT_EQ(read_stats_.num_messages, 6);  // including schema message
                    EXPECT_EQ(read_stats_.num_record_batches, 2);
                    // Only inner dict was replaced
                    EXPECT_EQ(read_stats_.num_dictionary_batches, 3);
                    EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                    EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                }

                // Outer dictionary of a nested dictionary type grows between
                // batches; the writer must emit a replacement (never a delta) for
                // the outer level, as the read path cannot apply outer deltas.
                void TestDeltaDictNestedOuter() {
                    // Outer dict changes, inner dict remains the same
                    auto value_type = list(dictionary(int8(), utf8()));
                    auto type = dictionary(int8(), value_type);
                    // Inner dict: ["a", "b"]
                    auto batch1_values = assert_array_from_json(value_type, R"([["a"], ["b"]])");
                    // Potential delta
                    auto batch2_values = assert_array_from_json(value_type, R"([["a"], ["b"], ["a", "a"]])");
                    auto batch1 = MakeBatch(type, assert_array_from_json(int8(), "[1, 0, 1]"), batch1_values);
                    auto batch2 =
                            MakeBatch(type, assert_array_from_json(int8(), "[2, null, 0, 0]"), batch2_values);
                    RecordBatchVector batches{batch1, batch2};

                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingFails(batches, 1);
                    } else {
                        CheckRoundtrip(batches);
                        EXPECT_EQ(read_stats_.num_messages, 6);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 3);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    }

                    write_options_.emit_dictionary_deltas = true;
                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingFails(batches, 1);
                    } else {
                        // Outer dict deltas are not emitted as the read path doesn't support them
                        CheckRoundtrip(batches);
                        EXPECT_EQ(read_stats_.num_messages, 6);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 2);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 3);
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 1);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    }
                }

                // Inner dictionary of a nested dictionary type grows (potential
                // deltas) and finally reorders (forced replacement); checks how many
                // replacements vs. deltas the writer emits with and without
                // emit_dictionary_deltas.
                void TestDeltaDictNestedInner() {
                    // Inner dict changes
                    auto value_type = list(dictionary(int8(), utf8()));
                    auto type = dictionary(int8(), value_type);
                    // Inner dict: ["a"]
                    auto batch1_values = assert_array_from_json(value_type, R"([["a"]])");
                    // Inner dict: ["a", "b"] => potential delta
                    auto batch2_values = assert_array_from_json(value_type, R"([["a"], ["b"], ["a", "a"]])");
                    // Inner dict: ["a", "b", "c"] => potential delta
                    auto batch3_values = assert_array_from_json(value_type, R"([["a"], ["b"], ["c"]])");
                    // Inner dict: ["a", "b", "c"]
                    auto batch4_values = assert_array_from_json(value_type, R"([["a"], ["b", "c"]])");
                    // Inner dict: ["a", "c", "b"] => replacement
                    auto batch5_values = assert_array_from_json(value_type, R"([["a"], ["c"], ["b"]])");
                    auto batch1 = MakeBatch(type, assert_array_from_json(int8(), "[0, null, 0]"), batch1_values);
                    auto batch2 = MakeBatch(type, assert_array_from_json(int8(), "[1, 0, 2]"), batch2_values);
                    auto batch3 = MakeBatch(type, assert_array_from_json(int8(), "[1, 0, 2]"), batch3_values);
                    auto batch4 = MakeBatch(type, assert_array_from_json(int8(), "[1, 0, null]"), batch4_values);
                    auto batch5 = MakeBatch(type, assert_array_from_json(int8(), "[1, 0, 2]"), batch5_values);
                    RecordBatchVector batches{batch1, batch2, batch3, batch4, batch5};

                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingFails(batches, 1);
                    } else {
                        CheckRoundtrip(batches);
                        EXPECT_EQ(read_stats_.num_messages, 15);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 5);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 9);  // 4 inner + 5 outer
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 7);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 0);
                    }

                    write_options_.emit_dictionary_deltas = true;
                    if (WriterHelper::kIsFileFormat) {
                        CheckWritingFails(batches, 1);
                    } else {
                        CheckRoundtrip(batches);
                        EXPECT_EQ(read_stats_.num_messages, 15);  // including schema message
                        EXPECT_EQ(read_stats_.num_record_batches, 5);
                        EXPECT_EQ(read_stats_.num_dictionary_batches, 9);  // 4 inner + 5 outer
                        EXPECT_EQ(read_stats_.num_replaced_dictionaries, 5);
                        EXPECT_EQ(read_stats_.num_dictionary_deltas, 2);
                    }
                }

                // Write all batches through WriterHelper, read them back into
                // *out_batches and fully validate every decoded batch.
                turbo::Status RoundTrip(const RecordBatchVector &in_batches, RecordBatchVector *out_batches) {
                    WriterHelper helper;
                    TURBO_RETURN_NOT_OK(helper.init(in_batches[0]->schema(), write_options_));
                    for (size_t i = 0; i < in_batches.size(); ++i) {
                        TURBO_RETURN_NOT_OK(helper.WriteBatch(in_batches[i]));
                    }
                    TURBO_RETURN_NOT_OK(helper.finish(&write_stats_));
                    TURBO_RETURN_NOT_OK(helper.ReadBatches(read_options_, out_batches, &read_stats_));
                    for (const auto &decoded: *out_batches) {
                        TURBO_RETURN_NOT_OK(decoded->validate_full());
                    }
                    return turbo::OkStatus();
                }

                // Write all batches as a single table, then read back into
                // *out_batches and fully validate every decoded batch.
                turbo::Status RoundTripTable(const RecordBatchVector &in_batches,
                                      RecordBatchVector *out_batches) {
                    WriterHelper helper;
                    TURBO_RETURN_NOT_OK(helper.init(in_batches[0]->schema(), write_options_));
                    // write_table is different from a series of WriteBatch for RecordBatchFileWriter
                    TURBO_RETURN_NOT_OK(helper.write_table(in_batches));
                    TURBO_RETURN_NOT_OK(helper.finish(&write_stats_));
                    TURBO_RETURN_NOT_OK(helper.ReadBatches(read_options_, out_batches, &read_stats_));
                    for (size_t i = 0; i < out_batches->size(); ++i) {
                        TURBO_RETURN_NOT_OK((*out_batches)[i]->validate_full());
                    }
                    return turbo::OkStatus();
                }

                void CheckBatches(const RecordBatchVector &expected, const RecordBatchVector &actual) {
                    ASSERT_EQ(expected.size(), actual.size());
                    for (size_t i = 0; i < expected.size(); ++i) {
                        AssertBatchesEqual(*expected[i], *actual[i]);
                    }
                }

                // Check that batches are logically equal, even if e.g. dictionaries
                // are different.
                void CheckBatchesLogical(const RecordBatchVector &expected,
                                         const RecordBatchVector &actual) {
                    ASSERT_OK_AND_ASSIGN(auto expected_table, Table::from_record_batches(expected));
                    ASSERT_OK_AND_ASSIGN(auto actual_table, Table::from_record_batches(actual));
                    ASSERT_OK_AND_ASSIGN(expected_table, expected_table->combine_chunks());
                    ASSERT_OK_AND_ASSIGN(actual_table, actual_table->combine_chunks());
                    AssertTablesEqual(*expected_table, *actual_table);
                }

                // Rebuild every input batch so each dictionary column refers to the
                // "full" dictionary taken from the last batch; indices are untouched.
                // Assumes every column is a DictionaryArray (checked_pointer_cast).
                RecordBatchVector ExpandDictionaries(const RecordBatchVector &in_batches) {
                    RecordBatchVector out;
                    out.reserve(in_batches.size());

                    // Collect the final dictionary of each column from the last batch.
                    ArrayVector full_dictionaries;
                    const auto &last_batch = in_batches.back();
                    full_dictionaries.reserve(last_batch->num_columns());
                    for (const auto &column: last_batch->columns()) {
                        const auto &dict_array =
                                turbo::checked_pointer_cast<DictionaryArray>(column);
                        full_dictionaries.push_back(dict_array->dictionary());
                    }

                    for (const auto &batch: in_batches) {
                        ArrayVector expanded_columns;
                        expanded_columns.reserve(batch->num_columns());
                        for (int col_index = 0; col_index < batch->num_columns(); col_index++) {
                            const auto &dict_array = turbo::checked_pointer_cast<DictionaryArray>(
                                    batch->column(col_index));
                            // Same type and indices, but with the full dictionary.
                            expanded_columns.push_back(std::make_shared<DictionaryArray>(
                                    dict_array->type(), dict_array->indices(),
                                    full_dictionaries[col_index]));
                        }
                        out.push_back(
                                RecordBatch::create(batch->schema(), batch->num_rows(), expanded_columns));
                    }
                    return out;
                }

                void CheckRoundtrip(const RecordBatchVector &in_batches,
                                    bool expect_expanded_dictionary = false) {
                    RecordBatchVector out_batches;
                    ASSERT_OK(RoundTrip(in_batches, &out_batches));
                    CheckStatsConsistent();
                    if (expect_expanded_dictionary) {
                        CheckBatches(ExpandDictionaries(in_batches), out_batches);
                    } else {
                        CheckBatches(in_batches, out_batches);
                    }
                }

                void CheckRoundtripTable(const RecordBatchVector &in_batches) {
                    RecordBatchVector out_batches;
                    ASSERT_OK(RoundTripTable(in_batches, &out_batches));
                    CheckStatsConsistent();
                    CheckBatchesLogical(in_batches, out_batches);
                }

                void CheckWritingFails(const RecordBatchVector &in_batches, size_t fails_at_batch_num) {
                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(in_batches[0]->schema(), write_options_));
                    for (size_t i = 0; i < fails_at_batch_num; ++i) {
                        ASSERT_OK(writer_helper.WriteBatch(in_batches[i]));
                    }
                    ASSERT_RAISES(turbo::StatusCode::kInvalidArgument, writer_helper.WriteBatch(in_batches[fails_at_batch_num]));
                }

                void CheckWritingTableFails(const RecordBatchVector &in_batches,
                                            turbo::StatusCode expected_error = turbo::StatusCode::kInvalidArgument) {
                    WriterHelper writer_helper;
                    ASSERT_OK(writer_helper.init(in_batches[0]->schema(), write_options_));
                    auto st = writer_helper.write_table(in_batches);
                    ASSERT_FALSE(st.ok());
                    ASSERT_EQ(static_cast<turbo::StatusCode>(st.raw_code()), expected_error);
                }

                // The reader-side statistics must mirror the writer-side ones; any
                // mismatch indicates the read and write paths diverged.
                void CheckStatsConsistent() {
                    ASSERT_EQ(read_stats_.num_messages, write_stats_.num_messages);
                    ASSERT_EQ(read_stats_.num_record_batches, write_stats_.num_record_batches);
                    ASSERT_EQ(read_stats_.num_dictionary_batches, write_stats_.num_dictionary_batches);
                    ASSERT_EQ(read_stats_.num_replaced_dictionaries,
                              write_stats_.num_replaced_dictionaries);
                    ASSERT_EQ(read_stats_.num_dictionary_deltas, write_stats_.num_dictionary_deltas);
                }

                // create two separate dictionaries with different order
                RecordBatchVector DifferentOrderDictBatches() {
                    auto dict_type = dictionary(int8(), utf8());
                    RecordBatchVector batches;
                    batches.push_back(MakeBatch(assert_array_from_json(dict_type, R"(["foo", "foo", "bar", null])")));
                    batches.push_back(MakeBatch(assert_array_from_json(dict_type, R"(["bar", "bar", "foo"])")));
                    return batches;
                }

                // create two separate dictionaries with different values
                RecordBatchVector DifferentValuesDictBatches() {
                    auto dict_type = dictionary(int8(), utf8());
                    RecordBatchVector batches;
                    batches.push_back(MakeBatch(assert_array_from_json(dict_type, R"(["foo", "foo", "bar", null])")));
                    batches.push_back(MakeBatch(assert_array_from_json(dict_type, R"(["bar", "quux", "quux"])")));
                    return batches;
                }

                // Two batches whose nested dictionaries are built separately but
                // hold identical values.
                RecordBatchVector SameValuesNestedDictBatches() {
                    auto inner_type = list(dictionary(int8(), utf8()));
                    auto outer_type = dictionary(int8(), inner_type);
                    const char *values_json = R"([[], ["a"], ["b"], ["a", "a"]])";
                    auto batch1 = MakeBatch(outer_type, assert_array_from_json(int8(), "[1, 3, 0, 3]"),
                                            assert_array_from_json(inner_type, values_json));
                    auto batch2 = MakeBatch(outer_type, assert_array_from_json(int8(), "[2, null, 2]"),
                                            assert_array_from_json(inner_type, values_json));
                    return {batch1, batch2};
                }

                // Inner dictionary values differ
                RecordBatchVector DifferentValuesNestedDictBatches1() {
                    auto inner_type = list(dictionary(int8(), utf8()));
                    auto outer_type = dictionary(int8(), inner_type);
                    auto values1 = assert_array_from_json(inner_type, R"([[], ["a"], ["b"], ["a", "a"]])");
                    auto values2 = assert_array_from_json(inner_type, R"([[], ["a"], ["c"], ["a", "a"]])");
                    return {MakeBatch(outer_type, assert_array_from_json(int8(), "[1, 3, 0, 3]"), values1),
                            MakeBatch(outer_type, assert_array_from_json(int8(), "[2, null, 2]"), values2)};
                }

                // Outer dictionary values differ
                RecordBatchVector DifferentValuesNestedDictBatches2() {
                    auto inner_type = list(dictionary(int8(), utf8()));
                    auto outer_type = dictionary(int8(), inner_type);
                    auto values1 = assert_array_from_json(inner_type, R"([[], ["a"], ["b"], ["a", "a"]])");
                    auto values2 = assert_array_from_json(inner_type, R"([["a"], ["b"], ["a", "a"]])");
                    return {MakeBatch(outer_type, assert_array_from_json(int8(), "[1, 3, 0, 3]"), values1),
                            MakeBatch(outer_type, assert_array_from_json(int8(), "[2, null, 2]"), values2)};
                }

                // Make one-column batch holding `column` under field name "f".
                std::shared_ptr<RecordBatch> MakeBatch(std::shared_ptr<Array> column) {
                    auto one_field_schema = schema({field("f", column->type())});
                    const int64_t length = column->length();
                    return RecordBatch::create(std::move(one_field_schema), length, {column});
                }

                // Make one-column batch with a dictionary array assembled from the
                // given type, indices and dictionary values.
                std::shared_ptr<RecordBatch> MakeBatch(std::shared_ptr<DataType> type,
                                                       std::shared_ptr<Array> indices,
                                                       std::shared_ptr<Array> dictionary) {
                    auto dict_column = *DictionaryArray::from_arrays(
                            std::move(type), std::move(indices), std::move(dictionary));
                    return MakeBatch(std::move(dict_column));
                }

            protected:
                // Options used by RoundTrip()/RoundTripTable() when writing/reading.
                IpcWriteOptions write_options_ = IpcWriteOptions::defaults();
                IpcReadOptions read_options_ = IpcReadOptions::defaults();
                // Statistics from the last roundtrip; cross-checked by CheckStatsConsistent().
                WriteStats write_stats_;
                ReadStats read_stats_;
            };

            // Instantiate the dictionary-replacement tests for both stream writer
            // helpers and the file writer helper (the file format paths expect
            // writing to fail when a dictionary would be replaced).
            using DictionaryReplacementTestTypes =
                    ::testing::Types<StreamWriterHelper, StreamDecoderBufferWriterHelper,
                            FileWriterHelper>;

            TYPED_TEST_SUITE(TestDictionaryReplacement, DictionaryReplacementTestTypes);

            TYPED_TEST(TestDictionaryReplacement, SameDictPointer) { this->TestSameDictPointer(); }

            TYPED_TEST(TestDictionaryReplacement, SameDictValues) { this->TestSameDictValues(); }

            TYPED_TEST(TestDictionaryReplacement, DeltaDict) { this->TestDeltaDict(); }

            TYPED_TEST(TestDictionaryReplacement, SameDictValuesNested) {
                this->TestSameDictValuesNested();
            }

            TYPED_TEST(TestDictionaryReplacement, DifferentDictValues) {
                this->TestDifferentDictValues();
            }

            TYPED_TEST(TestDictionaryReplacement, DifferentDictValuesNested) {
                this->TestDifferentDictValuesNested();
            }

            TYPED_TEST(TestDictionaryReplacement, DeltaDictNestedOuter) {
                this->TestDeltaDictNestedOuter();
            }

            TYPED_TEST(TestDictionaryReplacement, DeltaDictNestedInner) {
                this->TestDeltaDictNestedInner();
            }

// ----------------------------------------------------------------------
// Miscellanea

            TEST(FieldPosition, Basics) {
                // A default-constructed position has an empty path; each child
                // appends its index to the parent's path.
                FieldPosition root;
                ASSERT_EQ(root.path(), std::vector<int>{});
                {
                    auto first_child = root.child(6);
                    ASSERT_EQ(first_child.path(), std::vector<int>{6});
                    auto grandchild = first_child.child(42);
                    ASSERT_EQ(grandchild.path(), (std::vector<int>{6, 42}));
                }
                {
                    auto second_child = root.child(12);
                    ASSERT_EQ(second_child.path(), std::vector<int>{12});
                }
            }

            TEST(DictionaryFieldMapper, Basics) {
                // Maps field paths (chains of child indices) to dictionary field ids.
                DictionaryFieldMapper mapper;

                ASSERT_EQ(mapper.num_fields(), 0);

                ASSERT_OK(mapper.add_field(42, {0, 1}));
                ASSERT_OK(mapper.add_field(43, {0, 2}));
                ASSERT_OK(mapper.add_field(44, {0, 1, 3}));
                ASSERT_EQ(mapper.num_fields(), 3);

                ASSERT_OK_AND_EQ(42, mapper.GetFieldId({0, 1}));
                ASSERT_OK_AND_EQ(43, mapper.GetFieldId({0, 2}));
                ASSERT_OK_AND_EQ(44, mapper.GetFieldId({0, 1, 3}));
                // Unregistered paths fail with NotFound, even prefixes of
                // registered paths.
                ASSERT_RAISES(turbo::StatusCode::kNotFound, mapper.GetFieldId({}));
                ASSERT_RAISES(turbo::StatusCode::kNotFound, mapper.GetFieldId({0}));
                ASSERT_RAISES(turbo::StatusCode::kNotFound, mapper.GetFieldId({0, 1, 2}));
                ASSERT_RAISES(turbo::StatusCode::kNotFound, mapper.GetFieldId({1}));

                // The empty (root) path can itself be registered
                ASSERT_OK(mapper.add_field(41, {}));
                ASSERT_EQ(mapper.num_fields(), 4);
                ASSERT_OK_AND_EQ(41, mapper.GetFieldId({}));
                ASSERT_OK_AND_EQ(42, mapper.GetFieldId({0, 1}));

                // Duplicated dictionary ids are allowed
                ASSERT_OK(mapper.add_field(42, {4, 5, 6}));
                ASSERT_EQ(mapper.num_fields(), 5);
                ASSERT_OK_AND_EQ(42, mapper.GetFieldId({0, 1}));
                ASSERT_OK_AND_EQ(42, mapper.GetFieldId({4, 5, 6}));

                // Duplicated fields paths are not
                ASSERT_RAISES(turbo::StatusCode::kAlreadyExists, mapper.add_field(46, {0, 1}));
            }

            TEST(DictionaryFieldMapper, FromSchema) {
                // A mapper built from a schema registers every dictionary-typed
                // field, including those nested inside structs and lists.
                auto f0 = field("f0", int8());
                auto f1 =
                        field("f1", STRUCT({field("a", null()), field("b", dictionary(int8(), utf8()))}));
                auto f2 = field("f2", dictionary(int32(), list(dictionary(int8(), utf8()))));

                Schema schema({f0, f1, f2});
                DictionaryFieldMapper mapper(schema);

                ASSERT_EQ(mapper.num_fields(), 3);
                // The concrete ids are unspecified, but must all be distinct.
                const std::vector<std::vector<int>> dict_field_paths{{1, 1}, {2}, {2, 0}};
                std::unordered_set<int64_t> ids;
                for (const auto &path: dict_field_paths) {
                    ASSERT_OK_AND_ASSIGN(const int64_t id, mapper.GetFieldId(path));
                    ids.insert(id);
                }
                ASSERT_EQ(ids.size(), 3);  // All ids are distinct
            }

            static void AssertMemoDictionaryType(const DictionaryMemo &memo, int64_t id,
                                                 const std::shared_ptr<DataType> &expected) {
                ASSERT_OK_AND_ASSIGN(const auto actual, memo.GetDictionaryType(id));
                AssertTypeEqual(*expected, *actual);
            }

            TEST(DictionaryMemo, AddDictionaryType) {
                // Registering a dictionary type for an id is idempotent for equal
                // types and rejected for conflicting types.
                DictionaryMemo memo;

                // Unknown id
                ASSERT_RAISES(turbo::StatusCode::kNotFound, memo.GetDictionaryType(42));

                ASSERT_OK(memo.AddDictionaryType(42, utf8()));
                ASSERT_OK(memo.AddDictionaryType(43, large_binary()));
                AssertMemoDictionaryType(memo, 42, utf8());
                AssertMemoDictionaryType(memo, 43, large_binary());

                // Re-adding same type with different id
                ASSERT_OK(memo.AddDictionaryType(44, utf8()));
                AssertMemoDictionaryType(memo, 42, utf8());
                AssertMemoDictionaryType(memo, 44, utf8());

                // Re-adding same type with same id
                ASSERT_OK(memo.AddDictionaryType(42, utf8()));
                AssertMemoDictionaryType(memo, 42, utf8());
                AssertMemoDictionaryType(memo, 44, utf8());

                // Trying to add different type with same id
                ASSERT_RAISES(turbo::StatusCode::kAlreadyExists, memo.AddDictionaryType(42, large_utf8()));
                AssertMemoDictionaryType(memo, 42, utf8());
                AssertMemoDictionaryType(memo, 43, large_binary());
                AssertMemoDictionaryType(memo, 44, utf8());
            }

            TEST(IoRecordedRandomAccessFile, IoRecording) {
                // A fake 42-byte file that records the byte ranges it is asked to read.
                IoRecordedRandomAccessFile file(42);
                ASSERT_TRUE(file.GetReadRanges().empty());

                ASSERT_OK(file.read_at(1, 2));
                ASSERT_EQ(file.GetReadRanges().size(), 1);
                ASSERT_EQ(file.GetReadRanges()[0], (io::ReadRange{1, 2}));

                // A non-adjacent read is recorded as a separate range
                ASSERT_OK(file.read_at(5, 3));
                ASSERT_EQ(file.GetReadRanges().size(), 2);
                ASSERT_EQ(file.GetReadRanges()[1], (io::ReadRange{5, 3}));

                // continuous IOs will be merged
                ASSERT_OK(file.read_at(5 + 3, 6));
                ASSERT_EQ(file.GetReadRanges().size(), 2);
                ASSERT_EQ(file.GetReadRanges()[1], (io::ReadRange{5, 3 + 6}));

                // this should not happen but reading out of bounds will do no harm
                ASSERT_OK(file.read_at(43, 1));
            }

            TEST(IoRecordedRandomAccessFile, IoRecordingWithOutput) {
                // Same as IoRecording, but through the read_at overload that takes an
                // output buffer and returns the number of bytes read.
                std::shared_ptr<Buffer> out;
                IoRecordedRandomAccessFile file(42);
                ASSERT_TRUE(file.GetReadRanges().empty());
                ASSERT_EQ(file.read_at(1, 2, &out).value_or_die(), 2L);
                ASSERT_EQ(file.GetReadRanges().size(), 1);
                ASSERT_EQ(file.GetReadRanges()[0], (io::ReadRange{1, 2}));

                ASSERT_EQ(file.read_at(5, 1, &out).value_or_die(), 1);
                ASSERT_EQ(file.GetReadRanges().size(), 2);
                ASSERT_EQ(file.GetReadRanges()[1], (io::ReadRange{5, 1}));

                // continuous IOs will be merged
                ASSERT_EQ(file.read_at(5 + 1, 6, &out).value_or_die(), 6);
                ASSERT_EQ(file.GetReadRanges().size(), 2);
                ASSERT_EQ(file.GetReadRanges()[1], (io::ReadRange{5, 1 + 6}));
            }

            TEST(IoRecordedRandomAccessFile, ReadWithCurrentPosition) {
                // Positional read(): each call starts at the current file position.
                IoRecordedRandomAccessFile file(42);
                ASSERT_TRUE(file.GetReadRanges().empty());

                ASSERT_OK(file.read(10));
                ASSERT_EQ(file.GetReadRanges().size(), 1);
                ASSERT_EQ(file.GetReadRanges()[0], (io::ReadRange{0, 10}));

                // the previous read should advance the position
                ASSERT_OK(file.read(10));
                ASSERT_EQ(file.GetReadRanges().size(), 1);
                // the two reads are merged into single continuous IO
                ASSERT_EQ(file.GetReadRanges()[0], (io::ReadRange{0, 20}));
            }

            // Schema with one boolean field, one int32 field and one int64 field.
            std::shared_ptr<Schema> MakeBooleanInt32Int64Schema() {
                return ::nebula::schema(
                        {field("f0", boolean()), field("f1", int32()), field("f2", int64())});
            }

            // Fill *out with a batch of `length` random rows matching
            // MakeBooleanInt32Int64Schema().
            turbo::Status MakeBooleanInt32Int64Batch(const int length, std::shared_ptr<RecordBatch> *out) {
                auto batch_schema = MakeBooleanInt32Int64Schema();
                std::shared_ptr<Array> bool_array;
                std::shared_ptr<Array> int32_array;
                std::shared_ptr<Array> int64_array;
                TURBO_RETURN_NOT_OK(MakeRandomBooleanArray(length, false, &bool_array));
                TURBO_RETURN_NOT_OK(
                        MakeRandomInt32Array(length, false, nebula::default_memory_pool(), &int32_array));
                TURBO_RETURN_NOT_OK(
                        MakeRandomInt64Array(length, false, nebula::default_memory_pool(), &int64_array));
                *out = RecordBatch::create(std::move(batch_schema), length,
                                           {bool_array, int32_array, int64_array});
                return turbo::OkStatus();
            }

            // Serialize `num_batches` random batches of `num_rows` rows each into an
            // in-memory IPC file and return its buffer.
            std::shared_ptr<Buffer> MakeBooleanInt32Int64File(int num_rows, int num_batches) {
                auto file_schema = MakeBooleanInt32Int64Schema();
                EXPECT_OK_AND_ASSIGN(auto sink, io::BufferOutputStream::create(0));
                EXPECT_OK_AND_ASSIGN(auto writer, make_file_writer(sink.get(), file_schema));

                for (int batch_index = 0; batch_index < num_batches; batch_index++) {
                    std::shared_ptr<RecordBatch> batch;
                    NEBULA_EXPECT_OK(MakeBooleanInt32Int64Batch(num_rows, &batch));
                    NEBULA_EXPECT_OK(writer->write_record_batch(*batch));
                }

                NEBULA_EXPECT_OK(writer->close());
                EXPECT_OK_AND_ASSIGN(auto buffer, sink->finish());
                return buffer;
            }

            // Read a one-batch file with IO tracking, selecting `included_fields`
            // (empty = all), and check that the recorded body reads match
            // `expected_body_read_lengths` (one entry per expected body IO).
            void GetReadRecordBatchReadRanges(
                    uint32_t num_rows, const std::vector<int> &included_fields,
                    const std::vector<int64_t> &expected_body_read_lengths) {
                auto buffer = MakeBooleanInt32Int64File(num_rows, /*num_batches=*/1);

                io::BufferReader buffer_reader(buffer);
                std::unique_ptr<io::TrackedRandomAccessFile> tracked =
                        io::TrackedRandomAccessFile::create(&buffer_reader);

                auto read_options = IpcReadOptions::defaults();
                // if empty, return all fields
                read_options.included_fields = included_fields;
                ASSERT_OK_AND_ASSIGN(auto reader,
                                     RecordBatchFileReader::open(tracked.get(), read_options));
                ASSERT_OK_AND_ASSIGN(auto out_batch, reader->read_record_batch(0));

                ASSERT_EQ(out_batch->num_rows(), num_rows);
                ASSERT_EQ(out_batch->num_columns(),
                          included_fields.empty() ? 3 : included_fields.size());

                auto read_ranges = tracked->get_read_ranges();

                // there are 3 read IOs before reading body:
                // 1) read magic and footer length IO
                // 2) read footer IO
                // 3) read record batch metadata IO
                EXPECT_EQ(read_ranges.size(), 3 + expected_body_read_lengths.size());
                const auto magic_size =
                        static_cast<int32_t>(strlen(ipc::internal::kArrowMagicBytes));
                // read magic and footer length IO; keep the arithmetic signed to
                // avoid unsigned mixing with the int64_t range lengths below
                const auto file_end_size = magic_size + static_cast<int64_t>(sizeof(int32_t));
                const auto footer_length_offset = buffer->size() - file_end_size;
                const auto footer_length = bit_util::FromLittleEndian(
                        turbo::safe_load_as<int32_t>(buffer->data() + footer_length_offset));
                EXPECT_EQ(read_ranges[0].length, file_end_size);
                // read footer IO
                EXPECT_EQ(read_ranges[1].length, footer_length);
                // read record batch metadata.  The exact size is tricky to determine but it doesn't
                // matter for this test and it should be smaller than the footer.
                EXPECT_LE(read_ranges[2].length, footer_length);
                for (size_t i = 0; i < expected_body_read_lengths.size(); i++) {
                    EXPECT_EQ(read_ranges[3 + i].length, expected_body_read_lengths[i]);
                }
            }

            // Convenience overload using the default row count of 5.
            void GetReadRecordBatchReadRanges(
                    const std::vector<int> &included_fields,
                    const std::vector<int64_t> &expected_body_read_lengths) {
                GetReadRecordBatchReadRanges(5, included_fields, expected_body_read_lengths);
            }

            // ----------------------------------------------------------------------
            // Selective-field reads: which byte ranges are fetched for various
            // `included_fields` selections (see GetReadRecordBatchReadRanges above).

            TEST(TestRecordBatchFileReaderIo, LoadAllFieldsShouldReadTheEntireBody) {
                // read the entire record batch body in single read
                // the batch has 5 * bool + 5 * int32 + 5 * int64
                // ==>
                // + 5 bool:  5 bits      (aligned to  8 bytes)
                // + 5 int32: 5 * 4 bytes (aligned to 24 bytes)
                // + 5 int64: 5 * 8 bytes (aligned to 40 bytes)
                GetReadRecordBatchReadRanges({}, {8 + 24 + 40});
            }

            TEST(TestRecordBatchFileReaderIo, ReadSingleFieldAtTheStart) {
                // read only the bool field
                // + 5 bool:  5 bits (1 byte)
                GetReadRecordBatchReadRanges({0}, {1});
            }

            TEST(TestRecordBatchFileReaderIo, ReadSingleFieldInTheMiddle) {
                // read only the int32 field
                // + 5 int32: 5 * 4 bytes
                GetReadRecordBatchReadRanges({1}, {20});
            }

            TEST(TestRecordBatchFileReaderIo, ReadSingleFieldInTheEnd) {
                // read only the int64 field
                // + 5 int64: 5 * 8 bytes
                GetReadRecordBatchReadRanges({2}, {40});
            }

            TEST(TestRecordBatchFileReaderIo, SkipTheFieldInTheMiddle) {
                // read the bool field and the int64 field
                // two IOs for body are expected, first for reading bool and the second for reading
                // int64
                // + 5 bool:  5 bits (1 byte)
                // + 5 int64: 5 * 8 bytes
                GetReadRecordBatchReadRanges({0, 2}, {1, 40});
            }

            TEST(TestRecordBatchFileReaderIo, ReadTwoContinuousFields) {
                // read the int32 field and the int64 field
                // + 5 int32: 5 * 4 bytes
                // + 5 int64: 5 * 8 bytes
                GetReadRecordBatchReadRanges({1, 2}, {20, 40});
            }

            TEST(TestRecordBatchFileReaderIo, ReadTwoContinuousFieldsWithIoMerged) {
                // change the array length to 64 so that bool field and int32 are continuous without
                // padding
                // read the bool field and the int32 field since the bool field's aligned offset
                // is continuous with next field (int32 field), two IOs are merged into one
                // + 64 bool: 64 bits (8 bytes)
                // + 64 int32: 64 * 4 bytes (256 bytes)
                GetReadRecordBatchReadRanges(64, {0, 1}, {8 + 64 * 4});
            }

            // Number of record batches written into the pre-buffering test file.
            constexpr static int kNumBatches = 10;
            // It can be difficult to know the exact size of the schema.  Instead we just make
            // the row data big enough that we can easily identify if a read is for a schema
            // or for row data.
            //
            // This needs to be large enough to space record batches kDefaultHoleSizeLimit bytes
            // apart and also large enough that record batch data is more than
            // kMaxMetadataSizeBytes bytes
            constexpr static int kRowsPerBatch = 1000;
            constexpr static int64_t kMaxMetadataSizeBytes = 1 << 13;
            // There are always 2 reads when the file is opened
            constexpr static int kNumReadsOnOpen = 2;

            // Parameterized fixture for verifying the IO behavior of
            // RecordBatchFileReader::pre_buffer_metadata.  The bool parameter selects
            // whether reads are "plugged": cache options tuned so that all pre-buffered
            // reads coalesce into one large read (true) or left at defaults (false).
            class PreBufferingTest : public ::testing::TestWithParam<bool> {
            protected:
                void SetUp() override {
                    // One in-memory IPC file shared by every case in this fixture.
                    file_buffer_ = MakeBooleanInt32Int64File(kRowsPerBatch, kNumBatches);
                }

                // Open a reader over file_buffer_, wrapped in a tracking layer so tests
                // can inspect exactly which byte ranges were read.
                void OpenReader() {
                    buffer_reader_ = std::make_shared<io::BufferReader>(file_buffer_);
                    tracked_ = io::TrackedRandomAccessFile::create(buffer_reader_.get());
                    auto read_options = IpcReadOptions::defaults();
                    if (ReadsArePlugged()) {
                        // This will ensure that all reads get globbed together into one large read
                        read_options.pre_buffer_cache_options.hole_size_limit =
                                std::numeric_limits<int64_t>::max() - 1;
                        read_options.pre_buffer_cache_options.range_size_limit =
                                std::numeric_limits<int64_t>::max();
                    }
                    ASSERT_OK_AND_ASSIGN(reader_, RecordBatchFileReader::open(tracked_, read_options));
                }

                // Test parameter: true when cached reads should merge into a single IO.
                bool ReadsArePlugged() { return GetParam(); }

                // Returns {0, 1, ..., kNumBatches - 1}.
                std::vector<int> AllBatchIndices() {
                    std::vector<int> all_batch_indices(kNumBatches);
                    std::iota(all_batch_indices.begin(), all_batch_indices.end(), 0);
                    return all_batch_indices;
                }

                // Check the reads issued by pre_buffer_metadata for `batch_indices`.
                // An empty vector means "all batches" (mirroring pre_buffer_metadata({})).
                void AssertMetadataLoaded(std::vector<int> batch_indices) {
                    if (batch_indices.size() == 0) {
                        batch_indices = AllBatchIndices();
                    }
                    const auto &read_ranges = tracked_->get_read_ranges();
                    if (ReadsArePlugged()) {
                        // The read should have arrived as one large read
                        ASSERT_EQ(kNumReadsOnOpen + 1, read_ranges.size());
                        if (batch_indices.size() > 1) {
                            // Merging several batches' metadata exceeds the single-batch bound.
                            ASSERT_GT(read_ranges[kNumReadsOnOpen].length, kMaxMetadataSizeBytes);
                        }
                    } else {
                        // We should get many small reads of metadata only
                        ASSERT_EQ(batch_indices.size() + kNumReadsOnOpen, read_ranges.size());
                        for (const auto &read_range: read_ranges) {
                            ASSERT_LT(read_range.length, kMaxMetadataSizeBytes);
                        }
                    }
                }

                // Re-read the whole file through an independent, untracked reader to get
                // the reference batches for comparison.
                std::vector<std::shared_ptr<RecordBatch>> LoadExpected() {
                    auto buffer_reader = std::make_shared<io::BufferReader>(file_buffer_);
                    auto read_options = IpcReadOptions::defaults();
                    EXPECT_OK_AND_ASSIGN(auto reader,
                                         RecordBatchFileReader::open(buffer_reader.get(), read_options));
                    EXPECT_OK_AND_ASSIGN(auto expected_batches, reader->to_record_batches());
                    return expected_batches;
                }

                // Read every batch, compare against LoadExpected(), then classify the reads
                // issued by those calls by size: larger than kMaxMetadataSizeBytes counts
                // as a data (body) read, smaller counts as a metadata read.  Batches whose
                // metadata was pre-buffered should need no further metadata read, so we
                // expect (num_batches - num_indices_pre_buffered) metadata reads and one
                // data read per batch.
                void CheckFileRead(int num_indices_pre_buffered) {
                    auto expected_batches = LoadExpected();
                    // Reference into tracked_'s live range log; it grows as we read below,
                    // so only entries past starting_reads belong to the batch reads.
                    const std::vector<io::ReadRange> &read_ranges = tracked_->get_read_ranges();
                    std::size_t starting_reads = read_ranges.size();
                    for (int i = 0; i < reader_->num_record_batches(); i++) {
                        ASSERT_OK_AND_ASSIGN(auto next_batch, reader_->read_record_batch(i));
                        AssertBatchesEqual(*expected_batches[i], *next_batch);
                    }
                    int metadata_reads = 0;
                    int data_reads = 0;
                    for (std::size_t i = starting_reads; i < read_ranges.size(); i++) {
                        if (read_ranges[i].length > kMaxMetadataSizeBytes) {
                            data_reads++;
                        } else {
                            metadata_reads++;
                        }
                    }
                    ASSERT_EQ(metadata_reads, reader_->num_record_batches() - num_indices_pre_buffered);
                    ASSERT_EQ(data_reads, reader_->num_record_batches());
                }

                std::vector<std::shared_ptr<RecordBatch>> batches_;  // NOTE(review): appears unused in this fixture — candidate for removal
                std::shared_ptr<Buffer> file_buffer_;                // in-memory IPC file written in SetUp
                std::shared_ptr<io::BufferReader> buffer_reader_;    // raw reader over file_buffer_
                std::shared_ptr<io::TrackedRandomAccessFile> tracked_;  // records every read range
                std::shared_ptr<RecordBatchFileReader> reader_;      // reader under test (over tracked_)
            };

            TEST_P(PreBufferingTest, MetadataOnlyAllBatches) {
                OpenReader();
                // An empty index list requests the metadata of every batch, so no batch
                // should need a metadata read later.
                ASSERT_OK(reader_->pre_buffer_metadata({}));
                AssertMetadataLoaded({});
                CheckFileRead(kNumBatches);
            }

            TEST_P(PreBufferingTest, MetadataOnlySomeBatches) {
                OpenReader();
                // Pre-buffer metadata for a subset of the batches only (the old comment
                // claimed "all" — a copy-paste from the test above).  The three covered
                // batches should need no metadata read later; the rest should each
                // trigger one, which is what CheckFileRead(3) verifies.
                ASSERT_OK(reader_->pre_buffer_metadata({1, 2, 3}));
                AssertMetadataLoaded({1, 2, 3});
                CheckFileRead(3);
            }

            INSTANTIATE_TEST_SUITE_P(PreBufferingTests, PreBufferingTest,
                                     ::testing::Values(false, true),
                                     [](const ::testing::TestParamInfo<bool> &info) {
                                         // Name each instantiation after its read mode.
                                         return info.param ? "plugged" : "not_plugged";
                                     });

            // Build a random record batch with one plain int32 column ("i32") and one
            // dictionary-encoded int32 column ("i32d"), each `length` rows long.
            turbo::Result<std::shared_ptr<RecordBatch>> MakeBatchWithDictionaries(const int length) {
                auto dict_type = dictionary(int32(), int32());
                auto batch_schema = ::nebula::schema(
                        {::nebula::field("i32", int32()), ::nebula::field("i32d", dict_type)});
                auto *pool = nebula::default_memory_pool();
                std::shared_ptr<Array> plain_values;
                std::shared_ptr<Array> dict_values;
                std::shared_ptr<Array> dict_indices;
                TURBO_RETURN_NOT_OK(MakeRandomInt32Array(length, false, pool, &plain_values));
                TURBO_RETURN_NOT_OK(MakeRandomInt32Array(length, false, pool, &dict_values));
                // Trailing (0, 0, length) arguments constrain the generated values so they
                // are usable as dictionary indices — presumably [0, length); see helper.
                TURBO_RETURN_NOT_OK(
                        MakeRandomInt32Array(length, false, pool, &dict_indices, 0, 0, length));
                auto dict_column =
                        std::make_shared<DictionaryArray>(dict_type, dict_indices, dict_values);
                return RecordBatch::create(std::move(batch_schema), length, {plain_values, dict_column});
            }

            // Write `num_batches` copies of a dictionary-bearing batch of `rows_per_batch`
            // rows into a file under `tempdir`, then reopen that file for random access.
            turbo::Result<std::shared_ptr<io::RandomAccessFile>> MakeFileWithDictionaries(
                    const std::unique_ptr<turbo::ScopedTempDir> &tempdir, int rows_per_batch, int num_batches) {
                auto temppath = tempdir->path() / "testfile";
                EXPECT_OK_AND_ASSIGN(auto batch, MakeBatchWithDictionaries(rows_per_batch));
                EXPECT_OK_AND_ASSIGN(auto sink, io::FileOutputStream::open(temppath.string()));
                EXPECT_OK_AND_ASSIGN(auto writer, make_file_writer(sink.get(), batch->schema()));

                // The same batch is written repeatedly; only the count varies.
                for (int batch_idx = 0; batch_idx < num_batches; ++batch_idx) {
                    NEBULA_EXPECT_OK(writer->write_record_batch(*batch));
                }

                NEBULA_EXPECT_OK(writer->close());
                NEBULA_EXPECT_OK(sink->close());
                return io::ReadableFile::open(temppath.string());
            }

            TEST(PreBuffering, MixedAccess) {
                // Pre-buffer metadata for batch 0 only, then read the batches out of
                // order (1 first, then 0): the pre-buffered and non-pre-buffered paths
                // must both yield the expected 50-row batches.
                ASSERT_OK_AND_ASSIGN(auto tempdir, turbo::ScopedTempDir::create("nebula-ipc-read-write-test-"));
                ASSERT_OK_AND_ASSIGN(auto readable_file, MakeFileWithDictionaries(tempdir, 50, 2));
                ASSERT_OK_AND_ASSIGN(auto reader,
                                     RecordBatchFileReader::open(readable_file, IpcReadOptions::defaults()));
                ASSERT_OK(reader->pre_buffer_metadata({0}));
                ASSERT_OK_AND_ASSIGN(auto batch, reader->read_record_batch(1));
                ASSERT_EQ(50, batch->num_rows());
                ASSERT_OK_AND_ASSIGN(batch, reader->read_record_batch(0));
                ASSERT_EQ(50, batch->num_rows());
                const auto stats = reader->stats();
                ASSERT_EQ(1, stats.num_dictionary_batches);
                ASSERT_EQ(2, stats.num_record_batches);
            }

        }  // namespace test
    }  // namespace ipc
}  // namespace nebula
