// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <melon/init/init.h>

#include <pollux/testing/gtest_utils.h>
#include <pollux/testing/dwio/data_files.h> // @manual
#include <pollux/dwio/parquet/register_parquet_reader.h> // @manual
#include <pollux/dwio/parquet/reader/page_reader.h> // @manual
#include <pollux/dwio/parquet/reader/parquet_reader.h> // @manual=//pollux/connectors/hive:pollux_hive_connector_parquet
#include <pollux/testing/exec/util/assert_query_builder.h>
#include <pollux/testing/exec/util/hive_connector_test_base.h> // @manual
#include <pollux/plan/plan_builder.h>
#include <pollux/common/file/temp_directory_path.h>
#include <pollux/testing/type/subfield_filters_builder.h>
#include <pollux/type/tz/time_zone_map.h>

#include <pollux/connectors/hive/hive_config.h> // @manual=//pollux/connectors/hive:pollux_hive_connector_parquet
#include <pollux/dwio/parquet/writer/writer.h> // @manual

using namespace kumo::pollux;
using namespace kumo::pollux::exec;
using namespace kumo::pollux::connector::hive;
using namespace kumo::pollux::exec::test;
using namespace kumo::pollux::parquet;
using namespace kumo::pollux::test;
using namespace kumo::pollux::plan;

// Test fixture for Parquet table-scan tests. Registers the Parquet reader
// factory, loads one or more splits from example/generated Parquet files, and
// provides helpers that build a table-scan plan over those splits and verify
// the results against an equivalent DuckDB query on the "tmp" table.
class ParquetTableScanTest : public HiveConnectorTestBase {
protected:
    using OperatorTestBase::assertQuery;

    void SetUp() override {
        HiveConnectorTestBase::SetUp();
        parquet::registerParquetReaderFactory();
    }

    // Scans `outputColumnNames` from the loaded splits and compares the result
    // with `sql` evaluated by DuckDB.
    void assertSelect(
        std::vector<std::string> &&outputColumnNames,
        const std::string &sql) {
        auto rowType = getRowType(std::move(outputColumnNames));

        auto plan = PlanBuilder().tableScan(rowType).planNode();

        assertQuery(plan, splits_, sql);
    }

    // Same as assertSelect(), but also passes `dataColumns` as the table's
    // full schema (used when the scan output differs from the file schema).
    void assertSelectWithDataColumns(
        std::vector<std::string> &&outputColumnNames,
        const RowTypePtr &dataColumns,
        const std::string &sql) {
        auto rowType = getRowType(std::move(outputColumnNames));
        auto plan =
                PlanBuilder().tableScan(rowType, {}, "", dataColumns).planNode();
        assertQuery(plan, splits_, sql);
    }

    // Scan with explicit column-handle assignments (e.g. synthesized or
    // row-index columns).
    void assertSelectWithAssignments(
        std::vector<std::string> &&outputColumnNames,
        std::unordered_map<std::string, std::shared_ptr<connector::ColumnHandle> > &
        assignments,
        const std::string &sql) {
        auto rowType = getRowType(std::move(outputColumnNames));
        auto plan = PlanBuilder()
                .tableScan(rowType, {}, "", nullptr, assignments)
                .planNode();
        assertQuery(plan, splits_, sql);
    }

    // Scan with pushed-down subfield filters and/or a remaining filter.
    // Decimal literals in the filters are parsed as decimals (not doubles),
    // and the reader's timestamp unit is set to match the precision recorded
    // by the last writeToParquetFile() call.
    void assertSelectWithFilter(
        std::vector<std::string> &&outputColumnNames,
        const std::vector<std::string> &subfieldFilters,
        const std::string &remainingFilter,
        const std::string &sql,
        const std::unordered_map<
            std::string,
            std::shared_ptr<connector::ColumnHandle> > &assignments = {}) {
        auto rowType = getRowType(std::move(outputColumnNames));
        parse::ParseOptions options;
        options.parseDecimalAsDouble = false;

        auto plan =
                PlanBuilder(pool_.get())
                .setParseOptions(options)
                .tableScan(
                    rowType, subfieldFilters, remainingFilter, nullptr, assignments)
                .planNode();

        AssertQueryBuilder(plan, duckDbQueryRunner_)
                .connectorSessionProperty(
                    kHiveConnectorId,
                    HiveConfig::kReadTimestampUnitSession,
                    std::to_string(static_cast<int>(timestampPrecision_)))
                .splits(splits_)
                .assertResults(sql);
    }

    // Scan followed by a single aggregation.
    void assertSelectWithAgg(
        std::vector<std::string> &&outputColumnNames,
        const std::vector<std::string> &aggregates,
        const std::vector<std::string> &groupingKeys,
        const std::string &sql) {
        auto rowType = getRowType(std::move(outputColumnNames));

        auto plan = PlanBuilder()
                .tableScan(rowType)
                .singleAggregation(groupingKeys, aggregates)
                .planNode();

        assertQuery(plan, splits_, sql);
    }

    // Scan with pushed-down filters followed by a single aggregation.
    void assertSelectWithFilterAndAgg(
        std::vector<std::string> &&outputColumnNames,
        const std::vector<std::string> &filters,
        const std::vector<std::string> &aggregates,
        const std::vector<std::string> &groupingKeys,
        const std::string &sql) {
        auto rowType = getRowType(std::move(outputColumnNames));

        auto plan = PlanBuilder()
                .tableScan(rowType, filters)
                .singleAggregation(groupingKeys, aggregates)
                .planNode();

        assertQuery(plan, splits_, sql);
    }

    // Runs the scan with the given session time zone and compares the result
    // with `sql` evaluated by DuckDB.
    void assertSelectWithTimezone(
        std::vector<std::string> &&outputColumnNames,
        const std::string &sql,
        const std::string &sessionTimezone) {
        auto rowType = getRowType(std::move(outputColumnNames));
        auto plan = PlanBuilder().tableScan(rowType).planNode();
        std::vector<exec::Split> splits;
        splits.reserve(splits_.size());
        for (const auto &connectorSplit: splits_) {
            splits.emplace_back(melon::copy(connectorSplit), -1);
        }

        AssertQueryBuilder(plan, duckDbQueryRunner_)
                .config(core::QueryConfig::kSessionTimezone, sessionTimezone)
                .splits(splits)
                .assertResults(sql);
    }

    // Points splits_ at `filePath`, records the file schema, and materializes
    // `data` as the DuckDB reference table "tmp". Optional partition keys and
    // info columns are attached to the split.
    void loadData(
        const std::string &filePath,
        RowTypePtr rowType,
        RowVectorPtr data,
        const std::optional<
            std::unordered_map<std::string, std::optional<std::string> > > &
                partitionKeys = std::nullopt,
        const std::optional<std::unordered_map<std::string, std::string> > &
                infoColumns = std::nullopt) {
        splits_ = {makeSplit(filePath, partitionKeys, infoColumns)};
        // `rowType` was taken by value; move it instead of copying.
        rowType_ = std::move(rowType);
        createDuckDbTable({data});
    }

    // Like loadData(), but derives the schema from the Parquet file footer
    // instead of taking it from the caller.
    void loadDataWithRowType(const std::string &filePath, RowVectorPtr data) {
        splits_ = {makeSplit(filePath)};
        auto pool = kumo::pollux::memory::memoryManager()->addLeafPool();
        dwio::common::ReaderOptions readerOpts{pool.get()};
        auto reader = std::make_unique<ParquetReader>(
            std::make_unique<kumo::pollux::dwio::common::BufferedInput>(
                std::make_shared<LocalReadFile>(filePath), readerOpts.memoryPool()),
            readerOpts);
        rowType_ = reader->rowType();
        createDuckDbTable({data});
    }

    // Resolves `fileName` inside the checked-in examples directory.
    std::string getExampleFilePath(const std::string &fileName) {
        return getDataFilePath(
            "pollux/dwio/parquet/tests/reader", "../examples/" + fileName);
    }

    // Creates a single Hive connector split for `filePath`, optionally with
    // partition keys and info columns.
    std::shared_ptr<connector::hive::HiveConnectorSplit> makeSplit(
        const std::string &filePath,
        const std::optional<
            std::unordered_map<std::string, std::optional<std::string> > > &
                partitionKeys = std::nullopt,
        const std::optional<std::unordered_map<std::string, std::string> > &
                infoColumns = std::nullopt) {
        return makeHiveConnectorSplits(
            filePath,
            1,
            dwio::common::FileFormat::PARQUET,
            partitionKeys,
            infoColumns)[0];
    }

    // Write data to a parquet file on specified path. Also records the write
    // timestamp unit (when set) so subsequent reads use a matching precision.
    void writeToParquetFile(
        const std::string &path,
        const std::vector<RowVectorPtr> &data,
        WriterOptions options) {
        POLLUX_CHECK_GT(data.size(), 0);

        auto writeFile = std::make_unique<LocalWriteFile>(path, true, false);
        auto sink = std::make_unique<dwio::common::WriteFileSink>(
            std::move(writeFile), path);
        auto childPool =
                rootPool_->addAggregateChild("ParquetTableScanTest.Writer");
        options.memoryPool = childPool.get();

        if (options.parquetWriteTimestampUnit.has_value()) {
            timestampPrecision_ = options.parquetWriteTimestampUnit.value();
        }

        auto writer = std::make_unique<Writer>(
            std::move(sink), options, as_row_type(data[0]->type()));

        for (const auto &vector: data) {
            writer->write(vector);
        }
        writer->close();
    }

    // Round-trips a fixed set of timestamp values through a Parquet file
    // written with `options` and verifies comparison filters (<, <=, >, >=,
    // ==) against DuckDB.
    void testTimestampRead(const WriterOptions &options) {
        auto stringToTimestamp = [](std::string_view view) {
            return util::fromTimestampString(
                        view.data(),
                        view.size(),
                        util::TimestampParseMode::kPrestoCast)
                    .thenOrThrow(melon::identity, [&](const Status &status) {
                        POLLUX_USER_FAIL("{}", status.message());
                    });
        };
        // Mix of precisions: millis, micros, and second-resolution values.
        std::vector<std::string_view> views = {
            "2015-06-01 19:34:56.007",
            "2015-06-02 19:34:56.12306",
            "2001-02-03 03:34:06.056",
            "1998-03-01 08:01:06.996669",
            "2022-12-23 03:56:01",
            "1980-01-24 00:23:07",
            "1999-12-08 13:39:26.123456",
            "2023-04-21 09:09:34.5",
            "2000-09-12 22:36:29",
            "2007-12-12 04:27:56.999",
        };
        std::vector<Timestamp> values;
        values.reserve(views.size());
        for (auto view: views) {
            values.emplace_back(stringToTimestamp(view));
        }

        auto vector = make_row_vector(
            {"t"},
            {
                make_flat_vector<Timestamp>(values),
            });
        auto schema = as_row_type(vector->type());
        auto file = TempFilePath::create();
        writeToParquetFile(file->getPath(), {vector}, options);
        loadData(file->getPath(), schema, vector);

        assertSelectWithFilter({"t"}, {}, "", "SELECT t from tmp");
        assertSelectWithFilter(
            {"t"},
            {},
            "t < TIMESTAMP '2000-09-12 22:36:29'",
            "SELECT t from tmp where t < TIMESTAMP '2000-09-12 22:36:29'");
        assertSelectWithFilter(
            {"t"},
            {},
            "t <= TIMESTAMP '2000-09-12 22:36:29'",
            "SELECT t from tmp where t <= TIMESTAMP '2000-09-12 22:36:29'");
        assertSelectWithFilter(
            {"t"},
            {},
            "t > TIMESTAMP '1980-01-24 00:23:07'",
            "SELECT t from tmp where t > TIMESTAMP '1980-01-24 00:23:07'");
        assertSelectWithFilter(
            {"t"},
            {},
            "t >= TIMESTAMP '1980-01-24 00:23:07'",
            "SELECT t from tmp where t >= TIMESTAMP '1980-01-24 00:23:07'");
        assertSelectWithFilter(
            {"t"},
            {},
            "t == TIMESTAMP '2022-12-23 03:56:01'",
            "SELECT t from tmp where t == TIMESTAMP '2022-12-23 03:56:01'");
    }

private:
    // Builds a ROW type projecting `outputColumnNames` out of rowType_.
    RowTypePtr getRowType(std::vector<std::string> &&outputColumnNames) const {
        std::vector<TypePtr> types;
        types.reserve(outputColumnNames.size());
        // Iterate by const reference to avoid copying each column name.
        for (const auto &colName: outputColumnNames) {
            types.push_back(rowType_->findChild(colName));
        }

        return ROW(std::move(outputColumnNames), std::move(types));
    }

    // Schema of the currently loaded file (set by loadData*()).
    RowTypePtr rowType_;
    // Splits scanned by the assert* helpers (set by loadData*()).
    std::vector<std::shared_ptr<connector::ConnectorSplit> > splits_;
    // Precision used by the last writeToParquetFile() call; fed to the reader
    // via the read-timestamp-unit session property.
    TimestampPrecision timestampPrecision_ = TimestampPrecision::kMicroseconds;
};

// End-to-end scan of sample.parquet (20 rows; a: BIGINT 1..20, b: DOUBLE
// 1..20), covering plain projections, filter pushdown, aggregation pushdown,
// and combined filter + aggregation.
TEST_F(ParquetTableScanTest, basic) {
    loadData(
        getExampleFilePath("sample.parquet"),
        ROW({"a", "b"}, {BIGINT(), DOUBLE()}),
        make_row_vector(
            {"a", "b"},
            {
                make_flat_vector<int64_t>(20, [](auto row) { return row + 1; }),
                make_flat_vector<double>(20, [](auto row) { return row + 1; }),
            }));

    // Plain select. Column order in the projection should not matter.
    assertSelect({"a"}, "SELECT a FROM tmp");
    assertSelect({"b"}, "SELECT b FROM tmp");
    assertSelect({"a", "b"}, "SELECT a, b FROM tmp");
    assertSelect({"b", "a"}, "SELECT b, a FROM tmp");

    // With filters. The last case of each group selects zero rows.
    assertSelectWithFilter({"a"}, {"a < 3"}, "", "SELECT a FROM tmp WHERE a < 3");
    assertSelectWithFilter(
        {"a", "b"}, {"a < 3"}, "", "SELECT a, b FROM tmp WHERE a < 3");
    assertSelectWithFilter(
        {"b", "a"}, {"a < 3"}, "", "SELECT b, a FROM tmp WHERE a < 3");
    assertSelectWithFilter(
        {"a", "b"}, {"a < 0"}, "", "SELECT a, b FROM tmp WHERE a < 0");

    // Same filter shapes on the DOUBLE column.
    assertSelectWithFilter(
        {"b"}, {"b < DOUBLE '2.0'"}, "", "SELECT b FROM tmp WHERE b < 2.0");
    assertSelectWithFilter(
        {"a", "b"},
        {"b >= DOUBLE '2.0'"},
        "",
        "SELECT a, b FROM tmp WHERE b >= 2.0");
    assertSelectWithFilter(
        {"b", "a"},
        {"b <= DOUBLE '2.0'"},
        "",
        "SELECT b, a FROM tmp WHERE b <= 2.0");
    assertSelectWithFilter(
        {"a", "b"},
        {"b < DOUBLE '0.0'"},
        "",
        "SELECT a, b FROM tmp WHERE b < 0.0");

    // With aggregations, both global and grouped.
    assertSelectWithAgg({"a"}, {"sum(a)"}, {}, "SELECT sum(a) FROM tmp");
    assertSelectWithAgg({"b"}, {"max(b)"}, {}, "SELECT max(b) FROM tmp");
    assertSelectWithAgg(
        {"a", "b"}, {"min(a)", "max(b)"}, {}, "SELECT min(a), max(b) FROM tmp");
    assertSelectWithAgg(
        {"b", "a"}, {"max(b)"}, {"a"}, "SELECT max(b), a FROM tmp GROUP BY a");
    assertSelectWithAgg(
        {"a", "b"}, {"max(a)"}, {"b"}, "SELECT max(a), b FROM tmp GROUP BY b");

    // With filter and aggregation.
    assertSelectWithFilterAndAgg(
        {"a"}, {"a < 3"}, {"sum(a)"}, {}, "SELECT sum(a) FROM tmp WHERE a < 3");
    assertSelectWithFilterAndAgg(
        {"a", "b"},
        {"a < 3"},
        {"sum(b)"},
        {},
        "SELECT sum(b) FROM tmp WHERE a < 3");
    assertSelectWithFilterAndAgg(
        {"a", "b"},
        {"a < 3"},
        {"min(a)", "max(b)"},
        {},
        "SELECT min(a), max(b) FROM tmp WHERE a < 3");
    assertSelectWithFilterAndAgg(
        {"b", "a"},
        {"a < 3"},
        {"max(b)"},
        {"a"},
        "SELECT max(b), a FROM tmp WHERE a < 3 GROUP BY a");
}

// Verifies that the scan produces lazy column vectors and that all 20 rows
// of sample.parquet are eventually delivered.
TEST_F(ParquetTableScanTest, lazy) {
    const auto path = getExampleFilePath("sample.parquet");
    const auto outputType = ROW({"a", "b"}, {BIGINT(), DOUBLE()});

    CursorParameters params;
    params.copyResult = false;
    params.planNode = PlanBuilder().tableScan(outputType).planNode();

    auto cursor = TaskCursor::create(params);
    cursor->task()->addSplit("0", exec::Split(makeSplit(path)));
    cursor->task()->noMoreSplits("0");

    int totalRows = 0;
    while (cursor->moveNext()) {
        auto *batch = cursor->current()->as_unchecked<RowVector>();
        // Both output columns must still be lazy at this point.
        ASSERT_TRUE(batch->childAt(0)->is_lazy());
        ASSERT_TRUE(batch->childAt(1)->is_lazy());
        totalRows += batch->size();
    }
    ASSERT_EQ(totalRows, 20);
    ASSERT_TRUE(waitForTaskCompletion(cursor->task().get()));
}

// Grouped sum over a filtered scan; checks both grouping keys and aggregates.
TEST_F(ParquetTableScanTest, aggregatePushdown) {
    const auto expectedKeys = make_flat_vector<int64_t>({1, 4, 0, 3, 2});
    const auto expectedSums =
            make_flat_vector<int64_t>({8077, 6883, 5805, 10640, 3582});

    auto scanType = ROW({"c1", "c2", "c3"}, {BIGINT(), BIGINT(), BIGINT()});
    auto plan = PlanBuilder()
            .tableScan(scanType, {"c1 = 1"}, "")
            .singleAggregation({"c2"}, {"sum(c3)"})
            .planNode();

    std::vector<std::shared_ptr<connector::ConnectorSplit> > splits;
    splits.push_back(makeSplit(getExampleFilePath("gcc_data_diff.parquet")));

    auto result = AssertQueryBuilder(plan).splits(splits).copyResults(pool());
    ASSERT_EQ(result->size(), 5);

    auto *resultRows = result->as<RowVector>();
    ASSERT_TRUE(resultRows);
    ASSERT_EQ(resultRows->childrenSize(), 2);
    assertEqualVectors(resultRows->childAt(0), expectedKeys);
    assertEqualVectors(resultRows->childAt(1), expectedSums);
}

// count(*)-style query: the scan projects no columns at all.
// sample.parquet holds two columns (a: BIGINT, b: DOUBLE) and 20 rows.
TEST_F(ParquetTableScanTest, countStar) {
    auto split = makeSplit(getExampleFilePath("sample.parquet"));

    // Empty output row type: no column needs to be materialized.
    auto emptyRowType = ROW({}, {});
    auto plan = PlanBuilder()
            .tableScan(emptyRowType)
            .singleAggregation({}, {"count(0)"})
            .planNode();

    assertQuery(plan, {split}, "SELECT 20");
}

TEST_F(ParquetTableScanTest, decimalSubfieldFilter) {
    // decimal.parquet holds two columns (a: DECIMAL(5, 2), b: DECIMAL(20, 5)) and
    // 20 rows (10 rows per group). Data is in plain uncompressed format:
    //   a: [100.01 .. 100.20]
    //   b: [100000000000000.00001 .. 100000000000000.00020]
    // Unscaled values 10001..10020 represent 100.01..100.20 at scale 2.
    std::vector<int64_t> unscaledShortValues(20);
    std::iota(unscaledShortValues.begin(), unscaledShortValues.end(), 10001);
    loadData(
        getExampleFilePath("decimal.parquet"),
        ROW({"a"}, {DECIMAL(5, 2)}),
        make_row_vector(
            {"a"},
            {
                make_flat_vector(unscaledShortValues, DECIMAL(5, 2)),
            }));

    // Each comparison operator plus BETWEEN against a mid-range literal.
    assertSelectWithFilter(
        {"a"}, {"a < 100.07"}, "", "SELECT a FROM tmp WHERE a < 100.07");
    assertSelectWithFilter(
        {"a"}, {"a <= 100.07"}, "", "SELECT a FROM tmp WHERE a <= 100.07");
    assertSelectWithFilter(
        {"a"}, {"a > 100.07"}, "", "SELECT a FROM tmp WHERE a > 100.07");
    assertSelectWithFilter(
        {"a"}, {"a >= 100.07"}, "", "SELECT a FROM tmp WHERE a >= 100.07");
    assertSelectWithFilter(
        {"a"}, {"a = 100.07"}, "", "SELECT a FROM tmp WHERE a = 100.07");
    assertSelectWithFilter(
        {"a"},
        {"a BETWEEN 100.07 AND 100.12"},
        "",
        "SELECT a FROM tmp WHERE a BETWEEN 100.07 AND 100.12");

    // A literal with a different scale (DECIMAL(5, 1)) does not match the
    // column type, so filter evaluation must fail with a signature error.
    POLLUX_ASSERT_THROW(
        assertSelectWithFilter(
            {"a"}, {"a < 1000.7"}, "", "SELECT a FROM tmp WHERE a < 1000.7"),
        "Scalar function signature is not supported: lt(DECIMAL(5, 2), DECIMAL(5, 1))");
    POLLUX_ASSERT_THROW(
        assertSelectWithFilter(
            {"a"}, {"a = 1000.7"}, "", "SELECT a FROM tmp WHERE a = 1000.7"),
        "Scalar function signature is not supported: eq(DECIMAL(5, 2), DECIMAL(5, 1))");
}

// Reads a single-row MAP(VARCHAR, VARCHAR) column.
TEST_F(ParquetTableScanTest, map) {
    auto mapColumn =
            make_map_vector<StringView, StringView>({{{"name", "gluten"}}});

    loadData(
        getExampleFilePath("types.parquet"),
        ROW({"map"}, {MAP(VARCHAR(), VARCHAR())}),
        make_row_vector({"map"}, {mapColumn}));

    assertSelectWithFilter({"map"}, {}, "", "SELECT map FROM tmp");
}

// Reads a file whose map column contains a null map value.
TEST_F(ParquetTableScanTest, nullMap) {
    auto idColumn = make_constant<std::string>("1", 1);
    auto mapColumn =
            make_nullable_map_vector<std::string, std::string>({std::nullopt});

    loadData(
        getExampleFilePath("null_map.parquet"),
        ROW({"i", "c"}, {VARCHAR(), MAP(VARCHAR(), VARCHAR())}),
        make_row_vector({"i", "c"}, {idColumn, mapColumn}));

    assertSelectWithFilter({"i", "c"}, {}, "", "SELECT i, c FROM tmp");
}

// Reads a file with a single-row struct column. The reference SQL does not
// touch "tmp", so the loaded vector only supplies a placeholder.
TEST_F(ParquetTableScanTest, singleRowStruct) {
    loadData(
        getExampleFilePath("single_row_struct.parquet"),
        ROW({"s"}, {ROW({"a", "b"}, {BIGINT(), BIGINT()})}),
        make_row_vector({"s"}, {make_array_vector<int32_t>({{}})}));

    assertSelectWithFilter({"s"}, {}, "", "SELECT (0, 1)");
}

// Reads a legacy "old-style" repeated int column as an ARRAY(INTEGER).
TEST_F(ParquetTableScanTest, array) {
    loadData(
        getExampleFilePath("old_repeated_int.parquet"),
        ROW({"repeatedInt"}, {ARRAY(INTEGER())}),
        make_row_vector({"repeatedInt"}, {make_array_vector<int32_t>({})}));

    assertSelectWithFilter(
        {"repeatedInt"}, {}, "", "SELECT UNNEST(array[array[1,2,3]])");
}

// Optional (nullable) array column whose elements are required.
TEST_F(ParquetTableScanTest, optArrayReqEle) {
    loadData(
        getExampleFilePath("array_0.parquet"),
        ROW({"_1"}, {ARRAY(VARCHAR())}),
        make_row_vector({"_1"}, {make_array_vector<StringView>({})}));

    assertSelectWithFilter(
        {"_1"},
        {},
        "",
        "SELECT UNNEST(array[array['a', 'b'], array['c', 'd'], array['e', 'f'], array[], null])");
}

// Required (non-null) array column whose elements are required.
TEST_F(ParquetTableScanTest, reqArrayReqEle) {
    loadData(
        getExampleFilePath("array_1.parquet"),
        ROW({"_1"}, {ARRAY(VARCHAR())}),
        make_row_vector({"_1"}, {make_array_vector<StringView>({})}));

    assertSelectWithFilter(
        {"_1"},
        {},
        "",
        "SELECT UNNEST(array[array['a', 'b'], array['c', 'd'], array[]])");
}

// Required (non-null) array column whose elements may be null.
TEST_F(ParquetTableScanTest, reqArrayOptEle) {
    loadData(
        getExampleFilePath("array_2.parquet"),
        ROW({"_1"}, {ARRAY(VARCHAR())}),
        make_row_vector({"_1"}, {make_array_vector<StringView>({})}));

    assertSelectWithFilter(
        {"_1"},
        {},
        "",
        "SELECT UNNEST(array[array['a', null], array[], array[null, 'b']])");
}

// Nested arrays: the file schema is taken from the Parquet footer.
TEST_F(ParquetTableScanTest, arrayOfArrayTest) {
    loadDataWithRowType(
        getExampleFilePath("array_of_array1.parquet"),
        make_row_vector({"_1"}, {make_array_vector<StringView>({})}));

    assertSelectWithFilter(
        {"_1"},
        {},
        "",
        "SELECT UNNEST(array[null, array[array['g', 'h'], null]])");
}

// Required array encoded in the legacy (2-level) Parquet list format.
TEST_F(ParquetTableScanTest, reqArrayLegacy) {
    loadData(
        getExampleFilePath("array_3.parquet"),
        ROW({"element"}, {ARRAY(VARCHAR())}),
        make_row_vector({"element"}, {make_array_vector<StringView>({})}));

    assertSelectWithFilter(
        {"element"},
        {},
        "",
        "SELECT UNNEST(array[array['a', 'b'], array[], array['c', 'd']])");
}

// Applies a remaining filter on a subfield of a struct-of-arrays column. The
// reference SQL does not read "tmp", so the loaded vector is a placeholder.
TEST_F(ParquetTableScanTest, filterOnNestedArray) {
    auto fileSchema =
            ROW({"struct"},
                {ROW({"a0", "a1"}, {ARRAY(VARCHAR()), ARRAY(INTEGER())})});
    loadData(
        getExampleFilePath("struct_of_array.parquet"),
        fileSchema,
        make_row_vector({"unused"}, {make_flat_vector<int32_t>({})}));

    assertSelectWithFilter(
        {"struct"}, {}, "struct.a0 is null", "SELECT ROW(NULL, NULL)");
}

// Verifies that upper-case column names in the file can be scanned through
// lower-case identifiers when the read-as-lower-case session property is on.
TEST_F(ParquetTableScanTest, readAsLowerCase) {
    auto plan = PlanBuilder(pool_.get())
            .tableScan(ROW({"a"}, {BIGINT()}), {}, "")
            .planNode();

    auto executor = std::make_shared<melon::CPUThreadPoolExecutor>(
        std::thread::hardware_concurrency());
    auto queryCtx = core::QueryCtx::create(executor.get());

    // Enable lower-case column name matching for the Hive connector.
    std::unordered_map<std::string, std::string> sessionOverrides;
    sessionOverrides.emplace(
        std::string(
            connector::hive::HiveConfig::kFileColumnNamesReadAsLowerCaseSession),
        "true");
    queryCtx->setConnectorSessionOverridesUnsafe(
        kHiveConnectorId, std::move(sessionOverrides));

    CursorParameters params;
    params.queryCtx = queryCtx;
    params.planNode = plan;

    // Feed a single split for upper.parquet exactly once.
    bool splitsAdded = false;
    auto addSplits = [&](exec::Task *task) {
        if (!splitsAdded) {
            auto const connectorSplits =
                    HiveConnectorTestBase::makeHiveConnectorSplits(
                        {getExampleFilePath("upper.parquet")},
                        /*splitCount=*/1,
                        dwio::common::FileFormat::PARQUET);
            for (const auto &connectorSplit: connectorSplits) {
                task->addSplit("0", exec::Split(connectorSplit));
            }
            task->noMoreSplits("0");
        }
        splitsAdded = true;
    };

    auto result = readCursor(params, addSplits);
    ASSERT_TRUE(waitForTaskCompletion(result.first->task().get()));
    assertEqualResults(
        result.second,
        {make_row_vector({"a"}, {make_flat_vector<int64_t>({0, 1})})});
}

TEST_F(ParquetTableScanTest, rowIndex) {
    static const char *kPath = "file_path";
    // case 1: file not have `_tmp_metadata_row_index`, scan generate it for user.
    auto filePath = getExampleFilePath("sample.parquet");
    loadData(
        filePath,
        ROW({"a", "b", "_tmp_metadata_row_index", kPath},
            {BIGINT(), DOUBLE(), BIGINT(), VARCHAR()}),
        make_row_vector(
            {"a", "b", "_tmp_metadata_row_index", kPath},
            {
                make_flat_vector<int64_t>(20, [](auto row) { return row + 1; }),
                make_flat_vector<double>(20, [](auto row) { return row + 1; }),
                make_flat_vector<int64_t>(20, [](auto row) { return row; }),
                make_flat_vector<std::string>(
                    20, [filePath](auto row) { return filePath; }),
            }),
        std::nullopt,
        std::unordered_map<std::string, std::string>{{kPath, filePath}});
    std::unordered_map<std::string, std::shared_ptr<connector::ColumnHandle> >
            assignments;
    assignments["a"] = std::make_shared<connector::hive::HiveColumnHandle>(
        "a",
        connector::hive::HiveColumnHandle::ColumnType::kRegular,
        BIGINT(),
        BIGINT());
    assignments["b"] = std::make_shared<connector::hive::HiveColumnHandle>(
        "b",
        connector::hive::HiveColumnHandle::ColumnType::kRegular,
        DOUBLE(),
        DOUBLE());
    assignments[kPath] = synthesizedColumn(kPath, VARCHAR());
    assignments["_tmp_metadata_row_index"] =
            std::make_shared<connector::hive::HiveColumnHandle>(
                "_tmp_metadata_row_index",
                connector::hive::HiveColumnHandle::ColumnType::kRowIndex,
                BIGINT(),
                BIGINT());

    assertSelect({"a"}, "SELECT a FROM tmp");
    assertSelectWithAssignments(
        {"a", "_tmp_metadata_row_index"},
        assignments,
        "SELECT a, _tmp_metadata_row_index FROM tmp");
    assertSelectWithAssignments(
        {"_tmp_metadata_row_index", "a"},
        assignments,
        "SELECT _tmp_metadata_row_index, a FROM tmp");
    assertSelectWithAssignments(
        {"_tmp_metadata_row_index"},
        assignments,
        "SELECT _tmp_metadata_row_index FROM tmp");
    assertSelectWithAssignments(
        {kPath, "_tmp_metadata_row_index"},
        assignments,
        fmt::format("SELECT {}, _tmp_metadata_row_index FROM tmp", kPath));

    // case 2: file has `_tmp_metadata_row_index` column, then use user data
    // insteads of generating it.
    loadData(
        getExampleFilePath("sample_with_rowindex.parquet"),
        ROW({"a", "b", "_tmp_metadata_row_index"},
            {BIGINT(), DOUBLE(), BIGINT()}),
        make_row_vector(
            {"a", "b", "_tmp_metadata_row_index"},
            {
                make_flat_vector<int64_t>(20, [](auto row) { return row + 1; }),
                make_flat_vector<double>(20, [](auto row) { return row + 1; }),
                make_flat_vector<int64_t>(20, [](auto row) { return row + 1; }),
            }));

    assertSelect({"a"}, "SELECT a FROM tmp");
    assertSelect(
        {"a", "_tmp_metadata_row_index"},
        "SELECT a, _tmp_metadata_row_index FROM tmp");
}

// The file icebergNullIcebergPartition.parquet was copied from a null
// partition in an Iceberg table created with the below DDL using Spark:
//
// CREATE TABLE iceberg_tmp_parquet_partitioned
//    ( c0 bigint, c1 bigint )
// USING iceberg
// PARTITIONED BY (c1)
// TBLPROPERTIES ('write.format.default' = 'parquet', 'format-version' = 2,
// 'write.delete.mode' = 'merge-on-read') LOCATION
// 's3a://presto-workload/tmp/iceberg_tmp_parquet_partitioned';
//
// INSERT INTO iceberg_tmp_parquet_partitioned
// VALUES (1, 1), (2, null),(3, null);
TEST_F(ParquetTableScanTest, filterNullIcebergPartition) {
    // The file stores only c0; c1 is an Iceberg partition column whose value
    // (null for this partition) is supplied through the split's partition-key
    // map rather than read from the file.
    loadData(
        getExampleFilePath("icebergNullIcebergPartition.parquet"),
        ROW({"c0", "c1"}, {BIGINT(), BIGINT()}),
        make_row_vector(
            {"c0", "c1"},
            {
                make_flat_vector<int64_t>(std::vector<int64_t>{2, 3}),
                make_nullable_flat_vector<int64_t>({std::nullopt, std::nullopt}),
            }),
        std::unordered_map<std::string, std::optional<std::string> >{
            {"c1", std::nullopt}
        });

    auto regularColumn = makeColumnHandle(
        "c0", BIGINT(), BIGINT(), {}, HiveColumnHandle::ColumnType::kRegular);
    auto partitionColumn = makeColumnHandle(
        "c1",
        BIGINT(),
        BIGINT(),
        {},
        HiveColumnHandle::ColumnType::kPartitionKey);
    // Both filter assertions share the same column-handle assignments.
    std::unordered_map<std::string, std::shared_ptr<connector::ColumnHandle> >
            assignments{{"c0", regularColumn}, {"c1", partitionColumn}};

    // Filter the null partition key in both directions.
    assertSelectWithFilter(
        {"c0", "c1"},
        {"c1 IS NOT NULL"},
        "",
        "SELECT c0, c1 FROM tmp WHERE c1 IS NOT NULL",
        assignments);
    assertSelectWithFilter(
        {"c0", "c1"},
        {"c1 IS NULL"},
        "",
        "SELECT c0, c1 FROM tmp WHERE c1 IS NULL",
        assignments);
}

TEST_F(ParquetTableScanTest, sessionTimezone) {
    // Hook into page-header reading so we can observe the timezone that the
    // PageReader was constructed with; it must match the session timezone
    // passed to assertSelectWithTimezone below.
    SCOPED_TESTVALUE_SET(
        "kumo::pollux::parquet::PageReader::readPageHeader",
        std::function<void(PageReader*)>([](PageReader* pageReader) {
            POLLUX_CHECK_EQ(
                pageReader->sessionTimezone()->name(), "Asia/Shanghai");
        }));

    // Scan sample.parquet; the test value above fires during the scan.
    loadData(
        getExampleFilePath("sample.parquet"),
        ROW({"a", "b"}, {BIGINT(), DOUBLE()}),
        make_row_vector(
            {"a", "b"},
            {make_flat_vector<int64_t>(20, [](auto i) { return i + 1; }),
             make_flat_vector<double>(20, [](auto i) { return i + 1; })}));

    assertSelectWithTimezone({"a"}, "SELECT a FROM tmp", "Asia/Shanghai");
}

TEST_F(ParquetTableScanTest, timestampInt64Dictionary) {
    // Timestamps stored as INT64 (microsecond unit) with dictionary encoding.
    WriterOptions writerOptions;
    writerOptions.parquetWriteTimestampUnit = TimestampPrecision::kMicroseconds;
    writerOptions.enableDictionary = true;
    writerOptions.writeInt96AsTimestamp = false;
    testTimestampRead(writerOptions);
}

TEST_F(ParquetTableScanTest, timestampInt64Plain) {
    // Timestamps stored as INT64 (microsecond unit) with plain encoding.
    WriterOptions writerOptions;
    writerOptions.parquetWriteTimestampUnit = TimestampPrecision::kMicroseconds;
    writerOptions.enableDictionary = false;
    writerOptions.writeInt96AsTimestamp = false;
    testTimestampRead(writerOptions);
}

TEST_F(ParquetTableScanTest, timestampInt96Dictionary) {
    // Timestamps stored as INT96 with dictionary encoding.
    WriterOptions writerOptions;
    writerOptions.parquetWriteTimestampUnit = TimestampPrecision::kMicroseconds;
    writerOptions.enableDictionary = true;
    writerOptions.writeInt96AsTimestamp = true;
    testTimestampRead(writerOptions);
}

TEST_F(ParquetTableScanTest, timestampInt96Plain) {
    // Timestamps stored as INT96 with plain encoding.
    WriterOptions writerOptions;
    writerOptions.parquetWriteTimestampUnit = TimestampPrecision::kMicroseconds;
    writerOptions.enableDictionary = false;
    writerOptions.writeInt96AsTimestamp = true;
    testTimestampRead(writerOptions);
}

TEST_F(ParquetTableScanTest, timestampConvertedType) {
    // Reads tmmillis_i64.parquet, whose timestamps carry a millisecond
    // converted type, and compares against the same instants parsed from
    // string literals.
    const std::vector<std::string_view> literals = {
        "1970-01-01 00:00:00.010",
        "1970-01-01 00:00:00.010",
        "1970-01-01 00:00:00.010",
    };

    std::vector<Timestamp> timestamps;
    timestamps.reserve(literals.size());
    for (const auto literal: literals) {
        // Parse using Presto cast semantics; any parse failure is a user error.
        timestamps.push_back(
            util::fromTimestampString(
                literal.data(),
                literal.size(),
                util::TimestampParseMode::kPrestoCast)
                .thenOrThrow(melon::identity, [&](const Status& status) {
                    POLLUX_USER_FAIL("{}", status.message());
                }));
    }

    const auto data = make_row_vector(
        {"time"}, {make_flat_vector<Timestamp>(timestamps)});
    loadData(
        getExampleFilePath("tmmillis_i64.parquet"),
        as_row_type(data->type()),
        data);

    assertSelectWithFilter({"time"}, {}, "", "SELECT time from tmp");
}

TEST_F(ParquetTableScanTest, timestampPrecisionMicrosecond) {
    // Writes INT96 timestamps carrying nanosecond detail, then reads them back
    // with the session read-timestamp unit set to "6" (microseconds). The
    // sub-microsecond part is expected to be dropped on read.
    constexpr int kNumRows = 10;
    auto input = make_row_vector({
        make_flat_vector<Timestamp>(
            kNumRows, [](auto i) { return Timestamp(i, i * 1'001'001); }),
    });
    auto outputType = as_row_type(input->type());
    auto dataFile = TempFilePath::create();
    WriterOptions writerOptions;
    writerOptions.writeInt96AsTimestamp = true;
    writeToParquetFile(dataFile->getPath(), {input}, writerOptions);

    // Build a query context with the microsecond read unit as a connector
    // session override.
    std::shared_ptr<melon::Executor> executor =
            std::make_shared<melon::CPUThreadPoolExecutor>(
                std::thread::hardware_concurrency());
    std::shared_ptr<core::QueryCtx> queryCtx =
            core::QueryCtx::create(executor.get());
    std::unordered_map<std::string, std::string> sessionProperties = {
        {
            std::string(connector::hive::HiveConfig::kReadTimestampUnitSession),
            "6"
        }
    };
    queryCtx->setConnectorSessionOverridesUnsafe(
        kHiveConnectorId, std::move(sessionProperties));

    CursorParameters params;
    params.queryCtx = queryCtx;
    params.planNode = PlanBuilder().tableScan(outputType).planNode();

    // Feed the single file as one split, exactly once.
    bool splitsDone = false;
    auto addSplits = [&](exec::Task* task) {
        if (!splitsDone) {
            for (const auto& split:
                 HiveConnectorTestBase::makeHiveConnectorSplits(
                     {dataFile->getPath()},
                     /*splitCount=*/1,
                     dwio::common::FileFormat::PARQUET)) {
                task->addSplit("0", exec::Split(split));
            }
            task->noMoreSplits("0");
            splitsDone = true;
        }
    };
    auto result = readCursor(params, addSplits);
    ASSERT_TRUE(waitForTaskCompletion(result.first->task().get()));

    // The nanoseconds component is truncated to whole microseconds.
    auto expected = make_row_vector({
        make_flat_vector<Timestamp>(
            kNumRows, [](auto i) { return Timestamp(i, i * 1'001'000); }),
    });
    assertEqualResults({expected}, result.second);
}

TEST_F(ParquetTableScanTest, testColumnNotExists) {
    // The table schema declares three columns that are absent from the file
    // (a scalar, an array, and a map); the scan must return nulls for them.
    //
    // File schema (sample.parquet):
    //  message schema {
    //   optional int64 a;
    //   optional double b;
    //  }
    auto tableSchema = ROW(
        {"a", "b", "not_exists", "not_exists_array", "not_exists_map"},
        {BIGINT(),
         DOUBLE(),
         BIGINT(),
         ARRAY(VARBINARY()),
         MAP(VARCHAR(), BIGINT())});
    loadData(
        getExampleFilePath("sample.parquet"),
        tableSchema,
        make_row_vector(
            {"a", "b"},
            {make_flat_vector<int64_t>(20, [](auto i) { return i + 1; }),
             make_flat_vector<double>(20, [](auto i) { return i + 1; })}));

    assertSelectWithDataColumns(
        {"a", "b", "not_exists", "not_exists_array", "not_exists_map"},
        tableSchema,
        "SELECT a, b, NULL, NULL, NULL FROM tmp");
}

TEST_F(ParquetTableScanTest, schemaMatchWithComplexTypes) {
    // Verifies column resolution for nested types when the table schema uses
    // different column/field names than the data file: position-based matching
    // (the default) must resolve fields inside maps and arrays, while
    // name-based matching (enabled via session property below) finds no
    // matching names and yields nulls.
    vector_size_t kSize = 100;
    // Row-typed payload shared by the map values and the array elements;
    // 4 entries per top-level row.
    auto valuesVector = make_row_vector(
        {"aa", "bb"},
        {
            make_flat_vector<int64_t>(kSize * 4, [](auto row) { return row; }),
            make_flat_vector<int32_t>(kSize * 4, [](auto row) { return row; })
        });
    auto keysVector =
            make_flat_vector<int64_t>(kSize * 4, [](auto row) { return row % 4; });
    // Each top-level row owns 4 consecutive slots in the nested vectors.
    std::vector<vector_size_t> offsets;
    for (auto i = 0; i < kSize; i++) {
        offsets.push_back(i * 4);
    }
    auto map_vector = make_map_vector(offsets, keysVector, valuesVector);
    auto array_vector = make_array_vector(offsets, valuesVector);
    auto primitiveVector = make_flat_vector(offsets);

    RowVectorPtr dataFileVectors = make_row_vector(
        {"p", "m", "a"},
        {primitiveVector, map_vector, array_vector}); // columns in data file

    const std::shared_ptr<exec::test::TempDirectoryPath> dataFileFolder =
            exec::test::TempDirectoryPath::create();
    auto filePath = dataFileFolder->getPath() + "/" + "nested_data.parquet";
    WriterOptions options;
    options.writeInt96AsTimestamp = false;
    writeToParquetFile(filePath, {dataFileVectors}, options);

    // Create a row type with columns having different names than in the file.
    auto structType = ROW({"aa1", "bb1"}, {BIGINT(), INTEGER()});
    auto rowType =
            ROW({"p1", "m1", "a1"},
                {
                    {
                        INTEGER(),
                        MAP(BIGINT(), structType),
                        ARRAY(structType)
                    }
                }); // column names in table metadata

    // Project nested fields through both the map and the array to exercise
    // subfield resolution.
    auto op =
            PlanBuilder()
            .startTableScan()
            .outputType(rowType)
            .dataColumns(rowType)
            .endTableScan()
            .project({"p1", "m1[0].aa1", "m1[1].bb1", "a1[1].aa1", "a1[2].bb1"})
            .planNode();

    auto split = makeSplit(filePath);
    auto result = AssertQueryBuilder(op).split(split).copyResults(pool());

    ASSERT_EQ(result->size(), kSize);
    auto rows = result->as<RowVector>();
    ASSERT_TRUE(rows);
    ASSERT_EQ(rows->childrenSize(), 5);

    // With position-based matching the renamed columns still resolve to the
    // file's data.
    assertEqualVectors(rows->childAt(0), primitiveVector);

    // m1[0].aa1 / a1[1].aa1: the "aa" values at the first slot of each row.
    auto expected1 =
            make_flat_vector<int64_t>(kSize, [](auto row) { return row * 4; });
    assertEqualVectors(rows->childAt(1), expected1);
    assertEqualVectors(rows->childAt(3), expected1);

    // m1[1].bb1 / a1[2].bb1: the "bb" values at the second slot of each row.
    auto expected2 =
            make_flat_vector<int>(kSize, [](auto row) { return row * 4 + 1; });
    assertEqualVectors(rows->childAt(2), expected2);
    assertEqualVectors(rows->childAt(4), expected2);

    // Now run query with column mapping using names - we should not be able to
    // find any names.
    result = AssertQueryBuilder(op)
            .connectorSessionProperty(
                kHiveConnectorId,
                connector::hive::HiveConfig::kParquetUseColumnNamesSession,
                "true")
            .split(split)
            .copyResults(pool());
    rows = result->as<RowVector>();
    // All projected columns must come back entirely null: BIGINT for the
    // "aa1" projections, INTEGER for the rest.
    auto nullBigIntVector = make_flat_vector<int64_t>(
        kSize, [](auto row) { return row; }, [](auto row) { return true; });
    auto nullIntVector = make_flat_vector<int>(
        kSize, [](auto row) { return row; }, [](auto row) { return true; });
    for (const auto index: std::vector<int>({0, 2, 4})) {
        assertEqualVectors(rows->childAt(index), nullIntVector);
    }
    for (const auto index: std::vector<int>({1, 3})) {
        assertEqualVectors(rows->childAt(index), nullBigIntVector);
    }
}

TEST_F(ParquetTableScanTest, schemaMatch) {
    // Exercises schema evolution and column-mapping scenarios over a file with
    // columns {c1, c2}: position vs. name matching, type mismatches, and
    // removed/added columns in the table schema.
    vector_size_t kSize = 100;
    std::shared_ptr<memory::MemoryPool> leafPool =
            rootPool_->addLeafChild("ParquetTableScanTest");
    RowVectorPtr dataFileVectors = make_row_vector(
        {"c1", "c2"},
        {
            make_flat_vector<int64_t>(kSize, [](auto row) { return row; }),
            make_flat_vector<int64_t>(kSize, [](auto row) { return row * 4; })
        });

    const std::shared_ptr<exec::test::TempDirectoryPath> dataFileFolder =
            exec::test::TempDirectoryPath::create();
    auto filePath = dataFileFolder->getPath() + "/" + "data.parquet";
    WriterOptions options;
    options.writeInt96AsTimestamp = false;
    writeToParquetFile(filePath, {dataFileVectors}, options);

    // Table schema renames the columns to {c2, c3}; default position-based
    // matching maps them onto the file's {c1, c2}.
    auto rowType = ROW({"c2", "c3"}, {BIGINT(), BIGINT()});
    auto op = PlanBuilder()
            .startTableScan()
            .outputType(rowType)
            .dataColumns(rowType)
            .endTableScan()
            .planNode();

    auto split = makeSplit(filePath);
    auto result = AssertQueryBuilder(op).split(split).copyResults(pool());
    auto rows = result->as<RowVector>();

    assertEqualVectors(rows->childAt(0), dataFileVectors->childAt(0));
    assertEqualVectors(rows->childAt(1), dataFileVectors->childAt(1));

    // Test when the table schema has the same column names as the file schema
    // but a different data type: c3 declared as VARCHAR. With position-based
    // matching this is a type mismatch and must throw.
    auto rowType1 = ROW({"c2", "c3"}, {BIGINT(), VARCHAR()});
    op = PlanBuilder()
            .startTableScan()
            .outputType(rowType1)
            .dataColumns(rowType1)
            .endTableScan()
            .planNode();
    EXPECT_THROW(
        AssertQueryBuilder(op).split(split).copyResults(pool()),
        PolluxRuntimeError);

    // Now run the same query with column mapping by name: only c2 matches
    // between the file schema and the table schema, so c3 comes back null.
    op = PlanBuilder()
            .startTableScan()
            .outputType(rowType1)
            .dataColumns(rowType1)
            .endTableScan()
            .planNode();

    result = AssertQueryBuilder(op)
            .connectorSessionProperty(
                kHiveConnectorId,
                connector::hive::HiveConfig::kParquetUseColumnNamesSession,
                "true")
            .split(split)
            .copyResults(pool());

    rows = result->as<RowVector>();
    // All-null VARCHAR vector used as the expected value for unmatched/added
    // columns below.
    auto nullVector = make_flat_vector<std::string>(
        kSize, [](auto row) { return "row"; }, [](auto row) { return true; });
    // Name matching: table c2 resolves to the file's c2 (second file column).
    assertEqualVectors(rows->childAt(0), dataFileVectors->childAt(1));
    assertEqualVectors(rows->childAt(1), nullVector);

    // Scan with type mismatch in the 1st item (BIGINT vs REAL)
    rowType = ROW({"c1", "c2"}, {{REAL(), BIGINT()}});
    op = PlanBuilder()
            .startTableScan()
            .outputType(rowType)
            .dataColumns(rowType)
            .endTableScan()
            .project({"c1"})
            .planNode();

    EXPECT_THROW(
        AssertQueryBuilder(op).split(split).copyResults(pool()),
        PolluxRuntimeError);

    // Schema evolution remove column.
    rowType = ROW({"c1"}, {{BIGINT()}});
    op = PlanBuilder()
            .startTableScan()
            .outputType(rowType)
            .dataColumns(rowType)
            .endTableScan()
            .project({"c1"})
            .planNode();

    result = AssertQueryBuilder(op).split(split).copyResults(pool());
    rows = result->as<RowVector>();
    assertEqualVectors(rows->childAt(0), dataFileVectors->childAt(0));

    // Schema evolution add column: the extra c3 must come back null.
    rowType = ROW({"c1", "c2", "c3"}, {{BIGINT(), BIGINT(), VARCHAR()}});
    op = PlanBuilder()
            .startTableScan()
            .outputType(rowType)
            .dataColumns(rowType)
            .endTableScan()
            .project({"c1", "c2", "c3"})
            .planNode();

    result = AssertQueryBuilder(op).split(split).copyResults(pool());
    rows = result->as<RowVector>();
    assertEqualVectors(rows->childAt(0), dataFileVectors->childAt(0));
    assertEqualVectors(rows->childAt(1), dataFileVectors->childAt(1));
    assertEqualVectors(rows->childAt(2), nullVector);
}

TEST_F(ParquetTableScanTest, deltaByteArray) {
    // Reads DELTA_BYTE_ARRAY-encoded strings. The expected values live in a
    // separate DuckDB table; the vector passed to loadData is a placeholder.
    auto expectedStrings =
            make_flat_vector<StringView>({"axis", "axle", "babble", "babyhood"});
    createDuckDbTable(
        "expected", {make_row_vector({"a"}, {expectedStrings})});

    auto placeholder = make_flat_vector<StringView>({{}});
    loadData(
        getExampleFilePath("delta_byte_array.parquet"),
        ROW({"a"}, {VARCHAR()}),
        make_row_vector({"a"}, {placeholder}));
    assertSelect({"a"}, "SELECT a from expected");
}

TEST_F(ParquetTableScanTest, booleanRle) {
    // Round-trips RLE-encoded boolean columns on parquet data page v2:
    // all-true / all-false with and without nulls, plus a random mix with
    // random nulls.
    WriterOptions options;
    options.enableDictionary = false;
    options.encoding = kumo::pollux::parquet::arrow::Encoding::RLE;
    options.useParquetDataPageV2 = true;

    auto allTrue = [](vector_size_t row) -> bool { return true; };
    auto allFalse = [](vector_size_t row) -> bool { return false; };
    // isNullAt mask: every row except the first is null.
    auto nonNullAtFirst = [](vector_size_t row) -> bool { return row != 0; };
    // rand() makes the data arbitrary but still deterministic for the
    // assertion: the same vector is written to the file and used as expected.
    auto randomTrueFalse = [](vector_size_t row) -> bool {
        return std::rand() % 2 == 0;
    };
    auto randomNull = [](vector_size_t row) -> bool {
        return std::rand() % 2 == 0;
    };

    auto vector = make_row_vector(
        {"c0", "c1", "c2", "c3", "c4"},
        {
            make_flat_vector<bool>(100, allTrue, nonNullAtFirst),
            make_flat_vector<bool>(100, allFalse, nonNullAtFirst),
            make_flat_vector<bool>(100, allTrue),
            make_flat_vector<bool>(100, allFalse),
            make_flat_vector<bool>(100, randomTrueFalse, randomNull),
        });
    auto schema = as_row_type(vector->type());
    auto file = TempFilePath::create();
    writeToParquetFile(file->getPath(), {vector}, options);
    loadData(file->getPath(), schema, vector);

    // Note: the per-column handles previously constructed here were never
    // passed to any assertion, so they have been removed.
    assertSelect({"c0"}, "SELECT c0 FROM tmp");
    assertSelect({"c1"}, "SELECT c1 FROM tmp");
    assertSelect({"c2"}, "SELECT c2 FROM tmp");
    assertSelect({"c3"}, "SELECT c3 FROM tmp");
    assertSelect({"c4"}, "SELECT c4 FROM tmp");
}

TEST_F(ParquetTableScanTest, singleBooleanRle) {
    // Single-row RLE boolean columns on data page v2: one true, one false,
    // and one null value.
    WriterOptions options;
    options.enableDictionary = false;
    options.encoding = kumo::pollux::parquet::arrow::Encoding::RLE;
    options.useParquetDataPageV2 = true;

    auto vector = make_row_vector(
        {"c0", "c1", "c2"},
        {
            make_flat_vector<bool>(std::vector<bool>{true}),
            make_flat_vector<bool>(std::vector<bool>{false}),
            make_nullable_flat_vector<bool>({std::nullopt}),
        });
    auto schema = as_row_type(vector->type());
    auto file = TempFilePath::create();
    writeToParquetFile(file->getPath(), {vector}, options);
    loadData(file->getPath(), schema, vector);

    // Note: the per-column handles previously constructed here were never
    // passed to any assertion, so they have been removed.
    assertSelect({"c0"}, "SELECT c0 FROM tmp");
    assertSelect({"c1"}, "SELECT c1 FROM tmp");
    assertSelect({"c2"}, "SELECT c2 FROM tmp");
}

int main(int argc, char **argv) {
    // InitGoogleTest runs first so gtest consumes its own flags before the
    // remaining argv is handed to melon::Init (false = do not remove flags).
    testing::InitGoogleTest(&argc, argv);
    melon::Init init{&argc, &argv, false};
    return RUN_ALL_TESTS();
}
