// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <pollux/exec/hash_probe.h>
#include <pollux/common/base/counters.h>
#include <pollux/common/base/stats_reporter.h>
#include <pollux/common/testutil/test_value.h>
#include <pollux/exec/operator_utils.h>
#include <pollux/exec/task.h>
#include <pollux/expression/field_reference.h>

using kumo::pollux::common::testutil::TestValue;

namespace kumo::pollux::exec {
    namespace {
        // Batch size used when iterating the row container.
        // NOTE(review): not referenced in this portion of the file — presumably
        // used further below; confirm before removing.
        constexpr int kBatchSize = 1024;
    } // namespace

    // static
    RowTypePtr HashProbe::makeTableType(
        const RowType *type,
        const std::vector<std::shared_ptr<const core::FieldAccessTypedExpr> > &
        keys) {
        std::vector<std::string> names;
        std::vector<TypePtr> types;
        std::unordered_set<column_index_t> keyChannels(keys.size());
        names.reserve(type->size());
        types.reserve(type->size());
        for (const auto &key: keys) {
            auto channel = type->getChildIdx(key->name());
            names.emplace_back(type->nameOf(channel));
            types.emplace_back(type->childAt(channel));
            keyChannels.insert(channel);
        }
        for (auto i = 0; i < type->size(); ++i) {
            if (keyChannels.find(i) == keyChannels.end()) {
                names.emplace_back(type->nameOf(i));
                types.emplace_back(type->childAt(i));
            }
        }
        return ROW(std::move(names), std::move(types));
    }

    namespace {
        // Copies values from 'rows' of 'table' into 'resultVectors' according to
        // 'projections'. Flat, writable children of the result are reused;
        // anything else is freshly allocated.
        void extractColumns(
            BaseHashTable *table,
            melon::Range<char * const*> rows,
            melon::Range<const IdentityProjection *> projections,
            memory::MemoryPool *pool,
            const std::vector<TypePtr> &resultTypes,
            std::vector<VectorPtr> &resultVectors) {
            POLLUX_CHECK_EQ(resultTypes.size(), resultVectors.size());
            for (const auto &projection: projections) {
                const auto outChannel = projection.outputChannel;
                POLLUX_CHECK_LT(outChannel, resultVectors.size());

                auto &target = resultVectors[outChannel];
                // TODO: Consider reuse of complex types.
                const bool reusable = target &&
                        BaseVector::is_vector_writable(target) &&
                        target->is_flat_encoding();
                if (!reusable) {
                    target =
                            BaseVector::create(resultTypes[outChannel], rows.size(), pool);
                }
                target->resize(rows.size());
                table->extractColumn(rows, projection.inputChannel, target);
            }
        }

        // Maps an operator state to the blocking reason reported to the driver.
        BlockingReason fromStateToBlockingReason(ProbeOperatorState state) {
            if (state == ProbeOperatorState::kRunning ||
                state == ProbeOperatorState::kFinish) {
                return BlockingReason::kNotBlocked;
            }
            if (state == ProbeOperatorState::kWaitForBuild) {
                return BlockingReason::kWaitForJoinBuild;
            }
            if (state == ProbeOperatorState::kWaitForPeers) {
                return BlockingReason::kWaitForJoinProbe;
            }
            POLLUX_UNREACHABLE("Unexpected state: ", probeOperatorStateName(state));
        }

        // Generate partition number set from spill partition id set.
        SpillPartitionNumSet toPartitionNumSet(
            const SpillPartitionIdSet &partitionIdSet) {
            SpillPartitionNumSet numbers;
            numbers.reserve(partitionIdSet.size());
            for (const auto &id: partitionIdSet) {
                numbers.insert(id.partitionNumber());
            }
            return numbers;
        }

        // Returns a mutable pointer into 'buffer' with room for 'size' elements
        // of T, reallocating from 'pool' when the buffer is absent or too small.
        // The buffer must not be shared (checked) so mutation is safe.
        template<typename T>
        T *initBuffer(BufferPtr &buffer, vector_size_t size, memory::MemoryPool *pool) {
            POLLUX_CHECK(!buffer || buffer->isMutable());
            const bool needsAllocation =
                    !buffer || buffer->size() < size * sizeof(T);
            if (needsAllocation) {
                buffer = AlignedBuffer::allocate<T>(size, pool);
            }
            return buffer->asMutable<T>();
        }
    } // namespace

    // Constructs the probe operator. The spill config is created only when the
    // join node allows spilling under the current query config. Member init
    // order below must follow the declaration order in the class.
    HashProbe::HashProbe(
        int32_t operatorId,
        DriverCtx *driverCtx,
        const std::shared_ptr<const core::HashJoinNode> &joinNode)
        : Operator(
              driverCtx,
              joinNode->outputType(),
              operatorId,
              joinNode->id(),
              "HashProbe",
              joinNode->canSpill(driverCtx->queryConfig())
                  ? driverCtx->makeSpillConfig(operatorId)
                  : std::nullopt),
          outputBatchSize_{outputBatchRows()},
          // 'joinNode' is a const reference, so the previous std::move() here
          // was a no-op copy (clang-tidy performance-move-const-arg); copy
          // explicitly instead.
          joinNode_(joinNode),
          joinType_{joinNode_->joinType()},
          nullAware_{joinNode_->isNullAware()},
          probeType_(joinNode_->sources()[0]->outputType()),
          joinBridge_(operatorCtx_->task()->getHashJoinBridgeLocked(
              operatorCtx_->driverCtx()->splitGroupId,
              planNodeId())),
          filterResult_(1),
          outputTableRowsCapacity_(outputBatchSize_) {
        POLLUX_CHECK_NOT_NULL(joinBridge_);
    }

    void HashProbe::initialize() {
        Operator::initialize();

        POLLUX_CHECK(hashers_.empty());
        // One VectorHasher per probe-side join key; used later to decode key
        // vectors and detect null keys.
        hashers_ = createVectorHashers(probeType_, joinNode_->leftKeys());

        const auto numKeys = hashers_.size();
        keyChannels_.reserve(numKeys);
        for (auto &hasher: hashers_) {
            keyChannels_.push_back(hasher->channel());
        }

        POLLUX_CHECK_NULL(lookup_);
        lookup_ = std::make_unique<HashLookup>(hashers_, pool());
        // Table row type puts build-side keys first, then dependent columns
        // (see makeTableType).
        auto buildType = joinNode_->sources()[1]->outputType();
        auto tableType = makeTableType(buildType.get(), joinNode_->rightKeys());
        if (joinNode_->filter()) {
            initializeFilter(joinNode_->filter(), probeType_, tableType);
        }

        // Map probe-side input columns to output channels. For right/full joins
        // the probe columns are not identity-projected because output rows may
        // not correspond 1:1 to input rows.
        size_t numIdentityProjections = 0;
        for (auto i = 0; i < probeType_->size(); ++i) {
            auto &name = probeType_->nameOf(i);
            auto outIndex = outputType_->getChildIdxIfExists(name);
            if (!outIndex.has_value()) {
                continue;
            }
            projectedInputColumns_[i] = *outIndex;
            if (!isRightJoin(joinType_) && !isFullJoin(joinType_)) {
                identityProjections_.emplace_back(i, *outIndex);
                if (*outIndex == i) {
                    ++numIdentityProjections;
                }
            }
        }

        // Map build-side table columns to output channels.
        for (column_index_t i = 0; i < outputType_->size(); ++i) {
            auto tableChannel = tableType->getChildIdxIfExists(outputType_->nameOf(i));
            if (tableChannel.has_value()) {
                tableOutputProjections_.emplace_back(tableChannel.value(), i);
            }
        }

        // Output is a pure pass-through of the probe input only when every probe
        // column maps to the same output channel and nothing comes from the
        // build side.
        if (numIdentityProjections == probeType_->size() &&
            tableOutputProjections_.empty()) {
            isIdentityProjection_ = true;
        }

        if (nullAware_) {
            filterTableResult_.resize(1);
        }
    }

    // Compiles the join filter expression and builds the row type of its input,
    // recording how probe-side and build-side (table) columns map into the
    // filter's input channels. Fails if a filter field resolves to neither side.
    void HashProbe::initializeFilter(
        const core::TypedExprPtr &filter,
        const RowTypePtr &probeType,
        const RowTypePtr &tableType) {
        std::vector<core::TypedExprPtr> filters = {filter};
        filter_ =
                std::make_unique<ExprSet>(std::move(filters), operatorCtx_->execCtx());

        column_index_t nextChannel = 0;
        std::vector<std::string> names;
        std::vector<TypePtr> types;
        const auto &fields = filter_->expr(0)->distinctFields();
        names.reserve(fields.size());
        types.reserve(fields.size());
        for (const auto &field: fields) {
            const auto &name = field->field();
            // Try the probe side first, then the build-side table.
            if (auto probeChannel = probeType->getChildIdxIfExists(name)) {
                filterInputProjections_.emplace_back(*probeChannel, nextChannel);
                ++nextChannel;
                names.emplace_back(probeType->nameOf(*probeChannel));
                types.emplace_back(probeType->childAt(*probeChannel));
                continue;
            }
            if (auto tableChannel = tableType->getChildIdxIfExists(name)) {
                filterTableProjections_.emplace_back(*tableChannel, nextChannel);
                ++nextChannel;
                names.emplace_back(tableType->nameOf(*tableChannel));
                types.emplace_back(tableType->childAt(*tableChannel));
                continue;
            }
            POLLUX_FAIL(
                "Join filter field {} not in probe or build input", field->toString());
        }

        filterInputType_ = ROW(std::move(names), std::move(types));
    }

    // Sets up a spiller for incoming probe input when the build side reported
    // spilled partitions. No-op when 'spillPartitionIds' is empty.
    void HashProbe::maybeSetupInputSpiller(
        const SpillPartitionIdSet &spillPartitionIds) {
        POLLUX_CHECK_NULL(inputSpiller_);
        POLLUX_CHECK(spillInputPartitionIds_.empty());

        spillInputPartitionIds_ = spillPartitionIds;
        if (spillInputPartitionIds_.empty()) {
            return;
        }

        // If 'spillInputPartitionIds_' is not empty, then we set up a spiller to
        // spill the incoming probe inputs. The hash bit range starts at the bit
        // offset of the (common) partition level reported by the build side.
        inputSpiller_ = std::make_unique<NoRowContainerSpiller>(
            probeType_,
            HashBitRange(
                spillInputPartitionIds_.begin()->partitionBitOffset(),
                spillInputPartitionIds_.begin()->partitionBitOffset() +
                spillConfig()->numPartitionBits),
            spillConfig(),
            &spillStats_);
        // Set the spill partitions to the corresponding ones at the build side. The
        // hash probe operator itself won't trigger any spilling.
        inputSpiller_->setPartitionsSpilled(
            toPartitionNumSet(spillInputPartitionIds_));

        // Partition function and per-partition scratch state used by spillInput().
        spillHashFunction_ = std::make_unique<HashPartitionFunction>(
            inputSpiller_->hashBits(), probeType_, keyChannels_);
        spillInputIndicesBuffers_.resize(spillHashFunction_->numPartitions());
        rawSpillInputIndicesBuffers_.resize(spillHashFunction_->numPartitions());
        numSpillInputs_.resize(spillHashFunction_->numPartitions(), 0);
        // If we have received the no-more-input signal from either the source or
        // restored spill input, then we shall just finish the spiller and record
        // the spilled partition set accordingly.
        if (noMoreSpillInput_) {
            inputSpiller_->finishSpill(inputSpillPartitionSet_);
        }
    }

    void HashProbe::maybeSetupSpillInputReader(
        const std::optional<SpillPartitionId> &restoredPartitionId) {
        POLLUX_CHECK_NULL(spillInputReader_);
        if (!restoredPartitionId.has_value()) {
            return;
        }
        // If 'restoredPartitionId' is not null, then 'table_' is built from the
        // spilled build data. Create an unsorted reader to read the probe inputs from
        // the corresponding spilled probe partition on disk.
        auto iter = inputSpillPartitionSet_.find(restoredPartitionId.value());
        POLLUX_CHECK(iter != inputSpillPartitionSet_.end());
        auto partition = std::move(iter->second);
        POLLUX_CHECK_EQ(partition->id(), restoredPartitionId.value());
        spillInputReader_ = partition->createUnorderedReader(
            spillConfig_->readBufferSize, pool(), &spillStats_);
        inputSpillPartitionSet_.erase(iter);
    }

    // Estimates the per-row byte size of the table columns extracted into the
    // output: 'totalFixedColumnsBytes' plus statistics-based estimates for the
    // variable-width columns in 'varSizedColumns'. Returns std::nullopt when
    // stats are unavailable or the size distribution is too skewed to trust.
    std::optional<uint64_t> HashProbe::estimatedRowSize(
        const std::vector<vector_size_t> &varSizedColumns,
        uint64_t totalFixedColumnsBytes) {
        // If max/avg meets or exceeds this ratio, sizes are considered too
        // skewed for a safe estimate. (constexpr: compile-time constant, no
        // dynamic initialization.)
        static constexpr double kToleranceRatio = 10.0;
        std::vector<RowColumn::Stats> varSizeListColumnsStats;
        varSizeListColumnsStats.reserve(varSizedColumns.size());
        // Range-for instead of the original index loop; behavior is unchanged.
        for (const auto column: varSizedColumns) {
            const auto statsOpt = columnStats(column);
            if (!statsOpt.has_value() || !statsOpt->minMaxColumnStatsValid()) {
                return std::nullopt;
            }
            varSizeListColumnsStats.push_back(statsOpt.value());
        }

        uint64_t totalAvgBytes{totalFixedColumnsBytes};
        uint64_t totalMaxBytes{totalFixedColumnsBytes};
        for (const auto &stats: varSizeListColumnsStats) {
            totalAvgBytes += stats.avgBytes();
            totalMaxBytes += stats.maxBytes();
        }
        if (totalAvgBytes == 0) {
            if (totalMaxBytes == 0) {
                return 0;
            }
            // Return nullopt to prevent memory exploding in extreme size skew cases:
            // e.g. 1 row very large and all other rows of size 0.
            return std::nullopt;
        }
        if (totalMaxBytes / totalAvgBytes >= kToleranceRatio) {
            return std::nullopt;
        }
        // Make the total per batch size to be bounded by 2x 'outputBatchSize_':
        // worst case size = (outputBatchSize_ / estimated size) * totalMaxBytes
        return (totalMaxBytes + totalAvgBytes) / 2;
    }

    std::optional<RowColumn::Stats> HashProbe::columnStats(
        int32_t columnIndex) const {
        std::vector<RowColumn::Stats> columnStats;
        const auto rowContainers = table_->allRows();
        for (const auto *rowContainer: rowContainers) {
            POLLUX_CHECK_NOT_NULL(rowContainer);
            auto statsOpt = rowContainer->columnStats(columnIndex);
            if (!statsOpt.has_value()) {
                return std::nullopt;
            }
            columnStats.push_back(statsOpt.value());
        }
        return RowColumn::Stats::merge(columnStats);
    }

    void HashProbe::initializeResultIter() {
        POLLUX_CHECK_NOT_NULL(table_);
        if (resultIter_ != nullptr) {
            return;
        }
        std::vector<vector_size_t> listColumns;
        listColumns.reserve(tableOutputProjections_.size());
        for (const auto &projection: tableOutputProjections_) {
            listColumns.push_back(projection.inputChannel);
        }
        std::vector<vector_size_t> varSizeListColumns;
        uint64_t fixedSizeListColumnsSizeSum{0};
        varSizeListColumns.reserve(tableOutputProjections_.size());
        for (const auto column: listColumns) {
            if (table_->rows()->columnTypes()[column]->isFixedWidth()) {
                fixedSizeListColumnsSizeSum += table_->rows()->fixedSizeAt(column);
            } else {
                varSizeListColumns.push_back(column);
            }
        }

        auto rowSizeEstimation =
                estimatedRowSize(varSizeListColumns, fixedSizeListColumnsSizeSum);
        // TODO: Make tolerance ratio configurable if needed.
        resultIter_ = std::make_unique<BaseHashTable::JoinResultIterator>(
            std::move(varSizeListColumns),
            fixedSizeListColumnsSizeSum,
            rowSizeEstimation);
    }

    // Fetches the hash table from the join bridge, blocking on 'future_' if the
    // build side is not finished. On success, wires up spill readers/spillers
    // and applies empty-build shortcuts and dynamic-filter pushdown.
    void HashProbe::asyncWaitForHashTable() {
        checkRunning();
        POLLUX_CHECK_NULL(table_);

        // Release any reserved memory before wait for next round of hash join in case
        // of disk spilling has been triggered.
        pool()->release();

        auto hashBuildResult = joinBridge_->tableOrFuture(&future_);
        if (!hashBuildResult.has_value()) {
            // Table not ready yet; block until the build side completes.
            POLLUX_CHECK(future_.valid());
            setState(ProbeOperatorState::kWaitForBuild);
            return;
        }

        if (hashBuildResult->hasNullKeys) {
            POLLUX_CHECK(nullAware_);
            if (isAntiJoin(joinType_) && !joinNode_->filter()) {
                // Null-aware anti join with null keys on the build side without a filter
                // always returns nothing.
                // The flag must be set on the first (and only) built 'table_'.
                POLLUX_CHECK(inputSpillPartitionSet_.empty());
                noMoreInput();
                return;
            }
            buildSideHasNullKeys_ = true;
        }

        table_ = std::move(hashBuildResult->table);
        initializeResultIter();

        POLLUX_CHECK_NOT_NULL(table_);

        maybeSetupSpillInputReader(hashBuildResult->restoredPartitionId);
        maybeSetupInputSpiller(hashBuildResult->spillPartitionIds);
        checkMaxSpillLevel(hashBuildResult->restoredPartitionId);

        if (table_->numDistinct() == 0) {
            // Empty build side: either finish early or keep consuming (and
            // possibly spilling) probe input, depending on join type and config.
            if (skipProbeOnEmptyBuild()) {
                if (!needToSpillInput()) {
                    if (isSpillInput() ||
                        operatorCtx_->driverCtx()
                        ->queryConfig()
                        .hashProbeFinishEarlyOnEmptyBuild()) {
                        noMoreInput();
                    } else {
                        skipInput_ = true;
                    }
                }
            }
        } else if (
            (isInnerJoin(joinType_) || isLeftSemiFilterJoin(joinType_) ||
             isRightSemiFilterJoin(joinType_) ||
             (isRightSemiProjectJoin(joinType_) && !nullAware_) ||
             isRightJoin(joinType_)) &&
            table_->hashMode() != BaseHashTable::HashMode::kHash && !isSpillInput() &&
            !hasMoreSpillData()) {
            // Find out whether there are any upstream operators that can accept dynamic
            // filters on all or a subset of the join keys. Create dynamic filters to
            // push down.
            //
            // NOTE: this optimization is not applied in the following cases: (1) if the
            // probe input is read from spilled data and there is no upstream operators
            // involved; (2) if there is spill data to restore, then we can't filter
            // probe inputs solely based on the current table's join keys.
            const auto &buildHashers = table_->hashers();
            const auto channels = operatorCtx_->driverCtx()->driver->canPushdownFilters(
                this, keyChannels_);

            for (auto i = 0; i < keyChannels_.size(); ++i) {
                if (channels.find(keyChannels_[i]) != channels.end()) {
                    if (auto filter = buildHashers[i]->getFilter(/*nullAllowed=*/false)) {
                        dynamicFilters_.emplace(keyChannels_[i], std::move(filter));
                    }
                }
            }
            hasGeneratedDynamicFilters_ = !dynamicFilters_.empty();
        }
    }

    // True when probe input is being replayed from spilled data on disk.
    bool HashProbe::isSpillInput() const {
        return spillInputReader_.get() != nullptr;
    }

    // Tears down per-run probe state before restoring the next spilled
    // partition. Only the last prober clears the table and notifies the build
    // side; peers simply reset and wait.
    void HashProbe::prepareForSpillRestore() {
        checkRunning();
        POLLUX_CHECK(canSpill());
        POLLUX_CHECK(hasMoreSpillData());

        // Reset the internal states which are relevant to the previous probe run.
        noMoreSpillInput_ = false;
        if (lastProber_) {
            // Only one peer frees the shared table's contents.
            table_->clear(true);
        }
        table_.reset();
        inputSpiller_.reset();
        spillInputReader_.reset();
        spillInputPartitionIds_.clear();
        spillOutputReader_.reset();
        lastProbeIterator_.reset();

        POLLUX_CHECK(promises_.empty() || lastProber_);
        if (!lastProber_) {
            return;
        }
        // Notify the hash build operators to build the next hash table.
        joinBridge_->probeFinished();

        // Unblock peer probe operators waiting in kWaitForPeers.
        wakeupPeerOperators();

        lastProber_ = false;
    }

    void HashProbe::wakeupPeerOperators() {
        POLLUX_CHECK(lastProber_);
        auto promises = std::move(promises_);
        for (auto &promise: promises) {
            promise.setValue();
        }
    }

    std::vector<HashProbe *> HashProbe::findPeerOperators() {
        auto task = operatorCtx_->task();
        const std::vector<Operator *> operators =
                task->findPeerOperators(operatorCtx_->driverCtx()->pipelineId, this);
        std::vector<HashProbe *> probeOps;
        probeOps.reserve(operators.size());
        for (auto *op: operators) {
            auto *probeOp = dynamic_cast<HashProbe *>(op);
            probeOps.push_back(probeOp);
        }
        return probeOps;
    }

    // Pulls the next batch of probe input from the spill reader and feeds it to
    // addInput(). No-op when an input is already pending or the spilled stream
    // has been exhausted.
    void HashProbe::addSpillInput() {
        checkRunning();

        if (input_ != nullptr || noMoreSpillInput_) {
            return;
        }
        const bool gotBatch = spillInputReader_->nextBatch(input_);
        if (MELON_UNLIKELY(!gotBatch)) {
            // Spilled probe stream drained: finalize input processing.
            noMoreInputInternal();
            return;
        }

        addInput(std::move(input_));
    }

    // Routes rows of 'input' that belong to spilled partitions to the input
    // spiller and rewrites 'input' in place to contain only the remaining
    // (non-spilled) rows; sets it to nullptr if every row was spilled.
    void HashProbe::spillInput(RowVectorPtr &input) {
        POLLUX_CHECK(needToSpillInput());

        const auto numInput = input->size();
        prepareInputIndicesBuffers(
            input->size(), inputSpiller_->state().spilledPartitionSet());
        // 'singlePartition' is set when all rows hash to the same partition.
        const auto singlePartition =
                spillHashFunction_->partition(*input, spillPartitions_);

        // Split row indices between the non-spill buffer and the per-partition
        // spill buffers.
        vector_size_t numNonSpillingInput = 0;
        for (auto row = 0; row < numInput; ++row) {
            const auto partition = singlePartition.has_value()
                                       ? singlePartition.value()
                                       : spillPartitions_[row];
            if (!inputSpiller_->state().isPartitionSpilled(partition)) {
                rawNonSpillInputIndicesBuffer_[numNonSpillingInput++] = row;
                continue;
            }
            rawSpillInputIndicesBuffers_[partition][numSpillInputs_[partition]++] = row;
        }
        if (numNonSpillingInput == numInput) {
            // No row belongs to a spilled partition; leave 'input' untouched.
            return;
        }

        // Ensure lazy vectors are loaded before spilling.
        for (int32_t i = 0; i < input->childrenSize(); ++i) {
            input->childAt(i)->loaded_vector();
        }

        // Spill each partition's rows as a dictionary wrap over 'input'.
        for (int32_t partition = 0; partition < numSpillInputs_.size(); ++partition) {
            const auto numSpillInputs = numSpillInputs_[partition];
            if (numSpillInputs == 0) {
                continue;
            }
            POLLUX_CHECK(inputSpiller_->state().isPartitionSpilled(partition));
            inputSpiller_->spill(
                partition,
                wrap(numSpillInputs, spillInputIndicesBuffers_[partition], input));
        }

        if (numNonSpillingInput == 0) {
            input = nullptr;
        } else {
            input = wrap(numNonSpillingInput, nonSpillInputIndicesBuffer_, input);
        }
    }

    // (Re)allocates the row-index scratch buffers used by spillInput() to split
    // a probe batch between spilled and non-spilled rows, and resets the
    // per-partition row counters. Buffers are reused across batches when they
    // are still mutable and large enough.
    void HashProbe::prepareInputIndicesBuffers(
        vector_size_t numInput,
        const melon::F14FastSet<uint32_t> &spillPartitions) {
        POLLUX_DCHECK(canSpill());
        const auto maxIndicesBufferBytes = numInput * sizeof(vector_size_t);
        if (nonSpillInputIndicesBuffer_ == nullptr ||
            !nonSpillInputIndicesBuffer_->isMutable() ||
            nonSpillInputIndicesBuffer_->size() < maxIndicesBufferBytes) {
            nonSpillInputIndicesBuffer_ = allocate_indices(numInput, pool());
            rawNonSpillInputIndicesBuffer_ =
                    nonSpillInputIndicesBuffer_->asMutable<vector_size_t>();
        }
        for (const auto &partition: spillPartitions) {
            // Match the non-spill buffer policy above: also reallocate when the
            // buffer is no longer mutable (the previous batch's dictionary wrap
            // handed to the spiller may still reference it, in which case writing
            // through the cached raw pointer would mutate shared data). The
            // original code only checked the size here.
            if (spillInputIndicesBuffers_[partition] == nullptr ||
                !spillInputIndicesBuffers_[partition]->isMutable() ||
                spillInputIndicesBuffers_[partition]->size() < maxIndicesBufferBytes) {
                spillInputIndicesBuffers_[partition] = allocate_indices(numInput, pool());
                rawSpillInputIndicesBuffers_[partition] =
                        spillInputIndicesBuffers_[partition]->asMutable<vector_size_t>();
            }
        }
        // Reset per-partition spill row counters for the new batch.
        std::fill(numSpillInputs_.begin(), numSpillInputs_.end(), 0);
    }

    // Reports whether the operator is blocked and, if so, hands the pending
    // future to the driver. Also advances the state machine: kicks off the
    // table fetch when waiting for build, and pulls spill input when running.
    BlockingReason HashProbe::isBlocked(ContinueFuture *future) {
        switch (state_) {
            case ProbeOperatorState::kWaitForBuild:
                POLLUX_CHECK_NULL(table_);
                if (!future_.valid()) {
                    // No pending wait: try to fetch the table (may set 'future_').
                    setRunning();
                    asyncWaitForHashTable();
                }
                break;
            case ProbeOperatorState::kRunning:
                POLLUX_CHECK_NOT_NULL(table_);
                if (spillInputReader_ != nullptr) {
                    addSpillInput();
                }
                break;
            case ProbeOperatorState::kWaitForPeers:
                POLLUX_CHECK(canSpill());
                if (!future_.valid()) {
                    // Peers have been woken up; resume running.
                    setRunning();
                }
                break;
            case ProbeOperatorState::kFinish:
                break;
            default:
                POLLUX_UNREACHABLE(probeOperatorStateName(state_));
                break;
        }

        if (future_.valid()) {
            POLLUX_CHECK(!isRunning());
            *future = std::move(future_);
        }
        return fromStateToBlockingReason(state_);
    }

    void HashProbe::clearDynamicFilters() {
        // The join can be completely replaced with a pushed down filter when the
        // following conditions are met:
        //  * hash table has a single key with unique values,
        //  * build side has no dependent columns.
        const bool singleUniqueKey =
                keyChannels_.size() == 1 && !table_->hasDuplicateKeys();
        const bool noBuildOutput = tableOutputProjections_.empty() && !filter_;
        if (singleUniqueKey && noBuildOutput && !dynamicFilters_.empty() &&
            !isRightJoin(joinType_)) {
            canReplaceWithDynamicFilter_ = true;
        }

        Operator::clearDynamicFilters();
    }

    // Decodes the probe key columns and computes 'nonNullInputRows_': the set of
    // input rows whose join keys are all non-null. For right-semi-project joins,
    // also records whether the probe side has any null keys.
    void HashProbe::decodeAndDetectNonNullKeys() {
        const auto numRows = input_->size();
        // Start with every row selected, then deselect rows with null keys.
        nonNullInputRows_.resize(numRows);
        nonNullInputRows_.setAll();

        for (size_t i = 0; i < hashers_.size(); ++i) {
            auto keyVector = input_->childAt(hashers_[i]->channel())->loaded_vector();
            hashers_[i]->decode(*keyVector, nonNullInputRows_);
        }

        deselectRowsWithNulls(hashers_, nonNullInputRows_);
        if (isRightSemiProjectJoin(joinType_) &&
            nonNullInputRows_.countSelected() < numRows) {
            probeSideHasNullKeys_ = true;
        }
    }

    // Accepts a batch of probe input: optionally spills rows belonging to
    // spilled partitions, decodes join keys, probes the hash table and primes
    // 'resultIter_' for getOutput().
    void HashProbe::addInput(RowVectorPtr input) {
        if (skipInput_) {
            // Empty build side and input can be dropped entirely.
            POLLUX_CHECK_NULL(input_);
            return;
        }
        input_ = std::move(input);

        // Reset passingInputRowsInitialized_ as input_ has changed.
        passingInputRowsInitialized_ = false;

        const auto numInput = input_->size();

        if (numInput > 0) {
            noInput_ = false;
        }

        if (canReplaceWithDynamicFilter_) {
            // The pushed-down filter fully subsumes the join; pass input through.
            replacedWithDynamicFilter_ = true;
            return;
        }

        bool hasDecoded = false;

        if (needToSpillInput()) {
            if (isRightSemiProjectJoin(joinType_) && !probeSideHasNullKeys_) {
                // Must inspect keys for nulls before rows are spilled away.
                decodeAndDetectNonNullKeys();
                hasDecoded = true;
            }

            spillInput(input_);
            // Check if all the probe input rows have been spilled.
            if (input_ == nullptr) {
                return;
            }
        }

        if (table_->numDistinct() == 0) {
            if (skipProbeOnEmptyBuild()) {
                POLLUX_CHECK(needToSpillInput());
                input_ = nullptr;
                return;
            }
            // Build side is empty. This state is valid only for anti, left and full
            // joins.
            POLLUX_CHECK(joinIncludesMissesFromLeft(joinType_));
            if (isLeftSemiProjectJoin(joinType_) ||
                (isAntiJoin(joinType_) && filter_)) {
                // For anti join with filter and semi project join we need to decode the
                // join keys columns to initialize 'nonNullInputRows_'. The anti join
                // filter evaluation and semi project join output generation will access
                // 'nonNullInputRows_' later.
                decodeAndDetectNonNullKeys();
            }
            return;
        }

        if (!hasDecoded) {
            decodeAndDetectNonNullKeys();
        }
        activeRows_ = nonNullInputRows_;

        // Update statistics for null keys in join operator.
        // Updating here means we will report 0 null keys when build side is empty.
        // If we want more accurate stats, we will have to decode input vector
        // even when not needed. So we tradeoff less accurate stats for more
        // performance.
        {
            auto lockedStats = stats_.wlock();
            lockedStats->numNullKeys +=
                    activeRows_.size() - activeRows_.countSelected();
        }

        table_->prepareForJoinProbe(*lookup_.get(), input_, activeRows_, false);

        if (joinIncludesMissesFromLeft(joinType_)) {
            // Make sure to allocate an entry in 'hits' for every input row to allow for
            // including rows without a match in the output. Also, make sure to
            // initialize all 'hits' to nullptr as HashTable::joinProbe will only
            // process activeRows_.
            auto &hits = lookup_->hits;
            hits.resize(numInput);
            std::fill(hits.data(), hits.data() + numInput, nullptr);
            if (!lookup_->rows.empty()) {
                table_->joinProbe(*lookup_);
            }

            // Update lookup_->rows to include all input rows, not just
            // activeRows_ as we need to include all rows in the output.
            auto &rows = lookup_->rows;
            rows.resize(numInput);
            std::iota(rows.begin(), rows.end(), 0);
        } else {
            if (lookup_->rows.empty()) {
                // No row with non-null keys to probe; nothing to output.
                input_ = nullptr;
                return;
            }
            lookup_->hits.resize(lookup_->rows.back() + 1);
            table_->joinProbe(*lookup_);
        }

        resultIter_->reset(*lookup_);
    }

    void HashProbe::prepareOutput(vector_size_t size) {
        // Try to re-use memory for the output vectors that contain build-side data.
        // We expect output vectors containing probe-side data to be null (reset in
        // clearIdentityProjectedOutput). BaseVector::prepare_for_reuse keeps null
        // children unmodified and makes non-null (build side) children reusable.
        if (!output_) {
            output_ = BaseVector::create<RowVector>(outputType_, size, pool());
            return;
        }
        VectorPtr reusable = std::move(output_);
        BaseVector::prepare_for_reuse(reusable, size);
        output_ = std::static_pointer_cast<RowVector>(reusable);
    }

    namespace {
        // Returns a BOOLEAN constant vector of 'size' rows, every value false.
        VectorPtr createConstantFalse(vector_size_t size, memory::MemoryPool *pool) {
            return std::make_shared<ConstantVector<bool> >(
                pool, size, /*isNull=*/false, BOOLEAN(), /*value=*/false);
        }
    } // namespace

    // Fills the boolean 'match' column of a left-semi-project join output.
    // Null-aware joins may produce TRUE/FALSE/NULL; otherwise the column is
    // simply "row had a match".
    void HashProbe::fillLeftSemiProjectMatchColumn(vector_size_t size) {
        if (emptyBuildSide()) {
            // Build side is empty or all rows have null join keys.
            if (nullAware_ && buildSideHasNullKeys_) {
                // Any comparison against a build-side null key is indeterminate.
                matchColumn() = BaseVector::create_null_constant(BOOLEAN(), size, pool());
            } else {
                matchColumn() = createConstantFalse(size, pool());
            }
        } else {
            auto flatMatch = matchColumn()->as<FlatVector<bool> >();
            flatMatch->resize(size);
            auto rawValues = flatMatch->mutableRawValues<uint64_t>();
            // A non-null entry in 'outputTableRows_' means the probe row matched.
            auto *outputTableRows = outputTableRows_->as<char *>();
            for (auto i = 0; i < size; ++i) {
                if (nullAware_) {
                    // Null-aware join may produce TRUE, FALSE or NULL.
                    if (filter_) {
                        // With a filter, nullness was precomputed per row.
                        if (leftSemiProjectIsNull_.isValid(i)) {
                            flatMatch->set_null(i, true);
                        } else {
                            const bool hasMatch = outputTableRows[i] != nullptr;
                            bits::setBit(rawValues, i, hasMatch);
                        }
                    } else {
                        if (!nonNullInputRows_.isValid(i)) {
                            // Probe key is null.
                            flatMatch->set_null(i, true);
                        } else {
                            // Probe key is not null.
                            const bool hasMatch = outputTableRows[i] != nullptr;
                            if (!hasMatch && buildSideHasNullKeys_) {
                                // Could have matched a build-side null key: indeterminate.
                                flatMatch->set_null(i, true);
                            } else {
                                bits::setBit(rawValues, i, hasMatch);
                            }
                        }
                    }
                } else {
                    const bool hasMatch = outputTableRows[i] != nullptr;
                    bits::setBit(rawValues, i, hasMatch);
                }
            }
        }
    }

    // Fills 'output_' with 'size' result rows: probe-side columns are wrapped
    // with the current output row mapping; build-side columns are extracted
    // from the hash table rows — except for left semi project joins, which
    // instead populate the 'match' column.
    void HashProbe::fillOutput(vector_size_t size) {
        prepareOutput(size);

        for (auto [in, out]: projectedInputColumns_) {
            // Load input vector if it is being split into multiple batches. It is not
            // safe to wrap unloaded LazyVector into two different dictionaries.
            ensureLoadedIfNotAtEnd(in);
            auto inputChild = input_->childAt(in);
            output_->childAt(out) = wrapChild(size, outputRowMapping_, inputChild);
        }

        if (isLeftSemiProjectJoin(joinType_)) {
            fillLeftSemiProjectMatchColumn(size);
        } else {
            // Copy build-side columns out of the row container into the output
            // children, one row per pointer in 'outputTableRows_'.
            extractColumns(
                table_.get(),
                melon::Range<char * const*>(outputTableRows_->as<char *>(), size),
                tableOutputProjections_,
                pool(),
                outputType_->children(),
                output_->children());
        }
    }

    // Produces output sourced from the build side: probed rows for right semi
    // filter joins, all rows (plus a 'match' column) for right semi project
    // joins, and not-probed rows for right/full joins. Returns nullptr once
    // the build side has been fully consumed.
    RowVectorPtr HashProbe::getBuildSideOutput() {
        auto *outputTableRows =
                initBuffer<char *>(outputTableRows_, outputTableRowsCapacity_, pool());
        int32_t numOut;
        if (isRightSemiFilterJoin(joinType_)) {
            numOut = table_->listProbedRows(
                &lastProbeIterator_,
                outputTableRowsCapacity_,
                RowContainer::kUnlimited,
                outputTableRows);
        } else if (isRightSemiProjectJoin(joinType_)) {
            numOut = table_->listAllRows(
                &lastProbeIterator_,
                outputTableRowsCapacity_,
                RowContainer::kUnlimited,
                outputTableRows);
        } else {
            // Must be a right join or full join.
            numOut = table_->listNotProbedRows(
                &lastProbeIterator_,
                outputTableRowsCapacity_,
                RowContainer::kUnlimited,
                outputTableRows);
        }
        if (numOut == 0) {
            return nullptr;
        }

        prepareOutput(numOut);

        // Populate probe-side columns of the output with nulls. Only the output
        // channel of each projection is needed here (the input channel is
        // deliberately unused, matching the convention in clearProjectedOutput).
        for (const auto &[_, out]: projectedInputColumns_) {
            output_->childAt(out) = BaseVector::create_null_constant(
                outputType_->childAt(out), numOut, pool());
        }

        // Extract build-side columns for the listed table rows.
        extractColumns(
            table_.get(),
            melon::Range<char **>(outputTableRows, numOut),
            tableOutputProjections_,
            pool(),
            outputType_->children(),
            output_->children());

        if (isRightSemiProjectJoin(joinType_)) {
            // Populate 'match' column.
            if (noInput_) {
                // Probe side is empty. All rows should return 'match = false', even ones
                // with a null join key.
                matchColumn() = createConstantFalse(numOut, pool());
            } else {
                table_->rows()->extractProbedFlags(
                    outputTableRows,
                    numOut,
                    nullAware_,
                    nullAware_ && probeSideHasNullKeys_,
                    matchColumn());
            }
        }

        return output_;
    }

    // Drops the probe-side (identity-projected) children of 'output_' so their
    // buffers are not reused for the next batch. Skipped when a consumer may
    // still hold a reference to the output row vector.
    void HashProbe::clearProjectedOutput() {
        if (output_ == nullptr || output_.use_count() != 1) {
            return;
        }
        for (const auto &projection: projectedInputColumns_) {
            output_->childAt(projection.second) = nullptr;
        }
    }

    // Returns true if a final pass over the build side is required (join types
    // that emit build-side rows), unless probing was skipped entirely.
    bool HashProbe::needLastProbe() const {
        if (skipInput_) {
            return false;
        }
        return needRightSideJoin(joinType_);
    }

    // Returns true for join types that cannot produce any output when the
    // build side is empty, so probing can be skipped altogether.
    bool HashProbe::skipProbeOnEmptyBuild() const {
        if (isInnerJoin(joinType_) || isLeftSemiFilterJoin(joinType_)) {
            return true;
        }
        return isRightJoin(joinType_) || isRightSemiFilterJoin(joinType_) ||
               isRightSemiProjectJoin(joinType_);
    }

    // Spilling requires the generic operator-level conditions and is further
    // disabled when the task mixes execution groups.
    bool HashProbe::canSpill() const {
        if (!Operator::canSpill()) {
            return false;
        }
        return !operatorCtx_->task()->hasMixedExecutionGroup();
    }

    // Returns true while there are spilled partitions left to restore or
    // pending probe input that still needs to be spilled.
    bool HashProbe::hasMoreSpillData() const {
        POLLUX_CHECK(inputSpillPartitionSet_.empty() || canSpill());
        if (!inputSpillPartitionSet_.empty()) {
            return true;
        }
        return needToSpillInput();
    }

    // Returns true when probe input must still be routed to spill partitions.
    // The partition-id set and the input spiller are created together, which
    // the second check enforces.
    bool HashProbe::needToSpillInput() const {
        const bool hasSpillPartitions = !spillInputPartitionIds_.empty();
        POLLUX_CHECK(!hasSpillPartitions || canSpill());
        POLLUX_CHECK_EQ(!hasSpillPartitions, inputSpiller_ == nullptr);
        return hasSpillPartitions;
    }

    // Moves the operator to 'state' after validating the transition is legal
    // (see checkStateTransition, which aborts on an invalid move).
    void HashProbe::setState(ProbeOperatorState state) {
        checkStateTransition(state);
        state_ = state;
    }

    // Validates that the transition from the current 'state_' to the target
    // 'state' is legal; self-transitions are never allowed.
    void HashProbe::checkStateTransition(ProbeOperatorState state) {
        POLLUX_CHECK_NE(state_, state);
        switch (state) {
            case ProbeOperatorState::kRunning:
                // Without spill support, running can only follow the build wait;
                // with spilling it may also resume after waiting for peers.
                if (!canSpill()) {
                    POLLUX_CHECK_EQ(state_, ProbeOperatorState::kWaitForBuild);
                } else {
                    POLLUX_CHECK(
                        state_ == ProbeOperatorState::kWaitForBuild ||
                        state_ == ProbeOperatorState::kWaitForPeers);
                }
                break;
            case ProbeOperatorState::kWaitForPeers:
                // Only spilling probes synchronize with peer operators.
                POLLUX_CHECK(canSpill());
                [[fallthrough]];
            case ProbeOperatorState::kWaitForBuild:
                [[fallthrough]];
            case ProbeOperatorState::kFinish:
                // kWaitForPeers/kWaitForBuild/kFinish may only be entered from kRunning.
                POLLUX_CHECK_EQ(state_, ProbeOperatorState::kRunning);
                break;
            default:
                // NOTE(review): this reports the current state; the invalid target
                // 'state' may have been intended — confirm.
                POLLUX_UNREACHABLE(probeOperatorStateName(state_));
                break;
        }
    }

    // Driver-facing entry point for producing output. Any extra memory
    // reserved while producing the batch is returned to the pool when this
    // call finishes, whether or not a batch was produced.
    RowVectorPtr HashProbe::getOutput() {
        SCOPE_EXIT {
            pool()->release();
        };
        auto output = getOutputInternal(/*toSpillOutput=*/false);
        return output;
    }

    // Core output-production loop. When 'toSpillOutput' is true the call comes
    // from memory arbitration to flush pending output and must avoid further
    // reservations and state transitions. Returns nullptr when no batch is
    // available (more input needed, cancelled, or finished).
    RowVectorPtr HashProbe::getOutputInternal(bool toSpillOutput) {
        if (isFinished()) {
            return nullptr;
        }
        POLLUX_CHECK(
            isRunning() || isWaitingForPeers(),
            fmt::format("Invalid state {}", state_));

        if (!toSpillOutput) {
            // Avoid memory reservation if it is triggered by memory arbitration to
            // spill pending output.
            ensureOutputFits();
        }

        // Previously spilled output takes precedence over fresh probing.
        if (maybeReadSpillOutput()) {
            return output_;
        }

        clearProjectedOutput();

        // No pending probe input: either wait for more, emit build-side rows,
        // restore spilled partitions, or finish.
        if (!input_) {
            if (hasMoreInput()) {
                return nullptr;
            }

            if (needLastProbe() && lastProber_) {
                auto output = getBuildSideOutput();
                if (output != nullptr) {
                    return output;
                }
            }

            // NOTE: if getOutputInternal() is called from memory arbitration to spill
            // the produced output from pending 'input_', then we should not proceed
            // with the rest of procedure, and let the next driver getOutput() call to
            // handle the probe finishing process properly.
            if (toSpillOutput) {
                POLLUX_CHECK(memory::underMemoryArbitration());
                POLLUX_CHECK(canReclaim());
                return nullptr;
            }

            if (hasMoreSpillData()) {
                prepareForSpillRestore();
                asyncWaitForHashTable();
            } else {
                if (lastProber_ && canSpill()) {
                    joinBridge_->probeFinished();
                    wakeupPeerOperators();
                }
                setState(ProbeOperatorState::kFinish);
            }
            return nullptr;
        }

        const auto inputSize = input_->size();

        // When the join was replaced by a pushed-down dynamic filter, pass the
        // input through unchanged.
        if (replacedWithDynamicFilter_) {
            addRuntimeStat("replacedWithDynamicFilterRows", RuntimeCounter(inputSize));
            auto output = Operator::fillOutput(inputSize, nullptr);
            input_ = nullptr;
            return output;
        }

        const bool isLeftSemiOrAntiJoinNoFilter = !filter_ &&
                                                  (isLeftSemiFilterJoin(joinType_) || isLeftSemiProjectJoin(joinType_)
                                                   ||
                                                   isAntiJoin(joinType_));

        const bool emptyBuildSide = (table_->numDistinct() == 0);

        // Left semi and anti joins are always cardinality reducing, e.g. for a
        // given row of input they produce zero or 1 row of output. Therefore, if
        // there is no extra filter we can process each batch of input in one go.
        auto outputBatchSize = (isLeftSemiOrAntiJoinNoFilter || emptyBuildSide)
                                   ? inputSize
                                   : outputBatchSize_;
        outputTableRowsCapacity_ = outputBatchSize;
        if (filter_ &&
            (isLeftJoin(joinType_) || isFullJoin(joinType_) ||
             isAntiJoin(joinType_) || isLeftSemiFilterJoin(joinType_) ||
             isLeftSemiProjectJoin(joinType_))) {
            // If we need non-matching probe side row, there is a possibility that such
            // row exists at end of an input batch and being carried over in the next
            // output batch, so we need to make extra room of one row in output.
            ++outputTableRowsCapacity_;
        }
        auto mapping = initializeRowNumberMapping(
            outputRowMapping_, outputTableRowsCapacity_, pool());
        auto *outputTableRows =
                initBuffer<char *>(outputTableRows_, outputTableRowsCapacity_, pool());

        for (;;) {
            // If the task owning this operator has been cancelled, there is no point
            // to continue executing this procedure, which may be long in degenerate
            // cases. Exit the working loop and let the Driver handle exiting gracefully
            // in its own loop.
            if (operatorCtx_->task()->isCancelled()) {
                return nullptr;
            }
            int numOut = 0;

            if (emptyBuildSide) {
                // When build side is empty, anti and left joins return all probe side
                // rows, including ones with null join keys.
                std::iota(mapping.begin(), mapping.begin() + inputSize, 0);
                std::fill(outputTableRows, outputTableRows + inputSize, nullptr);
                numOut = inputSize;
            } else if (isAntiJoin(joinType_) && !filter_) {
                if (nullAware_) {
                    // When build side is not empty, anti join without a filter returns
                    // probe rows with no nulls in the join key and no match in the build
                    // side.
                    for (auto i = 0; i < inputSize; ++i) {
                        if (nonNullInputRows_.isValid(i) &&
                            (!activeRows_.isValid(i) || !lookup_->hits[i])) {
                            mapping[numOut] = i;
                            ++numOut;
                        }
                    }
                } else {
                    // Regular (non-null-aware) anti join: null-key probe rows also pass.
                    for (auto i = 0; i < inputSize; ++i) {
                        if (!nonNullInputRows_.isValid(i) ||
                            (!activeRows_.isValid(i) || !lookup_->hits[i])) {
                            mapping[numOut] = i;
                            ++numOut;
                        }
                    }
                }
            } else {
                numOut = table_->listJoinResults(
                    *resultIter_,
                    joinIncludesMissesFromLeft(joinType_),
                    melon::Range(mapping.data(), outputBatchSize),
                    melon::Range(outputTableRows, outputBatchSize),
                    operatorCtx_->driverCtx()->queryConfig().preferredOutputBatchBytes());
            }

            // We are done processing the input batch if there are no more joined rows
            // to process and the NoMatchDetector isn't carrying forward a row that
            // still needs to be written to the output.
            if (!numOut && !noMatchDetector_.hasLastMissedRow()) {
                input_ = nullptr;
                return nullptr;
            }
            POLLUX_CHECK_LE(numOut, outputBatchSize);

            numOut = evalFilter(numOut);

            // Everything in this batch was filtered out; list the next batch of
            // join results.
            if (numOut == 0) {
                continue;
            }

            if (needLastProbe()) {
                // Mark build-side rows that have a match on the join condition.
                table_->rows()->setProbedFlag(outputTableRows, numOut);
            }

            // Right semi join only returns the build side output when the probe side
            // is fully complete. Do not return anything here.
            if (isRightSemiFilterJoin(joinType_) || isRightSemiProjectJoin(joinType_)) {
                if (resultIter_->atEnd()) {
                    input_ = nullptr;
                }
                return nullptr;
            }

            fillOutput(numOut);

            // For cardinality-reducing joins (and empty build side) the whole
            // input was consumed in one pass.
            if (isLeftSemiOrAntiJoinNoFilter || emptyBuildSide) {
                input_ = nullptr;
            }
            return output_;
        }
    }

    // Attempts to fill 'output_' with the next batch of previously spilled
    // output. Returns false when there is no spill reader or the reader has
    // been exhausted (in which case it is destroyed).
    bool HashProbe::maybeReadSpillOutput() {
        maybeSetupSpillOutputReader();
        if (spillOutputReader_ == nullptr) {
            return false;
        }

        // Spilled output is only consumed after the in-memory table was drained.
        POLLUX_DCHECK_EQ(table_->numDistinct(), 0);

        const bool hasBatch = spillOutputReader_->nextBatch(output_);
        if (!hasBatch) {
            spillOutputReader_.reset();
        }
        return hasBatch;
    }

    // Assembles the row vector the join filter is evaluated on: probe-side
    // columns wrapped in the output row mapping plus build-side columns
    // extracted from the matched table rows.
    RowVectorPtr HashProbe::createFilterInput(vector_size_t size) {
        std::vector<VectorPtr> filterColumns(filterInputType_->size());
        for (const auto &projection: filterInputProjections_) {
            if (projectedInputColumns_.find(projection.inputChannel) !=
                projectedInputColumns_.end()) {
                // If the column is projected to the output, ensure it's loaded if it's
                // lazy in case the filter only loads an incomplete subset of the rows
                // that will be output.
                ensureLoaded(projection.inputChannel);
            } else {
                // If the column isn't projected to the output, the Vector will only be
                // reused if we've broken the input batch into multiple output batches,
                // i.e. if results_ is not at the end of the iterator.
                ensureLoadedIfNotAtEnd(projection.inputChannel);
            }

            filterColumns[projection.outputChannel] = wrapChild(
                size, outputRowMapping_, input_->childAt(projection.inputChannel));
        }

        // Build-side filter columns come straight from the row container.
        extractColumns(
            table_.get(),
            melon::Range<char * const*>(outputTableRows_->as<char *>(), size),
            filterTableProjections_,
            pool(),
            filterInputType_->children(),
            filterColumns);

        return std::make_shared<RowVector>(
            pool(), filterInputType_, nullptr, size, std::move(filterColumns));
    }

    // Prepares row selections before evaluating the join filter for a
    // null-aware join: records which filter-input rows contain a null in any
    // probe-side filter column ('nullFilterInputRows_', only when the filter
    // propagates nulls), and removes rows with null probe keys from
    // 'filterInputRows_' since those are handled later via cross join against
    // table rows.
    void HashProbe::prepareFilterRowsForNullAwareJoin(
        RowVectorPtr &filterInput,
        vector_size_t numRows,
        bool filterPropagateNulls) {
        POLLUX_CHECK_LE(numRows, kBatchSize);
        // Lazily allocate the reusable batch used by the table-side filter pass.
        if (filterTableInput_ == nullptr) {
            filterTableInput_ =
                    BaseVector::create<RowVector>(filterInputType_, kBatchSize, pool());
        }

        if (filterPropagateNulls) {
            nullFilterInputRows_.resizeFill(numRows, false);
            auto *rawNullRows = nullFilterInputRows_.asMutableRange().bits();
            for (auto &projection: filterInputProjections_) {
                filterInputColumnDecodedVector_.decode(
                    *filterInput->childAt(projection.outputChannel), filterInputRows_);
                if (filterInputColumnDecodedVector_.may_have_nulls()) {
                    if (const uint64_t *nulls =
                            filterInputColumnDecodedVector_.nulls(&filterInputRows_)) {
                        SelectivityVector nullsInActiveRows(numRows);
                        memcpy(
                            nullsInActiveRows.asMutableRange().bits(),
                            nulls,
                            bits::nbytes(numRows));
                        // All rows that are not active count as non-null here.
                        bits::orWithNegatedBits(
                            nullsInActiveRows.asMutableRange().bits(),
                            filterInputRows_.asRange().bits(),
                            0,
                            numRows);
                        // NOTE: the false value of a raw null bit indicates null so we OR
                        // with negative of the raw bit.
                        bits::orWithNegatedBits(
                            rawNullRows, nullsInActiveRows.asRange().bits(), 0, numRows);
                    }
                }
            }
            nullFilterInputRows_.updateBounds();
            // TODO: consider to skip filtering on 'nullFilterInputRows_' as we know
            // it will never pass the filtering.
        }

        // NOTE: for null-aware anti join, we will skip filtering on the probe rows
        // with null join key columns(s) as we can apply filtering after they cross
        // join with the table rows later.
        if (!nonNullInputRows_.isAllSelected()) {
            // 'rawMapping' translates output-row index to probe-row index.
            auto *rawMapping = outputRowMapping_->asMutable<vector_size_t>();
            for (int i = 0; i < numRows; ++i) {
                if (filterInputRows_.isValid(i) &&
                    !nonNullInputRows_.isValid(rawMapping[i])) {
                    filterInputRows_.setValid(i, false);
                }
            }
            filterInputRows_.updateBounds();
        }
    }

    namespace {
        // Returns the filter result as a raw bit vector where a set bit means
        // "passed" (non-null AND true), or nullptr when such a view cannot be
        // produced cheaply (non-flat encoding, or a shared vector that cannot
        // be mutated in place).
        const uint64_t *getFlatFilterResult(VectorPtr &result) {
            if (!result->is_flat_encoding()) {
                return nullptr;
            }
            auto *flat = result->as_unchecked<FlatVector<bool> >();
            if (!flat->may_have_nulls()) {
                // No nulls: the values buffer alone is the answer.
                return flat->rawValues<uint64_t>();
            }
            if (!flat->rawValues<uint64_t>()) {
                // No values buffer: only the null bits determine the outcome
                // (null bit cleared => null => not passed).
                return flat->raw_nulls();
            }
            if (result.use_count() != 1) {
                // Can't mutate a shared vector in place.
                return nullptr;
            }
            // Fold nulls into the values in place: a null row becomes 'false'.
            // Safe because we hold the only reference to 'result'.
            auto *values = flat->mutableRawValues<uint64_t>();
            bits::andBits(values, flat->raw_nulls(), 0, flat->size());
            return values;
        }
    } // namespace

    // For each selected probe row, evaluates the join filter against batches
    // of build-side rows supplied by 'iterator' (e.g. null-key rows or all
    // rows). A probe row is added to 'filterPassedRows' as soon as the filter
    // passes for any combination with a table row.
    void HashProbe::applyFilterOnTableRowsForNullAwareJoin(
        const SelectivityVector &rows,
        SelectivityVector &filterPassedRows,
        std::function<int32_t(char **, int32_t)> iterator) {
        if (!rows.hasSelections()) {
            return;
        }
        POLLUX_CHECK(table_->rows(), "Should not move rows in hash joins");
        char *data[kBatchSize];
        while (auto numRows = iterator(data, kBatchSize)) {
            filterTableInput_->resize(numRows);
            filterTableInputRows_.resizeFill(numRows, true);
            // Fill build-side filter columns once per batch of table rows.
            for (auto &projection: filterTableProjections_) {
                table_->extractColumn(
                    melon::Range<char * const*>(data, numRows),
                    projection.inputChannel,
                    filterTableInput_->childAt(projection.outputChannel));
            }
            rows.applyToSelected([&](vector_size_t row) {
                // Broadcast this probe row's filter columns as constants so the
                // filter sees the cross product of one probe row x numRows table rows.
                for (auto &projection: filterInputProjections_) {
                    filterTableInput_->childAt(projection.outputChannel) =
                            BaseVector::wrap_in_constant(
                                numRows, row, input_->childAt(projection.inputChannel));
                }
                EvalCtx evalCtx(
                    operatorCtx_->execCtx(), filter_.get(), filterTableInput_.get());
                filter_->eval(filterTableInputRows_, evalCtx, filterTableResult_);
                if (auto *values = getFlatFilterResult(filterTableResult_[0])) {
                    // Fast path: scan the raw "passed" bits for any set bit.
                    if (!bits::testSetBits(
                        values, 0, numRows, [](vector_size_t) { return false; })) {
                        filterPassedRows.setValid(row, true);
                    }
                } else {
                    // Slow path: decode the result and look for any non-null true.
                    decodedFilterTableResult_.decode(
                        *filterTableResult_[0], filterTableInputRows_);
                    if (decodedFilterTableResult_.isConstantMapping()) {
                        if (!decodedFilterTableResult_.is_null_at(0) &&
                            decodedFilterTableResult_.value_at<bool>(0)) {
                            filterPassedRows.setValid(row, true);
                        }
                    } else {
                        for (vector_size_t i = 0; i < numRows; ++i) {
                            if (!decodedFilterTableResult_.is_null_at(i) &&
                                decodedFilterTableResult_.value_at<bool>(i)) {
                                filterPassedRows.setValid(row, true);
                                break;
                            }
                        }
                    }
                }
            });
        }
    }

    // Computes, for a null-aware join, the set of probe rows for which the
    // filter passes. Rows that did not pass directly are re-checked against
    // build-side null-key rows (non-null probe keys) or against all build
    // rows (null probe keys), per null-aware join semantics.
    SelectivityVector HashProbe::evalFilterForNullAwareJoin(
        vector_size_t numRows,
        bool filterPropagateNulls) {
        auto *rawOutputProbeRowMapping =
                outputRowMapping_->asMutable<vector_size_t>();

        // Subset of probe-side rows with a match that passed the filter.
        SelectivityVector filterPassedRows(input_->size(), false);

        // Subset of probe-side rows with non-null probe key and either no match or
        // no match that passed the filter. We need to combine these with all
        // build-side rows with null keys to see if a filter passes on any of these.
        SelectivityVector nullKeyProbeRows(input_->size(), false);

        // Subset of probe-sie rows with null probe key. We need to combine these
        // with all build-side rows to see if a filter passes on any of these.
        SelectivityVector crossJoinProbeRows(input_->size(), false);

        for (auto i = 0; i < numRows; ++i) {
            // Skip filter input row if it has any null probe side filter column.
            if (filterPropagateNulls && nullFilterInputRows_.isValid(i)) {
                continue;
            }

            const auto probeRow = rawOutputProbeRowMapping[i];
            if (nonNullInputRows_.isValid(probeRow)) {
                if (filterPassed(i)) {
                    filterPassedRows.setValid(probeRow, true);
                } else {
                    nullKeyProbeRows.setValid(probeRow, true);
                }
            } else {
                crossJoinProbeRows.setValid(probeRow, true);
            }
        }

        if (buildSideHasNullKeys_) {
            prepareNullKeyProbeHashers();
            BaseHashTable::NullKeyRowsIterator iter;
            // Rows that already passed need no further checking.
            nullKeyProbeRows.deselect(filterPassedRows);
            applyFilterOnTableRowsForNullAwareJoin(
                nullKeyProbeRows, filterPassedRows, [&](char **data, int32_t maxRows) {
                    return table_->listNullKeyRows(
                        &iter, maxRows, data, nullKeyProbeHashers_);
                });
        }
        BaseHashTable::RowsIterator iter;
        crossJoinProbeRows.deselect(filterPassedRows);
        applyFilterOnTableRowsForNullAwareJoin(
            crossJoinProbeRows, filterPassedRows, [&](char **data, int32_t maxRows) {
                return table_->listAllRows(
                    &iter, maxRows, RowContainer::kUnlimited, data);
            });
        filterPassedRows.updateBounds();

        return filterPassedRows;
    }

    // Lazily creates the hasher used to enumerate build-side null-key rows.
    // For hash mode, decodes a single null value up front so the hasher is
    // primed for null-key lookups.
    void HashProbe::prepareNullKeyProbeHashers() {
        if (nullKeyProbeHashers_.empty()) {
            nullKeyProbeHashers_ =
                    createVectorHashers(probeType_, joinNode_->leftKeys());
            // Null-aware joins allow only one join key.
            POLLUX_CHECK_EQ(nullKeyProbeHashers_.size(), 1);
            if (table_->hashMode() == BaseHashTable::HashMode::kHash) {
                nullKeyProbeInput_ =
                        BaseVector::create(nullKeyProbeHashers_[0]->type(), 1, pool());
                nullKeyProbeInput_->set_null(0, true);
                SelectivityVector selectivity(1);
                nullKeyProbeHashers_[0]->decode(*nullKeyProbeInput_, selectivity);
            }
        }
    }

    int32_t HashProbe::evalFilter(int32_t numRows) {
        if (!filter_) {
            return numRows;
        }

        const bool filterPropagateNulls = filter_->expr(0)->propagatesNulls();
        auto *rawOutputProbeRowMapping =
                outputRowMapping_->asMutable<vector_size_t>();
        auto *outputTableRows = outputTableRows_->asMutable<char *>();

        filterInputRows_.resizeFill(numRows);

        // Do not evaluate filter on rows with no match to (1) avoid
        // false-positives when filter evaluates to true for rows with NULLs on the
        // build side; (2) avoid errors in filter evaluation that would fail the
        // query unnecessarily.
        // TODO Apply the same to left joins.
        if (isAntiJoin(joinType_) || isLeftSemiProjectJoin(joinType_)) {
            for (auto i = 0; i < numRows; ++i) {
                if (outputTableRows[i] == nullptr) {
                    filterInputRows_.setValid(i, false);
                }
            }
            filterInputRows_.updateBounds();
        }

        RowVectorPtr filterInput = createFilterInput(numRows);

        if (nullAware_) {
            prepareFilterRowsForNullAwareJoin(
                filterInput, numRows, filterPropagateNulls);
        }

        EvalCtx evalCtx(operatorCtx_->execCtx(), filter_.get(), filterInput.get());
        filter_->eval(0, 1, true, filterInputRows_, evalCtx, filterResult_);

        decodedFilterResult_.decode(*filterResult_[0], filterInputRows_);

        int32_t numPassed = 0;
        if (isLeftJoin(joinType_) || isFullJoin(joinType_)) {
            // Identify probe rows which got filtered out and add them back with nulls
            // for build side.
            if (noMatchDetector_.hasLastMissedRow()) {
                auto *tempOutputTableRows = initBuffer<char *>(
                    tempOutputTableRows_, outputTableRowsCapacity_, pool());
                auto *tempOutputRowMapping = initBuffer<vector_size_t>(
                    tempOutputRowMapping_, outputTableRowsCapacity_, pool());
                auto addMiss = [&](auto row) {
                    tempOutputTableRows[numPassed] = nullptr;
                    tempOutputRowMapping[numPassed++] = row;
                };
                for (auto i = 0; i < numRows; ++i) {
                    const bool passed = filterPassed(i);
                    noMatchDetector_.advance(rawOutputProbeRowMapping[i], passed, addMiss);
                    if (passed) {
                        tempOutputTableRows[numPassed] = outputTableRows[i];
                        tempOutputRowMapping[numPassed++] = rawOutputProbeRowMapping[i];
                    }
                }
                if (resultIter_->atEnd()) {
                    noMatchDetector_.finish(addMiss);
                }
                std::copy(
                    tempOutputTableRows,
                    tempOutputTableRows + numPassed,
                    outputTableRows);
                std::copy(
                    tempOutputRowMapping,
                    tempOutputRowMapping + numPassed,
                    rawOutputProbeRowMapping);
            } else {
                auto addMiss = [&](auto row) {
                    outputTableRows[numPassed] = nullptr;
                    rawOutputProbeRowMapping[numPassed++] = row;
                };
                for (auto i = 0; i < numRows; ++i) {
                    const bool passed = filterPassed(i);
                    noMatchDetector_.advance(rawOutputProbeRowMapping[i], passed, addMiss);
                    if (passed) {
                        outputTableRows[numPassed] = outputTableRows[i];
                        rawOutputProbeRowMapping[numPassed++] = rawOutputProbeRowMapping[i];
                    }
                }
                if (resultIter_->atEnd()) {
                    noMatchDetector_.finish(addMiss);
                }
            }
        } else if (isLeftSemiFilterJoin(joinType_)) {
            auto addLastMatch = [&](auto row) {
                outputTableRows[numPassed] = nullptr;
                rawOutputProbeRowMapping[numPassed++] = row;
            };
            for (auto i = 0; i < numRows; ++i) {
                if (filterPassed(i)) {
                    leftSemiFilterJoinTracker_.advance(
                        rawOutputProbeRowMapping[i], addLastMatch);
                }
            }
            if (resultIter_->atEnd()) {
                leftSemiFilterJoinTracker_.finish(addLastMatch);
            }
        } else if (isLeftSemiProjectJoin(joinType_)) {
            // NOTE: Set output table row to point to a fake string to indicate there
            // is a match for this probe 'row'. 'fillOutput' populates the match
            // column based on the nullable of this pointer.
            static const char *kPassed = "passed";
            TURBO_UNUSED(kPassed);
            if (nullAware_) {
                leftSemiProjectIsNull_.resize(numRows);
                leftSemiProjectIsNull_.clearAll();

                auto addLast = [&](auto row, std::optional<bool> passed) {
                    if (passed.has_value()) {
                        outputTableRows[numPassed] =
                                passed.value() ? const_cast<char *>(kPassed) : nullptr;
                    } else {
                        leftSemiProjectIsNull_.setValid(numPassed, true);
                    }
                    rawOutputProbeRowMapping[numPassed++] = row;
                };

                auto passedRows =
                        evalFilterForNullAwareJoin(numRows, filterPropagateNulls);
                for (auto i = 0; i < numRows; ++i) {
                    // filterPassed(i) -> TRUE
                    // else passed -> NULL
                    // else FALSE
                    auto probeRow = rawOutputProbeRowMapping[i];
                    std::optional<bool> passed = filterPassed(i)
                                                     ? std::optional(true)
                                                     : (passedRows.isValid(probeRow)
                                                            ? std::nullopt
                                                            : std::optional(false));
                    leftSemiProjectJoinTracker_.advance(probeRow, passed, addLast);
                }
                leftSemiProjectIsNull_.updateBounds();
                if (resultIter_->atEnd()) {
                    leftSemiProjectJoinTracker_.finish(addLast);
                }
            } else {
                auto addLast = [&](auto row, std::optional<bool> passed) {
                    outputTableRows[numPassed] =
                            passed.value() ? const_cast<char *>(kPassed) : nullptr;
                    rawOutputProbeRowMapping[numPassed++] = row;
                };
                for (auto i = 0; i < numRows; ++i) {
                    leftSemiProjectJoinTracker_.advance(
                        rawOutputProbeRowMapping[i], filterPassed(i), addLast);
                }
                if (resultIter_->atEnd()) {
                    leftSemiProjectJoinTracker_.finish(addLast);
                }
            }
        } else if (isAntiJoin(joinType_)) {
            auto addMiss = [&](auto row) {
                outputTableRows[numPassed] = nullptr;
                rawOutputProbeRowMapping[numPassed++] = row;
            };
            if (nullAware_) {
                auto passedRows =
                        evalFilterForNullAwareJoin(numRows, filterPropagateNulls);
                for (auto i = 0; i < numRows; ++i) {
                    auto probeRow = rawOutputProbeRowMapping[i];
                    bool passed = passedRows.isValid(probeRow);
                    noMatchDetector_.advance(probeRow, passed, addMiss);
                }
            } else {
                for (auto i = 0; i < numRows; ++i) {
                    auto probeRow = rawOutputProbeRowMapping[i];
                    noMatchDetector_.advance(probeRow, filterPassed(i), addMiss);
                }
            }
            if (resultIter_->atEnd()) {
                noMatchDetector_.finish(addMiss);
            }
        } else {
            for (auto i = 0; i < numRows; ++i) {
                if (filterPassed(i)) {
                    outputTableRows[numPassed] = outputTableRows[i];
                    rawOutputProbeRowMapping[numPassed++] = rawOutputProbeRowMapping[i];
                }
            }
        }
        POLLUX_CHECK_LE(numPassed, outputTableRowsCapacity_);
        return numPassed;
    }

    // Loads the lazy vector at 'channel' unless the hash lookup result iterator
    // has already been fully consumed, in which case no output will reference it.
    void HashProbe::ensureLoadedIfNotAtEnd(column_index_t channel) {
        if (!resultIter_->atEnd()) {
            ensureLoaded(channel);
        }
    }

    // Ensures the probe input column at 'channel' is loaded for exactly the rows
    // that can appear in the output, computing that row set lazily on first use.
    void HashProbe::ensureLoaded(column_index_t channel) {
        // Filter-less (left) semi and anti joins never materialize probe-side
        // payload columns, so lazy vectors can stay unloaded.
        const bool skipLoad = !filter_ &&
                              (isLeftSemiFilterJoin(joinType_) ||
                               isLeftSemiProjectJoin(joinType_) ||
                               isAntiJoin(joinType_));
        if (skipLoad) {
            return;
        }

        if (!passingInputRowsInitialized_) {
            passingInputRowsInitialized_ = true;
            passingInputRows_.resize(input_->size());
            if (joinIncludesMissesFromLeft(joinType_)) {
                // Joins that emit probe misses may reference every input row.
                passingInputRows_.setAll();
            } else {
                // Otherwise only rows with at least one table hit can be output.
                passingInputRows_.clearAll();
                const auto *rawHits = lookup_->hits.data();
                const auto numHits = lookup_->hits.size();
                for (auto row = 0; row < numHits; ++row) {
                    if (rawHits[row]) {
                        passingInputRows_.setValid(row, true);
                    }
                }
                passingInputRows_.updateBounds();
            }
        }

        LazyVector::ensureLoadedRows(input_->childAt(channel), passingInputRows_);
    }

    // Called by the driver when no further probe input will arrive; notifies the
    // base operator and then runs the probe-side end-of-input protocol.
    void HashProbe::noMoreInput() {
        Operator::noMoreInput();
        noMoreInputInternal();
    }

    // Returns true while this operator may still receive probe input, either
    // from upstream or from a previously spilled probe-input partition.
    bool HashProbe::hasMoreInput() const {
        if (!noMoreInput_) {
            return true;
        }
        return spillInputReader_ != nullptr && !noMoreSpillInput_;
    }

    // End-of-input protocol: finalizes any probe-input spill, then coordinates
    // with peer probe operators through the task barrier. The last peer to
    // arrive becomes 'lastProber_' and is responsible for producing build-side
    // rows required by the join type; earlier peers either finish immediately
    // (spill disabled) or park in kWaitForPeers (spill enabled).
    void HashProbe::noMoreInputInternal() {
        checkRunning();

        noMoreSpillInput_ = true;
        if (!spillInputPartitionIds_.empty()) {
            POLLUX_CHECK_NOT_NULL(inputSpiller_);
            POLLUX_CHECK_EQ(
                spillInputPartitionIds_.size(),
                inputSpiller_->state().spilledPartitionSet().size());
            // Finish writing spilled probe input so it can be restored later.
            inputSpiller_->finishSpill(inputSpillPartitionSet_);
            // Hash probe input spilling is unsorted; sort time must be zero.
            POLLUX_CHECK_EQ(spillStats_.rlock()->spillSortTimeNanos, 0);
        }

        const bool hasSpillEnabled = canSpill();
        std::vector<ContinuePromise> promises;
        std::vector<std::shared_ptr<Driver> > peers;
        // The last operator to finish processing inputs is responsible for
        // producing build-side rows based on the join.
        if (!operatorCtx_->task()->allPeersFinished(
            planNodeId(),
            operatorCtx_->driver(),
            hasSpillEnabled ? &future_ : nullptr,
            hasSpillEnabled ? promises_ : promises,
            peers)) {
            // Not the last peer. With spill enabled we must block until all
            // peers finish, because a spill triggered by another peer (or a
            // previously spilled partition to restore) may require this
            // operator to participate.
            if (hasSpillEnabled) {
                POLLUX_CHECK(future_.valid());
                setState(ProbeOperatorState::kWaitForPeers);
                POLLUX_DCHECK(promises_.empty());
            } else {
                POLLUX_DCHECK(promises.empty());
            }
            return;
        }

        POLLUX_CHECK(promises.empty());
        // NOTE: if 'hasSpillEnabled' is false, then a hash probe operator doesn't
        // need to wait for all the other peers to finish probe processing.
        // Otherwise, it needs to wait and might expect spill gets triggered by the
        // other probe operators, or there is previously spilled table partition(s)
        // that needs to restore.
        POLLUX_CHECK(hasSpillEnabled || peers.empty());
        lastProber_ = true;
    }

    // True once the operator has reached its terminal state.
    bool HashProbe::isFinished() {
        return ProbeOperatorState::kFinish == state_;
    }

    // True while the operator is actively processing probe input.
    bool HashProbe::isRunning() const {
        return ProbeOperatorState::kRunning == state_;
    }

    // True while the operator is parked at the peer barrier after finishing its
    // own input.
    bool HashProbe::isWaitingForPeers() const {
        return ProbeOperatorState::kWaitForPeers == state_;
    }

    // Asserts the operator is in kRunning state; reports the actual state name
    // on failure.
    void HashProbe::checkRunning() const {
        POLLUX_CHECK(isRunning(), probeOperatorStateName(state_));
    }

    // Transitions the operator back to the active probing state.
    void HashProbe::setRunning() {
        setState(ProbeOperatorState::kRunning);
    }

    // Returns true when memory cannot be reclaimed from this operator right now:
    // wrong state, inside a non-reclaimable section, already spilling input, or
    // no (non-empty) hash table to spill.
    bool HashProbe::nonReclaimableState() const {
        const bool reclaimableState =
                state_ == ProbeOperatorState::kRunning ||
                state_ == ProbeOperatorState::kWaitForPeers;
        if (!reclaimableState) {
            return true;
        }
        if (nonReclaimableSection_ || inputSpiller_ != nullptr) {
            return true;
        }
        // With no table, or an empty one, there is nothing worth spilling.
        return table_ == nullptr || table_->numDistinct() == 0;
    }

    // Best-effort reservation of memory for the next output batch. A failed
    // reservation is only logged; production continues and may trigger
    // arbitration later.
    void HashProbe::ensureOutputFits() {
        if (!canReclaim()) {
            // Don't reserve memory if we can't reclaim from this hash probe operator.
            return;
        }

        // Reserve only when output is actually imminent: there is pending probe
        // input, or input is exhausted and this is the last prober with a
        // build-side pass still to produce.
        const bool mayProduceOutput =
                input_ != nullptr ||
                (!hasMoreInput() && needLastProbe() && lastProber_);
        if (!mayProduceOutput) {
            return;
        }

        if (testingTriggerSpill(pool()->name())) {
            Operator::ReclaimableSectionGuard guard(this);
            memory::testingRunArbitration(pool());
        }

        // Ask for the preferred output batch size plus 20% headroom.
        const uint64_t bytesToReserve =
                operatorCtx_->driverCtx()->queryConfig().preferredOutputBatchBytes() *
                1.2;
        if (pool()->availableReservation() >= bytesToReserve) {
            return;
        }

        {
            // Reservation may trigger arbitration, so it must run inside a
            // reclaimable section.
            Operator::ReclaimableSectionGuard guard(this);
            if (pool()->maybeReserve(bytesToReserve)) {
                return;
            }
        }

        KLOG(WARNING) << "Failed to reserve " << succinctBytes(bytesToReserve)
                      << " for memory pool " << pool()->name()
                      << ", usage: " << succinctBytes(pool()->usedBytes())
                      << ", reservation: " << succinctBytes(pool()->reservedBytes());
    }

    // Reclaimable while spilling is enabled and the maximum spill level hasn't
    // been exceeded.
    bool HashProbe::canReclaim() const {
        if (!canSpill()) {
            return false;
        }
        return !exceededMaxSpillLevelLimit_;
    }

    // Reclaims memory from this probe operator and all its peers by spilling
    // pending outputs and, when probe input remains, the shared hash table.
    // Requires the task to be paused. Bails out (recording a non-reclaimable
    // attempt) if this operator or any peer is in a non-reclaimable state.
    void HashProbe::reclaim(
        uint64_t /*unused*/,
        memory::MemoryReclaimer::Stats &stats) {
        TestValue::adjust("kumo::pollux::exec::HashProbe::reclaim", this);
        POLLUX_CHECK(canSpill());
        auto *driver = operatorCtx_->driver();
        POLLUX_CHECK_NOT_NULL(driver);
        POLLUX_CHECK(!nonReclaimableSection_);

        if (UNLIKELY(exceededMaxSpillLevelLimit_)) {
            // 'canReclaim()' already checks the spill limit is not exceeding max, there
            // is only a small chance from the time 'canReclaim()' is checked to the
            // actual reclaim happens that the operator has spilled such that the spill
            // level exceeds max.
            const auto *config = spillConfig();
            POLLUX_CHECK_NOT_NULL(config);
            KLOG(WARNING)
        << "Can't reclaim from hash probe operator, exceeded maximum spill "
           "level of "
        << config->maxSpillLevel << ", " << pool()->name() << ", usage "
        << succinctBytes(pool()->usedBytes());
            return;
        }

        if (nonReclaimableState()) {
            // Record the failed attempt and dump the state for diagnosis.
            RECORD_METRIC_VALUE(kMetricMemoryNonReclaimableCount);
            ++stats.numNonReclaimableAttempts;
            KLOG(WARNING) << "Can't reclaim from hash probe operator, state_["
                 << ProbeOperatorState(state_) << "], nonReclaimableSection_["
                 << nonReclaimableSection_ << "], inputSpiller_["
                 << (inputSpiller_ == nullptr ? "nullptr" : "initialized")
                 << "], table_["
                 << (table_ == nullptr ? "nullptr" : "initialized")
                 << "], table_ numDistinct["
                 << (table_ == nullptr
                         ? "nullptr"
                         : std::to_string(table_->numDistinct()))
                 << "], " << pool()->name()
                 << ", usage: " << succinctBytes(pool()->usedBytes())
                 << ", node pool reservation: "
                 << succinctBytes(pool()->parent()->reservedBytes());
            return;
        }

        const auto &task = driver->task();
        POLLUX_CHECK(task->pauseRequested());
        const std::vector<HashProbe *> probeOps = findPeerOperators();
        bool hasMoreProbeInput{false};
        // All peers must be reclaimable; a single non-reclaimable peer aborts
        // the whole reclaim since they share the hash table.
        for (auto *probeOp: probeOps) {
            POLLUX_CHECK_NOT_NULL(probeOp);
            POLLUX_CHECK(probeOp->canSpill());
            if (probeOp->nonReclaimableState()) {
                RECORD_METRIC_VALUE(kMetricMemoryNonReclaimableCount);
                ++stats.numNonReclaimableAttempts;
                const auto *peerPool = probeOp->pool();
                KLOG(WARNING) << "Can't reclaim from hash probe operator, state_["
                   << ProbeOperatorState(probeOp->state_)
                   << "], nonReclaimableSection_["
                   << probeOp->nonReclaimableSection_ << "], inputSpiller_["
                   << (probeOp->inputSpiller_ == nullptr
                           ? "nullptr"
                           : "initialized")
                   << "], table_["
                   << (probeOp->table_ == nullptr ? "nullptr" : "initialized")
                   << "], table_ numDistinct["
                   << (probeOp->table_ == nullptr
                           ? "nullptr"
                           : std::to_string(probeOp->table_->numDistinct()))
                   << "], " << peerPool->name()
                   << ", usage: " << succinctBytes(peerPool->usedBytes())
                   << ", node pool reservation: "
                   << succinctBytes(peerPool->parent()->reservedBytes());
                return;
            }
            hasMoreProbeInput |= !probeOp->noMoreSpillInput_;
        }

        // Spill pending outputs from every peer before touching the table.
        spillOutput(probeOps);

        SpillPartitionSet spillPartitionSet;
        if (hasMoreProbeInput) {
            // Only spill hash table if any hash probe operators still has input probe
            // data, otherwise we skip this step.
            spillPartitionSet = spillHashJoinTable(
                table_, tableSpillHashBits_, joinNode_, spillConfig(), &spillStats_);
            POLLUX_CHECK(!spillPartitionSet.empty());
        }
        const auto spillPartitionIdSet = toSpillPartitionIdSet(spillPartitionSet);

        for (auto *probeOp: probeOps) {
            POLLUX_CHECK_NOT_NULL(probeOp);
            probeOp->clearBuffers();
            // Setup all the probe operators to spill the rest of probe inputs if the
            // table has been spilled.
            if (!spillPartitionSet.empty()) {
                POLLUX_CHECK(hasMoreProbeInput);
                probeOp->maybeSetupInputSpiller(spillPartitionIdSet);
            }
            probeOp->pool()->release();
        }

        // Clears memory resources held by the built hash table.
        table_->clear(true);
        // Sets the spilled hash table in the join bridge.
        if (!spillPartitionIdSet.empty()) {
            joinBridge_->appendSpilledHashTablePartitions(std::move(spillPartitionSet));
        }
    }

    // Spills pending outputs from all given probe operators, running the spills
    // concurrently on the spill executor when one is available. The first error
    // encountered is rethrown on the calling thread; results of the remaining
    // tasks are drained (errors swallowed) by the scope-exit guard.
    void HashProbe::spillOutput(const std::vector<HashProbe *> &operators) {
        // Per-task result: carries only an optional captured exception.
        struct SpillResult {
            const std::exception_ptr error{nullptr};

            explicit SpillResult(std::exception_ptr _error)
                : error(std::move(_error)) {
            }
        };

        std::vector<std::shared_ptr<AsyncSource<SpillResult> > > spillTasks;
        auto *spillExecutor = spillConfig()->executor;
        for (auto *op: operators) {
            HashProbe *probeOp = static_cast<HashProbe *>(op);
            spillTasks.push_back(
                memory::createAsyncMemoryReclaimTask<SpillResult>([probeOp]() {
                    try {
                        probeOp->spillOutput();
                        return std::make_unique<SpillResult>(nullptr);
                    } catch (const std::exception &e) {
                        KLOG(ERROR) << "Spill output from hash probe pool "
                       << probeOp->pool()->name() << " failed: " << e.what();
                        // The exception is captured and thrown by the caller.
                        return std::make_unique<SpillResult>(std::current_exception());
                    }
                }));
            // The first task runs inline on this thread when its result is
            // consumed below; subsequent tasks are offloaded to the executor.
            if ((spillTasks.size() > 1) && (spillExecutor != nullptr)) {
                spillExecutor->add([source = spillTasks.back()]() { source->prepare(); });
            }
        }

        SCOPE_EXIT {
            for (auto &spillTask: spillTasks) {
                // We consume the result for the pending tasks. This is a cleanup in the
                // guard and must not throw. The first error is already captured before
                // this runs.
                try {
                    spillTask->move();
                } catch (const std::exception &) {
                }
            }
        };

        // Wait for each task and surface the first captured error.
        for (auto &spillTask: spillTasks) {
            const auto result = spillTask->move();
            if (result->error) {
                std::rethrow_exception(result->error);
            }
        }
    }

    // Drains all output this operator can currently produce and spills it into
    // a single partition, leaving the result in 'spillOutputPartitionSet_'.
    // No-op when there is neither pending input nor a last-probe pass to run.
    void HashProbe::spillOutput() {
        // Checks if there is any output to spill or not.
        if (input_ == nullptr && !needLastProbe()) {
            return;
        }
        // We spill all the outputs produced from 'input_' into a single partition.
        auto outputSpiller = std::make_unique<NoRowContainerSpiller>(
            outputType_, HashBitRange{}, spillConfig(), &spillStats_);
        outputSpiller->setPartitionsSpilled({0});

        RowVectorPtr output{nullptr};
        for (;;) {
            output = getOutputInternal(/*toSpillOutput=*/true);
            if (output != nullptr) {
                // Ensure vector are lazy loaded before spilling.
                for (int32_t i = 0; i < output->childrenSize(); ++i) {
                    output->childAt(i)->loaded_vector();
                }
                outputSpiller->spill(0, output);
                continue;
            }
            // NOTE: for right semi join types, we need to check if 'input_' has been
            // cleared or not instead of checking on output. The right semi joins only
            // producing the output after processing all the probe inputs.
            if (input_ == nullptr) {
                break;
            }
            POLLUX_CHECK(
                isRightSemiFilterJoin(joinType_) || isRightSemiProjectJoin(joinType_));
            POLLUX_CHECK((output == nullptr) && (input_ != nullptr));
        }
        // Everything went to partition 0; at most one partition may exist.
        POLLUX_CHECK_LE(outputSpiller->state().spilledPartitionSet().size(), 1);

        POLLUX_CHECK(spillOutputPartitionSet_.empty());
        outputSpiller->finishSpill(spillOutputPartitionSet_);
        POLLUX_CHECK_EQ(spillOutputPartitionSet_.size(), 1);

        // Drop the partition entirely if nothing was actually written.
        removeEmptyPartitions(spillOutputPartitionSet_);
    }

    // Creates an unordered reader over the single spilled output partition, if
    // one exists, and hands partition ownership to the reader.
    void HashProbe::maybeSetupSpillOutputReader() {
        if (spillOutputPartitionSet_.empty()) {
            return;
        }
        POLLUX_CHECK_EQ(spillOutputPartitionSet_.size(), 1);
        POLLUX_CHECK_NULL(spillOutputReader_);

        auto &spilledPartition = spillOutputPartitionSet_.begin()->second;
        spillOutputReader_ = spilledPartition->createUnorderedReader(
            spillConfig_->readBufferSize, pool(), &spillStats_);
        spillOutputPartitionSet_.clear();
    }

    // Computes the hash-bit range for the next table spill based on the
    // restored partition's bit offset (if any). Disables further spilling when
    // the next spill would exceed the configured maximum spill level.
    void HashProbe::checkMaxSpillLevel(
        const std::optional<SpillPartitionId> &restoredPartitionId) {
        if (!canSpill()) {
            return;
        }

        const auto *config = spillConfig();
        uint8_t startPartitionBit = config->startPartitionBit;
        if (restoredPartitionId.has_value()) {
            // Each restore level consumes 'numPartitionBits' more hash bits.
            startPartitionBit =
                    restoredPartitionId->partitionBitOffset() + config->numPartitionBits;
            // Disable spilling if exceeding the max spill level and the query might
            // run out of memory if the restored partition still can't fit in memory.
            if (config->exceedSpillLevelLimit(startPartitionBit)) {
                RECORD_METRIC_VALUE(kMetricMaxSpillLevelExceededCount);
                KLOG_EVERY_N_SEC(WARNING, 1)
          << "Exceeded spill level limit: " << config->maxSpillLevel
          << ", and disable spilling for memory pool: " << pool()->name();
                exceededMaxSpillLevelLimit_ = true;
                ++spillStats_.wlock()->spillMaxLevelExceededCount;
                return;
            }
        }
        exceededMaxSpillLevelLimit_ = false;
        tableSpillHashBits_ = HashBitRange(
            startPartitionBit, startPartitionBit + config->numPartitionBits);
    }

    // Releases all heavyweight resources once the operator will produce no more
    // output; runs after the base operator's close.
    void HashProbe::close() {
        Operator::close();

        // Free up major memory usage.
        joinBridge_.reset();
        inputSpiller_.reset();
        table_.reset();
        spillInputReader_.reset();
        spillOutputPartitionSet_.clear();
        spillOutputReader_.reset();
        clearBuffers();
    }

    // Drops reusable output/row-mapping scratch buffers and, when a join filter
    // exists, the filter evaluation state as well.
    void HashProbe::clearBuffers() {
        outputRowMapping_.reset();
        tempOutputRowMapping_.reset();
        outputTableRows_.reset();
        tempOutputTableRows_.reset();
        output_.reset();
        nonSpillInputIndicesBuffer_.reset();
        spillInputIndicesBuffers_.clear();
        // The remaining buffers exist only when a join filter is configured.
        if (filter_ == nullptr) {
            return;
        }
        // Shrink filter result vectors back to a single slot.
        filterResult_.clear();
        filterResult_.resize(1);
        filterTableInput_.reset();
        filterTableResult_.clear();
        filterTableResult_.resize(1);
        // Release pooled vectors and cached filter expression state.
        operatorCtx_->execCtx()->vectorPool()->clear();
        filter_->clearCache();
    }
} // namespace kumo::pollux::exec
