// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
#include <pollux/exec/table_scan.h>
#include <pollux/common/testutil/test_value.h>
#include <pollux/common/time/timer.h>
#include <pollux/exec/task.h>
#include <pollux/exec/trace_util.h>
#include <pollux/expression/expr.h>

using kumo::pollux::common::testutil::TestValue;

namespace kumo::pollux::exec {
    // Constructs a table scan source operator for one driver of a pipeline.
    //
    // @param operatorId    Position of this operator within the driver.
    // @param driverCtx     Owning driver context; must outlive this operator.
    // @param tableScanNode Plan node carrying the table handle, the column
    //                      assignments and the scan's output row type.
    TableScan::TableScan(
        int32_t operatorId,
        DriverCtx *driverCtx,
        const std::shared_ptr<const core::TableScanNode> &tableScanNode)
        : SourceOperator(
              driverCtx,
              tableScanNode->outputType(),
              operatorId,
              tableScanNode->id(),
              "TableScan"),
          tableHandle_(tableScanNode->tableHandle()),
          columnHandles_(tableScanNode->assignments()),
          driverCtx_(driverCtx),
          maxSplitPreloadPerDriver_(
              driverCtx_->queryConfig().maxSplitPreloadPerDriver()),
          maxReadBatchSize_(driverCtx_->queryConfig().maxOutputBatchRows()),
          // Connector memory pool is registered with the task keyed by plan
          // node / pipeline / driver so its usage can be tracked per scan.
          connectorPool_(driverCtx_->task->addConnectorPoolLocked(
              planNodeId(),
              driverCtx_->pipelineId,
              driverCtx_->driverId,
              operatorType(),
              tableHandle_->connectorId())),
          connector_(connector::getConnector(tableHandle_->connectorId())),
          // Zero disables the per-getOutput() wall-time yield limit.
          getOutputTimeLimitMs_(
              driverCtx_->queryConfig().tableScanGetOutputTimeLimitMs()),
          // May be nullptr when scaled scan is not enabled for this task.
          scaledController_(driverCtx_->task->getScaledScanControllerLocked(
              driverCtx_->splitGroupId,
              planNodeId())) {
        // Start with the preferred batch size; getOutput() later adjusts
        // readBatchSize_ per split from the data source's estimated row size.
        readBatchSize_ = driverCtx_->queryConfig().preferredOutputBatchRows();
    }

    // Serializes the operator for debugging: the base SourceOperator fields
    // plus the most recently recorded getOutput() status string.
    melon::Dynamic TableScan::toJson() const {
        auto json = SourceOperator::toJson();
        json["status"] = curStatus_.load();
        return json;
    }

    // Returns true when the operator should voluntarily give up the thread:
    // the task requested a yield, the driver requested a yield, or this call
    // has exceeded the configured table scan output-time budget.
    bool TableScan::shouldYield(StopReason taskStopReason, size_t startTimeMs)
    const {
        if (taskStopReason == StopReason::kYield) {
            return true;
        }
        if (driverCtx_->driver->shouldYield()) {
            return true;
        }
        // A zero limit means the time-based yield is disabled.
        if (getOutputTimeLimitMs_ == 0) {
            return false;
        }
        return (getCurrentTimeMs() - startTimeMs) >= getOutputTimeLimitMs_;
    }

    // Returns true for any stop reason that actually terminates processing.
    // kNone means "keep going" and kYield is handled separately by
    // shouldYield(), so neither of those counts as a stop.
    bool TableScan::shouldStop(StopReason taskStopReason) const {
        if (taskStopReason == StopReason::kNone ||
            taskStopReason == StopReason::kYield) {
            return false;
        }
        return true;
    }

    // Produces the next batch of rows, pulling new splits from the task as
    // the current one is exhausted.
    //
    // Returns nullptr in all of these cases, distinguished by blockingReason_
    // and blockingFuture_:
    //  - no more splits will ever arrive (noMoreSplits_),
    //  - waiting for a split, for connector I/O, or for scan scale-up,
    //  - a yield or stop was requested, or the task was cancelled.
    // Otherwise returns a non-empty RowVector.
    RowVectorPtr TableScan::getOutput() {
        // Clear the debug status string on every exit path.
        auto exitCurStatusGuard = melon::makeGuard([this]() { curStatus_ = ""; });

        POLLUX_CHECK(!blockingFuture_.valid());
        blockingReason_ = BlockingReason::kNotBlocked;

        if (noMoreSplits_) {
            return nullptr;
        }

        // Check if we need to wait for scale up. We expect only wait once on startup.
        if (shouldWaitForScaleUp()) {
            POLLUX_CHECK(blockingFuture_.valid());
            POLLUX_CHECK_EQ(blockingReason_, BlockingReason::kWaitForScanScaleUp);
            return nullptr;
        }

        curStatus_ = "getOutput: enter";
        const auto startTimeMs = getCurrentTimeMs();
        for (;;) {
            // Check if our Task needs us to yield or we've been running for too long
            // w/o producing a result. In this case we return with the Yield blocking
            // reason and an already fulfilled future.
            curStatus_ = "getOutput: task->shouldStop";
            const StopReason taskStopReason = driverCtx_->task->shouldStop();
            if (shouldStop(taskStopReason) ||
                shouldYield(taskStopReason, startTimeMs)) {
                blockingReason_ = BlockingReason::kYield;
                // Immediately-ready future: the driver re-schedules us promptly.
                blockingFuture_ = ContinueFuture{melon::Unit{}};
                // A point for test code injection.
                TestValue::adjust(
                    "kumo::pollux::exec::TableScan::getOutput::yield", this);
                return nullptr;
            }

            if (needNewSplit_) {
                // A point for test code injection.
                TestValue::adjust("kumo::pollux::exec::TableScan::getOutput", this);

                exec::Split split;
                curStatus_ = "getOutput: task->getSplitOrFuture";
                blockingReason_ = driverCtx_->task->getSplitOrFuture(
                    driverCtx_->splitGroupId,
                    planNodeId(),
                    split,
                    blockingFuture_,
                    maxPreloadedSplits_,
                    splitPreloader_);
                // No split available yet: the task armed blockingFuture_.
                if (blockingReason_ != BlockingReason::kNotBlocked) {
                    return nullptr;
                }

                // An empty split signals end-of-splits for this plan node.
                if (!split.hasConnectorSplit()) {
                    noMoreSplits_ = true;
                    dynamicFilters_.clear();
                    if (dataSource_) {
                        // Fold the connector's runtime counters into the
                        // operator stats before finishing.
                        curStatus_ = "getOutput: noMoreSplits_=1, updating stats_";
                        const auto connectorStats = dataSource_->runtimeStats();
                        auto lockedStats = stats_.wlock();
                        for (const auto &[name, counter]: connectorStats) {
                            if (MELON_UNLIKELY(lockedStats->runtimeStats.count(name) == 0)) {
                                lockedStats->runtimeStats.emplace(
                                    name, RuntimeMetric(counter.unit));
                            } else {
                                // Units must agree across splits for a metric.
                                POLLUX_CHECK_EQ(
                                    lockedStats->runtimeStats.at(name).unit, counter.unit);
                            }
                            lockedStats->runtimeStats.at(name).addValue(counter.value);
                        }
                    }
                    return nullptr;
                }

                // Record the split when query tracing is enabled.
                if (MELON_UNLIKELY(splitTracer_ != nullptr)) {
                    splitTracer_->write(split);
                }
                const auto &connectorSplit = split.connectorSplit;
                currentSplitWeight_ = connectorSplit->splitWeight;
                needNewSplit_ = false;

                // A point for test code injection.
                TestValue::adjust(
                    "kumo::pollux::exec::TableScan::getOutput::gotSplit", this);

                POLLUX_CHECK_EQ(
                    connector_->connectorId(),
                    connectorSplit->connectorId,
                    "Got splits with different connector IDs");

                // Lazily create the data source on the first split.
                if (dataSource_ == nullptr) {
                    curStatus_ = "getOutput: creating dataSource_";
                    connectorQueryCtx_ = operatorCtx_->createConnectorQueryCtx(
                        connectorSplit->connectorId, planNodeId(), connectorPool_);
                    dataSource_ = connector_->createDataSource(
                        outputType_,
                        tableHandle_,
                        columnHandles_,
                        connectorQueryCtx_.get());
                    // Apply dynamic filters that arrived before the data
                    // source existed.
                    for (const auto &entry: dynamicFilters_) {
                        dataSource_->addDynamicFilter(entry.first, entry.second);
                    }
                }

                debugString_ = fmt::format(
                    "Split [{}] Task {}",
                    connectorSplit->toString(),
                    operatorCtx_->task()->taskId());

                // Attach split/task info to any exception thrown below.
                ExceptionContextSetter exceptionContext(
                    {
                        [](PolluxException::Type /*exceptionType*/, auto *debugString) {
                            return *static_cast<std::string *>(debugString);
                        },
                        &debugString_
                    });

                if (connectorSplit->dataSource != nullptr) {
                    curStatus_ = "getOutput: preloaded split";
                    ++numPreloadedSplits_;
                    // The AsyncSource returns a unique_ptr to a shared_ptr. The unique_ptr
                    // will be nullptr if there was a cancellation.
                    numReadyPreloadedSplits_ += connectorSplit->dataSource->hasValue();
                    auto preparedDataSource = connectorSplit->dataSource->move();
                    stats_.wlock()->getOutputTiming.add(
                        connectorSplit->dataSource->prepareTiming());
                    if (!preparedDataSource) {
                        // There must be a cancellation.
                        POLLUX_CHECK(operatorCtx_->task()->isCancelled());
                        return nullptr;
                    }
                    dataSource_->setFromDataSource(std::move(preparedDataSource));
                } else {
                    // Split was not preloaded: add it synchronously and time it.
                    curStatus_ = "getOutput: adding split";
                    uint64_t addSplitTimeUs{0}; {
                        MicrosecondTimer timer(&addSplitTimeUs);
                        dataSource_->addSplit(connectorSplit);
                    }
                    stats_.wlock()->addRuntimeStat(
                        "dataSourceAddSplitWallNanos",
                        RuntimeCounter(
                            addSplitTimeUs * 1'000, RuntimeCounter::Unit::kNanos));
                }
                curStatus_ = "getOutput: updating stats_.numSplits";
                ++stats_.wlock()->numSplits;

                // Re-derive the read batch size from the new split's estimated
                // row size, when known.
                curStatus_ = "getOutput: dataSource_->estimatedRowSize";
                const auto estimatedRowSize = dataSource_->estimatedRowSize();
                readBatchSize_ =
                        estimatedRowSize == connector::DataSource::kUnknownRowSize
                            ? outputBatchRows()
                            : outputBatchRows(estimatedRowSize);
            }

            // Check for  cancellation since scans that filter everything out will not
            // hit the check in Driver.
            curStatus_ = "getOutput: task->isCancelled";
            if (operatorCtx_->task()->isCancelled()) {
                return nullptr;
            }

            ExceptionContextSetter exceptionContext(
                {
                    [](PolluxException::Type /*exceptionType*/, auto *debugString) {
                        return *static_cast<std::string *>(debugString);
                    },
                    &debugString_
                });

            // Shrink the batch when previous batches were heavily filtered, so
            // one call does not scan an unbounded amount of input.
            int32_t readBatchSize = readBatchSize_;
            if (maxFilteringRatio_ > 0) {
                readBatchSize = std::min(
                    maxReadBatchSize_,
                    static_cast<int32_t>(readBatchSize / maxFilteringRatio_));
            }
            curStatus_ = "getOutput: dataSource_->next";
            uint64_t ioTimeUs{0};
            std::optional<RowVectorPtr> dataOptional; {
                MicrosecondTimer timer(&ioTimeUs);
                dataOptional = dataSource_->next(readBatchSize, blockingFuture_);
            }

            curStatus_ = "getOutput: checkPreload";
            checkPreload(); {
                curStatus_ = "getOutput: updating stats_.dataSourceReadWallNanos";
                auto lockedStats = stats_.wlock();
                lockedStats->addRuntimeStat(
                    "dataSourceReadWallNanos",
                    RuntimeCounter(ioTimeUs * 1'000, RuntimeCounter::Unit::kNanos));

                // No value means the connector is blocked; blockingFuture_ was
                // set by dataSource_->next() above.
                if (!dataOptional.has_value()) {
                    blockingReason_ = BlockingReason::kWaitForConnector;
                    return nullptr;
                }

                curStatus_ = "getOutput: updating stats_.rawInput";
                lockedStats->rawInputPositions = dataSource_->getCompletedRows();
                lockedStats->rawInputBytes = dataSource_->getCompletedBytes();

                RowVectorPtr data = std::move(dataOptional).value();
                if (data != nullptr) {
                    if (data->size() > 0) {
                        lockedStats->addInputVector(data->estimate_flat_size(), data->size());
                        // Track how aggressively filters shrink batches, but
                        // never scale the batch up by more than this factor.
                        constexpr int kMaxSelectiveBatchSizeMultiplier = 4;
                        maxFilteringRatio_ = std::max(
                            {
                                maxFilteringRatio_,
                                1.0 * data->size() / readBatchSize,
                                1.0 / kMaxSelectiveBatchSizeMultiplier
                            });
                        if (ioTimeUs > 0) {
                            RECORD_HISTOGRAM_METRIC_VALUE(
                                pollux::kMetricTableScanBatchProcessTimeMs, ioTimeUs / 1'000);
                        }
                        return data;
                    }
                    // Non-null but empty batch: everything was filtered out;
                    // keep reading from the same split.
                    continue;
                }
            }

            // nullptr batch: the current split is fully consumed.
            uint64_t currNumRawInputRows{0}; {
                curStatus_ = "getOutput: updating stats_.preloadedSplits";
                auto lockedStats = stats_.wlock();
                if (numPreloadedSplits_ > 0) {
                    lockedStats->addRuntimeStat(
                        "preloadedSplits", RuntimeCounter(numPreloadedSplits_));
                    numPreloadedSplits_ = 0;
                }
                if (numReadyPreloadedSplits_ > 0) {
                    lockedStats->addRuntimeStat(
                        "readyPreloadedSplits", RuntimeCounter(numReadyPreloadedSplits_));
                    numReadyPreloadedSplits_ = 0;
                }
                currNumRawInputRows = lockedStats->rawInputPositions;
            }
            POLLUX_CHECK_LE(rawInputRowsSinceLastSplit_, currNumRawInputRows);
            const bool emptySplit = currNumRawInputRows == rawInputRowsSinceLastSplit_;
            rawInputRowsSinceLastSplit_ = currNumRawInputRows;

            curStatus_ = "getOutput: task->splitFinished";
            driverCtx_->task->splitFinished(true, currentSplitWeight_);
            needNewSplit_ = true;

            // We only update scaled controller when we have finished a non-empty split.
            // Otherwise, it can lead to the wrong scale up decisions if the first few
            // splits are empty. Then we only report the memory usage for the file
            // footer read which is much smaller the actual memory usage when read from
            // a non-empty split. This can cause query OOM as we run too many scan
            // drivers with each use non-trivial amount of memory.
            if (!emptySplit) {
                tryScaleUp();
            }
        }
    }

    // Asks the scaled-scan controller whether this driver must pause until a
    // scale-up decision is made. On a pause, the controller arms
    // blockingFuture_ and we record kWaitForScanScaleUp as the reason.
    bool TableScan::shouldWaitForScaleUp() {
        // No controller means scaled scan is disabled: never wait.
        if (scaledController_ == nullptr) {
            return false;
        }

        curStatus_ = "getOutput: shouldWaitForScaleUp";
        const bool mustWait = scaledController_->shouldStop(
            operatorCtx_->driverCtx()->driverId, &blockingFuture_);
        if (mustWait) {
            blockingReason_ = BlockingReason::kWaitForScanScaleUp;
            return true;
        }
        // The controller must not arm the future when no wait is requested.
        POLLUX_CHECK(!blockingFuture_.valid());
        return false;
    }

    // Reports this driver's peak memory usage to the scaled-scan controller,
    // which may decide to start additional scan drivers. No-op when scaled
    // scan is disabled.
    void TableScan::tryScaleUp() {
        if (scaledController_ != nullptr) {
            scaledController_->updateAndTryScale(
                operatorCtx_->driverCtx()->driverId, pool()->peakBytes());
        }
    }

    // Attaches an AsyncSource to the split that, when invoked on an executor
    // thread, builds a fully-initialized DataSource for it (see
    // checkPreload() for where the prepare work is scheduled).
    //
    // @param split The connector split to preload; its dataSource field is
    //              populated here.
    void TableScan::preload(
        const std::shared_ptr<connector::ConnectorSplit> &split) {
        // The AsyncSource returns a unique_ptr to the shared_ptr of the
        // DataSource. The callback may outlive the Task, hence it captures
        // a shared_ptr to it. This is required to keep memory pools live
        // for the duration. The callback checks for task cancellation to
        // avoid needless work.
        split->dataSource = std::make_unique<AsyncSource<connector::DataSource> >(
            [type = outputType_,
                table = tableHandle_,
                columns = columnHandles_,
                connector = connector_,
                ctx = operatorCtx_->createConnectorQueryCtx(
                    split->connectorId, planNodeId(), connectorPool_),
                task = operatorCtx_->task(),
                // Snapshot of the dynamic filters known at preload time.
                dynamicFilters = dynamicFilters_,
                split]() -> std::unique_ptr<connector::DataSource> {
                // nullptr result signals cancellation to the consumer in
                // getOutput().
                if (task->isCancelled()) {
                    return nullptr;
                }
                auto debugString =
                        fmt::format("Split {} Task {}", split->toString(), task->taskId());
                // Attach split/task info to any exception thrown while
                // preparing the data source.
                ExceptionContextSetter exceptionContext(
                    {
                        [](PolluxException::Type /*exceptionType*/, auto *debugString) {
                            return *static_cast<std::string *>(debugString);
                        },
                        &debugString
                    });

                auto dataSource =
                        connector->createDataSource(type, table, columns, ctx.get());
                // Re-check cancellation: createDataSource may take a while.
                if (task->isCancelled()) {
                    return nullptr;
                }
                for (const auto &entry: dynamicFilters) {
                    dataSource->addDynamicFilter(entry.first, entry.second);
                }
                dataSource->addSplit(split);
                return dataSource;
            });
    }

    // Enables split preloading once the current data source has issued all of
    // its prefetches: sets maxPreloadedSplits_ (consumed by
    // Task::getSplitOrFuture) and installs splitPreloader_, a callback that
    // prepares a split's DataSource asynchronously on the connector executor.
    void TableScan::checkPreload() {
        auto *executor = connector_->executor();
        // Preloading requires a non-zero per-driver limit, an executor to run
        // the prepare work on, and connector support.
        if (maxSplitPreloadPerDriver_ == 0 || !executor ||
            !connector_->supportsSplitPreload()) {
            return;
        }
        if (dataSource_->allPrefetchIssued()) {
            // Budget scales with the number of drivers in this pipeline.
            maxPreloadedSplits_ = driverCtx_->task->numDrivers(driverCtx_->driver) *
                                  maxSplitPreloadPerDriver_;
            if (!splitPreloader_) {
                splitPreloader_ =
                        [executor,
                            this](const std::shared_ptr<connector::ConnectorSplit> &split) {
                            preload(split);

                            // Kick off the async prepare; the lambda drops its
                            // split reference as soon as prepare() returns.
                            executor->add([connectorSplit = split]() mutable {
                                connectorSplit->dataSource->prepare();
                                connectorSplit.reset();
                            });
                        };
            }
        }
    }

    // The scan is finished once the task has signalled that no further splits
    // will arrive for this plan node.
    bool TableScan::isFinished() {
        const bool finished = noMoreSplits_;
        return finished;
    }

    // Registers a dynamic filter pushed down from a downstream operator.
    //
    // The filter is forwarded to the active data source (if one exists) and
    // also remembered, merged with any prior filter on the same channel, so
    // it can be applied to data sources created for later splits.
    //
    // @param producer      Plan node id of the operator that produced the
    //                      filter; recorded in the dynamic-filter stats.
    // @param outputChannel Output column the filter applies to.
    // @param filter        The filter to apply.
    void TableScan::addDynamicFilter(
        const core::PlanNodeId &producer,
        column_index_t outputChannel,
        const std::shared_ptr<common::Filter> &filter) {
        if (dataSource_) {
            dataSource_->addDynamicFilter(outputChannel, filter);
        }
        auto &storedFilter = dynamicFilters_[outputChannel];
        if (!storedFilter) {
            storedFilter = filter;
        } else {
            storedFilter = storedFilter->mergeWith(filter.get());
        }
        stats_.wlock()->dynamicFilterStats.producerNodeIds.emplace(producer);
    }

    // Closes the operator: runs the base close, cancels any in-flight
    // connector work, and — if this is the first scan operator to close the
    // scaled controller — reports the scaled-scan runtime stats.
    void TableScan::close() {
        Operator::close();

        if (dataSource_ != nullptr) {
            dataSource_->cancel();
        }

        // scaledController_->close() returns true only once, for the first
        // operator to close it; by then all splits have been dispatched, so
        // that operator reports the controller stats.
        if (scaledController_ != nullptr && scaledController_->close()) {
            const auto scaledStats = scaledController_->stats();
            auto lockedStats = stats_.wlock();
            lockedStats->addRuntimeStat(
                TableScan::kNumRunningScaleThreads,
                RuntimeCounter(scaledStats.numRunningDrivers));
        }
    }
} // namespace kumo::pollux::exec
