// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

// (Doc section: Execution Plan Documentation Example)

#include <nebula/core/array.h>
#include <nebula/core/builder.h>

#include <nebula/acero/exec_plan.h>
#include <nebula/compute/api.h>
#include <nebula/compute/api_vector.h>
#include <nebula/compute/cast.h>

#include <nebula/csv/api.h>

#include <nebula/dataset/dataset.h>
#include <nebula/dataset/file_base.h>
#include <nebula/dataset/file_ipc.h>
#include <nebula/dataset/plan.h>
#include <nebula/dataset/scanner.h>
#include <turbo/files/filesystem.h>
#include <nebula/io/interfaces.h>
#include <nebula/io/memory.h>
#include <nebula/core/table.h>

#include <nebula/ipc/api.h>

#include <nebula/future/future.h>
#include <nebula/util/range.h>
#include <nebula/future/thread_pool.h>
#include <nebula/util/vector.h>

#include <iostream>
#include <memory>
#include <utility>

// Demonstrate various operators in Nebula Streaming Execution Engine

namespace cp = ::nebula::compute;
namespace ac = ::nebula::acero;

constexpr char kSep[] = "******";

/// Print a banner line ("****** msg ******") separating example outputs.
void PrintBlock(const std::string &msg) {
    std::cout << '\n' << '\t' << kSep << ' ' << msg << ' ' << kSep << '\n' << std::endl;
}

/// \brief Build a primitive (number/boolean/temporal) array from C values.
/// \tparam TYPE the nebula type (e.g. nebula::Int32Type); constrained via enable_if
/// \param values the values to append; every slot is marked valid (no nulls)
/// \return the finished array, or the first builder error
template<typename TYPE,
        typename = typename std::enable_if<nebula::is_number_type<TYPE>::value ||
                                           nebula::is_boolean_type<TYPE>::value ||
                                           nebula::is_temporal_type<TYPE>::value>::type>
turbo::Result<std::shared_ptr<nebula::Array>> get_array_data_sample(
        const std::vector<typename TYPE::c_type> &values) {
    using ArrowBuilderType = typename nebula::TypeTraits<TYPE>::BuilderType;
    ArrowBuilderType builder;
    // Reserve once so append_values does a single allocation.
    TURBO_RETURN_NOT_OK(builder.Reserve(values.size()));
    TURBO_RETURN_NOT_OK(builder.append_values(values));
    return builder.finish();
}

/// \brief Build a binary/string-like array from a vector of std::string values.
/// \tparam TYPE a binary-like nebula type (e.g. nebula::StringType)
/// \param values the strings to append; every slot is valid (no nulls)
/// \return the finished array, or the first builder error
template<class TYPE>
turbo::Result<std::shared_ptr<nebula::Array>> GetBinaryArrayDataSample(
        const std::vector<std::string> &values) {
    typename nebula::TypeTraits<TYPE>::BuilderType string_builder;
    // Reserve up front so append_values performs a single allocation.
    TURBO_RETURN_NOT_OK(string_builder.Reserve(values.size()));
    TURBO_RETURN_NOT_OK(string_builder.append_values(values));
    return string_builder.finish();
}

/// \brief Assemble a RecordBatch from parallel arrays and fields.
/// \param array_vector one array per field (same order as field_vector)
/// \param field_vector fields describing each column
/// \return the record batch, or the error from struct-array construction
turbo::Result<std::shared_ptr<nebula::RecordBatch>> GetSampleRecordBatch(
        const nebula::ArrayVector &array_vector, const nebula::FieldVector &field_vector) {
    TURBO_MOVE_OR_RAISE(auto struct_result,
                        nebula::StructArray::create(array_vector, field_vector));
    // FromStructArray is a class-level factory; invoke it through the class
    // instead of through a default-constructed (null) shared_ptr as before.
    return nebula::RecordBatch::FromStructArray(struct_result);
}

/// \brief create a sample table
/// The table's contents will be:
/// a,b
/// 1,null
/// 2,true
/// null,true
/// 3,false
/// null,true
/// 4,false
/// 5,null
/// 6,false
/// 7,false
/// 8,true
/// \return The created table

turbo::Result<std::shared_ptr<nebula::Table>> GetTable() {
    // Column "a": int64 with explicit nulls.  Integral types have no NaN, so
    // the previous std::numeric_limits<int64_t>::quiet_NaN() simply yielded 0
    // and the "null" slots documented above were silently stored as 0.  Use a
    // validity vector (as the boolean column already does) so the nulls are
    // real nulls.  Assumes Int64Builder::append_values accepts a validity
    // vector like BooleanBuilder -- verify against the builder API.
    nebula::Int64Builder int64_builder;
    std::shared_ptr<nebula::Array> int64_array;
    std::vector<int64_t> int_values = {1, 2, 0, 3, 0, 4, 5, 6, 7, 8};
    std::vector<bool> int_is_valid = {true, true, false, true, false,
                                      true, true, true, true, true};
    TURBO_RETURN_NOT_OK(int64_builder.Reserve(10));
    TURBO_RETURN_NOT_OK(int64_builder.append_values(int_values, int_is_valid));
    TURBO_RETURN_NOT_OK(int64_builder.finish(&int64_array));

    // Column "b": boolean with explicit nulls via a validity vector.
    nebula::BooleanBuilder boolean_builder;
    std::shared_ptr<nebula::BooleanArray> bool_array;

    std::vector<uint8_t> bool_values = {false, true, true, false, true,
                                        false, false, false, false, true};
    std::vector<bool> is_valid = {false, true, true, true, true,
                                  true, false, true, true, true};

    TURBO_RETURN_NOT_OK(boolean_builder.Reserve(10));

    TURBO_RETURN_NOT_OK(boolean_builder.append_values(bool_values, is_valid));

    TURBO_RETURN_NOT_OK(boolean_builder.finish(&bool_array));

    // Assemble a single 10-row record batch and wrap it in a table.
    auto record_batch =
            nebula::RecordBatch::create(nebula::schema({nebula::field("a", nebula::int64()),
                                                        nebula::field("b", nebula::boolean())}),
                                        10, {int64_array, bool_array});
    TURBO_MOVE_OR_RAISE(auto table, nebula::Table::from_record_batches({record_batch}));
    return table;
}

/// \brief Create a sample dataset.
/// \return An in-memory dataset wrapping the table from GetTable()
turbo::Result<std::shared_ptr<nebula::dataset::Dataset>> GetDataset() {
    TURBO_MOVE_OR_RAISE(auto sample_table, GetTable());
    return std::make_shared<nebula::dataset::InMemoryDataset>(sample_table);
}

/// \brief Convert parallel field/array vectors into a compute ExecBatch.
/// \param field_vector fields describing each column
/// \param array_vector one array per field (same order)
/// \return the batch, or the error from record-batch construction
turbo::Result<cp::ExecBatch> GetExecBatchFromVectors(
        const nebula::FieldVector &field_vector, const nebula::ArrayVector &array_vector) {
    // (Dropped an unused RecordBatch shared_ptr that was declared here.)
    TURBO_MOVE_OR_RAISE(auto res_batch, GetSampleRecordBatch(array_vector, field_vector));
    return cp::ExecBatch{*res_batch};
}

// (Doc section: BatchesWithSchema Definition)
struct BatchesWithSchema {
    std::vector<cp::ExecBatch> batches;
    std::shared_ptr<nebula::Schema> schema;

    /// Wrap the stored batches in an AsyncGenerator of optional batches,
    /// the input shape expected by ac::SourceNodeOptions.
    nebula::AsyncGenerator<std::optional<cp::ExecBatch>> gen() const {
        std::vector<std::optional<cp::ExecBatch>> wrapped;
        wrapped.reserve(batches.size());
        for (const auto &batch : batches) {
            wrapped.emplace_back(batch);
        }
        return nebula::create_vector_generator(std::move(wrapped));
    }
};
// (Doc section: BatchesWithSchema Definition)

// (Doc section: MakeBasicBatches Definition)
/// Build three small batches over schema (a: int32, b: boolean).
turbo::Result<BatchesWithSchema> MakeBasicBatches() {
    BatchesWithSchema result;
    auto schema_fields = {nebula::field("a", nebula::int32()),
                          nebula::field("b", nebula::boolean())};

    // Int32 column chunks.
    TURBO_MOVE_OR_RAISE(auto ints_1, get_array_data_sample<nebula::Int32Type>({0, 4}));
    TURBO_MOVE_OR_RAISE(auto ints_2, get_array_data_sample<nebula::Int32Type>({5, 6, 7}));
    TURBO_MOVE_OR_RAISE(auto ints_3, get_array_data_sample<nebula::Int32Type>({8, 9, 10}));

    // Matching boolean column chunks.
    TURBO_MOVE_OR_RAISE(auto bools_1,
                        get_array_data_sample<nebula::BooleanType>({false, true}));
    TURBO_MOVE_OR_RAISE(auto bools_2,
                        get_array_data_sample<nebula::BooleanType>({true, false, true}));
    TURBO_MOVE_OR_RAISE(auto bools_3,
                        get_array_data_sample<nebula::BooleanType>({false, true, false}));

    TURBO_MOVE_OR_RAISE(auto batch_1,
                        GetExecBatchFromVectors(schema_fields, {ints_1, bools_1}));
    TURBO_MOVE_OR_RAISE(auto batch_2,
                        GetExecBatchFromVectors(schema_fields, {ints_2, bools_2}));
    TURBO_MOVE_OR_RAISE(auto batch_3,
                        GetExecBatchFromVectors(schema_fields, {ints_3, bools_3}));

    result.batches = {batch_1, batch_2, batch_3};
    result.schema = nebula::schema(schema_fields);
    return result;
}
// (Doc section: MakeBasicBatches Definition)

/// Build four batches of int32 data for the order-by example.
turbo::Result<BatchesWithSchema> MakeSortTestBasicBatches() {
    BatchesWithSchema result;
    auto key_field = nebula::field("a", nebula::int32());

    // Eight sample int32 arrays, built in one pass.
    const std::vector<std::vector<int32_t>> samples = {
            {1, 3, 0, 2},      {121, 101, 120, 12}, {10, 110, 210, 121},
            {51, 101, 2, 34},  {11, 31, 1, 12},     {12, 101, 120, 12},
            {0, 110, 210, 11}, {51, 10, 2, 3}};
    std::vector<std::shared_ptr<nebula::Array>> arrays;
    arrays.reserve(samples.size());
    for (const auto &sample : samples) {
        TURBO_MOVE_OR_RAISE(auto arr, get_array_data_sample<nebula::Int32Type>(sample));
        arrays.push_back(std::move(arr));
    }

    TURBO_MOVE_OR_RAISE(auto batch_1, GetExecBatchFromVectors({key_field}, {arrays[0]}));
    TURBO_MOVE_OR_RAISE(auto batch_2, GetExecBatchFromVectors({key_field}, {arrays[1]}));
    // NOTE(review): batches 3 and 4 carry more columns than the declared
    // single-field schema, mirroring the original sample data -- confirm
    // this is intended.
    TURBO_MOVE_OR_RAISE(auto batch_3,
                        GetExecBatchFromVectors({key_field, key_field},
                                                {arrays[2], arrays[7]}));
    TURBO_MOVE_OR_RAISE(auto batch_4,
                        GetExecBatchFromVectors({key_field, key_field, key_field, key_field},
                                                {arrays[3], arrays[4], arrays[5], arrays[6]}));
    result.batches = {batch_1, batch_2, batch_3, batch_4};
    result.schema = nebula::schema({key_field});
    return result;
}

/// Build three batches over (i32: int32, str: utf8), optionally repeated.
/// \param multiplicity how many copies of each base batch to emit (default 1)
turbo::Result<BatchesWithSchema> MakeGroupableBatches(int multiplicity = 1) {
    BatchesWithSchema result;
    auto schema_fields = {nebula::field("i32", nebula::int32()),
                          nebula::field("str", nebula::utf8())};

    TURBO_MOVE_OR_RAISE(auto ints_1, get_array_data_sample<nebula::Int32Type>({12, 7, 3}));
    TURBO_MOVE_OR_RAISE(auto ints_2, get_array_data_sample<nebula::Int32Type>({-2, -1, 3}));
    TURBO_MOVE_OR_RAISE(auto ints_3, get_array_data_sample<nebula::Int32Type>({5, 3, -8}));
    TURBO_MOVE_OR_RAISE(auto strs_1, GetBinaryArrayDataSample<nebula::StringType>(
            {"alpha", "beta", "alpha"}));
    TURBO_MOVE_OR_RAISE(auto strs_2, GetBinaryArrayDataSample<nebula::StringType>(
            {"alpha", "gamma", "alpha"}));
    TURBO_MOVE_OR_RAISE(auto strs_3, GetBinaryArrayDataSample<nebula::StringType>(
            {"gamma", "beta", "alpha"}));

    TURBO_MOVE_OR_RAISE(auto batch_1,
                        GetExecBatchFromVectors(schema_fields, {ints_1, strs_1}));
    TURBO_MOVE_OR_RAISE(auto batch_2,
                        GetExecBatchFromVectors(schema_fields, {ints_2, strs_2}));
    TURBO_MOVE_OR_RAISE(auto batch_3,
                        GetExecBatchFromVectors(schema_fields, {ints_3, strs_3}));
    result.batches = {batch_1, batch_2, batch_3};

    // Append (multiplicity - 1) more copies of the three base batches.
    // Index loop on purpose: push_back may reallocate, so iterators into
    // result.batches would be invalidated.
    const size_t base_count = result.batches.size();
    for (int rep = 1; rep < multiplicity; ++rep) {
        for (size_t idx = 0; idx < base_count; ++idx) {
            result.batches.push_back(result.batches[idx]);
        }
    }

    result.schema = nebula::schema(schema_fields);
    return result;
}

/// Run \p plan to completion, gather its output into a table and print it.
turbo::Status ExecutePlanAndCollectAsTable(ac::Declaration plan) {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::Table> result_table,
                        ac::declaration_to_table(std::move(plan)));

    std::cout << "Results : " << result_table->to_string() << std::endl;

    return turbo::OkStatus();
}

// (Doc section: Scan Example)

/// \brief An example demonstrating a scan and sink node
///
/// Scan-Table
/// This example shows how scan operation can be applied on a dataset.
/// There are operations that can be applied on the scan (project, filter)
/// and the input data can be processed. The output is obtained as a table
turbo::Status ScanSinkExample() {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::dataset::Dataset> dataset, GetDataset());

    // Scan with an empty projection (no column transformation).
    auto scan_options = std::make_shared<nebula::dataset::ScanOptions>();
    scan_options->projection = cp::project({}, {});

    // Single-node plan: just the scan.
    ac::Declaration scan_decl{"scan",
                              nebula::dataset::ScanNodeOptions{dataset, scan_options}};

    return ExecutePlanAndCollectAsTable(std::move(scan_decl));
}
// (Doc section: Scan Example)

// (Doc section: Source Example)

/// \brief An example demonstrating a source and sink node
///
/// Source-Table Example
/// This example shows how a custom source can be used
/// in an execution plan. This includes source node using pregenerated
/// data and collecting it into a table.
///
/// This sort of custom source is often not needed.  In most cases you can
/// use a scan (for a dataset source) or a source like table_source, array_vector_source,
/// exec_batch_source, or record_batch_source (for in-memory data)
turbo::Status SourceSinkExample() {
    TURBO_MOVE_OR_RAISE(auto sample_batches, MakeBasicBatches());

    // Feed the pre-generated batches through a custom source node.
    ac::Declaration source_decl{
            "source", ac::SourceNodeOptions{sample_batches.schema, sample_batches.gen()}};

    return ExecutePlanAndCollectAsTable(std::move(source_decl));
}
// (Doc section: Source Example)

// (Doc section: Table Source Example)

/// \brief An example showing a table source node
///
/// TableSource-Table Example
/// This example shows how a table_source can be used
/// in an execution plan. This includes a table source node
/// receiving data from a table.  This plan simply collects the
/// data back into a table but nodes could be added that modify
/// or transform the data as well (as is shown in later examples)
turbo::Status TableSourceSinkExample() {
    TURBO_MOVE_OR_RAISE(auto table, GetTable());

    // Break the table into batches of at most two rows so the plan sees
    // multiple batches.  (Dropped an unused sink generator that was
    // declared here.)
    int max_batch_size = 2;
    auto table_source_options = ac::TableSourceNodeOptions{table, max_batch_size};

    ac::Declaration source{"table_source", std::move(table_source_options)};

    return ExecutePlanAndCollectAsTable(std::move(source));
}
// (Doc section: Table Source Example)

// (Doc section: Filter Example)

/// \brief An example showing a filter node
///
/// Source-Filter-Table
/// This example shows how a filter can be used in an execution plan,
/// to filter data from a source. The output from the execution plan
/// is collected into a table.
turbo::Status ScanFilterSinkExample() {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::dataset::Dataset> dataset, GetDataset());

    // Keep only rows where column "a" is greater than 3.
    cp::Expression filter_expr = cp::greater(cp::field_ref("a"), cp::literal(3));

    auto scan_options = std::make_shared<nebula::dataset::ScanOptions>();
    // The scanner receives a copy of the filter for on-disk / push-down
    // filtering; this step can be skipped when not reading from disk.
    scan_options->filter = filter_expr;
    scan_options->projection = cp::project({}, {});  // empty projection

    std::cout << "Initialized Scanning Options" << std::endl;

    auto scan_node_options = nebula::dataset::ScanNodeOptions{dataset, scan_options};
    std::cout << "Scan node options created" << std::endl;

    ac::Declaration scan_decl{"scan", std::move(scan_node_options)};

    // Pipe the scan into a filter node, which applies the same expression
    // again -- this time for in-memory filtering.
    ac::Declaration filter_decl{"filter",
                                {std::move(scan_decl)},
                                ac::FilterNodeOptions(std::move(filter_expr))};

    return ExecutePlanAndCollectAsTable(std::move(filter_decl));
}

// (Doc section: Filter Example)

// (Doc section: Project Example)

/// \brief An example showing a project node
///
/// Scan-Project-Table
/// This example shows how a Scan operation can be used to load the data
/// into the execution plan, how a project operation can be applied on the
/// data stream and how the output is collected into a table
turbo::Status ScanProjectSinkExample() {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::dataset::Dataset> dataset, GetDataset());

    auto scan_options = std::make_shared<nebula::dataset::ScanOptions>();
    // Expression evaluated by the project node: a * 2.
    cp::Expression a_times_2 = cp::call("multiply", {cp::field_ref("a"), cp::literal(2)});
    scan_options->projection = cp::project({}, {});  // empty projection at scan level

    ac::Declaration scan_decl{"scan",
                              nebula::dataset::ScanNodeOptions{dataset, scan_options}};
    ac::Declaration project_decl{"project",
                                 {std::move(scan_decl)},
                                 ac::ProjectNodeOptions({a_times_2})};

    return ExecutePlanAndCollectAsTable(std::move(project_decl));
}

// (Doc section: Project Example)

// This is a variation of ScanProjectSinkExample introducing how to use the
// Declaration::sequence function
turbo::Status ScanProjectSequenceSinkExample() {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::dataset::Dataset> dataset, GetDataset());

    auto scan_options = std::make_shared<nebula::dataset::ScanOptions>();
    // Expression evaluated by the project node: a * 2.
    cp::Expression a_times_2 = cp::call("multiply", {cp::field_ref("a"), cp::literal(2)});
    scan_options->projection = cp::project({}, {});  // empty projection

    // (Doc section: Project sequence Example)
    // With Declaration::sequence each node implicitly consumes the previous
    // one, so the project node needs no explicit input.
    ac::Declaration plan = ac::Declaration::sequence(
            {{"scan", nebula::dataset::ScanNodeOptions{dataset, scan_options}},
             {"project", ac::ProjectNodeOptions({a_times_2})}});
    // (Doc section: Project sequence Example)

    return ExecutePlanAndCollectAsTable(std::move(plan));
}

// (Doc section: Scalar Aggregate Example)

/// \brief An example showing an aggregation node to aggregate an entire table
///
/// Source-Aggregation-Table
/// This example shows how an aggregation operation can be applied on a
/// execution plan resulting in a scalar output. The source node loads the
/// data and the aggregation (counting unique types in column 'a')
/// is applied on this data. The output is collected into a table (that will
/// have exactly one row)
turbo::Status SourceScalarAggregateSinkExample() {
    TURBO_MOVE_OR_RAISE(auto sample_batches, MakeBasicBatches());

    ac::Declaration source_decl{
            "source", ac::SourceNodeOptions{sample_batches.schema, sample_batches.gen()}};

    // Whole-table sum of column "a", emitted under the name "sum(a)".
    ac::Declaration aggregate_decl{
            "aggregate",
            {std::move(source_decl)},
            ac::AggregateNodeOptions{/*aggregates=*/{{"sum", nullptr, "a", "sum(a)"}}}};

    return ExecutePlanAndCollectAsTable(std::move(aggregate_decl));
}
// (Doc section: Scalar Aggregate Example)

// (Doc section: Group Aggregate Example)

/// \brief An example showing an aggregation node to perform a group-by operation
///
/// Source-Aggregation-Table
/// This example shows how an aggregation operation can be applied on a
/// execution plan resulting in grouped output. The source node loads the
/// data and the aggregation (counting unique types in column 'a') is
/// applied on this data. The output is collected into a table that will contain
/// one row for each unique combination of group keys.
turbo::Status SourceGroupAggregateSinkExample() {
    TURBO_MOVE_OR_RAISE(auto basic_data, MakeBasicBatches());

    // (Dropped an unused sink generator that was declared here.)
    auto source_node_options = ac::SourceNodeOptions{basic_data.schema, basic_data.gen()};

    ac::Declaration source{"source", std::move(source_node_options)};
    // Count the valid (non-null) values of "a" within each group keyed by "b".
    auto options = std::make_shared<cp::CountOptions>(cp::CountOptions::ONLY_VALID);
    auto aggregate_options =
            ac::AggregateNodeOptions{/*aggregates=*/{{"hash_count", options, "a", "count(a)"}},
                                     /*keys=*/{"b"}};
    ac::Declaration aggregate{
            "aggregate", {std::move(source)}, std::move(aggregate_options)};

    return ExecutePlanAndCollectAsTable(std::move(aggregate));
}
// (Doc section: Group Aggregate Example)

// (Doc section: ConsumingSink Example)

/// \brief An example showing a consuming sink node
///
/// Source-Consuming-Sink
/// This example shows how the data can be consumed within the execution plan
/// by using a ConsumingSink node. There is no data output from this execution plan.
turbo::Status SourceConsumingSinkExample() {
    TURBO_MOVE_OR_RAISE(auto basic_data, MakeBasicBatches());

    auto source_node_options = ac::SourceNodeOptions{basic_data.schema, basic_data.gen()};

    ac::Declaration source{"source", std::move(source_node_options)};

    // Shared with the consumer below; atomic in case consume() is invoked
    // from more than one plan thread -- TODO confirm the engine's threading
    // model for consuming sinks.
    std::atomic<uint32_t> batches_seen{0};
    // NOTE(review): this future is created and handed to the consumer but is
    // never completed or awaited anywhere in this example -- appears unused;
    // confirm before removing.
    nebula::Future<> f = nebula::Future<>::create();
    struct CustomSinkNodeConsumer : public ac::SinkNodeConsumer {
        CustomSinkNodeConsumer(std::atomic<uint32_t> *batches_seen, nebula::Future<> f)
                : batches_seen(batches_seen), f(std::move(f)) {}

        turbo::Status init(const std::shared_ptr<nebula::Schema> &schema,
                           ac::BackpressureControl *backpressure_control,
                           ac::ExecPlan *plan) override {
            // This will be called as the plan is started (before the first call to consume)
            // and provides the schema of the data coming into the node, controls for pausing /
            // resuming input, and a pointer to the plan itself which can be used to access
            // other utilities such as the thread indexer or async task scheduler.
            return turbo::OkStatus();
        }

        turbo::Status consume(cp::ExecBatch batch) override {
            // Count the batch; the batch data itself is discarded.
            (*batches_seen)++;
            return turbo::OkStatus();
        }

        nebula::Future<> finish() override {
            // Here you can perform whatever (possibly async) cleanup is needed, e.g. closing
            // output file handles and flushing remaining work
            return nebula::Future<>::make_finished();
        }

        // Non-owning pointer into the counter declared in the enclosing scope;
        // valid because the plan is run to completion before that scope ends.
        std::atomic<uint32_t> *batches_seen;
        nebula::Future<> f;
    };
    std::shared_ptr<CustomSinkNodeConsumer> consumer =
            std::make_shared<CustomSinkNodeConsumer>(&batches_seen, f);

    ac::Declaration consuming_sink{"consuming_sink",
                                   {std::move(source)},
                                   ac::ConsumingSinkNodeOptions(std::move(consumer))};

    // Since we are consuming the data within the plan there is no output and we simply
    // run the plan to completion instead of collecting into a table.
    TURBO_RETURN_NOT_OK(ac::declaration_to_status(std::move(consuming_sink)));

    std::cout << "The consuming sink node saw " << batches_seen.load() << " batches"
              << std::endl;
    return turbo::OkStatus();
}
// (Doc section: ConsumingSink Example)

// (Doc section: OrderBySink Example)

/// \brief Run an already-wired ExecPlan and print its sink output as a table.
/// \param plan a plan whose sink node feeds \p sink_gen
/// \param schema schema of the batches \p sink_gen produces
/// \param sink_gen async generator populated by the plan's sink node
/// \return the plan's final status
turbo::Status ExecutePlanAndCollectAsTableWithCustomSink(
        std::shared_ptr<ac::ExecPlan> plan, std::shared_ptr<nebula::Schema> schema,
        nebula::AsyncGenerator<std::optional<cp::ExecBatch>> sink_gen) {
    // translate sink_gen (async) to sink_reader (sync)
    std::shared_ptr<nebula::RecordBatchReader> sink_reader =
            ac::make_generator_reader(schema, std::move(sink_gen), nebula::default_memory_pool());

    // validate the ExecPlan
    TURBO_RETURN_NOT_OK(plan->validate());
    std::cout << "ExecPlan created : " << plan->to_string() << std::endl;
    // start the ExecPlan
    plan->start_producing();

    // collect sink_reader into a Table.  NOTE(review): reading the whole
    // reader presumably blocks here until the plan has emitted all batches --
    // confirm against the reader's contract.
    std::shared_ptr<nebula::Table> response_table;

    TURBO_MOVE_OR_RAISE(response_table,
                        nebula::Table::from_record_batch_reader(sink_reader.get()));

    std::cout << "Results : " << response_table->to_string() << std::endl;

    // stop producing
    plan->stop_producing();
    // plan mark finished
    auto future = plan->finished();
    return future.status();
}

/// \brief An example showing an order-by node
///
/// Source-OrderBy-Sink
/// In this example, the data enters through the source node
/// and the data is ordered in the sink node. The order can be
/// ASCENDING or DESCENDING and it is configurable. The output
/// is obtained as a table from the sink node.
turbo::Status SourceOrderBySinkExample() {
    // This example wires nodes into an ExecPlan directly (make_exec_node)
    // instead of using Declarations, because the order_by_sink writes into
    // a caller-provided generator.
    TURBO_MOVE_OR_RAISE(std::shared_ptr<ac::ExecPlan> plan,
                        ac::ExecPlan::create(*cp::threaded_exec_context()));

    TURBO_MOVE_OR_RAISE(auto basic_data, MakeSortTestBasicBatches());

    // Filled in by the order_by_sink node below (via the &sink_gen pointer).
    nebula::AsyncGenerator<std::optional<cp::ExecBatch>> sink_gen;

    auto source_node_options = ac::SourceNodeOptions{basic_data.schema, basic_data.gen()};
    TURBO_MOVE_OR_RAISE(ac::ExecNode *source,
                        ac::make_exec_node("source", plan.get(), {}, source_node_options));

    // Sort on column "a", descending, before handing batches to the sink.
    TURBO_RETURN_NOT_OK(ac::make_exec_node(
            "order_by_sink", plan.get(), {source},
            ac::OrderBySinkNodeOptions{
                    cp::SortOptions{{cp::SortKey{"a", cp::SortOrder::Descending}}}, &sink_gen}));

    return ExecutePlanAndCollectAsTableWithCustomSink(plan, basic_data.schema, sink_gen);
}

// (Doc section: OrderBySink Example)

// (Doc section: HashJoin Example)

/// \brief An example showing a hash join node
///
/// Source-HashJoin-Table
/// This example shows how source node gets the data and how a self-join
/// is applied on the data. The join options are configurable. The output
/// is collected into a table.
turbo::Status SourceHashJoinSinkExample() {
    TURBO_MOVE_OR_RAISE(auto input, MakeGroupableBatches());

    // Self-join: both sides read the same generated batches.
    ac::Declaration left_decl{"source", ac::SourceNodeOptions{input.schema, input.gen()}};
    ac::Declaration right_decl{"source", ac::SourceNodeOptions{input.schema, input.gen()}};

    // Inner join on "str"; colliding output columns get "l_"/"r_" prefixes.
    ac::HashJoinNodeOptions join_opts{ac::JoinType::INNER,
                                      /*left_keys=*/{"str"},
                                      /*right_keys=*/{"str"},
                                      cp::literal(true),
                                      "l_",
                                      "r_"};

    ac::Declaration join_decl{"hashjoin",
                              {std::move(left_decl), std::move(right_decl)},
                              std::move(join_opts)};

    return ExecutePlanAndCollectAsTable(std::move(join_decl));
}

// (Doc section: HashJoin Example)

// (Doc section: KSelect Example)

/// \brief An example showing a select-k node
///
/// Source-KSelect
/// This example shows how K number of elements can be selected
/// either from the top or bottom. The output node is a modified
/// sink node where output can be obtained as a table.
turbo::Status SourceKSelectExample() {
    TURBO_MOVE_OR_RAISE(auto input, MakeGroupableBatches());
    // Wired directly (make_exec_node) because the select_k_sink writes into a
    // caller-provided generator.
    TURBO_MOVE_OR_RAISE(std::shared_ptr<ac::ExecPlan> plan,
                        ac::ExecPlan::create(*cp::threaded_exec_context()));
    // Filled in by the select_k_sink node below (via the &sink_gen pointer).
    nebula::AsyncGenerator<std::optional<cp::ExecBatch>> sink_gen;

    TURBO_MOVE_OR_RAISE(
            ac::ExecNode *source,
            ac::make_exec_node("source", plan.get(), {},
                               ac::SourceNodeOptions{input.schema, input.gen()}));

    // Keep the top 2 rows ranked by column "i32".
    cp::SelectKOptions options = cp::SelectKOptions::TopKDefault(/*k=*/2, {"i32"});

    TURBO_RETURN_NOT_OK(ac::make_exec_node("select_k_sink", plan.get(), {source},
                                           ac::SelectKSinkNodeOptions{options, &sink_gen}));

    // Schema of the sink output, matching MakeGroupableBatches().
    auto schema = nebula::schema(
            {nebula::field("i32", nebula::int32()), nebula::field("str", nebula::utf8())});

    return ExecutePlanAndCollectAsTableWithCustomSink(plan, schema, sink_gen);
}

// (Doc section: KSelect Example)

// (Doc section: write Example)

/// \brief An example showing a write node
/// \param file_path The destination to write to
///
/// Scan-Filter-write
/// This example shows how scan node can be used to load the data
/// and after processing how it can be written to disk.
turbo::Status ScanFilterWriteExample(const std::string &file_path) {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::dataset::Dataset> dataset, GetDataset());

    auto options = std::make_shared<nebula::dataset::ScanOptions>();
    // empty projection
    options->projection = cp::project({}, {});
    // NOTE(review): despite the function name, no filter expression or filter
    // node is configured here -- confirm whether a filter was intended.

    // construct the scan node
    auto scan_node_options = nebula::dataset::ScanNodeOptions{dataset, options};

    ac::Declaration scan{"scan", std::move(scan_node_options)};

    // (Dropped an unused sink generator that was declared here.)

    std::string root_path = "";
    std::string uri = "file://" + file_path;
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::fs::FileSystem> filesystem,
                        nebula::fs::FileSystemFromUri(uri, &root_path));

    auto base_path = root_path + "/ipc_dataset";
    // Uncomment the following line, if run repeatedly
    // TURBO_RETURN_NOT_OK(filesystem->DeleteDirContents(base_path));
    TURBO_RETURN_NOT_OK(filesystem->CreateDir(base_path));

    // The partition schema determines which fields are part of the partitioning.
    auto partition_schema = nebula::schema({nebula::field("a", nebula::int32())});
    // We'll use Hive-style partitioning,
    // which creates directories with "key=value" pairs.
    auto partitioning =
            std::make_shared<nebula::dataset::HivePartitioning>(partition_schema);
    // We'll write IPC files (the previous comment said Parquet, but
    // IpcFileFormat is what is actually used).
    auto format = std::make_shared<nebula::dataset::IpcFileFormat>();

    nebula::dataset::FileSystemDatasetWriteOptions write_options;
    write_options.file_write_options = format->default_write_options();
    write_options.filesystem = filesystem;
    write_options.base_dir = base_path;
    write_options.partitioning = partitioning;
    write_options.basename_template = "part{i}.ipc";

    nebula::dataset::WriteNodeOptions write_node_options{write_options};

    ac::Declaration write{"write", {std::move(scan)}, std::move(write_node_options)};

    // Since the write node has no output we simply run the plan to completion and the
    // data should be written
    TURBO_RETURN_NOT_OK(ac::declaration_to_status(std::move(write)));

    std::cout << "Dataset written to " << base_path << std::endl;
    return turbo::OkStatus();
}

// (Doc section: write Example)

// (Doc section: Union Example)

/// \brief An example showing a union node
///
/// Source-Union-Table
/// This example shows how a union operation can be applied on two
/// data sources. The output is collected into a table.
turbo::Status SourceUnionSinkExample() {
    TURBO_MOVE_OR_RAISE(auto sample_batches, MakeBasicBatches());

    // Two identical sources; labels make them distinguishable in plan dumps.
    ac::Declaration lhs{"source",
                        ac::SourceNodeOptions{sample_batches.schema, sample_batches.gen()}};
    lhs.label = "lhs";
    ac::Declaration rhs{"source",
                        ac::SourceNodeOptions{sample_batches.schema, sample_batches.gen()}};
    rhs.label = "rhs";

    // The union node itself takes no options.
    ac::Declaration union_decl{
            "union", {std::move(lhs), std::move(rhs)}, ac::ExecNodeOptions{}};

    return ExecutePlanAndCollectAsTable(std::move(union_decl));
}

// (Doc section: Union Example)

// (Doc section: Table Sink Example)

/// \brief An example showing a table sink node
///
/// TableSink Example
/// This example shows how a table_sink can be used
/// in an execution plan. This includes a source node
/// receiving data as batches and the table sink node
/// which emits the output as a table.
turbo::Status TableSinkExample() {
    TURBO_MOVE_OR_RAISE(std::shared_ptr<ac::ExecPlan> plan,
                        ac::ExecPlan::create(*cp::threaded_exec_context()));

    TURBO_MOVE_OR_RAISE(auto sample_batches, MakeBasicBatches());

    TURBO_MOVE_OR_RAISE(
            ac::ExecNode *source,
            ac::make_exec_node("source", plan.get(), {},
                               ac::SourceNodeOptions{sample_batches.schema,
                                                     sample_batches.gen()}));

    // The table sink deposits the collected output into this pointer.
    std::shared_ptr<nebula::Table> output_table;
    TURBO_RETURN_NOT_OK(ac::make_exec_node("table_sink", plan.get(), {source},
                                           ac::TableSinkNodeOptions{&output_table}));

    // Check the plan wiring before starting it.
    TURBO_RETURN_NOT_OK(plan->validate());
    std::cout << "ExecPlan created : " << plan->to_string() << std::endl;

    plan->start_producing();

    // Block until the plan completes, then surface any error.
    auto finished = plan->finished();
    TURBO_RETURN_NOT_OK(finished.status());

    std::cout << "Results : " << output_table->to_string() << std::endl;
    return turbo::OkStatus();
}

// (Doc section: Table Sink Example)

// (Doc section: RecordBatchReaderSource Example)

/// \brief An example showing the usage of a RecordBatchReader as the data source.
///
/// RecordBatchReaderSourceSink Example
/// This example shows how a record_batch_reader_source can be used
/// in an execution plan. This includes the source node
/// receiving data from a TableRecordBatchReader.

/// \brief Use a RecordBatchReader over an in-memory table as a plan source.
///
/// Wraps the table in a TableBatchReader, plugs it into a
/// "record_batch_reader_source" node, and collects the plan output
/// as a table.
turbo::Status RecordBatchReaderSourceSinkExample() {
    TURBO_MOVE_OR_RAISE(auto input_table, GetTable());

    std::shared_ptr<nebula::RecordBatchReader> batch_reader =
            std::make_shared<nebula::TableBatchReader>(input_table);

    ac::Declaration source{"record_batch_reader_source",
                           ac::RecordBatchReaderSourceNodeOptions{batch_reader}};
    return ExecutePlanAndCollectAsTable(std::move(source));
}

// (Doc section: RecordBatchReaderSource Example)

// Selects which example main() runs; the numeric value is the program's
// second command-line argument (argv[2]).  Values are part of the CLI
// contract — do not renumber.
enum ExampleMode {
    SOURCE_SINK = 0,
    TABLE_SOURCE_SINK = 1,
    SCAN = 2,
    FILTER = 3,
    PROJECT = 4,
    SCALAR_AGGREGATION = 5,
    GROUP_AGGREGATION = 6,
    CONSUMING_SINK = 7,
    ORDER_BY_SINK = 8,
    HASHJOIN = 9,
    KSELECT = 10,
    WRITE = 11,
    UNION = 12,
    TABLE_SOURCE_TABLE_SINK = 13,
    RECORD_BATCH_READER_SOURCE = 14,
    PROJECT_SEQUENCE = 15
};

/// \brief Entry point: runs the example selected by argv[2] (an ExampleMode
/// value), using argv[1] as the base output path for the write example.
///
/// Returns EXIT_SUCCESS when the chosen example completes without error,
/// EXIT_FAILURE otherwise.  With fewer than two arguments it exits
/// successfully so CI smoke runs pass without a workspace.
int main(int argc, char **argv) {
    if (argc < 3) {
        // Fake success for CI purposes.
        return EXIT_SUCCESS;
    }

    int mode = std::atoi(argv[2]);
    turbo::Status status;
    // ensure nebula::dataset node factories are in the registry
    nebula::dataset::internal::Initialize();
    switch (mode) {
        case SOURCE_SINK:
            PrintBlock("Source Sink Example");
            status = SourceSinkExample();
            break;
        case TABLE_SOURCE_SINK:
            PrintBlock("Table Source Sink Example");
            status = TableSourceSinkExample();
            break;
        case SCAN:
            PrintBlock("Scan Example");
            status = ScanSinkExample();
            break;
        case FILTER:
            PrintBlock("Filter Example");
            status = ScanFilterSinkExample();
            break;
        case PROJECT:
            PrintBlock("Project Example");
            status = ScanProjectSinkExample();
            break;
        case PROJECT_SEQUENCE:
            PrintBlock("Project Example (using Declaration::sequence)");
            status = ScanProjectSequenceSinkExample();
            break;
        case GROUP_AGGREGATION:
            // Banner differentiated from the scalar variant (both used to
            // print "Aggregate Example", making output ambiguous).
            PrintBlock("Group Aggregate Example");
            status = SourceGroupAggregateSinkExample();
            break;
        case SCALAR_AGGREGATION:
            PrintBlock("Scalar Aggregate Example");
            status = SourceScalarAggregateSinkExample();
            break;
        case CONSUMING_SINK:
            PrintBlock("Consuming-Sink Example");
            status = SourceConsumingSinkExample();
            break;
        case ORDER_BY_SINK:
            PrintBlock("OrderBy Example");
            status = SourceOrderBySinkExample();
            break;
        case HASHJOIN:
            PrintBlock("HashJoin Example");
            status = SourceHashJoinSinkExample();
            break;
        case KSELECT:
            PrintBlock("KSelect Example");
            status = SourceKSelectExample();
            break;
        case WRITE:
            PrintBlock("Write Example");
            // Resolve the output path lazily: only the write example needs
            // a filesystem path, so other modes no longer touch argv[1].
            status = ScanFilterWriteExample(turbo::absolute(argv[1])->string());
            break;
        case UNION:
            PrintBlock("Union Example");
            status = SourceUnionSinkExample();
            break;
        case TABLE_SOURCE_TABLE_SINK:
            PrintBlock("TableSink Example");
            status = TableSinkExample();
            break;
        case RECORD_BATCH_READER_SOURCE:
            PrintBlock("RecordBatchReaderSource Example");
            status = RecordBatchReaderSourceSinkExample();
            break;
        default:
            // Previously an unknown mode fell through silently and the
            // program reported success; diagnose and fail instead.
            std::cout << "Error occurred: unknown example mode " << mode << std::endl;
            return EXIT_FAILURE;
    }

    if (status.ok()) {
        return EXIT_SUCCESS;
    } else {
        std::cout << "Error occurred: " << status.message() << std::endl;
        return EXIT_FAILURE;
    }
}

// (Doc section: Execution Plan Documentation Example)
