// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/csv/reader.h>

#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <nebula/core/array.h>
#include <nebula/core/buffer.h>
#include <nebula/csv/chunker.h>
#include <nebula/csv/column_builder.h>
#include <nebula/csv/column_decoder.h>
#include <nebula/csv/options.h>
#include <nebula/csv/parser.h>
#include <nebula/io/interfaces.h>

#include <turbo/utility/status.h>
#include <nebula/core/table.h>
#include <nebula/types/type.h>
#include <nebula/types/type_fwd.h>
#include <nebula/future/async_generator.h>
#include <nebula/future/future.h>
#include <turbo/functional/iterator.h>
#include <turbo/log/logging.h>
#include <turbo/base/macros.h>
#include <nebula/future/task_group.h>
#include <nebula/future/thread_pool.h>
#include <nebula/util/utf8_internal.h>
#include <nebula/util/vector.h>

namespace nebula {

    // Convenience aliases for internal helpers referenced by this translation unit.
    using internal::Executor;
    using internal::TaskGroup;
    using internal::UnwrapOrRaise;
}  // namespace nebula

namespace nebula::csv {
    namespace {

        struct ConversionSchema {
            struct Column {
                std::string name;
                // Physical column index in CSV file
                int32_t index;
                // If true, make a column of nulls
                bool is_missing;
                // If set, convert the CSV column to this type
                // If unset (and is_missing is false), infer the type from the CSV column
                std::shared_ptr<DataType> type;
            };

            static Column NullColumn(std::string col_name, std::shared_ptr<DataType> type) {
                return Column{std::move(col_name), -1, true, std::move(type)};
            }

            static Column TypedColumn(std::string col_name, int32_t col_index,
                                      std::shared_ptr<DataType> type) {
                return Column{std::move(col_name), col_index, false, std::move(type)};
            }

            static Column InferredColumn(std::string col_name, int32_t col_index) {
                return Column{std::move(col_name), col_index, false, nullptr};
            }

            std::vector<Column> columns;
        };

        // An iterator of Buffers that makes sure there is no straddling CRLF sequence.
        class CSVBufferIterator {
        public:
            // Wrap a synchronous buffer iterator so that emitted buffers never start
            // with a UTF-8 BOM and never begin with the '\n' half of a CRLF pair
            // that straddled the previous buffer.
            static turbo::Iterator<std::shared_ptr<Buffer>> create(
                    turbo::Iterator<std::shared_ptr<Buffer>> buffer_iterator) {
                turbo::Transformer<std::shared_ptr<Buffer>, std::shared_ptr<Buffer>> fn =
                        CSVBufferIterator();
                return turbo::make_transformed_iterator(std::move(buffer_iterator), fn);
            }

            // Async counterpart of create().
            static AsyncGenerator<std::shared_ptr<Buffer>> make_async(
                    AsyncGenerator<std::shared_ptr<Buffer>> buffer_iterator) {
                turbo::Transformer<std::shared_ptr<Buffer>, std::shared_ptr<Buffer>> fn =
                        CSVBufferIterator();
                return create_transformed_generator(std::move(buffer_iterator), fn);
            }

            // Transform one buffer. A null input buffer signals EOF.
            turbo::Result<turbo::TransformFlow<std::shared_ptr<Buffer>>> operator()(std::shared_ptr<Buffer> buf) {
                // Fix: also bail out on a zero-length buffer; the original code would
                // read buf->data()[buf->size() - 1] out of bounds on one.
                if (buf == nullptr || buf->size() == 0) {
                    // EOF (or no data to emit)
                    return turbo::TransformFinish();
                }

                int64_t offset = 0;
                if (first_buffer_) {
                    // Skip an optional UTF-8 byte order mark at the start of the stream
                    TURBO_MOVE_OR_RAISE(auto data, util::SkipUTF8BOM(buf->data(), buf->size()));
                    offset += data - buf->data();
                    DKCHECK_GE(offset, 0);
                    first_buffer_ = false;
                }

                // Fix: bounds check `offset` — the BOM may have consumed the entire
                // buffer, in which case data()[offset] would be one past the end.
                if (trailing_cr_ && offset < buf->size() && buf->data()[offset] == '\n') {
                    // Skip '\r\n' line separator that started at the end of previous buffer
                    ++offset;
                }

                trailing_cr_ = (buf->data()[buf->size() - 1] == '\r');
                buf = SliceBuffer(buf, offset);
                if (buf->size() == 0) {
                    // Nothing left after skipping => EOF
                    return turbo::TransformFinish();
                } else {
                    return turbo::TransformYield(buf);
                }
            }

        protected:
            // True until the first buffer of the stream has been seen (BOM handling)
            bool first_buffer_ = true;
            // Whether there was a trailing CR at the end of last received buffer
            bool trailing_cr_ = false;
        };

        struct CSVBlock {
            // (partial + completion + buffer) is an entire delimited CSV buffer.
            // Unconsumed tail carried over from the previous block
            std::shared_ptr<Buffer> partial;
            // Bytes from the current data that complete `partial`
            std::shared_ptr<Buffer> completion;
            // The bulk of the delimited CSV data
            std::shared_ptr<Buffer> buffer;
            // Monotonically increasing ordering index; -1 marks the end-of-iteration
            // sentinel (see IterationTraits<CSVBlock> below)
            int64_t block_index;
            // True for the last block of the stream
            bool is_final;
            // Number of bytes dropped while skipping rows
            int64_t bytes_skipped;
            // Callback used by the serial reader to learn how many bytes were actually
            // consumed; left empty by the threaded reader
            std::function<turbo::Status(int64_t)> consume_bytes;
        };

    }  // namespace
}  // namespace nebula::csv

namespace turbo {
    // Sentinel support so CSVBlock can flow through turbo iterators and async
    // generators: a block_index of -1 marks the end of iteration.
    template<>
    struct IterationTraits<nebula::csv::CSVBlock> {
        static nebula::csv::CSVBlock end() { return nebula::csv::CSVBlock{{}, {}, {}, -1, true, 0, {}}; }

        static bool is_end(const nebula::csv::CSVBlock &val) { return val.block_index < 0; }
    };
}  // namespace turbo

namespace nebula::csv {
    namespace {

        // This is a callable that can be used to transform an iterator.  The source iterator
        // will contain buffers of data and the output iterator will contain delimited CSV
        // blocks.  std::optional is used so that there is an end token (required by the
        // iterator APIs (e.g. Visit)) even though an empty optional is never used in this code.
        class BlockReader {
        public:
            // `first_buffer` is the first buffer of CSV data to chunk; `skip_rows`
            // is the number of data rows still to be skipped.
            BlockReader(std::unique_ptr<Chunker> chunker, std::shared_ptr<Buffer> first_buffer,
                        int64_t skip_rows)
                    : chunker_(std::move(chunker)),
                      partial_(std::make_shared<Buffer>("")),
                      buffer_(std::move(first_buffer)),
                      skip_rows_(skip_rows) {}

        protected:
            std::unique_ptr<Chunker> chunker_;
            // partial_ holds the unconsumed tail of the previous buffer; buffer_ is
            // the buffer currently being chunked (null once EOF has been reached)
            std::shared_ptr<Buffer> partial_, buffer_;
            int64_t skip_rows_;
            // Index assigned to the next emitted CSVBlock
            int64_t block_index_ = 0;
            // NOTE(review): this flag is neither read nor written by the visible
            // subclasses (SerialBlockReader, ThreadedBlockReader); it looks like a
            // copy-paste leftover from CSVBufferIterator — confirm and consider removing.
            bool trailing_cr_ = false;
        };

        // An object that reads delimited CSV blocks for serial use.
        // The number of bytes consumed should be notified after each read,
        // using CSVBlock::consume_bytes.
        class SerialBlockReader : public BlockReader {
        public:
            using BlockReader::BlockReader;

            // Wrap a synchronous buffer iterator into an iterator of delimited CSVBlocks.
            static turbo::Iterator<CSVBlock> MakeIterator(
                    turbo::Iterator<std::shared_ptr<Buffer>> buffer_iterator, std::unique_ptr<Chunker> chunker,
                    std::shared_ptr<Buffer> first_buffer, int64_t skip_rows) {
                auto block_reader =
                        std::make_shared<SerialBlockReader>(std::move(chunker), first_buffer, skip_rows);
                // Wrap shared pointer in callable
                turbo::Transformer<std::shared_ptr<Buffer>, CSVBlock> block_reader_fn =
                        [block_reader](std::shared_ptr<Buffer> buf) {
                            return (*block_reader)(std::move(buf));
                        };
                return turbo::make_transformed_iterator(std::move(buffer_iterator), block_reader_fn);
            }

            // Async counterpart of MakeIterator().
            static AsyncGenerator<CSVBlock> MakeAsyncIterator(
                    AsyncGenerator<std::shared_ptr<Buffer>> buffer_generator,
                    std::unique_ptr<Chunker> chunker, std::shared_ptr<Buffer> first_buffer,
                    int64_t skip_rows) {
                auto block_reader =
                        std::make_shared<SerialBlockReader>(std::move(chunker), first_buffer, skip_rows);
                // Wrap shared pointer in callable
                turbo::Transformer<std::shared_ptr<Buffer>, CSVBlock> block_reader_fn =
                        [block_reader](std::shared_ptr<Buffer> next) {
                            return (*block_reader)(std::move(next));
                        };
                return create_transformed_generator(std::move(buffer_generator), block_reader_fn);
            }

            // Transform one input buffer into a delimited CSVBlock.
            // A null `next_buffer` means the current buffer_ is the last one.
            turbo::Result<turbo::TransformFlow<CSVBlock>> operator()(std::shared_ptr<Buffer> next_buffer) {
                if (buffer_ == nullptr) {
                    // EOF already reached on a previous call
                    return turbo::TransformFinish();
                }

                bool is_final = (next_buffer == nullptr);
                int64_t bytes_skipped = 0;

                if (skip_rows_) {
                    // Still skipping initial data rows; ProcessSkip advances buffer_
                    // past the skipped rows and decrements skip_rows_ accordingly.
                    bytes_skipped += partial_->size();
                    auto orig_size = buffer_->size();
                    TURBO_RETURN_NOT_OK(
                            chunker_->ProcessSkip(partial_, buffer_, is_final, &skip_rows_, &buffer_));
                    bytes_skipped += orig_size - buffer_->size();
                    auto empty = std::make_shared<Buffer>(nullptr, 0);
                    if (skip_rows_) {
                        // Still have rows beyond this buffer to skip return empty block
                        partial_ = std::move(buffer_);
                        buffer_ = next_buffer;
                        return turbo::TransformYield<CSVBlock>(CSVBlock{empty, empty, empty, block_index_++,
                                                                 is_final, bytes_skipped,
                                                                 [](int64_t) { return turbo::OkStatus(); }});
                    }
                    partial_ = std::move(empty);
                }

                std::shared_ptr<Buffer> completion;

                if (is_final) {
                    // End of file reached => compute completion from penultimate block
                    TURBO_RETURN_NOT_OK(chunker_->ProcessFinal(partial_, buffer_, &completion, &buffer_));
                } else {
                    // Get completion of partial from previous block.
                    TURBO_RETURN_NOT_OK(
                            chunker_->ProcessWithPartial(partial_, buffer_, &completion, &buffer_));
                }
                int64_t bytes_before_buffer = partial_->size() + completion->size();

                // Deferred state update: the consumer reports how many bytes it parsed,
                // and the remainder of buffer_ becomes the next partial_.
                // NOTE(review): captures raw `this`; relies on the caller invoking
                // consume_bytes while the transformer (which owns this object via
                // shared_ptr) is still alive.
                auto consume_bytes = [this, bytes_before_buffer,
                        next_buffer](int64_t nbytes) -> turbo::Status {
                            DKCHECK_GE(nbytes, 0);
                    int64_t offset = nbytes - bytes_before_buffer;
                    // All data before the buffer should have been consumed.
                    // This is checked in parse() and BlockParsingOperator::operator().
                            DKCHECK_GE(offset, 0);
                    partial_ = SliceBuffer(buffer_, offset);
                    buffer_ = next_buffer;
                    return turbo::OkStatus();
                };

                return turbo::TransformYield<CSVBlock>(CSVBlock{partial_, completion, buffer_,
                                                         block_index_++, is_final, bytes_skipped,
                                                         std::move(consume_bytes)});
            }
        };

        // An object that reads delimited CSV blocks for threaded use.
        // Unlike SerialBlockReader, block boundaries are computed eagerly and no
        // consume_bytes feedback is required from the consumer.
        class ThreadedBlockReader : public BlockReader {
        public:
            using BlockReader::BlockReader;

            // Wrap a buffer generator into a generator of delimited CSVBlocks.
            static AsyncGenerator<CSVBlock> MakeAsyncIterator(
                    AsyncGenerator<std::shared_ptr<Buffer>> buffer_generator,
                    std::unique_ptr<Chunker> chunker, std::shared_ptr<Buffer> first_buffer,
                    int64_t skip_rows) {
                auto block_reader = std::make_shared<ThreadedBlockReader>(std::move(chunker),
                                                                          first_buffer, skip_rows);
                // Wrap shared pointer in callable
                turbo::Transformer<std::shared_ptr<Buffer>, CSVBlock> block_reader_fn =
                        [block_reader](std::shared_ptr<Buffer> next) { return (*block_reader)(next); };
                return create_transformed_generator(std::move(buffer_generator), block_reader_fn);
            }

            // Transform one input buffer into a delimited CSVBlock.
            // A null `next_buffer` means the current buffer_ is the last one.
            turbo::Result<turbo::TransformFlow<CSVBlock>> operator()(std::shared_ptr<Buffer> next_buffer) {
                if (buffer_ == nullptr) {
                    // EOF
                    return turbo::TransformFinish();
                }

                bool is_final = (next_buffer == nullptr);

                auto current_partial = std::move(partial_);
                auto current_buffer = std::move(buffer_);
                int64_t bytes_skipped = 0;

                if (skip_rows_) {
                    // Still skipping initial data rows
                    auto orig_size = current_buffer->size();
                    bytes_skipped = current_partial->size();
                    TURBO_RETURN_NOT_OK(chunker_->ProcessSkip(current_partial, current_buffer, is_final,
                                                        &skip_rows_, &current_buffer));
                    bytes_skipped += orig_size - current_buffer->size();
                    current_partial = std::make_shared<Buffer>(nullptr, 0);
                    if (skip_rows_) {
                        // Rows remain to be skipped beyond this buffer => emit an
                        // empty placeholder block so accounting stays in sync
                        partial_ = std::move(current_buffer);
                        buffer_ = std::move(next_buffer);
                        return turbo::TransformYield<CSVBlock>(CSVBlock{current_partial,
                                                                 current_partial,
                                                                 current_partial,
                                                                 block_index_++,
                                                                 is_final,
                                                                 bytes_skipped,
                                                                 {}});
                    }
                }

                std::shared_ptr<Buffer> whole, completion, next_partial;

                if (is_final) {
                    // End of file reached => compute completion from penultimate block
                    // (next_partial stays null: there is no next iteration)
                    TURBO_RETURN_NOT_OK(
                            chunker_->ProcessFinal(current_partial, current_buffer, &completion, &whole));
                } else {
                    // Get completion of partial from previous block.
                    std::shared_ptr<Buffer> starts_with_whole;
                    TURBO_RETURN_NOT_OK(chunker_->ProcessWithPartial(current_partial, current_buffer,
                                                               &completion, &starts_with_whole));

                    // Get a complete CSV block inside `partial + block`, and keep
                    // the rest for the next iteration.
                    TURBO_RETURN_NOT_OK(chunker_->process(starts_with_whole, &whole, &next_partial));
                }

                partial_ = std::move(next_partial);
                buffer_ = std::move(next_buffer);

                // consume_bytes is left empty: boundaries are already final
                return turbo::TransformYield<CSVBlock>(CSVBlock{
                        current_partial, completion, whole, block_index_++, is_final, bytes_skipped, {}});
            }
        };

        struct ParsedBlock {
            // Parser holding the parsed rows and per-column cell offsets
            std::shared_ptr<BlockParser> parser;
            // Ordering index; -1 marks the end-of-iteration sentinel
            // (see IterationTraits<ParsedBlock> below)
            int64_t block_index;
            // Number of input bytes consumed to produce this block, including
            // bytes dropped while skipping rows
            int64_t bytes_parsed_or_skipped;
        };

        struct DecodedBlock {
            // The decoded batch (null in the end-of-iteration sentinel,
            // see IterationTraits<DecodedBlock> below)
            std::shared_ptr<RecordBatch> record_batch;
            // Represents the number of input bytes represented by this batch
            // This will include bytes skipped when skipping rows after the header
            int64_t bytes_processed;
        };

    }  // namespace

}  // namespace nebula::csv

namespace turbo {
    // End-of-iteration sentinels for ParsedBlock / DecodedBlock so they can flow
    // through turbo iterators and async generators.
    template<>
    struct IterationTraits<nebula::csv::ParsedBlock> {
        static nebula::csv::ParsedBlock end() { return nebula::csv::ParsedBlock{nullptr, -1, -1}; }

        static bool is_end(const nebula::csv::ParsedBlock &val) { return val.block_index < 0; }
    };

    template<>
    struct IterationTraits<nebula::csv::DecodedBlock> {
        static nebula::csv::DecodedBlock end() { return nebula::csv::DecodedBlock{nullptr, -1}; }

        static bool is_end(const nebula::csv::DecodedBlock &val) { return val.bytes_processed < 0; }
    };
}  // namespace turbo

namespace nebula::csv {
    namespace {

        // A function object that takes in a buffer of CSV data and returns a parsed batch of CSV
        // data (CSVBlock -> ParsedBlock) for use with create_mapped_generator.
        // The parsed batch contains a list of offsets for each of the columns so that columns
        // can be individually scanned
        //
        // This operator is not reentrant
        class BlockParsingOperator {
        public:
            // `num_csv_cols` is the expected physical column count; `first_row` is
            // the row number of the first row of the first block, or -1 to disable
            // row counting.
            // Fix: the by-value `io_context` / `parse_options` parameters are now
            // moved into the members instead of copied.
            BlockParsingOperator(io::IOContext io_context, ParseOptions parse_options,
                                 int num_csv_cols, int64_t first_row)
                    : io_context_(std::move(io_context)),
                      parse_options_(std::move(parse_options)),
                      num_csv_cols_(num_csv_cols),
                      count_rows_(first_row >= 0),
                      num_rows_seen_(first_row) {}

            // TODO: this is almost entirely the same as ReaderMixin::parse(). Refactor?
            // Parse one delimited CSVBlock into a ParsedBlock of cell offsets.
            turbo::Result<ParsedBlock> operator()(const CSVBlock &block) {
                constexpr int32_t max_num_rows = std::numeric_limits<int32_t>::max();
                auto parser = std::make_shared<BlockParser>(
                        io_context_.pool(), parse_options_, num_csv_cols_, num_rows_seen_, max_num_rows);

                // Data straddling two buffers (partial + completion) must be presented
                // to the parser as a single contiguous view.
                std::shared_ptr<Buffer> straddling;
                std::vector<std::string_view> views;
                if (block.partial->size() != 0 || block.completion->size() != 0) {
                    if (block.partial->size() == 0) {
                        straddling = block.completion;
                    } else if (block.completion->size() == 0) {
                        straddling = block.partial;
                    } else {
                        TURBO_MOVE_OR_RAISE(
                                straddling,
                                concatenate_buffers({block.partial, block.completion}, io_context_.pool()));
                    }
                    views = {std::string_view(*straddling), std::string_view(*block.buffer)};
                } else {
                    views = {std::string_view(*block.buffer)};
                }
                uint32_t parsed_size;
                if (block.is_final) {
                    TURBO_RETURN_NOT_OK(parser->parse_final(views, &parsed_size));
                } else {
                    TURBO_RETURN_NOT_OK(parser->parse(views, &parsed_size));
                }

                // `partial + completion` should have been entirely consumed.
                const int64_t bytes_before_buffer = block.partial->size() + block.completion->size();
                if (static_cast<int64_t>(parsed_size) < bytes_before_buffer) {
                    // This can happen if `newlines_in_values` is not enabled and
                    // `partial + completion` ends with a newline inside a quoted string.
                    // In this case, the BlockParser stops at the truncated data in the first
                    // block (see gh-39857).
                    return turbo::invalid_argument_error(
                            "CSV parser got out of sync with chunker. This can mean the data file "
                            "contains cell values spanning multiple lines; please consider enabling "
                            "the option 'newlines_in_values'.");
                }

                if (count_rows_) {
                    num_rows_seen_ += parser->total_num_rows();
                }

                // Notify the upstream reader how many bytes were consumed
                // (serial readers only; threaded readers leave consume_bytes empty)
                if (block.consume_bytes) {
                    TURBO_RETURN_NOT_OK(block.consume_bytes(parsed_size));
                }
                return ParsedBlock{std::move(parser), block.block_index,
                                   static_cast<int64_t>(parsed_size) + block.bytes_skipped};
            }

            int num_csv_cols() const { return num_csv_cols_; }

        private:
            io::IOContext io_context_;
            const ParseOptions parse_options_;
            const int num_csv_cols_;
            // If true, num_rows_seen_ tracks the absolute row number across blocks
            const bool count_rows_;
            int64_t num_rows_seen_;
        };

        // A function object that takes in parsed batch of CSV data and decodes it to an nebula
        // record batch (ParsedBlock -> DecodedBlock) for use with create_mapped_generator.
        class BlockDecodingOperator {
        public:
            // Decode one parsed block into a record batch, one column decoder per
            // output column.  Returns a future so decoding can proceed asynchronously.
            Future<DecodedBlock> operator()(const ParsedBlock &block) {
                DKCHECK(!state_->column_decoders.empty());
                std::vector<Future<std::shared_ptr<Array>>> decoded_array_futs;
                for (auto &decoder: state_->column_decoders) {
                    decoded_array_futs.push_back(decoder->Decode(block.parser));
                }
                auto bytes_parsed_or_skipped = block.bytes_parsed_or_skipped;
                auto decoded_arrays_fut = All(std::move(decoded_array_futs));
                // Capture the shared state (not `this`) so the continuation stays
                // valid even if this operator object is destroyed before the future
                // completes.
                auto state = state_;
                return decoded_arrays_fut.Then(
                        [state, bytes_parsed_or_skipped](
                                const std::vector<turbo::Result<std::shared_ptr<Array>>> &maybe_decoded_arrays)
                                -> turbo::Result<DecodedBlock> {
                            TURBO_MOVE_OR_RAISE(auto decoded_arrays,
                                                   nebula::internal::UnwrapOrRaise(maybe_decoded_arrays));

                            TURBO_MOVE_OR_RAISE(auto batch,
                                                   state->DecodedArraysToBatch(std::move(decoded_arrays)));
                            return DecodedBlock{std::move(batch), bytes_parsed_or_skipped};
                        });
            }

            // Factory: builds the operator and eagerly creates its column decoders.
            static turbo::Result<BlockDecodingOperator> create(io::IOContext io_context,
                                                      ConvertOptions convert_options,
                                                      ConversionSchema conversion_schema) {
                // Fix: `io_context` was previously moved into the constructor and then
                // read again by MakeColumnDecoders (use-after-move).  Copy it into the
                // constructor and move it into MakeColumnDecoders instead.
                BlockDecodingOperator op(io_context, std::move(convert_options),
                                         std::move(conversion_schema));
                TURBO_RETURN_NOT_OK(op.state_->MakeColumnDecoders(std::move(io_context)));
                return op;
            }

        private:
            BlockDecodingOperator(io::IOContext io_context, ConvertOptions convert_options,
                                  ConversionSchema conversion_schema)
                    : state_(std::make_shared<State>(std::move(io_context), std::move(convert_options),
                                                     std::move(conversion_schema))) {}

            // Decoder state shared between the operator and its async continuations.
            struct State {
                State(io::IOContext io_context, ConvertOptions convert_options,
                      ConversionSchema conversion_schema)
                        : convert_options(std::move(convert_options)),
                          conversion_schema(std::move(conversion_schema)) {}

                // Assemble decoded arrays into a record batch.  The output schema is
                // fixed lazily, after the first non-empty batch has been decoded.
                turbo::Result<std::shared_ptr<RecordBatch>> DecodedArraysToBatch(
                        std::vector<std::shared_ptr<Array>> arrays) {
                    const auto n_rows = arrays[0]->length();

                    if (schema == nullptr) {
                        FieldVector fields(arrays.size());
                        for (size_t i = 0; i < arrays.size(); ++i) {
                            fields[i] = field(conversion_schema.columns[i].name, arrays[i]->type());
                        }

                        if (n_rows == 0) {
                            // No rows so schema is not reliable. return RecordBatch but do not set schema
                            return RecordBatch::create(nebula::schema(std::move(fields)), n_rows,
                                                     std::move(arrays));
                        }

                        schema = nebula::schema(std::move(fields));
                    }

                    return RecordBatch::create(schema, n_rows, std::move(arrays));
                }

                // Make column decoders from conversion schema
                turbo::Status MakeColumnDecoders(io::IOContext io_context) {
                    for (const auto &column: conversion_schema.columns) {
                        std::shared_ptr<ColumnDecoder> decoder;
                        if (column.is_missing) {
                            // Column absent from the CSV: emit nulls of the requested type
                            TURBO_MOVE_OR_RAISE(decoder,
                                                   ColumnDecoder::MakeNull(io_context.pool(), column.type));
                        } else if (column.type != nullptr) {
                            // Fixed target type requested by the caller
                            TURBO_MOVE_OR_RAISE(
                                    decoder, ColumnDecoder::create(io_context.pool(), column.type, column.index,
                                                                 convert_options));
                        } else {
                            // Type will be inferred from the CSV data
                            TURBO_MOVE_OR_RAISE(
                                    decoder,
                                    ColumnDecoder::create(io_context.pool(), column.index, convert_options));
                        }
                        column_decoders.push_back(std::move(decoder));
                    }
                    return turbo::OkStatus();
                }

                ConvertOptions convert_options;
                ConversionSchema conversion_schema;
                std::vector<std::shared_ptr<ColumnDecoder>> column_decoders;
                std::shared_ptr<Schema> schema;
            };

            std::shared_ptr<State> state_;
        };

/////////////////////////////////////////////////////////////////////////
// Base class for common functionality

        class ReaderMixin {
        public:
            // Stores the reader configuration; the options structs are copied so
            // this object owns its configuration.  `count_rows` enables tracking of
            // the number of rows seen (see its use in ProcessHeader).
            ReaderMixin(io::IOContext io_context, std::shared_ptr<io::InputStream> input,
                        const ReadOptions &read_options, const ParseOptions &parse_options,
                        const ConvertOptions &convert_options, bool count_rows)
                    : io_context_(std::move(io_context)),
                      read_options_(read_options),
                      parse_options_(parse_options),
                      convert_options_(convert_options),
                      count_rows_(count_rows),
                      input_(std::move(input)) {}

        protected:
            // Read the header (and any initial rows to skip) from `buf`, populating
            // column_names_, the parsing operator and the conversion schema.
            // `*rest` receives the remainder of `buf` after the consumed bytes.
            // Returns the # of bytes consumed.
            turbo::Result<int64_t> ProcessHeader(const std::shared_ptr<Buffer> &buf,
                                          std::shared_ptr<Buffer> *rest) {
                const uint8_t *data = buf->data();
                const auto data_end = data + buf->size();
                        DKCHECK_GT(data_end - data, 0);
                // Row numbers start at 1
                int64_t num_rows_seen = 1;

                if (read_options_.skip_rows) {
                    // Skip initial rows (potentially invalid CSV data)
                    // NOTE(review): the cast assumes the first block is < 4 GiB — confirm
                    auto num_skipped_rows = SkipRows(data, static_cast<uint32_t>(data_end - data),
                                                     read_options_.skip_rows, &data);
                    if (num_skipped_rows < read_options_.skip_rows) {
                        return turbo::invalid_argument_error(
                                "Could not skip initial ", read_options_.skip_rows,
                                " rows from CSV file, "
                                "either file is too short or header is larger than block size");
                    }
                    if (count_rows_) {
                        num_rows_seen += num_skipped_rows;
                    }
                }

                if (read_options_.column_names.empty()) {
                    // Parse one row (either to read column names or to know the number of columns)
                    BlockParser parser(io_context_.pool(), parse_options_, /*num_cols=*/-1,
                            /*first_row=*/num_rows_seen, /*max_num_rows=*/1);
                    uint32_t parsed_size = 0;
                    TURBO_RETURN_NOT_OK(parser.parse(
                            std::string_view(reinterpret_cast<const char *>(data), data_end - data),
                            &parsed_size));
                    if (parser.num_rows() != 1) {
                        return turbo::invalid_argument_error(
                                "Could not read first row from CSV file, either "
                                "file is too short or header is larger than block size");
                    }
                    if (parser.num_cols() == 0) {
                        return turbo::invalid_argument_error("No columns in CSV file");
                    }

                    if (read_options_.autogenerate_column_names) {
                        // Synthesize "f0", "f1", ... names; note `data` is NOT advanced,
                        // so the parsed row is treated as data, not as a header
                        column_names_ = GenerateColumnNames(parser.num_cols());
                    } else {
                        // read column names from header row
                        auto visit = [&](const uint8_t *data, uint32_t size, bool quoted) -> turbo::Status {
                            column_names_.emplace_back(reinterpret_cast<const char *>(data), size);
                            return turbo::OkStatus();
                        };
                        TURBO_RETURN_NOT_OK(parser.visit_last_row(visit));
                                DKCHECK_EQ(static_cast<size_t>(parser.num_cols()), column_names_.size());
                        // Skip parsed header row
                        data += parsed_size;
                        if (count_rows_) {
                            ++num_rows_seen;
                        }
                    }
                } else {
                    // Column names were given explicitly; no header row is consumed
                    column_names_ = read_options_.column_names;
                }

                if (count_rows_) {
                    // increase rows seen to skip past rows which will be skipped
                    num_rows_seen += read_options_.skip_rows_after_names;
                }

                auto bytes_consumed = data - buf->data();
                *rest = SliceBuffer(buf, bytes_consumed);

                int32_t num_csv_cols = static_cast<int32_t>(column_names_.size());
                        DKCHECK_GT(num_csv_cols, 0);
                // Since we know the number of columns, we can instantiate the BlockParsingOperator
                parsing_operator_.emplace(io_context_, parse_options_, num_csv_cols,
                                          count_rows_ ? num_rows_seen : -1);

                TURBO_RETURN_NOT_OK(MakeConversionSchema());
                return bytes_consumed;
            }

            // Generate default column names "f0", "f1", ... for a header-less CSV file.
            // Fix: avoid constructing a std::stringstream per column; simple string
            // concatenation with std::to_string is equivalent and much cheaper.
            std::vector<std::string> GenerateColumnNames(int32_t num_cols) {
                std::vector<std::string> res;
                res.reserve(num_cols);
                for (int32_t i = 0; i < num_cols; ++i) {
                    res.push_back("f" + std::to_string(i));
                }
                return res;
            }

            // Make conversion schema from options and parsed CSV header
            turbo::Status MakeConversionSchema() {
                // append a column converted from CSV data
                auto append_csv_column = [&](std::string col_name, int32_t col_index) {
                    // Does the named column have a fixed type?
                    auto it = convert_options_.column_types.find(col_name);
                    if (it == convert_options_.column_types.end()) {
                        conversion_schema_.columns.push_back(
                                ConversionSchema::InferredColumn(std::move(col_name), col_index));
                    } else {
                        conversion_schema_.columns.push_back(
                                ConversionSchema::TypedColumn(std::move(col_name), col_index, it->second));
                    }
                };

                // append a column of nulls
                auto append_null_column = [&](std::string col_name) {
                    // If the named column has a fixed type, use it, otherwise use null()
                    std::shared_ptr<DataType> type;
                    auto it = convert_options_.column_types.find(col_name);
                    if (it == convert_options_.column_types.end()) {
                        type = null();
                    } else {
                        type = it->second;
                    }
                    conversion_schema_.columns.push_back(
                            ConversionSchema::NullColumn(std::move(col_name), std::move(type)));
                };

                if (convert_options_.include_columns.empty()) {
                    // Include all columns in CSV file order
                    for (int32_t col_index = 0; col_index < num_csv_cols(); ++col_index) {
                        append_csv_column(column_names_[col_index], col_index);
                    }
                } else {
                    // Include columns from `include_columns` (in that order)
                    // Compute indices of columns in the CSV file
                    std::unordered_map<std::string, int32_t> col_indices;
                    col_indices.reserve(column_names_.size());
                    for (int32_t i = 0; i < static_cast<int32_t>(column_names_.size()); ++i) {
                        col_indices.emplace(column_names_[i], i);
                    }

                    for (const auto &col_name: convert_options_.include_columns) {
                        auto it = col_indices.find(col_name);
                        if (it != col_indices.end()) {
                            append_csv_column(col_name, it->second);
                        } else if (convert_options_.include_missing_columns) {
                            append_null_column(col_name);
                        } else {
                            return turbo::not_found_error("Column '", col_name,
                                                    "' in include_columns "
                                                    "does not exist in CSV file");
                        }
                    }
                }
                return turbo::OkStatus();
            }

            // Run the block parsing operator on a raw CSV block; only valid once
            // ProcessHeader has engaged parsing_operator_.
            turbo::Result<ParsedBlock> parse(const CSVBlock &block) {
                DKCHECK(parsing_operator_.has_value());
                auto &parser_op = *parsing_operator_;
                return parser_op(block);
            }

            // Number of columns in the CSV file; only valid once ProcessHeader
            // has engaged parsing_operator_.
            int num_csv_cols() const {
                DKCHECK(parsing_operator_.has_value());
                const auto &parser_op = *parsing_operator_;
                return parser_op.num_csv_cols();
            }

            io::IOContext io_context_;
            const ReadOptions read_options_;
            const ParseOptions parse_options_;
            const ConvertOptions convert_options_;
            // Whether to track the number of rows seen in the CSV being parsed
            const bool count_rows_;

            // Engaged by ProcessHeader once the column count is known
            std::optional<BlockParsingOperator> parsing_operator_;

            // Column names in the CSV file
            std::vector<std::string> column_names_;
            // Mapping from CSV columns to target table columns (see MakeConversionSchema)
            ConversionSchema conversion_schema_;

            std::shared_ptr<io::InputStream> input_;
            std::shared_ptr<TaskGroup> task_group_;
        };

/////////////////////////////////////////////////////////////////////////
// Base class for one-shot table readers

        // Shared scaffolding for one-shot readers that materialize a whole Table:
        // creates per-column builders, feeds parsed blocks into them, and finally
        // assembles the schema and chunked columns.
        class BaseTableReader : public ReaderMixin, public csv::TableReader {
        public:
            using ReaderMixin::ReaderMixin;

            virtual turbo::Status init() = 0;

            Future<std::shared_ptr<Table>> read_async() override {
                // One-shot readers are synchronous; wrap the blocking read().
                return Future<std::shared_ptr<Table>>::make_finished(read());
            }

        protected:
            // Instantiate one ColumnBuilder per conversion-schema column.
            turbo::Status MakeColumnBuilders() {
                for (const auto &col: conversion_schema_.columns) {
                    std::shared_ptr<ColumnBuilder> made;
                    if (col.is_missing) {
                        // Column absent from the CSV file: emit nulls of the requested type.
                        TURBO_MOVE_OR_RAISE(made, ColumnBuilder::MakeNull(io_context_.pool(),
                                                                                col.type, task_group_));
                    } else if (col.type == nullptr) {
                        // No fixed type: infer it from the data.
                        TURBO_MOVE_OR_RAISE(made,
                                               ColumnBuilder::create(io_context_.pool(), col.index,
                                                                   convert_options_, task_group_));
                    } else {
                        // Convert to the user-specified type.
                        TURBO_MOVE_OR_RAISE(
                                made, ColumnBuilder::create(io_context_.pool(), col.type, col.index,
                                                             convert_options_, task_group_));
                    }
                    column_builders_.push_back(std::move(made));
                }
                return turbo::OkStatus();
            }

            // Parse a raw CSV block and hand the result to the column builders.
            turbo::Status ParseAndInsert(const CSVBlock &block) {
                TURBO_MOVE_OR_RAISE(auto parsed, parse(block));
                return ProcessData(parsed.parser, parsed.block_index);
            }

            // Trigger conversion of parsed block data
            turbo::Status ProcessData(const std::shared_ptr<BlockParser> &parser, int64_t block_index) {
                for (auto &builder: column_builders_) {
                    builder->insert(block_index, parser);
                }
                return turbo::OkStatus();
            }

            // Finalize all builders and assemble the resulting Table.
            turbo::Result<std::shared_ptr<Table>> MakeTable() {
                DKCHECK_EQ(column_builders_.size(), conversion_schema_.columns.size());

                std::vector<std::shared_ptr<Field>> fields;
                std::vector<std::shared_ptr<ChunkedArray>> columns;

                const auto n = static_cast<int32_t>(column_builders_.size());
                for (int32_t i = 0; i < n; ++i) {
                    TURBO_MOVE_OR_RAISE(auto chunked, column_builders_[i]->finish());
                    fields.push_back(
                            ::nebula::field(conversion_schema_.columns[i].name, chunked->type()));
                    columns.emplace_back(std::move(chunked));
                }
                return Table::create(schema(std::move(fields)), std::move(columns));
            }

            // Column builders for target Table (in ConversionSchema order)
            std::vector<std::shared_ptr<ColumnBuilder>> column_builders_;
        };

/////////////////////////////////////////////////////////////////////////
// Base class for streaming readers

        // Streaming record-batch reader: lazily decodes CSV blocks through a
        // chain of async generators assembled in init()/InitAfterFirstBuffer().
        class StreamingReaderImpl : public ReaderMixin,
                                    public csv::StreamingReader,
                                    public std::enable_shared_from_this<StreamingReaderImpl> {
        public:
            StreamingReaderImpl(io::IOContext io_context, std::shared_ptr<io::InputStream> input,
                                const ReadOptions &read_options, const ParseOptions &parse_options,
                                const ConvertOptions &convert_options, bool count_rows)
                    : ReaderMixin(io_context, std::move(input), read_options, parse_options,
                                  convert_options, count_rows),
                      bytes_decoded_(std::make_shared<std::atomic<int64_t>>(0)) {}

            // Set up the buffer pipeline (background reads transferred onto
            // cpu_executor), then asynchronously process the first buffer (header).
            Future<> init(Executor *cpu_executor) {
                TURBO_MOVE_OR_RAISE(auto istream_it,
                                       io::make_input_stream_iterator(input_, read_options_.block_size));

                // TODO Consider exposing readahead as a read option (ARROW-12090)
                TURBO_MOVE_OR_RAISE(auto bg_it, create_background_generator(std::move(istream_it),
                                                                           io_context_.executor()));

                auto transferred_it = create_transferred_generator(bg_it, cpu_executor);

                auto buffer_generator = CSVBufferIterator::make_async(std::move(transferred_it));

                int max_readahead = cpu_executor->get_capacity();
                auto self = shared_from_this();

                // Capture self (and the generator) so both outlive the continuation.
                return buffer_generator().Then([self, buffer_generator, max_readahead](
                        const std::shared_ptr<Buffer> &first_buffer) {
                    return self->InitAfterFirstBuffer(first_buffer, buffer_generator, max_readahead);
                });
            }

            std::shared_ptr<Schema> schema() const override { return schema_; }

            int64_t bytes_read() const override { return bytes_decoded_->load(); }

            // Synchronous read: blocks on the async path below.
            turbo::Status read_next(std::shared_ptr<RecordBatch> *batch) override {
                auto next_fut = read_next_async();
                auto next_result = next_fut.result();
                return std::move(next_result).try_value(batch);
            }

            Future<std::shared_ptr<RecordBatch>> read_next_async() override {
                return record_batch_gen_();
            }

        protected:
            // Process the header from the first buffer, then build the
            // block -> parsed-block -> decoded-batch generator chain.
            Future<> InitAfterFirstBuffer(const std::shared_ptr<Buffer> &first_buffer,
                                          AsyncGenerator<std::shared_ptr<Buffer>> buffer_generator,
                                          int max_readahead) {
                if (first_buffer == nullptr) {
                    return turbo::invalid_argument_error("Empty CSV file");
                }

                std::shared_ptr<Buffer> after_header;
                TURBO_MOVE_OR_RAISE(auto header_bytes_consumed,
                                       ProcessHeader(first_buffer, &after_header));
                bytes_decoded_->fetch_add(header_bytes_consumed);

                TURBO_MOVE_OR_RAISE(
                        auto decoder_op,
                        BlockDecodingOperator::create(io_context_, convert_options_, conversion_schema_));

                auto block_gen = SerialBlockReader::MakeAsyncIterator(
                        std::move(buffer_generator), MakeChunker(parse_options_), std::move(after_header),
                        read_options_.skip_rows_after_names);
                auto parsed_block_gen = create_mapped_generator(std::move(block_gen), *parsing_operator_);
                auto rb_gen = create_mapped_generator(std::move(parsed_block_gen), std::move(decoder_op));

                auto self = shared_from_this();
                return rb_gen().Then([self, rb_gen, max_readahead](const DecodedBlock &first_block) {
                    return self->InitFromBlock(first_block, std::move(rb_gen), max_readahead, 0);
                });
            }

            // Inspect the first decoded block; skip leading empty blocks, then wire
            // up the final record-batch generator (readahead + cancellation).
            // prev_bytes_processed carries bytes from skipped empty blocks so they
            // are still reported by bytes_read().
            Future<> InitFromBlock(const DecodedBlock &block,
                                   AsyncGenerator<DecodedBlock> batch_gen, int max_readahead,
                                   int64_t prev_bytes_processed) {
                if (!block.record_batch) {
                    // End of file just return null batches
                    record_batch_gen_ = create_empty_generator<std::shared_ptr<RecordBatch>>();
                    return turbo::OkStatus();
                }

                schema_ = block.record_batch->schema();

                if (block.record_batch->num_rows() == 0) {
                    // Keep consuming blocks until the first non empty block is found
                    auto self = shared_from_this();
                    prev_bytes_processed += block.bytes_processed;
                    return batch_gen().Then([self, batch_gen, max_readahead,
                                                    prev_bytes_processed](const DecodedBlock &next_block) {
                        return self->InitFromBlock(next_block, std::move(batch_gen), max_readahead,
                                                   prev_bytes_processed);
                    });
                }

                AsyncGenerator<DecodedBlock> readahead_gen;
                if (read_options_.use_threads) {
                    readahead_gen = create_read_ahead_generator(std::move(batch_gen), max_readahead);
                } else {
                    readahead_gen = std::move(batch_gen);
                }

                // Re-prepend the non-empty first block we already pulled above.
                AsyncGenerator<DecodedBlock> restarted_gen =
                        create_generator_starts_with({block}, std::move(readahead_gen));

                auto bytes_decoded = bytes_decoded_;
                // Record decoded byte counts as batches are pulled; the deferred
                // prev_bytes_processed is charged exactly once, on the first batch.
                auto unwrap_and_record_bytes =
                        [bytes_decoded, prev_bytes_processed](
                                const DecodedBlock &block) mutable -> turbo::Result<std::shared_ptr<RecordBatch>> {
                            bytes_decoded->fetch_add(block.bytes_processed + prev_bytes_processed);
                            prev_bytes_processed = 0;
                            return block.record_batch;
                        };

                auto unwrapped =
                        create_mapped_generator(std::move(restarted_gen), std::move(unwrap_and_record_bytes));

                record_batch_gen_ = create_cancellable(std::move(unwrapped), io_context_.stop_token());
                return turbo::OkStatus();
            }

            std::shared_ptr<Schema> schema_;
            AsyncGenerator<std::shared_ptr<RecordBatch>> record_batch_gen_;
            // bytes which have been decoded and asked for by the caller
            std::shared_ptr<std::atomic<int64_t>> bytes_decoded_;
        };

/////////////////////////////////////////////////////////////////////////
// Serial TableReader implementation

        // Table reader that parses and converts blocks one at a time on the
        // calling thread.
        class SerialTableReader : public BaseTableReader {
        public:
            using BaseTableReader::BaseTableReader;

            turbo::Status init() override {
                TURBO_MOVE_OR_RAISE(auto stream_it,
                                       io::make_input_stream_iterator(input_, read_options_.block_size));

                // Conversion happens serially, so reading ahead more than one
                // block buys nothing.
                constexpr int32_t kBlockQueueSize = 1;
                TURBO_MOVE_OR_RAISE(auto readahead_it,
                                       create_read_ahead_iterator(std::move(stream_it), kBlockQueueSize));
                buffer_iterator_ = CSVBufferIterator::create(std::move(readahead_it));
                return turbo::OkStatus();
            }

            turbo::Result<std::shared_ptr<Table>> read() override {
                task_group_ = TaskGroup::MakeSerial(io_context_.stop_token());

                // Consume the first buffer to learn the header / column layout.
                TURBO_MOVE_OR_RAISE(auto first_buffer, buffer_iterator_.next());
                if (first_buffer == nullptr) {
                    return turbo::invalid_argument_error("Empty CSV file");
                }
                TURBO_RETURN_NOT_OK(ProcessHeader(first_buffer, &first_buffer));
                TURBO_RETURN_NOT_OK(MakeColumnBuilders());

                auto blocks = SerialBlockReader::MakeIterator(
                        std::move(buffer_iterator_), MakeChunker(parse_options_), std::move(first_buffer),
                        read_options_.skip_rows_after_names);
                for (;;) {
                    // Honor cancellation between blocks.
                    TURBO_RETURN_NOT_OK(io_context_.stop_token().Poll());

                    TURBO_MOVE_OR_RAISE(auto next_block, blocks.next());
                    if (turbo::is_iteration_end(next_block)) {
                        break;  // EOF
                    }
                    TURBO_RETURN_NOT_OK(ParseAndInsert(next_block));
                }
                // finish conversion, create schema and table
                TURBO_RETURN_NOT_OK(task_group_->finish());
                return MakeTable();
            }

        protected:
            turbo::Iterator<std::shared_ptr<Buffer>> buffer_iterator_;
        };

        // Table reader that parses and converts blocks in parallel: blocks are
        // chunked asynchronously and each one is parsed/inserted as a task on a
        // threaded TaskGroup backed by cpu_executor_.
        class AsyncThreadedTableReader
                : public BaseTableReader,
                  public std::enable_shared_from_this<AsyncThreadedTableReader> {
        public:
            using BaseTableReader::BaseTableReader;

            AsyncThreadedTableReader(io::IOContext io_context,
                                     std::shared_ptr<io::InputStream> input,
                                     const ReadOptions &read_options,
                                     const ParseOptions &parse_options,
                                     const ConvertOptions &convert_options, Executor *cpu_executor)
            // Count rows is currently not supported during parallel read
                    : BaseTableReader(std::move(io_context), input, read_options, parse_options,
                                      convert_options, /*count_rows=*/false),
                      cpu_executor_(cpu_executor) {}

            ~AsyncThreadedTableReader() override {
                if (task_group_) {
                    // In case of error, make sure all pending tasks are finished before
                    // we start destroying BaseTableReader members
                    TURBO_UNUSED(task_group_->finish());
                }
            }

            // Build the async buffer pipeline: background reads with readahead,
            // transferred onto the CPU executor for downstream processing.
            turbo::Status init() override {
                TURBO_MOVE_OR_RAISE(auto istream_it,
                                       io::make_input_stream_iterator(input_, read_options_.block_size));

                int max_readahead = cpu_executor_->get_capacity();
                int readahead_restart = std::max(1, max_readahead / 2);

                TURBO_MOVE_OR_RAISE(
                        auto bg_it, create_background_generator(std::move(istream_it), io_context_.executor(),
                                                            max_readahead, readahead_restart));

                auto transferred_it = create_transferred_generator(bg_it, cpu_executor_);
                buffer_generator_ = CSVBufferIterator::make_async(std::move(transferred_it));
                return turbo::OkStatus();
            }

            // Synchronous read blocks on the async implementation.
            turbo::Result<std::shared_ptr<Table>> read() override { return read_async().result(); }

            Future<std::shared_ptr<Table>> read_async() override {
                task_group_ = TaskGroup::MakeThreaded(cpu_executor_, io_context_.stop_token());

                auto self = shared_from_this();
                // Once the header is processed, fan out one parse task per block,
                // then finish the task group and assemble the table.
                return ProcessFirstBuffer().Then([self](const std::shared_ptr<Buffer> &first_buffer) {
                    auto block_generator = ThreadedBlockReader::MakeAsyncIterator(
                            self->buffer_generator_, MakeChunker(self->parse_options_), first_buffer,
                            self->read_options_.skip_rows_after_names);

                    std::function<turbo::Status(CSVBlock)> block_visitor =
                            [self](CSVBlock maybe_block) -> turbo::Status {
                                // The logic in visit_async_generator ensures that we will never be
                                // passed an empty block (visit does not call with the end token) so
                                // we can be assured maybe_block has a value.
                                        DKCHECK_GE(maybe_block.block_index, 0);
                                        DKCHECK(!maybe_block.consume_bytes);

                                // Launch parse task
                                self->task_group_->append(
                                        [self, maybe_block] { return self->ParseAndInsert(maybe_block); });
                                return turbo::OkStatus();
                            };

                    return visit_async_generator(std::move(block_generator), block_visitor)
                            .Then([self]() -> Future<> {
                                // By this point we've added all top level tasks so it is safe to call
                                // FinishAsync
                                return self->task_group_->FinishAsync();
                            })
                            .Then([self]() -> turbo::Result<std::shared_ptr<Table>> {
                                // finish conversion, create schema and table
                                return self->MakeTable();
                            });
                });
            }

        protected:
            // Consume the first buffer: process the header and create the column
            // builders; yields the remainder of the buffer past the header.
            Future<std::shared_ptr<Buffer>> ProcessFirstBuffer() {
                // First block
                auto first_buffer_future = buffer_generator_();
                return first_buffer_future.Then(
                        [self = shared_from_this()](const std::shared_ptr<Buffer> &first_buffer)
                                -> turbo::Result<std::shared_ptr<Buffer>> {
                            if (first_buffer == nullptr) {
                                return turbo::invalid_argument_error("Empty CSV file");
                            }
                            std::shared_ptr<Buffer> first_buffer_processed;
                            TURBO_RETURN_NOT_OK(self->ProcessHeader(first_buffer, &first_buffer_processed));
                            TURBO_RETURN_NOT_OK(self->MakeColumnBuilders());
                            return first_buffer_processed;
                        });
            }

            Executor *cpu_executor_;
            AsyncGenerator<std::shared_ptr<Buffer>> buffer_generator_;
        };

        // Construct and initialize the appropriate TableReader implementation:
        // threaded + async when use_threads is set, otherwise a serial reader
        // (which also counts rows).
        //
        // NOTE(review): `pool` is currently unused — the readers allocate via
        // io_context.pool(); presumably kept for signature compatibility. Confirm
        // before removing.
        turbo::Result<std::shared_ptr<TableReader>> MakeTableReader(
                [[maybe_unused]] MemoryPool *pool, io::IOContext io_context,
                std::shared_ptr<io::InputStream> input,
                const ReadOptions &read_options, const ParseOptions &parse_options,
                const ConvertOptions &convert_options) {
            // Validate all option structs up front so construction cannot fail
            // later on malformed options.
            TURBO_RETURN_NOT_OK(parse_options.validate());
            TURBO_RETURN_NOT_OK(read_options.validate());
            TURBO_RETURN_NOT_OK(convert_options.validate());
            std::shared_ptr<BaseTableReader> reader;
            if (read_options.use_threads) {
                // Parallel path; row counting is not supported there.
                auto cpu_executor = nebula::internal::get_cpu_thread_pool();
                reader = std::make_shared<AsyncThreadedTableReader>(
                        io_context, input, read_options, parse_options, convert_options, cpu_executor);
            } else {
                reader = std::make_shared<SerialTableReader>(io_context, input, read_options,
                                                             parse_options, convert_options,
                        /*count_rows=*/true);
            }
            TURBO_RETURN_NOT_OK(reader->init());
            return reader;
        }

        // Construct a StreamingReader and asynchronously initialize it (header
        // processing, generator setup) before handing it back.
        Future<std::shared_ptr<StreamingReader>> MakeStreamingReader(
                io::IOContext io_context, std::shared_ptr<io::InputStream> input,
                Executor *cpu_executor, const ReadOptions &read_options,
                const ParseOptions &parse_options, const ConvertOptions &convert_options) {
            TURBO_RETURN_NOT_OK(parse_options.validate());
            TURBO_RETURN_NOT_OK(read_options.validate());
            TURBO_RETURN_NOT_OK(convert_options.validate());
            // Row counting is only enabled when decoding is effectively serial.
            const bool serial_decoding =
                    !read_options.use_threads || cpu_executor->get_capacity() == 1;
            auto impl = std::make_shared<StreamingReaderImpl>(
                    io_context, input, read_options, parse_options, convert_options,
                    /*count_rows=*/serial_decoding);
            return impl->init(cpu_executor).Then([impl] {
                return std::dynamic_pointer_cast<StreamingReader>(impl);
            });
        }

/////////////////////////////////////////////////////////////////////////
// Row count implementation

        // Counts CSV data rows without converting any values: parses blocks
        // serially and sums per-block row counts.
        class CSVRowCounter : public ReaderMixin,
                              public std::enable_shared_from_this<CSVRowCounter> {
        public:
            CSVRowCounter(io::IOContext io_context, Executor *cpu_executor,
                          std::shared_ptr<io::InputStream> input, const ReadOptions &read_options,
                          const ParseOptions &parse_options)
                    : ReaderMixin(io_context, std::move(input), read_options, parse_options,
                                  ConvertOptions::defaults(), /*count_rows=*/true),
                      cpu_executor_(cpu_executor),
                      row_count_(0) {}

            // Asynchronously count all rows after the header.
            Future<int64_t> Count() {
                auto self = shared_from_this();
                return init(self).Then([self]() { return self->DoCount(self); });
            }

        private:
            // Set up the buffer pipeline and consume the header from the first
            // buffer; `self` keeps this object alive across continuations.
            Future<> init(const std::shared_ptr<CSVRowCounter> &self) {
                TURBO_MOVE_OR_RAISE(auto istream_it,
                                       io::make_input_stream_iterator(input_, read_options_.block_size));
                // TODO Consider exposing readahead as a read option (ARROW-12090)
                TURBO_MOVE_OR_RAISE(auto bg_it, create_background_generator(std::move(istream_it),
                                                                           io_context_.executor()));
                auto transferred_it = create_transferred_generator(bg_it, cpu_executor_);
                auto buffer_generator = CSVBufferIterator::make_async(std::move(transferred_it));

                return buffer_generator().Then(
                        [self, buffer_generator](std::shared_ptr<Buffer> first_buffer) {
                            if (!first_buffer) {
                                return turbo::invalid_argument_error("Empty CSV file");
                            }
                            TURBO_RETURN_NOT_OK(self->ProcessHeader(first_buffer, &first_buffer));
                            self->block_generator_ = SerialBlockReader::MakeAsyncIterator(
                                    buffer_generator, MakeChunker(self->parse_options_),
                                    std::move(first_buffer), 0);
                            return turbo::OkStatus();
                        });
            }

            // Drain the block generator, accumulating per-block row counts.
            Future<int64_t> DoCount(const std::shared_ptr<CSVRowCounter> &self) {
                // count_cb must return a value instead of turbo::Status/Future<> to work with
                // create_mapped_generator, and it must use a type with a valid end value to work with
                // turbo::iteration_end.
                std::function<turbo::Result<std::optional<int64_t>>(const CSVBlock &)> count_cb =
                        [self](const CSVBlock &maybe_block) -> turbo::Result<std::optional<int64_t>> {
                            TURBO_MOVE_OR_RAISE(auto parsed_block, self->parse(maybe_block));
                            int32_t total_row_count = parsed_block.parser->total_num_rows();
                            self->row_count_ += total_row_count;
                            return total_row_count;
                        };
                auto count_gen = create_mapped_generator(block_generator_, std::move(count_cb));
                return discard_all_from_async_generator(count_gen).Then(
                        [self]() { return self->row_count_; });
            }

            Executor *cpu_executor_;
            AsyncGenerator<CSVBlock> block_generator_;
            int64_t row_count_;
        };

    }  // namespace

/////////////////////////////////////////////////////////////////////////
// Factory functions

    // Public factory: delegates to MakeTableReader, using the context's pool.
    turbo::Result<std::shared_ptr<TableReader>> TableReader::create(
            io::IOContext io_context, std::shared_ptr<io::InputStream> input,
            const ReadOptions &read_options, const ParseOptions &parse_options,
            const ConvertOptions &convert_options) {
        auto *pool = io_context.pool();
        return MakeTableReader(pool, io_context, std::move(input), read_options,
                               parse_options, convert_options);
    }

    // Public synchronous factory: builds the async streaming reader on the
    // global CPU thread pool and blocks until its initialization completes.
    turbo::Result<std::shared_ptr<StreamingReader>> StreamingReader::create(
            io::IOContext io_context, std::shared_ptr<io::InputStream> input,
            const ReadOptions &read_options, const ParseOptions &parse_options,
            const ConvertOptions &convert_options) {
        auto *cpu_executor = nebula::internal::get_cpu_thread_pool();
        auto future = MakeStreamingReader(io_context, std::move(input), cpu_executor,
                                          read_options, parse_options, convert_options);
        // Block on the future and surface any initialization error to the caller.
        auto result = future.result();
        TURBO_MOVE_OR_RAISE(auto reader, result);
        return reader;
    }

    // Public asynchronous factory: forwards directly to MakeStreamingReader;
    // initialization runs on cpu_executor.
    Future<std::shared_ptr<StreamingReader>> StreamingReader::make_async(
            io::IOContext io_context, std::shared_ptr<io::InputStream> input,
            Executor *cpu_executor, const ReadOptions &read_options,
            const ParseOptions &parse_options, const ConvertOptions &convert_options) {
        return MakeStreamingReader(io_context, std::move(input), cpu_executor, read_options,
                                   parse_options, convert_options);
    }

    // Count the number of data rows in a CSV stream without converting values.
    Future<int64_t> count_rows_async(io::IOContext io_context,
                                   std::shared_ptr<io::InputStream> input,
                                   Executor *cpu_executor, const ReadOptions &read_options,
                                   const ParseOptions &parse_options) {
        // Fail fast on malformed options before any I/O happens.
        TURBO_RETURN_NOT_OK(parse_options.validate());
        TURBO_RETURN_NOT_OK(read_options.validate());
        auto row_counter = std::make_shared<CSVRowCounter>(
                io_context, cpu_executor, std::move(input), read_options, parse_options);
        return row_counter->Count();
    }

}  // namespace nebula::csv

