// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <memory>

#include <nebula/core/record_batch.h>
#include <nebula/io/type_fwd.h>
#include <nebula/json/options.h>
#include <nebula/util/type_fwd.h>

#include <turbo/base/macros.h>
#include <turbo/utility/status.h>

namespace nebula::json {

    /// A class that reads an entire JSON file into a Nebula Table
    ///
    /// The file is expected to consist of individual line-separated JSON objects
    class TURBO_EXPORT TableReader {
    public:
        virtual ~TableReader() = default;

        /// \brief read the entire JSON file and convert it to a Nebula Table
        /// \return the resulting Table, or an error status on failure
        [[nodiscard]] virtual turbo::Result<std::shared_ptr<Table>> read() = 0;

        /// \brief create a TableReader instance
        ///
        /// \param[in] pool Memory pool used for allocations
        /// \param[in] input JSON source stream to read from
        /// \param[in] read_options Options for reading
        /// \param[in] parse_options Options for chunking, parsing, and conversion
        /// \return The initialized reader
        [[nodiscard]] static turbo::Result<std::shared_ptr<TableReader>> create(
                MemoryPool *pool,
                std::shared_ptr<io::InputStream> input,
                const ReadOptions &read_options,
                const ParseOptions &parse_options);
    };

    /// \brief parse one buffer of JSON data into a single `RecordBatch`
    ///
    /// \param[in] options Options for parsing and conversion
    /// \param[in] json Buffer holding the JSON data to parse
    /// \return the parsed RecordBatch, or an error status on failure
    TURBO_EXPORT turbo::Result<std::shared_ptr<RecordBatch>> ParseOne(ParseOptions options,
                                                                      std::shared_ptr<Buffer> json);

    /// \brief A class that reads a JSON file incrementally
    ///
    /// JSON data is read from a stream in fixed-size blocks (configurable with
    /// `ReadOptions::block_size`). Each block is converted to a `RecordBatch`. Yielded
    /// batches have a consistent schema but may differ in row count.
    ///
    /// The supplied `ParseOptions` are used to determine a schema, based either on a
    /// provided explicit schema or inferred from the first non-empty block.
    /// Afterwards, the target schema is frozen. If `UnexpectedFieldBehavior::InferType` is
    /// specified, unexpected fields will only be inferred for the first block. Afterwards
    /// they'll be treated as errors.
    ///
    /// If `ReadOptions::use_threads` is `true`, each block's parsing/decoding task will be
    /// parallelized on the given `cpu_executor` (with readahead corresponding to the
    /// executor's capacity). If an executor isn't provided, the global thread pool will be
    /// used.
    ///
    /// If `ReadOptions::use_threads` is `false`, computations will be run on the calling
    /// thread and `cpu_executor` will be ignored.
    class TURBO_EXPORT StreamingReader : public RecordBatchReader {
    public:
        virtual ~StreamingReader() = default;

        /// \brief read the next `RecordBatch` asynchronously
        /// This function is async-reentrant (but not synchronously reentrant). However, if
        /// threading is disabled, this will block until completion.
        /// \return Future that completes with the next decoded `RecordBatch`
        ///         (NOTE(review): presumably a null batch signals end of stream, per the
        ///         usual `RecordBatchReader` convention — confirm in the implementation)
        virtual Future<std::shared_ptr<RecordBatch>> read_next_async() = 0;

        /// \brief Get the number of bytes which have been successfully converted to record
        /// batches and consumed
        /// \return count of consumed input bytes
        [[nodiscard]] virtual int64_t bytes_processed() const = 0;

        /// \brief create a `StreamingReader` from an `InputStream`
        /// Blocks until the initial batch is loaded
        ///
        /// \param[in] stream JSON source stream
        /// \param[in] read_options Options for reading
        /// \param[in] parse_options Options for chunking, parsing, and conversion
        /// \param[in] io_context Context for IO operations (optional)
        /// \param[in] cpu_executor Executor for computation tasks (optional)
        /// \return The initialized reader
        static turbo::Result<std::shared_ptr<StreamingReader>> create(
                std::shared_ptr<io::InputStream> stream, const ReadOptions &read_options,
                const ParseOptions &parse_options,
                const io::IOContext &io_context = io::default_io_context(),
                ::nebula::internal::Executor *cpu_executor = nullptr);

        /// \brief create a `StreamingReader` from an `InputStream` asynchronously
        /// Returned future completes after loading the first batch
        ///
        /// \param[in] stream JSON source stream
        /// \param[in] read_options Options for reading
        /// \param[in] parse_options Options for chunking, parsing, and conversion
        /// \param[in] io_context Context for IO operations (optional)
        /// \param[in] cpu_executor Executor for computation tasks (optional)
        /// \return Future for the initialized reader
        static Future<std::shared_ptr<StreamingReader>> make_async(
                std::shared_ptr<io::InputStream> stream, const ReadOptions &read_options,
                const ParseOptions &parse_options,
                const io::IOContext &io_context = io::default_io_context(),
                ::nebula::internal::Executor *cpu_executor = nullptr);
    };

}  // namespace nebula::json

