// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

// read Nebula files and streams

#pragma once

#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include <nebula/io/caching.h>
#include <nebula/io/type_fwd.h>
#include <nebula/ipc/message.h>
#include <nebula/ipc/options.h>
#include <nebula/core/record_batch.h>

#include <nebula/types/type_fwd.h>
#include <nebula/future/async_generator.h>
#include <turbo/base/macros.h>

namespace nebula::ipc {

    // Forward declarations for types defined elsewhere in nebula::ipc.
    class DictionaryMemo;
    struct IpcPayload;

    // Re-export the reader interface from the parent namespace for convenience.
    using RecordBatchReader = ::nebula::RecordBatchReader;

    /// \brief Aggregated statistics about the IPC messages consumed so far.
    ///
    /// Returned by the `stats()` accessors of the reader and decoder classes
    /// below; all counters start at zero.
    struct ReadStats {
      /// Number of IPC messages read.
      int64_t num_messages = 0;
      /// Number of record batches read.
      int64_t num_record_batches = 0;
      /// Number of dictionary batches read.
      ///
      /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries
      int64_t num_dictionary_batches = 0;

      /// Number of dictionary deltas read.
      int64_t num_dictionary_deltas = 0;
      /// Number of replaced dictionaries (i.e. where a dictionary batch replaces
      /// an existing dictionary with an unrelated new dictionary).
      int64_t num_replaced_dictionaries = 0;
    };

    /// \brief Synchronous batch stream reader that reads from io::InputStream
    ///
    /// This class reads the schema (plus any dictionaries) as the first messages
    /// in the stream, followed by record batches. For more granular zero-copy
    /// reads see the read_record_batch functions.
    class TURBO_EXPORT RecordBatchStreamReader : public RecordBatchReader {
     public:
      /// \brief Create a batch reader from a generic MessageReader.
      ///
      /// This will take ownership of the given MessageReader.
      ///
      /// \param[in] message_reader a MessageReader implementation
      /// \param[in] options any IPC reading options (optional)
      /// \return the created batch reader
      static turbo::Result<std::shared_ptr<RecordBatchStreamReader>> open(
          std::unique_ptr<MessageReader> message_reader,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief Record batch stream reader from InputStream
      ///
      /// \param[in] stream an input stream instance. Must stay alive throughout
      /// the lifetime of the stream reader; the reader does NOT take ownership
      /// \param[in] options any IPC reading options (optional)
      /// \return the created batch reader
      static turbo::Result<std::shared_ptr<RecordBatchStreamReader>> open(
          io::InputStream* stream,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief Open a stream and retain (shared) ownership of the stream object
      /// \param[in] stream the input stream
      /// \param[in] options any IPC reading options (optional)
      /// \return the created batch reader
      static turbo::Result<std::shared_ptr<RecordBatchStreamReader>> open(
          const std::shared_ptr<io::InputStream>& stream,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief Return current read statistics
      virtual ReadStats stats() const = 0;
    };

    /// \brief Reads the record batch file format
    class TURBO_EXPORT RecordBatchFileReader
        : public std::enable_shared_from_this<RecordBatchFileReader> {
     public:
      virtual ~RecordBatchFileReader() = default;

      /// \brief open a RecordBatchFileReader
      ///
      /// open a file-like object that is assumed to be self-contained; i.e., the
      /// end of the file interface is the end of the Nebula file. Note that there
      /// can be any amount of data preceding the Nebula-formatted data, because we
      /// need only locate the end of the Nebula file stream to discover the metadata
      /// and then proceed to read the data into memory.
      ///
      /// \param[in] file the data source (borrowed; must outlive the reader)
      /// \param[in] options options for IPC reading
      /// \return the returned reader
      static turbo::Result<std::shared_ptr<RecordBatchFileReader>> open(
          io::RandomAccessFile* file,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief open a RecordBatchFileReader
      /// If the file is embedded within some larger file or memory region, you can
      /// pass the absolute memory offset to the end of the file (which contains the
      /// metadata footer). The metadata must have been written with memory offsets
      /// relative to the start of the containing file
      ///
      /// \param[in] file the data source
      /// \param[in] footer_offset the position of the end of the Nebula file
      /// \param[in] options options for IPC reading
      /// \return the returned reader
      static turbo::Result<std::shared_ptr<RecordBatchFileReader>> open(
          io::RandomAccessFile* file, int64_t footer_offset,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief Version of open that retains ownership of file
      ///
      /// \param[in] file the data source
      /// \param[in] options options for IPC reading
      /// \return the returned reader
      static turbo::Result<std::shared_ptr<RecordBatchFileReader>> open(
          const std::shared_ptr<io::RandomAccessFile>& file,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief Version of open that retains ownership of file
      ///
      /// \param[in] file the data source
      /// \param[in] footer_offset the position of the end of the Nebula file
      /// \param[in] options options for IPC reading
      /// \return the returned reader
      static turbo::Result<std::shared_ptr<RecordBatchFileReader>> open(
          const std::shared_ptr<io::RandomAccessFile>& file, int64_t footer_offset,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief open a file asynchronously (owns the file).
      ///
      /// \param[in] file the data source
      /// \param[in] options options for IPC reading
      /// \return a future resolving to the created reader
      static Future<std::shared_ptr<RecordBatchFileReader>> open_async(
          const std::shared_ptr<io::RandomAccessFile>& file,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief open a file asynchronously (borrows the file).
      ///
      /// \param[in] file the data source (must outlive the reader)
      /// \param[in] options options for IPC reading
      /// \return a future resolving to the created reader
      static Future<std::shared_ptr<RecordBatchFileReader>> open_async(
          io::RandomAccessFile* file,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief open a file asynchronously (owns the file).
      ///
      /// \param[in] file the data source
      /// \param[in] footer_offset the position of the end of the Nebula file
      /// \param[in] options options for IPC reading
      /// \return a future resolving to the created reader
      static Future<std::shared_ptr<RecordBatchFileReader>> open_async(
          const std::shared_ptr<io::RandomAccessFile>& file, int64_t footer_offset,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief open a file asynchronously (borrows the file).
      ///
      /// \param[in] file the data source (must outlive the reader)
      /// \param[in] footer_offset the position of the end of the Nebula file
      /// \param[in] options options for IPC reading
      /// \return a future resolving to the created reader
      static Future<std::shared_ptr<RecordBatchFileReader>> open_async(
          io::RandomAccessFile* file, int64_t footer_offset,
          const IpcReadOptions& options = IpcReadOptions::defaults());

      /// \brief The schema read from the file
      virtual std::shared_ptr<Schema> schema() const = 0;

      /// \brief Returns the number of record batches in the file
      virtual int num_record_batches() const = 0;

      /// \brief Return the metadata version from the file metadata
      virtual MetadataVersion version() const = 0;

      /// \brief Return the contents of the custom_metadata field from the file's
      /// Footer
      virtual std::shared_ptr<const KeyValueMetadata> metadata() const = 0;

      /// \brief read a particular record batch from the file. Does not copy memory
      /// if the input source supports zero-copy.
      ///
      /// \param[in] i the index of the record batch to return
      /// \return the read batch
      virtual turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(int i) = 0;

      /// \brief read a particular record batch along with its custom metadata from the file.
      /// Does not copy memory if the input source supports zero-copy.
      ///
      /// \param[in] i the index of the record batch to return
      /// \return a struct containing the read batch and its custom metadata
      virtual turbo::Result<RecordBatchWithMetadata> read_record_batch_with_custom_metadata(int i) = 0;

      /// \brief Return current read statistics
      virtual ReadStats stats() const = 0;

      /// \brief Computes the total number of rows in the file.
      virtual turbo::Result<int64_t> count_rows() = 0;

      /// \brief Begin loading metadata for the desired batches into memory.
      ///
      /// This method will also begin loading all dictionaries messages into memory.
      ///
      /// For a regular file this will immediately begin disk I/O in the background on a
      /// thread on the IOContext's thread pool.  If the file is memory mapped this will
      /// ensure the memory needed for the metadata is paged from disk into memory
      ///
      /// \param indices Indices of the batches to prefetch
      ///                If empty then all batches will be prefetched.
      virtual turbo::Status pre_buffer_metadata(const std::vector<int>& indices) = 0;

      /// \brief Get a reentrant generator of record batches.
      ///
      /// \param[in] coalesce If true, enable I/O coalescing.
      /// \param[in] io_context The IOContext to use (controls which thread pool
      ///     is used for I/O).
      /// \param[in] cache_options Options for coalescing (if enabled).
      /// \param[in] executor Optionally, an executor to use for decoding record
      ///     batches. This is generally only a benefit for very wide and/or
      ///     compressed batches.
      virtual turbo::Result<AsyncGenerator<std::shared_ptr<RecordBatch>>> get_record_batch_generator(
          const bool coalesce = false,
          const io::IOContext& io_context = io::default_io_context(),
          const io::CacheOptions cache_options = io::CacheOptions::lazy_defaults(),
          nebula::internal::Executor* executor = nullptr) = 0;

      /// \brief Collect all batches as a vector of record batches
      turbo::Result<RecordBatchVector> to_record_batches();

      /// \brief Collect all batches and concatenate as nebula::Table
      turbo::Result<std::shared_ptr<Table>> to_table();
    };

    /// \brief A general listener class to receive events.
    ///
    /// You must implement callback methods for the events you are
    /// interested in.
    ///
    /// This API is EXPERIMENTAL.
    ///
    /// \since 0.17.0
    class TURBO_EXPORT Listener {
     public:
      virtual ~Listener() = default;

      /// \brief Called when end-of-stream is received.
      ///
      /// The default implementation just returns turbo::OkStatus().
      ///
      /// \return turbo::Status
      ///
      /// \see StreamDecoder
      virtual turbo::Status on_eos();

      /// \brief Called when a record batch is decoded and
      /// on_record_batch_with_metadata_decoded() isn't overridden.
      ///
      /// The default implementation just returns
      /// turbo::unimplemented_error().
      ///
      /// \param[in] record_batch a record batch decoded
      /// \return turbo::Status
      ///
      /// \see StreamDecoder
      virtual turbo::Status on_record_batch_decoded(std::shared_ptr<RecordBatch> record_batch);

      /// \brief Called when a record batch with custom metadata is decoded.
      ///
      /// The default implementation just calls on_record_batch_decoded()
      /// without custom metadata.
      ///
      /// \param[in] record_batch_with_metadata a record batch with custom
      /// metadata decoded
      /// \return turbo::Status
      ///
      /// \see StreamDecoder
      ///
      /// \since 13.0.0
      virtual turbo::Status on_record_batch_with_metadata_decoded(
          RecordBatchWithMetadata record_batch_with_metadata);

      /// \brief Called when a schema is decoded.
      ///
      /// The default implementation just returns turbo::OkStatus().
      ///
      /// \param[in] schema a schema decoded
      /// \return turbo::Status
      ///
      /// \see StreamDecoder
      virtual turbo::Status on_schema_decoded(std::shared_ptr<Schema> schema);

      /// \brief Called when a schema is decoded.
      ///
      /// The default implementation just calls on_schema_decoded(schema)
      /// (without filtered_schema) to keep backward compatibility.
      ///
      /// \param[in] schema a schema decoded
      /// \param[in] filtered_schema a filtered schema that only has read fields
      /// \return turbo::Status
      ///
      /// \see StreamDecoder
      ///
      /// \since 13.0.0
      virtual turbo::Status on_schema_decoded(std::shared_ptr<Schema> schema,
                                     std::shared_ptr<Schema> filtered_schema);
    };

    /// \brief Collect schema and record batches decoded by StreamDecoder.
    ///
    /// Decoded schemas and batches are accumulated in memory and can be
    /// retrieved with the accessors below, or drained in LIFO order with
    /// PopRecordBatch()/PopRecordBatchWithMetadata().
    ///
    /// This API is EXPERIMENTAL.
    ///
    /// \since 0.17.0
    class TURBO_EXPORT CollectListener : public Listener {
     public:
      // All members are value-initialized by their in-class defaults, so the
      // defaulted special members are sufficient (Rule of Zero).
      CollectListener() = default;
      ~CollectListener() override = default;

      /// \brief Store the decoded schema and its filtered counterpart.
      turbo::Status on_schema_decoded(std::shared_ptr<Schema> schema,
                             std::shared_ptr<Schema> filtered_schema) override {
        schema_ = std::move(schema);
        filtered_schema_ = std::move(filtered_schema);
        return turbo::OkStatus();
      }

      /// \brief Append the decoded record batch and its custom metadata.
      ///
      /// The batch and metadata vectors are always pushed in lockstep so they
      /// stay the same length.
      turbo::Status on_record_batch_with_metadata_decoded(
          RecordBatchWithMetadata record_batch_with_metadata) override {
        record_batches_.push_back(std::move(record_batch_with_metadata.batch));
        metadatas_.push_back(std::move(record_batch_with_metadata.custom_metadata));
        return turbo::OkStatus();
      }

      /// \return the decoded schema
      std::shared_ptr<Schema> schema() const { return schema_; }

      /// \return the filtered schema that only has the read fields
      std::shared_ptr<Schema> filtered_schema() const { return filtered_schema_; }

      /// \return all decoded record batches
      const std::vector<std::shared_ptr<RecordBatch>>& record_batches() const {
        return record_batches_;
      }

      /// \return the custom metadata of all decoded record batches
      const std::vector<std::shared_ptr<KeyValueMetadata>>& metadatas() const {
        return metadatas_;
      }

      /// \return the number of collected record batches
      int64_t num_record_batches() const {
        // Explicit cast to avoid an implicit size_t -> int64_t conversion.
        return static_cast<int64_t>(record_batches_.size());
      }

      /// \return the last decoded record batch and remove it from
      /// record_batches (a null batch if nothing has been collected)
      std::shared_ptr<RecordBatch> PopRecordBatch() {
        auto record_batch_with_metadata = PopRecordBatchWithMetadata();
        return std::move(record_batch_with_metadata.batch);
      }

      /// \return the last decoded record batch with custom metadata and
      /// remove it from record_batches (a default-constructed result if
      /// nothing has been collected)
      RecordBatchWithMetadata PopRecordBatchWithMetadata() {
        RecordBatchWithMetadata record_batch_with_metadata;
        if (record_batches_.empty()) {
          return record_batch_with_metadata;
        }
        // record_batches_ and metadatas_ are pushed in lockstep, so a
        // non-empty record_batches_ implies a non-empty metadatas_.
        record_batch_with_metadata.batch = std::move(record_batches_.back());
        record_batch_with_metadata.custom_metadata = std::move(metadatas_.back());
        record_batches_.pop_back();
        metadatas_.pop_back();
        return record_batch_with_metadata;
      }

     private:
      std::shared_ptr<Schema> schema_;
      std::shared_ptr<Schema> filtered_schema_;
      std::vector<std::shared_ptr<RecordBatch>> record_batches_;
      std::vector<std::shared_ptr<KeyValueMetadata>> metadatas_;
    };

    /// \brief Push style stream decoder that receives data from user.
    ///
    /// This class decodes the Apache Nebula IPC streaming format data.
    ///
    /// This API is EXPERIMENTAL.
    class TURBO_EXPORT StreamDecoder {
     public:
      /// \brief Construct a stream decoder.
      ///
      /// \param[in] listener a Listener that must implement
      /// Listener::on_record_batch_decoded() to receive decoded record batches
      /// \param[in] options any IPC reading options (optional)
      StreamDecoder(std::shared_ptr<Listener> listener,
                    IpcReadOptions options = IpcReadOptions::defaults());

      virtual ~StreamDecoder();

      /// \brief Feed raw data to the decoder.
      ///
      /// If the decoder can read one or more record batches from the data,
      /// the decoder calls listener->on_record_batch_decoded() with a
      /// decoded record batch multiple times.
      ///
      /// \param[in] data the raw data to be processed. This data isn't
      /// copied. The passed memory must be kept alive through record
      /// batch processing.
      /// \param[in] size raw data size.
      /// \return turbo::Status
      turbo::Status consume(const uint8_t* data, int64_t size);

      /// \brief Feed data to the decoder as a Buffer.
      ///
      /// If the decoder can read one or more record batches from the
      /// Buffer, the decoder calls listener->on_record_batch_decoded()
      /// with a decoded record batch multiple times.
      ///
      /// \param[in] buffer a Buffer to be processed.
      /// \return turbo::Status
      turbo::Status consume(std::shared_ptr<Buffer> buffer);

      /// \brief reset the internal status.
      ///
      /// You can reuse this decoder for a new stream after calling
      /// this.
      ///
      /// \return turbo::Status
      turbo::Status reset();

      /// \return the shared schema of the record batches in the stream
      std::shared_ptr<Schema> schema() const;

      /// \brief Return the number of bytes needed to advance the state of
      /// the decoder.
      ///
      /// This method is provided for users who want to optimize performance.
      /// Normal users don't need to use this method.
      ///
      /// Here is an example usage for normal users:
      ///
      /// ~~~{.cpp}
      /// decoder.consume(buffer1);
      /// decoder.consume(buffer2);
      /// decoder.consume(buffer3);
      /// ~~~
      ///
      /// Decoder has internal buffer. If consumed data isn't enough to
      /// advance the state of the decoder, consumed data is buffered to
      /// the internal buffer. It causes performance overhead.
      ///
      /// If you pass next_required_size() size data to each consume()
      /// call, the decoder doesn't use its internal buffer. It improves
      /// performance.
      ///
      /// Here is an example usage to avoid using internal buffer:
      ///
      /// ~~~{.cpp}
      /// buffer1 = get_data(decoder.next_required_size());
      /// decoder.consume(buffer1);
      /// buffer2 = get_data(decoder.next_required_size());
      /// decoder.consume(buffer2);
      /// ~~~
      ///
      /// Users can use this method to avoid creating small chunks. Record
      /// batch data must be contiguous data. If users pass small chunks
      /// to the decoder, the decoder needs to concatenate small chunks
      /// internally. It causes performance overhead.
      ///
      /// Here is an example usage to reduce small chunks:
      ///
      /// ~~~{.cpp}
      /// buffer = allocate_resizable_buffer();
      /// while ((small_chunk = get_data(&small_chunk_size))) {
      ///   auto current_buffer_size = buffer->size();
      ///   buffer->resize(current_buffer_size + small_chunk_size);
      ///   memcpy(buffer->mutable_data() + current_buffer_size,
      ///          small_chunk,
      ///          small_chunk_size);
      ///   if (buffer->size() < decoder.next_required_size()) {
      ///     continue;
      ///   }
      ///   std::shared_ptr<nebula::Buffer> chunk(buffer.release());
      ///   decoder.consume(chunk);
      ///   buffer = allocate_resizable_buffer();
      /// }
      /// if (buffer->size() > 0) {
      ///   std::shared_ptr<nebula::Buffer> chunk(buffer.release());
      ///   decoder.consume(chunk);
      /// }
      /// ~~~
      ///
      /// \return the number of bytes needed to advance the state of the
      /// decoder
      int64_t next_required_size() const;

      /// \brief Return current read statistics
      ReadStats stats() const;

     private:
      class StreamDecoderImpl;
      std::unique_ptr<StreamDecoderImpl> impl_;

      TURBO_DISALLOW_COPY_AND_ASSIGN(StreamDecoder);
    };

    // Generic read functions; these do not copy data if the input supports
    // zero-copy reads.

    /// \brief read Schema from stream serialized as a single IPC message
    /// and populate any dictionary-encoded fields into a DictionaryMemo
    ///
    /// \param[in] stream an InputStream
    /// \param[in] dictionary_memo for recording dictionary-encoded fields
    /// \return the output Schema
    ///
    /// If record batches follow the schema, it is better to use
    /// RecordBatchStreamReader
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<Schema>> read_schema(io::InputStream* stream,
                                               DictionaryMemo* dictionary_memo);

    /// \brief read Schema from encapsulated Message
    ///
    /// \param[in] message the message containing the Schema IPC metadata
    /// \param[in] dictionary_memo DictionaryMemo for recording dictionary-encoded
    /// fields. Can be nullptr if you are sure there are no
    /// dictionary-encoded fields
    /// \return the resulting Schema
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<Schema>> read_schema(const Message& message,
                                               DictionaryMemo* dictionary_memo);

    /// \brief read record batch as encapsulated IPC message with metadata size
    /// prefix and header
    ///
    /// \param[in] schema the record batch schema
    /// \param[in] dictionary_memo DictionaryMemo which has any
    /// dictionaries. Can be nullptr if you are sure there are no
    /// dictionary-encoded fields
    /// \param[in] options IPC options for reading
    /// \param[in] stream the file where the batch is located
    /// \return the read record batch
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
        const std::shared_ptr<Schema>& schema, const DictionaryMemo* dictionary_memo,
        const IpcReadOptions& options, io::InputStream* stream);

    /// \brief read record batch from message
    ///
    /// \param[in] message a Message containing the record batch metadata
    /// \param[in] schema the record batch schema
    /// \param[in] dictionary_memo DictionaryMemo which has any
    /// dictionaries. Can be nullptr if you are sure there are no
    /// dictionary-encoded fields
    /// \param[in] options IPC options for reading
    /// \return the read record batch
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
        const Message& message, const std::shared_ptr<Schema>& schema,
        const DictionaryMemo* dictionary_memo, const IpcReadOptions& options);

    /// \brief read record batch from file given metadata and schema
    ///
    /// \param[in] metadata a Message containing the record batch metadata
    /// \param[in] schema the record batch schema
    /// \param[in] dictionary_memo DictionaryMemo which has any
    /// dictionaries. Can be nullptr if you are sure there are no
    /// dictionary-encoded fields
    /// \param[in] options options for deserialization
    /// \param[in] file a random access file
    /// \return the read record batch
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<RecordBatch>> read_record_batch(
        BufferSpan metadata, const std::shared_ptr<Schema>& schema,
        const DictionaryMemo* dictionary_memo, const IpcReadOptions& options,
        io::RandomAccessFile* file);

    /// \brief read nebula::Tensor as encapsulated IPC message in file
    ///
    /// \param[in] file an InputStream pointed at the start of the message
    /// \return the read tensor
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<Tensor>> read_tensor(io::InputStream* file);

    /// \brief EXPERIMENTAL: read nebula::Tensor from IPC message
    ///
    /// \param[in] message a Message containing the tensor metadata and body
    /// \return the read tensor
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<Tensor>> read_tensor(const Message& message);

    /// \brief EXPERIMENTAL: read nebula::SparseTensor as encapsulated IPC message in file
    ///
    /// \param[in] file an InputStream pointed at the start of the message
    /// \return the read sparse tensor
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor(io::InputStream* file);

    /// \brief EXPERIMENTAL: read nebula::SparseTensor from IPC message
    ///
    /// \param[in] message a Message containing the tensor metadata and body
    /// \return the read sparse tensor
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor(const Message& message);

    namespace internal {

    // These internal APIs may change without warning or deprecation

    /// \brief EXPERIMENTAL: read the body buffer count from sparse tensor metadata
    /// \param[in] metadata a Buffer containing the sparse tensor metadata
    /// \return the count of the body buffers
    TURBO_EXPORT
    turbo::Result<size_t> read_sparse_tensor_body_buffer_count(BufferSpan metadata);

    /// \brief EXPERIMENTAL: read nebula::SparseTensor from an IpcPayload
    /// \param[in] payload an IpcPayload containing a serialized SparseTensor
    /// \return the read sparse tensor
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<SparseTensor>> read_sparse_tensor_payload(const IpcPayload& payload);

    // Entry points for fuzzing targets: each consumes an arbitrary byte
    // sequence and exercises the corresponding decode path.
    TURBO_EXPORT
    turbo::Status FuzzIpcStream(const uint8_t* data, int64_t size);
    TURBO_EXPORT
    turbo::Status FuzzIpcTensorStream(const uint8_t* data, int64_t size);
    TURBO_EXPORT
    turbo::Status FuzzIpcFile(const uint8_t* data, int64_t size);

    }  // namespace internal

}  // namespace nebula::ipc

