// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include <nebula/core/compare.h>
#include <nebula/core/device.h>

#include <turbo/utility/status.h>
#include <nebula/types/type_fwd.h>
#include <turbo/functional/iterator.h>
#include <turbo/base/macros.h>

namespace nebula {

    /// \class RecordBatch
    /// \brief Collection of equal-length arrays matching a particular Schema
    ///
    /// A record batch is a table-like data structure that is semantically a
    /// sequence of fields, each a contiguous Nebula array
    class TURBO_EXPORT RecordBatch {
    public:
        // Virtual destructor: RecordBatch is a polymorphic base (all accessors are
        // virtual), so deletion through a base pointer must be safe.
        virtual ~RecordBatch() = default;

        /// \brief Construct a record batch from a schema and column arrays
        ///
        /// \param[in] schema The record batch schema
        /// \param[in] num_rows length of fields in the record batch. Each array
        /// should have the same length as num_rows
        /// \param[in] columns the record batch fields as vector of arrays
        /// \param[in] sync_event optional synchronization event for non-CPU device
        /// memory used by buffers
        static std::shared_ptr<RecordBatch> create(
                std::shared_ptr<Schema> schema, int64_t num_rows,
                std::vector<std::shared_ptr<Array>> columns,
                std::shared_ptr<Device::SyncEvent> sync_event = nullptr);

        /// \brief Construct record batch from vector of internal data structures
        /// \since 0.5.0
        ///
        /// This class is intended for internal use, or advanced users.
        ///
        /// \param schema the record batch schema
        /// \param num_rows the number of semantic rows in the record batch. This
        /// should be equal to the length of each field
        /// \param columns the data for the batch's columns
        /// \param device_type the type of the device that the Nebula columns are
        /// allocated on
        /// \param sync_event optional synchronization event for non-CPU device
        /// memory used by buffers
        static std::shared_ptr<RecordBatch> create(
                std::shared_ptr<Schema> schema, int64_t num_rows,
                std::vector<std::shared_ptr<ArrayData>> columns,
                DeviceAllocationType device_type = DeviceAllocationType::kCPU,
                std::shared_ptr<Device::SyncEvent> sync_event = nullptr);

        /// \brief create an empty RecordBatch of a given schema
        ///
        /// The output RecordBatch will be created with DataTypes from
        /// the given schema.
        ///
        /// \param[in] schema the schema of the empty RecordBatch
        /// \param[in] pool the memory pool to allocate memory from
        /// \return the resulting RecordBatch
        static turbo::Result<std::shared_ptr<RecordBatch>> make_empty(
                std::shared_ptr<Schema> schema, MemoryPool *pool = default_memory_pool());

        /// \brief Convert record batch to struct array
        ///
        /// create a struct array whose child arrays are the record batch's columns.
        /// Note that the record batch's top-level field metadata cannot be reflected
        /// in the resulting struct array.
        turbo::Result<std::shared_ptr<StructArray>> ToStructArray() const;

        /// \brief Convert record batch with one data type to Tensor
        ///
        /// create a Tensor object with shape (number of rows, number of columns).
        /// The memory layout of the result follows the \p row_major argument;
        /// note the default produces a row-major Tensor. For a column-major
        /// result (row_major == false), strides are (type size in bytes,
        /// type size in bytes * number of rows).
        ///
        /// \param[in] null_to_nan if true, convert nulls to NaN
        /// \param[in] row_major if true, create row-major Tensor else column-major Tensor
        /// \param[in] pool the memory pool to allocate the tensor buffer
        /// \return the resulting Tensor
        turbo::Result<std::shared_ptr<Tensor>> ToTensor(
                bool null_to_nan = false, bool row_major = true,
                MemoryPool *pool = default_memory_pool()) const;

        /// \brief Construct record batch from struct array
        ///
        /// This constructs a record batch using the child arrays of the given
        /// array, which must be a struct array.
        ///
        /// \param[in] array the source array, must be a StructArray
        /// \param[in] pool the memory pool to allocate new validity bitmaps
        ///
        /// This operation will usually be zero-copy.  However, if the struct array has an
        /// offset or a validity bitmap then these will need to be pushed into the child arrays.
        /// Pushing the offset is zero-copy but pushing the validity bitmap is not.
        static turbo::Result<std::shared_ptr<RecordBatch>> FromStructArray(
                const std::shared_ptr<Array> &array, MemoryPool *pool = default_memory_pool());

        /// \brief Determine if two record batches are exactly equal
        ///
        /// \param[in] other the RecordBatch to compare with
        /// \param[in] check_metadata if true, check that Schema metadata is the same
        /// \param[in] opts the options for equality comparisons
        /// \return true if batches are equal
        bool equals(const RecordBatch &other, bool check_metadata = false,
                    const EqualOptions &opts = EqualOptions::defaults()) const;

        /// \brief Determine if two record batches are approximately equal
        ///
        /// Unlike equals(), this never checks schema metadata.
        ///
        /// \param[in] other the RecordBatch to compare with
        /// \param[in] opts the options for equality comparisons
        /// \return true if batches are approximately equal
        bool approx_equals(const RecordBatch &other,
                          const EqualOptions &opts = EqualOptions::defaults()) const;

        /// \return the record batch's schema
        const std::shared_ptr<Schema> &schema() const { return schema_; }

        /// \brief Replace the schema with another schema with the same types, but potentially
        /// different field names and/or metadata.
        turbo::Result<std::shared_ptr<RecordBatch>> replace_schema(
                std::shared_ptr<Schema> schema) const;

        /// \brief Retrieve all columns at once
        virtual const std::vector<std::shared_ptr<Array>> &columns() const = 0;

        /// \brief Retrieve an array from the record batch
        /// \param[in] i field index, does not boundscheck
        /// \return an Array object
        virtual std::shared_ptr<Array> column(int i) const = 0;

        /// \brief Retrieve an array from the record batch
        /// \param[in] name field name
        /// \return an Array or null if no field was found
        std::shared_ptr<Array> get_column_by_name(const std::string &name) const;

        /// \brief Retrieve an array's internal data from the record batch
        /// \param[in] i field index, does not boundscheck
        /// \return an internal ArrayData object
        virtual std::shared_ptr<ArrayData> column_data(int i) const = 0;

        /// \brief Retrieve all arrays' internal data from the record batch.
        virtual const ArrayDataVector &column_data() const = 0;

        /// \brief Add column to the record batch, producing a new RecordBatch
        ///
        /// \param[in] i field index, which will be boundschecked
        /// \param[in] field field to be added
        /// \param[in] column column to be added
        virtual turbo::Result<std::shared_ptr<RecordBatch>> add_column(
                int i, const std::shared_ptr<Field> &field,
                const std::shared_ptr<Array> &column) const = 0;

        /// \brief Add new nullable column to the record batch, producing a new
        /// RecordBatch.
        ///
        /// For non-nullable columns, use the Field-based version of this method.
        ///
        /// \param[in] i field index, which will be boundschecked
        /// \param[in] field_name name of field to be added
        /// \param[in] column column to be added
        virtual turbo::Result<std::shared_ptr<RecordBatch>> add_column(
                int i, std::string field_name, const std::shared_ptr<Array> &column) const;

        /// \brief Replace a column in the record batch, producing a new RecordBatch
        ///
        /// \param[in] i field index, does boundscheck
        /// \param[in] field field to be replaced
        /// \param[in] column column to be replaced
        virtual turbo::Result<std::shared_ptr<RecordBatch>> set_column(
                int i, const std::shared_ptr<Field> &field,
                const std::shared_ptr<Array> &column) const = 0;

        /// \brief Remove column from the record batch, producing a new RecordBatch
        ///
        /// \param[in] i field index, does boundscheck
        virtual turbo::Result<std::shared_ptr<RecordBatch>> remove_column(int i) const = 0;

        /// \brief Return a new RecordBatch whose schema metadata is replaced by
        /// the given key-value metadata; column data is unchanged.
        virtual std::shared_ptr<RecordBatch> replace_schema_metadata(
                const std::shared_ptr<const KeyValueMetadata> &metadata) const = 0;

        /// \brief Name of the i-th column
        const std::string &column_name(int i) const;

        /// \return the number of columns in the record batch
        int num_columns() const;

        /// \return the number of rows (the corresponding length of each column)
        int64_t num_rows() const { return num_rows_; }

        /// \brief Copy the entire RecordBatch to destination MemoryManager
        ///
        /// This uses Array::copy_to on each column of the record batch to create
        /// a new record batch where all underlying buffers for the columns have
        /// been copied to the destination MemoryManager. This uses
        /// MemoryManager::CopyBuffer under the hood.
        turbo::Result<std::shared_ptr<RecordBatch>> copy_to(
                const std::shared_ptr<MemoryManager> &to) const;

        /// \brief View or Copy the entire RecordBatch to destination MemoryManager
        ///
        /// This uses Array::view_or_copy_to on each column of the record batch to create
        /// a new record batch where all underlying buffers for the columns have
        /// been zero-copy viewed on the destination MemoryManager, falling back
        /// to performing a copy if it can't be viewed as a zero-copy buffer. This uses
        /// Buffer::ViewOrCopy under the hood.
        turbo::Result<std::shared_ptr<RecordBatch>> view_or_copy_to(
                const std::shared_ptr<MemoryManager> &to) const;

        /// \brief slice each of the arrays in the record batch
        /// \param[in] offset the starting offset to slice, through end of batch
        /// \return new record batch
        virtual std::shared_ptr<RecordBatch> slice(int64_t offset) const;

        /// \brief slice each of the arrays in the record batch
        /// \param[in] offset the starting offset to slice
        /// \param[in] length the number of elements to slice from offset
        /// \return new record batch
        virtual std::shared_ptr<RecordBatch> slice(int64_t offset, int64_t length) const = 0;

        /// \return pretty_print representation suitable for debugging
        std::string to_string() const;

        /// \brief Return names of all columns
        std::vector<std::string> column_names() const;

        /// \brief Rename columns with provided names
        turbo::Result<std::shared_ptr<RecordBatch>> rename_columns(
                const std::vector<std::string> &names) const;

        /// \brief Return new record batch with specified columns
        turbo::Result<std::shared_ptr<RecordBatch>> select_columns(
                const std::vector<int> &indices) const;

        /// \brief Perform cheap validation checks to determine obvious inconsistencies
        /// within the record batch's schema and internal data.
        ///
        /// This is O(k) where k is the total number of fields and array descendents.
        ///
        /// \return turbo::Status
        virtual turbo::Status validate() const;

        /// \brief Perform extensive validation checks to determine inconsistencies
        /// within the record batch's schema and internal data.
        ///
        /// This is potentially O(k*n) where n is the number of rows.
        ///
        /// \return turbo::Status
        virtual turbo::Status validate_full() const;

        /// \brief EXPERIMENTAL: Return a top-level sync event object for this record batch
        ///
        /// If all of the data for this record batch is in CPU memory, then this
        /// will return null. If the data for this batch is
        /// on a device, then if synchronization is needed before accessing the
        /// data the returned sync event will allow for it.
        ///
        /// \return null or a Device::SyncEvent
        virtual const std::shared_ptr<Device::SyncEvent> &GetSyncEvent() const = 0;

        /// \brief Return the device type the batch's columns are allocated on
        virtual DeviceAllocationType device_type() const = 0;

    protected:
        // Constructible only by subclasses; concrete storage of the columns is
        // left to the derived implementation.
        RecordBatch(const std::shared_ptr<Schema> &schema, int64_t num_rows);

        // Shared schema describing the batch's fields.
        std::shared_ptr<Schema> schema_;
        // Number of semantic rows; each column is expected to have this length.
        int64_t num_rows_;

    private:
        // Record batches are immutable handles shared via shared_ptr; copying
        // and assignment are disabled.
        TURBO_DISALLOW_COPY_AND_ASSIGN(RecordBatch);
    };

    /// \brief A record batch paired with the custom key-value metadata that
    /// accompanies it in a stream (see RecordBatchReader::read_next()).
    struct TURBO_EXPORT RecordBatchWithMetadata {
        /// The record batch itself.
        std::shared_ptr<RecordBatch> batch;
        /// Custom key-value metadata attached to this batch; may be null.
        std::shared_ptr<KeyValueMetadata> custom_metadata;
    };

    /// \brief Abstract interface for reading stream of record batches
    /// \brief Abstract interface for reading stream of record batches
    class TURBO_EXPORT RecordBatchReader {
    public:
        // Element type produced by the stream; exposed for iterator adapters.
        using value_type = std::shared_ptr<RecordBatch>;

        virtual ~RecordBatchReader();

        /// \return the shared schema of the record batches in the stream
        virtual std::shared_ptr<Schema> schema() const = 0;

        /// \brief read the next record batch in the stream. Return null for batch
        /// when reaching end of stream
        ///
        /// Example:
        ///
        /// ```
        /// while (true) {
        ///   std::shared_ptr<RecordBatch> batch;
        ///   TURBO_RETURN_NOT_OK(reader->read_next(&batch));
        ///   if (!batch) {
        ///     break;
        ///   }
        ///   // handling the `batch`, the `batch->num_rows()`
        ///   // might be 0.
        /// }
        /// ```
        ///
        /// \param[out] batch the next loaded batch, null at end of stream. Returning
        /// an empty batch doesn't mean the end of stream because it is valid data.
        /// \return turbo::Status
        virtual turbo::Status read_next(std::shared_ptr<RecordBatch> *batch) = 0;

        /// \brief read the next record batch together with its custom metadata.
        ///
        /// The default implementation reports unimplemented; override in readers
        /// that can surface per-batch metadata.
        virtual turbo::Result<RecordBatchWithMetadata> read_next() {
            return turbo::unimplemented_error("read_next with custom metadata");
        }

        /// \brief turbo::Iterator interface
        ///
        /// Wraps read_next(); yields a null pointer at end of stream, or an error
        /// Result if the underlying read fails.
        turbo::Result<std::shared_ptr<RecordBatch>> next() {
            std::shared_ptr<RecordBatch> batch;
            TURBO_RETURN_NOT_OK(read_next(&batch));
            return batch;
        }

        /// \brief finalize reader
        ///
        /// Default is a no-op that returns OK; override to release resources.
        virtual turbo::Status close() { return turbo::OkStatus(); }

        /// \brief EXPERIMENTAL: Get the device type for record batches this reader produces
        ///
        /// default implementation is to return DeviceAllocationType::kCPU
        virtual DeviceAllocationType device_type() const { return DeviceAllocationType::kCPU; }

        /// \brief Single-pass input iterator over a RecordBatchReader.
        ///
        /// A default-constructed iterator is the end sentinel: it holds a null
        /// batch (see RecordBatchEnd()). Read errors are carried in the stored
        /// Result and surfaced through operator*().
        class RecordBatchReaderIterator {
        public:
            using iterator_category = std::input_iterator_tag;
            using difference_type = std::ptrdiff_t;
            using value_type = std::shared_ptr<RecordBatch>;
            using pointer = value_type const *;
            using reference = value_type const &;

            // End-of-stream sentinel: null batch, no reader.
            RecordBatchReaderIterator() : batch_(RecordBatchEnd()), reader_(nullptr) {}

            // Begin iterator: immediately loads the first batch so that an empty
            // stream compares equal to end() right away.
            explicit RecordBatchReaderIterator(RecordBatchReader *reader)
                    : batch_(RecordBatchEnd()), reader_(reader) {
                next();
            }

            // Iterators compare by their current batch Result; a drained stream
            // holds a null batch and therefore equals the end sentinel.
            bool operator==(const RecordBatchReaderIterator &other) const {
                return batch_ == other.batch_;
            }

            bool operator!=(const RecordBatchReaderIterator &other) const {
                return !(*this == other);
            }

            // Dereference yields the batch, or propagates a read error as a
            // non-OK Result.
            turbo::Result<std::shared_ptr<RecordBatch>> operator*() {
                TURBO_RETURN_NOT_OK(batch_.status());

                return batch_;
            }

            RecordBatchReaderIterator &operator++() {
                next();
                return *this;
            }

            RecordBatchReaderIterator operator++(int) {
                RecordBatchReaderIterator tmp(*this);
                next();
                return tmp;
            }

        private:
            // The value marking end of stream: a null RecordBatch pointer.
            std::shared_ptr<RecordBatch> RecordBatchEnd() {
                return std::shared_ptr<RecordBatch>(nullptr);
            }

            // Advance to the next batch; without a reader this stays at end.
            void next() {
                if (reader_ == nullptr) {
                    batch_ = RecordBatchEnd();
                    return;
                }
                batch_ = reader_->next();
            }

            // Current batch (or error); null pointer means end of stream.
            turbo::Result<std::shared_ptr<RecordBatch>> batch_;
            // Borrowed, non-owning; must outlive the iterator.
            RecordBatchReader *reader_;
        };

        /// \brief Return an iterator to the first record batch in the stream
        RecordBatchReaderIterator begin() { return RecordBatchReaderIterator(this); }

        /// \brief Return an iterator to the end of the stream
        RecordBatchReaderIterator end() { return RecordBatchReaderIterator(); }

        /// \brief consume entire stream as a vector of record batches
        turbo::Result<RecordBatchVector> to_record_batches();

        /// \brief read all batches and concatenate as nebula::Table
        turbo::Result<std::shared_ptr<Table>> to_table();

        /// \brief create a RecordBatchReader from a vector of RecordBatch.
        ///
        /// \param[in] batches the vector of RecordBatch to read from
        /// \param[in] schema schema to conform to. Will be inferred from the first
        ///            element if not provided.
        /// \param[in] device_type the type of device that the batches are allocated on
        static turbo::Result<std::shared_ptr<RecordBatchReader>> create(
                RecordBatchVector batches, std::shared_ptr<Schema> schema = nullptr,
                DeviceAllocationType device_type = DeviceAllocationType::kCPU);

        /// \brief create a RecordBatchReader from an turbo::Iterator of RecordBatch.
        ///
        /// \param[in] batches an iterator of RecordBatch to read from.
        /// \param[in] schema schema that each record batch in iterator will conform to.
        /// \param[in] device_type the type of device that the batches are allocated on
        static turbo::Result<std::shared_ptr<RecordBatchReader>> make_from_iterator(
                turbo::Iterator<std::shared_ptr<RecordBatch>> batches, std::shared_ptr<Schema> schema,
                DeviceAllocationType device_type = DeviceAllocationType::kCPU);
    };

    /// \brief concatenate record batches
    ///
    /// The columns of the new batch are formed by concatenating the same columns
    /// of each input batch. Concatenating multiple batches into a new batch
    /// requires that their schemas are consistent. Batches without columns are
    /// supported (length only, e.g. for count(*) scenarios).
    ///
    /// \param[in] batches a vector of record batches to be concatenated
    /// \param[in] pool memory to store the result will be allocated from this memory pool
    /// \return the concatenated record batch
    TURBO_EXPORT
    turbo::Result<std::shared_ptr<RecordBatch>> concatenate_record_batches(
            const RecordBatchVector &batches, MemoryPool *pool = default_memory_pool());

}  // namespace nebula
