// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>

#include <nebula/bits/bit_util.h>
#include <nebula/bits/bitmap_generate.h>
#include <nebula/bits/bitmap_ops.h>
#include <nebula/core/buffer.h>
#include <turbo/base/macros.h>
#include <turbo/base/ubsan.h>
#include <turbo/utility/status.h>

namespace nebula {

    // ----------------------------------------------------------------------
    // Buffer builder classes

    /// \class BufferBuilder
    /// \brief A class for incrementally building a contiguous chunk of in-memory
    /// data
    ///
    /// Bytes are appended through the append()/unsafe_append() family; the
    /// backing ResizableBuffer is grown on demand (see GrowByFactor) and
    /// finalized with finish()/FinishWithLength(). Not thread-safe.
    class TURBO_EXPORT BufferBuilder {
    public:
        /// \brief Construct an empty builder that allocates from \p pool with
        /// the given buffer \p alignment. No memory is allocated until data is
        /// appended or resize()/Reserve() is called.
        explicit BufferBuilder(MemoryPool *pool = default_memory_pool(),
                               int64_t alignment = kDefaultBufferAlignment)
                : pool_(pool),
                  data_(/*ensure never null to make ubsan happy and avoid check penalties below*/
                          turbo::make_non_null<uint8_t>()),
                  capacity_(0),
                  size_(0),
                  alignment_(alignment) {}

        /// \brief Constructs new Builder that will start using
        /// the provided buffer until finish/reset are called.
        /// The buffer is not resized.
        explicit BufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
                               MemoryPool *pool = default_memory_pool(),
                               int64_t alignment = kDefaultBufferAlignment)
                : buffer_(std::move(buffer)),
                  pool_(pool),
                  data_(buffer_->mutable_data()),
                  capacity_(buffer_->capacity()),
                  size_(buffer_->size()),
                  alignment_(alignment) {}

        /// \brief resize the buffer to the nearest multiple of 64 bytes
        ///
        /// \param new_capacity the new capacity of the of the builder. Will be
        /// rounded up to a multiple of 64 bytes for padding
        /// \param shrink_to_fit if new capacity is smaller than the existing,
        /// reallocate internal buffer. Set to false to avoid reallocations when
        /// shrinking the builder.
        /// \return turbo::Status
        turbo::Status resize(const int64_t new_capacity, bool shrink_to_fit = true) {
            if (buffer_ == nullptr) {
                // First allocation: create the backing buffer lazily.
                TURBO_MOVE_OR_RAISE(buffer_,
                                    allocate_resizable_buffer(new_capacity, alignment_, pool_));
            } else {
                TURBO_RETURN_NOT_OK(buffer_->resize(new_capacity, shrink_to_fit));
            }
            // Refresh the cached pointer/capacity AFTER the (re)allocation: the
            // buffer may have moved, and the pool may have granted more than
            // requested (e.g. padding).
            capacity_ = buffer_->capacity();
            data_ = buffer_->mutable_data();
            return turbo::OkStatus();
        }

        /// \brief Ensure that builder can accommodate the additional number of bytes
        /// without the need to perform allocations
        ///
        /// \param[in] additional_bytes number of additional bytes to make space for
        /// \return turbo::Status
        turbo::Status Reserve(const int64_t additional_bytes) {
            auto min_capacity = size_ + additional_bytes;
            if (min_capacity <= capacity_) {
                return turbo::OkStatus();
            }
            // Grow geometrically; shrink_to_fit=false so Reserve never shrinks.
            return resize(GrowByFactor(capacity_, min_capacity), false);
        }

        /// \brief Return a capacity expanded by the desired growth factor
        static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) {
            // Doubling capacity except for large Reserve requests. 2x growth strategy
            // (versus 1.5x) seems to have slightly better performance when using
            // jemalloc, but significantly better performance when using the system
            // allocator. See ARROW-6450 for further discussion
            return std::max(new_capacity, current_capacity * 2);
        }

        /// \brief append the given data to the buffer
        ///
        /// The buffer is automatically expanded if necessary.
        turbo::Status append(const void *data, const int64_t length) {
            if (TURBO_UNLIKELY(size_ + length > capacity_)) {
                TURBO_RETURN_NOT_OK(resize(GrowByFactor(capacity_, size_ + length), false));
            }
            unsafe_append(data, length);
            return turbo::OkStatus();
        }

        /// \brief append the given data to the buffer
        ///
        /// The buffer is automatically expanded if necessary.
        turbo::Status append(std::string_view v) { return append(v.data(), v.size()); }

        /// \brief append copies of a value to the buffer
        ///
        /// The buffer is automatically expanded if necessary.
        turbo::Status append(const int64_t num_copies, uint8_t value) {
            TURBO_RETURN_NOT_OK(Reserve(num_copies));
            unsafe_append(num_copies, value);
            return turbo::OkStatus();
        }

        // advance pointer and zero out memory
        turbo::Status advance(const int64_t length) { return append(length, 0); }

        // advance pointer, but don't allocate or zero memory
        void UnsafeAdvance(const int64_t length) { size_ += length; }

        // Unsafe methods don't check existing size: the caller must have
        // Reserve()d enough space beforehand, otherwise this writes out of
        // bounds.
        void unsafe_append(const void *data, const int64_t length) {
            memcpy(data_ + size_, data, static_cast<size_t>(length));
            size_ += length;
        }

        // Unsafe: see unsafe_append(const void*, int64_t).
        void unsafe_append(std::string_view v) {
            unsafe_append(v.data(), static_cast<int64_t>(v.size()));
        }

        // Unsafe: see unsafe_append(const void*, int64_t).
        void unsafe_append(const int64_t num_copies, uint8_t value) {
            memset(data_ + size_, value, static_cast<size_t>(num_copies));
            size_ += num_copies;
        }

        /// \brief Return result of builder as a Buffer object.
        ///
        /// The builder is reset and can be reused afterwards.
        ///
        /// \param[out] out the finalized Buffer object
        /// \param shrink_to_fit if the buffer size is smaller than its capacity,
        /// reallocate to fit more tightly in memory. Set to false to avoid
        /// a reallocation, at the expense of potentially more memory consumption.
        /// \return turbo::Status
        turbo::Status finish(std::shared_ptr<Buffer> *out, bool shrink_to_fit = true) {
            TURBO_RETURN_NOT_OK(resize(size_, shrink_to_fit));
            if (size_ != 0) buffer_->zero_padding();
            *out = buffer_;
            // Defensive: hand out a valid (empty) buffer rather than nullptr.
            if (*out == nullptr) {
                TURBO_MOVE_OR_RAISE(*out, allocate_buffer(0, alignment_, pool_));
            }
            reset();
            return turbo::OkStatus();
        }

        /// \brief Convenience overload of finish() returning the buffer.
        turbo::Result<std::shared_ptr<Buffer>> finish(bool shrink_to_fit = true) {
            std::shared_ptr<Buffer> out;
            TURBO_RETURN_NOT_OK(finish(&out, shrink_to_fit));
            return out;
        }

        /// \brief Like finish, but override the final buffer size
        ///
        /// This is useful after writing data directly into the builder memory
        /// without calling the append methods (basically, when using BufferBuilder
        /// mostly for memory allocation).
        turbo::Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
                                                                bool shrink_to_fit = true) {
            size_ = final_length;
            return finish(shrink_to_fit);
        }

        /// \brief Drop the backing buffer and reset length/capacity to zero.
        ///
        /// NOTE(review): data_ is left pointing into the released buffer; this
        /// is safe only because capacity_ becomes 0, so any checked append or
        /// Reserve() reallocates before writing. Calling an unsafe_append
        /// directly after reset() would write through a dangling pointer.
        void reset() {
            buffer_ = nullptr;
            capacity_ = size_ = 0;
        }

        /// \brief Set size to a smaller value without modifying builder
        /// contents. For reusable BufferBuilder classes
        /// \param[in] position must be non-negative and less than or equal
        /// to the current length()
        void Rewind(int64_t position) { size_ = position; }

        /// \brief Current capacity in bytes.
        int64_t capacity() const { return capacity_; }

        /// \brief Current length (number of appended bytes).
        int64_t length() const { return size_; }

        const uint8_t *data() const { return data_; }

        uint8_t *mutable_data() { return data_; }

        /// \brief Reinterpret the underlying bytes as an array of T (no checks).
        template<typename T>
        const T *data_as() const {
            return reinterpret_cast<const T *>(data_);
        }

        /// \brief Reinterpret the underlying bytes as an array of T (no checks).
        template<typename T>
        T *mutable_data_as() {
            return reinterpret_cast<T *>(data_);
        }

    private:
        std::shared_ptr<ResizableBuffer> buffer_;  // backing storage (may be null before first resize)
        MemoryPool *pool_;                         // allocator, not owned
        uint8_t *data_;                            // cached buffer_->mutable_data(); never null
        int64_t capacity_;                         // bytes allocated
        int64_t size_;                             // bytes appended
        int64_t alignment_;                        // allocation alignment in bytes
    };

    template<typename T, typename Enable = void>
    class TypedBufferBuilder;

    /// \brief A BufferBuilder for building a buffer of arithmetic elements
    ///
    /// Wraps a BufferBuilder, converting element counts to byte counts
    /// (length()/capacity() are expressed in elements of T, not bytes).
    template<typename T>
    class TypedBufferBuilder<
            T, typename std::enable_if<std::is_arithmetic<T>::value ||
                                       std::is_standard_layout<T>::value>::type> {
    public:
        /// \brief Construct an empty builder allocating from \p pool.
        explicit TypedBufferBuilder(MemoryPool *pool = default_memory_pool(),
                                    int64_t alignment = kDefaultBufferAlignment)
                : bytes_builder_(pool, alignment) {}

        /// \brief Construct a builder that starts appending into an existing
        /// buffer; the buffer is not resized until needed.
        explicit TypedBufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
                                    MemoryPool *pool = default_memory_pool())
                : bytes_builder_(std::move(buffer), pool) {}

        /// \brief Wrap an existing BufferBuilder, taking ownership of it.
        explicit TypedBufferBuilder(BufferBuilder builder)
                : bytes_builder_(std::move(builder)) {}

        /// \brief Access the underlying byte-oriented builder.
        BufferBuilder *bytes_builder() { return &bytes_builder_; }

        /// \brief append a single element, expanding the buffer if necessary
        turbo::Status append(T value) {
            return bytes_builder_.append(reinterpret_cast<uint8_t *>(&value), sizeof(T));
        }

        /// \brief append a contiguous array of elements
        turbo::Status append(const T *values, int64_t num_elements) {
            return bytes_builder_.append(reinterpret_cast<const uint8_t *>(values),
                                         num_elements * sizeof(T));
        }

        /// \brief append \p num_copies copies of \p value
        turbo::Status append(const int64_t num_copies, T value) {
            // Reserve() takes the number of *additional* elements and already
            // accounts for the current length, so request only num_copies.
            // (Requesting num_copies + length() here would over-allocate by
            // roughly the current size; the other append overloads and the
            // bool specialization all reserve just the additional count.)
            TURBO_RETURN_NOT_OK(Reserve(num_copies));
            unsafe_append(num_copies, value);
            return turbo::OkStatus();
        }

        // Unsafe methods don't check existing size; callers must Reserve() first.
        void unsafe_append(T value) {
            bytes_builder_.unsafe_append(reinterpret_cast<uint8_t *>(&value), sizeof(T));
        }

        // Unsafe: see unsafe_append(T).
        void unsafe_append(const T *values, int64_t num_elements) {
            bytes_builder_.unsafe_append(reinterpret_cast<const uint8_t *>(values),
                                         num_elements * sizeof(T));
        }

        /// \brief append elements from an iterator range (unsafe: no bounds check)
        template<typename Iter>
        void unsafe_append(Iter values_begin, Iter values_end) {
            auto num_elements = static_cast<int64_t>(std::distance(values_begin, values_end));
            auto data = mutable_data() + length();
            // Advance first: `data` already points past the old end.
            bytes_builder_.UnsafeAdvance(num_elements * sizeof(T));
            std::copy(values_begin, values_end, data);
        }

        /// \brief append \p num_copies copies of \p value (unsafe: no bounds check)
        void unsafe_append(const int64_t num_copies, T value) {
            auto data = mutable_data() + length();
            bytes_builder_.UnsafeAdvance(num_copies * sizeof(T));
            std::fill(data, data + num_copies, value);
        }

        /// \brief resize the buffer to hold \p new_capacity elements
        turbo::Status resize(const int64_t new_capacity, bool shrink_to_fit = true) {
            return bytes_builder_.resize(new_capacity * sizeof(T), shrink_to_fit);
        }

        /// \brief Ensure space for \p additional_elements more elements
        turbo::Status Reserve(const int64_t additional_elements) {
            return bytes_builder_.Reserve(additional_elements * sizeof(T));
        }

        /// \brief Advance by \p length zero-initialized elements
        turbo::Status advance(const int64_t length) {
            return bytes_builder_.advance(length * sizeof(T));
        }

        /// \brief Return result of builder as a Buffer object; the builder is
        /// reset and can be reused afterwards.
        turbo::Status finish(std::shared_ptr<Buffer> *out, bool shrink_to_fit = true) {
            return bytes_builder_.finish(out, shrink_to_fit);
        }

        /// \brief Convenience overload of finish() returning the buffer.
        turbo::Result<std::shared_ptr<Buffer>> finish(bool shrink_to_fit = true) {
            std::shared_ptr<Buffer> out;
            TURBO_RETURN_NOT_OK(finish(&out, shrink_to_fit));
            return out;
        }

        /// \brief Like finish, but override the final buffer size
        ///
        /// This is useful after writing data directly into the builder memory
        /// without calling the append methods (basically, when using TypedBufferBuilder
        /// only for memory allocation).
        turbo::Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
                                                                bool shrink_to_fit = true) {
            return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit);
        }

        void reset() { bytes_builder_.reset(); }

        /// \brief Number of appended elements (not bytes).
        int64_t length() const { return bytes_builder_.length() / sizeof(T); }

        /// \brief Capacity in elements (not bytes).
        int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); }

        const T *data() const { return reinterpret_cast<const T *>(bytes_builder_.data()); }

        T *mutable_data() { return reinterpret_cast<T *>(bytes_builder_.mutable_data()); }

    private:
        BufferBuilder bytes_builder_;
    };

    /// \brief A BufferBuilder for building a buffer containing a bitmap
    ///
    /// Values are packed one bit per element; length()/capacity() are in bits.
    /// A running count of false bits is maintained (see false_count()), which
    /// callers can use e.g. as a null count.
    template<>
    class TypedBufferBuilder<bool> {
    public:
        /// \brief Construct an empty builder allocating from \p pool.
        explicit TypedBufferBuilder(MemoryPool *pool = default_memory_pool(),
                                    int64_t alignment = kDefaultBufferAlignment)
                : bytes_builder_(pool, alignment) {}

        /// \brief Wrap an existing BufferBuilder, taking ownership of it.
        explicit TypedBufferBuilder(BufferBuilder builder)
                : bytes_builder_(std::move(builder)) {}

        /// \brief Access the underlying byte-oriented builder.
        BufferBuilder *bytes_builder() { return &bytes_builder_; }

        /// \brief append a single bit, expanding the buffer if necessary
        turbo::Status append(bool value) {
            TURBO_RETURN_NOT_OK(Reserve(1));
            unsafe_append(value);
            return turbo::OkStatus();
        }

        /// \brief append bits from an array of bytes (one value per byte)
        turbo::Status append(const uint8_t *valid_bytes, int64_t num_elements) {
            TURBO_RETURN_NOT_OK(Reserve(num_elements));
            unsafe_append(valid_bytes, num_elements);
            return turbo::OkStatus();
        }

        /// \brief append \p num_copies copies of \p value
        turbo::Status append(const int64_t num_copies, bool value) {
            TURBO_RETURN_NOT_OK(Reserve(num_copies));
            unsafe_append(num_copies, value);
            return turbo::OkStatus();
        }

        // Unsafe methods don't check existing size; callers must Reserve() first.
        void unsafe_append(bool value) {
            bit_util::SetBitTo(mutable_data(), bit_length_, value);
            if (!value) {
                ++false_count_;
            }
            ++bit_length_;
        }

        /// \brief append bits from an array of bytes (one value per byte)
        void unsafe_append(const uint8_t *bytes, int64_t num_elements) {
            if (num_elements == 0) return;
            int64_t i = 0;
            internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
                bool value = bytes[i++];
                false_count_ += !value;
                return value;
            });
            bit_length_ += num_elements;
        }

        /// \brief append bits from a packed bitmap
        void unsafe_append(const uint8_t *bitmap, int64_t offset, int64_t num_elements) {
            if (num_elements == 0) return;
            internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_);
            false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements);
            bit_length_ += num_elements;
        }

        /// \brief append \p num_copies copies of \p value (unsafe: no bounds check)
        void unsafe_append(const int64_t num_copies, bool value) {
            bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value);
            false_count_ += num_copies * !value;
            bit_length_ += num_copies;
        }

        /// \brief append bits produced by a generator callable
        ///
        /// \tparam count_falses when true, track generated false bits in
        /// false_count_; when false, skip the counting for speed.
        template<bool count_falses, typename Generator>
        void unsafe_append(const int64_t num_elements, Generator &&gen) {
            if (num_elements == 0) return;

            // count_falses is a compile-time constant, so dispatch with
            // `if constexpr` to drop the unused branch entirely.
            if constexpr (count_falses) {
                internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
                    bool value = gen();
                    false_count_ += !value;
                    return value;
                });
            } else {
                internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements,
                                               std::forward<Generator>(gen));
            }
            bit_length_ += num_elements;
        }

        /// \brief resize the buffer to hold \p new_capacity bits
        ///
        /// Any newly allocated bytes are zero-initialized so appends can simply
        /// bump the length.
        turbo::Status resize(const int64_t new_capacity, bool shrink_to_fit = true) {
            const int64_t old_byte_capacity = bytes_builder_.capacity();
            TURBO_RETURN_NOT_OK(
                    bytes_builder_.resize(bit_util::BytesForBits(new_capacity), shrink_to_fit));
            // resize() may have chosen a larger capacity (e.g. for padding),
            // so ask it again before calling memset().
            const int64_t new_byte_capacity = bytes_builder_.capacity();
            if (new_byte_capacity > old_byte_capacity) {
                // The additional buffer space is 0-initialized for convenience,
                // so that other methods can simply bump the length.
                memset(mutable_data() + old_byte_capacity, 0,
                       static_cast<size_t>(new_byte_capacity - old_byte_capacity));
            }
            return turbo::OkStatus();
        }

        /// \brief Ensure space for \p additional_elements more bits
        turbo::Status Reserve(const int64_t additional_elements) {
            return resize(
                    BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements),
                    false);
        }

        /// \brief Advance by \p length bits, which are all counted as false
        /// (resize() zero-initializes new bytes, so the advanced bits are 0).
        turbo::Status advance(const int64_t length) {
            TURBO_RETURN_NOT_OK(Reserve(length));
            bit_length_ += length;
            false_count_ += length;
            return turbo::OkStatus();
        }

        /// \brief Return result of builder as a Buffer object; the builder is
        /// reset and can be reused afterwards.
        turbo::Status finish(std::shared_ptr<Buffer> *out, bool shrink_to_fit = true) {
            // set bytes_builder_.size_ == byte size of data
            bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) -
                                         bytes_builder_.length());
            bit_length_ = false_count_ = 0;
            return bytes_builder_.finish(out, shrink_to_fit);
        }

        /// \brief Convenience overload of finish() returning the buffer.
        turbo::Result<std::shared_ptr<Buffer>> finish(bool shrink_to_fit = true) {
            std::shared_ptr<Buffer> out;
            TURBO_RETURN_NOT_OK(finish(&out, shrink_to_fit));
            return out;
        }

        /// \brief Like finish, but override the final buffer size
        ///
        /// This is useful after writing data directly into the builder memory
        /// without calling the append methods (basically, when using TypedBufferBuilder
        /// only for memory allocation).
        turbo::Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
                                                                bool shrink_to_fit = true) {
            const auto final_byte_length = bit_util::BytesForBits(final_length);
            bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length());
            bit_length_ = false_count_ = 0;
            return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit);
        }

        void reset() {
            bytes_builder_.reset();
            bit_length_ = false_count_ = 0;
        }

        /// \brief Number of appended bits.
        int64_t length() const { return bit_length_; }

        /// \brief Capacity in bits.
        int64_t capacity() const { return bytes_builder_.capacity() * 8; }

        const uint8_t *data() const { return bytes_builder_.data(); }

        uint8_t *mutable_data() { return bytes_builder_.mutable_data(); }

        /// \brief Number of false bits appended so far.
        int64_t false_count() const { return false_count_; }

    private:
        BufferBuilder bytes_builder_;  // packed-bit storage (byte-oriented)
        int64_t bit_length_ = 0;       // number of valid bits
        int64_t false_count_ = 0;      // running count of false bits
    };

}  // namespace nebula
