// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/io/memory.h>

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <limits>
#include <mutex>
#include <utility>

#include <nebula/core/buffer.h>
#include <nebula/io/util_internal.h>
#include <nebula/core/memory_pool.h>
#include <turbo/utility/status.h>
#include <nebula/future/future.h>
#include <turbo/files/io_util.h>
#include <turbo/log/logging.h>
#include <turbo/base/macros.h>
#include <nebula/future/memory.h>

namespace nebula::io {

    // ----------------------------------------------------------------------
    // OutputStream that writes to resizable buffer

    static constexpr int64_t kBufferMinimumSize = 256;

    // Construct a closed stream with no backing buffer; reset() must be
    // called before the stream can accept writes.
    BufferOutputStream::BufferOutputStream()
            : is_open_(false),
              capacity_(0),
              position_(0),
              mutable_data_(nullptr) {}

    // Wrap an existing resizable buffer and start appending at offset 0.
    // NOTE(review): assumes `buffer` is mutable — mutable_data() on an
    // immutable buffer would yield an unusable pointer; unlike
    // FixedSizeBufferWriterImpl below, no KCHECK enforces this — confirm callers.
    BufferOutputStream::BufferOutputStream(const std::shared_ptr<ResizableBuffer> &buffer)
            : buffer_(buffer),
              is_open_(true),
              capacity_(buffer->size()),
              position_(0),
              mutable_data_(buffer->mutable_data()) {}

    // Factory: allocate a fresh stream backed by `pool` with room for
    // `initial_capacity` bytes; fails if the initial allocation fails.
    turbo::Result<std::shared_ptr<BufferOutputStream>> BufferOutputStream::create(
            int64_t initial_capacity, MemoryPool *pool) {
        // The default constructor is private, so std::make_shared is unavailable.
        std::shared_ptr<BufferOutputStream> stream(new BufferOutputStream);
        TURBO_RETURN_NOT_OK(stream->reset(initial_capacity, pool));
        return stream;
    }

    // Drop any current buffer and re-open the stream over a freshly
    // allocated buffer of `initial_capacity` bytes taken from `pool`.
    turbo::Status BufferOutputStream::reset(int64_t initial_capacity, MemoryPool *pool) {
        TURBO_MOVE_OR_RAISE(buffer_, allocate_resizable_buffer(initial_capacity, pool));
        mutable_data_ = buffer_->mutable_data();
        capacity_ = initial_capacity;
        position_ = 0;
        is_open_ = true;
        return turbo::OkStatus();
    }

    BufferOutputStream::~BufferOutputStream() {
        // Only run the close-from-destructor path when a buffer was ever
        // attached; a default-constructed (never reset) stream has nothing
        // to flush or shrink.
        if (buffer_) {
            internal::CloseFromDestructor(this);
        }
    }

    // Mark the stream closed and trim the buffer's logical size down to the
    // number of bytes actually written.  Idempotent: closing twice is a no-op.
    turbo::Status BufferOutputStream::close() {
        if (!is_open_) {
            return turbo::OkStatus();
        }
        is_open_ = false;
        if (position_ < capacity_) {
            // resize(..., false): don't reallocate to a smaller block.
            TURBO_RETURN_NOT_OK(buffer_->resize(position_, false));
        }
        return turbo::OkStatus();
    }

    bool BufferOutputStream::closed() const { return !is_open_; }

    // Close the stream and transfer ownership of the written bytes to the
    // caller.  The buffer's trailing padding is zeroed first so the result
    // is safe to hash or byte-compare.
    //
    // Fix: calling finish() a second time used to dereference the null
    // buffer_ (close() is a no-op once closed, then buffer_->zero_padding()
    // crashed).  It now reports an io_error instead.  The cached write state
    // is also cleared so no dangling pointer into the handed-off buffer
    // survives.
    turbo::Result<std::shared_ptr<Buffer>> BufferOutputStream::finish() {
        TURBO_RETURN_NOT_OK(close());
        if (buffer_ == nullptr) {
            // finish() already called, or the stream was never reset().
            return turbo::io_error("BufferOutputStream::finish called with no buffer");
        }
        buffer_->zero_padding();
        is_open_ = false;
        // The cached pointer/extents refer to the buffer we are giving away.
        mutable_data_ = nullptr;
        capacity_ = 0;
        position_ = 0;
        return std::move(buffer_);
    }

    turbo::Result<int64_t> BufferOutputStream::tell() const { return position_; }

    // Append `nbytes` from `data`, growing the backing buffer on demand.
    turbo::Status BufferOutputStream::write(const void *data, int64_t nbytes) {
        if (TURBO_UNLIKELY(!is_open_)) {
            return turbo::io_error("OutputStream is closed");
        }
        DKCHECK(buffer_);
        if (TURBO_UNLIKELY(nbytes <= 0)) {
            // Nothing to copy; treat as a successful no-op.
            return turbo::OkStatus();
        }
        // Grow (by doubling — see Reserve) when the write would reach or
        // pass the current capacity.
        if (TURBO_UNLIKELY(position_ + nbytes >= capacity_)) {
            TURBO_RETURN_NOT_OK(Reserve(nbytes));
        }
        memcpy(mutable_data_ + position_, data, nbytes);
        position_ += nbytes;
        return turbo::OkStatus();
    }

    // Ensure there is capacity for `nbytes` more bytes past the current
    // write position.
    //
    // Always overallocate by doubling.  It seems that it is a better growth
    // strategy, at least for memory_benchmark.cc.
    // This may be because it helps match the allocator's allocation buckets
    // more exactly.  Or perhaps it hits a sweet spot in jemalloc.
    //
    // Fix: guard against int64 overflow — previously a huge `nbytes` could
    // overflow `position_ + nbytes` (UB) or overflow `new_capacity * 2`,
    // turning the doubling loop into an infinite loop.
    turbo::Status BufferOutputStream::Reserve(int64_t nbytes) {
        if (TURBO_UNLIKELY(nbytes < 0 || position_ > std::numeric_limits<int64_t>::max() - nbytes)) {
            return turbo::io_error("BufferOutputStream: invalid reserve size");
        }
        const int64_t required = position_ + nbytes;
        int64_t new_capacity = std::max(kBufferMinimumSize, capacity_);
        while (new_capacity < required) {
            if (new_capacity > std::numeric_limits<int64_t>::max() / 2) {
                // Doubling again would overflow; settle for the exact need.
                new_capacity = required;
            } else {
                new_capacity = new_capacity * 2;
            }
        }
        if (new_capacity > capacity_) {
            TURBO_RETURN_NOT_OK(buffer_->resize(new_capacity));
            capacity_ = new_capacity;
            mutable_data_ = buffer_->mutable_data();
        }
        return turbo::OkStatus();
    }

    // ----------------------------------------------------------------------
    // OutputStream that doesn't write anything

    turbo::Status MockOutputStream::close() {
        // Nothing to flush for the mock; just flip the flag.
        is_open_ = false;
        return turbo::OkStatus();
    }

    bool MockOutputStream::closed() const { return !is_open_; }

    turbo::Result<int64_t> MockOutputStream::tell() const { return extent_bytes_written_; }

    // Pretend to write: the payload is discarded and only the byte count is
    // accumulated.  Note that, unlike BufferOutputStream::write, the
    // open/closed state is deliberately not checked here.
    turbo::Status MockOutputStream::write(const void *data, int64_t nbytes) {
        extent_bytes_written_ += nbytes;
        return turbo::OkStatus();
    }

    // ----------------------------------------------------------------------
    // In-memory buffer writer

    static constexpr int kMemcopyDefaultNumThreads = 1;
    static constexpr int64_t kMemcopyDefaultBlocksize = 64;
    static constexpr int64_t kMemcopyDefaultThreshold = 1024 * 1024;

    // Pimpl for FixedSizeBufferWriter: writes into a pre-sized, mutable
    // Buffer, optionally using a parallel memcpy for large payloads.
    class FixedSizeBufferWriter::FixedSizeBufferWriterImpl {
    public:
        /// Input buffer must be mutable, will abort if not
        explicit FixedSizeBufferWriterImpl(const std::shared_ptr<Buffer> &buffer)
                : is_open_(true),
                  memcopy_num_threads_(kMemcopyDefaultNumThreads),
                  memcopy_blocksize_(kMemcopyDefaultBlocksize),
                  memcopy_threshold_(kMemcopyDefaultThreshold) {
            buffer_ = buffer;
            KCHECK(buffer->is_mutable()) << "Must pass mutable buffer";
            mutable_data_ = buffer->mutable_data();
            size_ = buffer->size();
            position_ = 0;
        }

        // Marks the writer closed; the underlying buffer is left untouched.
        turbo::Status close() {
            is_open_ = false;
            return turbo::OkStatus();
        }

        bool closed() const { return !is_open_; }

        // Move the cursor to an absolute offset in [0, size_].
        turbo::Status Seek(int64_t position) {
            if (position < 0 || position > size_) {
                return turbo::io_error("Seek out of bounds");
            }
            position_ = position;
            return turbo::OkStatus();
        }

        turbo::Result<int64_t> tell() { return position_; }

        // Copy `nbytes` at the current position, failing if the write would
        // run past the fixed-size buffer.  Uses parallel_memcopy only when
        // the payload exceeds the threshold AND more than one thread is
        // configured.
        // NOTE(review): this path takes no lock; only write_at() locks.
        // Concurrent write() calls are therefore not synchronized — confirm
        // whether callers rely on that.
        turbo::Status write(const void *data, int64_t nbytes) {
            TURBO_RETURN_NOT_OK(internal::ValidateWriteRange(position_, nbytes, size_));
            if (nbytes > memcopy_threshold_ && memcopy_num_threads_ > 1) {
                ::nebula::internal::parallel_memcopy(mutable_data_ + position_,
                                                     reinterpret_cast<const uint8_t *>(data), nbytes,
                                                     memcopy_blocksize_, memcopy_num_threads_);
            } else {
                memcpy(mutable_data_ + position_, data, nbytes);
            }
            position_ += nbytes;
            return turbo::OkStatus();
        }

        // Positioned write: seek + write under a mutex (validation is done
        // twice — once here and once inside write()).
        turbo::Status write_at(int64_t position, const void *data, int64_t nbytes) {
            std::lock_guard<std::mutex> guard(lock_);
            TURBO_RETURN_NOT_OK(internal::ValidateWriteRange(position, nbytes, size_));
            TURBO_RETURN_NOT_OK(Seek(position));
            return write(data, nbytes);
        }

        void set_memcopy_threads(int num_threads) { memcopy_num_threads_ = num_threads; }

        void set_memcopy_blocksize(int64_t blocksize) { memcopy_blocksize_ = blocksize; }

        void set_memcopy_threshold(int64_t threshold) { memcopy_threshold_ = threshold; }

    private:
        std::mutex lock_;                 // guards write_at() only
        std::shared_ptr<Buffer> buffer_;  // keeps the target buffer alive
        uint8_t *mutable_data_;           // cached buffer_->mutable_data()
        int64_t size_;                    // fixed capacity of the buffer
        int64_t position_;                // current write cursor
        bool is_open_;

        int memcopy_num_threads_;
        int64_t memcopy_blocksize_;
        int64_t memcopy_threshold_;
    };

    // Public FixedSizeBufferWriter methods: thin forwarders to the pimpl.
    FixedSizeBufferWriter::FixedSizeBufferWriter(const std::shared_ptr<Buffer> &buffer)
            : impl_(new FixedSizeBufferWriterImpl(buffer)) {}

    // Defaulted here (not in the header) so the pimpl type is complete.
    FixedSizeBufferWriter::~FixedSizeBufferWriter() = default;

    turbo::Status FixedSizeBufferWriter::close() { return impl_->close(); }

    bool FixedSizeBufferWriter::closed() const { return impl_->closed(); }

    turbo::Status FixedSizeBufferWriter::Seek(int64_t position) { return impl_->Seek(position); }

    turbo::Result<int64_t> FixedSizeBufferWriter::tell() const { return impl_->tell(); }

    turbo::Status FixedSizeBufferWriter::write(const void *data, int64_t nbytes) {
        return impl_->write(data, nbytes);
    }

    turbo::Status FixedSizeBufferWriter::write_at(int64_t position, const void *data,
                                          int64_t nbytes) {
        return impl_->write_at(position, data, nbytes);
    }

    void FixedSizeBufferWriter::set_memcopy_threads(int num_threads) {
        impl_->set_memcopy_threads(num_threads);
    }

    void FixedSizeBufferWriter::set_memcopy_blocksize(int64_t blocksize) {
        impl_->set_memcopy_blocksize(blocksize);
    }

    void FixedSizeBufferWriter::set_memcopy_threshold(int64_t threshold) {
        impl_->set_memcopy_threshold(threshold);
    }

    // ----------------------------------------------------------------------
    // In-memory buffer reader

    // Wrap `buffer` for reading.  A null buffer degrades to an empty reader
    // over a "" sentinel so data_ is never null.
    // NOTE: data_ and size_ are read from buffer_ (the member, not the
    // already-moved-from parameter); this relies on buffer_ being declared
    // before them so it is initialized first.
    BufferReader::BufferReader(std::shared_ptr<Buffer> buffer)
            : buffer_(std::move(buffer)),
              data_(buffer_ ? buffer_->data() : reinterpret_cast<const uint8_t *>("")),
              size_(buffer_ ? buffer_->size() : 0),
              position_(0),
              is_open_(true) {}

    // Non-owning view over raw bytes: the caller must keep `data` alive for
    // the reader's lifetime.
    BufferReader::BufferReader(const uint8_t *data, int64_t size)
            : BufferReader(std::make_shared<Buffer>(data, size)) {}

    // Non-owning view over a BufferSpan (same lifetime caveat as above).
    BufferReader::BufferReader(BufferSpan buffer)
            : BufferReader(std::make_shared<Buffer>(buffer.data(), buffer.size())) {}

    // Non-owning view over a string_view (same lifetime caveat as above).
    BufferReader::BufferReader(std::string_view data)
            : BufferReader(std::make_shared<Buffer>(data)) {}

    // Build a reader that OWNS its bytes by moving `data` into a buffer.
    std::unique_ptr<BufferReader> BufferReader::from_string(std::string data) {
        auto owned = Buffer::from_string(std::move(data));
        return std::make_unique<BufferReader>(std::move(owned));
    }

    turbo::Status BufferReader::DoClose() {
        // No underlying resource to release; just mark the reader closed.
        is_open_ = false;
        return turbo::OkStatus();
    }

    bool BufferReader::closed() const { return !is_open_; }

    turbo::Result<int64_t> BufferReader::DoTell() const {
        // Fail if the reader was closed; otherwise report the read cursor.
        TURBO_RETURN_NOT_OK(CheckClosed());
        return position_;
    }

    // Return a zero-copy view of up to `nbytes` bytes at the current
    // position, without advancing the cursor.  The view is clamped to the
    // bytes remaining in the data.
    //
    // Fix: a negative `nbytes` used to yield a negative bytes_available,
    // which static_cast<size_t> turned into an astronomically large
    // string_view length (undefined behavior).  It is now clamped to 0.
    turbo::Result<std::string_view> BufferReader::DoPeek(int64_t nbytes) {
        TURBO_RETURN_NOT_OK(CheckClosed());

        const int64_t bytes_available =
                std::max<int64_t>(0, std::min(nbytes, size_ - position_));
        return std::string_view(reinterpret_cast<const char *>(data_) + position_,
                                static_cast<size_t>(bytes_available));
    }

    bool BufferReader::supports_zero_copy() const { return true; }

    // Hint the OS that the given byte ranges will be read soon (madvise-style).
    // Each range is validated against the data size before being advised.
    turbo::Status BufferReader::will_need(const std::vector<ReadRange> &ranges) {

        TURBO_RETURN_NOT_OK(CheckClosed());

        std::vector<turbo::MemoryRegion> regions(ranges.size());
        for (size_t i = 0; i < ranges.size(); ++i) {
            const auto &range = ranges[i];
            // ValidateReadRange returns the (possibly clamped) readable size.
            TURBO_MOVE_OR_RAISE(auto size,
                                   internal::ValidateReadRange(range.offset, range.length, size_));
            // const_cast: the advise API takes non-const regions but does not
            // modify the memory.
            regions[i] = {const_cast<uint8_t *>(data_ + range.offset), static_cast<size_t>(size)};
        }
        const auto st = ::turbo::memory_advise_will_need(regions);
        if (turbo::is_io_error(st)) {
            // Ignore any system-level errors, in case the memory area isn't madvise()-able
            return turbo::OkStatus();
        }
        return st;
    }

    // Synchronous read dressed up as a future: an in-memory source has no
    // real asynchrony (the IOContext is unused), so the result is computed
    // eagerly and returned already finished.
    Future<std::shared_ptr<Buffer>> BufferReader::read_async(const IOContext &,
                                                            int64_t position,
                                                            int64_t nbytes) {
        auto result = DoReadAt(position, nbytes);
        return Future<std::shared_ptr<Buffer>>::make_finished(std::move(result));
    }

    // Copy up to `nbytes` starting at `position` into `buffer`, returning
    // the number of bytes actually copied (clamped by range validation).
    turbo::Result<int64_t> BufferReader::DoReadAt(int64_t position, int64_t nbytes, void *buffer) {
        TURBO_RETURN_NOT_OK(CheckClosed());
        TURBO_MOVE_OR_RAISE(nbytes, internal::ValidateReadRange(position, nbytes, size_));
        DKCHECK_GE(nbytes, 0);
        if (nbytes > 0) {
            memcpy(buffer, data_ + position, nbytes);
        }
        return nbytes;
    }

    // Produce a Buffer covering [position, position + nbytes): zero-copy
    // (a slice of the owning buffer) when one exists and the read is
    // non-empty, otherwise a non-owning view over the raw data.
    turbo::Result<std::shared_ptr<Buffer>> BufferReader::DoReadAt(int64_t position, int64_t nbytes) {
        TURBO_RETURN_NOT_OK(CheckClosed());
        TURBO_MOVE_OR_RAISE(nbytes, internal::ValidateReadRange(position, nbytes, size_));
        DKCHECK_GE(nbytes, 0);

        // Arrange for data to be paged in
        // TURBO_RETURN_NOT_OK(::nebula::internal::MemoryAdviseWillNeed(
        //     {{const_cast<uint8_t*>(data_ + position), static_cast<size_t>(nbytes)}}));

        const bool can_slice = (nbytes > 0) && (buffer_ != nullptr);
        if (!can_slice) {
            return std::make_shared<Buffer>(data_ + position, nbytes);
        }
        return SliceBuffer(buffer_, position, nbytes);
    }

    // Sequential read into `out`: delegate to the positional read at the
    // cursor, then advance by the bytes actually delivered.
    turbo::Result<int64_t> BufferReader::DoRead(int64_t nbytes, void *out) {
        TURBO_RETURN_NOT_OK(CheckClosed());
        TURBO_MOVE_OR_RAISE(int64_t delivered, DoReadAt(position_, nbytes, out));
        position_ += delivered;
        return delivered;
    }

    // Sequential buffered read: advancing by the returned slice's actual
    // size handles short reads near the end of the data correctly.
    turbo::Result<std::shared_ptr<Buffer>> BufferReader::DoRead(int64_t nbytes) {
        TURBO_RETURN_NOT_OK(CheckClosed());
        TURBO_MOVE_OR_RAISE(auto chunk, DoReadAt(position_, nbytes));
        position_ += chunk->size();
        return chunk;
    }

    turbo::Result<int64_t> BufferReader::DoGetSize() {
        // The size is fixed at construction time.
        TURBO_RETURN_NOT_OK(CheckClosed());
        return size_;
    }

    // Move the read cursor to an absolute offset; valid range is [0, size_]
    // (seeking exactly to the end is allowed).
    turbo::Status BufferReader::DoSeek(int64_t position) {
        TURBO_RETURN_NOT_OK(CheckClosed());

        if (position >= 0 && position <= size_) {
            position_ = position;
            return turbo::OkStatus();
        }
        return turbo::io_error("Seek out of bounds");
    }

}  // namespace nebula::io

