// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/io/interfaces.h>

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <mutex>
#include <sstream>
#include <string_view>
#include <typeinfo>
#include <utility>

#include <nebula/core/buffer.h>
#include <nebula/io/concurrency.h>
#include <nebula/io/util_internal.h>

#include <turbo/utility/status.h>
#include <turbo/base/checked_cast.h>
#include <nebula/future/future.h>
#include <turbo/utility/environment.h>
#include <turbo/functional/iterator.h>
#include <turbo/log/logging.h>
#include <nebula/future/thread_pool.h>

namespace nebula {

    using internal::Executor;
    using internal::TaskHints;
    using internal::ThreadPool;

    namespace io {

        // Convenience constructor: bind the context to the process-wide IO
        // thread pool (internal::GetIOThreadPool()).
        IOContext::IOContext(MemoryPool *pool, StopToken stop_token)
                : IOContext(pool, internal::GetIOThreadPool(), std::move(stop_token)) {}

        const IOContext &default_io_context() {
            // A function-local static sidesteps the static-initialization-order
            // problems a namespace-scope global would have (ARROW-18383).
            static IOContext default_context{};
            return default_context;
        }

        // Report the current thread count of the global IO pool.
        int GetIOThreadPoolCapacity() {
            return internal::GetIOThreadPool()->get_capacity();
        }

        // Resize the global IO pool; the pool itself validates `threads`.
        turbo::Status SetIOThreadPoolCapacity(int threads) {
            auto *pool = internal::GetIOThreadPool();
            return pool->set_capacity(threads);
        }

        // Defaulted out-of-line so the destructor's definition lives in this TU.
        FileInterface::~FileInterface() = default;

        // Default close_async(): run the synchronous close() on the default IO
        // executor and surface the submission result as a Future.
        Future<> FileInterface::close_async() {
            auto *executor = default_io_context().executor();
            return DeferNotOk(executor->submit([this]() { return close(); }));
        }

        // Default abort(): no special teardown, just a regular close().
        turbo::Status FileInterface::abort() { return close(); }

        namespace {

            // Iterator adapter that yields successive blocks of up to
            // `block_size` bytes read from an InputStream.  A zero-sized read
            // marks end-of-stream, after which next() keeps returning nullptr.
            class InputStreamBlockIterator {
            public:
                InputStreamBlockIterator(std::shared_ptr<InputStream> stream, int64_t block_size)
                        : stream_(std::move(stream)), block_size_(block_size) {}

                // Return the next block, nullptr once the stream is exhausted.
                turbo::Result<std::shared_ptr<Buffer>> next() {
                    if (done_) {
                        return nullptr;
                    }
                    TURBO_MOVE_OR_RAISE(auto block, stream_->read(block_size_));
                    if (block->size() != 0) {
                        return block;
                    }
                    // Empty read => end of stream; release resources eagerly.
                    done_ = true;
                    stream_.reset();
                    block.reset();
                    return block;
                }

            protected:
                std::shared_ptr<InputStream> stream_;
                int64_t block_size_;
                bool done_ = false;
            };

        }  // namespace

        // Default context for Readable implementations that don't carry their own.
        const IOContext &Readable::io_context() const { return default_io_context(); }

        // Skip ahead by reading and discarding `nbytes` bytes.
        turbo::Status InputStream::advance(int64_t nbytes) {
            auto discarded = read(nbytes);
            return discarded.status();
        }

        // Default peek(): unsupported unless a subclass overrides it.
        turbo::Result<std::string_view> InputStream::peek(int64_t TURBO_ARG_UNUSED(nbytes)) {
            return turbo::unimplemented_error("peek not implemented");
        }

        // Conservative default: assume reads always copy into caller-owned buffers.
        bool InputStream::supports_zero_copy() const { return false; }

        // Default: no metadata attached to this stream.  A null pointer (not an
        // error) signals "nothing to report".
        turbo::Result<std::shared_ptr<const KeyValueMetadata>> InputStream::read_metadata() {
            std::shared_ptr<const KeyValueMetadata> no_metadata;
            return no_metadata;
        }

        // Default read_metadata_async(): run the synchronous read_metadata() on
        // the context's executor.  Capturing shared_from_this() keeps the stream
        // alive until the task has run.
        Future<std::shared_ptr<const KeyValueMetadata>> InputStream::read_metadata_async(
                const IOContext &ctx) {
            auto self = std::dynamic_pointer_cast<InputStream>(shared_from_this());
            return DeferNotOk(internal::SubmitIO(ctx, [self] { return self->read_metadata(); }));
        }

        // Overload that uses the stream's own IO context.
        Future<std::shared_ptr<const KeyValueMetadata>> InputStream::read_metadata_async() {
            const IOContext &ctx = io_context();
            return read_metadata_async(ctx);
        }

        // Wrap `stream` in an Iterator yielding blocks of up to `block_size` bytes.
        //
        // \param stream     open input stream to read from (shared ownership)
        // \param block_size maximum size of each yielded block; must be > 0
        // \return an Iterator of Buffers, or invalid_argument if the stream is closed
        turbo::Result<turbo::Iterator<std::shared_ptr<Buffer>>> make_input_stream_iterator(
                std::shared_ptr<InputStream> stream, int64_t block_size) {
            if (stream->closed()) {
                return turbo::invalid_argument_error("Cannot take iterator on closed stream");
            }
            DKCHECK_GT(block_size, 0);
            // Move the shared_ptr into the iterator instead of copying it, saving
            // a pair of atomic refcount operations.
            return turbo::Iterator<std::shared_ptr<Buffer>>(
                    InputStreamBlockIterator(std::move(stream), block_size));
        }

        // Pimpl state for RandomAccessFile's default implementations: a mutex
        // that serializes the seek+read pair in the read_at() fallbacks below.
        struct RandomAccessFile::Impl {
            std::mutex lock_;
        };

        // Defined out of line so Impl is a complete type at destruction.
        RandomAccessFile::~RandomAccessFile() = default;

        // Allocate the pimpl eagerly; read_at() below assumes it is non-null.
        RandomAccessFile::RandomAccessFile() : interface_impl_(new Impl()) {}

        // Fallback positional read into a caller buffer: emulated with seek+read,
        // serialized by a mutex so concurrent read_at() calls cannot interleave
        // the two steps.
        turbo::Result<int64_t> RandomAccessFile::read_at(int64_t position, int64_t nbytes, void *out) {
            const std::lock_guard<std::mutex> guard(interface_impl_->lock_);
            TURBO_RETURN_NOT_OK(Seek(position));
            return read(nbytes, out);
        }

        // Fallback positional read returning a fresh buffer: seek+read under the
        // same mutex as the raw-pointer overload.
        turbo::Result<std::shared_ptr<Buffer>> RandomAccessFile::read_at(int64_t position,
                                                                 int64_t nbytes) {
            const std::lock_guard<std::mutex> guard(interface_impl_->lock_);
            TURBO_RETURN_NOT_OK(Seek(position));
            return read(nbytes);
        }

        // Default read_async(): issue the blocking read_at() on the context's
        // executor.  shared_from_this() keeps the file alive until the task runs.
        Future<std::shared_ptr<Buffer>> RandomAccessFile::read_async(const IOContext &ctx,
                                                                    int64_t position,
                                                                    int64_t nbytes) {
            std::shared_ptr<RandomAccessFile> self =
                    std::dynamic_pointer_cast<RandomAccessFile>(shared_from_this());
            auto task = [self, position, nbytes] { return self->read_at(position, nbytes); };
            return DeferNotOk(internal::SubmitIO(ctx, std::move(task)));
        }

        // Overload that uses the file's own IO context.
        Future<std::shared_ptr<Buffer>> RandomAccessFile::read_async(int64_t position,
                                                                    int64_t nbytes) {
            const IOContext &ctx = io_context();
            return read_async(ctx, position, nbytes);
        }

        // Issue one asynchronous read per range, in input order.
        //
        // \param ctx    IO context whose executor runs the reads
        // \param ranges byte ranges to fetch (one future per range)
        std::vector<Future<std::shared_ptr<Buffer>>> RandomAccessFile::read_many_async(
                const IOContext &ctx, const std::vector<ReadRange> &ranges) {
            std::vector<Future<std::shared_ptr<Buffer>>> futures;
            // Reserve up front so submitting futures never reallocates.
            futures.reserve(ranges.size());
            // Iterate by const reference: the original copied each ReadRange.
            for (const auto &range: ranges) {
                futures.push_back(this->read_async(ctx, range.offset, range.length));
            }
            return futures;
        }

        // Overload that uses the file's own IO context.
        std::vector<Future<std::shared_ptr<Buffer>>> RandomAccessFile::read_many_async(
                const std::vector<ReadRange> &ranges) {
            const IOContext &ctx = io_context();
            return read_many_async(ctx, ranges);
        }

        // Default will_need() implementation: advisory prefetch hint, no-op here.
        // Mark the parameter unused (file convention, cf. peek()) so it does not
        // trigger -Wunused-parameter.
        turbo::Status RandomAccessFile::will_need(
                const std::vector<ReadRange> &TURBO_ARG_UNUSED(ranges)) {
            return turbo::OkStatus();
        }

        // write a string_view: delegate to the raw-pointer overload.
        turbo::Status Writable::write(std::string_view data) {
            const auto nbytes = static_cast<int64_t>(data.size());
            return write(data.data(), nbytes);
        }

        // write a Buffer: delegate to the raw-pointer overload.  The caller's
        // shared_ptr keeps the buffer alive for the duration of the call.
        turbo::Status Writable::write(const std::shared_ptr<Buffer> &data) {
            const Buffer &buffer = *data;
            return write(buffer.data(), buffer.size());
        }

        // write a BufferSpan: delegate to the raw-pointer overload.
        turbo::Status Writable::write(BufferSpan data) {
            return write(data.data(), data.size());
        }


        // Default flush(): nothing buffered at this level, so always succeeds.
        turbo::Status Writable::flush() { return turbo::OkStatus(); }

        // An InputStream exposing the window [file_offset, file_offset + nbytes)
        // of a RandomAccessFile as a plain sequential stream.  Reads are clamped
        // so they never run past the end of the segment.
        class FileSegmentReader
                : public internal::InputStreamConcurrencyWrapper<FileSegmentReader> {
        public:
            FileSegmentReader(std::shared_ptr<RandomAccessFile> file, int64_t file_offset,
                              int64_t nbytes)
                    : file_(std::move(file)),
                      closed_(false),
                      position_(0),
                      file_offset_(file_offset),
                      nbytes_(nbytes) {
                FileInterface::set_mode(FileMode::READ);
            }

            // Error if the segment has already been closed.
            turbo::Status CheckOpen() const {
                if (!closed_) {
                    return turbo::OkStatus();
                }
                return turbo::io_error("Stream is closed");
            }

            // Closing only marks this segment closed; the underlying file is
            // shared and stays open.
            turbo::Status DoClose() {
                closed_ = true;
                return turbo::OkStatus();
            }

            // Current position, relative to the start of the segment.
            turbo::Result<int64_t> DoTell() const {
                TURBO_RETURN_NOT_OK(CheckOpen());
                return position_;
            }

            bool closed() const override { return closed_; }

            // Read up to `nbytes` into `out`, clamped at the segment end.
            turbo::Result<int64_t> DoRead(int64_t nbytes, void *out) {
                TURBO_RETURN_NOT_OK(CheckOpen());
                const int64_t clamped = std::min(nbytes, nbytes_ - position_);
                TURBO_MOVE_OR_RAISE(int64_t n_read,
                                       file_->read_at(file_offset_ + position_, clamped, out));
                position_ += n_read;
                return n_read;
            }

            // Read up to `nbytes` into a fresh buffer, clamped at the segment end.
            turbo::Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes) {
                TURBO_RETURN_NOT_OK(CheckOpen());
                const int64_t clamped = std::min(nbytes, nbytes_ - position_);
                TURBO_MOVE_OR_RAISE(auto buffer,
                                       file_->read_at(file_offset_ + position_, clamped));
                position_ += buffer->size();
                return buffer;
            }

        private:
            std::shared_ptr<RandomAccessFile> file_;  // underlying file (shared)
            bool closed_;                             // has close() been called?
            int64_t position_;                        // offset within the segment
            int64_t file_offset_;                     // segment start in the file
            int64_t nbytes_;                          // segment length
        };

        // Expose [file_offset, file_offset + nbytes) of `file` as an InputStream.
        //
        // \param file        file to read from (shared ownership taken)
        // \param file_offset start of the segment; must be >= 0
        // \param nbytes      length of the segment; must be >= 0
        turbo::Result<std::shared_ptr<InputStream>> RandomAccessFile::get_stream(
                std::shared_ptr<RandomAccessFile> file, int64_t file_offset, int64_t nbytes) {
            // Zero is accepted (an empty segment is valid), so the error messages
            // say "non-negative" — the old text claimed "positive", contradicting
            // the `< 0` checks.
            if (file_offset < 0) {
                return turbo::invalid_argument_error("file_offset should be a non-negative value, got: ",
                                                     file_offset);
            }
            if (nbytes < 0) {
                return turbo::invalid_argument_error("nbytes should be a non-negative value, got: ", nbytes);
            }
            return std::make_shared<FileSegmentReader>(std::move(file), file_offset, nbytes);
        }

        // -----------------------------------------------------------------------
        // Implement utilities exported from concurrency.h and util_internal.h

        namespace internal {

            // Close `file` from a destructor, where errors cannot propagate.
            // Release builds log and swallow the error (destructors must not
            // throw); debug builds abort so the missing explicit close() is fixed.
            void CloseFromDestructor(FileInterface *file) {
                turbo::Status st = file->close();
                if (!st.ok()) {
                    // Implementation-defined type name, used only for logging.
                    auto file_type = typeid(*file).name();
#ifdef NDEBUG
                    KLOG(ERROR) << "Error ignored when destroying file of type " << file_type << ": "
                                     << st;
#else
                    std::stringstream ss;
                    ss << "When destroying file of type " << file_type << ": " << st.message();
                    KLOG(FATAL) << st.with_message(ss.str());
#endif
                }
            }

            // Validate a read request against `file_size` and clamp it: returns
            // the number of bytes actually readable at `offset`, i.e.
            // min(size, file_size - offset).  Reading past EOF is not an error,
            // but a negative offset/size or an offset beyond EOF is.
            turbo::Result<int64_t> ValidateReadRange(int64_t offset, int64_t size, int64_t file_size) {
                if (offset < 0 || size < 0) {
                    return turbo::invalid_argument_error("Invalid read (offset = ", offset, ", size = ", size, ")");
                }
                if (offset > file_size) {
                    return turbo::io_error("read out of bounds (offset = ", offset, ", size = ", size,
                                           ") in file of size ", file_size);
                }
                const int64_t available = file_size - offset;
                return size < available ? size : available;
            }

            // Validate that [offset, offset + size) lies entirely inside a file
            // of `file_size` bytes.  Unlike reads, writes may not extend past EOF.
            turbo::Status ValidateWriteRange(int64_t offset, int64_t size, int64_t file_size) {
                if (offset < 0 || size < 0) {
                    return turbo::invalid_argument_error("Invalid write (offset = ", offset, ", size = ", size, ")");
                }
                // Written as `size > file_size - offset` rather than
                // `offset + size > file_size`: the latter can overflow int64_t
                // for huge offsets/sizes, and signed overflow is UB.
                if (offset > file_size || size > file_size - offset) {
                    return turbo::io_error("write out of bounds (offset = ", offset, ", size = ", size,
                                           ") in file of size ", file_size);
                }
                return turbo::OkStatus();
            }

            // Basic sanity check for an IO range: both fields must be
            // non-negative.  No file size is involved, unlike the
            // Validate{Read,Write}Range helpers above.
            turbo::Status ValidateRange(int64_t offset, int64_t size) {
                if (offset < 0 || size < 0) {
                    return turbo::invalid_argument_error("Invalid IO range (offset = ", offset, ", size = ", size, ")");
                }
                return turbo::OkStatus();
            }

#ifndef NDEBUG

            // Debug mode concurrency checking

            // Bookkeeping for a shared/exclusive locking protocol: counts the
            // shared (read) and exclusive (write) locks currently held, guarded
            // by a mutex.  KCHECKs below abort on protocol violations.
            struct SharedExclusiveChecker::Impl {
                std::mutex mutex;
                int64_t n_shared = 0;     // currently-held shared locks
                int64_t n_exclusive = 0;  // currently-held exclusive locks (0 or 1)
            };

            SharedExclusiveChecker::SharedExclusiveChecker() : impl_(new Impl) {}

            // Record a shared acquisition; aborts if an exclusive lock is held.
            void SharedExclusiveChecker::LockShared() {
                std::lock_guard<std::mutex> lock(impl_->mutex);
                // XXX The error message doesn't really describe the actual situation
                // (e.g. read_at() called while read() call in progress)
                KCHECK_EQ(impl_->n_exclusive, 0)
                << "Attempted to take shared lock while locked exclusive";
                ++impl_->n_shared;
            }

            void SharedExclusiveChecker::UnlockShared() {
                std::lock_guard<std::mutex> lock(impl_->mutex);
                // An unbalanced unlock (no shared lock held) is a caller bug.
                KCHECK_GT(impl_->n_shared, 0);
                --impl_->n_shared;
            }

            // Record an exclusive acquisition; aborts if any lock is held.
            void SharedExclusiveChecker::LockExclusive() {
                std::lock_guard<std::mutex> lock(impl_->mutex);
                KCHECK_EQ(impl_->n_shared, 0)
                << "Attempted to take exclusive lock while locked shared";
                KCHECK_EQ(impl_->n_exclusive, 0)
                << "Attempted to take exclusive lock while already locked exclusive";
                ++impl_->n_exclusive;
            }

            void SharedExclusiveChecker::UnlockExclusive() {
                std::lock_guard<std::mutex> lock(impl_->mutex);
                // Exactly one exclusive lock must be held when unlocking.
                KCHECK_EQ(impl_->n_exclusive, 1);
                --impl_->n_exclusive;
            }

#else

            // Release mode no-op concurrency checking: the checks compile away
            // entirely, and Impl carries no state.

            struct SharedExclusiveChecker::Impl {};

            SharedExclusiveChecker::SharedExclusiveChecker() {}

            void SharedExclusiveChecker::LockShared() {}
            void SharedExclusiveChecker::UnlockShared() {}
            void SharedExclusiveChecker::LockExclusive() {}
            void SharedExclusiveChecker::UnlockExclusive() {}

#endif

            // -----------------------------------------------------------------------
            // Global IO thread pool

            namespace {

                // Pool size used when NEBULA_IO_THREADS is unset or invalid.
                constexpr int kDefaultNumIoThreads = 8;

                // Build the global IO thread pool, honoring the NEBULA_IO_THREADS
                // environment variable when it holds a positive integer.
                // Aborts the process if the pool cannot be created.
                std::shared_ptr<ThreadPool> MakeIOThreadPool() {
                    int num_threads = 0;
                    auto env_var = turbo::get_env_string("NEBULA_IO_THREADS");
                    if (env_var.ok()) {
                        auto str = *std::move(env_var);
                        if (!str.empty()) {
                            try {
                                num_threads = std::stoi(str);
                            } catch (...) {
                                // Unparsable value: num_threads stays 0 and the
                                // warning below fires.
                            }
                            if (num_threads <= 0) {
                                KLOG(WARNING)
                                        << "NEBULA_IO_THREADS does not contain a valid number of threads "
                                           "(should be an integer > 0)";
                            }
                        }
                    }
                    const int capacity = num_threads > 0 ? num_threads : kDefaultNumIoThreads;
                    auto pool_result = ThreadPool::create_internal(capacity);
                    if (!pool_result.ok()) {
                        // No usable IO pool: cannot continue.
                        pool_result.status().abort("Failed to create global IO thread pool");
                    }
                    return *std::move(pool_result);
                }

            }  // namespace

            // Process-wide IO pool, created lazily on first use (function-local
            // static avoids static-initialization-order issues).
            ThreadPool *GetIOThreadPool() {
                static std::shared_ptr<ThreadPool> g_pool = MakeIOThreadPool();
                return g_pool.get();
            }

            // -----------------------------------------------------------------------
            // CoalesceReadRanges

            namespace {

                // Merges a list of read ranges into fewer, larger reads: adjacent
                // ranges are coalesced when the gap ("hole") between them is at
                // most hole_size_limit_ and the merged read would not exceed
                // range_size_limit_.  Fewer, larger IO calls usually win on
                // high-latency storage even if some hole bytes are read twice.
                struct ReadRangeCombiner {
                    // Returns the coalesced ranges; errors if inputs partially overlap.
                    turbo::Result<std::vector<ReadRange>> Coalesce(std::vector<ReadRange> ranges) {
                        if (ranges.empty()) {
                            return ranges;
                        }

                        // Remove zero-sized ranges
                        auto end = std::remove_if(ranges.begin(), ranges.end(),
                                                  [](const ReadRange &range) { return range.length == 0; });
                        // Sort in position order
                        std::sort(ranges.begin(), end,
                                  [](const ReadRange &a, const ReadRange &b) { return a.offset < b.offset; });
                        // Remove ranges that overlap 100%
                        end = std::unique(ranges.begin(), end,
                                          [](const ReadRange &left, const ReadRange &right) {
                                              return right.offset >= left.offset &&
                                                     right.offset + right.length <= left.offset + left.length;
                                          });
                        ranges.resize(end - ranges.begin());

                        // Skip further processing if ranges is empty after removing zero-sized ranges.
                        if (ranges.empty()) {
                            return ranges;
                        }

#ifndef NDEBUG
                        // Debug-only sanity pass: ranges must now be sorted and
                        // disjoint — partially overlapping ranges cannot be
                        // coalesced correctly.
                        for (size_t i = 0; i < ranges.size() - 1; ++i) {
                            const auto &left = ranges[i];
                            const auto &right = ranges[i + 1];
                                    DKCHECK_LE(left.offset, right.offset);
                            if (left.offset + left.length > right.offset) {
                                return turbo::io_error("Some read ranges overlap");
                            }
                        }
#endif

                        std::vector<ReadRange> coalesced;

                        auto itr = ranges.begin();
                        // Ensure ranges is not empty.
                        DKCHECK(itr <= ranges.end());
                        // Start of the current coalesced range and end (exclusive) of previous range.
                        // Both are initialized with the start of first range which is a placeholder value.
                        int64_t coalesced_start = itr->offset;
                        int64_t prev_range_end = coalesced_start;

                        for (; itr < ranges.end(); ++itr) {
                            const int64_t current_range_start = itr->offset;
                            const int64_t current_range_end = current_range_start + itr->length;
                            // We don't expect to have 0 sized ranges.
                                    DKCHECK_LT(current_range_start, current_range_end);

                            // At this point, the coalesced range is [coalesced_start, prev_range_end).
                            // Stop coalescing if:
                            //   - coalesced range is too large, or
                            //   - distance (hole/gap) between consecutive ranges is too large.
                            if (current_range_end - coalesced_start > range_size_limit_ ||
                                current_range_start - prev_range_end > hole_size_limit_) {
                                        DKCHECK_LE(coalesced_start, prev_range_end);
                                // append the coalesced range only if coalesced range size > 0.
                                if (prev_range_end > coalesced_start) {
                                    coalesced.push_back({coalesced_start, prev_range_end - coalesced_start});
                                }
                                // Start a new coalesced range.
                                coalesced_start = current_range_start;
                            }

                            // Update the prev_range_end with the current range.
                            prev_range_end = current_range_end;
                        }
                        // append the coalesced range only if coalesced range size > 0.
                        if (prev_range_end > coalesced_start) {
                            coalesced.push_back({coalesced_start, prev_range_end - coalesced_start});
                        }

                        // Postconditions: the output spans exactly the same bytes
                        // as the input (same first offset, same final end).
                                DKCHECK_EQ(coalesced.front().offset, ranges.front().offset);
                                DKCHECK_EQ(coalesced.back().offset + coalesced.back().length,
                                          ranges.back().offset + ranges.back().length);
                        return coalesced;
                    }

                    // NOTE: declaration order matters — CoalesceReadRanges constructs
                    // this struct with aggregate init {hole_size_limit, range_size_limit}.
                    const int64_t hole_size_limit_;   // max gap bridged between two ranges
                    const int64_t range_size_limit_;  // max size of one coalesced read
                };

            };  // namespace

            // Coalesce `ranges` (see ReadRangeCombiner above for the policy).
            // range_size_limit must exceed hole_size_limit, otherwise a coalesced
            // range could never bridge even one maximal hole.
            turbo::Result<std::vector<ReadRange>> CoalesceReadRanges(std::vector<ReadRange> ranges,
                                                              int64_t hole_size_limit,
                                                              int64_t range_size_limit) {
                DKCHECK_GT(range_size_limit, hole_size_limit);
                ReadRangeCombiner combiner{hole_size_limit, range_size_limit};
                return combiner.Coalesce(std::move(ranges));
            }

        }  // namespace internal
    }  // namespace io
}  // namespace nebula
