// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
#include <nebula/version.h>
#ifdef NEBULA_HDFS
#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <nebula/core/buffer.h>
#include <nebula/io/hdfs.h>
#include <nebula/io/hdfs_internal.h>
#include <nebula/io/interfaces.h>
#include <nebula/core/memory_pool.h>

#include <turbo/utility/status.h>
#include <turbo/log/logging.h>

using std::size_t;

namespace nebula {

    namespace io {

// Convert a libhdfs "-1 means failure" return code into an errno-carrying
// turbo IO error and return it from the enclosing function.
// RETURN_VALUE is parenthesized so the macro stays correct if a caller ever
// passes a compound expression instead of a plain variable.
#define CHECK_FAILURE(RETURN_VALUE, WHAT)                       \
      do {                                                          \
        if ((RETURN_VALUE) == -1) {                                 \
          return turbo::io_error_with_errno_payload(errno, "HDFS ", WHAT, " failed"); \
        }                                                           \
      } while (0)

        // Default libhdfs read/write buffer size (64 KiB), used when the
        // caller does not specify one.
        static constexpr int kDefaultHdfsBufferSize = 1 << 16;

        // ----------------------------------------------------------------------
        // File reading

        // State and operations shared by readable and writable HDFS file
        // implementations: the file's path, the libhdfs driver shim, the
        // filesystem/file handles, and open/closed bookkeeping.
        class HdfsAnyFileImpl {
        public:
            // Attach this object to an already-opened libhdfs file handle and
            // mark it open.
            void set_members(const std::string &path, internal::LibHdfsShim *driver, hdfsFS fs,
                             hdfsFile handle) {
                path_ = path;
                driver_ = driver;
                fs_ = fs;
                file_ = handle;
                is_open_ = true;
            }

            // Move the file cursor to an absolute byte offset.
            turbo::Status Seek(int64_t position) {
                TURBO_RETURN_NOT_OK(CheckClosed());
                int ret = driver_->Seek(fs_, file_, position);
                CHECK_FAILURE(ret, "seek");
                return turbo::OkStatus();
            }

            // Report the current absolute byte offset of the file cursor.
            turbo::Result<int64_t> tell() {
                TURBO_RETURN_NOT_OK(CheckClosed());
                int64_t ret = driver_->tell(fs_, file_);
                CHECK_FAILURE(ret, "tell");
                return ret;
            }

            bool is_open() const { return is_open_; }

        protected:
            // Reject operations on a file that is closed (or was never opened).
            turbo::Status CheckClosed() {
                if (!is_open_) {
                    return turbo::invalid_argument_error("Operation on closed HDFS file");
                }
                return turbo::OkStatus();
            }

            std::string path_;

            // Non-owning; the shim is managed by the connection machinery.
            internal::LibHdfsShim *driver_ = nullptr;

            // For threadsafety
            std::mutex lock_;

            // These are pointers in libhdfs, so OK to copy.
            // All members are initialized so that an impl whose open failed
            // (set_members() never called) is safely "closed" rather than
            // reading indeterminate values in close()/closed().
            hdfsFS fs_ = nullptr;
            hdfsFile file_ = nullptr;

            bool is_open_ = false;
        };

        namespace {

            // Build the errno-carrying IO error returned whenever a libhdfs
            // GetPathInfo call fails for `path`.
            turbo::Status GetPathInfoFailed(const std::string &path) {
                return turbo::io_error_with_errno_payload(errno, "Calling GetPathInfo for '", path, "' failed");
            }

        }  // namespace

        // Private implementation for read-only files
        class HdfsReadableFile::HdfsReadableFileImpl : public HdfsAnyFileImpl {
        public:
            explicit HdfsReadableFileImpl(MemoryPool *pool) : pool_(pool) {}

            turbo::Status close() {
                if (is_open_) {
                    // is_open_ must be set to false in the beginning, because the destructor
                    // attempts to close the stream again, and if the first close fails, then
                    // the error doesn't get propagated properly and the second close
                    // initiated by the destructor raises a segfault
                    is_open_ = false;
                    int ret = driver_->CloseFile(fs_, file_);
                    CHECK_FAILURE(ret, "CloseFile");
                }
                return turbo::OkStatus();
            }

            bool closed() const { return !is_open_; }

            turbo::Result<int64_t> read_at(int64_t position, int64_t nbytes, uint8_t *buffer) {
                TURBO_RETURN_NOT_OK(CheckClosed());
                if (!driver_->HasPread()) {
                    std::lock_guard<std::mutex> guard(lock_);
                    TURBO_RETURN_NOT_OK(Seek(position));
                    return read(nbytes, buffer);
                }

                constexpr int64_t kMaxBlockSize = std::numeric_limits<int32_t>::max();
                int64_t total_bytes = 0;
                while (nbytes > 0) {
                    const auto block_size = static_cast<tSize>(std::min(kMaxBlockSize, nbytes));
                    tSize ret =
                            driver_->Pread(fs_, file_, static_cast<tOffset>(position), buffer, block_size);
                    CHECK_FAILURE(ret, "read");
                            DKCHECK_LE(ret, block_size);
                    if (ret == 0) {
                        break;  // EOF
                    }
                    buffer += ret;
                    total_bytes += ret;
                    position += ret;
                    nbytes -= ret;
                }
                return total_bytes;
            }

            turbo::Result<std::shared_ptr<Buffer>> read_at(int64_t position, int64_t nbytes) {
                TURBO_RETURN_NOT_OK(CheckClosed());

                TURBO_MOVE_OR_RAISE(auto buffer, allocate_resizable_buffer(nbytes, pool_));
                TURBO_MOVE_OR_RAISE(int64_t bytes_read,
                                       read_at(position, nbytes, buffer->mutable_data()));
                if (bytes_read < nbytes) {
                    TURBO_RETURN_NOT_OK(buffer->resize(bytes_read));
                    buffer->zero_padding();
                }
                // R build with openSUSE155 requires an explicit shared_ptr construction
                return std::shared_ptr<Buffer>(std::move(buffer));
            }

            turbo::Result<int64_t> read(int64_t nbytes, void *buffer) {
                TURBO_RETURN_NOT_OK(CheckClosed());

                int64_t total_bytes = 0;
                while (total_bytes < nbytes) {
                    tSize ret = driver_->read(
                            fs_, file_, reinterpret_cast<uint8_t *>(buffer) + total_bytes,
                            static_cast<tSize>(std::min<int64_t>(buffer_size_, nbytes - total_bytes)));
                    CHECK_FAILURE(ret, "read");
                    total_bytes += ret;
                    if (ret == 0) {
                        break;
                    }
                }
                return total_bytes;
            }

            turbo::Result<std::shared_ptr<Buffer>> read(int64_t nbytes) {
                TURBO_RETURN_NOT_OK(CheckClosed());

                TURBO_MOVE_OR_RAISE(auto buffer, allocate_resizable_buffer(nbytes, pool_));
                TURBO_MOVE_OR_RAISE(int64_t bytes_read, read(nbytes, buffer->mutable_data()));
                if (bytes_read < nbytes) {
                    TURBO_RETURN_NOT_OK(buffer->resize(bytes_read));
                }
                // R build with openSUSE155 requires an explicit shared_ptr construction
                return std::shared_ptr<Buffer>(std::move(buffer));
            }

            turbo::Result<int64_t> get_size() {
                TURBO_RETURN_NOT_OK(CheckClosed());

                hdfsFileInfo *entry = driver_->GetPathInfo(fs_, path_.c_str());
                if (entry == nullptr) {
                    return GetPathInfoFailed(path_);
                }
                int64_t size = entry->mSize;
                driver_->FreeFileInfo(entry, 1);
                return size;
            }

            void set_memory_pool(MemoryPool *pool) { pool_ = pool; }

            void set_buffer_size(int32_t buffer_size) { buffer_size_ = buffer_size; }

        private:
            MemoryPool *pool_;
            int32_t buffer_size_;
        };

        // Public HdfsReadableFile API: thin wrappers delegating to the pimpl.
        HdfsReadableFile::HdfsReadableFile(const io::IOContext &io_context) {
            impl_.reset(new HdfsReadableFileImpl(io_context.pool()));
        }

        HdfsReadableFile::~HdfsReadableFile() {
            // Best-effort close in the destructor; failures are logged, not thrown.
            TURBO_WARN_NOT_OK(impl_->close(), "Failed to close HdfsReadableFile");
        }

        turbo::Status HdfsReadableFile::close() { return impl_->close(); }

        bool HdfsReadableFile::closed() const { return impl_->closed(); }

        // Positional read into a caller-supplied buffer.
        turbo::Result<int64_t> HdfsReadableFile::read_at(int64_t position, int64_t nbytes, void *buffer) {
            return impl_->read_at(position, nbytes, reinterpret_cast<uint8_t *>(buffer));
        }

        // Positional read into a newly allocated Buffer.
        turbo::Result<std::shared_ptr<Buffer>> HdfsReadableFile::read_at(int64_t position,
                                                                 int64_t nbytes) {
            return impl_->read_at(position, nbytes);
        }

        // Sequential read into a caller-supplied buffer.
        turbo::Result<int64_t> HdfsReadableFile::read(int64_t nbytes, void *buffer) {
            return impl_->read(nbytes, buffer);
        }

        // Sequential read into a newly allocated Buffer.
        turbo::Result<std::shared_ptr<Buffer>> HdfsReadableFile::read(int64_t nbytes) {
            return impl_->read(nbytes);
        }

        turbo::Result<int64_t> HdfsReadableFile::get_size() { return impl_->get_size(); }

        turbo::Status HdfsReadableFile::Seek(int64_t position) { return impl_->Seek(position); }

        turbo::Result<int64_t> HdfsReadableFile::tell() const { return impl_->tell(); }

        // ----------------------------------------------------------------------
        // File writing

        // Private implementation for writable-only files
        // Private implementation for writable-only files
        class HdfsOutputStream::HdfsOutputStreamImpl : public HdfsAnyFileImpl {
        public:
            HdfsOutputStreamImpl() = default;

            // Flush and close the underlying libhdfs file handle. Idempotent.
            turbo::Status close() {
                if (is_open_) {
                    // is_open_ must be set to false in the beginning, because the destructor
                    // attempts to close the stream again, and if the first close fails, then
                    // the error doesn't get propagated properly and the second close
                    // initiated by the destructor raises a segfault
                    is_open_ = false;
                    TURBO_RETURN_NOT_OK(FlushInternal());
                    int ret = driver_->CloseFile(fs_, file_);
                    CHECK_FAILURE(ret, "CloseFile");
                }
                return turbo::OkStatus();
            }

            bool closed() const { return !is_open_; }

            turbo::Status flush() {
                TURBO_RETURN_NOT_OK(CheckClosed());

                return FlushInternal();
            }

            // Write `nbytes` from `buffer`, chunked to the 32-bit length the
            // driver's write call accepts. Serialized with lock_ so concurrent
            // writers do not interleave chunks.
            turbo::Status write(const uint8_t *buffer, int64_t nbytes) {
                TURBO_RETURN_NOT_OK(CheckClosed());

                constexpr int64_t kMaxBlockSize = std::numeric_limits<int32_t>::max();

                std::lock_guard<std::mutex> guard(lock_);
                while (nbytes > 0) {
                    const auto block_size = static_cast<tSize>(std::min(kMaxBlockSize, nbytes));
                    tSize ret = driver_->write(fs_, file_, buffer, block_size);
                    CHECK_FAILURE(ret, "write");
                    DKCHECK_LE(ret, block_size);
                    // NOTE(review): a persistent zero-byte return would spin this
                    // loop forever; this assumes libhdfs reports failure as -1 —
                    // confirm against the shim's write contract.
                    buffer += ret;
                    nbytes -= ret;
                }
                return turbo::OkStatus();
            }

        protected:
            // Flush the driver's buffered data for this file to HDFS.
            turbo::Status FlushInternal() {
                int ret = driver_->flush(fs_, file_);
                CHECK_FAILURE(ret, "flush");
                return turbo::OkStatus();
            }
        };

        // Public HdfsOutputStream API: thin wrappers delegating to the pimpl.
        HdfsOutputStream::HdfsOutputStream() { impl_.reset(new HdfsOutputStreamImpl()); }

        HdfsOutputStream::~HdfsOutputStream() {
            // Best-effort close in the destructor; failures are logged, not thrown.
            TURBO_WARN_NOT_OK(impl_->close(), "Failed to close HdfsOutputStream");
        }

        turbo::Status HdfsOutputStream::close() { return impl_->close(); }

        bool HdfsOutputStream::closed() const { return impl_->closed(); }

        turbo::Status HdfsOutputStream::write(const void *buffer, int64_t nbytes) {
            return impl_->write(reinterpret_cast<const uint8_t *>(buffer), nbytes);
        }

        turbo::Status HdfsOutputStream::flush() { return impl_->flush(); }

        turbo::Result<int64_t> HdfsOutputStream::tell() const { return impl_->tell(); }

        // ----------------------------------------------------------------------
        // HDFS client

        // Copy the fields of a libhdfs hdfsFileInfo entry into an HdfsPathInfo.
        // TODO(wesm): this could throw std::bad_alloc in the course of copying strings
        // into the path info object
        static void SetPathInfo(const hdfsFileInfo *input, HdfsPathInfo *out) {
            const bool is_file = (input->mKind == kObjectKindFile);
            out->kind = is_file ? ObjectType::FILE : ObjectType::DIRECTORY;

            // String fields are copied out of the libhdfs-owned entry.
            out->name = input->mName;
            out->owner = input->mOwner;
            out->group = input->mGroup;

            // Timestamps and size, narrowed/widened to the HdfsPathInfo types.
            out->last_access_time = static_cast<int32_t>(input->mLastAccess);
            out->last_modified_time = static_cast<int32_t>(input->mLastMod);
            out->size = static_cast<int64_t>(input->mSize);

            out->replication = input->mReplication;
            out->block_size = input->mBlockSize;
            out->permissions = input->mPermissions;
        }

        // Private implementation
        // Private implementation: wraps the libhdfs shim with turbo::Status
        // error handling. One instance corresponds to one hdfsFS connection.
        class HadoopFileSystem::HadoopFileSystemImpl {
        public:
            HadoopFileSystemImpl() : driver_(nullptr), port_(0), fs_(nullptr) {}

            // Load the libhdfs shim and connect to the namenode described by
            // `config` (host/port/user/kerberos ticket plus extra conf pairs).
            turbo::Status Connect(const HdfsConnectionConfig *config) {
                TURBO_RETURN_NOT_OK(ConnectLibHdfs(&driver_));

                // connect to HDFS with the builder object
                hdfsBuilder *builder = driver_->NewBuilder();
                if (!config->host.empty()) {
                    driver_->BuilderSetNameNode(builder, config->host.c_str());
                }
                driver_->BuilderSetNameNodePort(builder, static_cast<tPort>(config->port));
                if (!config->user.empty()) {
                    driver_->BuilderSetUserName(builder, config->user.c_str());
                }
                if (!config->kerb_ticket.empty()) {
                    driver_->BuilderSetKerbTicketCachePath(builder, config->kerb_ticket.c_str());
                }

                for (const auto &kv: config->extra_conf) {
                    int ret = driver_->BuilderConfSetStr(builder, kv.first.c_str(), kv.second.c_str());
                    CHECK_FAILURE(ret, "confsetstr");
                }

                driver_->BuilderSetForceNewInstance(builder);
                fs_ = driver_->BuilderConnect(builder);

                if (fs_ == nullptr) {
                    return turbo::io_error("HDFS connection failed");
                }
                // Remember the connection parameters for introspection.
                namenode_host_ = config->host;
                port_ = config->port;
                user_ = config->user;
                kerb_ticket_ = config->kerb_ticket;

                return turbo::OkStatus();
            }

            turbo::Status MakeDirectory(const std::string &path) {
                int ret = driver_->MakeDirectory(fs_, path.c_str());
                CHECK_FAILURE(ret, "create directory");
                return turbo::OkStatus();
            }

            // Delete a file, or a directory (recursively when requested).
            turbo::Status Delete(const std::string &path, bool recursive) {
                int ret = driver_->Delete(fs_, path.c_str(), static_cast<int>(recursive));
                CHECK_FAILURE(ret, "delete");
                return turbo::OkStatus();
            }

            turbo::Status Disconnect() {
                int ret = driver_->Disconnect(fs_);
                CHECK_FAILURE(ret, "hdfsFS::Disconnect");
                return turbo::OkStatus();
            }

            bool Exists(const std::string &path) {
                // hdfsExists does not distinguish between RPC failure and the file not
                // existing
                int ret = driver_->Exists(fs_, path.c_str());
                return ret == 0;
            }

            // Total capacity of the filesystem, in bytes.
            turbo::Status get_capacity(int64_t *nbytes) {
                tOffset ret = driver_->get_capacity(fs_);
                CHECK_FAILURE(ret, "get_capacity");
                *nbytes = ret;
                return turbo::OkStatus();
            }

            // Bytes currently used on the filesystem.
            turbo::Status GetUsed(int64_t *nbytes) {
                tOffset ret = driver_->GetUsed(fs_);
                CHECK_FAILURE(ret, "GetUsed");
                *nbytes = ret;
                return turbo::OkStatus();
            }

            turbo::Status GetWorkingDirectory(std::string *out) {
                char buffer[2048];
                if (driver_->GetWorkingDirectory(fs_, buffer, sizeof(buffer) - 1) == nullptr) {
                    return turbo::io_error_with_errno_payload(errno, "HDFS GetWorkingDirectory failed");
                }
                *out = buffer;
                return turbo::OkStatus();
            }

            // Stat a single path, filling `info` from the libhdfs entry.
            turbo::Status GetPathInfo(const std::string &path, HdfsPathInfo *info) {
                hdfsFileInfo *entry = driver_->GetPathInfo(fs_, path.c_str());

                if (entry == nullptr) {
                    return GetPathInfoFailed(path);
                }

                SetPathInfo(entry, info);
                driver_->FreeFileInfo(entry, 1);

                return turbo::OkStatus();
            }

            // Reduced stat: only size and kind.
            turbo::Status Stat(const std::string &path, FileStatistics *stat) {
                HdfsPathInfo info;
                TURBO_RETURN_NOT_OK(GetPathInfo(path, &info));

                stat->size = info.size;
                stat->kind = info.kind;
                return turbo::OkStatus();
            }

            // List only the names of a directory's children.
            turbo::Status GetChildren(const std::string &path, std::vector<std::string> *listing) {
                std::vector<HdfsPathInfo> detailed_listing;
                TURBO_RETURN_NOT_OK(ListDirectory(path, &detailed_listing));
                for (const auto &info: detailed_listing) {
                    listing->push_back(info.name);
                }
                return turbo::OkStatus();
            }

            // Append full path info for each child of `path` to `listing`.
            turbo::Status ListDirectory(const std::string &path, std::vector<HdfsPathInfo> *listing) {
                int num_entries = 0;
                errno = 0;
                hdfsFileInfo *entries = driver_->ListDirectory(fs_, path.c_str(), &num_entries);

                if (entries == nullptr) {
                    // If the directory is empty, entries is NULL but errno is 0. Non-zero
                    // errno indicates error
                    //
                    // Note: errno is thread-local
                    //
                    // XXX(wesm): ARROW-2300; we found with Hadoop 2.6 that libhdfs would set
                    // errno 2/ENOENT for empty directories. To be more robust to this we
                    // double check this case
                    if ((errno == 0) || (errno == ENOENT && Exists(path))) {
                        num_entries = 0;
                    } else {
                        return turbo::io_error_with_errno_payload(errno, "HDFS list directory failed");
                    }
                }

                // Allocate additional space for elements
                int vec_offset = static_cast<int>(listing->size());
                listing->resize(vec_offset + num_entries);

                for (int i = 0; i < num_entries; ++i) {
                    SetPathInfo(entries + i, &(*listing)[vec_offset + i]);
                }

                // Free libhdfs file info
                driver_->FreeFileInfo(entries, num_entries);

                return turbo::OkStatus();
            }

            // Open `path` read-only and wrap the handle in an HdfsReadableFile.
            turbo::Status OpenReadable(const std::string &path, int32_t buffer_size,
                                const io::IOContext &io_context,
                                std::shared_ptr<HdfsReadableFile> *file) {
                errno = 0;
                hdfsFile handle = driver_->OpenFile(fs_, path.c_str(), O_RDONLY, buffer_size, 0, 0);

                if (handle == nullptr) {
                    return turbo::io_error_with_errno_payload(errno, "Opening HDFS file '", path, "' failed");
                }

                // std::make_shared does not work with private ctors
                *file = std::shared_ptr<HdfsReadableFile>(new HdfsReadableFile(io_context));
                (*file)->impl_->set_members(path, driver_, fs_, handle);
                (*file)->impl_->set_buffer_size(buffer_size);

                return turbo::OkStatus();
            }

            // Open `path` for writing (optionally appending) and wrap the
            // handle in an HdfsOutputStream.
            turbo::Status OpenWritable(const std::string &path, bool append, int32_t buffer_size,
                                int16_t replication, int64_t default_block_size,
                                std::shared_ptr<HdfsOutputStream> *file) {
                int flags = O_WRONLY;
                if (append) flags |= O_APPEND;

                errno = 0;
                hdfsFile handle =
                        driver_->OpenFile(fs_, path.c_str(), flags, buffer_size, replication,
                                          static_cast<tSize>(default_block_size));

                if (handle == nullptr) {
                    return turbo::io_error_with_errno_payload(errno, "Opening HDFS file '", path, "' failed");
                }

                // std::make_shared does not work with private ctors
                *file = std::shared_ptr<HdfsOutputStream>(new HdfsOutputStream());
                (*file)->impl_->set_members(path, driver_, fs_, handle);

                return turbo::OkStatus();
            }

            turbo::Status Rename(const std::string &src, const std::string &dst) {
                int ret = driver_->Rename(fs_, src.c_str(), dst.c_str());
                CHECK_FAILURE(ret, "Rename");
                return turbo::OkStatus();
            }

            // Copy src to dst within this filesystem.
            turbo::Status copy(const std::string &src, const std::string &dst) {
                int ret = driver_->copy(fs_, src.c_str(), fs_, dst.c_str());
                // Previously reported "Rename" on failure, which misattributed
                // the failing operation.
                CHECK_FAILURE(ret, "copy");
                return turbo::OkStatus();
            }

            // Move src to dst within this filesystem.
            turbo::Status Move(const std::string &src, const std::string &dst) {
                int ret = driver_->Move(fs_, src.c_str(), fs_, dst.c_str());
                // Previously reported "Rename" on failure, which misattributed
                // the failing operation.
                CHECK_FAILURE(ret, "Move");
                return turbo::OkStatus();
            }

            turbo::Status Chmod(const std::string &path, int mode) {
                int ret = driver_->Chmod(fs_, path.c_str(), static_cast<short>(mode));  // NOLINT
                CHECK_FAILURE(ret, "Chmod");
                return turbo::OkStatus();
            }

            // owner and/or group may be null to leave that field unchanged.
            turbo::Status Chown(const std::string &path, const char *owner, const char *group) {
                int ret = driver_->Chown(fs_, path.c_str(), owner, group);
                CHECK_FAILURE(ret, "Chown");
                return turbo::OkStatus();
            }

        private:
            internal::LibHdfsShim *driver_;

            // Connection parameters captured at Connect() time.
            std::string namenode_host_;
            std::string user_;
            int port_;
            std::string kerb_ticket_;

            hdfsFS fs_;
        };

        // ----------------------------------------------------------------------
        // Public API for HDFSClient

        HadoopFileSystem::HadoopFileSystem() { impl_.reset(new HadoopFileSystemImpl()); }

        HadoopFileSystem::~HadoopFileSystem() {}

        // Create a HadoopFileSystem and connect it; on failure the partially
        // constructed object is still stored in *fs but an error is returned.
        turbo::Status HadoopFileSystem::Connect(const HdfsConnectionConfig *config,
                                         std::shared_ptr<HadoopFileSystem> *fs) {
            // ctor is private, make_shared will not work
            *fs = std::shared_ptr<HadoopFileSystem>(new HadoopFileSystem());

            TURBO_RETURN_NOT_OK((*fs)->impl_->Connect(config));
            return turbo::OkStatus();
        }

        turbo::Status HadoopFileSystem::MakeDirectory(const std::string &path) {
            return impl_->MakeDirectory(path);
        }

        turbo::Status HadoopFileSystem::Delete(const std::string &path, bool recursive) {
            return impl_->Delete(path, recursive);
        }

        // Convenience: recursive delete of a directory.
        turbo::Status HadoopFileSystem::DeleteDirectory(const std::string &path) {
            return Delete(path, true);
        }

        turbo::Status HadoopFileSystem::Disconnect() { return impl_->Disconnect(); }

        bool HadoopFileSystem::Exists(const std::string &path) { return impl_->Exists(path); }

        turbo::Status HadoopFileSystem::GetPathInfo(const std::string &path, HdfsPathInfo *info) {
            return impl_->GetPathInfo(path, info);
        }

        turbo::Status HadoopFileSystem::Stat(const std::string &path, FileStatistics *stat) {
            return impl_->Stat(path, stat);
        }

        turbo::Status HadoopFileSystem::get_capacity(int64_t *nbytes) {
            return impl_->get_capacity(nbytes);
        }

        turbo::Status HadoopFileSystem::GetUsed(int64_t *nbytes) { return impl_->GetUsed(nbytes); }

        turbo::Status HadoopFileSystem::GetWorkingDirectory(std::string *out) {
            return impl_->GetWorkingDirectory(out);
        }

        turbo::Status HadoopFileSystem::GetChildren(const std::string &path,
                                             std::vector<std::string> *listing) {
            return impl_->GetChildren(path, listing);
        }

        turbo::Status HadoopFileSystem::ListDirectory(const std::string &path,
                                               std::vector<HdfsPathInfo> *listing) {
            return impl_->ListDirectory(path, listing);
        }

        // OpenReadable overloads: the buffer-size/io-context variants below
        // all funnel into the impl; defaults are kDefaultHdfsBufferSize and
        // the process-wide default IO context.
        turbo::Status HadoopFileSystem::OpenReadable(const std::string &path, int32_t buffer_size,
                                              std::shared_ptr<HdfsReadableFile> *file) {
            return impl_->OpenReadable(path, buffer_size, io::default_io_context(), file);
        }

        turbo::Status HadoopFileSystem::OpenReadable(const std::string &path,
                                              std::shared_ptr<HdfsReadableFile> *file) {
            return OpenReadable(path, kDefaultHdfsBufferSize, io::default_io_context(), file);
        }

        turbo::Status HadoopFileSystem::OpenReadable(const std::string &path, int32_t buffer_size,
                                              const io::IOContext &io_context,
                                              std::shared_ptr<HdfsReadableFile> *file) {
            return impl_->OpenReadable(path, buffer_size, io_context, file);
        }

        turbo::Status HadoopFileSystem::OpenReadable(const std::string &path,
                                              const io::IOContext &io_context,
                                              std::shared_ptr<HdfsReadableFile> *file) {
            return OpenReadable(path, kDefaultHdfsBufferSize, io_context, file);
        }

        turbo::Status HadoopFileSystem::OpenWritable(const std::string &path, bool append,
                                              int32_t buffer_size, int16_t replication,
                                              int64_t default_block_size,
                                              std::shared_ptr<HdfsOutputStream> *file) {
            return impl_->OpenWritable(path, append, buffer_size, replication, default_block_size,
                                       file);
        }

        // Zero values request the libhdfs defaults for buffer size,
        // replication and block size.
        turbo::Status HadoopFileSystem::OpenWritable(const std::string &path, bool append,
                                              std::shared_ptr<HdfsOutputStream> *file) {
            return OpenWritable(path, append, 0, 0, 0, file);
        }

        turbo::Status HadoopFileSystem::Chmod(const std::string &path, int mode) {
            return impl_->Chmod(path, mode);
        }

        turbo::Status HadoopFileSystem::Chown(const std::string &path, const char *owner,
                                       const char *group) {
            return impl_->Chown(path, owner, group);
        }

        turbo::Status HadoopFileSystem::Rename(const std::string &src, const std::string &dst) {
            return impl_->Rename(src, dst);
        }

        turbo::Status HadoopFileSystem::copy(const std::string &src, const std::string &dst) {
            return impl_->copy(src, dst);
        }

        turbo::Status HadoopFileSystem::Move(const std::string &src, const std::string &dst) {
            return impl_->Move(src, dst);
        }

        // ----------------------------------------------------------------------
        // Allow public API users to check whether we are set up correctly

        // Probe for a usable libhdfs installation by attempting to load the
        // shim; the loaded driver pointer itself is discarded.
        turbo::Status HaveLibHdfs() {
            internal::LibHdfsShim *shim = nullptr;
            return internal::ConnectLibHdfs(&shim);
        }

    }  // namespace io
}  // namespace nebula
#endif