/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "hmfs_io.h"

#include <algorithm>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <inttypes.h>
#include <new>
#include <securec.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif
#ifdef HAVE_SPARSE_SPARSE_H
#include <sparse/sparse.h>
#endif
#include <unistd.h>

#include "hmfs_utils.h"
#include "device_manager.h"

#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif

namespace OHOS::Hmfs {
#define MIN_NUM_CACHE_ENTRY  1024L
#define MAX_MAX_HASH_COLLISION  16

#define MAX_CHUNK_SIZE        (1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT        (MAX_CHUNK_SIZE / HMFS_BLKSIZE)

hmfs_configuration g_hmfsConfig;

#ifndef HAVE_LSEEK64
/* Fallback for platforms without lseek64(): delegate to lseek().
 * NOTE(review): assumes off_t is 64-bit on such platforms; otherwise offsets
 * above 2 GiB would be truncated — confirm for the supported targets. */
static inline off64_t lseek64(int fd, uint64_t offset, int set)
{
    return lseek(fd, offset, set);
}
#endif

/*
 * Map a global byte *offset to the device containing that block.
 * On success *offset is rewritten to be relative to the start of the matched
 * device and its open fd is returned; -1 when no device covers the offset.
 */
static int GetDeviceFd(uint64_t *offset)
{
    uint64_t blkAddr = *offset >> HMFS_BLKSIZE_BITS;
    for (uint32_t i = 0; i < DeviceManager::GetInstance().GetDeviceCount(); i++) {
        DeviceInfo *deviceInfo = DeviceManager::GetInstance().GetDeviceInfo(i);
        if (deviceInfo == nullptr) {
            /* %u matches uint32_t; the previous %zu expected size_t (UB) */
            HMFS_DEBUG("failed to get device info by id %u", i);
            continue;
        }

        if ((deviceInfo->startBlkId <= blkAddr) && (deviceInfo->endBlkId >= blkAddr)) {
            *offset -= deviceInfo->startBlkId << HMFS_BLKSIZE_BITS;
            return deviceInfo->fd;
        }
    }

    return -1;
}

/* Meyers-singleton accessor for the global block cache instance. */
Dcache& Dcache::GetInstance()
{
    static Dcache instance;
    return instance;
}

/* Dump cache geometry and read hit/miss/replace counters via HMFS_INFO. */
void Dcache::DcachePrintStatistics(void)
{
    /* count cache slots currently holding valid data */
    long usedEntries = 0;
    for (long idx = 0; idx < dcacheConfig.numCacheEntry; ++idx) {
        if (dcacheValid[idx]) {
            usedEntries++;
        }
    }

    /*
    *  c: number of cache entries
    *  u: used entries
    *  RA: number of read access blocks
    *  CH: cache hit
    *  CM: cache miss
    *  Repl: read cache replaced
    */
    HMFS_INFO("\nc, u, RA, CH, CM, Repl=\n");
    HMFS_INFO("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
            dcacheConfig.numCacheEntry, usedEntries,
            dcacheRaccess, dcacheRhit, dcacheRmiss, dcacheRreplace);
}

/*
 * Release every cache array and reset the cache to its uninitialized state.
 * Idempotent; also registered with std::atexit() by DcacheInit().
 */
void Dcache::DcacheRelease()
{
    if (!dcacheInitialized) {
        return;
    }

    dcacheInitialized = false;
    if (g_hmfsConfig.cacheConfig.dbgEn) {
        DcachePrintStatistics();
    }
    /* The arrays were allocated with new[], so scalar delete was undefined
     * behavior; delete[] on nullptr is a no-op, so no guards are needed. */
    delete[] dcacheBlk;
    delete[] dcacheLastused;
    delete[] dcacheBuf;
    delete[] dcacheValid;

    dcacheConfig.numCacheEntry = 0;
    dcacheBlk = nullptr;
    dcacheLastused = nullptr;
    dcacheBuf = nullptr;
    dcacheValid = nullptr;
}

// return 0 for success, error code for failure.
int Dcache::DcacheAllocAll(long n)
{
    if (n <= 0) {
        return -1;
    }
    if ((dcacheBlk = new off64_t[n]) == nullptr ||
        (dcacheLastused = new uint64_t[n]) == nullptr ||
        (dcacheBuf = new char[HMFS_BLKSIZE * n]) == nullptr ||
        (dcacheValid = new bool[n]) == nullptr) {
        DcacheRelease();
        return -1;
    }
    dcacheConfig.numCacheEntry = n;
    return 0;
}

/*
 * Precompute the collision-probe offset table from the base offsets and
 * clamp maxHashCollision so that no probe jumps farther than half the cache.
 */
void Dcache::DcacheRelocateInit(void)
{
    const uint32_t baseCount = sizeof(dcacheRelocateOffset0) / sizeof(dcacheRelocateOffset0[0]);
    const uint32_t tableCount = sizeof(dcacheRelocateOffset) / sizeof(dcacheRelocateOffset[0]);
    if (baseCount != tableCount) {
        HMFS_INFO("dcacheRelocateOffset0 and dcacheRelocateOffset size mismatch\n");
    }

    for (uint32_t idx = 0; idx < tableCount && idx < dcacheConfig.maxHashCollision; ++idx) {
        if (labs(dcacheRelocateOffset0[idx]) > dcacheConfig.numCacheEntry / 2) {
            /* probe would wrap more than half the table: stop probing here */
            dcacheConfig.maxHashCollision = idx;
            break;
        }
        /* bias by numCacheEntry so DcacheRelocate()'s sum stays non-negative */
        dcacheRelocateOffset[idx] = dcacheConfig.numCacheEntry + dcacheRelocateOffset0[idx];
    }
}

/*
 * (Re)initialize the block cache from g_hmfsConfig.cacheConfig.
 * Allocation is retried with halved sizes down to MIN_NUM_CACHE_ENTRY; if
 * even that fails, the cache is left disabled instead of being marked
 * initialized with null buffers (which crashed the first DcacheFind()).
 */
void Dcache::DcacheInit(void)
{
    if (g_hmfsConfig.cacheConfig.numCacheEntry <= 0) {
        return;
    }
    /* release previous cache init, if any */
    DcacheRelease();

    dcacheBlk = nullptr;
    dcacheLastused = nullptr;
    dcacheBuf = nullptr;
    dcacheValid = nullptr;

    dcacheConfig = g_hmfsConfig.cacheConfig;

    long n = std::max(MIN_NUM_CACHE_ENTRY, dcacheConfig.numCacheEntry);
    Dcache& ins = Dcache::GetInstance();
    /* halve alloc size until alloc succeed, or min cache reached */
    while (ins.DcacheAllocAll(n) != 0 && n != MIN_NUM_CACHE_ENTRY) {
        n = std::max(MIN_NUM_CACHE_ENTRY, n / 2);
    }

    /* allocation ultimately failed: keep the cache disabled */
    if (dcacheBlk == nullptr || dcacheLastused == nullptr ||
        dcacheBuf == nullptr || dcacheValid == nullptr) {
        dcacheConfig.numCacheEntry = 0;
        return;
    }

    /* must be the last: data dependent on numCacheEntry */
    ins.DcacheRelocateInit();
    dcacheInitialized = true;

    if (!dcacheExitRegistered) {
        dcacheExitRegistered = true;
        std::atexit(Dcache::DcacheRelease); /* auto release */
    }

    dcacheRaccess = 0;
    dcacheRhit = 0;
    dcacheRmiss = 0;
    dcacheRreplace = 0;
}

/* Return the start of the buffer backing cache slot @entry. */
inline char *Dcache::DcacheAddr(long entry)
{
    return &dcacheBuf[entry * HMFS_BLKSIZE];
}

/* relocate on (n+1)-th collision */
/* dcacheRelocateOffset[n] is pre-biased by numCacheEntry in
 * DcacheRelocateInit(), keeping the sum non-negative before the modulo. */
inline long Dcache::DcacheRelocate(long entry, int n)
{
    return (entry + dcacheRelocateOffset[n]) % dcacheConfig.numCacheEntry;
}

/*
 * Locate the cache slot for block @blk: returns the matching or first empty
 * slot along the probe sequence, or the least-recently-used probed slot when
 * all maxHashCollision probes hold other blocks.
 */
long Dcache::DcacheFind(off64_t blk)
{
    const long entries = dcacheConfig.numCacheEntry;
    if (entries <= 0) {
        return 0;
    }

    long home = blk % entries; /* simple modulo hash */
    long probe = home;
    long lruSlot = home;

    for (unsigned hop = 0; hop < dcacheConfig.maxHashCollision; ++hop) {
        /* exact match or an empty slot ends the search */
        if (!dcacheValid[probe] || dcacheBlk[probe] == blk) {
            return probe;
        }
        if (dcacheLastused[probe] < dcacheLastused[lruSlot]) {
            lruSlot = probe;
        }
        probe = DcacheRelocate(home, hop); /* next probe position */
    }
    return lruSlot; /* max search reached: evict the least recently used */
}

/*
 * Physically read the block at @offset into cache slot @entry and tag the
 * slot with block number @blk. Returns 0 on success, -1 on I/O error.
 */
int Dcache::DcacheIoRead(int fd, long entry, off64_t offset, off64_t blk)
{
    /* require a full block: a short read would mark a half-filled slot valid */
    if (pread64(fd, dcacheBuf + entry * HMFS_BLKSIZE, HMFS_BLKSIZE, offset) != (ssize_t)HMFS_BLKSIZE) {
        HMFS_ERROR("\n read() fail.\n");
        return -1;
    }
    dcacheLastused[entry] = ++dcacheUsetick;
    dcacheValid[entry] = true;
    dcacheBlk[entry] = blk;
    return 0;
}

/*
 *  - Note: Read/Write are not symmetric:
 *       For read, we need to do it block by block, due to the cache nature:
 *           some blocks may be cached, and others don't.
 *       For write, since we always do a write-thru, we can join all writes into one,
 *       and write it once at the caller.  This function updates the cache for write, but
 *       not the do a physical write.  The caller is responsible for the physical write.
 *  - Note: We concentrate read/write together, due to the fact of similar structure to find
 *          the relavant cache entries
 *  - Return values:
 *       0: success
 *       1: cache not available (uninitialized)
 *      -1: error
 */
int Dcache::DcacheUpdateRw(int fd, void *buf, off64_t offset, size_t byteCount, bool isWrite)
{
    if (!dcacheInitialized) {
        DcacheInit(); /* auto initialize */
    }
    if (!dcacheInitialized) {
        return 1; /* not available */
    }
    /* split the byte range into block-sized pieces */
    off64_t blk = offset / HMFS_BLKSIZE;
    int addrInBlk = offset % HMFS_BLKSIZE;   /* intra-block offset of the first piece */
    off64_t start = blk * HMFS_BLKSIZE;      /* device byte offset of current block */

    while (byteCount != 0) {
        /* bytes handled this iteration: at most up to the end of the block */
        size_t curSize = std::min(byteCount, (size_t)(HMFS_BLKSIZE - addrInBlk));
        long entry = DcacheFind(blk);
        if (!isWrite) {
            ++dcacheRaccess;
        }

        if (dcacheValid[entry] && dcacheBlk[entry] == blk) {
            /* cache hit */
            if (isWrite) {  /* write: update cache */
                memcpy_s(DcacheAddr(entry) + addrInBlk, curSize, buf, curSize);
            } else {
                ++dcacheRhit;
            }
        } else {
            /* cache miss; write misses are intentionally not cached */
            if (!isWrite) {
                ++dcacheRmiss;
                if (dcacheValid[entry]) {
                    ++dcacheRreplace; /* evicting another block's data */
                }
                /* read: physical I/O read into cache */
                int err = DcacheIoRead(fd, entry, start, blk);
                if (err) {
                    return err;
                }
            }
        }

        /* read: copy data from cache */
        /* write: nothing to do, since we don't do physical write. */
        if (!isWrite) {
            memcpy_s(buf, curSize, DcacheAddr(entry) + addrInBlk, curSize);
        }

        /* next block */
        ++blk;
        buf = (uint8_t *)buf + curSize;
        start += HMFS_BLKSIZE;
        byteCount -= curSize;
        addrInBlk = 0;  /* only the first block can start mid-block */
    }
    return 0;
}

/*
 * DcacheUpdateCache() just update cache, won't do physical I/O.
 * Thus even no error, we need normal non-cache I/O for actual write
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int Dcache::DcacheUpdateCache(int fd, void *buf, off64_t offset, size_t count)
{
    /* write-path wrapper: refresh any cached copies of [offset, offset+count) */
    return DcacheUpdateRw(fd, buf, offset, count, true);
}

/* handles read into cache + read into buffer  */
/* return value: 1: cache not available, 0: success, -1: I/O error */
int Dcache::DcacheRead(int fd, void *buf, off64_t offset, size_t count)
{
    return DcacheUpdateRw(fd, buf, offset, count, false);
}

std::unique_ptr<HmfsIo> HmfsIo::instance_ = nullptr;

/* Create the singleton on first call; subsequent calls keep the existing one. */
void HmfsIo::CreateInstance(CmdConfig &cfgPara)
{
    if (instance_ != nullptr) {
        return;
    }
    instance_ = std::make_unique<HmfsIo>(cfgPara);
}

/* NOTE(review): dereferences instance_ unconditionally — CreateInstance()
 * must have been called first; confirm all call sites guarantee this. */
HmfsIo& HmfsIo::GetInstance()
{
    return *instance_;
}

/*
 * Read @len bytes at device byte @offset into @buf: via the sparse image in
 * sparse mode, otherwise through the block cache with a plain pread64()
 * fallback. Returns 0 on success, negative on failure.
 */
int HmfsIo::DevRead(void *buf, uint64_t offset, size_t len)
{
    if (cmdPara_.sparseMode) {
        return SparseReadBlk(offset / HMFS_BLKSIZE, len / HMFS_BLKSIZE, buf);
    }

    int fd = GetDeviceFd(&offset);
    if (fd < 0) {
        return fd;
    }

    /* cache result: 0 = served from cache, -1 = I/O error, 1 = cache unavailable */
    int cacheRet = Dcache::GetInstance().DcacheRead(fd, buf, (off64_t)offset, len);
    if (cacheRet <= 0) {
        return cacheRet;
    }

    /* cache unavailable: fall back to a direct uncached read */
    return (pread64(fd, buf, len, offset) < 0) ? -1 : 0;
}

/* Resolve @offset to a device fd; no actual readahead hint is issued here. */
int HmfsIo::DevReadAhead(uint64_t offset)
{
    int fd = GetDeviceFd(&offset);
    return (fd < 0) ? fd : 0;
}

/*
 * Write @len bytes from @buf at device byte @offset. Write-through scheme:
 * cached copies are refreshed first, then the data always goes to the device.
 * Returns 0 on success, negative on failure.
 */
int HmfsIo::DevWrite(void *buf, uint64_t offset, size_t len)
{
    // if (g_hmfsConfig.dryRun) {
    //     return 0;
    // }

    if (cmdPara_.sparseMode) {
        return SparseWriteBlk(offset / HMFS_BLKSIZE, len / HMFS_BLKSIZE, buf);
    }

    /* GetDeviceFd rewrites offset to be device-relative */
    int fd = GetDeviceFd(&offset);
    if (fd < 0) {
        HMFS_DEBUG("failed to GetDeviceFd offset = %" PRIu64 "", offset);
        return fd;
    }

    /*
    * DcacheUpdateCache() just update cache, won't do I/O.
    * Thus even no error, we need normal non-cache I/O for actual write
    */
    if (Dcache::GetInstance().DcacheUpdateCache(fd, buf, (off64_t)offset, len) < 0) {
        HMFS_DEBUG("failed to DcacheUpdateCache");
        return -1;
    }

    if (pwrite64(fd, buf, len, offset) < 0) {
        HMFS_DEBUG("failed to pwrite64 offset = %" PRIu64 ", len = %zu, buf = %p, error = %s",
            offset, len, buf, strerror(errno));
        return -1;
    }
    return 0;
}

/* Write one filesystem block at block address @blkAddr. */
int HmfsIo::DevWriteBlock(void *buf, uint64_t blkAddr)
{
    uint64_t byteOffset = blkAddr << HMFS_BLKSIZE_BITS;
    return DevWrite(buf, byteOffset, HMFS_BLKSIZE);
}

/* Write raw data to the global dump fd; 0 on success, -1 on failure. */
int HmfsIo::DevWriteDump(void *buf, uint64_t offset, size_t len)
{
    return (pwrite64(g_hmfsConfig.dumpFd, buf, len, offset) < 0) ? -1 : 0;
}

/*
 * Fill @len bytes at device byte @offset with the zero pattern in @buf.
 * Returns 0 on success, negative on failure.
 */
int HmfsIo::DevFill(void *buf, uint64_t offset, size_t len)
{
    if (cmdPara_.sparseMode) {
        return SparseWriteZeroedBlk(offset / HMFS_BLKSIZE, len / HMFS_BLKSIZE);
    }

    int fd = GetDeviceFd(&offset);
    if (fd < 0) {
        return fd;
    }

    /* Only allow fill to zero (validated on the pattern's leading byte) */
    if (*((uint8_t *)buf) != 0) {
        return -1;
    }

    return (pwrite64(fd, buf, len, offset) < 0) ? -1 : 0;
}

/* Zero-fill one filesystem block at block address @blkAddr. */
int HmfsIo::DevFillBlock(void *buf, uint64_t blkAddr)
{
    uint64_t byteOffset = blkAddr << HMFS_BLKSIZE_BITS;
    return DevFill(buf, byteOffset, HMFS_BLKSIZE);
}

/* Read one filesystem block at block address @blkAddr into @buf. */
int HmfsIo::DevReadBlock(void *buf, uint64_t blkAddr)
{
    uint64_t byteOffset = blkAddr << HMFS_BLKSIZE_BITS;
    return DevRead(buf, byteOffset, HMFS_BLKSIZE);
}

/* Readahead hint for block address @blkAddr (see DevReadAhead). */
int HmfsIo::DevReadaBlock(uint64_t blkAddr)
{
    uint64_t byteOffset = blkAddr << HMFS_BLKSIZE_BITS;
    return DevReadAhead(byteOffset);
}

/* Read version data from g_hmfsConfig.kd; a successful no-op in sparse mode. */
int HmfsIo::DevReadVersion(void *buf, uint64_t offset, size_t len)
{
    if (cmdPara_.sparseMode) {
        return 0;
    }
    return (pread64(g_hmfsConfig.kd, buf, len, offset) < 0) ? -1 : 0;
}


#ifdef HAVE_SPARSE_SPARSE_H
/*
 * Copy @count blocks starting at block @block out of the in-memory sparse
 * block table into @buf; unmapped blocks read back as zeroes.
 * Always returns 0.
 */
int HmfsIo::SparseReadBlk(uint64_t block, int count, void *buf)
{
    /* static_cast required: void* does not implicitly convert to char* in C++ */
    char *out = static_cast<char *>(buf);

    for (int i = 0; i < count; ++i) {
        uint64_t curBlock = block + i;
        if (blocks_[curBlock]) {
            memcpy_s(out + (i * HMFS_BLKSIZE), HMFS_BLKSIZE, blocks_[curBlock], HMFS_BLKSIZE);
        } else {
            /* unmapped: zero-fill. (The old `else if (blocks_)` guard was
             * dead — blocks_ was already dereferenced just above.) */
            memset(out + (i * HMFS_BLKSIZE), 0, HMFS_BLKSIZE);
        }
    }
    return 0;
}

/*
 * Copy @count blocks from @buf into the in-memory sparse block table,
 * allocating per-block storage on demand.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int HmfsIo::SparseWriteBlk(uint64_t block, int count, const void *buf)
{
    /* static_cast required: const void* does not implicitly convert in C++ */
    const char *in = static_cast<const char *>(buf);

    for (int i = 0; i < count; ++i) {
        uint64_t curBlock = block + i;
        /* a shared zero-page placeholder must be replaced by a private copy */
        if (blocks_[curBlock] == zeroedBlock) {
            blocks_[curBlock] = nullptr;
        }
        if (blocks_[curBlock] == nullptr) {
            blocks_[curBlock] = calloc(1, HMFS_BLKSIZE);
            if (blocks_[curBlock] == nullptr) {
                return -ENOMEM;
            }
        }
        memcpy_s(blocks_[curBlock], HMFS_BLKSIZE, in + (i * HMFS_BLKSIZE), HMFS_BLKSIZE);
    }
    return 0;
}

/* Mark @count blocks starting at @block as all-zero without allocating data. */
int HmfsIo::SparseWriteZeroedBlk(uint64_t block, int count)
{
    for (int i = 0; i < count; ++i) {
        uint64_t idx = block + i;
        /* real data is kept; only unmapped blocks point at the shared zero page */
        if (blocks_[idx] == nullptr) {
            blocks_[idx] = zeroedBlock;
        }
    }
    return 0;
}


/* sparse_file_foreach_chunk() callback: import a chunk's payload into the
 * in-memory block table. Chunk headers (nrBlock == 0 or a length that is not
 * a whole number of blocks) are skipped with success. */
int SparseImportSegment(const void *data, int len, unsigned int block, unsigned int nrBlock)
{
    /* Ignore chunk headers, only write the data */
    bool isHeader = (nrBlock == 0) || ((len % HMFS_BLKSIZE) != 0);
    return isHeader ? 0 : HmfsIo::GetInstance().SparseWriteBlk(block, nrBlock, data);
}

int HmfsIo::SparseMergeBlocks(uint64_t start, uint64_t num, int zero)
{
    if (zero) {
        blocks_[start] = nullptr;
        return sparse_file_add_fill(sparseFile_, 0x0, HMFS_BLKSIZE * num, start);
    }

    char *buf = calloc(num, HMFS_BLKSIZE);
    if (buf == nullptr) {
        fprintf(stderr, "failed to alloc %llu\n", (unsigned long long)num * HMFS_BLKSIZE);
        return -ENOMEM;
    }

    for (uint64_t i = 0; i < num; i++) {
        memcpy_s(buf + i * HMFS_BLKSIZE, HMFS_BLKSIZE, blocks_[start + i], HMFS_BLKSIZE);
        free(blocks_[start + i]);
        blocks_[start + i] = nullptr;
    }

    /* free_sparse_blocks will release this buf. */
    blocks_[start] = buf;

    return sparse_file_add_data(sparseFile_, blocks_[start], HMFS_BLKSIZE * num, start);
}
#else
/* HAVE_SPARSE_SPARSE_H not defined: sparse I/O is compiled out, and these
 * stubs keep callers link-clean by reporting success without touching data. */
int HmfsIo::SparseReadBlk(uint64_t block, int count, void *buf)
{
    return 0;
}

int HmfsIo::SparseWriteBlk(uint64_t block, int count, const void *buf)
{
    return 0;
}

int HmfsIo::SparseWriteZeroedBlk(uint64_t block, int count)
{
    return 0;
}
#endif

/*
 * fsync() every registered device. Returns 0 on success (or when fsync is
 * unavailable at build time), -1 on the first failure.
 */
int HmfsIo::HmfsFsyncDevice()
{
#ifdef HAVE_FSYNC
    for (uint32_t idx = 0; idx < DeviceManager::GetInstance().GetDeviceCount(); idx++) {
        DeviceInfo *info = DeviceManager::GetInstance().GetDeviceInfo(idx);
        if (info == nullptr) {
            continue;
        }
        if (fsync(info->fd) < 0) {
            HMFS_ERROR("failed to do fsync for %s.", info->path.c_str());
            return -1;
        }
    }
#endif
    return 0;
}

/*
 * Set up the libsparse file and the in-memory block table.
 * MKFS creates a fresh sparse file sized to the device; other commands import
 * the existing image and derive the device size from it (rounded down to a
 * whole block). Returns 0 on success, -1 on failure.
 */
int HmfsIo::HmfsInitSparseFile()
{
#ifdef HAVE_SPARSE_SPARSE_H
    if (cmdPara_.func == MKFS) {
        sparseFile_ = sparse_file_new(HMFS_BLOCK_SIZE, cmdPara_.deviceSize);
        if (sparseFile_ == nullptr) {
            return -1;
        }
    } else {
        sparseFile_ = sparse_file_import(cmdPara_.devices[0].fd, true, false);
        if (sparseFile_ == nullptr) {
            return -1;
        }
        cmdPara_.deviceSize = sparse_file_len(sparseFile_, 0, 0);
        /* round down to a whole number of filesystem blocks */
        cmdPara_.deviceSize &= (~((uint64_t)(HMFS_BLOCK_SIZE - 1)));
    }

    if (sparse_file_block_size(sparseFile_) != HMFS_BLOCK_SIZE) {
        HMFS_ERROR("Corrupted sparse file\n");
        return -1;
    }
    blocksCount_ = cmdPara_.deviceSize / HMFS_BLOCK_SIZE;
    blocks_ = calloc(blocksCount_, sizeof(char *));
    if (blocks_ == nullptr) {
        HMFS_ERROR("Calloc Failed for blocks!!!\n");
        return -1;
    }

    zeroedBlock = calloc(1, HMFS_BLOCK_SIZE);
    if (zeroedBlock == nullptr) {
        HMFS_ERROR("Calloc Failed for zeroed block!!!\n");
        /* don't leak the block table on this error path */
        free(blocks_);
        blocks_ = nullptr;
        return -1;
    }

    return sparse_file_foreach_chunk(sparseFile_, true, false, SparseImportSegment, nullptr);
#else
    HMFS_ERROR("Sparse mode is not supported.");
    return -1;
#endif
}

/*
 * Tear down all sparse-mode state: the libsparse handle, the per-block
 * buffers, the block table and the shared zero page. Safe if init failed
 * part-way (null checks throughout).
 */
void HmfsIo::HmfsReleaseSparseResource()
{
#ifdef HAVE_SPARSE_SPARSE_H
    if (!cmdPara_.sparseMode) {
        return;
    }
    if (sparseFile_ != nullptr) {
        sparse_file_destroy(sparseFile_);
        sparseFile_ = nullptr;
    }
    if (blocks_ != nullptr) {
        /* entries may alias the shared zeroedBlock; freeing those here would
         * double-free it when zeroedBlock itself is freed below */
        for (uint64_t j = 0; j < blocksCount_; j++) {
            if (blocks_[j] != zeroedBlock) {
                free(blocks_[j]);
            }
        }
        free(blocks_);
        blocks_ = nullptr;
    }
    free(zeroedBlock);
    zeroedBlock = nullptr;
#endif
}

/*
 * Flush the in-memory block table into the sparse image on device 0.
 * Runs of adjacent data blocks are merged into chunks of at most
 * MAX_CHUNK_COUNT blocks; zeroed blocks become fill chunks. @ret receives the
 * result of each intermediate step (asserted on failure).
 */
void HmfsIo::HmfsFinalizeDeviceSparse(int &ret)
{
#ifdef HAVE_SPARSE_SPARSE_H
    if (!cmdPara_.sparseMode) {
        return;
    }

    /* chunkStart == -1 means "no data chunk currently open" */
    int64_t chunkStart = (blocks_[0] == nullptr) ? -1 : 0;
    DeviceInfo* device = DeviceManager::GetInstance().GetDeviceInfo(0);
    ASSERT(device != nullptr);

    if (cmdPara_.func != MKFS) {
        /* imported image: truncate the backing file and rebuild from scratch */
        sparse_file_destroy(sparseFile_);
        ret = ftruncate(device->fd, 0);
        ASSERT(!ret);
        lseek(device->fd, 0, SEEK_SET);
        sparseFile_ = sparse_file_new(HMFS_BLKSIZE, cmdPara_.deviceSize);
    }

    for (uint64_t j = 0; j < blocksCount_; ++j) {
        if (chunkStart != -1) {
            /* cap the chunk size: flush and reopen once the limit is reached */
            if (j - chunkStart >= MAX_CHUNK_COUNT) {
                ret = SparseMergeBlocks(chunkStart, j - chunkStart, 0);
                ASSERT(!ret);
                chunkStart = -1;
            }
        }

        if (chunkStart == -1) {
            /* no chunk open: skip holes, emit zero blocks as fill chunks,
             * open a data chunk on real data */
            if (blocks_[j] == nullptr) {
                continue;
            }

            if (blocks_[j] == zeroedBlock) {
                ret = SparseMergeBlocks(j, 1, 1);
                ASSERT(!ret);
            } else {
                chunkStart = j;
            }
        } else {
            /* chunk open: extend over real data, flush on hole/zero block */
            if (blocks_[j] && blocks_[j] != zeroedBlock) {
                continue;
            }

            ret = SparseMergeBlocks(chunkStart, j - chunkStart, 0);
            ASSERT(!ret);

            if (blocks_[j] == zeroedBlock) {
                ret = SparseMergeBlocks(j, 1, 1);
                ASSERT(!ret);
            }
            chunkStart = -1;
        }
    }
    /* flush a data chunk still open at the end of the device */
    if (chunkStart != -1) {
        ret = SparseMergeBlocks(chunkStart, blocksCount_ - chunkStart, 0);
        ASSERT(!ret);
    }

    /* NOTE(review): sparse_file_write() result is ignored — confirm intended */
    sparse_file_write(sparseFile_, device->fd, /*gzip*/0, /*sparse*/1, /*crc*/0);

    HmfsReleaseSparseResource();
#endif
}

/*
 * Finalize all devices: write out the sparse image (sparse mode only), then
 * fsync and close every device fd. Returns 0 on success or the first
 * negative error; remaining devices are skipped once an error occurs.
 */
int HmfsIo::HmfsFinalizeDevice()
{
    int ret = 0;
    HmfsFinalizeDeviceSparse(ret);
    /*
    * We should call fsync() to flush out all the dirty pages
    * in the block device page cache.
    */
    for (uint32_t id = 0; id < DeviceManager::GetInstance().GetDeviceCount(); id++) {
        DeviceInfo* device = DeviceManager::GetInstance().GetDeviceInfo(id);
        if (device == nullptr) {
            continue;
        }

#ifdef HAVE_FSYNC
        ret = fsync(device->fd);
        if (ret < 0) {
            HMFS_ERROR("Could not conduct fsync.");
            break;
        }
#endif
        ret = close(device->fd);
        if (ret < 0) {
            HMFS_ERROR("Failed to close device file.");
            break;
        }
    }

    return ret;
}
} // namespace HMFS