// SPDX-FileCopyrightText: Copyright 2021 CitruS Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#ifdef _WIN32

#include <iterator>
#include <unordered_map>
#include <boost/icl/separate_interval_set.hpp>
#include <windows.h>
#include "common/dynamic_library.h"

#elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <boost/icl/interval_set.hpp>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/random.h>
#include <unistd.h>
#include "common/scope_exit.h"

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

#endif // ^^^ Linux ^^^

#include <mutex>
#include <random>
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"

#include "common/alignment.h"
#include "common/assert.h"
#include "common/free_region_manager.h"
#include "common/host_memory.h"
#include "common/logging/log.h"
#include "core/hle/kernel/memory_types.h"

// Bounds of the reserved guest virtual address range; defined elsewhere.
extern u8*    virtual_base;
extern u8*    virtual_last;
// Backing-memory state used by the non-Windows code paths below.
// NOTE(review): on _WIN32 these are shadowed by same-named statics declared inside
// Common::HostMemory -- confirm the duplication is intentional.
static u8*    backing_base;
static size_t backing_size;

namespace Common {

namespace HostMemory {

static FreeRegionManager free_manager{};

#ifdef _WIN32

// NOTE(review): this #include sits inside namespace Common::HostMemory; it is only
// harmless because <windows.h> above already declared these APIs at global scope
// (the include guard makes this a no-op) -- consider hoisting it to the top of the file.
#include <memoryapi.h>

    static HANDLE process = GetCurrentProcess(); ///< Current process handle
    static HANDLE backing_handle{};              ///< File based backing memory

    // These shadow the file-scope backing_size/backing_base declared above.
    static size_t backing_size; ///< Size of the backing memory in bytes
    static u8* backing_base{};  ///< Base address of the mapped backing view

    static std::mutex placeholder_mutex;                           ///< Mutex for placeholders

    /// Reset the free-region bookkeeping over the whole virtual range.
    /// NOTE(review): despite the name, nothing is actually released here -- the backing
    /// section handle and any mapped views stay alive, so they leak on the Initialize()
    /// failure paths that call this. Confirm whether handle cleanup belongs here or
    /// whether "reset" semantics (matching the Linux Release) are intended.
    void Release() {
        free_manager.SetAddressSpace(virtual_base, virtual_last - virtual_base);
    }

    void Initialize() {
        if (backing_size)
            return;
        backing_size = Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize();

        // Allocate backing file map
        backing_handle =
            CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
                                   PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0);
        if (!backing_handle) {
            LOG_CRITICAL(HW_Memory, "Failed to allocate {} MiB of backing memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Allocate a virtual memory for the backing file map as placeholder
        backing_base = static_cast<u8*>(VirtualAlloc2(process, nullptr, backing_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} MiB of virtual memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Map backing placeholder
        void* const ret = MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size,
                                             MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
        if (ret != backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to map {} MiB of virtual memory", backing_size >> 20);
            throw std::bad_alloc{};
        }
        free_manager.SetAddressSpace(virtual_base, virtual_last - virtual_base);
    }

    /// Map length bytes of backing memory at host_offset into the virtual space at
    /// virtual_offset, one 4K page at a time. perms is unused here: pages are always
    /// mapped PAGE_READWRITE and adjusted afterwards via Protect().
    /// NOTE(review): a page that still fails to map after the placeholder split below
    /// is handled by silently returning early -- confirm this is intended.
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
        // Claim the target range from the free-region bookkeeping before mapping.
        free_manager.AllocateBlock((void*)virtual_offset, length);
        for (SIZE_T i = 0; i < length; i += Kernel::PageSize) {
            SIZE_T src = virtual_offset + i;
            SIZE_T dst = host_offset + i;
            if (!MapViewOfFile3(backing_handle, process, (PVOID)src, dst, Kernel::PageSize,
                                MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, NULL, 0)) {
                // The map can fail when the placeholder covering this page is larger
                // than one page. Split a huge-page-sized placeholder into page-sized
                // placeholders under the mutex, then retry the map once.
                {
                    std::unique_lock lock{placeholder_mutex};
                    MEMORY_BASIC_INFORMATION info = {0};
                    // Align src down to its containing huge-page boundary.
                    SIZE_T base = src >> Kernel::HugePageBits << Kernel::HugePageBits;
                    VirtualQueryEx(process, (PVOID)base, &info, sizeof(info));
                    if (info.RegionSize == Kernel::HugePageSize) {
                        // Each split frees one page-sized piece while preserving the
                        // placeholder reservation (MEM_PRESERVE_PLACEHOLDER).
                        for (SIZE_T j = base; j < base + (Kernel::HugePageSize - Kernel::PageSize);
                             j += Kernel::PageSize) {
                            VirtualFreeEx(process, (LPVOID)j, Kernel::PageSize,
                                          MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
                        }
                    }
                }
                if (!MapViewOfFile3(backing_handle, process, (PVOID)src, dst, Kernel::PageSize,
                                    MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, NULL, 0)) {
                    return;
                }
            }
        }
    }

    void Unmap(size_t virtual_offset, size_t length) {
        for (SIZE_T i = virtual_offset; i < virtual_offset + length; i += Kernel::PageSize) {
            UnmapViewOfFile2(process, (PVOID)i, MEM_PRESERVE_PLACEHOLDER);
        }
        free_manager.FreeBlock((void*)virtual_offset, length);
    }

    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
        const bool read = True(perm & MemoryPermission::Read);
        const bool write = True(perm & MemoryPermission::Write);
        DWORD new_flags{};
        if (read && write) {
            new_flags = PAGE_READWRITE;
        } else if (read && !write) {
            new_flags = PAGE_READONLY;
        } else if (!read && !write) {
            new_flags = PAGE_NOACCESS;
        } else {
            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
        }
        for (SIZE_T i = virtual_offset; i < virtual_offset + length; i += Kernel::PageSize) {
            DWORD old_flags{};
            VirtualProtect((LPVOID)i, Kernel::PageSize, new_flags, &old_flags);
        }
    }

    /// Fill a region of the backing memory with fill_value.
    /// Note: memset stores only the low byte of fill_value into each byte of the range.
    void ClearBackingRegion(size_t physical_offset, size_t length, u32 fill_value) {
        std::memset(backing_base + physical_offset, fill_value, length);
    }

    /// Whether [offset, offset + length) lies entirely within the backing memory.
    /// Written so that offset + length cannot wrap around SIZE_MAX: the original
    /// `offset + length <= backing_size` accepted out-of-range spans on overflow.
    bool IsValidMapping(size_t offset, size_t length) {
        return length <= backing_size && offset <= backing_size - length;
    }

    /// Whether the given host pointer lies inside the reserved guest virtual range.
    /// The window [0x7FFE0000, 0x7FFF0000) is excluded -- presumably because it holds
    /// the Windows KUSER_SHARED_DATA page, which must never be treated as guest
    /// memory. TODO(review): confirm the carve-out rationale.
    bool IsInVirtualRange(void* address) noexcept {
        return address >= virtual_base && address < virtual_last &&
               (address < (void*)0x7FFE0000 || address >= (void*)0x7FFF0000);
    }

#elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv

#if defined(__ANDROID__)
#include <android/sharedmem.h>
#endif

    static int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create

    /// Create the memfd/ashmem-backed physical memory and prepare the virtual range.
    /// Idempotent: returns immediately if already initialized.
    /// @throws std::bad_alloc on any failure. The descriptor is closed and state is
    ///         reset on every failure path so a later Initialize() can retry.
    void Initialize() {
        if (backing_size) {
            return; // Already initialized
        }
        size_t virtual_size = virtual_last - virtual_base;
        backing_size = Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize();

        // The mapping code assumes 4K pages; refuse to run on kernels configured with a
        // different page size (e.g. 16K/64K arm64 builds).
        long page_size = sysconf(_SC_PAGESIZE);
        if (page_size != 0x1000) {
            LOG_CRITICAL(HW_Memory, "page size {:#x} is incompatible with 4K paging", page_size);
            backing_size = 0; // Allow a later retry
            throw std::bad_alloc{};
        }

        // Backing memory initialization
#if defined(__ANDROID__)
        fd = ASharedMemory_create("HostMemory", backing_size);

        if (fd < 0) {
            LOG_CRITICAL(HW_Memory, "ASharedMemory_create failed: {}", strerror(errno));
            backing_size = 0;
            throw std::bad_alloc{};
        }
#else

#if defined(__FreeBSD__) && __FreeBSD__ < 13
        // XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30
        fd = shm_open(SHM_ANON, O_RDWR, 0600);
#else
        fd = memfd_create("HostMemory", 0);
#endif
        if (fd < 0) {
            LOG_CRITICAL(HW_Memory, "memfd_create failed: {}", strerror(errno));
            backing_size = 0;
            throw std::bad_alloc{};
        }

        // Size the anonymous file; the newly extended bytes read back as zeros.
        int ret = ftruncate(fd, backing_size);
        if (ret != 0) {
            LOG_CRITICAL(HW_Memory, "ftruncate failed with {}, are you out-of-memory?",
                         strerror(errno));
            close(fd); // Don't leak the descriptor on failure
            fd = -1;
            backing_size = 0;
            throw std::bad_alloc{};
        }
#endif

        backing_base = static_cast<u8*>(
            mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
        if (backing_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            close(fd);
            fd = -1;
            backing_base = nullptr; // Was left as MAP_FAILED before; reset cleanly
            backing_size = 0;
            throw std::bad_alloc{};
        }

        // NOTE(review): the file already reads back as zeros, so this memset only
        // prefaults/commits every backing page up front -- confirm that is wanted.
        memset(backing_base, 0, backing_size);

        // Virtual memory initialization
#if defined(__linux__)
        // Hint the kernel to use transparent huge pages for the virtual range.
        madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
#endif
        free_manager.SetAddressSpace(virtual_base, virtual_size);
    }

    /// Reset the memory system: zero the backing memory, replace the whole virtual
    /// range with an inaccessible anonymous reservation, and mark it all free again.
    /// NOTE(review): the memfd stays open and the backing mapping stays alive, so the
    /// system can be reused without re-running Initialize(); also, this mmap's result
    /// is ignored (unlike Map/Unmap, which ASSERT on it) -- confirm both are intended.
    void Release() {
        size_t virtual_size = virtual_last - virtual_base;
        memset(backing_base, 0, backing_size);
        mmap(virtual_base, virtual_size, PROT_NONE, MAP_ANON | MAP_NORESERVE | MAP_PRIVATE | MAP_FIXED, -1, 0);
        free_manager.SetAddressSpace(virtual_base, virtual_size);
    }

    /// Map length bytes of the backing file at host_offset into the virtual address
    /// space at virtual_offset with the requested permissions.
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
        // Claim the target range from the free-region bookkeeping before mapping over it.
        free_manager.AllocateBlock((void*)virtual_offset, length);

        // Translate the MemoryPermission bits into mmap protection flags.
        int prot = PROT_NONE;
        if (True(perms & MemoryPermission::Read)) {
            prot |= PROT_READ;
        }
        if (True(perms & MemoryPermission::Write)) {
            prot |= PROT_WRITE;
        }
#ifdef ARCHITECTURE_arm64
        // NOTE(review): execute is guarded by ARCHITECTURE_arm64 here but by HAS_NCE in
        // Protect() -- confirm which macro is intended and unify.
        if (True(perms & MemoryPermission::Execute)) {
            prot |= PROT_EXEC;
        }
#endif

        // MAP_FIXED replaces whatever reservation currently covers the range.
        void* const mapped =
            mmap((void*)virtual_offset, length, prot, MAP_SHARED | MAP_FIXED, fd, host_offset);
        ASSERT_MSG(mapped != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    /// Replace [virtual_offset, virtual_offset + length) with an inaccessible anonymous
    /// reservation. Despite the name, no address space is returned to the OS: the range
    /// is kept reserved (PROT_NONE) so a later Map() can claim it again.
    void Unmap(size_t virtual_offset, size_t length) {
        // Record the range as free and coalesce it with any adjacent free regions; the
        // merged span is what gets re-reserved below.
        const auto [region_begin, region_size] =
            free_manager.FreeBlock((void*)virtual_offset, length);

        void* const reserved = mmap(region_begin, region_size, PROT_NONE,
                                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        ASSERT_MSG(reserved != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    /// Change the protection of [virtual_offset, virtual_offset + length) in one call.
    /// NOTE(review): PROT_EXEC is applied only in HAS_NCE builds here, while Map()
    /// guards execute with ARCHITECTURE_arm64 -- confirm which macro is intended.
    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
        const bool read = True(perm & MemoryPermission::Read);
        const bool write = True(perm & MemoryPermission::Write);
        int flags = PROT_NONE;
        if (read) {
            flags |= PROT_READ;
        }
        if (write) {
            flags |= PROT_WRITE;
        }
#ifdef HAS_NCE
        // Evaluated inside the #ifdef so non-NCE builds don't carry an unused local
        // (the original `execute` bool triggered -Wunused-variable there).
        if (True(perm & MemoryPermission::Execute)) {
            flags |= PROT_EXEC;
        }
#endif
        int ret = mprotect((void*)virtual_offset, length, flags);
        ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
    }

    /// Reset a region of the backing memory to fill_value.
    /// On Linux, a zero fill is done with MADV_REMOVE, which punches a hole in the
    /// backing file and releases the physical pages instantly.
    void ClearBackingRegion(size_t physical_offset, size_t length, u32 fill_value) {
#ifdef __linux__
        if (fill_value == 0) {
            // MADV_REMOVE can only produce zeros; the original used it unconditionally,
            // silently ignoring any nonzero fill_value. Fall through to memset otherwise.
            int ret = madvise(backing_base + physical_offset, length, MADV_REMOVE);
            ASSERT_MSG(ret == 0, "madvise failed: {}", strerror(errno));
            return;
        }
#endif
        // Note: memset stores only the low byte of fill_value into each byte.
        std::memset(backing_base + physical_offset, fill_value, length);
    }

    bool IsInVirtualRange(void* address) noexcept {
        return address >= virtual_base && address < virtual_last;
    }

#else // ^^^ Linux ^^^ vvv Generic vvv

    /// No-op fallback for platforms without fastmem support.
    void Initialize() {}

    /// No-op fallback.
    void Release() {}

    /// No-op fallback: nothing is ever mapped on this path.
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm) {}

    /// No-op fallback.
    void Unmap(size_t virtual_offset, size_t length) {}

    /// No-op fallback.
    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {}

    /// NOTE(review): on this path backing_base is never assigned (stays null), so this
    /// memset would write through a null pointer if called -- confirm callers never
    /// reach it when fastmem is unsupported.
    void ClearBackingRegion(size_t physical_offset, size_t length, u32 fill_value) {
        std::memset(backing_base + physical_offset, fill_value, length);
    }

    /// Always false: no virtual range is reserved on this path.
    bool IsInVirtualRange(void* address) noexcept {
        return false;
    }

#endif // ^^^ Generic ^^^

    /// Base pointer of the backing memory (on Windows this resolves to the
    /// namespace-scope backing_base above; elsewhere to the file-scope one).
    u8* BackingBasePointer() noexcept {
        return backing_base;
    }
};


} // namespace Common
