/*
 * memory/map.c
 *
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <memory.h>
#include <cpu.h>

/* States an absent (non-present) page table entry can be in, stored in the
 * 3-bit 'type' field of pte_t.absent. PTE_BLANK is 0 so that an all-zero
 * entry decodes as "no mapping information". */
typedef enum
{
    PTE_BLANK = 0,      /* zeroed entry: nothing known about this page */
    PTE_RESERVED,       /* unmapped but attributes retained (memory_unmap_keep_page) */
    PTE_EVICTED,        /* reported with MEMORY_FLAG_EVICTED by memory_query_page_flags */
    PTE_TRANSITIONAL,   /* also reported as evicted; entry is in flux */
    PTE_COMMITTED,      /* backing page committed but not currently mapped */
} pte_type_t;

/* A pointer to a single PTE inside a (self-mapped) page table. */
typedef page_table_t pte_pointer_t;

/* A page table entry, decoded three ways over the same storage:
 *  - raw_entry:  the whole entry as an integer (what gets atomically
 *                loaded/stored by read_pte/cmpxchg_pte);
 *  - is_present: bit 0, the hardware present bit, selects which of the
 *                two struct views below is meaningful;
 *  - present:    hardware layout when the page is mapped;
 *  - absent:     software-defined layout when it is not (bit 0 clear, so
 *                the CPU ignores the remaining bits).
 * NOTE(review): the 'number' field occupies the same bit range (12..51)
 * in both views, but the flag bits below it do NOT line up between the
 * two layouts — conversions must copy flags field-by-field. */
typedef union
{
    bool_t is_present : 1;
    physical_t raw_entry;

    struct
    {
        bool_t always_set : 1;      /* the present bit itself; always 1 here */
        bool_t writable : 1;
        bool_t usermode : 1;
        bool_t no_writeback : 1;
        bool_t no_cache : 1;
        bool_t accessed : 1;        /* set by hardware */
        bool_t dirty : 1;           /* set by hardware */
        bool_t large : 1;
        bool_t global : 1;
        bool_t cow : 1;             /* software: copy-on-write (available bit) */
        bool_t sticky : 1;          /* software: replicated across tables (available bit) */
        size_t reserved1 : 1;
        page_num_t number : 40;     /* physical page frame number */
        size_t reserved2 : 11;  /* PAE only */
        bool_t no_execute : 1;  /* PAE only */
    } present;

    struct
    {
        bool_t always_clear : 1;    /* the present bit; always 0 here */
        pte_type_t type : 3;        /* pte_type_t state of the absent entry */
        bool_t readable : 1;
        bool_t writable : 1;
        bool_t executable : 1;
        bool_t usermode : 1;
        bool_t sticky : 1;
        bool_t cow : 1;
        size_t reserved1 : 2;
        page_num_t number : 40;     /* committed page frame number, if any */
        size_t reserved2 : 12;  /* PAE only */
    } absent;
} pte_t;

/* Per-level geometry of the paging hierarchy: 'shift' is the bit offset of
 * the level's index within a virtual address, 'bits' its index width.
 * Initialized for 32-bit non-PAE (two 10-bit levels); the commented rows
 * show the values for the other supported modes. */
static struct
{
    int shift;
    int bits;
} page_table_levels[MAX_PAGING_LEVELS] = { {22, 10}, {12, 10} };
// PAE:  { {30, 2}, {21, 9}, {12, 9} }
// LMA:  { {39, 9}, {30, 9}, {21, 9}, {12, 9} }
// VA57: { {48, 9}, {39, 9}, {30, 9}, {21, 9}, {12, 9} }

/* Handles for the two self-mapped page table windows: the active (default)
 * table and the staging (shadow) table loaded at the second-to-last
 * top-level slot. NOTE(review): the constants encode the recursive-mapping
 * base addresses for the 32-bit non-PAE configuration — confirm they are
 * re-derived when paging_levels/table_entry_size change. */
const page_table_t memory_default_table = (page_table_t)0xFFFFFFFC;
const page_table_t memory_shadow_table = (page_table_t)0xFFFFFFF8;
size_t memory_table_size = 0x400000;            /* span mapped by one top-level entry */
byte_t paging_levels = 2, self_entry_level = 0; /* depth of hierarchy; level holding the self entries */
byte_t table_entry_size = 4;                    /* bytes per PTE: 4 (non-PAE) or 8 */

/*
 * Atomically reads a page table entry of the current entry width.
 * Returns the entry by value; an unsupported table_entry_size asserts
 * and yields a zero (blank) entry.
 */
static inline pte_t read_pte(pte_pointer_t ppte)
{
    /* BUG FIX: zero-initialize so the default path cannot return
     * indeterminate bits if ASSERT compiles out in release builds. */
    pte_t pte = { .raw_entry = 0 };

    switch (table_entry_size)
    {
    case 4: pte.raw_entry = __atomic_load_n((dword_t*)ppte, __ATOMIC_RELAXED); break;
    case 8: pte.raw_entry = __atomic_load_n((qword_t*)ppte, __ATOMIC_RELAXED); break;
    default: ASSERT(FALSE);
    }

    return pte;
}

/*
 * Atomically replaces *ppte with new_pte if it still equals old_pte.
 * Returns TRUE on success, FALSE if the entry changed concurrently.
 */
static inline bool_t cmpxchg_pte(pte_pointer_t ppte, pte_t old_pte, pte_t new_pte)
{
    switch (table_entry_size)
    {
    case 4:
        return __atomic_compare_exchange((dword_t*)ppte, (dword_t*)&old_pte, (dword_t*)&new_pte, FALSE, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    case 8:
        return __atomic_compare_exchange((qword_t*)ppte, (qword_t*)&old_pte, (qword_t*)&new_pte, FALSE, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    default:
        ASSERT(FALSE);
        /* BUG FIX: falling off the end of a non-void function is UB when
         * ASSERT compiles out; fail the exchange instead. */
        return FALSE;
    }
}

/*
 * Walks the recursively self-mapped paging hierarchy and returns a pointer
 * to the PTE that maps 'address' within 'table'.
 *
 * At each level, table_base is shifted left by the level's index width to
 * descend one level of the self-map, then the entry offset for 'address'
 * is OR-ed in. If 'address' itself falls inside the table window at this
 * level, the address IS a PTE location and it is returned directly.
 *
 * When 'hypothetical' is FALSE, the walk returns NULL as soon as an
 * intermediate (non-leaf) entry is zero, since the leaf PTE's page does
 * not exist; when TRUE, the would-be PTE address is computed regardless.
 */
static pte_pointer_t memory_get_table_entry(page_table_t table, void *address, bool_t hypothetical)
{
    uintptr_t numeric_address = (uintptr_t)address;
    uintptr_t table_base = (uintptr_t)table;

    for (int level = 0; level < paging_levels; level++)
    {
        uintptr_t level_mask = ((uintptr_t)1 << page_table_levels[level].bits) - 1;
        uintptr_t table_mask = (level_mask + 1) * table_entry_size - 1;
        uintptr_t entry_num = (numeric_address >> page_table_levels[level].shift) & level_mask;

        table_base <<= page_table_levels[level].bits;

        /* The address points into this level's table window itself. */
        if (numeric_address >= table_base && numeric_address <= (table_base | table_mask))
        {
            return (pte_pointer_t)(table_base | (entry_num * table_entry_size));
        }

        table_base |= entry_num * table_entry_size;
        /* A zero intermediate entry means the next-level table is absent. */
        if (!hypothetical && !read_pte((pte_pointer_t)table_base).raw_entry && level < (paging_levels - 1)) return NULL;
    }

    return (pte_pointer_t)table_base;
}

/*
 * Returns in *ppte the PTE for 'address' in 'table', allocating and mapping
 * any missing intermediate page table along the way.
 *
 * If the normal walk fails (an intermediate table is absent), the
 * hypothetical PTE address is computed, a fresh physical page is acquired,
 * mapped at the page containing that PTE, and zeroed so all its entries
 * decode as PTE_BLANK.
 *
 * Returns ERR_SUCCESS, ERR_INVALID if the address cannot be resolved even
 * hypothetically, ERR_NOMEMORY if no page is available, or the error from
 * memory_map_page (which releases the page on failure here).
 *
 * NOTE(review): memory_map_page recurses back into this path for the new
 * table's own PTE — presumably bounded by paging_levels; verify.
 */
static sysret_t get_or_create_table_entry(page_table_t table, void *address, pte_pointer_t *ppte)
{
    if (!(*ppte = memory_get_table_entry(table, address, FALSE)))
    {
        if (!(*ppte = memory_get_table_entry(table, address, TRUE))) return ERR_INVALID;
        page_table_t lower_table = (page_table_t)PAGE_ALIGN((uintptr_t)*ppte);

        page_t *lower_table_page = memory_acquire_page(MIN_PHYS_ADDR_BITS, MAX_PHYS_ADDR_BITS, PAGE_SIZE);
        if (!lower_table_page) return ERR_NOMEMORY;

        sysret_t ret = memory_map_page(table, lower_table_page, lower_table, MEMORY_FLAG_ACCESSIBLE | MEMORY_FLAG_WRITABLE);
        if (ret != ERR_SUCCESS)
        {
            memory_release_page(lower_table_page);
            return ret;
        }

        memset(lower_table, 0, PAGE_SIZE);
    }

    return ERR_SUCCESS;
}

/*
 * Recursively copies every present PTE marked 'sticky' from the table
 * hierarchy rooted at 'source' into the one rooted at 'destination'
 * (viewed through the shadow-table self-map), allocating destination
 * tables on demand. Used when switching default tables so that sticky
 * (globally replicated) mappings survive into the new address space.
 *
 * 'count' halves at level 0 because only the kernel half of the top-level
 * table is scanned; at the self-entry level the last two entries (the
 * default- and shadow-table self references) are skipped.
 *
 * Allocation or mapping failure here is unrecoverable mid-copy and
 * crashes the kernel deliberately.
 *
 * The loop body is a macro so the same code can walk 4- and 8-byte
 * entries; nested tables are reached by shifting the PTE's own address
 * further into the self-map window.
 */
static void update_sticky_pages(int level, pte_pointer_t source, pte_pointer_t destination)
{
    pte_t pte;
    size_t count = 1 << (page_table_levels[level].bits - (level ? 0 : 1));

    /* We must skip entries that point back into higher-level tables */
    if (level == self_entry_level) count -= 2;

#define UPDATE_PAGES_LOOP(type)                                         \
    do                                                                  \
    {                                                                   \
        type *src_ppte = (type*)source;                                 \
        type *dest_ppte = (type*)destination;                           \
                                                                        \
        for (size_t i = 0; i < count; i++)                              \
        {                                                               \
            pte.raw_entry = *src_ppte;                                  \
                                                                        \
            if (pte.is_present)                                         \
            {                                                           \
                if (pte.present.sticky)                                 \
                {                                                       \
                    if (!memory_get_page_mapping(memory_shadow_table, dest_ppte)) \
                    {                                                   \
                        page_t *table_page = memory_acquire_page(MIN_PHYS_ADDR_BITS, MAX_PHYS_ADDR_BITS, PAGE_SIZE); \
                        if (!table_page) KERNEL_CRASH("No free pages were available at a critical moment"); \
                                                                        \
                        sysret_t ret = memory_map_page(memory_shadow_table, \
                                                       table_page,      \
                                                       (void*)PAGE_ALIGN((uintptr_t)dest_ppte), \
                                                       MEMORY_FLAG_ACCESSIBLE | MEMORY_FLAG_WRITABLE); \
                        if (ret != ERR_SUCCESS) KERNEL_CRASH("Unexpected mapping error"); \
                    }                                                   \
                                                                        \
                    *dest_ppte = pte.raw_entry;                         \
                }                                                       \
                else if (level + 1 < paging_levels)                     \
                {                                                       \
                    uintptr_t mask = memory_table_size - 1;             \
                    int shift = page_table_levels[level + 1].bits;      \
                    pte_pointer_t nested_src = (pte_pointer_t)(((uintptr_t)src_ppte & ~mask) | (((uintptr_t)src_ppte << shift) & mask)); \
                    pte_pointer_t nested_dest = (pte_pointer_t)(((uintptr_t)dest_ppte & ~mask) | (((uintptr_t)dest_ppte << shift) & mask)); \
                    update_sticky_pages(level + 1, nested_src, nested_dest); \
                }                                                       \
            }                                                           \
                                                                        \
            src_ppte++;                                                 \
            dest_ppte++;                                                \
        }                                                               \
    } while(FALSE)

    if (table_entry_size == 4) UPDATE_PAGES_LOOP(dword_t);
    else if (table_entry_size == 8) UPDATE_PAGES_LOOP(qword_t);

#undef UPDATE_PAGES_LOOP
}

/*
 * Looks up the physical page backing 'address' in 'table'.
 * Returns the page descriptor for a present mapping or a committed
 * (absent) entry, NULL when there is no backing page, and crashes the
 * kernel on a corrupt absent-entry type.
 */
page_t *memory_get_page_mapping(page_table_t table, void *address)
{
    pte_pointer_t entry_ptr = memory_get_table_entry(table, address, FALSE);
    if (!entry_ptr) return NULL;

    pte_t entry = read_pte(entry_ptr);

    /* A present entry carries the frame number directly. */
    if (entry.is_present) return memory_find_page_by_address(entry.present.number * PAGE_SIZE);

    switch (entry.absent.type)
    {
    case PTE_COMMITTED:
        /* Committed but unmapped: the frame is still reserved for us. */
        return memory_find_page_by_address(entry.absent.number * PAGE_SIZE);

    case PTE_BLANK:
    case PTE_RESERVED:
    case PTE_EVICTED:
    case PTE_TRANSITIONAL:
        return NULL;

    default:
        KERNEL_CRASH("Invalid page type");
    }
}

/*
 * Maps (or commits) a single physical page at 'address' in 'table'.
 *
 * With MEMORY_FLAG_ACCESSIBLE a present hardware entry is built; without
 * it the page is recorded as a PTE_COMMITTED absent entry instead.
 * Fails with ERR_EXISTS if the slot is already present or committed,
 * ERR_BUSY if the entry changed between read and publish.
 *
 * Ordering: map_count is incremented BEFORE the compare-exchange publishes
 * the entry (and rolled back if the exchange loses), so the count never
 * under-reports a visible mapping. The TLB entry for 'address' is flushed
 * only for present mappings.
 */
sysret_t memory_map_page(page_table_t table, page_t *page, void *address, memory_flags_t access_flags)
{
    if (page->status < PAGE_STATUS_ALLOCATED) return ERR_INVALID;

    pte_pointer_t ppte;
    sysret_t ret = get_or_create_table_entry(table, address, &ppte);
    if (ret != ERR_SUCCESS) return ret;

    pte_t old_pte = read_pte(ppte);
    pte_t new_pte = { 0 };
    if (old_pte.is_present || old_pte.absent.type == PTE_COMMITTED) return ERR_EXISTS;

    if (access_flags & MEMORY_FLAG_ACCESSIBLE)
    {
        new_pte.is_present = TRUE;
        new_pte.present.writable = (access_flags & MEMORY_FLAG_WRITABLE) ? TRUE : FALSE;
        new_pte.present.usermode = (access_flags & MEMORY_FLAG_USERMODE) ? TRUE : FALSE;
        new_pte.present.sticky = (access_flags & MEMORY_FLAG_STICKY) ? TRUE : FALSE;
        new_pte.present.number = page->number;
        /* no_execute lives in the PAE-only high dword; harmless when entries are 4 bytes. */
        new_pte.present.no_execute = (access_flags & MEMORY_FLAG_EXECUTABLE) ? FALSE : TRUE;

        __atomic_add_fetch(&page->map_count, 1, __ATOMIC_ACQUIRE);
    }
    else
    {
        new_pte.is_present = FALSE;
        new_pte.absent.type = PTE_COMMITTED;
        new_pte.absent.readable = FALSE;
        new_pte.absent.writable = (access_flags & MEMORY_FLAG_WRITABLE) ? TRUE : FALSE;
        new_pte.absent.executable = (access_flags & MEMORY_FLAG_EXECUTABLE) ? TRUE : FALSE;
        new_pte.absent.usermode = (access_flags & MEMORY_FLAG_USERMODE) ? TRUE : FALSE;
        new_pte.absent.sticky = (access_flags & MEMORY_FLAG_STICKY) ? TRUE : FALSE;
        new_pte.absent.number = page->number;
    }

    if (cmpxchg_pte(ppte, old_pte, new_pte))
    {
        if (new_pte.is_present) cpu_invalidate_tlb(address);
        return ERR_SUCCESS;
    }
    else
    {
        /* Lost the race: undo the speculative map_count increment. */
        if (new_pte.is_present) __atomic_sub_fetch(&page->map_count, 1, __ATOMIC_RELEASE);
        return ERR_BUSY;
    }
}

/*
 * Maps every page of 'area' at consecutive page-aligned virtual addresses
 * starting at 'address', all with the same access flags.
 *
 * On any failure the pages mapped so far are unmapped again and the error
 * from memory_map_page is returned; ERR_SUCCESS otherwise.
 */
sysret_t memory_map_area(page_table_t table, const area_t *area, void *address, memory_flags_t access_flags)
{
    sysret_t ret = ERR_SUCCESS;
    uintptr_t numeric_address = PAGE_ALIGN((uintptr_t)address);
    page_num_t page;

    for (page = 0; page < area->count; page++)
    {
        if ((ret = memory_map_page(table,
                                   &area->pages[page],
                                   (void*)(numeric_address + (size_t)page * PAGE_SIZE),
                                   access_flags)) != ERR_SUCCESS) break;
    }

    if (ret != ERR_SUCCESS)
    {
        /* Roll back everything mapped before the failing page.
         * BUG FIX: the rollback previously indexed with the failing
         * index 'page' instead of 'i', so it unmapped the same (never
         * mapped) address repeatedly and leaked the earlier mappings. */
        for (page_num_t i = 0; i < page; i++)
        {
            memory_unmap_clear_page(table, (void*)(numeric_address + (size_t)i * PAGE_SIZE));
        }
    }

    return ret;
}

/*
 * Reports the memory_flags_t attributes of the page at 'address' in
 * 'table'. Returns ERR_BADPTR when the address has no entry at all or
 * the entry is blank, ERR_SUCCESS otherwise with *access_flags filled in.
 */
sysret_t memory_query_page_flags(page_table_t table, void *address, memory_flags_t *access_flags)
{
    pte_pointer_t entry_ptr = memory_get_table_entry(table, address, FALSE);
    if (entry_ptr == NULL) return ERR_BADPTR;

    pte_t entry = read_pte(entry_ptr);
    memory_flags_t flags = 0;

    if (entry.is_present)
    {
        /* A present entry is accessible by definition; decode the rest
         * from the hardware layout. */
        flags = MEMORY_FLAG_ACCESSIBLE;
        if (entry.present.writable) flags |= MEMORY_FLAG_WRITABLE;
        if (!entry.present.no_execute) flags |= MEMORY_FLAG_EXECUTABLE;
        if (entry.present.usermode) flags |= MEMORY_FLAG_USERMODE;
        if (entry.present.sticky) flags |= MEMORY_FLAG_STICKY;
        if (entry.present.cow) flags |= MEMORY_FLAG_COPY_ON_WRITE;
    }
    else
    {
        if (entry.absent.type == PTE_BLANK) return ERR_BADPTR;

        /* Absent entries keep their attributes in the software layout. */
        if (entry.absent.readable) flags |= MEMORY_FLAG_ACCESSIBLE;
        if (entry.absent.writable) flags |= MEMORY_FLAG_WRITABLE;
        if (entry.absent.executable) flags |= MEMORY_FLAG_EXECUTABLE;
        if (entry.absent.usermode) flags |= MEMORY_FLAG_USERMODE;
        if (entry.absent.sticky) flags |= MEMORY_FLAG_STICKY;
        if (entry.absent.type == PTE_EVICTED || entry.absent.type == PTE_TRANSITIONAL) flags |= MEMORY_FLAG_EVICTED;
    }

    *access_flags = flags;
    return ERR_SUCCESS;
}

/*
 * Changes the access flags of the entry mapping 'address' in 'table'.
 *
 * Dropping MEMORY_FLAG_ACCESSIBLE from a present mapping converts it into
 * an absent PTE_COMMITTED entry; otherwise the relevant flag bits of the
 * current layout are rewritten in place. Returns ERR_BUSY if the entry
 * changed concurrently, otherwise ERR_SUCCESS after a TLB flush.
 */
sysret_t memory_adjust_page_flags(page_table_t table, void *address, memory_flags_t access_flags)
{
    pte_pointer_t ppte;
    sysret_t ret = get_or_create_table_entry(table, address, &ppte);
    if (ret != ERR_SUCCESS) return ret;

    pte_t old_pte = read_pte(ppte);
    pte_t new_pte = old_pte;

    if (old_pte.is_present && !(access_flags & MEMORY_FLAG_ACCESSIBLE))
    {
        /* Present -> absent conversion. The two layouts only agree on the
         * 'cow' and 'number' bit positions, so moved flags are copied
         * explicitly. */
        new_pte.is_present = FALSE;
        new_pte.absent.type = PTE_COMMITTED;
        new_pte.absent.readable = FALSE;
        /* BUG FIX: present.sticky (bit 10) lands in absent.reserved1 and was
         * zeroed below, while absent.sticky (bit 8) inherited present.global —
         * the sticky attribute was silently lost. Carry it over explicitly,
         * matching what memory_unmap_keep_page does. */
        new_pte.absent.sticky = old_pte.present.sticky;
        new_pte.absent.reserved1 = 0;
        new_pte.absent.reserved2 = 0;
        /* NOTE(review): the backing page's map_count is not decremented here
         * even though the translation disappears — verify against the
         * unmap paths, which do decrement it. */
    }

    if (new_pte.is_present)
    {
        new_pte.present.writable = (access_flags & MEMORY_FLAG_WRITABLE) ? TRUE : FALSE;
        new_pte.present.usermode = (access_flags & MEMORY_FLAG_USERMODE) ? TRUE : FALSE;
        new_pte.present.no_execute = (access_flags & MEMORY_FLAG_EXECUTABLE) ? FALSE : TRUE;
    }
    else
    {
        new_pte.absent.readable = (access_flags & MEMORY_FLAG_ACCESSIBLE) ? TRUE: FALSE;
        new_pte.absent.writable = (access_flags & MEMORY_FLAG_WRITABLE) ? TRUE : FALSE;
        new_pte.absent.executable = (access_flags & MEMORY_FLAG_EXECUTABLE) ? TRUE : FALSE;
        new_pte.absent.usermode = (access_flags & MEMORY_FLAG_USERMODE) ? TRUE : FALSE;
    }

    if (!cmpxchg_pte(ppte, old_pte, new_pte)) return ERR_BUSY;

    cpu_invalidate_tlb(address);
    return ERR_SUCCESS;
}

/*
 * Removes the entry mapping 'address' from 'table' entirely, zeroing the
 * PTE (so it decodes as PTE_BLANK). If the entry was present, the TLB is
 * flushed and the backing page's map_count is decremented.
 *
 * Returns ERR_NOTFOUND when no entry exists, ERR_BUSY on a concurrent
 * modification, ERR_SUCCESS otherwise.
 */
sysret_t memory_unmap_clear_page(page_table_t table, void *address)
{
    pte_pointer_t ppte = memory_get_table_entry(table, address, FALSE);
    if (!ppte) return ERR_NOTFOUND;

    pte_t pte = read_pte(ppte);
    pte_t zero = { 0 };
    if (!cmpxchg_pte(ppte, pte, zero)) return ERR_BUSY;

    if (pte.is_present)
    {
        cpu_invalidate_tlb(address);
        page_t *page = memory_find_page_by_address(pte.present.number * PAGE_SIZE);
        if (page) __atomic_sub_fetch(&page->map_count, 1, __ATOMIC_RELEASE);
    }

    return ERR_SUCCESS;
}

/*
 * Unmaps a present page at 'address' while preserving its attributes in
 * an absent PTE_RESERVED entry (so the mapping can later be restored).
 *
 * Each flag is copied field-by-field because the present and absent
 * layouts place them at different bit positions; the frame number and
 * reserved fields are cleared. The backing page's map_count is
 * decremented after the entry is published and the TLB flushed.
 *
 * Returns ERR_NOTFOUND when there is no entry or it is not present,
 * ERR_BUSY on a concurrent modification, ERR_SUCCESS otherwise.
 */
sysret_t memory_unmap_keep_page(page_table_t table, void *address)
{
    pte_pointer_t pte = memory_get_table_entry(table, address, FALSE);
    if (!pte) return ERR_NOTFOUND;

    pte_t original_entry = read_pte(pte);
    pte_t entry = original_entry;
    if (!entry.is_present) return ERR_NOTFOUND;

    page_t *page = memory_find_page_by_address(entry.present.number * PAGE_SIZE);

    entry.is_present = FALSE;
    entry.absent.type = PTE_RESERVED;
    entry.absent.readable = TRUE;
    entry.absent.writable = original_entry.present.writable;
    entry.absent.executable = !original_entry.present.no_execute;
    entry.absent.usermode = original_entry.present.usermode;
    entry.absent.sticky = original_entry.present.sticky;
    entry.absent.reserved1 = 0;
    entry.absent.number = 0;
    entry.absent.reserved2 = 0;
    if (!cmpxchg_pte(pte, original_entry, entry)) return ERR_BUSY;

    cpu_invalidate_tlb(address);
    if (page) __atomic_sub_fetch(&page->map_count, 1, __ATOMIC_RELEASE);

    return ERR_SUCCESS;
}

/*
 * Unmaps and clears 'num_pages' consecutive pages starting at the page
 * containing 'address'. Keeps going past individual failures and returns
 * the last error encountered, or ERR_SUCCESS when every page succeeded.
 */
sysret_t memory_unmap_clear_area(page_table_t table, void *address, size_t num_pages)
{
    sysret_t last_error = ERR_SUCCESS;
    uintptr_t first_page = PAGE_NUMBER((uintptr_t)address);

    for (size_t offset = 0; offset < num_pages; offset++)
    {
        void *page_address = (void*)((first_page + offset) * PAGE_SIZE);
        sysret_t status = memory_unmap_clear_page(table, page_address);
        if (status != ERR_SUCCESS) last_error = status;
    }

    return last_error;
}

/*
 * Unmaps 'num_pages' consecutive pages starting at the page containing
 * 'address', preserving each entry's attributes (see
 * memory_unmap_keep_page). Continues past individual failures and returns
 * the last error, or ERR_SUCCESS when every page succeeded.
 */
sysret_t memory_unmap_keep_area(page_table_t table, void *address, size_t num_pages)
{
    sysret_t last_error = ERR_SUCCESS;
    uintptr_t first_page = PAGE_NUMBER((uintptr_t)address);

    for (size_t offset = 0; offset < num_pages; offset++)
    {
        void *page_address = (void*)((first_page + offset) * PAGE_SIZE);
        sysret_t status = memory_unmap_keep_page(table, page_address);
        if (status != ERR_SUCCESS) last_error = status;
    }

    return last_error;
}

/*
 * Attaches 'new_shadow_table' as the shadow (staging) table of the current
 * address space.
 *
 * The table page is first mapped at the second-to-last virtual page
 * (-2 * PAGE_SIZE) so it can be edited directly; then a present, writable,
 * non-executable self-reference to it is installed in the second-to-last
 * entry of the top-level table, making the whole hierarchy reachable
 * through memory_shadow_table.
 *
 * Returns ERR_INVALID for an unallocated page, ERR_BUSY (after undoing
 * the window mapping) when the self-entry changed concurrently, or the
 * error from memory_map_page.
 */
sysret_t memory_load_shadow_table(page_t *new_shadow_table)
{
    if (new_shadow_table->status < PAGE_STATUS_ALLOCATED) return ERR_INVALID;

    sysret_t ret = memory_map_page(memory_default_table,
                                   new_shadow_table,
                                   (void*)(-2 * PAGE_SIZE),
                                   MEMORY_FLAG_ACCESSIBLE | MEMORY_FLAG_WRITABLE);
    if (ret != ERR_SUCCESS) return ret;

    /* Second-to-last entry of the top-level table (the shadow self-entry). */
    pte_pointer_t self_entry = (pte_pointer_t)(-PAGE_SIZE - table_entry_size * 2);
    pte_t new_pte = { 0 };
    pte_t old_pte = read_pte(self_entry);
    new_pte.is_present = TRUE;
    new_pte.present.writable = TRUE;
    new_pte.present.number = new_shadow_table->number;
    new_pte.present.no_execute = TRUE;

    if (!cmpxchg_pte(self_entry, old_pte, new_pte))
    {
        memory_unmap_clear_page(memory_default_table, (void*)(-2 * PAGE_SIZE));
        return ERR_BUSY;
    }

    return ERR_SUCCESS;
}

/*
 * Detaches the currently loaded shadow table by unmapping its editing
 * window at the second-to-last virtual page.
 */
sysret_t memory_unload_shadow_table(void)
{
    void *shadow_window = (void*)(-2 * PAGE_SIZE);
    return memory_unmap_clear_page(memory_default_table, shadow_window);
}

/*
 * Switches the CPU to a new top-level page table.
 *
 * The new table is first loaded as the shadow table; all sticky mappings
 * from the kernel half of the current hierarchy (addresses from the
 * halfway point up) are replicated into it via update_sticky_pages; the
 * shadow window is then released and the page table register is pointed
 * at the new table's physical address.
 *
 * Returns ERR_INVALID for an unallocated page or the error from
 * memory_load_shadow_table; the unload is asserted to succeed.
 */
sysret_t memory_load_default_table(page_t *new_default_table)
{
    if (new_default_table->status < PAGE_STATUS_ALLOCATED) return ERR_INVALID;

    sysret_t ret = memory_load_shadow_table(new_default_table);
    if (ret != ERR_SUCCESS) return ret;

    /* Top of the kernel half of the current (source) and shadow (dest) maps. */
    pte_pointer_t source_table = (pte_pointer_t)((intptr_t)-PAGE_SIZE >> 1);
    pte_pointer_t dest_table = (pte_pointer_t)(((intptr_t)-PAGE_SIZE >> 1) - (intptr_t)memory_table_size);
    update_sticky_pages(0, source_table, dest_table);
    ret = memory_unload_shadow_table();
    ASSERT(ret == ERR_SUCCESS);

    cpu_write_page_table_register(new_default_table->number * PAGE_SIZE);
    return ERR_SUCCESS;
}

/*
 * Allocates and initializes a fresh top-level page table.
 *
 * The new page is loaded as the shadow table (which also maps it at the
 * -2 * PAGE_SIZE editing window), zeroed, and given its own recursive
 * self-reference in its LAST entry (address -PAGE_SIZE - table_entry_size)
 * — present, writable, non-executable — so it becomes a self-mapped
 * hierarchy when later activated. The shadow window is released before
 * returning.
 *
 * Returns the table's page descriptor, or NULL on allocation, mapping,
 * or concurrent-modification failure (resources are released on all
 * failure paths).
 */
page_t *memory_create_page_table(void)
{
    page_t *page = memory_acquire_page(MIN_PHYS_ADDR_BITS, MAX_PHYS_ADDR_BITS, PAGE_SIZE);
    if (!page) return NULL;

    if (memory_load_shadow_table(page) != ERR_SUCCESS)
    {
        memory_release_page(page);
        return NULL;
    }

    memset((void*)(-2 * PAGE_SIZE), 0, PAGE_SIZE);
    pte_t zero = { 0 }, pte = zero;
    pte.is_present = TRUE;
    pte.present.writable = TRUE;
    pte.present.number = page->number;
    pte.present.no_execute = TRUE;

    if (!cmpxchg_pte((pte_pointer_t)(-PAGE_SIZE - table_entry_size), zero, pte))
    {
        memory_unload_shadow_table();
        memory_release_page(page);
        return NULL;
    }

    memory_unload_shadow_table();
    return page;
}

/*
 * Forces the range [address, address + size) to be backed by touching one
 * word in every page it spans, letting the fault handler commit storage.
 *
 * Returns ERR_SUCCESS, or ERR_BADPTR if any touch faults unrecoverably
 * (caught by the EH_TRY handler).
 */
sysret_t memory_commit(void *address, size_t size)
{
    sysret_t ret = ERR_SUCCESS;

    /* Nothing to commit for an empty range; also avoids the
     * address + size - 1 underflow in the loop bound below. */
    if (size == 0) return ERR_SUCCESS;

    EH_TRY
    {
        /* BUG FIX: the bound was PAGE_ALIGN_UP(address + size - 1) combined
         * with <=, which touched one page PAST the range whenever its end
         * was not page-aligned. PAGE_ALIGN of the last byte is the base of
         * the last page actually inside the range. */
        for (uintptr_t page = PAGE_ALIGN((uintptr_t)address);
             page <= PAGE_ALIGN((uintptr_t)address + size - 1);
             page += PAGE_SIZE)
        {
            volatile size_t value = *(volatile size_t*)page;
            UNUSED_PARAMETER(value);
        }
    }
    EH_CATCH
    {
        ret = ERR_BADPTR;
    }
    EH_DONE;

    return ret;
}
