/*
 * memory.c
 *
 * Copyright (C) 2016 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <memory.h>
#include <log.h>
#include <exception.h>
#include <process.h>
#include <syscalls.h>
#include <heap.h>
#include <cpu.h>
#include <semaphore.h>

/* Stack of free physical page frames; num_free_pages is the stack depth. */
static void **physical_memory_stack = (void**)MEM_STACK_VIRT_ADDR;
static DECLARE_LOCK(phys_mem_stack_lock);
/* Page descriptor array, sorted by physical address (see compare_page). */
static page_t *pages = NULL;
/* Physical address of the page directory currently loaded in CR3. */
static void *current_page_directory = INVALID_PAGE;
static memory_address_space_t kernel_address_space;
static memory_address_space_t mapping_space;
/* Circular list of user-mode address spaces, scanned by evict_page(). */
static list_entry_t user_address_spaces = { &user_address_spaces, &user_address_spaces };
static dword_t total_physical_pages = 0;
static dword_t num_free_pages = 0;
/* One bit per slot of the memory_block_t arena at MEM_TREE_BLOCKS. */
static dword_t mem_tree_bitmap[TOTAL_PAGES / 32];
static DECLARE_LOCK(mem_tree_lock);
/* Counts free slots in the TEMPORARY_ADDR mapping window. */
static semaphore_t temporary_page_semaphore;
/* Guards against re-entering eviction from alloc_physical_page(). */
static bool_t evicting = FALSE;
/* Evicted pages still referenced elsewhere, awaiting write-out to a store. */
static DECLARE_LIST(transition_pages);
/* Registered page stores (swap backends) and the lock protecting them. */
static DECLARE_LIST(page_stores);
static DECLARE_LOCK(page_store_lock);

static void *evict_page(void);

/* Invalidates the TLB entry for the page containing *addr (INVLPG). */
static inline void invalidate_tlb(dword_t *addr)
{
    asm volatile ("invlpg %0" :: "m"(*addr));
}

/*
 * Pops a free physical frame off the stack, first attempting to evict a
 * page when the free pool is nearly exhausted.  Returns INVALID_PAGE
 * when nothing can be allocated.
 */
static inline void *alloc_physical_page(void)
{
    void *page = INVALID_PAGE;

    /* NOTE(review): 'evicting' and this num_free_pages read are not
     * protected by phys_mem_stack_lock — presumably eviction is otherwise
     * serialized (single eviction path); confirm. */
    if (!evicting && num_free_pages <= EVICTION_THRESHOLD)
    {
        evicting = TRUE;
        page = evict_page();
        evicting = FALSE;

        if (page != INVALID_PAGE) return page;
    }

    lock_acquire(&phys_mem_stack_lock);
    if (num_free_pages) page = physical_memory_stack[--num_free_pages];
    lock_release(&phys_mem_stack_lock);

    return page;
}

/* Pushes a physical page frame back onto the free-page stack. */
static inline void free_physical_page(void *address)
{
    lock_acquire(&phys_mem_stack_lock);
    physical_memory_stack[num_free_pages++] = address;
    lock_release(&phys_mem_stack_lock);
}

static int compare_page(const void *a, const void *b)
{
    const page_t *page_a = (const page_t*)a;
    const page_t *page_b = (const page_t*)b;

    if (page_a->phys_addr < page_b->phys_addr) return -1;
    else if (page_a->phys_addr > page_b->phys_addr) return 1;
    else return 0;
}

static page_t *get_page(void *physical)
{
    page_t key = { .phys_addr = (uintptr_t)physical };
    if (pages == NULL) return NULL;
    return (page_t*)bsearch(&key, pages, total_physical_pages, sizeof(page_t), compare_page);
}

/* Increments the reference count of the frame at 'physical'.
 * Returns the new count, or 0 when the frame has no descriptor. */
static inline dword_t reference_page(void *physical)
{
    page_t *descriptor = get_page(physical);
    return descriptor ? ++descriptor->ref_count : 0;
}

/* Decrements the reference count of the frame at 'physical'.
 * Returns the new count, or 0 when the frame has no descriptor. */
static inline dword_t dereference_page(void *physical)
{
    page_t *descriptor = get_page(physical);
    return descriptor ? --descriptor->ref_count : 0;
}

/*
 * Maps one physical page at a virtual address in the currently active
 * address space, allocating the page table on demand.
 *
 * Only the low 12 bits of 'flags' are honored.  Returns ERR_SUCCESS,
 * ERR_NOMEMORY when no frame is available for a new page table, or
 * ERR_EXISTS when the virtual page is already mapped.  Operates through
 * the recursive self-map at PAGE_DIRECTORY_ADDR / PAGE_TABLE_ADDR, so it
 * always affects the directory currently loaded in CR3.
 */
static dword_t map_page(void *physical, void *virtual, dword_t flags)
{
    dword_t i;
    dword_t ret = ERR_SUCCESS;
    critical_t critical;
    dword_t phys_addr = PAGE_ALIGN((dword_t)physical);
    dword_t virt_addr = PAGE_ALIGN((dword_t)virtual);
    dword_t pd_index = ADDR_TO_PDE(virt_addr), pt_index = ADDR_TO_PTE(virt_addr);
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + (pd_index << 12));

    flags &= 0x00000FFF;
    enter_critical(&critical);

    /* Create the page table if this directory entry is empty. */
    if (!(page_directory[pd_index] & PAGE_PRESENT))
    {
        void *table_page = alloc_physical_page();
        if (table_page == INVALID_PAGE)
        {
            ret = ERR_NOMEMORY;
            goto done;
        }

        reference_page(table_page);
        page_directory[pd_index] = (dword_t)table_page | PAGE_PRESENT | PAGE_WRITABLE;

        /* The self-map now exposes the new table; flush and zero it. */
        invalidate_tlb(page_table);
        for (i = 0; i < PAGE_SIZE / sizeof(dword_t); i++) page_table[i] = 0;
    }

    /* NOTE(review): the directory entry is widened with 'flags' even when
     * the mapping below fails with ERR_EXISTS — confirm this is intended. */
    page_directory[pd_index] |= flags;
    if (page_table[pt_index] & PAGE_PRESENT)
    {
        ret = ERR_EXISTS;
        goto done;
    }

    reference_page((void*)phys_addr);
    page_table[pt_index] = phys_addr | flags | PAGE_PRESENT;
    invalidate_tlb(virtual);

done:
    leave_critical(&critical);
    return ret;
}

/*
 * Removes the mapping of one virtual page from the current address
 * space.  Drops one reference on the backing frame but does not free it
 * (that is the caller's decision — see free_page()).  When the page
 * table becomes completely empty, the table itself is dereferenced and,
 * once unreferenced, returned to the free pool.
 *
 * Returns ERR_SUCCESS, or ERR_NOTFOUND if the page was not mapped.
 */
static dword_t unmap_page(void *virtual)
{
    dword_t i, ret = ERR_SUCCESS;
    critical_t critical;
    bool_t empty_dir = TRUE;
    dword_t virt_addr = PAGE_ALIGN((dword_t)virtual);
    dword_t pd_index = ADDR_TO_PDE(virt_addr), pt_index = ADDR_TO_PTE(virt_addr);
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + (pd_index << 12));

    enter_critical(&critical);

    if (!(page_directory[pd_index] & PAGE_PRESENT))
    {
        ret = ERR_NOTFOUND;
        goto done;
    }

    if (!(page_table[pt_index] & PAGE_PRESENT))
    {
        ret = ERR_NOTFOUND;
        goto done;
    }

    dereference_page((void*)PAGE_ALIGN(page_table[pt_index]));
    page_table[pt_index] = 0;
    invalidate_tlb((dword_t*)virt_addr);

    /* If no other entry of this table is in use, tear the table down too. */
    for (i = 0; i < PAGE_SIZE / sizeof(dword_t); i++) if (page_table[i])
    {
        empty_dir = FALSE;
        break;
    }

    if (empty_dir)
    {
        void *table_page = (void*)PAGE_ALIGN(page_directory[pd_index]);
        page_directory[pd_index] = 0;
        invalidate_tlb(page_table);

        if (dereference_page(table_page) == 0)
        {
            free_physical_page(table_page);
        }
    }

done:
    leave_critical(&critical);
    return ret;
}

static dword_t get_page_flags(void *virtual)
{
    dword_t virt_addr = PAGE_ALIGN((uintptr_t)virtual);
    dword_t pd_index = ADDR_TO_PDE(virt_addr), pt_index = ADDR_TO_PTE(virt_addr);
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + (pd_index << 12));

    if (!(page_directory[pd_index] & PAGE_PRESENT)) return 0;
    if (!(page_table[pt_index] & PAGE_PRESENT)) return 0;

    return PAGE_OFFSET(page_table[pt_index]);
}

/* Rewrites the flag bits of an existing mapping (low 12 bits of 'flags')
 * and flushes its TLB entry.  Returns ERR_NOTFOUND when 'virtual' is not
 * mapped, ERR_SUCCESS otherwise. */
static dword_t set_page_flags(void *virtual, dword_t flags)
{
    critical_t critical;
    dword_t status = ERR_NOTFOUND;
    dword_t virt_addr = PAGE_ALIGN((dword_t)virtual);
    dword_t pd_index = ADDR_TO_PDE(virt_addr);
    dword_t pt_index = ADDR_TO_PTE(virt_addr);
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + (pd_index << 12));

    flags &= 0x00000FFF;
    enter_critical(&critical);

    /* Both levels must already be present; && short-circuits so the page
     * table is never touched through a non-present directory entry. */
    if ((page_directory[pd_index] & PAGE_PRESENT)
        && (page_table[pt_index] & PAGE_PRESENT))
    {
        page_directory[pd_index] |= flags;
        page_table[pt_index] = PAGE_ALIGN(page_table[pt_index]) | flags | PAGE_PRESENT;
        invalidate_tlb((void*)virt_addr);
        status = ERR_SUCCESS;
    }

    leave_critical(&critical);
    return status;
}

/*
 * Maps 'physical' into one of the TEMPORARY_PAGES slots reserved at
 * TEMPORARY_ADDR and returns the slot's virtual address, or NULL on
 * failure.  Blocks on temporary_page_semaphore until a slot should be
 * free; the caller must release it with unmap_temporary_page().
 */
static void *map_temporary_page(void *physical, dword_t flags)
{
    int i;
    wait_semaphore(&temporary_page_semaphore, 1, NO_TIMEOUT);

    /* Scan slots from the top; the semaphore count bounds how far down a
     * free slot can be.  NOTE(review): the count is read without a lock
     * and the loop gives up on the first map_page() failure — confirm
     * this cannot miss a free slot under contention. */
    for (i = TEMPORARY_PAGES - 1; i >= temporary_page_semaphore.count ; i--)
    {
        void *address = (void*)(TEMPORARY_ADDR + i * PAGE_SIZE);

        if (get_physical_address(address) == INVALID_PAGE)
        {
            if (map_page(physical, address, flags) == ERR_SUCCESS) return address;
            break;
        }
    }

    return NULL;
}

/*
 * Releases a slot obtained from map_temporary_page(): the page is
 * unmapped first, and only then is the semaphore signaled so another
 * thread cannot claim a still-mapped slot.
 */
static void unmap_temporary_page(void *virtual)
{
    unmap_page(virtual);
    release_semaphore(&temporary_page_semaphore, 1);
}

/* Allocates a fresh physical frame and maps it at 'virtual'.  On mapping
 * failure the frame is returned to the pool.  Returns ERR_NOMEMORY when
 * no frame is available, otherwise map_page()'s status. */
static inline dword_t alloc_page(void *virtual, dword_t flags)
{
    void *frame = alloc_physical_page();
    if (frame == INVALID_PAGE) return ERR_NOMEMORY;

    dword_t status = map_page(frame, virtual, flags);
    if (status != ERR_SUCCESS) free_physical_page(frame);

    return status;
}

/* Unmaps 'virtual' and releases its backing frame once the frame is no
 * longer referenced (or has no descriptor at all).  Returns ERR_INVALID
 * when the address is not mapped. */
static inline dword_t free_page(void *virtual)
{
    void *frame = get_physical_address(virtual);
    if (frame == INVALID_PAGE) return ERR_INVALID;

    unmap_page(virtual);

    page_t *descriptor = get_page(frame);
    if (!descriptor || descriptor->ref_count == 0) free_physical_page(frame);

    return ERR_SUCCESS;
}

/*
 * Tries to evict one page from 'space' using a second-chance sweep over
 * its evictable blocks, writing the victim out to a page store.
 *
 * Returns the victim's physical frame (now free for reuse) or
 * INVALID_PAGE when no page could be evicted.  A frame that is still
 * referenced elsewhere is parked on transition_pages instead of being
 * handed back.
 *
 * BUGFIX: the page-store search loop below never broke out after finding
 * a store with a free slot, so it always terminated with
 * ptr == &page_stores and the function gave up even when a store had
 * room; a free slot now ends the search.
 */
static void *evict_page_from_address_space(memory_address_space_t *space)
{
    void *physical = INVALID_PAGE;
    int chances = 2;
    dword_t cached_directory[PAGE_SIZE / sizeof(dword_t)];
    dword_t *table = NULL;

    /* Work on a snapshot of the target directory so another address
     * space can be walked without switching CR3. */
    if (read_physical(space->page_directory, cached_directory, PAGE_SIZE) != ERR_SUCCESS)
    {
        return INVALID_PAGE;
    }

    if (!space->evict_blk_ptr) space->evict_blk_ptr = space->evictable_blocks.next;
    memory_block_t *block = CONTAINER_OF(space->evict_blk_ptr, memory_block_t, evict_link);
    dword_t prev_pd_index = (dword_t)-1;
    dword_t address;
    dword_t pd_index, pt_index;

    /* Second-chance sweep: a page with the accessed bit set has the bit
     * cleared and survives this pass; the sweep state persists in the
     * address space between calls. */
    while (chances)
    {
        address = (dword_t)block->address + space->evict_page_num * PAGE_SIZE;
        pd_index = ADDR_TO_PDE(address);
        pt_index = ADDR_TO_PTE(address);
        if (!(cached_directory[pd_index] & PAGE_PRESENT)) goto next;

        if (prev_pd_index != pd_index)
        {
            /* Crossed into a different page table; remap it. */
            if (table) unmap_temporary_page(table);
            table = map_temporary_page((void*)PAGE_ALIGN(cached_directory[pd_index]),
                                       PAGE_PRESENT | PAGE_WRITABLE);
            if (table == NULL) break;
            prev_pd_index = pd_index;
        }

        if (table[pt_index])
        {
            if (!(table[pt_index] & PAGE_ACCESSED))
            {
                physical = (void*)PAGE_ALIGN(table[pt_index]);
                break;
            }

            table[pt_index] &= ~PAGE_ACCESSED;
        }

next:
        space->evict_page_num++;

        if (space->evict_page_num == (dword_t)block->size)
        {
            space->evict_page_num = 0;
            space->evict_blk_ptr = space->evict_blk_ptr->next;

            if (space->evict_blk_ptr == &space->evictable_blocks)
            {
                space->evict_blk_ptr = space->evict_blk_ptr->next;
                chances--;
            }

            if (space->evict_blk_ptr == &space->evictable_blocks) break;
            block = CONTAINER_OF(space->evict_blk_ptr, memory_block_t, evict_link);
        }
    }

    if (physical == INVALID_PAGE) goto cleanup;

    dword_t i;
    list_entry_t *ptr;
    page_store_t *store = NULL;
    byte_t buffer[PAGE_SIZE];

    dword_t ret = read_physical(physical, buffer, PAGE_SIZE);
    if (ret != ERR_SUCCESS)
    {
        physical = INVALID_PAGE;
        goto cleanup;
    }

    /* Find a page store with a free slot; 'i' is the slot index. */
    for (ptr = page_stores.next; ptr != &page_stores; ptr = ptr->next)
    {
        store = CONTAINER_OF(ptr, page_store_t, link);

        for (i = 0; i < store->max_entries; i++) if (!test_bit(store->bitmap, i)) break;
        if (i < store->max_entries) break;
    }

    if (ptr == &page_stores)
    {
        /* Every store is full. */
        physical = INVALID_PAGE;
        goto cleanup;
    }

    page_store_entry_t *entry = (page_store_entry_t*)malloc(sizeof(page_store_entry_t));
    if (entry == NULL)
    {
        physical = INVALID_PAGE;
        goto cleanup;
    }

    space->stats.evicted += PAGE_SIZE;
    entry->address = (void*)address;
    entry->address_space = space;
    entry->number = INVALID_STORE_NUMBER;
    entry->physical = INVALID_PAGE;

    if (dereference_page(physical) == 0)
    {
        /* Last reference dropped: write the page out to the store. */
        entry->number = i;

        dword_t bytes_written;
        ret = syscall_write_file(store->file_handle, buffer, (qword_t)entry->number * (qword_t)PAGE_SIZE, PAGE_SIZE, &bytes_written);
        if (ret != ERR_SUCCESS)
        {
            reference_page(physical);
            free(entry);
            physical = INVALID_PAGE;
            goto cleanup;
        }

        set_bit(store->bitmap, i);
        list_append(&store->entry_list, &entry->link);

        /* Any transition entries sharing this frame now resolve to the
         * same store slot instead of the (soon recycled) frame. */
        for (ptr = transition_pages.next; ptr != &transition_pages; ptr = ptr->next)
        {
            page_store_entry_t *other_entry = CONTAINER_OF(ptr, page_store_entry_t, link);

            if (other_entry->physical == physical)
            {
                ASSERT(other_entry->number == INVALID_STORE_NUMBER);

                list_remove(&other_entry->link);
                list_append(&store->entry_list, &other_entry->link);

                other_entry->number = entry->number;
                other_entry->physical = INVALID_PAGE;
            }
        }
    }
    else
    {
        /* Still referenced elsewhere: park the frame in transition. */
        entry->physical = physical;
        list_append(&transition_pages, &entry->link);
        physical = INVALID_PAGE;
    }

    table[pt_index] = 0;
    if (space->page_directory == get_page_directory()) invalidate_tlb((void*)address);

cleanup:
    if (table) unmap_temporary_page(table);
    return physical;
}

/* Evicts one page, preferring user address spaces over the kernel's.
 * Returns the reclaimed frame or INVALID_PAGE.  A no-op (INVALID_PAGE)
 * before the page descriptor array exists. */
static void *evict_page(void)
{
    if (pages == NULL) return INVALID_PAGE;

    for (list_entry_t *entry = user_address_spaces.next;
         entry != &user_address_spaces;
         entry = entry->next)
    {
        memory_address_space_t *space = CONTAINER_OF(entry, memory_address_space_t, link);
        void *victim = evict_page_from_address_space(space);
        if (victim != INVALID_PAGE) return victim;
    }

    /* No user space yielded a page; fall back to the kernel space. */
    return evict_page_from_address_space(&kernel_address_space);
}

static memory_block_t *mem_tree_alloc(void)
{
    dword_t i;
    memory_block_t *block = NULL;

    lock_acquire(&mem_tree_lock);
    for (i = 0; i < TOTAL_PAGES; i++) if (!test_bit(mem_tree_bitmap, i)) break;

    if (i < TOTAL_PAGES)
    {
        block = (memory_block_t*)(MEM_TREE_BLOCKS + i * sizeof(memory_block_t));

        if ((get_physical_address(block) != INVALID_PAGE)
            || (alloc_page(block, PAGE_GLOBAL | PAGE_WRITABLE | PAGE_PRESENT) == ERR_SUCCESS))
        {
            set_bit(mem_tree_bitmap, i);
        }
        else
        {
            block = NULL;
        }
    }

    lock_release(&mem_tree_lock);
    return block;
}

/* Returns a memory_block_t to the arena; when every slot sharing its
 * backing page is free, the page itself is released too. */
static void mem_tree_free(memory_block_t *block)
{
    dword_t page_base = PAGE_ALIGN((dword_t)block);
    dword_t slot = ((dword_t)block - MEM_TREE_BLOCKS) / sizeof(memory_block_t);
    dword_t addr;
    bool_t page_in_use = FALSE;

    lock_acquire(&mem_tree_lock);
    clear_bit(mem_tree_bitmap, slot);

    /* Scan all slots living in the same page as this block. */
    for (addr = page_base; addr < page_base + PAGE_SIZE; addr += sizeof(memory_block_t))
    {
        slot = (addr - MEM_TREE_BLOCKS) / sizeof(memory_block_t);
        if (test_bit(mem_tree_bitmap, slot))
        {
            page_in_use = TRUE;
            break;
        }
    }

    if (!page_in_use) free_page((void*)page_base);
    lock_release(&mem_tree_lock);
}

/* Descends the by_addr AVL tree from 'block' looking for the block whose
 * range [address, address + size*PAGE_SIZE) contains 'address'.
 * Returns NULL when no block covers it.  (Iterative form of the
 * original tail recursion.) */
static memory_block_t *find_block_by_addr_internal(memory_block_t *block, void *address)
{
    qword_t key = (qword_t)(dword_t)address;

    for (;;)
    {
        qword_t start_addr = block->address;
        qword_t end_addr = start_addr + block->size * PAGE_SIZE;

        if (key >= start_addr && key < end_addr) return block;

        avl_node_t *child = (key < start_addr) ? block->by_addr_node.left
                                               : block->by_addr_node.right;
        if (!child) return NULL;

        block = CONTAINER_OF(child, memory_block_t, by_addr_node);
    }
}

/* Finds the block of 'space' containing 'address', or NULL. */
static memory_block_t *find_block_by_addr(memory_address_space_t *space, void *address)
{
    avl_node_t *root_node = space->by_addr_tree.root;
    if (root_node == NULL) return NULL;

    memory_block_t *root = CONTAINER_OF(root_node, memory_block_t, by_addr_node);
    return find_block_by_addr_internal(root, address);
}

/*
 * Recursively clones 'block' and its whole by_addr subtree into 'space',
 * marking each source block copy-on-write as it goes (fork-style address
 * space duplication).  Returns FALSE on allocation failure; the clone
 * made at this level is then unlinked and freed, though clones already
 * created deeper in the recursion remain in 'space'.
 *
 * NOTE(review): clone->section takes no extra reference on the section,
 * and the by_size key is presumably derived from clone->size inside
 * avl_tree_insert — confirm both against the AVL/section implementations.
 */
static bool_t clone_blocks_recursive(memory_address_space_t *space, memory_block_t *block)
{
    memory_block_t *clone = mem_tree_alloc();
    if (clone == NULL) return FALSE;

    clone->address = block->address;
    clone->size = block->size;
    /* The original becomes copy-on-write too, so both sides fault on write. */
    block->flags |= MEMORY_BLOCK_COPY_ON_WRITE;
    clone->flags = block->flags;
    clone->address_space = space;
    clone->section = block->section;

    avl_tree_insert(&space->by_addr_tree, &clone->by_addr_node);
    avl_tree_insert(&space->by_size_tree, &clone->by_size_node);

    /* CONTAINER_OF on a NULL child is harmless here: each pointer is only
     * used when the corresponding child actually exists. */
    memory_block_t *left_block = CONTAINER_OF(block->by_addr_node.left, memory_block_t, by_addr_node);
    memory_block_t *right_block = CONTAINER_OF(block->by_addr_node.right, memory_block_t, by_addr_node);

    if ((block->by_addr_node.left && !clone_blocks_recursive(space, left_block))
        || (block->by_addr_node.right && !clone_blocks_recursive(space, right_block)))
    {
        avl_tree_remove(&space->by_addr_tree, &clone->by_addr_node);
        avl_tree_remove(&space->by_size_tree, &clone->by_size_node);
        mem_tree_free(clone);
        return FALSE;
    }

    return TRUE;
}

/*
 * Frees every page of 'block' (briefly switching to its owner's page
 * directory) and discards any transition or page-store entries that fall
 * inside the block's range, releasing their store slots.
 *
 * BUGFIX: both removal loops previously advanced the iterator with
 * i = i->next / j = j->next AFTER list_remove() + free() had destroyed
 * the node the iterator pointed into — a use-after-free.  The iterator
 * is now advanced before any removal.
 */
static inline void release_memory_block(memory_block_t *block)
{
    dword_t page;
    dword_t start_address = (dword_t)block->address;
    dword_t end_address = start_address + (dword_t)block->size * PAGE_SIZE;

    /* Free the pages while the owner's directory is the active one. */
    critical_t critical;
    enter_critical(&critical);
    void *old_page_dir = get_page_directory();
    set_page_directory(block->address_space->page_directory);

    for (page = start_address; page < end_address; page += PAGE_SIZE)
    {
        free_page((void*)page);
    }

    set_page_directory(old_page_dir);
    leave_critical(&critical);

    if (block->section)
    {
        dereference(&block->section->header);
        block->section = NULL;
    }

    list_entry_t *i;

    /* Drop transition-page entries belonging to this block. */
    for (i = transition_pages.next; i != &transition_pages;)
    {
        page_store_entry_t *entry = CONTAINER_OF(i, page_store_entry_t, link);
        i = i->next;    /* advance before a possible removal */

        if (entry->address_space == block->address_space
            && (dword_t)entry->address >= start_address
            && ((dword_t)entry->address < end_address))
        {
            list_remove(&entry->link);
            free(entry);
        }
    }

    lock_acquire(&page_store_lock);

    /* Likewise for entries already written out to a page store. */
    for (i = page_stores.next; i != &page_stores; i = i->next)
    {
        list_entry_t *j;
        page_store_t *store = CONTAINER_OF(i, page_store_t, link);

        for (j = store->entry_list.next; j != &store->entry_list;)
        {
            page_store_entry_t *entry = CONTAINER_OF(j, page_store_entry_t, link);
            j = j->next;    /* advance before a possible removal */

            if (entry->address_space == block->address_space
                && (dword_t)entry->address >= start_address
                && ((dword_t)entry->address < end_address))
            {
                if (entry->number != INVALID_STORE_NUMBER) clear_bit(store->bitmap, entry->number);
                list_remove(&entry->link);
                free(entry);
            }
        }
    }

    lock_release(&page_store_lock);
}

/* Post-order teardown of a by_addr subtree: release this block's pages
 * and store entries, recurse into both children, then return the node
 * itself to the arena. */
static void free_blocks_recursive(memory_block_t *block)
{
    release_memory_block(block);

    /* Capture both children before recursing; this node's arena slot is
     * still marked used, so its page cannot vanish underneath us. */
    avl_node_t *left = block->by_addr_node.left;
    avl_node_t *right = block->by_addr_node.right;

    if (left) free_blocks_recursive(CONTAINER_OF(left, memory_block_t, by_addr_node));
    if (right) free_blocks_recursive(CONTAINER_OF(right, memory_block_t, by_addr_node));

    mem_tree_free(block);
}

/*
 * Searches the by_size AVL tree for a free block of at least 'size'
 * pages.  Smaller candidates are visited first, so the smallest suitable
 * block wins (best-fit).  When 'address' is non-NULL, only a block that
 * fully contains [address, address + size*PAGE_SIZE) qualifies.
 * Returns NULL when nothing fits.
 */
static memory_block_t *find_free_block_internal(memory_block_t *root, void *address, dword_t size)
{
    avl_node_t *ptr;

    /* Try strictly smaller blocks in the left subtree first. */
    if (root->by_size_node.left && (dword_t)root->size > size)
    {
        memory_block_t *left = CONTAINER_OF(root->by_size_node.left, memory_block_t, by_size_node);
        memory_block_t *block = find_free_block_internal(left, address, size);
        if (block) return block;
    }

    if ((dword_t)root->size >= size)
    {
        /* Walk every block of this exact size via the next_equal chain. */
        for (ptr = &root->by_size_node; ptr != NULL; ptr = ptr->next_equal)
        {
            memory_block_t *block = CONTAINER_OF(ptr, memory_block_t, by_size_node);

            if (!(block->flags & MEMORY_BLOCK_FREE)) continue;

            if (address != NULL)
            {
                /* Fixed-address request: block must cover the whole range. */
                dword_t block_start = (dword_t)block->address;
                dword_t block_end = block_start + ((dword_t)block->size * PAGE_SIZE) - 1;

                dword_t needed_start = (dword_t)address;
                dword_t needed_end = needed_start + (size * PAGE_SIZE) - 1;

                if ((needed_start < block_start) || (needed_end > block_end)) continue;
            }

            return block;
        }
    }

    if (!root->by_size_node.right) return NULL;
    memory_block_t *right = CONTAINER_OF(root->by_size_node.right, memory_block_t, by_size_node);
    return find_free_block_internal(right, address, size);
}

/*
 * Finds a free block of at least 'size' pages in 'address_space'
 * (optionally one containing 'address').  Returns NULL when none exists.
 *
 * BUGFIX: guard against an empty by_size tree — the original applied
 * CONTAINER_OF to a NULL root and dereferenced the bogus pointer in
 * find_free_block_internal() (find_block_by_addr() already had this
 * check).
 */
static memory_block_t *find_free_block(memory_address_space_t *address_space, void *address, dword_t size)
{
    if (!address_space->by_size_tree.root) return NULL;

    memory_block_t *root_block = CONTAINER_OF(address_space->by_size_tree.root, memory_block_t, by_size_node);
    return find_free_block_internal(root_block, address, size);
}

static void *create_page_directory(void)
{
    dword_t *current = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t new_dir_buffer[PAGE_SIZE / sizeof(dword_t)];

    memset(&new_dir_buffer[USER_PAGE_START],
           0,
           (USER_PAGE_END - USER_PAGE_START + 1) * sizeof(dword_t));

    memcpy(&new_dir_buffer[KERNEL_PAGE_START],
           &current[KERNEL_PAGE_START],
           (KERNEL_PAGE_END - KERNEL_PAGE_START + 1) * sizeof(dword_t));

    void *directory = alloc_physical_page();
    if (directory == NULL) return NULL;

    new_dir_buffer[PAGEDIR_SELF_ENTRY] = (dword_t)directory | PAGE_PRESENT | PAGE_WRITABLE;
    write_physical(directory, new_dir_buffer, PAGE_SIZE);

    return directory;
}

/*
 * Walks the multiboot memory map and trims any pair of entries whose
 * ranges overlap, so later consumers can assume disjoint regions.  The
 * later-starting entry of each overlapping pair is moved up past the
 * earlier one; an entry fully swallowed ends up with length 0.
 */
static void fix_overlapping_sections(multiboot_tag_mmap_t *mmap)
{
    multiboot_mmap_entry_t *entry;

    for (entry = (multiboot_mmap_entry_t*)(mmap + 1);
         (uintptr_t)entry < ((uintptr_t)mmap + mmap->size);
         entry = (multiboot_mmap_entry_t*)((uintptr_t)entry + mmap->entry_size))
    {
        multiboot_mmap_entry_t *ptr;

        /* Compare against every entry preceding this one. */
        for (ptr = (multiboot_mmap_entry_t*)(mmap + 1);
             (uintptr_t)ptr < (uintptr_t)entry;
             ptr = (multiboot_mmap_entry_t*)((uintptr_t)ptr + mmap->entry_size))
        {
            qword_t entry_end = entry->base + entry->length;
            qword_t ptr_end = ptr->base + ptr->length;

            if (entry->base > ptr->base && entry->base < ptr_end)
            {
                entry->base = ptr_end;
                if (entry->base >= entry_end) entry->length = 0;
                else entry->length = entry_end - entry->base;
            }
            else if (ptr->base > entry->base && ptr->base < entry_end)
            {
                ptr->base = entry_end;
                if (ptr->base >= ptr_end) ptr->length = 0;
                else ptr->length = ptr_end - ptr->base;
            }
        }
    }
}

/* Merges 'mem_block' with every immediately-following free neighbor in
 * address order, growing this block and discarding the neighbors.
 * Returns the (unchanged) merged block. */
static inline memory_block_t *combine_blocks_forward(memory_block_t *mem_block)
{
    for (;;)
    {
        avl_node_t *successor = avl_get_next_node(&mem_block->by_addr_node);
        if (successor == NULL) break;

        memory_block_t *neighbor = CONTAINER_OF(successor, memory_block_t, by_addr_node);
        if (!(neighbor->flags & MEMORY_BLOCK_FREE)) break;

        /* Grow this block over the neighbor, then drop the neighbor. */
        size_t merged_size = mem_block->size + neighbor->size;
        avl_tree_change_key(&mem_block->address_space->by_size_tree, &mem_block->by_size_node, &merged_size);

        avl_tree_remove(&mem_block->address_space->by_addr_tree, &neighbor->by_addr_node);
        avl_tree_remove(&mem_block->address_space->by_size_tree, &neighbor->by_size_node);
        mem_tree_free(neighbor);
    }

    return mem_block;
}

/* Merges 'mem_block' into every immediately-preceding free neighbor in
 * address order; the earlier block absorbs the later one each round.
 * Returns the surviving (merged) block. */
static inline memory_block_t *combine_blocks_backward(memory_block_t *mem_block)
{
    for (;;)
    {
        avl_node_t *predecessor = avl_get_previous_node(&mem_block->by_addr_node);
        if (predecessor == NULL) break;

        memory_block_t *neighbor = CONTAINER_OF(predecessor, memory_block_t, by_addr_node);
        if (!(neighbor->flags & MEMORY_BLOCK_FREE)) break;

        /* Grow the predecessor over this block, then drop this block. */
        size_t merged_size = neighbor->size + mem_block->size;
        avl_tree_change_key(&mem_block->address_space->by_size_tree, &neighbor->by_size_node, &merged_size);

        avl_tree_remove(&mem_block->address_space->by_addr_tree, &mem_block->by_addr_node);
        avl_tree_remove(&mem_block->address_space->by_size_tree, &mem_block->by_size_node);
        mem_tree_free(mem_block);

        mem_block = neighbor;
    }

    return mem_block;
}

/* Object destructor for memory sections: drops the reference the section
 * holds on its backing file, if any. */
void memory_cleanup(object_t *obj)
{
    memory_section_t *section = (memory_section_t*)obj;

    if (section->file != NULL)
    {
        dereference(&section->file->header);
    }
}

/* Returns the physical address of the page directory last loaded via
 * set_page_directory(). */
void *get_page_directory(void)
{
    return current_page_directory;
}

/*
 * Loads 'phys_addr' into CR3 and records it as the active directory.
 *
 * BUGFIX: the previous asm bounced the value through EAX without listing
 * EAX in the clobber list, so the compiler was free to keep a live value
 * there across the asm.  A single mov from whatever register the
 * compiler picks is sufficient; the "memory" clobber keeps memory
 * accesses ordered around the CR3 reload (which flushes non-global TLB
 * entries).
 */
void set_page_directory(void *phys_addr)
{
    current_page_directory = phys_addr;

    asm volatile ("mov %0, %%cr3" :: "r"(phys_addr) : "memory");
}

void *get_physical_address(void *virtual)
{
    dword_t virt_addr = PAGE_ALIGN((dword_t)virtual);
    dword_t pd_index = ADDR_TO_PDE(virt_addr), pt_index = ADDR_TO_PTE(virt_addr);
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + (pd_index << 12));

    if (!(page_directory[pd_index] & PAGE_PRESENT)) return INVALID_PAGE;
    if (!(page_table[pt_index] & PAGE_PRESENT)) return INVALID_PAGE;

    return (void*)(PAGE_ALIGN(page_table[pt_index]) + PAGE_OFFSET((dword_t)virtual));
}

/* Maps a physically contiguous range at the given virtual range, page by
 * page.  On any failure, every page mapped so far is rolled back and the
 * failing map_page() status is returned. */
dword_t map_memory_internal(void *physical, void *virtual, uintptr_t size, dword_t page_flags)
{
    dword_t offset;
    dword_t phys_base = PAGE_ALIGN((dword_t)physical);
    dword_t virt_base = PAGE_ALIGN((dword_t)virtual);

    size = PAGE_ALIGN_UP(size);
    page_flags &= 0xFFF;

    for (offset = 0; offset < size; offset += PAGE_SIZE)
    {
        dword_t status = map_page((void*)(phys_base + offset), (void*)(virt_base + offset), page_flags);
        if (status == ERR_SUCCESS) continue;

        /* Undo everything mapped so far before reporting the failure. */
        dword_t undo;
        for (undo = 0; undo < offset; undo += PAGE_SIZE)
        {
            unmap_page((void*)(virt_base + undo));
        }

        return status;
    }

    return ERR_SUCCESS;
}

/* Unmaps every page in the given virtual range and drops one reference
 * on each backing physical page. */
void unmap_memory_internal(void *virtual, dword_t size)
{
    dword_t virt_base = PAGE_ALIGN((dword_t)virtual);
    dword_t total = PAGE_ALIGN_UP(size);
    dword_t offset;

    for (offset = 0; offset < total; offset += PAGE_SIZE)
    {
        void *page_addr = (void*)(virt_base + offset);

        /* Look up the physical page before the mapping disappears. */
        void *physical = get_physical_address(page_addr);

        unmap_page(page_addr);
        dereference_page(physical);
    }
}

/*
 * Maps an arbitrary physical range into the given address space.
 *
 * physical    - physical base address (need not be page aligned)
 * virtual     - in: requested virtual address, or NULL for "anywhere";
 *               out: actual virtual address, carrying the physical
 *               address's page offset
 * size        - length of the range in bytes
 * block_flags - MEMORY_BLOCK_* flags for the new block (EVICTABLE is
 *               stripped, since these pages are not backed by a store)
 *
 * Returns ERR_INVALID when a specific virtual address is requested
 * whose page offset differs from the physical one, ERR_NOMEMORY when
 * no suitable free block exists, or the map_memory_internal error.
 */
dword_t map_memory_in_address_space(memory_address_space_t *address_space,
                                    void *physical,
                                    void **virtual,
                                    uintptr_t size,
                                    dword_t block_flags)
{
    dword_t ret;
    void *address = (void*)PAGE_ALIGN((uintptr_t)*virtual);
    uintptr_t aligned_physical = PAGE_ALIGN((uintptr_t)physical);
    if (*virtual != NULL && PAGE_OFFSET((uintptr_t)*virtual) != PAGE_OFFSET((uintptr_t)physical)) return ERR_INVALID;

    /* Convert the byte length into a page count covering the range. */
    size = (PAGE_ALIGN_UP((uintptr_t)physical + size - 1) - aligned_physical) >> 12;
    lock_acquire(&address_space->lock);

    memory_block_t *block = find_free_block(address_space, address, size);
    if (block == NULL)
    {
        lock_release(&address_space->lock);
        return ERR_NOMEMORY;
    }

    dword_t flags = PAGE_GLOBAL;
    dword_t real_address = (address != NULL) ? (dword_t)address : (dword_t)block->address;

    /* Translate block flags into page table flags. */
    block_flags &= ~MEMORY_BLOCK_EVICTABLE;
    if (block_flags & MEMORY_BLOCK_ACCESSIBLE) flags |= PAGE_PRESENT;
    if (block_flags & MEMORY_BLOCK_WRITABLE) flags |= PAGE_WRITABLE;
    if (block_flags & MEMORY_BLOCK_USERMODE) flags |= PAGE_USERMODE;

    ret = map_memory_internal((void*)aligned_physical, (void*)real_address, size * PAGE_SIZE, flags);
    if (ret != ERR_SUCCESS)
    {
        lock_release(&address_space->lock);
        return ret;
    }

    /* If the free block starts before the requested address, split off
     * the leading part as a new free block. */
    if ((dword_t)block->address < (dword_t)address)
    {
        memory_block_t *new_block = mem_tree_alloc();
        new_block->flags = MEMORY_BLOCK_FREE;
        new_block->address = block->address;
        new_block->size = (size_t)(((dword_t)address - block->address) / PAGE_SIZE);
        new_block->address_space = address_space;
        new_block->section = NULL;

        size_t new_size = block->size - new_block->size;
        avl_tree_change_key(&address_space->by_size_tree, &block->by_size_node, &new_size);
        avl_tree_change_key(&address_space->by_addr_tree, &block->by_addr_node, &address);

        avl_tree_insert(&address_space->by_addr_tree, &new_block->by_addr_node);
        avl_tree_insert(&address_space->by_size_tree, &new_block->by_size_node);

        combine_blocks_backward(new_block);
    }

    /* If the block is larger than needed, split off the trailing part
     * as a new free block. */
    if (block->size > size)
    {
        memory_block_t *new_block = mem_tree_alloc();
        new_block->flags = MEMORY_BLOCK_FREE;
        new_block->address = (qword_t)(block->address + (size * PAGE_SIZE));
        new_block->size = (qword_t)((dword_t)block->size - size);
        new_block->address_space = address_space;
        new_block->section = NULL;

        avl_tree_change_key(&address_space->by_size_tree, &block->by_size_node, &size);

        avl_tree_insert(&address_space->by_addr_tree, &new_block->by_addr_node);
        avl_tree_insert(&address_space->by_size_tree, &new_block->by_size_node);

        combine_blocks_forward(new_block);
    }

    block->flags = block_flags;
    /* Report the mapped address including the sub-page offset. */
    *virtual = (void*)((dword_t)block->address + PAGE_OFFSET((uintptr_t)physical));

    lock_release(&address_space->lock);
    return ERR_SUCCESS;
}

/*
 * Pins a range of user or kernel memory by aliasing its physical pages
 * into the global mapping space, so the data stays reachable regardless
 * of which address space is active.
 *
 * virtual       - source address to pin (any alignment)
 * pinned        - in: requested alias address or NULL for "anywhere";
 *                 out: alias address carrying the source's page offset
 * size          - length of the range in bytes
 * lock_contents - TRUE: mark source pages copy-on-write and map the
 *                 alias read-only; FALSE: alias is writable
 *
 * Returns ERR_NOMEMORY when no free block exists in the mapping space.
 */
dword_t pin_memory(const void *virtual, void **pinned, uintptr_t size, bool_t lock_contents)
{
    uintptr_t i;
    uintptr_t virt_addr = PAGE_ALIGN((uintptr_t)virtual);
    void *address = (void*)PAGE_ALIGN((uintptr_t)*pinned);
    size = PAGE_ALIGN_UP(size) >> 12; /* size is a page count from here on */

    memory_address_space_t *address_space = check_usermode(virtual, 1) ? &get_current_process()->memory_space : &kernel_address_space;
    lock_acquire_shared(&address_space->lock);
    lock_acquire(&mapping_space.lock);

    memory_block_t *block = find_free_block(&mapping_space, address, size);
    if (block == NULL)
    {
        lock_release(&address_space->lock);
        lock_release(&mapping_space.lock);
        return ERR_NOMEMORY;
    }

    dword_t real_address = (address != NULL) ? (dword_t)address : (dword_t)block->address;
    dword_t new_flags = PAGE_PRESENT | PAGE_GLOBAL;
    if (!lock_contents) new_flags |= PAGE_WRITABLE;

    for (i = 0; i < size; i++)
    {
        void *virt_page = (void*)(virt_addr + i * PAGE_SIZE);
        void *phys_page = get_physical_address(virt_page);

        if (lock_contents)
        {
            /* Fix: the lookup used (virt_addr + i) — a page index where
             * a byte offset is needed — so only the first iteration hit
             * the right block. Use the page-aligned address instead. */
            memory_block_t *source_block = find_block_by_addr(address_space, virt_page);
            ASSERT(source_block != NULL);
            source_block->flags |= MEMORY_BLOCK_COPY_ON_WRITE;
            set_page_flags(virt_page, get_page_flags(virt_page) & ~PAGE_WRITABLE);
        }

        /* Fix: the alias target likewise used (real_address + i) instead
         * of advancing by whole pages. */
        dword_t ret = map_page(phys_page, (void*)(real_address + i * PAGE_SIZE), new_flags);
        ASSERT(ret == ERR_SUCCESS);
        reference_page(phys_page);
    }

    /* Split off the leading part of the free block, if any. */
    if ((dword_t)block->address < (dword_t)address)
    {
        memory_block_t *new_block = mem_tree_alloc();
        new_block->flags = MEMORY_BLOCK_FREE;
        new_block->address = block->address;
        new_block->size = (size_t)(((dword_t)address - block->address) / PAGE_SIZE);
        new_block->address_space = &mapping_space;
        new_block->section = NULL;

        size_t new_size = block->size - new_block->size;
        avl_tree_change_key(&mapping_space.by_size_tree, &block->by_size_node, &new_size);
        avl_tree_change_key(&mapping_space.by_addr_tree, &block->by_addr_node, &address);

        avl_tree_insert(&mapping_space.by_addr_tree, &new_block->by_addr_node);
        avl_tree_insert(&mapping_space.by_size_tree, &new_block->by_size_node);

        combine_blocks_backward(new_block);
    }

    /* Split off the trailing part of the free block, if any. */
    if ((dword_t)block->size > size)
    {
        memory_block_t *new_block = mem_tree_alloc();
        new_block->flags = MEMORY_BLOCK_FREE;
        new_block->address = (qword_t)(block->address + (size * PAGE_SIZE));
        new_block->size = (qword_t)((dword_t)block->size - size);
        new_block->address_space = &mapping_space;
        new_block->section = NULL;

        avl_tree_change_key(&mapping_space.by_size_tree, &block->by_size_node, &size);

        avl_tree_insert(&mapping_space.by_addr_tree, &new_block->by_addr_node);
        avl_tree_insert(&mapping_space.by_size_tree, &new_block->by_size_node);

        combine_blocks_forward(new_block);
    }

    block->flags = MEMORY_BLOCK_ACCESSIBLE;
    if (!lock_contents) block->flags |= MEMORY_BLOCK_WRITABLE;

    /* Fix: add the sub-page offset as integer arithmetic instead of
     * arithmetic on a void pointer (a GCC extension). */
    *pinned = (void*)((dword_t)block->address + PAGE_OFFSET((uintptr_t)virtual));

    lock_release(&address_space->lock);
    lock_release(&mapping_space.lock);
    return ERR_SUCCESS;
}

/*
 * Releases a mapping created by map_memory_in_address_space: unmaps the
 * pages, dereferences them, and returns the block to the free pool.
 * Returns ERR_INVALID when no block starts at the given address or the
 * block is already free.
 *
 * NOTE(review): the address_space parameter is never used — lock,
 * lookup and merge all operate on the global mapping_space. Confirm
 * this is intentional (unmap_memory always passes &mapping_space).
 */
dword_t unmap_memory_in_address_space(memory_address_space_t *address_space, void *virtual)
{
    lock_acquire(&mapping_space.lock);
    uintptr_t aligned_address = PAGE_ALIGN((uintptr_t)virtual);

    avl_node_t *node = avl_tree_lookup(&mapping_space.by_addr_tree, &aligned_address);
    if (node == NULL)
    {
        lock_release(&mapping_space.lock);
        return ERR_INVALID;
    }

    memory_block_t *mem_block = CONTAINER_OF(node, memory_block_t, by_addr_node);
    if (mem_block->flags & MEMORY_BLOCK_FREE)
    {
        lock_release(&mapping_space.lock);
        return ERR_INVALID;
    }

    /* Unmap the pages and drop the references pin/map took on them. */
    unmap_memory_internal((void*)((dword_t)mem_block->address), (dword_t)mem_block->size * PAGE_SIZE);

    /* Mark the block free and coalesce with free neighbors. */
    mem_block->flags = MEMORY_BLOCK_FREE;
    mem_block = combine_blocks_backward(mem_block);
    mem_block = combine_blocks_forward(mem_block);

    lock_release(&mapping_space.lock);
    return ERR_SUCCESS;
}

/* Convenience wrapper: maps a physical range into the global kernel
 * mapping space. See map_memory_in_address_space for semantics. */
dword_t map_memory(void *physical, void **virtual, uintptr_t size, dword_t block_flags)
{
    return map_memory_in_address_space(&mapping_space, physical, virtual, size, block_flags);
}

/* Convenience wrapper: releases a mapping created through map_memory.
 * See unmap_memory_in_address_space for semantics. */
dword_t unmap_memory(void *virtual)
{
    return unmap_memory_in_address_space(&mapping_space, virtual);
}

/*
 * Reserves a block of virtual memory in the given address space,
 * optionally backed by a memory section. The pages themselves are
 * populated lazily by the page fault handler.
 *
 * address        - in: requested address or NULL for "anywhere";
 *                  out: page-aligned address of the allocated block
 * size           - length in bytes (rounded up to whole pages)
 * block_flags    - MEMORY_BLOCK_* flags (FREE and COPY_ON_WRITE are
 *                  stripped; COW is re-added for writable, non-direct
 *                  sections)
 * section        - backing section, or NULL for anonymous memory
 * section_offset - byte offset of this block within the section
 *
 * Returns ERR_INVALID for a zero-size request, ERR_NOMEMORY when no
 * free block fits.
 */
dword_t alloc_memory_in_address_space(memory_address_space_t *address_space,
                                      void **address,
                                      dword_t size,
                                      dword_t block_flags,
                                      memory_section_t *section,
                                      qword_t section_offset)
{
    void *base_address = (void*)PAGE_ALIGN((uintptr_t)*address);

    block_flags &= ~(MEMORY_BLOCK_FREE | MEMORY_BLOCK_COPY_ON_WRITE);
    size = PAGE_ALIGN_UP(size) >> 12; /* size is a page count from here on */
    if (size == 0) return ERR_INVALID;

    lock_acquire(&address_space->lock);

    memory_block_t *block = find_free_block(address_space, base_address, size);
    if (block == NULL)
    {
        lock_release(&address_space->lock);
        return ERR_NOMEMORY;
    }

    if (section)
    {
        /* The block keeps a reference on its backing section. */
        reference(&section->header);
        block->section = section;
        block->section_offset = section_offset;

        /* Writable sections without direct write-through are mapped
         * copy-on-write so private changes don't reach the file. */
        if ((section->flags & (MEMORY_SECTION_WRITABLE | MEMORY_SECTION_DIRECT_WRITE)) == MEMORY_SECTION_WRITABLE)
        {
            block_flags |= MEMORY_BLOCK_COPY_ON_WRITE;
        }
    }

    /* Split off the leading part of the free block, if any. */
    if ((dword_t)block->address < (dword_t)base_address)
    {
        memory_block_t *new_block = mem_tree_alloc();
        new_block->flags = MEMORY_BLOCK_FREE;
        new_block->address = block->address;
        new_block->size = (size_t)(((dword_t)base_address - block->address) / PAGE_SIZE);
        new_block->address_space = address_space;
        new_block->section = NULL;

        size_t new_size = block->size - new_block->size;
        avl_tree_change_key(&address_space->by_size_tree, &block->by_size_node, &new_size);
        avl_tree_change_key(&address_space->by_addr_tree, &block->by_addr_node, &base_address);

        avl_tree_insert(&address_space->by_addr_tree, &new_block->by_addr_node);
        avl_tree_insert(&address_space->by_size_tree, &new_block->by_size_node);

        combine_blocks_backward(new_block);
    }

    /* Split off the trailing part of the free block, if any. */
    if ((dword_t)block->size > size)
    {
        memory_block_t *new_block = mem_tree_alloc();
        new_block->flags = MEMORY_BLOCK_FREE;
        new_block->address = (qword_t)(block->address + (size * PAGE_SIZE));
        new_block->size = (qword_t)((dword_t)block->size - size);
        new_block->address_space = address_space;
        new_block->section = NULL;

        avl_tree_change_key(&address_space->by_size_tree, &block->by_size_node, &size);

        avl_tree_insert(&address_space->by_addr_tree, &new_block->by_addr_node);
        avl_tree_insert(&address_space->by_size_tree, &new_block->by_size_node);

        combine_blocks_forward(new_block);
    }

    block->flags = block_flags;
    *address = (void*)((dword_t)block->address);
    if (block_flags & MEMORY_BLOCK_EVICTABLE) list_append(&address_space->evictable_blocks, &block->evict_link);

    lock_release(&address_space->lock);
    return ERR_SUCCESS;
}

/*
 * Frees a block previously allocated with alloc_memory_in_address_space:
 * releases its pages/section, removes it from the eviction list if
 * needed, marks it free and coalesces it with free neighbors.
 * Returns ERR_INVALID when no block starts at the given address or the
 * block is already free.
 */
dword_t free_memory_in_address_space(memory_address_space_t *address_space, void *address)
{
    lock_acquire(&address_space->lock);
    uintptr_t aligned_address = PAGE_ALIGN((uintptr_t)address);

    avl_node_t *node = avl_tree_lookup(&address_space->by_addr_tree, &aligned_address);
    if (node == NULL)
    {
        lock_release(&address_space->lock);
        return ERR_INVALID;
    }

    memory_block_t *mem_block = CONTAINER_OF(node, memory_block_t, by_addr_node);
    if (mem_block->flags & MEMORY_BLOCK_FREE)
    {
        lock_release(&address_space->lock);
        return ERR_INVALID;
    }

    /* Release the block's pages and backing section reference. */
    release_memory_block(mem_block);

    if (mem_block->flags & MEMORY_BLOCK_EVICTABLE) list_remove(&mem_block->evict_link);
    mem_block->flags = MEMORY_BLOCK_FREE;

    mem_block = combine_blocks_backward(mem_block);
    mem_block = combine_blocks_forward(mem_block);

    lock_release(&address_space->lock);
    return ERR_SUCCESS;
}

/* Forces every page in [address, address + size) to be committed by
 * reading one word from it, letting the page fault handler populate the
 * mapping. Returns ERR_BADPTR when a page cannot be faulted in. */
dword_t commit_pages(void *address, size_t size)
{
    uintptr_t page;
    uintptr_t first_page = PAGE_ALIGN((uintptr_t)address);
    uintptr_t last_page = PAGE_ALIGN_UP(first_page + size - 1);

    EH_TRY
    {
        for (page = first_page; page <= last_page; page += PAGE_SIZE)
        {
            /* A single volatile read is enough to trigger the fault. */
            volatile uintptr_t probe = *(volatile uintptr_t*)page;
            UNUSED_PARAMETER(probe);
        }
    }
    EH_CATCH
    {
        EH_ESCAPE(return ERR_BADPTR);
    }
    EH_DONE;

    return ERR_SUCCESS;
}

/*
 * Touches and then unmaps every page in [address, address + size).
 * The probing read forces each page to be committed first, so that
 * unmap_page always operates on a present mapping. Returns ERR_BADPTR
 * when a page cannot be faulted in, or the unmap_page error code.
 */
dword_t uncommit_pages(void *address, size_t size)
{
    uintptr_t i;
    uintptr_t first_page = PAGE_ALIGN((uintptr_t)address);
    uintptr_t last_page = PAGE_ALIGN_UP(first_page + size - 1);

    EH_TRY
    {
        for (i = first_page; i <= last_page; i += PAGE_SIZE)
        {
            /* Probe the page so it is present before unmapping. */
            volatile uintptr_t value = *(volatile uintptr_t*)i;
            UNUSED_PARAMETER(value);

            /* NOTE(review): this returns from inside EH_TRY without
             * EH_ESCAPE, unlike the catch path below — confirm the EH
             * macros tolerate a plain return from the try body. */
            dword_t ret = unmap_page((void*)i);
            if (ret != ERR_SUCCESS) return ret;
        }
    }
    EH_CATCH
    {
        EH_ESCAPE(return ERR_BADPTR);
    }
    EH_DONE;

    return ERR_SUCCESS;
}

/*
 * Copies size bytes from an arbitrary physical address into buffer,
 * going through a temporary kernel mapping one page at a time.
 * Runs inside a critical section so the temporary mapping cannot be
 * preempted away. Returns ERR_NOMEMORY when no temporary page slot is
 * available.
 */
dword_t read_physical(void *physical, void *buffer, dword_t size)
{
    critical_t critical;
    dword_t ret = ERR_SUCCESS;
    dword_t page;
    dword_t first_page = PAGE_ALIGN((dword_t)physical);
    dword_t last_page = PAGE_ALIGN((dword_t)physical + size - 1);
    dword_t offset = PAGE_OFFSET((dword_t)physical);

    enter_critical(&critical);

    for (page = first_page; page <= last_page; page += PAGE_SIZE)
    {
        /* The final page may be partial; earlier pages are copied from
         * offset (nonzero only on the first iteration) to the end. */
        dword_t length = ((page == last_page) ? ((dword_t)physical + size - page) : PAGE_SIZE) - offset;

        void *mapping = map_temporary_page((void*)page, PAGE_PRESENT);
        if (mapping == NULL)
        {
            /* Fix: returning here leaked the critical section; break so
             * leave_critical always runs. */
            ret = ERR_NOMEMORY;
            break;
        }

        memcpy(buffer, (void*)((dword_t)mapping + offset), length);
        unmap_temporary_page(mapping);

        buffer = (void*)((dword_t)buffer + length);
        offset = 0;
    }

    leave_critical(&critical);
    return ret;
}

/*
 * Copies size bytes from buffer to an arbitrary physical address,
 * going through a temporary writable kernel mapping one page at a
 * time. Runs inside a critical section so the temporary mapping cannot
 * be preempted away. Returns ERR_NOMEMORY when no temporary page slot
 * is available.
 */
dword_t write_physical(void *physical, void *buffer, dword_t size)
{
    critical_t critical;
    dword_t ret = ERR_SUCCESS;
    dword_t page;
    dword_t first_page = PAGE_ALIGN((dword_t)physical);
    dword_t last_page = PAGE_ALIGN((dword_t)physical + size - 1);
    dword_t offset = PAGE_OFFSET((dword_t)physical);

    enter_critical(&critical);

    for (page = first_page; page <= last_page; page += PAGE_SIZE)
    {
        /* The final page may be partial; offset is nonzero only on the
         * first iteration. */
        dword_t length = ((page == last_page) ? ((dword_t)physical + size - page) : PAGE_SIZE) - offset;

        void *mapping = map_temporary_page((void*)page, PAGE_PRESENT | PAGE_WRITABLE);
        if (mapping == NULL)
        {
            /* Fix: returning here leaked the critical section; break so
             * leave_critical always runs. */
            ret = ERR_NOMEMORY;
            break;
        }

        memcpy((void*)((dword_t)mapping + offset), buffer, length);
        unmap_temporary_page(mapping);

        buffer = (void*)((dword_t)buffer + length);
        offset = 0;
    }

    leave_critical(&critical);
    return ret;
}

/*
 * System call: allocates virtual memory in a process's address space.
 *
 * process - target process handle, or INVALID_HANDLE for the caller
 * address - in/out: requested/actual base address (user pointer when
 *           called from user mode)
 * size    - length in bytes
 * flags   - MEMORY_BLOCK_* flags; user-mode callers are restricted to
 *           WRITABLE/ACCESSIBLE and always get USERMODE | EVICTABLE
 */
sysret_t syscall_alloc_memory(handle_t process, void **address, dword_t size, dword_t flags)
{
    process_t *proc;
    dword_t ret = ERR_SUCCESS;
    void *safe_address;
    void **local_address = address;

    if (get_previous_mode() == USER_MODE)
    {
        /* Sanitize user-supplied flags. */
        flags &= MEMORY_BLOCK_WRITABLE | MEMORY_BLOCK_ACCESSIBLE;
        flags |= MEMORY_BLOCK_USERMODE | MEMORY_BLOCK_EVICTABLE;

        if (!check_usermode(address, sizeof(void*))) return ERR_BADPTR;

        /* Copy the requested address into kernel space so the allocator
         * never touches the user pointer directly. */
        EH_TRY
        {
            safe_address = *address;
            local_address = &safe_address;
        }
        EH_CATCH
        {
            EH_ESCAPE(return ERR_BADPTR);
        }
        EH_DONE;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    ret = alloc_memory_in_address_space(&proc->memory_space, local_address, size, flags, NULL, 0ULL);

    /* Report the resulting address back to the user-mode caller. */
    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY *address = safe_address;
        EH_DONE;
    }

    dereference(&proc->header);
    return ret;
}

/* System call: frees a block allocated in the address space of the
 * given process, or of the current process when the handle is
 * INVALID_HANDLE. */
sysret_t syscall_free_memory(handle_t process, void *address)
{
    process_t *proc;
    dword_t status;

    if (process == INVALID_HANDLE)
    {
        proc = get_current_process();
        reference(&proc->header);
    }
    else if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc))
    {
        return ERR_INVALID;
    }

    status = free_memory_in_address_space(&proc->memory_space, address);

    dereference(&proc->header);
    return status;
}

/*
 * System call: commits (faults in) all pages of a range in the target
 * process's address space. Temporarily switches to the target process
 * so the range is resolved in the right address space.
 */
sysret_t syscall_commit_memory(handle_t process, void *address, dword_t size)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc;

    if (get_previous_mode() == USER_MODE && !check_usermode(address, size)) return ERR_BADPTR;

    if (process == INVALID_HANDLE)
    {
        proc = get_current_process();
        reference(&proc->header);
    }
    else
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }

    if (proc->terminating)
    {
        /* Fix: the early return leaked the process reference taken above. */
        dereference(&proc->header);
        return ERR_CANCELED;
    }

    lock_acquire_shared(&proc->memory_space.lock);

    process_t *prev_proc = switch_process(proc);
    ret = commit_pages(address, size);
    switch_process(prev_proc);

    lock_release(&proc->memory_space.lock);
    dereference(&proc->header);
    return ret;
}

/*
 * System call: uncommits (unmaps) all pages of a range in the target
 * process's address space. Temporarily switches to the target process
 * so the range is resolved in the right address space.
 */
sysret_t syscall_uncommit_memory(handle_t process, void *address, dword_t size)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc;

    if (get_previous_mode() == USER_MODE && !check_usermode(address, size)) return ERR_BADPTR;

    if (process == INVALID_HANDLE)
    {
        proc = get_current_process();
        reference(&proc->header);
    }
    else
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }

    if (proc->terminating)
    {
        /* Fix: the early return leaked the process reference taken above. */
        dereference(&proc->header);
        return ERR_CANCELED;
    }

    lock_acquire_shared(&proc->memory_space.lock);

    process_t *prev_proc = switch_process(proc);
    ret = uncommit_pages(address, size);
    switch_process(prev_proc);

    lock_release(&proc->memory_space.lock);
    dereference(&proc->header);
    return ret;
}

/*
 * System call: changes the block flags (and matching page protection)
 * of the memory block containing the given address.
 *
 * FREE and COPY_ON_WRITE cannot be set by callers; user-mode callers
 * additionally always get USERMODE | EVICTABLE. Returns ERR_INVALID
 * for an unknown or free block, ERR_FORBIDDEN when trying to make a
 * read-only section's mapping writable.
 */
sysret_t syscall_set_memory_flags(handle_t process, void *address, dword_t flags)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc;

    flags &= ~(MEMORY_BLOCK_FREE | MEMORY_BLOCK_COPY_ON_WRITE);
    if (get_previous_mode() == USER_MODE) flags |= MEMORY_BLOCK_USERMODE | MEMORY_BLOCK_EVICTABLE;

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    /* Switch into the target process so page table updates below hit
     * the right address space. */
    process_t *prev_proc = switch_process(proc);
    lock_acquire(&proc->memory_space.lock);

    memory_block_t *block = find_block_by_addr(&proc->memory_space, address);
    if (block == NULL)
    {
        ret = ERR_INVALID;
        goto cleanup;
    }

    if (block->section)
    {
        /* A section mapping can only become writable if the section
         * itself allows writing. */
        if ((flags & MEMORY_BLOCK_WRITABLE) && !(block->section->flags & MEMORY_SECTION_WRITABLE))
        {
            ret = ERR_FORBIDDEN;
            goto cleanup;
        }
    }

    if (block->flags & MEMORY_BLOCK_FREE)
    {
        ret = ERR_INVALID;
        goto cleanup;
    }

    /* Translate block flags to page table flags and apply them to
     * every page of the block. */
    dword_t page;
    dword_t start_address = (dword_t)block->address;
    dword_t end_address = start_address + (dword_t)block->size * PAGE_SIZE;
    dword_t page_flags = 0;

    if (flags & MEMORY_BLOCK_ACCESSIBLE) page_flags |= PAGE_PRESENT;
    if (flags & MEMORY_BLOCK_WRITABLE) page_flags |= PAGE_WRITABLE;

    if (flags & MEMORY_BLOCK_USERMODE) page_flags |= PAGE_USERMODE;
    else page_flags |= PAGE_GLOBAL;

    for (page = start_address; page < end_address; page += PAGE_SIZE)
    {
        set_page_flags((void*)page, page_flags);
    }

    /* Keep the eviction list in sync with the EVICTABLE transition. */
    if (!(block->flags & MEMORY_BLOCK_EVICTABLE) && (flags & MEMORY_BLOCK_EVICTABLE))
    {
        list_append(&proc->memory_space.evictable_blocks, &block->evict_link);
    }
    else if ((block->flags & MEMORY_BLOCK_EVICTABLE) && !(flags & MEMORY_BLOCK_EVICTABLE))
    {
        list_remove(&block->evict_link);
    }

    /* Preserve only the COW bit from the old flags, then apply the new ones. */
    block->flags &= MEMORY_BLOCK_COPY_ON_WRITE;
    block->flags |= flags;

cleanup:
    lock_release(&proc->memory_space.lock);
    switch_process(prev_proc);
    dereference(&proc->header);
    return ret;
}

/* System call: reports address, size and flags of the memory block
 * containing the given address in the target process's address space.
 * Returns ERR_INVALID for an unknown block, ERR_BADPTR when the info
 * structure is not writable. */
sysret_t syscall_query_memory(handle_t process, void *address, memory_block_info_t *info)
{
    process_t *proc;
    dword_t status = ERR_SUCCESS;

    if ((get_previous_mode() == USER_MODE) && !check_usermode(info, sizeof(memory_block_info_t)))
    {
        return ERR_BADPTR;
    }

    if (process == INVALID_HANDLE)
    {
        proc = get_current_process();
        reference(&proc->header);
    }
    else if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc))
    {
        return ERR_INVALID;
    }

    lock_acquire_shared(&proc->memory_space.lock);

    memory_block_t *block = find_block_by_addr(&proc->memory_space, address);
    if (block == NULL)
    {
        status = ERR_INVALID;
        goto cleanup;
    }

    /* Copy the block description out; the destination may be a user
     * pointer, so guard the writes with an exception handler. */
    EH_TRY
    {
        info->address = block->address;
        info->size = block->size;
        info->flags = block->flags;
    }
    EH_CATCH
    {
        status = ERR_BADPTR;
    }
    EH_DONE;

cleanup:
    lock_release(&proc->memory_space.lock);
    dereference(&proc->header);
    return status;
}

/*
 * System call: reads memory from another process's address space into
 * the caller's buffer. Data is staged page-by-page through a kernel
 * stack buffer while temporarily switched into the target process.
 * A NULL process handle degenerates into a guarded memmove within the
 * current address space.
 */
sysret_t syscall_read_memory(handle_t process, void *address, void *buffer, dword_t size)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc;
    byte_t page_cache[PAGE_SIZE];

    if (get_previous_mode() == USER_MODE && !check_usermode(buffer, size)) return ERR_BADPTR;

    if (process == INVALID_HANDLE)
    {
        EH_TRY
        {
            memmove(buffer, address, size);
            return ERR_SUCCESS;
        }
        EH_CATCH
        {
            EH_ESCAPE(return ERR_FORBIDDEN);
        }
        EH_DONE;
    }

    if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;

    if (proc->terminating)
    {
        /* Fix: the early return leaked the process reference taken above. */
        dereference(&proc->header);
        return ERR_CANCELED;
    }

    lock_acquire_shared(&proc->memory_space.lock);

    dword_t page;
    dword_t first_page = PAGE_ALIGN((dword_t)address);
    dword_t last_page = PAGE_ALIGN((dword_t)address + size - 1);
    dword_t offset = PAGE_OFFSET((dword_t)address);

    for (page = first_page; page <= last_page; page += PAGE_SIZE)
    {
        /* The final page may be partial; offset is nonzero only on the
         * first iteration. */
        dword_t length = ((page == last_page) ? ((dword_t)address + size - page) : PAGE_SIZE) - offset;

        /* Stage the data from the target address space... */
        process_t *prev_proc = switch_process(proc);

        EH_TRY memcpy(&page_cache[offset], (void*)(page + offset), length);
        EH_CATCH ret = ERR_FORBIDDEN;
        EH_DONE;

        switch_process(prev_proc);
        if (ret != ERR_SUCCESS) break;

        /* ...then copy it out to the caller's buffer. */
        EH_TRY memcpy(buffer, &page_cache[offset], length);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        buffer = (void*)((dword_t)buffer + length);
        offset = 0;
        if (ret != ERR_SUCCESS) break;
    }

    lock_release(&proc->memory_space.lock);
    dereference(&proc->header);
    return ret;
}

/*
 * System call: writes the caller's buffer into another process's
 * address space. Data is staged page-by-page through a kernel stack
 * buffer while temporarily switched into the target process. A NULL
 * process handle degenerates into a guarded memmove within the current
 * address space.
 */
sysret_t syscall_write_memory(handle_t process, void *address, void *buffer, dword_t size)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc;
    byte_t page_cache[PAGE_SIZE];

    if (get_previous_mode() == USER_MODE && !check_usermode(buffer, size)) return ERR_BADPTR;

    if (process == INVALID_HANDLE)
    {
        EH_TRY
        {
            memmove(address, buffer, size);
            return ERR_SUCCESS;
        }
        EH_CATCH
        {
            EH_ESCAPE(return ERR_FORBIDDEN);
        }
        EH_DONE;
    }

    if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;

    if (proc->terminating)
    {
        /* Fix: the early return leaked the process reference taken above. */
        dereference(&proc->header);
        return ERR_CANCELED;
    }

    lock_acquire(&proc->memory_space.lock);

    dword_t page;
    dword_t first_page = PAGE_ALIGN((dword_t)address);
    dword_t last_page = PAGE_ALIGN((dword_t)address + size - 1);
    dword_t offset = PAGE_OFFSET((dword_t)address);

    for (page = first_page; page <= last_page; page += PAGE_SIZE)
    {
        /* The final page may be partial; offset is nonzero only on the
         * first iteration. */
        dword_t length = ((page == last_page) ? ((dword_t)address + size - page) : PAGE_SIZE) - offset;

        /* Stage the data from the caller's buffer... */
        EH_TRY memcpy(&page_cache[offset], buffer, length);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        if (ret != ERR_SUCCESS) break;

        /* ...then copy it into the target address space. */
        process_t *prev_proc = switch_process(proc);

        EH_TRY memcpy((void*)(page + offset), &page_cache[offset], length);
        EH_CATCH ret = ERR_FORBIDDEN;
        EH_DONE;

        switch_process(prev_proc);

        buffer = (void*)((dword_t)buffer + length);
        offset = 0;
        if (ret != ERR_SUCCESS) break;
    }

    lock_release(&proc->memory_space.lock);
    dereference(&proc->header);
    return ret;
}

/* Allocates kernel pool memory: reserves a page-rounded block in the
 * kernel address space at the requested address (or anywhere when
 * address is NULL). Returns the block's address, or NULL on failure. */
void *alloc_pool(void *address, dword_t size, dword_t block_flags)
{
    void *base = address;
    dword_t status = alloc_memory_in_address_space(&kernel_address_space,
                                                   &base,
                                                   PAGE_ALIGN_UP(size),
                                                   block_flags,
                                                   NULL,
                                                   0ULL);

    return (status == ERR_SUCCESS) ? base : NULL;
}

/* Frees a kernel pool block previously returned by alloc_pool. */
void free_pool(void *address)
{
    free_memory_in_address_space(&kernel_address_space, address);
}

/*
 * System call: creates a named memory section, optionally backed by an
 * open file instance, and returns a handle to it.
 *
 * name     - section name (user pointer in user mode; copied safely)
 * file     - backing file instance handle, or INVALID_HANDLE
 * max_size - maximum size of the section in bytes
 * flags    - MEMORY_SECTION_WRITABLE / MEMORY_SECTION_DIRECT_WRITE
 *            (DIRECT_WRITE implies WRITABLE)
 * handle   - out: handle to the new section
 */
sysret_t syscall_create_memory_section(const char *name, handle_t file, size_t max_size, dword_t flags, handle_t *handle)
{
    dword_t ret = ERR_SUCCESS;
    handle_t safe_handle;
    char *safe_name = NULL;

    flags &= MEMORY_SECTION_WRITABLE | MEMORY_SECTION_DIRECT_WRITE;
    if (flags & MEMORY_SECTION_DIRECT_WRITE) flags |= MEMORY_SECTION_WRITABLE;

    if (get_previous_mode() == USER_MODE)
    {
        dword_t name_length = 0;

        EH_TRY name_length = strlen(name);
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;

        if (!check_usermode(name, name_length + 1)) return ERR_BADPTR;
        if (!check_usermode(handle, sizeof(handle_t))) return ERR_BADPTR;

        safe_name = copy_user_string(name);
        if (safe_name == NULL) return ERR_BADPTR;
    }
    else
    {
        safe_name = (char*)name;
    }

    memory_section_t *section = (memory_section_t*)malloc(sizeof(memory_section_t));
    if (section == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    file_instance_t *file_instance = NULL;
    if (file != INVALID_HANDLE)
    {
        if (!reference_by_handle(file, OBJECT_FILE_INSTANCE, (object_t**)&file_instance))
        {
            /* Fix: the section's object header is not initialized yet
             * (init_object runs below), so jumping to cleanup would
             * dereference an uninitialized header. Free it directly. */
            free(section);
            section = NULL;
            ret = ERR_INVALID;
            goto cleanup;
        }
    }

    list_init(&section->page_list);
    section->flags = flags;
    section->size = max_size;
    section->file = file != INVALID_HANDLE ? file_instance : NULL;

    init_object(&section->header, safe_name, OBJECT_MEMORY);
    ret = create_object(&section->header);
    if (ret != ERR_SUCCESS)
    {
        /* The object was never published; unwind its pieces by hand. */
        if (file_instance) dereference(&file_instance->header);
        if (section->header.name) free(section->header.name);
        free(section);
        section = NULL;
        goto cleanup;
    }

    ret = open_object(&section->header, 0, &safe_handle);
    if (ret == ERR_SUCCESS)
    {
        EH_TRY
        {
            *handle = safe_handle;
        }
        EH_CATCH
        {
            /* The user pointer went bad; close the handle we opened. */
            syscall_close_object(safe_handle);
            ret = ERR_BADPTR;
        }
        EH_DONE;
    }

cleanup:
    /* Drop the creation reference; the handle keeps the object alive. */
    if (section) dereference(&section->header);
    if (get_previous_mode() == USER_MODE) free(safe_name);

    return ret;
}

/*
 * System call: opens an existing named memory section and returns a
 * handle to it. The name and handle pointers are validated/copied when
 * the caller is user mode.
 */
sysret_t syscall_open_memory_section(const char *name, handle_t *handle)
{
    handle_t safe_handle;
    char *safe_name = NULL;

    if (get_previous_mode() == USER_MODE)
    {
        dword_t name_length = 0;

        EH_TRY name_length = strlen(name);
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;

        if (!check_usermode(name, name_length + 1)) return ERR_BADPTR;
        if (!check_usermode(handle, sizeof(handle_t))) return ERR_BADPTR;

        safe_name = copy_user_string(name);
        if (safe_name == NULL) return ERR_NOMEMORY;
    }
    else safe_name = (char*)name;

    dword_t ret = open_object_by_name(safe_name, OBJECT_MEMORY, 0, &safe_handle);

    /* Fix: only publish the handle on success — the original wrote an
     * uninitialized safe_handle to *handle (and could pass it to
     * syscall_close_object) when the open failed. */
    if (ret == ERR_SUCCESS)
    {
        EH_TRY
        {
            *handle = safe_handle;
        }
        EH_CATCH
        {
            syscall_close_object(safe_handle);
            ret = ERR_BADPTR;
        }
        EH_DONE;
    }

    if (get_previous_mode() == USER_MODE) free(safe_name);
    return ret;
}

/*
 * System call: maps a window of a memory section into a process's
 * address space.
 *
 * process - target process handle, or INVALID_HANDLE for the caller
 * section - handle to the memory section to map
 * address - in/out: requested/actual base address
 * offset  - byte offset into the section; must be page aligned
 * size    - length of the mapping in bytes
 * flags   - MEMORY_BLOCK_* flags; WRITABLE requires a writable section
 */
sysret_t syscall_map_memory_section(handle_t process, handle_t section, void **address, qword_t offset, size_t size, dword_t flags)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc = NULL;
    memory_section_t *mem_sec = NULL;
    void *safe_address;

    if (PAGE_OFFSET(offset) != 0) return ERR_INVALID;

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc))
        {
            ret = ERR_INVALID;
            goto cleanup;
        }
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    if (!reference_by_handle(section, OBJECT_MEMORY, (object_t**)&mem_sec))
    {
        ret = ERR_INVALID;
        goto cleanup;
    }

    /* Copy the requested address into kernel space when the caller is
     * user mode; the allocator must never touch the user pointer. */
    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(address, sizeof(void*)))
        {
            ret = ERR_BADPTR;
            goto cleanup;
        }

        EH_TRY safe_address = *address;
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        if (ret != ERR_SUCCESS) goto cleanup;
    }
    else
    {
        safe_address = *address;
    }

    /* A writable mapping requires a writable section. */
    if ((flags & MEMORY_BLOCK_WRITABLE) && !(mem_sec->flags & MEMORY_SECTION_WRITABLE))
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    ret = alloc_memory_in_address_space(&proc->memory_space, &safe_address, size, flags, mem_sec, offset);
    if (ret != ERR_SUCCESS) goto cleanup;

    /* Report the resulting address back to the caller. */
    EH_TRY *address = safe_address;
    EH_DONE;

cleanup:
    if (proc) dereference(&proc->header);
    if (mem_sec) dereference(&mem_sec->header);
    return ret;
}

/*
 * System call: writes every resident shared page of the memory section
 * mapped at the given address back to its backing file. A section
 * without a backing file is trivially flushed (ERR_SUCCESS).
 */
sysret_t syscall_flush_memory_section(handle_t process, void *address)
{
    dword_t ret = ERR_SUCCESS;
    process_t *proc = NULL;

    if (process != INVALID_HANDLE)
    {
        /* Fix: the original jumped to cleanup here, which dereferenced
         * a NULL proc and released a lock that was never acquired. */
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    lock_acquire_shared(&proc->memory_space.lock);

    memory_block_t *block = find_block_by_addr(&proc->memory_space, address);
    if (block == NULL || block->section == NULL)
    {
        ret = ERR_INVALID;
        goto cleanup;
    }

    /* Nothing to flush for anonymous (file-less) sections. */
    if (block->section->file == NULL) goto cleanup;

    list_entry_t *ptr;

    for (ptr = block->section->page_list.next; ptr != &block->section->page_list; ptr = ptr->next)
    {
        dword_t bytes_written;
        byte_t buffer[PAGE_SIZE];
        shared_page_t *shared = CONTAINER_OF(ptr, shared_page_t, link);

        /* Skip pages whose physical contents cannot be read right now. */
        ret = read_physical(shared->physical, buffer, PAGE_SIZE);
        if (ret != ERR_SUCCESS) continue;

        file_instance_t *file = block->section->file;
        lock_acquire(&file->global->volume->lock);
        ret = file->global->volume->driver->write_file(file, buffer, shared->offset, PAGE_SIZE, &bytes_written);
        lock_release(&file->global->volume->lock);
        if (ret != ERR_SUCCESS) break;
    }

cleanup:
    lock_release(&proc->memory_space.lock);
    dereference(&proc->header);
    return ret;
}

/*
 * System call: registers a page file so the eviction code can swap
 * pages out to disk.
 *
 * path        - page file path (user pointer in user mode; copied);
 *               NOTE(review): a user-mode NULL path skips the copy and
 *               leaves safe_path NULL — confirm SYSCALL_OPEN_FILE
 *               tolerates a NULL path
 * max_entries - capacity of the store in pages; INVALID_STORE_NUMBER is
 *               reserved as a sentinel, so it is decremented by one
 *
 * Requires PRIVILEGE_SET_PAGE_FILE for user-mode callers.
 */
sysret_t syscall_add_page_file(const char *path, dword_t max_entries)
{
    dword_t ret;
    char *safe_path = NULL;
    if (max_entries == INVALID_STORE_NUMBER) max_entries--;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_privileges(PRIVILEGE_SET_PAGE_FILE)) return ERR_FORBIDDEN;

        if (path)
        {
            dword_t path_length = 0;

            EH_TRY path_length = strlen(path);
            EH_CATCH EH_ESCAPE(return ERR_BADPTR);
            EH_DONE;

            if (!check_usermode(path, path_length + 1)) return ERR_BADPTR;

            safe_path = copy_user_string(path);
            if (!safe_path) return ERR_NOMEMORY;
        }
    }
    else safe_path = (char*)path;

    page_store_t *store = (page_store_t*)malloc(sizeof(page_store_t));
    if (store == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    /* One bit per entry tracks which slots of the store are in use. */
    store->bitmap = malloc((max_entries + 7) / 8);
    if (store->bitmap == NULL)
    {
        free(store);
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    memset(store->bitmap, 0, (max_entries + 7) / 8);
    store->num_entries = 0;
    store->max_entries = max_entries;
    list_init(&store->entry_list);

    /* Open the backing file uncached; it is deleted when closed. */
    ret = syscall(SYSCALL_OPEN_FILE,
                  safe_path,
                  &store->file_handle,
                  FILE_MODE_READ
                  | FILE_MODE_WRITE
                  | FILE_MODE_NO_CACHE
                  | FILE_MODE_DELETE_ON_CLOSE
                  | FILE_MODE_CREATE
                  | FILE_MODE_TRUNCATE,
                  0);
    if (ret != ERR_SUCCESS)
    {
        free(store->bitmap);
        free(store);
        goto cleanup;
    }

    lock_acquire(&page_store_lock);
    list_append(&page_stores, &store->link);
    lock_release(&page_store_lock);

cleanup:
    if (get_previous_mode() == USER_MODE) free(safe_path);
    return ret;
}

/*
 * Unregisters the page file named 'path', reading every page it still holds
 * back into memory before the store is destroyed.
 *
 * Returns ERR_SUCCESS, ERR_FORBIDDEN, ERR_BADPTR, ERR_NOMEMORY,
 * ERR_NOTFOUND if no registered store matches the path, or the first error
 * hit while restoring pages (in which case the store stays registered).
 *
 * Fixes relative to the original:
 *  - the name buffer used for the path comparison was shadowed by an inner
 *    declaration, so strcmp() always ran on a NULL pointer;
 *  - the successful query path leaked the (inner) name buffer;
 *  - the duplicate-entry scan advanced with 'ptr' instead of 'p' and took
 *    CONTAINER_OF of the wrong variable (infinite loop on the outer cursor);
 *  - the outer restore loop advanced through entry->link after free(entry)
 *    and loop_cleanup dereferenced the freed entry (use-after-free);
 *  - the failure path re-appended 'store' even when it was never removed
 *    (ERR_NOTFOUND) or already freed (partial restore), corrupting the list.
 */
sysret_t syscall_remove_page_file(const char *path)
{
    dword_t ret = ERR_SUCCESS;
    char *safe_path = NULL;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_privileges(PRIVILEGE_SET_PAGE_FILE)) return ERR_FORBIDDEN;

        if (path)
        {
            dword_t path_length = 0;

            EH_TRY path_length = strlen(path);
            EH_CATCH EH_ESCAPE(return ERR_BADPTR);
            EH_DONE;

            if (!check_usermode(path, path_length + 1)) return ERR_BADPTR;

            safe_path = copy_user_string(path);
            if (!safe_path) return ERR_NOMEMORY;
        }
    }
    else safe_path = (char*)path;

    list_entry_t *ptr;
    page_store_t *store = NULL;

    lock_acquire(&page_store_lock);

    /* Find the store whose backing file's name matches safe_path. */
    for (ptr = page_stores.next; ptr != &page_stores; ptr = ptr->next)
    {
        store = CONTAINER_OF(ptr, page_store_t, link);

        char *name_buffer = NULL;
        size_t name_buffer_size = 256;

        /* Grow the buffer until the file name fits. */
        while (TRUE)
        {
            name_buffer = malloc(name_buffer_size);   /* fix: no shadowing */
            if (!name_buffer) break;

            ret = syscall(SYSCALL_QUERY_FILE, store->file_handle, name_buffer, name_buffer_size);
            if (ret == ERR_SUCCESS) break;            /* keep the buffer for strcmp */

            free(name_buffer);
            name_buffer = NULL;
            if (ret != ERR_SMALLBUF) break;
            name_buffer_size *= 2;
        }

        if (ret == ERR_SUCCESS && name_buffer != NULL)
        {
            bool_t found = strcmp(name_buffer, safe_path) == 0;
            free(name_buffer);
            if (found) break;
        }
    }

    if (ptr == &page_stores)
    {
        /* The store was never removed from the list; just report the miss. */
        lock_release(&page_store_lock);
        ret = ERR_NOTFOUND;
        goto cleanup;
    }

    list_remove(&store->link);
    lock_release(&page_store_lock);

    /* Bring every page the store still owns back into its address space. */
    ptr = store->entry_list.next;
    while (ptr != &store->entry_list)
    {
        process_t *old_process = NULL;
        byte_t buffer[PAGE_SIZE];
        dword_t bytes_read;
        dword_t page_flags = 0;
        page_store_entry_t *entry = CONTAINER_OF(ptr, page_store_entry_t, link);
        list_entry_t *next = ptr->next;   /* fix: capture before entry may be freed */

        ret = syscall_read_file(store->file_handle, buffer, (qword_t)entry->number * (qword_t)PAGE_SIZE, PAGE_SIZE, &bytes_read);
        if (ret != ERR_SUCCESS) break;

        lock_acquire(&entry->address_space->lock);
        memory_block_t *block = find_block_by_addr(entry->address_space, entry->address);

        /* Rebuild the hardware page flags from the block's protection. */
        if (block->flags & MEMORY_BLOCK_ACCESSIBLE) page_flags |= PAGE_PRESENT;
        if ((block->flags & (MEMORY_BLOCK_WRITABLE | MEMORY_BLOCK_COPY_ON_WRITE))
            == MEMORY_BLOCK_WRITABLE)
        {
            page_flags |= PAGE_WRITABLE;
        }

        if (block->flags & MEMORY_BLOCK_USERMODE) page_flags |= PAGE_USERMODE;
        else page_flags |= PAGE_GLOBAL;

        /* The copy must happen inside the owning address space. */
        if (entry->address_space != &kernel_address_space)
        {
            old_process = switch_process(CONTAINER_OF(entry->address_space, process_t, memory_space));
        }

        ret = alloc_page(entry->address, page_flags);
        if (ret == ERR_SUCCESS)
        {
            /* Other entries sharing this slot now reference the in-memory
             * (transition) copy instead of the store. */
            list_entry_t *p;
            for (p = store->entry_list.next; p != &store->entry_list; p = p->next)
            {
                page_store_entry_t *other_entry = CONTAINER_OF(p, page_store_entry_t, link);

                if (entry != other_entry && other_entry->number == entry->number)
                {
                    list_remove(&other_entry->link);
                    list_append(&transition_pages, &other_entry->link);

                    other_entry->physical = get_physical_address(entry->address);
                    other_entry->number = INVALID_STORE_NUMBER;
                }
            }

            clear_bit(store->bitmap, entry->number);
            memcpy(entry->address, buffer, PAGE_SIZE);
        }

        /* Undo the address-space switch before touching list state again. */
        if (entry->address_space != &kernel_address_space) switch_process(old_process);
        lock_release(&entry->address_space->lock);

        if (ret != ERR_SUCCESS) break;

        list_remove(&entry->link);
        free(entry);
        ptr = next;
    }

    if (ret == ERR_SUCCESS)
    {
        /* NOTE(review): the original also only freed the store structure here;
         * store->bitmap and store->file_handle are presumably reclaimed
         * elsewhere — confirm before adding a close/free here. */
        free(store);
    }
    else
    {
        /* Restore failed part-way: keep the store registered (and alive). */
        lock_acquire(&page_store_lock);
        list_append(&page_stores, &store->link);
        lock_release(&page_store_lock);
    }

cleanup:
    if (get_previous_mode() == USER_MODE) free(safe_path);
    return ret;
}

/* Three-way comparison of two uintptr_t keys (AVL by-address ordering). */
static int compare_address(const void *key1, const void *key2)
{
    const uintptr_t lhs = *(const uintptr_t*)key1;
    const uintptr_t rhs = *(const uintptr_t*)key2;

    if (lhs > rhs) return 1;
    if (lhs < rhs) return -1;
    return 0;
}

/* Three-way comparison of two size_t keys (AVL by-size ordering). */
static int compare_size(const void *key1, const void *key2)
{
    const size_t lhs = *(const size_t*)key1;
    const size_t rhs = *(const size_t*)key2;

    if (lhs > rhs) return 1;
    if (lhs < rhs) return -1;
    return 0;
}

/*
 * Initializes 'mem_space' to manage the virtual range starting at
 * 'base_address' spanning 'page_count' pages, with a single free block
 * covering the whole pool.
 *
 * Returns ERR_SUCCESS, or ERR_NOMEMORY if the page directory or the initial
 * free block could not be allocated.
 */
dword_t create_address_space(void *base_address, dword_t page_count, memory_address_space_t *mem_space)
{
    dword_t ret = ERR_NOMEMORY;

    mem_space->pool_address = base_address;
    mem_space->pool_size = page_count;
    /* Two AVL indexes over the same blocks: by address (lookup) and by size (allocation). */
    AVL_TREE_INIT(&mem_space->by_addr_tree, memory_block_t, by_addr_node, address, compare_address);
    AVL_TREE_INIT(&mem_space->by_size_tree, memory_block_t, by_size_node, size, compare_size);
    lock_init(&mem_space->lock);
    list_init(&mem_space->evictable_blocks);
    mem_space->evict_blk_ptr = NULL;
    mem_space->evict_page_num = 0;
    mem_space->stats.used_virtual = 0;
    mem_space->stats.committed = 0;
    mem_space->stats.evicted = 0;
    mem_space->stats.shared = 0;

    if (get_page_directory() != INVALID_PAGE)
    {
        /* Normal (post-boot) path: the space gets its own page directory. */
        mem_space->page_directory = create_page_directory();
        if (mem_space->page_directory == NULL) return ret;
    }
    else
    {
        /* Boot path: reuse the boot page directory via its self-mapping entry. */
        dword_t *boot_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
        mem_space->page_directory = (void*)PAGE_ALIGN(boot_directory[PAGEDIR_SELF_ENTRY]);
    }

    /* Seed the trees with one free block spanning the entire pool.
     * NOTE(review): if mem_tree_alloc fails here, a page directory created
     * above is not released — confirm whether callers recover it. */
    memory_block_t *initial = mem_tree_alloc();
    if (initial != NULL)
    {
        initial->address = (uintptr_t)base_address;
        initial->size = page_count;
        initial->flags = MEMORY_BLOCK_FREE;
        initial->address_space = mem_space;
        initial->section = NULL;

        avl_tree_insert(&mem_space->by_addr_tree, &initial->by_addr_node);
        avl_tree_insert(&mem_space->by_size_tree, &initial->by_size_node);
        ret = ERR_SUCCESS;
    }

    /* Only user spaces are tracked in the global eviction/rotation list. */
    if (mem_space != &kernel_address_space)
    {
        list_append(&user_address_spaces, &mem_space->link);
    }

    return ret;
}

/*
 * Creates a copy-on-write clone of 'original' into 'clone' (fork-style):
 * the block trees are duplicated, and every user page-directory entry is
 * shared read-only between the two directories so first writes fault and
 * copy.
 *
 * Returns ERR_SUCCESS or ERR_NOMEMORY.
 */
dword_t clone_address_space(memory_address_space_t *original, memory_address_space_t *clone)
{
    dword_t i;
    dword_t ret = ERR_SUCCESS;

    lock_acquire_shared(&original->lock);

    clone->pool_address = original->pool_address;
    clone->pool_size = original->pool_size;
    AVL_TREE_INIT(&clone->by_addr_tree, memory_block_t, by_addr_node, address, NULL);
    AVL_TREE_INIT(&clone->by_size_tree, memory_block_t, by_size_node, size, NULL);
    lock_init(&clone->lock);
    list_init(&clone->evictable_blocks);
    clone->evict_blk_ptr = NULL;
    clone->evict_page_num = 0;
    clone->stats.used_virtual = original->stats.used_virtual;
    clone->stats.committed = original->stats.committed;
    clone->stats.evicted = original->stats.evicted;
    /* NOTE(review): 'shared' is seeded from 'committed', not from
     * original->stats.shared — presumably because every committed page
     * becomes shared (COW) after cloning, but confirm this is intended. */
    clone->stats.shared = original->stats.committed;

    if (original->by_addr_tree.root != NULL)
    {
        /* Deep-copy the block bookkeeping (not the pages themselves). */
        memory_block_t *root_block = CONTAINER_OF(original->by_addr_tree.root, memory_block_t, by_addr_node);
        if (!clone_blocks_recursive(clone, root_block))
        {
            ret = ERR_NOMEMORY;
            goto cleanup;
        }
    }

    if (!(clone->page_directory = create_page_directory()))
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    dword_t *clone_dir = map_temporary_page(clone->page_directory, PAGE_PRESENT | PAGE_WRITABLE);
    bool_t this_directory = original->page_directory == get_page_directory();

    /* The active directory is reachable through its self-map; any other
     * directory must be mapped in temporarily. */
    dword_t *original_dir;
    if (this_directory) original_dir = (dword_t*)PAGE_DIRECTORY_ADDR;
    else original_dir = map_temporary_page(original->page_directory, PAGE_PRESENT | PAGE_WRITABLE);

    /* Share every user page table read-only between both directories so a
     * write in either space triggers the COW fault path. */
    for (i = USER_PAGE_START; i <= USER_PAGE_END; i++)
    {
        reference_page((void*)PAGE_ALIGN(original_dir[i]));
        original_dir[i] &= ~PAGE_WRITABLE;
        clone_dir[i] = original_dir[i];
        /* Stale writable translations must be flushed if we edited the live directory. */
        if (this_directory) invalidate_tlb((void*)(i << 12));
    }

    if (!this_directory) unmap_temporary_page(original_dir);
    unmap_temporary_page(clone_dir);
    list_append(&user_address_spaces, &clone->link);

cleanup:
    lock_release(&original->lock);
    return ret;
}

/*
 * Moves 'mem_space' to the tail of the user address-space list.
 * Presumably a most-recently-used bump so eviction scans reach it last —
 * confirm against evict_page()'s traversal order.
 */
void bump_address_space(memory_address_space_t *mem_space)
{
    list_remove(&mem_space->link);
    list_append(&user_address_spaces, &mem_space->link);
}

/*
 * Tears down 'mem_space': frees every memory block in its trees and releases
 * its page directory. The directory must not be the one currently loaded.
 */
void delete_address_space(memory_address_space_t *mem_space)
{
    ASSERT(get_page_directory() != mem_space->page_directory);
    lock_acquire(&mem_space->lock);

    if (mem_space->by_addr_tree.root != NULL)
    {
        /* Both trees index the same block objects, so one recursive free
         * suffices; afterwards both roots are cleared. */
        memory_block_t *top = CONTAINER_OF(mem_space->by_addr_tree.root, memory_block_t, by_addr_node);
        free_blocks_recursive(top);
        mem_space->by_addr_tree.root = NULL;
        mem_space->by_size_tree.root = NULL;
    }

    free_physical_page(mem_space->page_directory);
    mem_space->page_directory = NULL;

    lock_release(&mem_space->lock);
}

/*
 * Looks up the eviction record for 'address' within 'block's address space.
 * Transition pages (still resident, awaiting write-out) are checked first,
 * then every registered page store. On success *entry — and, for store hits,
 * *store — identify the record; on failure the output parameters are left
 * pointing at whatever was examined last, as in the original.
 */
static bool_t find_evicted_page(memory_block_t *block, void *address, page_store_t **store, page_store_entry_t **entry)
{
    const dword_t target = PAGE_ALIGN((dword_t)address);
    list_entry_t *node;

    /* Pass 1: pages in transition (not yet committed to any store). */
    for (node = transition_pages.next; node != &transition_pages; node = node->next)
    {
        *entry = CONTAINER_OF(node, page_store_entry_t, link);

        if ((*entry)->address_space == block->address_space
            && PAGE_ALIGN((dword_t)(*entry)->address) == target)
        {
            return TRUE;
        }
    }

    /* Pass 2: entries already written out to a page store. */
    for (node = page_stores.next; node != &page_stores; node = node->next)
    {
        list_entry_t *inner;
        *store = CONTAINER_OF(node, page_store_t, link);

        for (inner = (*store)->entry_list.next; inner != &(*store)->entry_list; inner = inner->next)
        {
            *entry = CONTAINER_OF(inner, page_store_entry_t, link);

            if ((*entry)->address_space == block->address_space
                && PAGE_ALIGN((dword_t)(*entry)->address) == target)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
}

/*
 * Page-fault handler. Resolves demand-paging faults (page not present:
 * swapped-out, transition, or file/section-backed pages) and copy-on-write
 * faults (write to a read-only COW mapping). Returns TRUE if the fault was
 * resolved and the instruction can be retried, FALSE if it is a genuine
 * access violation.
 *
 * Runs with interrupts disabled; they are re-enabled only around blocking
 * file I/O, which is why the present bits are re-checked afterwards.
 */
bool_t memory_fault_handler(void *address, registers_t *regs)
{
    int i;
    page_error_t problem;
    dword_t aligned_address = PAGE_ALIGN((dword_t)address);
    dword_t pd_index = ADDR_TO_PDE((dword_t)address);
    dword_t pt_index = ADDR_TO_PTE((dword_t)address);
    /* The current directory and its tables are reachable through the
     * recursive self-mapping. */
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;
    dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + (pd_index << 12));
    process_t *proc = get_current_process();

    /* User addresses are looked up in the current process's space, anything
     * else in the kernel space. */
    memory_address_space_t *address_space = (proc != NULL && check_usermode(address, 1))
                                            ? &proc->memory_space : &kernel_address_space;
    memory_block_t *block = find_block_by_addr(address_space, address);
    if (block == NULL) return FALSE;

    /* Classify the fault from the CPU-pushed error code. */
    if (!(regs->error_code & PAGE_ERROR_PRESENT_FLAG))
    {
        problem = PAGE_ERROR_NOTPRESENT;
    }
    else if (!(block->flags & MEMORY_BLOCK_USERMODE)
        && (regs->error_code & PAGE_ERROR_USERMODE_FLAG))
    {
        problem = PAGE_ERROR_UNPRIVILEGED;
    }
    else if (regs->error_code & PAGE_ERROR_WRITE_FLAG)
    {
        problem = PAGE_ERROR_READONLY;
    }
    else
    {
        KERNEL_CRASH_WITH_REGS("Unknown paging problem", regs);
    }

    /* --- Demand paging: page is absent but the block says it should exist. --- */
    if ((block->flags & MEMORY_BLOCK_ACCESSIBLE) && (problem == PAGE_ERROR_NOTPRESENT))
    {
        page_store_t *store = NULL;
        page_store_entry_t *entry = NULL;
        byte_t buffer[PAGE_SIZE];
        dword_t bytes_read;
        dword_t page_flags = 0;

        if (find_evicted_page(block, address, &store, &entry))
        {
            /* The page was evicted: rebuild its flags from the block. */
            if (block->flags & MEMORY_BLOCK_ACCESSIBLE) page_flags |= PAGE_PRESENT;
            if ((block->flags & (MEMORY_BLOCK_WRITABLE | MEMORY_BLOCK_COPY_ON_WRITE))
                == MEMORY_BLOCK_WRITABLE)
            {
                page_flags |= PAGE_WRITABLE;
            }

            if (block->flags & MEMORY_BLOCK_USERMODE) page_flags |= PAGE_USERMODE;
            else page_flags |= PAGE_GLOBAL;

            if (entry->number != INVALID_STORE_NUMBER)
            {
                /* Page lives in a page store: read it back. The read blocks,
                 * so interrupts are enabled around it. */
                enable_ints();
                dword_t ret = syscall_read_file(store->file_handle, buffer, (qword_t)entry->number * (qword_t)PAGE_SIZE, PAGE_SIZE, &bytes_read);
                disable_ints();

                /* Another thread may have faulted the page in while we slept. */
                if ((page_directory[pd_index] & PAGE_PRESENT) && (page_table[pt_index] & PAGE_PRESENT))
                {
                    return TRUE;
                }

                if (ret != ERR_SUCCESS) return FALSE;

                ret = alloc_page((void*)aligned_address, page_flags);
                if (ret != ERR_SUCCESS) return FALSE;

                /* Other entries sharing this store slot now point at the
                 * resident copy (they become transition pages). */
                list_entry_t *ptr;
                for (ptr = store->entry_list.next; ptr != &store->entry_list; ptr = ptr->next)
                {
                    page_store_entry_t *other_entry = CONTAINER_OF(ptr, page_store_entry_t, link);

                    if (entry != other_entry && other_entry->number == entry->number)
                    {
                        list_remove(&other_entry->link);
                        list_append(&transition_pages, &other_entry->link);

                        other_entry->physical = get_physical_address((void*)aligned_address);
                        other_entry->number = INVALID_STORE_NUMBER;
                    }
                }

                clear_bit(store->bitmap, entry->number);
                list_remove(&entry->link);
                free(entry);

                memcpy((void*)aligned_address, buffer, PAGE_SIZE);
                address_space->stats.evicted -= PAGE_SIZE;
                return TRUE;
            }
            else
            {
                /* Transition page: the physical frame still holds the data,
                 * just map it back in. */
                if (map_page(entry->physical, entry->address, page_flags) == ERR_SUCCESS)
                {
                    list_remove(&entry->link);
                    free(entry);
                    address_space->stats.evicted -= PAGE_SIZE;
                    return TRUE;
                }
            }

            return FALSE;
        }
        else
        {
            /* Never-faulted page: zero-fill, section-share, or read from file. */
            list_entry_t *ptr;
            shared_page_t *page = NULL;
            qword_t offset = block->section_offset + (qword_t)aligned_address - (qword_t)block->address;

            page_flags = PAGE_PRESENT;
            if (block->flags & MEMORY_BLOCK_WRITABLE) page_flags |= PAGE_WRITABLE;

            if (block->flags & MEMORY_BLOCK_USERMODE) page_flags |= PAGE_USERMODE;
            else page_flags |= PAGE_GLOBAL;

            if (block->section && offset < (qword_t)block->section->size)
            {
                ASSERT(PAGE_OFFSET(offset) == 0);

                /* If another mapping already materialized this section page,
                 * share the existing frame. */
                for (ptr = block->section->page_list.next; ptr != &block->section->page_list; ptr = ptr->next)
                {
                    page = CONTAINER_OF(ptr, shared_page_t, link);
                    if (page->offset == offset) break;
                }

                if (ptr != &block->section->page_list)
                {
                    return (map_page(page->physical, (void*)aligned_address, page_flags) == ERR_SUCCESS);
                }
            }

            memset(buffer, 0, PAGE_SIZE);

            if (block->section && block->section->file && offset < (qword_t)block->section->size)
            {
                /* File-backed: read the page contents (blocking I/O, so
                 * interrupts are enabled around it). ERR_BEYOND means EOF
                 * inside the page — the zero-filled remainder is correct. */
                enable_ints();
                file_instance_t *file = block->section->file;
                lock_acquire_shared(&file->global->volume->lock);
                dword_t ret = file->global->volume->driver->read_file(file, buffer, offset, PAGE_SIZE, &bytes_read);
                lock_release(&file->global->volume->lock);
                disable_ints();
                if (ret != ERR_SUCCESS && ret != ERR_BEYOND) return FALSE;
            }

            /* Map writable first so the buffer can be copied in, then drop
             * to the real protection. */
            dword_t ret = alloc_page((void*)aligned_address, page_flags | PAGE_WRITABLE);
            if (ret != ERR_SUCCESS) return FALSE;

            memcpy((void*)aligned_address, buffer, PAGE_SIZE);
            set_page_flags((void*)aligned_address, page_flags);

            if (block->section && offset < (qword_t)block->section->size)
            {
                /* Register the new frame so future mappers of this section
                 * offset share it. */
                page = (shared_page_t*)malloc(sizeof(shared_page_t));
                if (page == NULL)
                {
                    free_page((void*)aligned_address);
                    return FALSE;
                }

                page->physical = get_physical_address((void*)aligned_address);
                page->offset = offset;

                list_append(&block->section->page_list, &page->link);
            }

            address_space->stats.committed += PAGE_SIZE;
            return TRUE;
        }
    }

    /* --- Copy-on-write: write fault on a writable COW block. --- */
    if ((block->flags & (MEMORY_BLOCK_COPY_ON_WRITE | MEMORY_BLOCK_WRITABLE))
        == (MEMORY_BLOCK_COPY_ON_WRITE | MEMORY_BLOCK_WRITABLE)
        && (problem == PAGE_ERROR_READONLY))
    {
        /* First un-share the page table itself if needed. */
        if (!(page_directory[pd_index] & PAGE_WRITABLE))
        {
            void *table_phys = (void*)PAGE_ALIGN(page_directory[pd_index]);

            if (get_page(table_phys)->ref_count > 1)
            {
                /* The table is shared with another directory: duplicate it,
                 * marking every PTE read-only so the pages stay COW. */
                void *table_copy = alloc_physical_page();
                if (table_copy == NULL) return FALSE;

                dword_t *temporary = map_temporary_page(table_copy, PAGE_PRESENT | PAGE_WRITABLE);
                if (temporary == NULL)
                {
                    free_physical_page(table_copy);
                    return FALSE;
                }

                for (i = 0; i < PAGE_SIZE / sizeof(dword_t); i++)
                {
                    if (page_table[i])
                    {
                        reference_page((void*)PAGE_ALIGN(page_table[i]));
                        temporary[i] = page_table[i] & ~PAGE_WRITABLE;
                    }
                }

                unmap_temporary_page(temporary);

                reference_page(table_copy);
                dereference_page(table_phys);

                page_directory[pd_index] = PAGE_ALIGN((dword_t)table_copy)
                                           | PAGE_OFFSET(page_directory[pd_index])
                                           | PAGE_WRITABLE;
                invalidate_tlb(page_table);
            }
            else
            {
                /* Sole owner: make the table writable but push the COW
                 * read-only bit down onto each individual PTE. */
                page_directory[pd_index] |= PAGE_WRITABLE;
                invalidate_tlb(page_table);

                for (i = 0; i < PAGE_SIZE / sizeof(dword_t); i++)
                {
                    page_table[i] &= ~PAGE_WRITABLE;
                    invalidate_tlb((void*)((pd_index << 22) | (i << 12)));
                }
            }
        }

        /* Then un-share the page itself. */
        if (!(page_table[pt_index] & PAGE_WRITABLE))
        {
            void *phys = (void*)PAGE_ALIGN(page_table[pt_index]);

            if (get_page(phys)->ref_count > 1)
            {
                /* Shared frame: copy its contents into a private frame. */
                void *page_copy = alloc_physical_page();
                if (page_copy == NULL) return FALSE;

                write_physical(page_copy, (void*)PAGE_ALIGN((dword_t)address), PAGE_SIZE);
                reference_page(page_copy);
                dereference_page(phys);

                page_table[pt_index] = PAGE_ALIGN((dword_t)page_copy)
                                       | PAGE_OFFSET(page_table[pt_index])
                                       | PAGE_WRITABLE;
                invalidate_tlb((void*)aligned_address);
            }
            else
            {
                /* Last reference: simply restore the write permission. */
                page_table[pt_index] |= PAGE_WRITABLE;
                invalidate_tlb((void*)aligned_address);
            }
        }

        return TRUE;
    }

    return FALSE;
}

/*
 * Boot-time initialization of the physical and virtual memory managers.
 *
 * Walks the multiboot memory map to build the free-page stack, allocates the
 * page reference-count array, creates the kernel and mapping address spaces,
 * detaches the boot identity mappings, and enables global pages when the CPU
 * supports them.
 *
 * mmap            - multiboot memory map tag from the bootloader.
 * lowest_physical - lowest physical address the manager may hand out
 *                   (everything below is kernel image / boot data).
 */
void memory_init(multiboot_tag_mmap_t *mmap, uintptr_t lowest_physical)
{
    dword_t i, j;
    dword_t *page_directory = (dword_t*)PAGE_DIRECTORY_ADDR;

    fix_overlapping_sections(mmap);

    log_write(LOG_NORMAL, "Memory map:\nBase\t\t\tLength\t\t\tType\n");
    log_write(LOG_NORMAL, "------------------------------------------------------------\n");

    multiboot_mmap_entry_t *entry;

    for (entry = (multiboot_mmap_entry_t*)(mmap + 1);
         (uintptr_t)entry < ((uintptr_t)mmap + mmap->size);
         entry = (multiboot_mmap_entry_t*)((uintptr_t)entry + mmap->entry_size))
    {
        log_write(LOG_NORMAL, "0x%08X%08X\t0x%08X%08X\t%s\n",
                  entry->base_high,
                  entry->base_low,
                  entry->length_high,
                  entry->length_low,
                  (entry->type == 1) ? "Usable" : "Not Usable");

        /* Only usable regions entirely below 4 GiB are managed (32-bit paging). */
        if (entry->type == 1
            && entry->base_high == 0
            && entry->length_high == 0
            && entry->length_low < (0xFFFFFFFF - entry->base_low)
            && entry->length_low > 0)
        {
            dword_t start_addr = entry->base_low;
            if (start_addr < lowest_physical) start_addr = lowest_physical;
            start_addr = PAGE_ALIGN_UP(start_addr);
            dword_t end_addr = PAGE_ALIGN_UP(entry->base_low + entry->length_low);
            dword_t page = end_addr - PAGE_SIZE;

            /* Push free pages (top-down) onto the physical memory stack.
             * Pages taken from the low end of the region (start_addr) are
             * consumed on demand to back the stack's own page tables. */
            while (page >= start_addr)
            {
                dword_t stack_address = (dword_t)&physical_memory_stack[num_free_pages];
                dword_t pd_index = ADDR_TO_PDE(stack_address);
                dword_t pt_index = ADDR_TO_PTE(stack_address);
                dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + pd_index * PAGE_SIZE);

                /* The stack slot we are about to write needs a page table... */
                if (!(page_directory[pd_index] & PAGE_PRESENT))
                {
                    page_directory[pd_index] = start_addr | PAGE_PRESENT | PAGE_WRITABLE | PAGE_GLOBAL;
                    start_addr += PAGE_SIZE;
                    invalidate_tlb(page_table);
                    memset(page_table, 0, PAGE_SIZE);
                    total_physical_pages++;
                    continue;
                }

                /* ...and a mapped page within that table. */
                if (!(page_table[pt_index] & PAGE_PRESENT))
                {
                    page_table[pt_index] = start_addr | PAGE_PRESENT | PAGE_WRITABLE | PAGE_GLOBAL;
                    start_addr += PAGE_SIZE;
                    invalidate_tlb((void*)stack_address);
                    total_physical_pages++;
                    continue;
                }

                free_physical_page((void*)page);
                page -= PAGE_SIZE;
            }
        }
    }

    log_write(LOG_NORMAL, "------------------------------------------------------------\n");
    total_physical_pages += num_free_pages;
    /* The page_t array sits immediately below the kernel pool. */
    pages = (page_t*)(KERNEL_POOL_START - total_physical_pages * sizeof(page_t));

    /* Back the page_t array with freshly allocated physical pages. */
    for (i = PAGE_ALIGN((uintptr_t)pages); i < KERNEL_POOL_START; i += PAGE_SIZE)
    {
        dword_t pd_index = ADDR_TO_PDE(i);
        dword_t pt_index = ADDR_TO_PTE(i);
        dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + pd_index * PAGE_SIZE);

        if (!(page_directory[pd_index] & PAGE_PRESENT))
        {
            page_directory[pd_index] = (uintptr_t)alloc_physical_page() | PAGE_PRESENT | PAGE_WRITABLE | PAGE_GLOBAL;
            invalidate_tlb(page_table);
            memset(page_table, 0, PAGE_SIZE);
        }

        if (!(page_table[pt_index] & PAGE_PRESENT))
        {
            page_table[pt_index] = (uintptr_t)alloc_physical_page() | PAGE_PRESENT | PAGE_WRITABLE | PAGE_GLOBAL;
            invalidate_tlb((void*)i);
        }
    }

    dword_t pages_inserted = 0;

    /* Record every free page still on the stack... */
    for (i = 0; i < num_free_pages; i++)
    {
        pages[pages_inserted].phys_addr = PAGE_ALIGN((dword_t)physical_memory_stack[i]);
        pages[pages_inserted].ref_count = 0;
        pages_inserted++;
    }

    /* ...plus every page already mapped into the kernel range (above
     * lowest_physical), so the array covers all managed frames. */
    for (i = KERNEL_PAGE_START; i <= KERNEL_PAGE_END; i++)
    {
        dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + i * PAGE_SIZE);
        if (!(page_directory[i] & PAGE_PRESENT)) continue;

        for (j = 0; j < PAGE_SIZE / sizeof(dword_t); j++)
        {
            if (PAGE_ALIGN(page_table[j]) < lowest_physical) continue;

            if (page_table[j] & PAGE_PRESENT)
            {
                pages[pages_inserted].phys_addr = PAGE_ALIGN((dword_t)page_table[j]);
                pages[pages_inserted].ref_count = 0;
                pages_inserted++;
            }
        }
    }

    ASSERT(pages_inserted == total_physical_pages);
    /* Sort by physical address so get_page() can binary-search. */
    qsort(pages, total_physical_pages, sizeof(page_t), compare_page);

    init_semaphore(&temporary_page_semaphore, TEMPORARY_PAGES, TEMPORARY_PAGES);

    if (create_address_space((void*)KERNEL_POOL_START,
                             (KERNEL_POOL_END - KERNEL_POOL_START + PAGE_SIZE - 1) / PAGE_SIZE,
                             &kernel_address_space) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Unable to create kernel address space");
    }

    if (create_address_space((void*)MAPPING_START,
                             (MAPPING_END - MAPPING_START + PAGE_SIZE - 1) / PAGE_SIZE,
                             &mapping_space) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Unable to create mapping space");
    }

    /* Record the boot directory (read from its self-map entry) as current. */
    set_page_directory((void*)PAGE_ALIGN(page_directory[PAGEDIR_SELF_ENTRY]));

    /* Take a reference on every mapped kernel page so they are never
     * considered free. */
    for (i = KERNEL_PAGE_START; i <= KERNEL_PAGE_END; i++)
    {
        dword_t *page_table = (dword_t*)(PAGE_TABLE_ADDR + i * PAGE_SIZE);
        if (!(page_directory[i] & PAGE_PRESENT)) continue;

        for (j = 0; j < PAGE_SIZE / sizeof(dword_t); j++)
        {
            if (page_table[j] & PAGE_PRESENT) reference_page((void*)PAGE_ALIGN(page_table[j]));
        }
    }

    /* Drop the boot-time user-range mappings and reload CR3 to flush them. */
    for (i = USER_PAGE_START; i <= USER_PAGE_END; i++) page_directory[i] = 0;
    set_page_directory(get_page_directory());

    /* Enable global pages (CR4.PGE, bit 7) if the CPU advertises them. */
    if (cpu_features[0] & CPU_FEATURE_PGE)
    {
        asm volatile ("movl %cr4, %eax\n"
                      "orl $0x80, %eax\n"
                      "movl %eax, %cr4\n");
    }
}
