#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/numa.h>
#include <linux/stdlib.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/pgtable.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/preempt.h>

#include "inc/pgalloc-track.h"
#include "inc/internal.h"

/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of ready-to-go VAs of a certain size, linked to each other.
 * An index in the pool-array corresponds to the number of pages + 1.
 */
#define MAX_VA_SIZE_PAGES 256

/* One size-segregated cache of ready-to-use vmap areas. */
struct vmap_pool
{
    struct list_head head;  /* cached VAs of this pool's size class */
    unsigned long len;      /* entry count of @head (presumably; maintained by pool code not visible here) */
};

/*
 * This structure defines a single, solid model where a list and an
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used, as well as for sequential traversal.
 */
struct rb_list
{
    struct rb_root root;    /* rb-tree of areas, looked up by address (see __find_vmap_area usage) */
    struct list_head head;  /* the same nodes kept in ascending order */
    spinlock_t lock;        /* protects both @root and @head */
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap. It allows balancing access across nodes and
 * mitigating lock contention.
 */
static struct vmap_node
{
    /* Simple size segregated storage. */
    struct vmap_pool pool[MAX_VA_SIZE_PAGES];
    spinlock_t pool_lock;           /* protects @pool */
    bool skip_populate;             /* NOTE(review): semantics defined in included _vmalloc_ sources */

    /* Bookkeeping data of this node. */
    struct rb_list busy;            /* areas currently in use (searched by find_vmap_area()) */
    struct rb_list lazy;            /* areas pending lazy free */

    /*
     * Ready-to-free areas.
     */
    struct list_head purge_list;
    // struct work_struct purge_work;
    unsigned long nr_purged;
} single;                           /* boot-time single-node instance, see vmap_nodes below */

/*
 * Initial setup consists of one single node, i.e. balancing is
 * fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system's capacity.
 */
static struct vmap_node *vmap_nodes = &single;  /* array of nodes; points at @single until init */
static unsigned int nr_vmap_nodes = 1;          /* number of entries in @vmap_nodes */
static unsigned int vmap_zone_size = 1;         /* address-space span per node zone */
static bool vmap_initialized = false;           /* set by vmalloc_init(); lookups bail out before it */

/* Largest page size used for ioremap mappings; base pages only here. */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;

/* Iterate @vn over every configured vmap-node: [0, nr_vmap_nodes). */
#define for_each_vmap_node(vn)  \
    for ((vn) = &vmap_nodes[0]; \
         (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)

#include "_vmalloc_/vmap_node.c"
#include "_vmalloc_/va-link.c"
#include "_vmalloc_/find_vmap_area.c"
#include "_vmalloc_/vmap_init_nodes.c"
#include "_vmalloc_/insert_vmap_area.c"
#include "_vmalloc_/alloc_vmap_area.c"
#include "_vmalloc_/vmap_range.c"
#include "_vmalloc_/free_vmap_area.c"

/*
 * Tear down page-table entries for [start:end) without a TLB flush.
 * NOTE(review): not implemented yet - currently a no-op stub, so
 * unmapped ranges keep their stale PTEs.
 */
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
    // todo
}

/*
 * Allocate a vm_struct backed by a fresh vmap_area in [start:end).
 *
 * @size:       requested size in bytes; rounded up to 1UL << @shift
 * @align:      required alignment of the virtual address
 * @shift:      page-shift granularity of the mapping
 * @flags:      VM_* flags recorded in the area
 * @start:      lowest acceptable virtual address
 * @end:        highest acceptable virtual address (exclusive)
 * @node:       preferred NUMA node, or NUMA_NO_NODE
 * @gfp_mask:   allocation flags for the metadata
 * @caller:     call-site recorded for diagnostics
 *
 * Returns the new vm_struct, or NULL on failure. The caller owns the
 * returned area and releases it via remove_vm_area()/free_vm_area().
 */
struct vm_struct *__get_vm_area_node(unsigned long size,
                                     unsigned long align, unsigned long shift, unsigned long flags,
                                     unsigned long start, unsigned long end, int node,
                                     gfp_t gfp_mask, const void *caller)
{
    struct vmap_area *va;
    struct vm_struct *area;
    unsigned long requested_size = size;

    /*
     * Round the size up to the mapping granularity (@shift was
     * previously unused). A zero result means the request was
     * empty or overflowed - fail early.
     */
    size = ALIGN(size, 1ul << shift);
    if (unlikely(!size))
        return NULL;

    area = kzalloc_node(sizeof(*area), gfp_mask, node);
    if (unlikely(!area))
        return NULL;

    area->flags = flags;
    area->caller = caller;
    area->requested_size = requested_size;

    /* @area is handed to alloc_vmap_area(), which presumably wires up va->vm/area->addr - confirm in alloc_vmap_area.c. */
    va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
    if (IS_ERR(va))
    {
        kfree(area);
        return NULL;
    }

    return area;
}

/*
 * Convenience wrapper around __get_vm_area_node(): no special
 * alignment, base-page granularity, any NUMA node, GFP_KERNEL.
 */
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
                                       unsigned long start, unsigned long end,
                                       const void *caller)
{
    return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
                              start, end, NUMA_NO_NODE,
                              GFP_KERNEL, caller);
}

/*
 * Look up the busy vmap_area covering @addr, or return NULL.
 *
 * addr_to_node_id(addr) gives the node an address hashes to, but a
 * VA spanning several zones is tracked by the node of its va_start:
 *
 *      <----va---->
 * -|-----|-----|-----|-----|-
 *     1     2     0     1
 *
 * Here the VA lives on node 1 while covering zones 1, 2 and 0, so a
 * miss on the hashed node falls back to scanning the remaining nodes
 * downwards with wrap-around.
 */
struct vmap_area *find_vmap_area(unsigned long addr)
{
    struct vmap_area *va;
    struct vmap_node *vn;
    unsigned int start, step;

    if (unlikely(!vmap_initialized))
        return NULL;

    start = addr_to_node_id(addr);

    /* Visit every node exactly once: start, start-1, ... (mod nr). */
    for (step = 0; step < nr_vmap_nodes; step++)
    {
        vn = &vmap_nodes[(start + nr_vmap_nodes - step) % nr_vmap_nodes];

        spin_lock(&vn->busy.lock);
        va = __find_vmap_area(addr, &vn->busy.root);
        spin_unlock(&vn->busy.lock);

        if (va)
            return va;
    }

    return NULL;
}

/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr:	  base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *find_vm_area(const void *addr)
{
    struct vmap_area *va = find_vmap_area((unsigned long)addr);

    return va ? va->vm : NULL;
}

/*
 * Map the physical range @phys_addr into [addr:end) with @prot.
 * The target must be a pre-reserved VM_IOREMAP area whose bounds
 * match the request exactly.
 */
int ioremap_page_range(unsigned long addr, unsigned long end,
                       phys_addr_t phys_addr, pgprot_t prot)
{
    struct vm_struct *area = find_vm_area((void *)addr);
    unsigned long vm_start, vm_end;

    if (!area || !(area->flags & VM_IOREMAP))
    {
        WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
        return -EINVAL;
    }

    vm_start = (unsigned long)area->addr;
    vm_end = vm_start + get_vm_area_size(area);

    if (addr != vm_start || end != vm_end)
    {
        WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
                  addr, end, (long)vm_start, (long)vm_end);
        return -ERANGE;
    }

    return vmap_page_range(addr, end, phys_addr, prot);
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr:	    base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
    struct vmap_area *va;

    might_sleep();

    /* A vfree() address must be page aligned; reject anything else. */
    if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
             addr))
        return NULL;

    va = find_unlink_vmap_area((unsigned long)addr);
    if (va && va->vm)
    {
        struct vm_struct *vm = va->vm;

        free_unmap_vmap_area(va);
        return vm;
    }

    return NULL;
}

/*
 * Unmap and free @area. The area must start at area->addr and still
 * be registered; anything else is a fatal bookkeeping error.
 */
void free_vm_area(struct vm_struct *area)
{
    struct vm_struct *removed = remove_vm_area(area->addr);

    BUG_ON(removed != area);
    kfree(area);
}

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:   memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
    struct vm_struct *vm;

    BUG_ON(in_interrupt());
    might_sleep();

    /* vunmap(NULL) is a harmless no-op. */
    if (!addr)
        return;

    vm = remove_vm_area(addr);
    if (likely(vm))
    {
        kfree(vm);
        return;
    }

    WARN(1, "Trying to vunmap() nonexistent vm area (%p)\n",
         addr);
}

/*
 * Install mappings for [addr:end) -> @phys_addr with execute
 * permission stripped, then flush the cache over the range.
 * Returns 0 or a negative errno from vmap_range_noflush().
 */
int vmap_page_range(unsigned long addr, unsigned long end,
                    phys_addr_t phys_addr, pgprot_t prot)
{
    int ret = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
                                 ioremap_max_page_shift);

    /* The cache flush happens unconditionally, even on failure. */
    flush_cache_vmap(addr, end);

    return ret;
}

/*
 * One-time setup of the vmap node infrastructure. Must run before any
 * vmap activity: find_vmap_area() bails out until @vmap_initialized
 * is set.
 */
void __init vmalloc_init(void)
{
    vmap_init_nodes();

    /* Publish readiness only after the nodes are fully set up. */
    vmap_initialized = true;
}
