
/*
 * Allocate a region of KVA of the specified size and alignment within the
 * range [vstart, vend). If @vm is non-NULL, the vmap_area and the vm_struct
 * are also bound to each other.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
                                         unsigned long align,
                                         unsigned long vstart, unsigned long vend,
                                         int node, gfp_t gfp_mask,
                                         unsigned long va_flags, struct vm_struct *vm)
{
    struct vmap_node *vn;
    struct vmap_area *va;
    unsigned long addr;
    unsigned int vn_id;

    /*
     * Fast path: the per-node allocator hands back both the descriptor
     * and the address range, filling in @addr and @vn_id on success.
     */
    va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
    if (!va)
    {
        /*
         * Fall back to allocating just the descriptor. Honour the
         * caller's gfp_mask (previously it was dropped, turning every
         * fallback into an unconditional-context allocation);
         * __GFP_ZERO keeps the descriptor fields initialized.
         */
        va = kmalloc(sizeof(*va), gfp_mask | __GFP_ZERO);
        if (!va)
            return ERR_PTR(-ENOMEM);

        /*
         * TODO: the slow-path KVA search (upstream's
         * __alloc_vmap_area() plus the purge-and-retry loop) is not
         * wired up here yet. Until it is, a node_alloc() miss relies
         * on node_alloc() having stored an error value in @addr so we
         * take the overflow path below -- confirm node_alloc() always
         * writes *addr on failure.
         */
    }

    /* No usable address inside [vstart, vend): report exhaustion. */
    if (IS_ERR_VALUE(addr))
        goto overflow;

    va->va_start = addr;
    va->va_end = addr + size;
    va->vm = NULL;
    /* vn_id is encoded into the low flag bits so the owning node can
     * be recovered from the area later. */
    va->flags = (va_flags | vn_id);

    /* Bind the area and the vm_struct together when one is supplied. */
    if (vm)
    {
        vm->addr = (void *)va->va_start;
        vm->size = va_size(va);
        va->vm = vm;
    }

    /* Publish the area in the busy tree of the node owning this address. */
    vn = addr_to_node(va->va_start);

    spin_lock(&vn->busy.lock);
    insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
    spin_unlock(&vn->busy.lock);

    return va;

overflow:
    /*
     * Previously the descriptor was leaked on this path. Release it
     * before reporting failure. NOTE(review): assumes descriptors from
     * node_alloc() may also be returned via kfree() -- if node_alloc()
     * draws from a dedicated cache, free through that cache instead.
     */
    kfree(va);
    return ERR_PTR(-EBUSY);
}
