
/* Return the size of @va in bytes. */
static inline unsigned long va_size(struct vmap_area *va)
{
	return va->va_end - va->va_start;
}

/*
 * Map an address to the index of the vmap node whose zone
 * covers it. Zones are vmap_zone_size bytes wide and wrap
 * around the node array.
 */
static inline unsigned int addr_to_node_id(unsigned long addr)
{
	unsigned long zone = addr / vmap_zone_size;

	return zone % nr_vmap_nodes;
}

/* Resolve the vmap node responsible for @addr. */
static inline struct vmap_node *addr_to_node(unsigned long addr)
{
	unsigned int nid = addr_to_node_id(addr);

	return &vmap_nodes[nid];
}

/* Translate an arbitrary id into a valid vmap node pointer. */
static inline struct vmap_node *id_to_node(unsigned int id)
{
	unsigned int idx = id % nr_vmap_nodes;

	return &vmap_nodes[idx];
}

/*
 * Select the per-node pool that caches areas of exactly @size
 * bytes, or NULL if @size exceeds the largest pooled size.
 */
static struct vmap_pool *size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
	unsigned int idx = (size - 1) / PAGE_SIZE;

	return (idx < MAX_VA_SIZE_PAGES) ? &vn->pool[idx] : NULL;
}

/*
 * Try to pull a cached vmap_area of exactly @size bytes out of
 * the node's per-size pool. Returns NULL when the pool is empty,
 * the head entry is misaligned for @align (it is rotated to the
 * tail so a later attempt sees a different entry), or the entry
 * fails sanity checks against [@vstart:@vend].
 */
static struct vmap_area *
node_pool_del_va(struct vmap_node *vn, unsigned long size,
		 unsigned long align, unsigned long vstart,
		 unsigned long vend)
{
	struct vmap_area *va = NULL;
	struct vmap_pool *vp;
	int bad = 0;

	vp = size_to_va_pool(vn, size);

	/* Lockless fast path: nothing cached for this size. */
	if (!vp || list_empty(&vp->head))
		return NULL;

	spin_lock(&vn->pool_lock);
	/* Re-check under the lock; the pool may have drained. */
	if (list_empty(&vp->head))
		goto out_unlock;

	va = list_first_entry(&vp->head, struct vmap_area, list);

	if (!IS_ALIGNED(va->va_start, align)) {
		/* Rotate the misaligned entry to the tail and give up. */
		list_move_tail(&va->list, &vp->head);
		va = NULL;
		goto out_unlock;
	}

	/*
	 * Do some sanity check and emit a warning
	 * if one of below checks detects an error.
	 */
	bad |= (va_size(va) != size);
	bad |= (va->va_start < vstart);
	bad |= (va->va_end > vend);

	if (WARN_ON_ONCE(bad)) {
		va = NULL;
		goto out_unlock;
	}

	list_del_init(&va->list);
	WRITE_ONCE(vp->len, vp->len - 1);

out_unlock:
	spin_unlock(&vn->pool_lock);
	return va;
}

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id >= nr_vmap_nodes) {
		/* Warn and no node encoded. */
		WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
		return 0;
	}

	return (node_id + 1) << BITS_PER_BYTE;
}

/*
 * Attempt a fast per-node allocation from the pool of the node
 * associated with the current CPU. On success *addr holds the
 * start address and *vn_id the encoded owning node; otherwise
 * *addr is -EINVAL and NULL is returned so the caller falls back
 * to the global path.
 */
static struct vmap_area *node_alloc(unsigned long size, unsigned long align,
				    unsigned long vstart, unsigned long vend,
				    unsigned long *addr, unsigned int *vn_id)
{
	struct vmap_area *va;
	unsigned int nid;

	*vn_id = 0;
	*addr = -EINVAL;

	/*
	 * Fallback to a global heap if not vmalloc or there
	 * is only one node.
	 */
	if (vstart != VMALLOC_START || vend != VMALLOC_END || nr_vmap_nodes == 1)
		return NULL;

	nid = raw_smp_processor_id() % nr_vmap_nodes;
	va = node_pool_del_va(id_to_node(nid), size, align, vstart, vend);
	*vn_id = encode_vn_id(nid);

	if (va)
		*addr = va->va_start;

	return va;
}
