#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/page_alloc.h>
#include <linux/slab.h>

#include "slab.h"

/* Structure holding parameters for get_partial() call chain */
struct partial_context
{
    gfp_t flags;               /* allocation flags for the request being served */
    unsigned int orig_size;    /* original request size, in bytes */
    void *object;              /* object found by the partial-list search, if any */
};

/*
 * Get a partial slab, lock it and return it.
 *
 * Stub: partial-list support is not implemented yet, so this always
 * returns NULL and the caller falls through to allocating a new slab.
 * (Fix: removed an unused local that only generated a warning.)
 */
static slab_t *get_partial(struct kmem_cache *s, int node, struct partial_context *pc)
{
    return NULL;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
    return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
    return x.x & OO_MASK;
}

/*
 * Allocate the backing pages for a slab and tag them as slab pages.
 * Returns NULL if the page allocator cannot satisfy the request.
 */
static inline slab_t *alloc_slab_page(gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
    slab_t *slab = __alloc_frozen_pages_noprof(flags, oo_order(oo), node, NULL);

    if (!slab)
    {
        return NULL;
    }

    __PageTypeSet(slab, PGTY_slab);
    return slab;
}

/* Virtual address of the first byte backed by this slab's pages. */
static inline void *slab_address(const slab_t *slab)
{
    const struct page *page = (const struct page *)slab;

    return page_address(page);
}

/*
 * Freelist randomization is not implemented; report that no shuffle
 * happened so the caller builds the linear freelist itself.
 */
static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
    return false;
}

/*
 * Red-zone debugging is not implemented, so the object start needs no
 * adjustment; the pointer is returned unchanged.
 */
void *fixup_red_left(struct kmem_cache *s, void *p)
{
    return p;
}

/* Run the cache's constructor (if any) on a freshly carved object. */
static void *setup_object(struct kmem_cache *s, void *object)
{
    if (s->ctor)
        s->ctor(object);

    return object;
}

/*
 * Encode a freelist pointer for storage inside an object. With
 * hardening this would be XOR-obfuscated with the slot address and a
 * per-cache random value; here the pointer is stored verbatim and
 * ptr_addr is kept only for interface compatibility.
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
                                            void *ptr, unsigned long ptr_addr)
{
    return (freeptr_t){ .v = (unsigned long)ptr };
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
    unsigned long freeptr_addr = (unsigned long)object + s->offset;

    *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}

/*
 * Allocate a new slab for cache @s and build its initial freelist.
 *
 * Fix: alloc_gfp was passed to alloc_slab_page() without ever being
 * initialized (undefined behavior); it is now derived from @flags.
 *
 * Each object is constructed via setup_object() and linked to the next
 * through its embedded free pointer; the final object terminates the
 * list with NULL. Returns the new slab, or NULL on page-allocation
 * failure.
 */
static slab_t *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
    slab_t *slab;
    struct kmem_cache_order_objects oo = s->oo;
    gfp_t alloc_gfp = flags;    /* was uninitialized */
    void *start, *p, *next;
    int idx;
    bool shuffle;

    slab = alloc_slab_page(alloc_gfp, node, oo);
    if (!slab)
    {
        return NULL;
    }

    slab->objects = oo_objects(oo);
    slab->inuse = 0;
    slab->frozen = 0;

    slab->slab_cache = s;

    start = slab_address(slab);

    shuffle = shuffle_freelist(s, slab);
    if (!shuffle)
    {
        /* Build a linear freelist: each object points at the next. */
        start = fixup_red_left(s, start);
        start = setup_object(s, start);
        slab->freelist = start;
        for (idx = 0, p = start; idx < slab->objects - 1; idx++)
        {
            next = p + s->size;
            next = setup_object(s, next);
            set_freepointer(s, p, next);
            p = next;
        }
        set_freepointer(s, p, NULL);    /* terminate the list */
    }

    return slab;
}

/*
 * Thin wrapper around allocate_slab(); kept separate as the hook point
 * where flag sanitization would go.
 */
static slab_t *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
    return allocate_slab(s, flags, node);
}

/*
 * Decode a stored freelist pointer. No hardening is applied, so the
 * stored word is the pointer itself; ptr_addr exists only to mirror
 * freelist_ptr_encode()'s interface.
 */
static inline void *freelist_ptr_decode(const struct kmem_cache *s,
                                        freeptr_t ptr, unsigned long ptr_addr)
{
    return (void *)ptr.v;
}

/* Read the next-free link stored inside @object at the cache's offset. */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
    unsigned long slot = (unsigned long)object + s->offset;

    return freelist_ptr_decode(s, *(freeptr_t *)slot, slot);
}

/*
 * Take the first object off a freshly built slab's freelist and mark
 * one object in use. The slab is new, so the freelist is non-empty.
 */
static void *alloc_single_from_new_slab(struct kmem_cache *s, slab_t *slab, int orig_size)
{
    void *first = slab->freelist;

    slab->freelist = get_freepointer(s, first);
    slab->inuse = 1;

    return first;
}

/*
 * Slow-path allocation: get a new slab and carve one object out of it.
 *
 * Fix: new_slab() returns NULL when the page allocator fails, but the
 * result was passed unchecked to alloc_single_from_new_slab(), which
 * dereferences it — a guaranteed NULL dereference under memory
 * pressure. Propagate the failure to the caller instead.
 */
static void *__slab_alloc_node(struct kmem_cache *s,
                               gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
{
    slab_t *slab;

    slab = new_slab(s, gfpflags, node);
    if (!slab)
    {
        return NULL;
    }

    return alloc_single_from_new_slab(s, slab, orig_size);
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 *
 * Currently only the slow path exists, so every request goes through
 * __slab_alloc_node(). Returns NULL if @s is NULL or allocation fails.
 * (Fix: removed an unused `init` local that only generated a warning.)
 */
static inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
                                    gfp_t gfpflags, int node,
                                    unsigned long addr, size_t orig_size)
{
    if (unlikely(!s))
        return NULL;

    return __slab_alloc_node(s, gfpflags, node, addr, orig_size);
}

/*
 * Resolve the kmalloc size class to a cache, then allocate from it.
 * slab_alloc_node() tolerates a NULL cache, so no extra check is needed.
 */
static void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
                               unsigned long caller)
{
    struct kmem_cache *s = kmalloc_slab(size, b, flags, caller);

    return slab_alloc_node(s, NULL, flags, node, caller, size);
}

/* kmalloc entry point: no bucket set, no NUMA preference, no caller tag. */
void *__kmalloc_noprof(size_t size, gfp_t flags)
{
    return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, 0);
}

/*
 * Allocate one object from cache @s.
 *
 * Fix: s->object_size was dereferenced before any NULL check, even
 * though the fastpath (slab_alloc_node) explicitly guards against a
 * NULL cache — the guard was unreachable for this caller. Check @s
 * first, consistent with the rest of the allocation path.
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
{
    if (unlikely(!s))
        return NULL;

    return slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 0,
                           s->object_size);
}
