#include <linux/init.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/nodemask.h>
#include <linux/minmax.h>

#include "slab.h"

/* Fallback minimum kmalloc alignment when the arch does not define one. */
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN 64
#endif

/*
 * Build one kmalloc_info[] entry: the human-readable cache name for the
 * KMALLOC_NORMAL type plus the object size in bytes. __short_size is
 * stringified into the "kmalloc-<n>" name shown in slab statistics.
 */
#define INIT_KMALLOC_INFO(__size, __short_size)           \
    {                                                     \
        .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
        .size = __size,                                   \
    }

/* Per-type array of kmalloc caches, populated by create_kmalloc_caches(). */
static kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] = {};
/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 *
 * The stored values are kmalloc_info[]/kmalloc_caches[] indices:
 * 1 = kmalloc-96, 2 = kmalloc-192, and 3..7 = the power-of-two caches
 * 8..128 (index i serves sizes up to 2^i).
 */
static unsigned char kmalloc_size_index[24] = {
    3, /* 8 */
    4, /* 16 */
    5, /* 24 */
    5, /* 32 */
    6, /* 40 */
    6, /* 48 */
    6, /* 56 */
    6, /* 64 */
    1, /* 72 */
    1, /* 80 */
    1, /* 88 */
    1, /* 96 */
    7, /* 104 */
    7, /* 112 */
    7, /* 120 */
    7, /* 128 */
    2, /* 136 */
    2, /* 144 */
    2, /* 152 */
    2, /* 160 */
    2, /* 168 */
    2, /* 176 */
    2, /* 184 */
    2, /* 192 */
};

/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 *
 * NOTE(review): the table below actually ends at kmalloc-512 (index 9),
 * not 2M as the comment above claims — confirm KMALLOC_SHIFT_HIGH <= 9,
 * otherwise create_kmalloc_caches()/new_kmalloc_cache() index past the
 * end of this array.
 */
const struct kmalloc_info_struct kmalloc_info[] = {
    INIT_KMALLOC_INFO(0, 0),
    INIT_KMALLOC_INFO(96, 96),
    INIT_KMALLOC_INFO(192, 192),
    INIT_KMALLOC_INFO(8, 8),
    INIT_KMALLOC_INFO(16, 16),
    INIT_KMALLOC_INFO(32, 32),
    INIT_KMALLOC_INFO(64, 64),
    INIT_KMALLOC_INFO(128, 128),
    INIT_KMALLOC_INFO(256, 256),
    INIT_KMALLOC_INFO(512, 512),
};

/* Statically allocated bootstrap cache-of-caches, usable before kmalloc is up. */
static struct kmem_cache boot_kmem_cache;

/*
 * Pick the kmalloc cache type for an allocation. Only the NORMAL bucket
 * exists in this simplified allocator, so flags and caller are unused.
 */
static inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
    return KMALLOC_NORMAL;
}

/* Map a byte count to its slot in kmalloc_size_index[] (8-byte strides). */
static inline unsigned int size_index_elem(unsigned int bytes)
{
    return (bytes - 1) >> 3;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 *
 * The 64-byte baseline stands in for the cache-line size until the real
 * computation (SLAB_HWCACHE_ALIGN handling) is implemented. Unlike the
 * previous stub, a larger caller-requested alignment is honored — the
 * stub discarded `align`, which broke the guarantee (see
 * create_boot_cache) that power-of-two kmalloc caches are aligned to
 * their own size.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
                                        unsigned int align, unsigned int size)
{
    unsigned int ralign = 64; /* todo: derive from flags/cache-line size */

    if (align > ralign)
        ralign = align;

    return ralign;
}

/*
 * Look up the kmalloc cache that serves an allocation of @size bytes.
 * When @b is NULL the default bucket set for the allocation's type is
 * used. Returns NULL for a zero-size request.
 */
struct kmem_cache *kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
    unsigned int index;

    if (!b)
        b = &kmalloc_caches[kmalloc_type(flags, caller)];

    /*
     * Guard against size == 0: size_index_elem(0) underflows to a huge
     * unsigned value and would index far outside kmalloc_size_index[24].
     */
    if (!size)
        return NULL;

    if (size <= 192)
        index = kmalloc_size_index[size_index_elem(size)];
    else
        index = fls(size - 1); /* power-of-two bucket: 256 -> 8, 512 -> 9, ... */

    return (*b)[index];
}

/*
 * Compute the page order used for slabs of this object size.
 *
 * Stubbed to order 0 for now. NOTE(review): with order 0, any object
 * larger than PAGE_SIZE makes order_objects() return 0 objects per
 * slab — confirm large kmalloc sizes (kmalloc_info's comment advertises
 * up to 2M) are handled before relying on this.
 */
static inline int calculate_order(unsigned int size)
{
    return 0; // todo
}

/* Number of whole objects that fit in a slab of 2^order pages. */
static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
    unsigned int slab_bytes = (unsigned int)PAGE_SIZE << order;

    return slab_bytes / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
                                                      unsigned int size)
{
    struct kmem_cache_order_objects x = {
        (order << OO_SHIFT) + order_objects(order, size)};

    return x;
}

/*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object: the in-use byte count, the freelist-pointer offset, the
 * aligned object size, and the order/objects packing.
 *
 * Always returns 0 in this simplified implementation; callers should
 * still check the result so error paths work once layout validation is
 * added. (The dead `if (0) {} else` scaffold and the unused `flags`
 * local from the original were removed.)
 */
static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
{
    unsigned int size = s->object_size;
    unsigned int order;

    /*
     * Round up object size to the next word boundary. We can only
     * place the free pointer at word boundaries and this determines
     * the possible location of the free pointer.
     */
    size = ALIGN(size, sizeof(void *));

    /*
     * With that we have determined the number of bytes in actual use
     * by the object and redzoning.
     */
    s->inuse = size;

    /*
     * Store freelist pointer near middle of object to keep
     * it away from the edges of the object to avoid small
     * sized over/underflows from neighboring allocations.
     */
    s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));

    /*
     * SLUB stores one object immediately after another beginning from
     * offset 0. In order to align the objects we have to simply size
     * each object to conform to the alignment.
     */
    size = ALIGN(size, s->align);
    s->size = size;

    order = calculate_order(size);

    /* Determine the number of objects per slab. */
    s->oo = oo_make(order, size);

    return 0;
}

/*
 * Initialize a kmem_cache structure from its creation arguments and
 * compute its object layout. Returns 0 on success or a negative errno
 * from calculate_sizes().
 */
int __kmem_cache_init(struct kmem_cache *s, const char *name,
                      unsigned int size, struct kmem_cache_args *args,
                      slab_flags_t flags)
{
    s->name = name;
    s->size = s->object_size = size;

    /*
     * Record the cache flags: calculate_sizes() reads s->flags, and the
     * original code never stored them, silently dropping e.g. the
     * SLAB_KMALLOC bit passed in by create_kmalloc_cache().
     */
    s->flags = flags;

    s->align = args->align;
    s->ctor = args->ctor;

    /* Propagate layout-computation failures instead of discarding them. */
    return calculate_sizes(args, s);
}

/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
                              unsigned int size, slab_flags_t flags,
                              unsigned int useroffset, unsigned int usersize)
{
    struct kmem_cache_args kmem_args = {};
    unsigned int align = ARCH_KMALLOC_MINALIGN;
    int err;

    /*
     * kmalloc caches guarantee alignment of at least the largest
     * power-of-two divisor of the size; for power-of-two sizes that
     * is the size itself.
     */
    if (flags & SLAB_KMALLOC)
    {
        unsigned int pot = 1U << (ffs(size) - 1);

        if (pot > align)
            align = pot;
    }
    kmem_args.align = calculate_alignment(flags, align, size);

    err = __kmem_cache_init(s, name, size, &kmem_args, flags);
    if (err)
        panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
              name, size, err);

    /* Boot caches are exempt from merging for now. */
    s->refcount = -1;
}

/* Minimum alignment enforced for kmalloc objects in this configuration. */
static unsigned int __kmalloc_minalign(void)
{
    const unsigned int minalign = 32;

    return minalign;
}

/*
 * Map an allocation size to its kmalloc_info[]/kmalloc_caches[] index.
 *
 * Index layout (matching kmalloc_size_index[] and kmalloc_slab()):
 *   0    -> zero-size
 *   1, 2 -> the non-power-of-two caches 96 and 192
 *   3..  -> power-of-two caches starting at 8 (2^3), so 256 is 8, etc.
 *
 * The previous stub always returned 5 (kmalloc-32), mis-bucketing every
 * other size. size_is_constant is unused here; it exists so the
 * signature matches the kernel's compile-time-folding variant.
 */
unsigned int __kmalloc_index(size_t size, bool size_is_constant)
{
    unsigned int index;
    size_t cache_size;

    if (size == 0)
        return 0;
    if (size <= 8)
        return 3;
    if (size <= 16)
        return 4;
    if (size <= 32)
        return 5;
    if (size <= 64)
        return 6;
    if (size <= 96)
        return 1;
    if (size <= 128)
        return 7;
    if (size <= 192)
        return 2;

    /* Power-of-two buckets above 192: 256 is index 8, each doubling adds one. */
    index = 8;
    cache_size = 256;
    while (cache_size < size)
    {
        cache_size <<= 1;
        index++;
    }
    return index;
}

/*
 * Allocate and initialize one kmalloc cache at boot. Panics on
 * allocation failure, since boot cannot proceed without it.
 */
static struct kmem_cache *__init create_kmalloc_cache(const char *name,
                                                      unsigned int size,
                                                      slab_flags_t flags)
{
    struct kmem_cache *s;

    s = kmem_cache_zalloc(&boot_kmem_cache, GFP_NOWAIT);
    if (!s)
        panic("Out of memory when creating slab %s\n", name);

    create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);

    /* Unlike boot caches (refcount -1), kmalloc caches are live. */
    s->refcount = 1;

    return s;
}

/*
 * Instantiate kmalloc_caches[type][idx], creating the backing cache if
 * it does not exist yet. When the minimum alignment pushes the object
 * into a bigger bucket, the requested index is aliased to that bucket.
 */
static void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
    unsigned int min_align = __kmalloc_minalign();
    unsigned int obj_size = kmalloc_info[idx].size;
    slab_flags_t flags = 0;
    int real_idx = idx;

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
    if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
        flags |= SLAB_NO_MERGE;
#endif

    /* A larger minimum alignment may bump the object into a bigger bucket. */
    if (min_align > ARCH_KMALLOC_MINALIGN)
    {
        obj_size = ALIGN(obj_size, min_align);
        real_idx = __kmalloc_index(obj_size, false);
    }

    if (!kmalloc_caches[type][real_idx])
        kmalloc_caches[type][real_idx] = create_kmalloc_cache(
            kmalloc_info[real_idx].name[type],
            obj_size, flags);

    /* Point the originally requested slot at the cache actually created. */
    if (idx != real_idx)
        kmalloc_caches[type][idx] = kmalloc_caches[type][real_idx];
}

/* Populate the full kmalloc_caches[][] array for every cache type. */
void __init create_kmalloc_caches(void)
{
    enum kmalloc_cache_type type;

    for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++)
    {
        int shift;

        /* Non-power-of-two caches (96 and 192 bytes) first. */
        if (KMALLOC_MIN_SIZE <= 32)
            new_kmalloc_cache(1, type);
        if (KMALLOC_MIN_SIZE <= 64)
            new_kmalloc_cache(2, type);

        /* Then the power-of-two caches. */
        for (shift = KMALLOC_SHIFT_LOW; shift <= KMALLOC_SHIFT_HIGH; shift++)
            new_kmalloc_cache(shift, type);
    }
}

/* Bootstrap the slab allocator: the cache-of-caches, then the kmalloc array. */
void __init kmem_cache_init(void)
{
    unsigned int boot_size = offsetof(struct kmem_cache, node) +
                             nr_node_ids * sizeof(struct kmem_cache_node *);

    create_boot_cache(&boot_kmem_cache, "kmem_cache", boot_size,
                      SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);

    create_kmalloc_caches();
}
