#include "../include/linux/memblock.h"
#include "../include/linux/numa.h"
#include "../include/linux/limits.h"
#include "../include/linux/kernel.h"

#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
#endif

/* Static backing storage for the region tables, sized for early boot. */
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS];

/*
 * BUGFIX: this guard was "#ifndef", i.e. the array existed only when
 * CONFIG_HAVE_MEMBLOCK_PHYS_MAP was NOT set, while the memblock
 * initializer below references it only when the option IS set.  That
 * configuration failed to link; the other left a dead array.
 */
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

/*
 * The single global memblock instance.
 *
 * Each region table starts with cnt = 1 and an all-zero region:
 * memblock_add_range() treats regions[0].size == 0 as the "table is
 * empty" fast path, so the sentinel is replaced by the first real range.
 */
struct memblock memblock = 
{
    /* Usable system RAM. */
    .memory.regions = memblock_memory_init_regions,
    .memory.cnt = 1,
    .memory.max = INIT_MEMBLOCK_REGIONS,
    .memory.name = "memory",

    /* Ranges carved out of "memory" (firmware, kernel image, allocations). */
    .reserved.regions = memblock_reserved_init_regions,
    .reserved.cnt = 1,
    .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
    .reserved.name = "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
    /* Full physical map, kept separately on platforms that need it. */
    .physmem.regions = memblock_physmem_init_regions,
    .physmem.cnt = 1,
    .physmem.max = INIT_PHYSMEM_REGIONS,
    .physmem.name = "physmem",
#endif
    /* Allocation searches run top-down by default. */
    .bottom_up = false,
    .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
};

/*
 * Clamp *size so that base + *size cannot wrap past PHYS_ADDR_MAX.
 * Updates *size in place and also returns the clamped value.
 */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t* size)
{
    phys_addr_t capped = min(*size, PHYS_ADDR_MAX - base);

    *size = capped;
    return capped;
}

/*
 * Local overlap-safe byte copy (no libc in this environment).
 * Copies n bytes from src to dest and returns dest, choosing the copy
 * direction so overlapping ranges are handled correctly.
 */
static void* memmove(void* dest, const void* src, size_t n)
{
    unsigned char* d = (unsigned char*) dest;
    const unsigned char* s = (const unsigned char*) src;

    if (d == s || n == 0)
    {
        return dest;
    }

    if (d > s && d < s + n)
    {
        /* dest overlaps the tail of src: copy backwards so every byte
         * is read before it is overwritten. */
        size_t i = n;
        while (i--)
        {
            d[i] = s[i];
        }
    }
    else
    {
        /* Disjoint, or dest below src: a forward copy is safe. */
        for (size_t i = 0; i < n; i++)
        {
            d[i] = s[i];
        }
    }

    return dest;
}

/*
 * Insert a new region at index idx, shifting regions [idx, cnt) up by
 * one slot.  The caller guarantees the table has a free slot and keeps
 * the array sorted by base address.
 */
static void memblock_insert_region(struct memblock_type* type,
    int idx, phys_addr_t base,
    phys_addr_t size, int nid,
    enum memblock_flags flags)
{
    struct memblock_region* slot = &type->regions[idx];

    /* Open a hole at idx; ranges overlap, hence memmove. */
    memmove(slot + 1, slot, (type->cnt - idx) * sizeof(*slot));

    slot->base = base;
    slot->size = size;
    slot->flags = flags;
    memblock_set_region_node(slot, nid);

    type->cnt++;
    type->total_size += size;
}

static void memblock_merge_regions(struct memblock_type *type)
{
    int i = 0;

    while (i < type->cnt - 1)
    {
        struct memblock_region* this = &type->regions[i];
        struct memblock_region* next = &type->regions[i + 1];

        if (this->base + this->size != next->base ||
            memblock_get_region_node(this) != 
            memblock_get_region_node(next) ||
            this->flags != next->flags)
        {
            i++;
            continue;
        }
        this->size += next->size;
        memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
        type->cnt--;
    }
    
}

/* Defined later in this file; forward-declared for the growth check below. */
static int memblock_double_array(struct memblock_type* type,
    phys_addr_t new_area_start, phys_addr_t new_area_size);

/**
 * memblock_add_range - add range [base, base + size) to a region table
 * @type:  region table to operate on
 * @nid:   node id for the new region(s)
 * @flags: flags for the new region(s)
 *
 * Parts of the range already present are left untouched; only the gaps
 * are inserted as new regions, then adjacent compatible regions are
 * merged.  Returns 0 on success, -1 if the table could not be grown.
 */
int memblock_add_range(struct memblock_type* type,
    phys_addr_t base, phys_addr_t size,
    int nid, enum memblock_flags flags)
{
    bool insert = false;
    phys_addr_t obase = base;
    phys_addr_t end = base + memblock_cap_size(base, &size);
    int idx, nr_new;
    struct memblock_region* rgn;

    if (!size)
    {
        return 0;
    }

    /* Fast path: empty table (sentinel region), install directly. */
    if (type->regions[0].size == 0)
    {
        type->regions[0].base = base;
        type->regions[0].size = size;
        type->regions[0].flags = flags;

        memblock_set_region_node(&type->regions[0], nid);
        type->total_size = size;
        return 0;
    }
    
repeat:
    /*
     * Two passes over the table: the first (insert == false) only
     * counts how many regions must be inserted, the second
     * (insert == true) performs the insertions.
     */
    base = obase;
    nr_new = 0;
    for (idx = 0, rgn = &type->regions[0]; idx < type->cnt; idx++, rgn = &type->regions[idx])
    {
        phys_addr_t rbase = rgn->base;
        phys_addr_t rend = rbase + rgn->size;

        if (rbase >= end)
        {
            break;
        }
        if (rend <= base)
        {
            continue;
        }
        
        /* [base, rbase) is an uncovered gap below this region. */
        if (rbase > base)
        {
            /* NOTE(review): upstream guards this warning with CONFIG_NUMA,
             * not CONFIG_HAVE_MEMBLOCK_PHYS_MAP — confirm intent. */
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
            WARN_ON(nid != memblock_get_region_node(rgn)); 
#endif
            nr_new++;
            if (insert)
            {
                memblock_insert_region(type, idx++, base, rbase - base, nid, flags);
            }
        }
        /* Skip over the part already covered by this region. */
        base = min(rend, end);
    }

    /* Remaining tail above every existing region. */
    if (base < end)
    {
        nr_new++;
        if (insert)
        {
            memblock_insert_region(type, idx, base, end - base, nid, flags);
        }
    }

    if (!nr_new)
    {
        return 0;
    }
    
    if (!insert)
    {
        /*
         * BUGFIX: ensure the table can hold nr_new extra regions before
         * the insertion pass; previously the second pass could write
         * past type->max.  (memblock_double_array() is currently a stub
         * returning success, so this preserves existing behavior while
         * putting the check where it belongs.)
         */
        if (type->cnt + nr_new > type->max)
        {
            if (memblock_double_array(type, obase, size) < 0)
            {
                return -1;
            }
        }
        insert = true;
        goto repeat;
    }
    else
    {
        memblock_merge_regions(type);
    }
    
    return 0;
}

/* Register [base, base + size) as usable system memory (any node). */
int memblock_add(phys_addr_t base, phys_addr_t size)
{
    struct memblock_type* mem = &memblock.memory;

    return memblock_add_range(mem, base, size, MAX_NUMNODES, 0);
}

/* Mark [base, base + size) as reserved so the allocator skips it. */
int memblock_reserve(phys_addr_t base, phys_addr_t size)
{
    struct memblock_type* rsv = &memblock.reserved;

    return memblock_add_range(rsv, base, size, MAX_NUMNODES, 0);
}

/* Dump the "memory" region table to the console (debug aid). */
void memblock_memory_print(void)  /* was "()": unspecified-args style */
{
    struct memblock_type* type = &(memblock.memory);

    /* NOTE(review): %x / %08x assume phys_addr_t and total_size fit in
     * 32 bits — revisit if 64-bit physical addresses are enabled. */
    printk("memblock: %s, cnt: %d, max: %d, total_size: 0x%x\n", type->name,
        type->cnt, type->max, type->total_size);
    
    for (int i = 0; i < type->cnt; i++)
    {
        struct memblock_region* region = &(type->regions[i]);
        printk("[ %d ]from: 0x%08x, size: 0x%08x, flags: %d\n",
            i, region->base, region->size, region->flags);
    }
}

/* Dump the "reserved" region table to the console (debug aid). */
void memblock_reserved_print(void)  /* was "()": unspecified-args style */
{
    struct memblock_type* type = &(memblock.reserved);

    /* NOTE(review): %x / %08x assume phys_addr_t and total_size fit in
     * 32 bits — revisit if 64-bit physical addresses are enabled. */
    printk("memblock: %s, cnt: %d, max: %d, total_size: 0x%x\n", type->name,
        type->cnt, type->max, type->total_size);
    
    for (int i = 0; i < type->cnt; i++)
    {
        struct memblock_region* region = &(type->regions[i]);
        printk("[ %d ]from: 0x%08x, size: 0x%08x, flags: %d\n",
            i, region->base, region->size, region->flags);
    }
}

/* Flags for allocation searches; no mirroring support yet, so none. */
static enum memblock_flags choose_memblock_flags(void)
{
    return MEMBLOCK_NONE;
}

/* True when allocation searches should scan low addresses first. */
static inline bool memblock_bottom_up(void)
{
    return memblock.bottom_up;
}

/*
 * Bottom-up allocation search.
 * Stub: always returns 0 ("nothing found").  Only reachable when
 * memblock.bottom_up is true, which nothing in this file sets.
 * TODO: implement the low-to-high free-range scan.
 */
static phys_addr_t __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
    phys_addr_t size, phys_addr_t align, int nid, enum memblock_flags flags)
{
    return 0;
}

/*
 * Decide whether the iterator should ignore region m.  Currently the
 * only criterion is the NUMA node: when a specific node was requested,
 * regions on other nodes are skipped.  (flags is unused for now; kept
 * for parity with the kernel interface.)
 */
static bool should_skip_region(struct memblock_region* m, int nid, int flags)
{
    return nid != NUMA_NO_NODE && nid != memblock_get_region_node(m);
}

/*
 * Advance a reverse (high-to-low) walk over address ranges that are
 * covered by type_a but NOT covered by type_b.  With type_a = memory
 * and type_b = reserved (see for_each_free_mem_range_reverse below)
 * this enumerates the free physical ranges, top-down.
 *
 * *idx packs the iterator state: low 32 bits index into type_a's
 * regions, high 32 bits index into the "gaps" between type_b's regions.
 * Start a walk with *idx == ULLONG_MAX; the walk is over when *idx
 * reads back as ULLONG_MAX.  out_start / out_end / out_nid may each be
 * NULL if the caller does not need that value.
 */
void __next_mem_range_rev(u64 *idx, int nid,
    enum memblock_flags flags,
    struct memblock_type *type_a,
    struct memblock_type *type_b,
    phys_addr_t *out_start,
    phys_addr_t * out_end, int *out_nid)
{
    int idx_a = *idx & 0xffffffff;
    int idx_b = *idx >> 32;

    /* MAX_NUMNODES is accepted as a legacy alias for "any node". */
    if (nid == MAX_NUMNODES)
    {
        nid = NUMA_NO_NODE;
    }

    /* First call: start at the last type_a region and the open-ended
     * gap above the last type_b region. */
    if (*idx == (u64) ULLONG_MAX)
    {
        idx_a = type_a->cnt - 1;
        if (type_b != NULL)
        {
            idx_b = type_b->cnt;
        }
        else
        {
            idx_b = 0;
        }
    }

    for (; idx_a >= 0; idx_a--)
    {
        struct memblock_region* m = &type_a->regions[idx_a];

        phys_addr_t m_start = m->base;
        phys_addr_t m_end = m->base + m->size;
        int m_nid = memblock_get_region_node(m);

        if (should_skip_region(m, nid, flags))
        {
            continue;
        }
        
        /* No exclusion table: emit the whole type_a region as-is. */
        if (!type_b)
        {
            if (out_start)
            {
                *out_start = m_start;
            }
            if (out_end)
            {
                *out_end = m_end;
            }
            if (out_nid)
            {
                *out_nid = m_nid;
            }
            /* Consume this region and save the state for the next call. */
            idx_a--;
            *idx = (u32) idx_a | (u64) idx_b << 32;
            return;
        }
        
        /* Scan type_b gaps from high to low.  Gap idx_b is the hole
         * between region idx_b-1 and region idx_b, open-ended at 0 and
         * PHYS_ADDR_MAX at the two extremes. */
        for (; idx_b >= 0; idx_b--)
        {
            struct memblock_region* r;
            phys_addr_t r_start;
            phys_addr_t r_end;

            r = &type_b->regions[idx_b];
            r_start = idx_b ? r[-1].base + r[-1].size : 0;
            r_end = idx_b < type_b->cnt ? r->base : PHYS_ADDR_MAX;

            /* Gap lies entirely below this type_a region: move on to
             * the next (lower) type_a region, keeping idx_b. */
            if (r_end <= m_start)
            {
                break;
            }

            /* Overlap between the region and the gap: emit it. */
            if (m_end > r_start)
            {
                if (out_start)
                {
                    *out_start = max(m_start, r_start);
                }
                if (out_end)
                {
                    *out_end = min(m_end, r_end);
                }
                if (out_nid)
                {
                    *out_nid = m_nid;
                }
                /* Advance whichever side was fully consumed by this
                 * intersection. */
                if (m_start >= r_start)
                {
                    idx_a--;
                }
                else
                {
                    idx_b--;
                }
                *idx = (u32) idx_a | (u64) idx_b << 32;
                return;
            }
            
        }
    }
    /* Walk exhausted: park the iterator at its terminal value. */
    *idx = ULLONG_MAX;
}

/*
 * Iterate i (a u64 iterator cookie) in reverse over ranges covered by
 * type_a but not by type_b, calling __next_mem_range_rev() to advance.
 * The loop terminates when the iterator reports ULLONG_MAX again.
 * p_start / p_end / p_nid may be NULL if unwanted.
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
    p_start, p_end, p_nid)\
    for (i = (u64) ULLONG_MAX, __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
        p_start, p_end, p_nid); \
        i != (u64) ULLONG_MAX; \
        __next_mem_range_rev(&i, nid, flags, type_a, type_b, p_start, p_end, p_nid))

/* Top-down walk over free physical ranges: memory minus reserved. */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, p_nid) \
    for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,\
    nid, flags, p_start, p_end, p_nid)

/*
 * Top-down allocation search: walk the free ranges from the top of
 * memory downwards and return the highest align-aligned address with
 * size free bytes inside [start, end).  Returns 0 when nothing fits.
 */
static phys_addr_t __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
    phys_addr_t size, phys_addr_t align, int nid, enum memblock_flags flags)
{
    phys_addr_t this_start, this_end, cand;
    u64 i = ULLONG_MAX;

    for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, NULL)
    {
        /* Clip the free range to the requested window. */
        this_start = clamp(this_start, start, end);
        this_end = clamp(this_end, start, end);
        /* Guard the unsigned subtraction below against wrap-around. */
        if (this_end < size)
        {
            continue;
        }
        
        /* Highest aligned base that still fits size bytes below this_end. */
        cand = round_down(this_end - size, align);
        if (cand >= this_start)
        {
            return cand;
        }
        
    }
    return 0;
}

/*
 * Find size free bytes aligned to align inside [start, end) on node
 * nid.  Sentinel end values are translated to the configured limit,
 * the window is clamped away from page zero, then the search is
 * delegated to the configured direction.  Returns 0 on failure.
 */
static phys_addr_t memblock_find_in_range_node(phys_addr_t size,
    phys_addr_t align, phys_addr_t start, phys_addr_t end, 
    int nid, enum memblock_flags flags)
{
    bool is_sentinel = (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
        end == MEMBLOCK_ALLOC_KASAN);

    if (is_sentinel)
    {
        end = memblock.current_limit;
    }

    /* Never hand out page zero; keep the window well-formed. */
    start = max_t(phys_addr_t, start, PAGE_SIZE);
    end = max(start, end);

    return memblock_bottom_up()
        ? __memblock_find_range_bottom_up(start, end, size, align, nid, flags)
        : __memblock_find_range_top_down(start, end, size, align, nid, flags);
}

/*
 * Public search entry point: find size free bytes aligned to align in
 * [start, end), any node.  If a mirrored-memory restriction is active
 * and fails, retry once without it.  Returns 0 on failure.
 */
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
    phys_addr_t size, phys_addr_t align)
{
    enum memblock_flags flags = choose_memblock_flags();

    for (;;)
    {
        phys_addr_t found = memblock_find_in_range_node(size, align, start, end,
            NUMA_NO_NODE, flags);

        if (found || !(flags & MEMBLOCK_MIRROR))
        {
            return found;
        }
        /* Mirrored memory exhausted: drop the restriction and retry. */
        flags &= ~MEMBLOCK_MIRROR;
    }
}

/*
 * Grow type->regions when the static INIT_* arrays run out of slots.
 * Stub: always reports success without actually growing the array, so
 * callers that loop "while (cnt + k > max)" on this (see
 * memblock_isolate_range) would spin forever once a table really
 * fills.  TODO: implement once a boot-time allocator exists.
 */
static int memblock_double_array(struct memblock_type* type,
    phys_addr_t new_area_start, phys_addr_t new_area_size)
{
    return 0;
}

/**
 * memblock_isolate_range - split regions so [base, base + size) is
 * covered only by whole regions
 * @start_rgn: out - index of the first isolated region
 * @end_rgn:   out - index one past the last isolated region
 *
 * On return, [*start_rgn, *end_rgn) is the half-open index range of
 * regions lying entirely inside [base, base + size); *end_rgn == 0
 * means nothing intersected.  Returns 0 on success, -1 if the table
 * could not be grown.
 */
static int memblock_isolate_range(struct memblock_type * type,
    phys_addr_t base, phys_addr_t size,
    int* start_rgn, int* end_rgn)
{
    phys_addr_t end = base + memblock_cap_size(base, &size);
    int idx;
    struct memblock_region* rgn;

    *start_rgn = *end_rgn = 0;

    if (!size)
    {
        return 0;
    }

    /* Splitting can add at most two regions; make room up front.
     * NOTE(review): memblock_double_array() is a stub returning 0, so
     * this loop cannot terminate once cnt + 2 really exceeds max. */
    while (type->cnt + 2 > type->max)
    {
        if (memblock_double_array(type, base, size) < 0)
        {
            return -1;
        }
    }

    for (idx = 0, rgn = &type->regions[0]; idx < type->cnt; idx++, rgn = &type->regions[idx]) 
    {
        phys_addr_t rbase = rgn->base;
        phys_addr_t rend = rbase + rgn->size;

        if (rbase >= end)
        {
            break;
        }
        if (rend <= base)
        {
            continue;
        }
        
        if (rbase < base)
        {
            /* Region straddles base: shrink it to [base, rend) and
             * insert the low part [rbase, base) before it; the next
             * iteration revisits the shrunk region. */
            rgn->base = base;
            rgn->size -= base - rbase;
            type->total_size -= base - rbase;
            memblock_insert_region(type, idx, rbase, base - rbase, 
                memblock_get_region_node(rgn), rgn->flags);
        } 
        else if (rend > end)
        {
            /* Region straddles end: shrink it to [end, rend) and insert
             * the low part [rbase, end); idx-- so the inserted part is
             * processed (and recorded) next iteration. */
            rgn->base = end;
            rgn->size -= end - rbase;
            type->total_size -= end - rbase;
            memblock_insert_region(type, idx--, rbase, end - rbase, 
                memblock_get_region_node(rgn),
                rgn->flags);
        }
        else 
        {
            /* Region lies fully inside [base, end): record it. */
            if (!*end_rgn)
            {
                *start_rgn = idx;
            }
            /*
             * BUGFIX: was "*end_rgn = idx", which (a) excluded the last
             * isolated region from the half-open range callers such as
             * memblock_remove_range() iterate, and (b) broke the
             * "!*end_rgn" first-match test above whenever the first
             * isolated region had index 0.
             */
            *end_rgn = idx + 1;
        }
    }

    return 0;
}

static void memblock_remove_region(struct memblock_type* type, unsigned long r)
{
    type->total_size -= type->regions[r].size;
    memmove(&type->regions[r], &type->regions[r + 1], (type->cnt - (r + 1)) * sizeof(type->regions[r]));
    type->cnt--;

    if (type->cnt == 0)
    {
        type->cnt = 1;
        type->regions[0].base = 0;
        type->regions[0].size = 0;
        type->regions[0].flags = 0;
        memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
    }
    
}

/*
 * Remove [base, base + size) from the table: isolate the range into
 * whole regions, then delete those regions.  Returns 0 on success or
 * the isolate error code.
 */
static int memblock_remove_range(struct memblock_type* type,
    phys_addr_t base, phys_addr_t size)
{
    int start_rgn, end_rgn;
    int ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);

    if (ret)
    {
        return ret;
    }

    /* Delete from the highest index down so that the indices of the
     * not-yet-deleted regions stay valid. */
    for (int i = end_rgn - 1; i >= start_rgn; i--)
    {
        memblock_remove_region(type, i);
    }
    return 0;
}

/* Release a previously reserved range back to the free pool. */
int memblock_free(phys_addr_t base, phys_addr_t size)
{
    struct memblock_type* rsv = &memblock.reserved;

    return memblock_remove_range(rsv, base, size);
}

/*
 * Console smoke test: print the initial tables, reserve two pages via
 * the find+reserve path, then free and re-add one of them, printing
 * the tables after each step for manual inspection.
 */
void memblock_test(void)  /* was "()": unspecified-args style */
{
    memblock_memory_print();
    memblock_reserved_print();

    /* NOTE(review): memblock_find_in_range() returns 0 on failure;
     * these calls assume the searches succeed, which is acceptable in
     * a test routine but would need checking in real callers. */
    phys_addr_t mem = memblock_find_in_range(0, 1 << 20, 0x1000, PAGE_SIZE);
    memblock_reserve(mem, 0x1000);

    mem = memblock_find_in_range(0x100000, 0x200000, 0x1000, PAGE_SIZE);
    memblock_reserve(mem, 0x1000);

    memblock_reserved_print();

    /* Exercise the removal path: un-reserve the second page, then
     * re-add it to the memory table (a no-op if already covered). */
    memblock_free(mem, 0x1000);
    memblock_add(mem, 0x1000);

    memblock_memory_print();
    memblock_reserved_print();
}