
#include <align.h>
#include <as.h>
#include <fibril.h>

#include "runtime_types.h"
#include "type.h"
#include "arch.h"
#include "lock.h"
#include "panic.h"
#include "util.h"
#include "print.h"

#include "mem_size.h"
#include "mem_sys.h"
#include "mem_fixalloc.h"
#include "mem_heap.h"
#include "mem_central.h"
#include "mem_cache.h"
#include "mem_alloc.h"
#include "mem_stats.h"
#include "mem_gc.h"

// Fixed-size allocator backing all MCache objects.
// Protected by runtime_mheap's lock.
static FixAlloc mcachealloc;

// Fibril-local-storage slot holding each fibril's MCache pointer.
// Allocated once in runtime_mallocinit; read/written via fibril_get_tls/fibril_set_tls.
static int mcache_tls_index;

// Allocate a fresh MCache for the calling fibril.
// The backing FixAlloc is lazily initialized on first use; both the
// initialization and the allocation are serialized by runtime_mheap's lock,
// which also protects the mcache_* fields of mstats.
static MCache*
runtime_allocmcache(void)
{
	/*int32 rate;*/
	MCache *c;

	runtime_lock(&runtime_mheap);
	if (mcachealloc.size == 0) {
		// First call: set up the fixed-size allocator for MCache objects.
		runtime_FixAlloc_Init(&mcachealloc, sizeof(MCache), runtime_sys_alloc, NULL, NULL);
	}

	c = runtime_FixAlloc_Alloc(&mcachealloc);
	if (c == NULL) {
		// Fail loudly here rather than returning NULL: callers
		// (runtime_mcache) dereference the result unconditionally.
		runtime_unlock(&runtime_mheap);
		runtime_throw("runtime_allocmcache: out of memory");
	}
	mstats.mcache_inuse = mcachealloc.inuse;
	mstats.mcache_sys = mcachealloc.sys;
	runtime_unlock(&runtime_mheap);

/* TODO
	// Set first allocation sample size.
	rate = runtime_MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime_fastrand1() % (2*rate);
*/
	return c;
}

// This function must be reentrant, and the returned cache must be safe to use without any locking.
// Return the calling fibril's MCache, creating and registering one on
// first use. This function must be reentrant, and the returned cache
// must be safe to use without any locking.
MCache *
runtime_mcache(void)
{
	MCache *cache = fibril_get_tls(mcache_tls_index);

	// Fast path: this fibril already owns a cache.
	if (cache != NULL)
		return cache;

	// Slow path: allocate a cache and stash it in fibril-local storage.
	cache = runtime_allocmcache();
	if (fibril_set_tls(mcache_tls_index, cache) != 0) {
		runtime_throw("runtime_mcache: Out of memory\n");
		// Not reached.
	}
	return cache;
}

extern uintptr runtime_sizeof_C_MStats __asm__("runtime.Sizeof_C_MStats");

// One-time initialization of the allocator: size classes, the heap,
// the per-sizeclass central free lists, and the TLS slot used to hold
// per-fibril MCaches. Must run before the first runtime_malloc call.
// Uses a prototype (void) instead of the old-style empty parameter list.
void runtime_mallocinit(void)
{
	runtime_sizeof_C_MStats = sizeof(MStats);
	runtime_InitSizes();

	// Initialize the heap.
	runtime_MHeap_Init(&runtime_mheap);

	// Initialize the central free lists, one per size class.
	for(unsigned i = 0; i < nelem(runtime_mcentral); i++)
		runtime_MCentral_Init(&runtime_mcentral[i], i);

	// NOTE(review): fibril_alloc_tls's failure convention is not visible
	// here — confirm whether its return value needs checking.
	mcache_tls_index = fibril_alloc_tls();

	// See if it works.
	runtime_free(runtime_malloc(1));
}

// Enable garbage collection (checked by the collector via mstats.enablegc).
// Uses a prototype (void) instead of the old-style empty parameter list.
void runtime_enable_gc(void)
{
	mstats.enablegc = 1;
}

// Disable garbage collection (checked by the collector via mstats.enablegc).
// Uses a prototype (void) instead of the old-style empty parameter list.
void runtime_disable_gc(void)
{
	mstats.enablegc = 0;
}

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
//
// flag:   FlagNo* bits (e.g. FlagNoGC, FlagNoPointers) controlling bitmap marking.
// dogc:   if nonzero, may trigger a collection when heap_alloc crosses next_gc.
// zeroed: passed through to MCache_Alloc; presumably requests zeroed memory —
//         TODO confirm against mem_cache.
void*
runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	/*
	M *m;
	G *g;
	int32 rate;
	*/
	int32 sizeclass;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	/*
	m = runtime_m();
	g = runtime_g();
	if(g->status == Gsyscall)
		dogc = 0;
	if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) {
		runtime_gosched();
		m = runtime_m();
	}
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;
	*/
	// Zero-byte allocations still return a distinct, valid pointer.
	if(size == 0)
		size = 1;

	// Per-fibril cache; also accumulates local allocation statistics.
	c = runtime_mcache();
	c->local_nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		// Round size up to the size class's block size so the stats
		// below account for the actual bytes handed out.
		sizeclass = runtime_SizeToClass(size);
		size = runtime_class_to_size[sizeclass];
		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == NULL)
			runtime_throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap, in whole pages.
		size = ALIGN_UP(size, PAGE_SIZE);
		npages = size / PAGE_SIZE;
		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
		if(s == NULL)
			runtime_throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		// s->start is a page number; convert to an address.
		v = (void*)(s->start * PAGE_SIZE);

		// setup for mark sweep
		runtime_markspan(&runtime_mheap, v, 0, 0, true);
	}
	// Record the allocation in the GC bitmap unless the caller opted out.
	if(!(flag & FlagNoGC))
		runtime_markallocated(&runtime_mheap, v, size, (flag&FlagNoPointers) != 0, true);

	/*
	m->mallocing = 0;

	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size >= (uint32) rate)
			goto profile;
		if((uint32) m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime_fastrand1() % (2*rate);
		profile:
			runtime_setblockspecial(v, true);
			runtime_MProf_Malloc(v, size);
		}
	}

	*/

	// NOTE(review): mstats.heap_alloc / next_gc are read without the heap
	// lock here — confirm whether that race is tolerated by the collector.
	if(dogc && mstats.heap_alloc >= mstats.next_gc) {
		runtime_gc(0);
	}
	return v;
}

// Default allocation entry point: no special flags, GC-eligible,
// and memory requested zeroed (see runtime_mallocgc for the knobs).
void*
runtime_malloc(uintptr size)
{
	void *p;

	p = runtime_mallocgc(size, 0, 1, 1);
	return p;
}

// Free an allocation previously returned by runtime_malloc/runtime_mallocgc.
// NULL is a no-op. Small objects go back to the per-fibril cache's free
// list; large objects (sizeclass 0) are returned directly to the heap.
void
runtime_free(void *v) {
	/*
	M *m;
	uint32 prof;
	*/
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uintptr size;

	if(v == NULL)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	/*
	m = runtime_m();
	if(m->mallocing)
		runtime_throw("malloc/free - deadlock");
	m->mallocing = 1;
	*/

	// Locate the span containing v; a miss means v was never allocated
	// by this allocator (or was already freed and coalesced).
	if(!runtime_mlookup(&runtime_mheap, v, NULL, NULL, &s)) {
		runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}
	/*
	prof = runtime_blockspecial(v);
	*/
	
	// Find size class for v.
	sizeclass = s->sizeclass;
	c = runtime_mcache();
	if(sizeclass == 0) {
		// Large object.
		size = s->npages * PAGE_SIZE;
		// The first word doubles as a "needs to be zeroed" flag for
		// the next allocator of this span.
		*(uintptr*)(s->start * PAGE_SIZE) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(&runtime_mheap, v, size, true);
		runtime_unmarkspan(&runtime_mheap, v, PAGE_SIZE);
		runtime_MHeap_Free(&runtime_mheap, s, 1);
	} else {
		// Small object.
		size = runtime_class_to_size[sizeclass];
		// Second word is the "needs to be zeroed" flag (the first word
		// is used as the free-list link while the block sits in a list).
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime_markfreed(&runtime_mheap, v, size, true);
		c->local_by_size[sizeclass].nfree++;
		runtime_MCache_Free(c, v, sizeclass, size);
	}
	// Update per-fibril stats; local_alloc tracks live bytes.
	c->local_nfree++;
	c->local_alloc -= size;
	/*
	if(prof)
		runtime_MProf_Free(v, size);
	m->mallocing = 0;
	*/
}


// Go called functions.
extern uint8 *runtime_new(Type *typ) __asm__("runtime.new");
extern void runtime_GC(void) __asm__("runtime.GC");

// Implementation of Go's built-in new(): allocate a zeroed object of the
// given type. Types flagged GO_NO_POINTERS skip pointer scanning by the GC.
uint8 *
runtime_new(Type *typ) {
	uint32 flag;

	if (typ->__code & GO_NO_POINTERS)
		flag = FlagNoPointers;
	else
		flag = 0;

	return runtime_mallocgc(typ->__size, flag, 1, 1);
}

// Implementation of Go's runtime.GC(): force a collection now.
// Defined with a (void) prototype to match the extern declaration above.
void
runtime_GC(void) {
	runtime_gc(1);
}
