#include <as.h>

#include "runtime_types.h"
#include "arch.h"
#include "proc.h"
#include "lock.h"
#include "util.h"
#include "print.h"
#include "panic.h"
#include "atomic.h"
#include "assert.h"
#include "time.h"

#include "mem_sys.h"
#include "mem_fixalloc.h"
#include "mem_size.h"
#include "mem_stats.h"
#include "mem_heap.h"
#include "mem_cache.h"
#include "mem_gc.h"
#include "mem_final.h"

// Stack of (finalizer function, object pointer) pairs, filled by sweep()
// via handlespecial() and drained by the finalizer goroutine runfinq().
// Uses the page-linked pointer stack implemented by ptr_push/ptr_pop below.
static uintptr *finalizer_stack = NULL; 

static bool
ptr_stack_empty(void *stack)
{
	uintptr *ptr_stack = *(uintptr **)stack;

	if (ptr_stack == NULL) {
		return true;
	}

	ptr_stack--;

	if ((((uintptr)ptr_stack) % PAGE_SIZE) == 0 && (*ptr_stack == 0)) {
		return true;
	}
	return false;
}

// This is a simple stack of pointers. It is stored in memory as a linked list of page sized blocks.
// The first (last) word on each page points to the last (first) word of the previous (next) page.
//
// Pushes 'val' onto the stack.  'stack' is the address of the caller's
// top-of-stack variable, which points at the next free slot on the
// current page (or is NULL before first use).  Throws on out-of-memory.
static void
ptr_push(void *stack, uintptr val)
{
	uintptr *ptr_stack = *(uintptr **)stack;

	if (ptr_stack == NULL) {
		// First use: allocate the initial page.
		uintptr *new = runtime_sys_alloc(PAGE_SIZE);
		if (new == NULL) {
			runtime_throw("GC0: out of memory");
		}
		assert((uintptr)new % PAGE_SIZE == 0);

		// Set the first and last pointer to NULL.
		// (No previous and no next page yet.)
		*new = 0;
		*((uintptr *)(((byte *)new) + PAGE_SIZE) - 1) = 0;

		// First data slot is just past the previous-page link word.
		ptr_stack = new + 1;
	}

	if (((uintptr)(ptr_stack + 1)) % PAGE_SIZE == 0) {
		// Last pointer on the page, points to the next page (if present).

		if (*ptr_stack == 0) {
			// Allocate a new page.
			// TODO: Try to extend an existing area first.

			uintptr *new = runtime_sys_alloc(PAGE_SIZE);
			if (new == NULL) {
				runtime_throw("GC0: out of memory");
			}
			assert((uintptr)new % PAGE_SIZE == 0);

			// Link the new page in: its first word points back at
			// this page's last word, this page's last word points
			// at the new page, and the new page has no successor.
			*new = (uintptr)ptr_stack;
			*((uintptr *)(((byte *)new) + PAGE_SIZE) - 1) = 0;
			*ptr_stack = (uintptr)new;
		}

		// Continue on the next page, just past its previous-page link.
		ptr_stack = (uintptr *)*ptr_stack + 1;
	}

	*ptr_stack = val;
	ptr_stack++;

	// Publish the new top-of-stack back to the caller.
	*(uintptr **)stack = ptr_stack;
}

// Pops and returns the top value of the pointer stack, or 0 when the
// stack is empty.  Consequently 0 is reserved as the end-of-stack
// sentinel and must never be pushed as a real value.
static uintptr
ptr_pop(void *stack)
{
	uintptr *ptr_stack = *(uintptr **)stack;

	if (ptr_stack == NULL) {
		// Never pushed to.
		return 0;
	}

	// Step down to the slot holding the top element.
	ptr_stack--;

	if (((uintptr)ptr_stack) % PAGE_SIZE == 0) {
		// First pointer on the page, points to the previous page.
		if (*ptr_stack == 0) {
			// No previous page: stack is empty.  Note that the
			// caller's top-of-stack is deliberately left unchanged.
			return 0;
		}
		// The link points at the previous page's last word (its
		// next-page link); the top element sits one word below it.
		ptr_stack = (uintptr *)*ptr_stack - 1;
	}

	*(uintptr **)stack = ptr_stack;
	return *ptr_stack;
}

// Scans the payload of every page of the finalizer stack, so that the
// queued finalizer arguments (and functions) are treated as GC roots.
// The first word of each page (the previous-page link) is excluded.
static void
scan_finalizers(void (*scan)(byte*, uintptr)) {
	uintptr *stack = finalizer_stack;
	while (stack != NULL) {
		// Offset of the next free slot within its page; the used
		// data words on this page are [page+1 word, stack).
		uintptr size = (uintptr)stack % PAGE_SIZE;

		// Skip pages with no payload (top page after the stack was
		// drained sits at page_base + one word).  Calling scan()
		// with a zero length would trip scanblock0's assert(n != 0).
		if (size > sizeof(void *)) {
			scan(((byte*)stack) - size + sizeof(void *), size - sizeof(void *));
		}

		// Move to previous page.
		stack = (uintptr *)(((uintptr)stack / PAGE_SIZE) * PAGE_SIZE);
		stack = (uintptr *)*stack;
	}
}

// A simple, single-threaded, iterative implementation.
// Conservatively scans the block [b, b+n): every word-aligned value in
// the block that points into the heap arena is treated as a potential
// pointer; the pointed-to object is marked and queued for scanning in
// turn, until no unscanned objects remain.
static void
scanblock0(byte *b, uintptr n)
{
	assert(b != NULL);
	assert(n != 0);

	byte *obj, *p;
	byte **vp;
	uintptr size;
	MSpan *s;

	// Stack of blocks to scan.
	// Static because it reuses allocated pages without ever freeing them.
	static void *stack = NULL;

	assert(ptr_stack_empty(&stack));

	// Blocks are pushed as (pointer, size) pairs, so size pops first.
	ptr_push(&stack, (uintptr)b);
	ptr_push(&stack, n);

	for (;;) {
		n = ptr_pop(&stack);
		b = (byte *)ptr_pop(&stack);

		if (b == NULL) {
			// Both pops returned the empty sentinel; a lone 0
			// size with a non-0 pointer means corruption.
			if (n != 0) {
				runtime_throw("scanblock0: bad stack");
			}

			assert(ptr_stack_empty(&stack));
			// No more objects.
			break;
		}

		// Align b up to a word boundary.
		uintptr off = (uintptr)b % sizeof(void *);
		if (off != 0) {
			b += sizeof(void *) - off;
			n -= sizeof(void *) - off;
		}

		vp = (byte **)b;
		n /= sizeof(void *);

		for (uintptr i = 0; i < n; i++) {
			obj = vp[i];

			// Words outside the arena cannot be pointers.
			if (obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
				continue;

			// Round down to word boundary.
			obj = (byte *)(((uintptr)obj / sizeof(void *)) * sizeof(void *));

			// TODO: Check that we are not already at the beginning of the block.

			// Consult span table to find beginning.
			s = runtime_MHeap_LookupMaybe(&runtime_mheap, obj);
			if (s == nil)
				continue;

			p = (byte *)((uintptr)s->start * PAGE_SIZE);
			if (s->sizeclass == 0) {
				// Large span: a single object covers it all.
				obj = p;
				size = (uintptr)s->npages * PAGE_SIZE;
			} else {
				// Small object: round the interior pointer down
				// to the start of its size-class slot.
				if((byte*)obj >= (byte *)s->limit)
					continue;
				size = runtime_class_to_size[s->sizeclass];
				obj = p + (((byte *)obj - p) / size) * size;
			}

			MBits bits;
			runtime_bits_init(&bits, &runtime_mheap, obj);

			// Skip free blocks and blocks already marked this cycle.
			if (!runtime_bits_read(&bits, bitAllocated) || runtime_bits_read(&bits, bitMarked)) {
				continue;
			}

			runtime_bits_set(&bits, bitMarked, false);

			// If object has no pointers, don't need to scan further.
			if (runtime_bits_read(&bits, bitNoPointers)) {
				continue;
			}

			assert(obj != NULL);
			assert(size != 0);

			// Queue the newly marked object for scanning.
			ptr_push(&stack, (uintptr)obj);
			ptr_push(&stack, size);
		}
	}

	assert(ptr_stack_empty(&stack));
}

// Markfin calls scanblock on the blocks that have finalizers:
// the things pointed at cannot be freed until the finalizers have run.
static void
markfin(void *v)
{
	uintptr size = 0;

	assert(v != NULL);

	// Resolve v to the base and size of its block; it must be a block
	// previously flagged as special (i.e. carrying a finalizer).
	if (!runtime_mlookup(&runtime_mheap, v, (byte**)&v, &size, nil)
	    || !runtime_blockspecial(&runtime_mheap, v))
		runtime_throw("mark - finalizer inconsistency");

	// do not mark the finalizer block itself.  just mark the things it points at.
	scanblock0(v, size);
}

// Mark phase: feed every GC root to 'scan' (normally scanblock0).
static void
mark(void (*scan)(byte*, uintptr))
{
	G *gp;

	// Roots in the data and bss segments.
	runtime_scan_roots(scan);

	// Goroutine descriptors, then their stacks.
	for (gp = runtime_allg(); gp != NULL; gp = gp->all_link)
		scan((byte*)gp, sizeof(G));
	runtime_scan_stacks(scan);
	/* TODO
	runtime_MProf_Mark(scan);
	*/

	// Blocks reachable only through registered or queued finalizers.
	runtime_walkfintab(markfin, scan);
	scan_finalizers(scan);
}

// Called by sweep() for an unmarked block with the special bit set.
// Queues the block's finalizer (if it has one) and returns true to keep
// the block alive; returns false when the block can be freed normally.
static bool
handlespecial(byte *p, uintptr size)
{
	void (*finalizer)(void*);
	const struct __go_func_type *ftype;

	if (!runtime_getfinalizer(p, true, &finalizer, &ftype)) {
		// No finalizer after all (it was a profiling special);
		// drop the special bit so the block frees normally.
		runtime_setblockspecial(&runtime_mheap, p, false, false);
		/* TODO
		runtime_MProf_Free(p, size);
		*/
		return false;
	}

	// Push the pair as (fn, arg); runfinq pops arg first.
	ptr_push(&finalizer_stack, (uintptr)finalizer);
	(void)ftype;
	//ptr_push(&finalizer_stack, (uintptr)ft);
	ptr_push(&finalizer_stack, (uintptr)p);
	return true;
}

static int fingwait;	// set when the finalizer goroutine sleeps waiting for work
static G *fing;		// the finalizer goroutine, created lazily by runtime_gc

// Body of the finalizer goroutine: repeatedly pops (function, argument)
// pairs off finalizer_stack and invokes them.  When the stack is empty
// it parks itself; runtime_gc wakes it via fingwait/runtime_ready once
// new finalizers have been queued.
static void
runfinq(void* dummy __attribute__ ((unused)))
{
	G* gp;
	byte *arg;
	void (*fn)(void*);

	gp = runtime_g();
	for(;;) {
		// There's no need for a lock in this section
		// because it only conflicts with the garbage
		// collector, and the garbage collector only
		// runs when everyone else is stopped, and
		// runfinq only stops at the gosched() or
		// during the calls in the for loop.

		// Pairs were pushed as (fn, arg), so arg pops first.
		// Both pops return 0 when the stack is empty.
		arg = (void *)ptr_pop(&finalizer_stack);
		fn = (void (*)(void *))ptr_pop(&finalizer_stack);

		if (fn == NULL) {
			// A 0 function with a non-0 argument means the stack
			// lost sync with the push protocol.
			if (arg != NULL) {
				runtime_throw("invalid finalizer stack");
			}

			// Sleep until GC makes more finalizers available.
			fingwait = 1;
			gp->status = Gwaiting;
			runtime_gosched();
			continue;
		}

		// FIXME: Check that the calling conventions do the right thing
		// when there are return values.
		fn(arg);
	}
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
static void
sweep(void)
{
	int32 cl, n, npages;
	uintptr size;
	byte *p;
	MCache *c;

	int64 now = 0; // FIXME: runtime_nanotime();

	for (MSpan *s = runtime_mheap.allspans; s != NULL; s = s->allnext) {

		// Stamp newly unused spans. The scavenger will use that
		// info to potentially give back some pages to the OS.
		if (s->state == MSpanFree && s->unusedsince == 0)
			s->unusedsince = now;

		if (s->state != MSpanInUse)
			continue;

		p = (byte*)(s->start * PAGE_SIZE);
		cl = s->sizeclass;
		if (cl == 0) {
			// Large allocation: the span holds a single block.
			size = s->npages * PAGE_SIZE;
			n = 1;
		} else {
			// Chunk full of small blocks.
			size = runtime_class_to_size[cl];
			npages = runtime_class_to_allocnpages[cl];
			n = (npages * PAGE_SIZE) / size;
		}

		// Sweep through n objects of given size starting at p.
		// This thread owns the span now, so it can manipulate
		// the block bitmap without atomic operations.
		for(; n > 0; n--, p += size) {
			MBits bits;
			runtime_bits_init(&bits, &runtime_mheap, p);

			// Free slots carry no mark information; skip them.
			if (!runtime_bits_read(&bits, bitAllocated))
				continue;

			if (runtime_bits_read(&bits, bitMarked)) {
				// Reachable: just clear the mark for next round.
				if (DebugMark) {
					if (!runtime_bits_read(&bits, bitSpecial)) {
						runtime_printf("found spurious mark on %p\n", p);
					}
					runtime_bits_clear(&bits, bitSpecial, false);
				}
				runtime_bits_clear(&bits, bitMarked, false);
				continue;
			}

			// Special means it has a finalizer or is being profiled.
			// In DebugMark mode, the bit has been coopted so
			// we have to assume all blocks are special.
			if (DebugMark || runtime_bits_read(&bits, bitSpecial)) {
				// A queued finalizer keeps the block alive until
				// it has run; skip freeing in that case.
				if (handlespecial(p, size)) {
					continue;
				}
			}

			// Mark freed; restore block boundary bit.
			runtime_bits_assign(&bits, bitBlockBoundary, false);

			c = runtime_mcache();
			if(s->sizeclass == 0) {
				// Free large span.
				runtime_unmarkspan(&runtime_mheap, p, PAGE_SIZE);
				*(uintptr*)p = 1;	// needs zeroing
				runtime_MHeap_Free(&runtime_mheap, s, 1);
			} else {
				// Free small object.
				if(size > sizeof(uintptr))
					((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
				c->local_by_size[s->sizeclass].nfree++;
				runtime_MCache_Free(c, p, s->sizeclass, size);
			}
			c->local_alloc -= size;
			c->local_nfree++;
		}
	}
}

// GC trace verbosity; would be overridden by $GOGCTRACE once getenv works.
// NOTE(review): defaults to 1 (tracing on) — presumably a debugging
// leftover from this port; confirm before shipping.  gctrace > 1 also
// forces a second collection per runtime_gc call.
static int32 gctrace = 1;

// Initialized from $GOGC.  GOGC=off means no gc.
//
// Next gc is after we've allocated an extra amount of
// memory proportional to the amount already in use.
// If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M.  This keeps the gc cost in linear
// proportion to the allocation cost.  Adjusting gcpercent
// just changes the linear constant (and also the amount of
// extra memory used).
// -2 is a sentinel meaning "not initialized yet"; -1 means gc disabled.
static int32 gcpercent = -2;


// Run one stop-the-world garbage collection cycle: mark, sweep, kick the
// finalizer goroutine if work was queued, and update statistics.
// force != 0 collects even if the heap has not grown to mstats.next_gc.
void
runtime_gc(int32 force)
{
	int64 t0 = 0, t1 = 0, t2 = 0, t3 = 0;
	uint64 heap0, heap1, obj0, obj1;
	const byte *p;

	// Make sure all registers are saved on stack so that
	// scanstack sees them.
	__builtin_unwind_init();

	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks.  To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock.  The next mallocgc
	// without a lock will do the gc instead.

	// TODO
	/* m = runtime_m(); */
	if(!mstats.enablegc || /* m->locks > 0 ||*/ runtime_panicking())
		return;

	if(gcpercent == -2) {	// first time through
		// TODO: p = runtime_getenv("GOGC");
		p = nil;
		if(p == nil || p[0] == '\0')
			gcpercent = 100;
		else if(__builtin_strcmp((const char*)p, "off") == 0)
			gcpercent = -1;
		else
			gcpercent = runtime_atoi(p);

		// p = runtime_getenv("GOGCTRACE");
		p = nil;
		if(p != nil)
			gctrace = runtime_atoi(p);
	}
	// gcpercent < 0 means GOGC=off: collection is disabled.
	if(gcpercent < 0)
		return;

	runtime_lock_world();
	if(!force && mstats.heap_alloc < mstats.next_gc) {
		// Not enough allocation since the last cycle; nothing to do.
		runtime_unlock_world();
		return;
	}

	// FIXME: reading time causes asynchronous IPC (and thus reschedules fibril)
	//t0 = runtime_nanotime();

	runtime_stop_world();

	// Snapshot statistics before marking.
	runtime_flushstats();
	heap0 = mstats.heap_alloc;
	obj0 = mstats.nmalloc - mstats.nfree;

	mark(scanblock0);

	//t1 = runtime_nanotime();

	sweep();

	//t2 = runtime_nanotime();

	runtime_stealcache();
	runtime_flushstats();

	// Schedule the next collection proportional to the live heap size.
	mstats.next_gc = mstats.heap_alloc + mstats.heap_alloc * gcpercent / 100;

	mstats.enablegc = 0;	// disable gc during the mallocs in newproc
	if (!ptr_stack_empty(&finalizer_stack)) {
		// kick off or wake up goroutine to run queued finalizers
		if(fing == nil) {
			fing = __go_go(runfinq, nil);
		} else if(fingwait) {
			fingwait = 0;
			runtime_ready(fing);
		}
	}
	mstats.enablegc = 1;

	runtime_flushstats();
	heap1 = mstats.heap_alloc;
	obj1 = mstats.nmalloc - mstats.nfree;

	//t3 = runtime_nanotime();
	mstats.last_gc = t3;
	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
	mstats.pause_total_ns += t3 - t0;
	mstats.numgc++;

	// Copy the values used for tracing before the world restarts,
	// so concurrent allocation cannot skew the report.
	uint32 numgc = mstats.numgc;
	uint64 nmalloc = mstats.nmalloc, nfree = mstats.nfree;

	//runtime_MProf_GC();
	runtime_unlock_world();

	runtime_start_world();

	if (mstats.debuggc)
		runtime_printf("pause %D\n", t3-t0);

	if (gctrace) {
		runtime_printf("gc%d: %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects\n",
			numgc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
			heap0>>20, heap1>>20, obj0, obj1,
			nmalloc, nfree);
	}

	// give the queued finalizers, if any, a chance to run
	if (!ptr_stack_empty(&finalizer_stack)) {
		runtime_gosched();
	}

	// gctrace > 1 doubles every collection (stress/debug mode);
	// !force guards against unbounded recursion.
	if (gctrace > 1 && !force) {
		runtime_gc(1);
	}
}
