// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <align.h>
#include <stdlib.h>
#include <unistd.h>
#include <fibril.h>
#include <futex.h>
#include <async.h>

#include "runtime_types.h"
#include "defer.h"
#include "print.h"
#include "util.h"
#include "panic.h"

#include "mem_alloc.h"
#include "mem_sys.h"
#include "proc.h"

extern void runtime_Goexit (void) asm ("runtime.Goexit");
extern void runtime_Gosched (void) asm ("runtime.Gosched");


static G *all_list = NULL;
static G *free_list = NULL;
static futex_t alloc_futex = FUTEX_INITIALIZER;

static int goroutines = 0;
static int last_goid = 0;

// Atomically increment the live-goroutine counter.
static void gcount_inc(void)
{
	__atomic_add_fetch(&goroutines, 1, __ATOMIC_SEQ_CST);
}

// Atomically decrement the live-goroutine counter.
static void gcount_dec(void)
{
	__atomic_sub_fetch(&goroutines, 1, __ATOMIC_SEQ_CST);
}

// Returns the current number of live goroutines.
int32
runtime_gcount()
{
	// Load atomically to pair with the atomic updates done by
	// gcount_inc()/gcount_dec(); a plain read would be a data race.
	return __atomic_load_n(&goroutines, __ATOMIC_SEQ_CST);
}

// Hands out unique, monotonically increasing goroutine ids (first id is 1).
static int next_goid(void)
{
	return __atomic_add_fetch(&last_goid, 1, __ATOMIC_SEQ_CST);
}

// A simple custom allocator for goroutine objects.
static G *goinfo_alloc() {
	static void *mem_area = NULL;
	static unsigned mem_remaining = 0;

	static const uintptr GSIZE = ALIGN_UP(sizeof(G), sizeof(void *)); 

	// TODO: lock-free stack
	futex_down(&alloc_futex);

	G *new;

	// First try to use free_list.
	if (free_list != NULL) {
		new = free_list;
		free_list = new->free_link;
		new->free_link = NULL;
	} else {
		// No free objects in the list. Allocate new.
		if (mem_remaining < GSIZE) {
			// Refill.
			mem_area = runtime_sys_alloc(PAGE_SIZE);
			// FIXME: handle failure
			mem_remaining = PAGE_SIZE;
		}

		new = (G *)mem_area;
		mem_area = (void *)(new + 1);
		mem_remaining -= GSIZE;

		__builtin_memset(new, 0, GSIZE);
		new->all_link = all_list;
		all_list = new;
		new->status = Gdead;
	}

	futex_up(&alloc_futex);
	gcount_inc();
	new->goid = next_goid();
	return new;
}

// Release a goroutine object.  The memory is never returned to the
// system; the object is pushed onto free_list for reuse by
// goinfo_alloc().
static void goinfo_free(G *g) {
	g->status = Gdead;
	gcount_dec();

	futex_down(&alloc_futex);
	g->free_link = free_list;
	free_list = g;
	futex_up(&alloc_futex);
}

// Returns the head of the list of all goroutine objects ever allocated
// (linked through all_link), taken under the allocator lock.
G *runtime_allg() {
	futex_down(&alloc_futex);
	G *head = all_list;
	futex_up(&alloc_futex);
	return head;
}



extern void runtime_main (void);

static fibril_local G *__g;

/*
void runtime_enable_multithread(void)
{
	// TODO: support for multiple threads
}

void runtime_enable_gc(void)
{
	// TODO
	//mstats.enablegc = 1;
}
*/

// We can not always refer to the TLS variables directly.  The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule.  When
// we get back from the call we may be running in a different thread,
// in which case the register now points to the TLS variable for a
// different thread.  We use non-inlinable functions to avoid this
// when necessary.

G* runtime_g(void) __attribute__ ((noinline, no_split_stack));

// Returns the current goroutine's G.  Kept out-of-line so the compiler
// cannot cache the fibril-local address across a scheduling point (see
// the comment above).
G*
runtime_g(void)
{
	G *gp = __g;
	assert(gp != NULL);
	return gp;
}

// Returns the address of a local in this (deliberately non-inlined)
// frame.  runtime_gstack_start() compares it against a caller-frame
// address to detect which way the stack grows.
static uintptr checkfunc(void) __attribute__((noinline));

// Marked static here too, matching the declaration above (the old
// definition omitted it; linkage was still internal, but the mismatch
// was confusing).
static uintptr checkfunc(void) {
	void *arg = NULL;
	uintptr ret = (uintptr)&arg;
	return ret;
}

// Returns the address where pg's stack starts (its base), i.e. the end
// the stack grows away from.
uintptr runtime_gstack_start(G *pg)
{
	assert(pg != NULL);
	assert(pg->fib != NULL);

	enum { UNKNOWN, UP, DOWN };
	static int stack_grows = UNKNOWN;

	// Lazily probe the growth direction once: a callee's locals sit at
	// higher addresses than ours exactly when the stack grows upward.
	if (stack_grows == UNKNOWN) {
		stack_grows = (checkfunc() > (uintptr)&pg) ? UP : DOWN;
	}

	// FIXME: Deal with IA64's weird stack.

	uintptr low = (uintptr)pg->fib->stack;
	if (stack_grows == UP) {
		return low;
	}
	return low + pg->fib->stack_size;
}

// Returns the most recent known extent of pg's stack: the saved stack
// pointer from the fibril's context.  NOTE(review): this is the SP at
// the last context save, not necessarily the current live SP — confirm
// callers only use it for dead/suspended fibrils.
uintptr runtime_gstack_last(G *pg)
{
	assert(pg != NULL);
	assert(pg->fib != NULL);

	return (uintptr)pg->fib->ctx.sp;
}

// Mark g ready to run.
// Hands the goroutine's fibril back to the libc fibril scheduler's
// ready queue; it does not switch to it immediately.
void
runtime_ready(G *g)
{
	fibril_add_ready((fid_t)g->fib);
}

/*
int32
runtime_helpgc(bool *extra)
{
	M *mp;
	int32 n, max;

	// Figure out how many CPUs to use.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	max = runtime_gomaxprocs;
	if(max > runtime_ncpu)
		max = runtime_ncpu > 0 ? runtime_ncpu : 1;
	if(max > MaxGcproc)
		max = MaxGcproc;

	// We're going to use one CPU no matter what.
	// Figure out the max number of additional CPUs.
	max--;

	runtime_lock(&runtime_sched);
	n = 0;
	while(n < max && (mp = mget(nil)) != nil) {
		n++;
		mp->helpgc = 1;
		mp->waitnextg = 0;
		runtime_notewakeup(&mp->havenextg);
	}
	runtime_unlock(&runtime_sched);
	if(extra)
		*extra = n != max;
	if (extra)
		*extra = false;
	return 0;
}
*/

// Enter scheduler.  If g->status is Grunning,
// re-queues g and runs everyone else who is waiting
// before running g again.
void
runtime_gosched(void)
{
	G *g = runtime_g();

	switch(g->status) {
	case Grunning:
		fibril_switch(FIBRIL_PREEMPT);
		break;
	case Gwaiting:
		futex_down(&async_futex);
		fibril_switch(FIBRIL_TO_MANAGER);
		// Futex is not held on return.
		break;
	default:
		runtime_printf("Status: %d\n", g->status);
		runtime_throw("Invalid status in gosched().");
	}

	g->status = Grunning;
}

_Bool runtime_fibril_setup() {
	// libc ensures that uninitialized fibril-local memory is zeroed out.
	if (__g != NULL) {
		return 0;
	}

	__g = goinfo_alloc();
	__g->status = Grunning;
	__g->fib = (fibril_t*)fibril_get_id();
	return 1;
}

void runtime_fibril_teardown() {
	__g->status = Gdead;
	__g->fib = NULL;
	__g->defer = NULL;
	goinfo_free(__g);
	__g = NULL;
}

// First function run by a new goroutine/fibril.
static int fibril_main(G* newg) {
	// Publish this fibril's G before running any Go code.
	__g = newg;

	// Run the goroutine body with its argument.
	void (*entry)(void *) = (void (*)(void *))newg->entry;
	entry(newg->param);

	// The body returned; terminate the goroutine.  Goexit never
	// returns, so anything past it is a runtime bug.
	runtime_Goexit();
	runtime_throw("fibril_main: unreachable reached");
}

// Create a new goroutine running fn(arg) on a fibril with the given
// stack size (0 = default), mark it ready, and return its G.
//
// retaddr records the caller's PC for diagnostics (gopc); pass 0 to use
// this function's own return address.
G*
runtime_grun(void (*fn)(void*), uintptr retaddr, void* arg, uintptr stack_size)
{
	assert(fn != NULL);

	G *newg = goinfo_alloc();
	assert(newg != NULL);

	newg->entry = (void *)fn;
	newg->param = arg;
	newg->gopc = retaddr == 0 ? (uintptr)__builtin_return_address(0) : retaddr;
	newg->status = Grunning;

	fid_t fib = fibril_create_generic((int(*)(void*))fibril_main, newg, stack_size);
	if (fib == 0) {
		// Was "__go_go: ..." — misattributed the failure to the wrapper.
		runtime_throw("runtime_grun: unable to create a new fibril");
	}

	newg->fib = (fibril_t *)fib;
	fibril_add_ready(fib);
	return newg;
}

// Entry point used by compiled Go code for the `go` statement: start
// fn(arg) on a new goroutine with the default stack size, recording the
// caller as the spawn site.
G*
__go_go(void (*fn)(void*), void* arg)
{
	return runtime_grun(fn, (uintptr)__builtin_return_address(0), arg, 0);
}

// Run all deferred functions for the current goroutine.
static void
rundefer(void)
{
	// Detach the whole defer chain first so re-entrant defers cannot
	// see a half-consumed list.
	G *g = runtime_g();
	Defer *d = g->defer;
	g->defer = NULL;

	while (d != nil) {
		// Clear __pfn before the call, then invoke it.
		void (*pfn)(void *) = d->__pfn;
		d->__pfn = nil;
		if (pfn != nil)
			pfn(d->__arg);

		// Read the link only after the call, then free the record.
		Defer *next = d->__next;
		runtime_free(d);
		d = next;
	}
}

// Terminate the current goroutine: run its deferred functions, release
// its G, and switch away permanently.  Order matters: defers run while
// the G is still attached; teardown must precede the final switch.
// Never returns.
void
runtime_Goexit(void)
{
	rundefer();
	runtime_fibril_teardown();
	fibril_switch(FIBRIL_FROM_DEAD);
}

// Go-visible yield (runtime.Gosched): thin wrapper over the internal
// scheduler entry point.
void
runtime_Gosched(void)
{
	runtime_gosched();
}

// Pin the current goroutine to its OS thread.  Not implemented yet;
// currently a no-op.
void
runtime_LockOSThread(void)
{
	// TODO
}

// Undo runtime_LockOSThread.  Not implemented yet; currently a no-op.
void
runtime_UnlockOSThread(void)
{
	// TODO
}

// Reports whether the current goroutine is pinned to its OS thread.
// Thread pinning is unimplemented, so this is always false for now.
bool
runtime_lockedOSThread(void)
{
	// TODO
	return false;
}

// for testing of callbacks

// Go-visible wrapper (runtime.golockedOSThread) around
// runtime_lockedOSThread().
_Bool runtime_golockedOSThread(void)
  asm("runtime.golockedOSThread");

_Bool
runtime_golockedOSThread(void)
{
	return runtime_lockedOSThread();
}

// Acquire whatever lock runtime_stop_world() needs.  Not implemented
// yet; currently a no-op.
void runtime_lock_world()
{
	// TODO
}

// Stop all other goroutines (e.g. for GC).  Only a partial stand-in:
// entering a fibril critical section prevents fibril preemption of this
// fibril, but does not actually halt others.
bool runtime_stop_world()
{
	// TODO

	fibril_enter_critical();
	return true;
}

// Counterpart of runtime_lock_world().  Not implemented yet; currently
// a no-op.
void runtime_unlock_world()
{
	// TODO
}

// Resume the world after runtime_stop_world(): leaves the fibril
// critical section entered there.  Full restart logic is still TODO.
void runtime_start_world()
{
	fibril_exit_critical();
	// TODO
}
