#include "scheduler.h"

// scheduling context: one row of user space thread contexts per LWP
utContext_t utArray[MAX_LWPS][MAX_THREADS];
// number of user space threads registered per LWP
// NOTE(review): the { 0, 0 } initializers assume MAX_LWPS == 2 — confirm
int utNum[MAX_LWPS] = { 0, 0};
// index of the thread currently scheduled on each LWP
// (starts at the first user space thread of each LWP)
int utCurrent[MAX_LWPS] = { 0, 0};
// pthread id of each running LWP, indexed by LWP index
pthread_t lwpIdx2Id[MAX_LWPS];

// per-LWP scheduler environment for setjmp()/longjmp():
// threads longjmp() here to hand control back to utSchedule()
jmp_buf utSchedEnv[MAX_LWPS];

// give CPU to next user space thread
void utYield(int lwpIdx) {

    if ( !setjmp(utArray[lwpIdx][utCurrent[lwpIdx]].env) ) 
            longjmp(utSchedEnv[lwpIdx],1);
}

// schedule user space threads
void *utSchedule(void *pData) {

    int i=0;
    utContext_t *c;
    unsigned long myCpu;
    pid_t pid;

    // get id of LPW
    pthread_t pthreadId = pthread_self();

    // indicate user space threads to run
    int haveUt = 1;

    // get current data to handle user threads in correct LWP
    // after that free pointer from calling function
    int lwpIdx = *(int *)pData;
    free(pData);

    // register pthread LWP identification under
    // this LWP's index:
    lwpIdx2Id[lwpIdx] = pthreadId;
    pid = getpid();

#ifdef _PRINT
    printf("LWP %d starts with pthread-id 0x%08X, PID 0x%08X\n",
           lwpIdx,pthreadId,pid);
#endif

    // for multiprocessor: gives cpus on which a process (or LWP)
    // may run
    sched_getaffinity(0,sizeof(unsigned long),&myCpu);

#ifdef _PRINT
    printf("Affinitmask is 0x%0X\n",myCpu);fflush(NULL);
#endif

    // set affinity mask: LWPS can run on different CPUs, user space
    // threads not!
    // if the CPU doesn't exist, nothing happens
    myCpu = 1 << lwpIdx;
    if ( sched_setaffinity(0,sizeof(unsigned long),&myCpu) ) {
	perror("sched_setaffinity");
    }

    // are there user space threads to schedule?
    // if not, return
    if ( utNum <= 0 ) return;

    // as long as there are threads to schedule
    while ( haveUt ) {

	// set termination flag to zero to finish scheduling if no
	// thread sets the flag back to 1
	haveUt = 0;

	// loop through all threads for the current LWP
	for ( utCurrent[lwpIdx]=0; 
              utCurrent[lwpIdx] < utNum[lwpIdx]; 
              utCurrent[lwpIdx]++ ) {

	    // get thread information
	    c = &(utArray[lwpIdx][utCurrent[lwpIdx]]);

	    // if thread is running
	    if ( c->running ) {

		// signal that scheduling is still performed
		haveUt = 1;

#ifdef _PRINT
		printf("Schedule %d\n",
                ((utInputData_t *)
                 (utArray[lwpIdx][utCurrent[lwpIdx]].arg))->utId);
#endif
		
		//schedule next thread
		if ( !setjmp(utSchedEnv[lwpIdx]) ) 
                    longjmp(utArray[lwpIdx][utCurrent[lwpIdx]].env,1);

	    }
	}
    }
}

// Entry and exit point of every user space thread: runs the thread's
// function to completion, marks the context as finished, and returns
// control to the scheduler of the LWP we are running on.
void utWrapper(void) {

    // which LWP is the current pthread?
    int idx = lwpId2Idx();

    // context of the thread currently scheduled on this LWP
    utContext_t *ctx = &utArray[idx][utCurrent[idx]];

    // run the thread's function; we only get past this call
    // once the function has returned
    ctx->func(ctx->arg);

    // flag the thread as terminated so the scheduler skips it
    ctx->running = 0;

    // hand control back to the scheduler loop for good
    if ( setjmp(ctx->env) == 0 ) {
        longjmp(utSchedEnv[idx], 1);
    }
}

// Prepare a thread context so that the first longjmp() into it starts
// utWrapper() on a freshly allocated stack. Call before the scheduler
// is started.
// uctx:    context to initialize (silently ignored if NULL)
// func:    thread function (silently ignored if NULL)
// sk_size: stack size in bytes for the new thread
// arg:     argument handed to func through the context
// NOTE: the allocated stack is never freed — the context owns it for
//       the lifetime of the program
void utMakeContext(utContext_t *uctx, 
                   int (*func)(void *), 
                   int sk_size,
                   void *arg) {

    void *stackPtr;

    if ( !uctx || !func ) return;

    // allocate the thread stack first; on failure leave the context
    // non-runnable instead of writing through a null stack pointer
    // below (bug fix: calloc result was never checked)
    stackPtr = calloc(1,sk_size);
    if ( !stackPtr ) {
        uctx->running = 0;
        return;
    }

    // store thread function & parameters; mark the thread runnable
    uctx->func = func;
    uctx->arg = arg;
    uctx->running = 1;

    // the following is now prepared (dependent on the environment)
    // 1) the instruction pointer is set to the wrapper function, here
    //    starts and ends our thread
    // 2) the stack base pointer is set to the beginning of our stack
    // 3) the stack pointer is set to the top of the stack
    // 4) the top of the stack is the old instruction pointer
    //    when a function returns, the program is going back to this address
    //    as the wrapper handles the termination of our threads, we set
    //    this to 0

#if defined(__linux__) && defined(__i386__)
    uctx->env[0].__jmpbuf[5] = (int)utWrapper; /* EIP */// instruction pointer
    PTR_MANGLE(uctx->env[0].__jmpbuf[5]);
    uctx->env[0].__jmpbuf[3] = 0; /* EBP */// base pointer
    PTR_MANGLE(uctx->env[0].__jmpbuf[3]);
    uctx->env[0].__jmpbuf[4] = (int)stackPtr + sk_size - sizeof(int32_t); /* ESP */// stack pointer
    *((void**)uctx->env[0].__jmpbuf[4]) = (void *)0; /* old EIP */
    PTR_MANGLE(uctx->env[0].__jmpbuf[4]);
#elif defined(__linux__) && defined(__x86_64__)
    uctx->env[0].__jmpbuf[7] = (long int)utWrapper; /* RIP */
    PTR_MANGLE(uctx->env[0].__jmpbuf[7]);
    uctx->env[0].__jmpbuf[1] = 0; /* RBP */
    PTR_MANGLE(uctx->env[0].__jmpbuf[1]);
    uctx->env[0].__jmpbuf[6] = (long int)stackPtr + sk_size - sizeof(int64_t); /* RSP */
    *((void**)uctx->env[0].__jmpbuf[6]) = (void *)0; /* old RIP */
    PTR_MANGLE(uctx->env[0].__jmpbuf[6]);
#elif defined(__linux__) && defined(__PPC__)
    uctx->env[0].__jmpbuf[2] = (int)utWrapper; /* LR */
    uctx->env[0].__jmpbuf[0] = (int)stackPtr + sk_size - 32 * sizeof(int); /* SP */
    *((int *)uctx->env[0].__jmpbuf[0]) = 0; /* old SP */
    // NOTE(review): `+ sizeof(int)` on an int* advances sizeof(int)
    // ELEMENTS, not bytes — looks like a latent off-target write on
    // PPC; left unchanged, confirm against the PPC jmp_buf layout
    *((int *)uctx->env[0].__jmpbuf[0] + sizeof(int)) = 0; /* old LR */
    PTR_MANGLE(uctx->env[0].__jmpbuf[0]);
    PTR_MANGLE(uctx->env[0].__jmpbuf[2]);
#elif defined(__MACH__) && defined(__ppc__)   /* ppc32-osx */
    uctx->env[0] = ((int)stackPtr + sk_size - 32 * sizeof(int)); /* SP */
    *((int *)uctx->env[0]) = 0; /* old SP */
    *((int *)uctx->env[0] + sizeof(int)) = 0; /* old LR */
    uctx->env[21] = (int)utWrapper; /* LR */
#elif defined(__MACH__) && defined(__i386__)   /* i386-osx */
    uctx->env[12] = (int)utWrapper; /* EIP */
    uctx->env[8] = 0; /* EBP */
    uctx->env[9] = (int)stackPtr + sk_size - sizeof(int32_t); /* ESP */
    *((void**)uctx->env[9]) = (void *)0; /* old EIP */
#endif 

}



