/* Author: Ratish J. Punnoose, 2006
 * This file is part of TiROS, the Tickless Real-Time Operating System.
 * Copyright(c) 2006, 2007: Ratish J. Punnoose.
 * Copyright(c) 2006 Sandia Corporation. Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
 * certain rights in this software. 
 * 
 * TiROS is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 or (at your option) any later version.
 *
 * TiROS is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with TiROS; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * As a special exception, if other files instantiate templates or use macros
 * or inline functions from this file, or you compile this file and link it
 * with other works to produce a work based on this file, this file does not
 * by itself cause the resulting work to be covered by the GNU General Public
 * License. However the source code for this file must still be made available
 * in accordance with section (3) of the GNU General Public License.
 *
 * This exception does not invalidate any other reasons why a work based on
 * this file might be covered by the GNU General Public License.
 *
 */



#define TIROS_C


#include "tr_int.h"


#ifdef TIROS_PRIO_INHERIT_PROTOCOL
#  warning "TIROS_PRIO_INHERIT_PROTOCOL is deprecated. See release."
#endif



 
/* ************************************************************/
/** \name OS_variables 
 * @{ */

/** Array of TCBs */
static struct TCB  TCBarray[TIROS_MAX_PROCS] ;





/* -------------------------------------------------- */
/* Internal taskflags used by TiROS */

/* What queue is the state in?  Bits 4,5 
 * Ready or WAIT or WAIT_Mutex.
 * Checking for TASK_WAIT should also show WAIT_MUTEX which is a
 * subset of TASK_WAIT */
/*lint -esym(750, TASK_READY) Not  used in code */
#define TASK_READY		((uint8_t)0x00) 
#define TASK_WAIT_OTHER         ((uint8_t)0x10)
#define TASK_WAIT		(TASK_WAIT_OTHER)


/* Event Flag : Bit 3 */
#define  EFLAG_OPTS		(O_EFLAG_AND | O_EFLAG_OR)


/* Flag marking forced resume : Bit 6 */
#define TASK_RESUMED		((uint8_t)0x40)

/* Flag marking forced timeout :  Bit 7 */
#define TASK_TIMEOUT		((uint8_t)0x80)

/* Using last Bit 6, Bit 7 for error notification for blocked
 * tasks that are resumed */
#define TASK_BLOCK_ERRMASK     (TASK_TIMEOUT | TASK_RESUMED)


/* -------------------------------------------------- */




/** The currently running task id. 
 * 
 * This is usually the same as the head of the ready list, except when a new
 * task has been scheduled. */
static tid_t  RUNNING_task = ILLEGAL_ELEM;
static  struct TCB* RUNNING_task_TCB = (struct TCB *) TR_ILLEGAL_ADDR;




/*lint -esym(551, ctxt_switch_cnt) ctxt_switch_cnt is exposed for
 * debugging, but may not always be accessed */
/**  Number of context switches */
static osword_t ctxt_switch_cnt = 0U;

/** Interrupt nesting level
 *   This is also used to determine that a call is made from an
 *   interrupt */
uint8_t /*@unchecked@*/ os_isr_nesting = 0U;



/* Memory Usage 
 * Assume sizeof(tid_t) = 1.
 *  sizeof(struct TCB) = sizeof(osstkptr_t) + sizeof(tid_t) + 
 *                       sizeof(tid_t) + sizeof(uint8_t) +
 *                       sizeof(uint8_t) +  sizeof(trtime_t) + 
 *                       sizeof(osptr_t) + sizeof(flag_t) 
 *                       == approx sizeof(osstkptr_t)+ 4 + 4 +
 *  sizeof(subtime_t) + sizeof(flag_t) + sizeof(osptr_t)
 *                        
 *
 *  TCBarray: TIROS_MAX_PROCS * sizeof(struct TCB)
 * 
 *  For standard priority sort method:
 *  
 *  rdywt_list_metadata: TIROS_MAX_PROCS * sizeof(tid_t) = TIROS_MAX_PROCS
 *  lock_list_metadata: TIROS_MAX_PROCS * sizeof(tid_t)  = TIROS_MAX_PROCS
 *  readylist: sizeof(tid_t) = 1
 *  waitlist: sizeof(tid_t) = 1
 *
 *  Variables:  RUNNING_task, RUNNING_task_TCB, ctxt_switch_cnt,
 *  isr_nesting
 *         sizeof(tid_t) + sizeof(void*) + sizeof(osword_t) + sizeof(uint8_t)
 */



/** @} ---------------------------------------------------------- */





/* ******** Internal support functions ***********************/
/** @defgroup os_int Internal Support Functions 
 * @{ */



/** The currently running task's id.
 * Accessor for the private RUNNING_task variable; kernel code reads it
 * through this function, while osint_taskswitcher() is the place it is
 * changed (see the note there).
 * @return Return the task id of the running task */
static inline tid_t running_task(void) /*@globals RUNNING_task;@*/
{ 
	return RUNNING_task;
}

/** The currently running task's TCB.
 * Accessor for the private RUNNING_task_TCB variable, kept in sync
 * with RUNNING_task by osint_taskswitcher().
 * @return Return the TCB of the running task */
static inline /*@shared@*/ struct TCB* running_task_TCB(void) /*@globals RUNNING_task_TCB;@*/
{ 
	return RUNNING_task_TCB;
}

/* Include list managing routines */
#include "tr_llmgr.h"


/*lint -esym(759, osint_running_task_ctxt)  should be an external
 * interface even if unused */

/*lint -esym(765, osint_running_task_ctxt) is not marked static
 * intentionally */

/*lint -esym(714, osint_running_task_ctxt) It is ok if
 * osint_running_task_ctxt is not referenced externally */

/* Function documentation in tr_int.h.
 * This function is accessible from the hal.
 * Returns the saved context (stack) pointer of the running task. */
osstkptr_t osint_running_task_ctxt(void)
{
	return running_task_TCB()->ctxt_ptr;
}


/* Function documentation given in tr_int.h.
 * This function is accessible from the hal.
 *
 * Called by the hal with the outgoing task's saved context pointer:
 * stores it in the outgoing TCB, promotes the ready-list head to be
 * the running task, and returns the incoming task's context pointer
 * for the hal to restore. */
osstkptr_t osint_taskswitcher( osstkptr_t old_ctxtptr)
{
	osstkptr_t  ctxtptr;

#ifdef TIROS_USER_CTXT_UPCALL
	tid_t old_task;
	old_task = RUNNING_task;
#endif

	/* Note: This is the only function where RUNNING_task and
	   RUNNING_task_TCB are accessed directly. This is the only
	   place they are changed.  Everywhere else they are accessed
	   by the functions running_task() and running_task_TCB() */

	/* If the task has been deleted then the ctxt_ptr is
	   marked as ILLEGAL_STACK.  This frees the TCB slot.
	   In that case the outgoing context must NOT be saved, or the
	   slot would appear allocated again. */
	if (RUNNING_task_TCB->ctxt_ptr != ILLEGAL_STACK) {
		RUNNING_task_TCB->ctxt_ptr = old_ctxtptr;
	}

	ctxt_switch_cnt++;

	TR_MSG_TRACE_args('S', running_task(), RDY_list_head());


	RUNNING_task = RDY_list_head();
	/* If RUNNING_task is ILLEGAL_ELEM, there are no tasks in the
	 * ready Queue.  It is a critical error. */
	TR_ASSERTwTCB( RUNNING_task != ILLEGAL_ELEM,  "No RDY task", RUNNING_task, 1);
	RUNNING_task_TCB = &TCBarray[RUNNING_task];


	ctxtptr =  RUNNING_task_TCB->ctxt_ptr;


#ifdef TIROS_USER_CTXT_UPCALL
	/* Optional user hook, notified of every switch (old -> new). */
	user_ctxt_upcall(old_task, RUNNING_task);
#endif

	return ctxtptr; 
}




/** Perform a context switch.
 * This is a no-op when called from interrupt context; the switch is
 * then deferred until the outermost ISR exits (see OS_ISR_END()).
 * @return The return value is the value passed to task that is
 *  consciously waiting on a system call. */
static inline /*@null@*/ /*@shared@*/ osptr_t osint_ctxt_switch(void)
{
	if (os_isr_nesting == 0) {


#if (TIROS_DEBUG_LEVEL > 0)		 
#  ifdef TIROS_STK_CHECK
		{
			struct TCB * t =  running_task_TCB();
			osword_t maxusage;
			/* Running task is going to be put on the stack */
			maxusage = hal_stkusage_max(t->ctxt_ptr,
						    t->base_stk_ptr, t->stksize);
			/* Make sure max occupancy is less than
			   available stack size */
			TR_ASSERTwTCB( (maxusage < t->stksize), 
				       "Task stack overflow", running_task(), 1);
		}
#  endif
#endif



#ifdef TIROS_REGISTER_PASSING
		/* The hal hands the return value over in a register. */
		TR_ASSERTwTCB( running_task() != RDY_list_head(), 
			       "Run tsk == rdy", running_task(), 1);

		return hal_ctxt_switch();
#else
		/* The return value travels through the TCB.  Note that
		   running_task_TCB() is evaluated AFTER the switch, so
		   this reads the paramret_val of the task that resumes
		   execution here. */
		hal_ctxt_switch();
		return running_task_TCB()->paramret_val;
#endif	
	} else {
		return 0;
	}

}


/*@access mutex_t@*/
/** Reprioritize a task within whatever queue currently holds it.
 * All checks that the operation is permissible are assumed to have
 * been done by the caller.  The (time-sorted) wait queue never needs
 * resorting; only a lock's priority-sorted wait list or the ready
 * list does.
 * @param task  Task to reprioritize.
 * @param prio  New effective priority. */
static void osint_reprioritize(tid_t const task, tid_t const prio)
{
	plist_t *lock_wtlist;	/* Wait list of the lock this task is
				 * blocked on, if any */

	if ((TCBarray[task].flags & TASK_WAIT) == 0) {
		/* Task is on the ready queue: remove, retag, re-add. */
		RDY_list_rm(task);
		TCBarray[task].eff_priority = prio;
		RDY_list_add(task);
		return;
	}

	/* Waiting task: resort only if it sits on a lock's list. */
	lock_wtlist = (plist_t *)TCBarray[task].lock_ptr;
	if (lock_wtlist != TR_ILLEGAL_ADDR) {
		LK_list_rm(lock_wtlist, task);
		TCBarray[task].eff_priority = prio;
		LK_list_add(lock_wtlist, task);
	} else {
		/* Just sleeping: only the stored priority changes. */
		TCBarray[task].eff_priority = prio;
	}
}








/** Internal OS function to wake a task due to timeout.
 * 
 * There are two ways in which a task can be timedout.
 * 1. The task is waiting on a lock and its wait timer expires.
 * 2. A task that is sleeping is explicitly woken up by a resume.
 * NOTE: the caller is responsible for having removed the task from
 * the wait queue before calling; this function only clears the wait
 * flag and puts the task on the ready list.
 * @param waiting_tid  Task that has to be woken up.*/
static void osint_task_timeout(tid_t waiting_tid)
{
	plist_t *wtlist; /* Pointer to waitlist of the lock that 
			  * this task is waiting for */


	TR_ASSERTwTCB( waiting_tid < TIROS_MAX_PROCS, 
		       "Illegal task in osint_task_timeout", 
		       waiting_tid,       1);

	TR_ASSERTwTCB( TCBarray[waiting_tid].ctxt_ptr !=
		       ILLEGAL_STACK, 
		       "Illegal task in osint_task_timeout",
		       waiting_tid,       1);
		       

	wtlist = (plist_t *) TCBarray[waiting_tid].lock_ptr;


	/* Is this task waiting on a lock?? */
	if (wtlist != TR_ILLEGAL_ADDR) {
		/* Remove it from the wait list of the lock. */
		LK_list_rm( wtlist, waiting_tid); 

		/* Reset the lock pointer. */
		TCBarray[waiting_tid].lock_ptr = TR_ILLEGAL_ADDR;

		/* Set state showing timeout. */
		TCBarray[waiting_tid].flags |= TASK_TIMEOUT;
	}
	/* If the task is not waiting on a lock then it has been
	   sleeping. In this case, the TASK_TIMEOUT flag should NOT be
	   set here, because the reason for the timeout is ambiguous
	   within this function scope. The task could be waking at its
	   expected time (in which case no timeout should be set).  So
	   a task that is being resumed should be so marked in the
	   os_task_resume() function */
	
	
	/* Clear the wait-queue state bits in the task's flags. */
	TCBarray[waiting_tid].flags &=  ~TASK_WAIT; 

	/* The task has already been removed from the wait Q by the
	 * calling function, so move it to ready list. */
	RDY_list_add(waiting_tid);

			
}
/*@noaccess mutex_t@*/



/** Program the hal alarm for the next pending wakeup.
 * With an empty wait list the alarm is disabled; otherwise it is set
 * to the timeout of the earliest waiter (the wait-list head). */
static void osint_alarm_setup(void) 
{
	tid_t next_tid = WT_list_head();

	if (next_tid != ILLEGAL_ELEM)
		hal_alarm_set(&TCBarray[next_tid].timeout);
	else
		hal_alarm_set(0);	/* 0 disables the alarm. */
}



/** Internal OS function that is called by the hal when a compare
 *   timer has been reached.
 *  
 *  This function wakes tasks that have timed out.
 * @param t   Current time value
 * @return 0 if no context switch is needed, 1 if a context switch is
 * needed after.
 * 
 */
uint8_t osint_alarm_reached(trtime_t const *t)
{
	tid_t tid;
	trtime_t horizon = TRPORT_RESPONSE_TIME;

	/* Anything due earlier than now + response time is treated as
	   already expired. */
	time_add(t, &horizon, &horizon);

	/* Walk the (time-sorted) wait list, waking every task whose
	   timeout falls inside the horizon.  The head is re-read each
	   iteration because waking a task changes the list. */
	for (tid = WT_list_head(); tid != ILLEGAL_ELEM;
	     tid = WT_list_head()) {
		if (!time_lessthan(&TCBarray[tid].timeout, &horizon))
			break;	/* List is sorted: the rest can wait. */

		(void) WT_list_pop();	/* Take it off the wait list */
		osint_task_timeout(tid);	/* ... and wake it. */
	}

	osint_alarm_setup();


	TR_MSG_TRACE_wTCB("Alarm", tid,  __LINE__);


	/* Report whether the wakeups changed the highest-priority
	   ready task, i.e. whether a context switch is needed. */
	return (running_task() != RDY_list_head()) ? (uint8_t)1 : (uint8_t)0;
}





/** Internal OS function to place a task on the wait queue.
 *
 * Note: The calling function must have set any specific wait flags
 * before calling this function.
 * Note: When called, running_task may not be the readylist head.
 * @param task  Id of task.
 * @param timeout Time at which to wake up; absolute, or relative when
 *                O_RELATIVE_TIME is set in options.  NULL means sleep
 *                indefinitely.
 * @param options Option flags (O_RELATIVE_TIME is examined here).
 * @return  Return value to be passed to the calling task or SUCCESS
 * if the calling task is not the one being put to sleep. */
static  /*@shared@*/ /*@null@*/ osptr_t 
osint_task_puttosleep(tid_t task, /*@null@*/trtime_t const *timeout, uint8_t options)
{
	static trtime_t const adv_time = TRPORT_RESPONSE_TIME;
	trtime_t *wake_time;
	trtime_t curr_time;
	tid_t old_wait_list_head;

	/* The wakeup time is stored directly in the task's TCB. */
	wake_time = &TCBarray[task].timeout;


	if (timeout) {
		/* Use advance time. */
		/* 1. Get current time */
		hal_time_get(&curr_time);
	

		/* 2. Is time specified as relative ? */
		if (options & O_RELATIVE_TIME)
			time_add(&curr_time, timeout, wake_time);
		else {
			wake_time->units = timeout->units;
			wake_time->subunits = timeout->subunits;
		}
			
		
		/* 3.  Add our wakeup response time to current time */
		time_add(&curr_time, &adv_time, &curr_time);

		/* 4. Is this less than timeout time ? 
		 * If already timedout, then do a  task  timeout. */
		if (time_lessthan(wake_time, &curr_time)) {
			osint_task_timeout(task);
			goto quit;
		}
	}  else {
		/* No timeout given: sleep "forever" (maximum time). */
		wake_time->units = ~0;
		wake_time->subunits = ~0;
	}

	old_wait_list_head = WT_list_head();
	/* Add to wait queue by timeout priority */


	/* Set the wait flag.  The calling function must clear and set
	   the other additional wait flags as needed.*/
	TCBarray[task].flags |= TASK_WAIT_OTHER;
	WT_list_add(task);
		
	/* Setup timer if the head of the wait list has changed. */
	if ( WT_list_head() != old_wait_list_head )
		osint_alarm_setup();



 quit:

	/* If the ready-list head now differs from the running task
	   (typically because the caller removed itself), switch. */
	if (running_task() != RDY_list_head() ) 
		return osint_ctxt_switch();
	else
		return (osptr_t)SUCCESS;

}


/** Decide whether the calling task may block right now.
 * Blocking is refused when:
 *  a) the caller passed O_NONBLOCKING,
 *  b) the running task holds one or more mutexes, or
 *  c) the call is being made from an ISR.
 * @param options  Caller's option flags (checked for O_NONBLOCKING).
 * @return SUCCESS when blocking is permitted, otherwise one of the
 *         ERR_WOULDBLOCK_* codes identifying the reason. */
static int osint_allow_blocking(uint8_t options)
{
	int verdict = SUCCESS;

	if ((options & O_NONBLOCKING) != 0) {
		verdict = ERR_WOULDBLOCK;
	} else if (running_task_TCB()->num_mutexes != 0) {
		TR_MSG_INFO("Cannot Block due to held mutex");
		verdict = ERR_WOULDBLOCK_MUTEX;
	} else if (os_isr_nesting != 0) {
		TR_MSG_INFO("Blocking call from ISR");
		verdict = ERR_WOULDBLOCK_ISR;
	}

	return verdict;
}


/*@access mutex_t@*/
/** This is internally used by locking functions to wait for a lock.
 *
 * This function MUST be called from a critical section.
 * The calling task is moved from the ready queue onto the lock's wait
 * list and put to sleep; after wakeup its flag bits are inspected to
 * translate a forced resume or a timeout into an error code.
 * @param  wtlist      Pointer to the waitlist of the lock structure (mutex, sem, msgQ, eflag)
 * @param  timeout     Timeout at which the process is woken
 *                     if the lock has not been obtained.
 * @param  options     Options, eg. O_NONBLOCKING 
 * @return Return value from system call. This can either be a
 * success status (value depends on the lock type) or can be a timeout
 * indication. */
static /*@shared@*/ /*@null@*/ osptr_t 
osint_wait_on_lock(/*@shared@*/ plist_t *wtlist, 
		   trtime_t const  *timeout,
		   uint8_t options) 
{
	osptr_t /*@shared@*/ rc;
	uint8_t retflags;
	tid_t my_tid = running_task();
	struct TCB * my_tcb = running_task_TCB();

	/* The flags field in the TCB for  a specific task can be
	 * changed when a task is sleeping. It may  awake with a
	 * different value for this field.  Therefore we force this to
	 * be volatile to prevent using the cached value if any. */  
	uint8_t volatile * my_flags;

 


	TR_ASSERTwTCB(RDY_list_head() == my_tid, "RDY != RUN",
		      my_tid, 1);
		
	/* Remove self from readyQ and put on waitQ */
	LK_list_add(wtlist, RDY_list_pop()  );



	/* Record which lock we are blocked on (used by timeout,
	   suspend, resume and delete to find us). */
	my_tcb->lock_ptr = (osptr_t) wtlist;
	/* Clear all the flags that will be checked later upon
	   resumption */
	my_tcb->flags &= ~(TASK_BLOCK_ERRMASK | TASK_WAIT);

	/* Sleeps until woken; execution resumes here afterwards. */

	rc = osint_task_puttosleep(my_tid, timeout, options);
	my_flags = &my_tcb->flags;   
	/* Check task flags for any error flags */
	retflags =  (*my_flags) & TASK_BLOCK_ERRMASK;
	if (retflags & TASK_RESUMED) 
		rc = (osptr_t) ERR_RESUMED;
	else if (retflags & TASK_TIMEOUT)
		rc = (osptr_t) ERR_TIMEOUT;


	return rc;
}
/*@noaccess mutex_t@*/



/** Remove a task from the time-sorted wait list.
 * If the removal changes the wait-list head, the hardware alarm is
 * reprogrammed for the new earliest wakeup.
 * @param wait_tid  Task to remove. */
static inline void osint_rmtask_from_wtlist(tid_t wait_tid)
{
	tid_t const prev_head = WT_list_head();

	WT_list_rm(wait_tid);
	if (WT_list_head() != prev_head)
		osint_alarm_setup();
}




/** Internal function to make a task ready from lock state with the
 *  appropriate return  value.
 *  @param wait_tid  The task to be woken.
 *  @param ret_val   The return value to be passed to the woken up
 *                    process. */
static void osint_make_task_ready(tid_t wait_tid, /*@null@*/ osptr_t ret_val)
{

	TR_ASSERT(wait_tid != ILLEGAL_ELEM, "Illegal task ready", wait_tid);


	/* Also remove the task from the regular wait list */
	osint_rmtask_from_wtlist(wait_tid);
	RDY_list_add(wait_tid);




#ifdef TIROS_REGISTER_PASSING
	/* Return value is patched into the saved register context. */
	hal_retval_set(TCBarray[wait_tid].ctxt_ptr, ret_val);
#else
	/* Return value is picked up from the TCB after the switch
	   (see osint_ctxt_switch). */
	TCBarray[wait_tid].paramret_val = ret_val;	
#endif


	/* Set state of this process to ready. */
	TCBarray[wait_tid].lock_ptr = TR_ILLEGAL_ADDR;

	/* Mark wait_tid's TCB as being in the ready list. 
	   Clear out any wait flags.  Don't clear out any error flags
	   that have to be checked by the resuming task. */
	TCBarray[wait_tid].flags &= ~TASK_WAIT;

}








/** Find a new tcb slot for a new process at a given priority.
 * @param [in] priority Priority of the new task.  If this priority is
 *            already in use by a different task, then this function MUST fail.
 * @param [in] stk  The stackpointer for the new task.  This is only
 *            needed to mark that the TCB has been allocated.
 * @return A unique tid_t if successful, ILLEGAL_ELEM if failure */
static tid_t osint_newtaskid(tid_t priority, /*@shared@*/ osword_t *stk)
{
	tid_t slot = ILLEGAL_ELEM;
	tid_t i;

	/* Single pass: remember the first free TCB slot, and make sure
	 * no live task already owns the requested priority. */
	for (i = 0; i < TIROS_MAX_PROCS; i++) {
		if (TCBarray[i].ctxt_ptr == ILLEGAL_STACK) {
			if (slot == ILLEGAL_ELEM)
				slot = i;
		} else if (TCBarray[i].priority == priority) {
			/* Priority collision: the request must fail. */
			return ILLEGAL_ELEM;
		}
	}

	if (slot != ILLEGAL_ELEM) {
		/* Claim the slot: a non-ILLEGAL stack pointer marks
		 * the TCB as allocated. */
		TCBarray[slot].ctxt_ptr = (osstkptr_t) stk;
		TCBarray[slot].priority = priority;
	}

	return slot;
}

/** @}  */
/* ------------------------------------------------------------ */







/** Mark the end of interrupt service.
 * Decrements the ISR nesting count; when the outermost ISR exits and
 * the scheduled (ready-head) task differs from the running one, traps
 * into the kernel to perform the deferred context switch. */
void OS_ISR_END(void)
{
#if (TIROS_MAX_ISR_NESTING > 1)
	/* Nested ISRs are possible: update the counter inside a
	   critical section. */
	OS_CRITICAL_ENABLE();
	OS_CRITICAL_BEGIN(); 
	os_isr_nesting--; 
	if (os_isr_nesting == 0) { 
		if (running_task() != RDY_list_head() )
			OS_KERNEL_TRAP(); 
	} 
	OS_CRITICAL_END();
#else
	/* Single-level ISRs: no nesting race to guard against. */
	os_isr_nesting--;
	if (running_task() != RDY_list_head() )
		OS_KERNEL_TRAP();                 	       
#endif
}




/** Initialize the OS data structures and the hal.
 * Sets os_isr_nesting to 1 so that osint_ctxt_switch() stays inert
 * until os_start() clears it. */
void os_init(void)
{
	int slot;

	LISTS_init();
	/* Every TCB slot starts out free (marked by ILLEGAL_STACK). */
	for (slot = 0; slot < TIROS_MAX_PROCS; slot++) {
		TCBarray[slot].ctxt_ptr = ILLEGAL_STACK;
	}

	RUNNING_task = ILLEGAL_ELEM;
	RUNNING_task_TCB = (struct TCB *) TR_ILLEGAL_ADDR;
	ctxt_switch_cnt = 0;
	os_isr_nesting = 1;	/* Cleared again by os_start() */
	hal_init();
}






/** Start multitasking.
 * Clears the ISR-nesting guard set by os_init() and loads the context
 * of the highest-priority ready task.  If the ready list is empty,
 * nothing is started and the function simply returns. */
void os_start(void)
{
	tid_t first_tid = RDY_list_head();

	os_isr_nesting = 0;
	if (first_tid == ILLEGAL_ELEM)
		return;		/* Nothing ready to run. */

	RUNNING_task = first_tid;
	RUNNING_task_TCB = &TCBarray[first_tid];
	hal_ctxt_load(RUNNING_task_TCB->ctxt_ptr);
}







/** Create a new task and schedule it.
 * @param func      Entry function of the task.
 * @param param     Parameter handed to the entry function.
 * @param stack     Base of the stack memory for the task.
 * @param stacksize Size of that stack (interpretation is hal-specific).
 * @param priority  Priority for the task; must be unique among live
 *                  tasks, otherwise creation fails.
 * @return The new task id, or ILLEGAL_ELEM on failure (bad priority,
 *         no free TCB slot, or priority already in use). */
tid_t os_task_create(taskfunc_t func, osptr_t param, 
		     osword_t  *stack, osword_t stacksize, 
		     tid_t priority) 
{
	tid_t tid = ILLEGAL_ELEM;
	OS_CRITICAL_ENABLE();

	if (priority == ILLEGAL_ELEM)
		goto quit;


	OS_CRITICAL_BEGIN();

	tid = osint_newtaskid(priority, stack);
	if (tid == ILLEGAL_ELEM)
		goto endcritical;

	/* The stack val and priority are already set */
	TCBarray[tid].eff_priority = priority;

	/* Now we have a valid task id and TCB slot, and priority. */
	TCBarray[tid].num_mutexes = 0;
	TCBarray[tid].lock_ptr = TR_ILLEGAL_ADDR;

#ifdef TIROS_STK_CHECK
	/* Remember the stack extent so overflow can be checked at
	   context switches (see osint_ctxt_switch). */
	TCBarray[tid].base_stk_ptr = stack;
	TCBarray[tid].stksize      = stacksize;
#endif


	
	/* Initialize stack  */
	TCBarray[tid].ctxt_ptr = 
		hal_stk_init( stack, stacksize, (osfnptr_t)func, param);
	

	/* Schedule task. */
	RDY_list_add(tid);

	/* Check to see if a new process should be scheduled
	 * On startup, running_task = ILLEGAL. */
	if (running_task() != ILLEGAL_ELEM && 
	    RDY_list_head() != running_task() ) 
		(void) osint_ctxt_switch();
	

 endcritical:	
	OS_CRITICAL_END();

 quit:
	return tid;
}






/** Return the id of the calling task.
 * @return The running task's id, or ILLEGAL_ELEM when called from
 *         interrupt context (where there is no meaningful "self"). */
tid_t os_self_tid(void)
{
	return os_isr_nesting ? ILLEGAL_ELEM : running_task();
}







/** Change the base priority of a task.
 * Fails if the task does not exist, holds a mutex, or the requested
 * priority is already used by another live task.  On success the task
 * is resorted in whatever queue it currently occupies, and a context
 * switch is performed if this changes the highest-priority ready task.
 * @param task  Task whose priority is changed.
 * @param prio  New base priority (must be unique among live tasks).
 * @return SUCCESS, ERR_NOSUCHTASK, ERR_WOULDBLOCK_MUTEX, or
 *         ERR_PRIO_IN_USE. */
int8_t os_prio_set(tid_t task, tid_t prio)
{
	int8_t rc = ERR_NOSUCHTASK;
	int i;
	OS_CRITICAL_ENABLE();


	if (task >= TIROS_MAX_PROCS)
		goto quit;
	
	OS_CRITICAL_BEGIN();
	if (TCBarray[task].ctxt_ptr == ILLEGAL_STACK)
		goto end_critical;
	
	if (TCBarray[task].priority == prio) {
		rc = SUCCESS;
		goto end_critical;
	}


	if (TCBarray[task].num_mutexes) {
		rc = ERR_WOULDBLOCK_MUTEX;
		goto end_critical;
	}



	/* Check to see if the task priority is in use */
	for (i=0; i<TIROS_MAX_PROCS; i++) {
		if (TCBarray[i].ctxt_ptr != ILLEGAL_STACK &&
		    TCBarray[i].priority == prio) {
			rc = ERR_PRIO_IN_USE;
			goto end_critical;
		}
	}


	rc = SUCCESS;
	/* Priority is not in use */
	TCBarray[task].priority = prio;


	/* If effective priority is already set to priority then we
	 * are done.
	 * When does this happen?
	 * The trivial case where the base priority is originally prio
	 * has been eliminated by previous checks.
	 * In the priority inheritance protocol, due to delayed
	 * fallback, the effective priority will not fall back to the
	 * base priority until all mutexes have been released.
	 *
	 * Note that the effective priority is always
	 * the base priority if the task is not holding any mutexes. 
	 *
	 * If the priority ceiling protocol is in effect, the priority
	 * of a task holding a mutex is not allowed to be changed.
	 * For the code to reach this point, the task is not holding
	 * any mutexes.  Thus the effective priority is different from 
	 * the desired priority.
	 *
	 * So effectively at this time, the effective priority is
	 * different from the desired priority OR the task is holding
	 * a mutex (priority inheritance protocol).
	*/


	osint_reprioritize(task, prio);





	/* We may need a context switch */
	if (running_task() != RDY_list_head() )
		(void) osint_ctxt_switch();
	

 end_critical:
	OS_CRITICAL_END();
 quit:
	return rc;
}








/** Read the base or effective priority of a task.
 * @param task     Task to query.
 * @param options  With O_EFFECTIVE_PRIO set, return the effective
 *                 priority; otherwise the base priority.
 * @return The requested priority, or ILLEGAL_ELEM if the task does
 *         not exist. */
tid_t os_prio_get(tid_t task, uint8_t options)
{
	tid_t result = ILLEGAL_ELEM;
	OS_CRITICAL_ENABLE();

	if (task < TIROS_MAX_PROCS) {
		OS_CRITICAL_BEGIN();
		if (TCBarray[task].ctxt_ptr != ILLEGAL_STACK) {
			result = (options & O_EFFECTIVE_PRIO)
				? TCBarray[task].eff_priority
				: TCBarray[task].priority;
		}
		OS_CRITICAL_END();
	}

	return result;
}




/** Delete a task.
 * A task holding a mutex cannot be deleted.  The task is removed from
 * any lock wait list and from either the wait or ready queue, and its
 * TCB slot is freed by setting the context pointer to ILLEGAL_STACK.
 * If the running task deletes itself, the resulting context switch is
 * handled by osint_taskswitcher(), which skips saving the context of
 * a freed slot.
 * @param task  Id of the task to delete.
 * @return SUCCESS, ERR_NOSUCHTASK, or ERR_TASKBLOCKED. */
int8_t os_task_del(tid_t task)
{
	int8_t rc;
	struct TCB *task_tcb;
	OS_CRITICAL_ENABLE();
	/* The task could be in several states:
	   0. Having locked a mutex.  This call should fail.
	   1. Waiting on a mutex.
	   2. Waiting in the wait queue.
	   3. In the run queue.
	*/

	if (task >= TIROS_MAX_PROCS) {
		rc = ERR_NOSUCHTASK;
		TR_MSG_WARN("del task id > MAX_PROCS\n");
		goto quit;
	}

	OS_CRITICAL_BEGIN();
	task_tcb = &TCBarray[task];

	if ( task_tcb->ctxt_ptr == ILLEGAL_STACK) {
		rc = ERR_NOSUCHTASK;
		TR_MSG_WARN("del task does not exist\n");
		goto end_critical;
	}
	if ( task_tcb->num_mutexes) {
		rc = ERR_TASKBLOCKED;
		goto end_critical;
	}

	/* Now we have a valid task that is not holding a mutex.
	 * Remove it from all queues. */
	if (task_tcb->lock_ptr != TR_ILLEGAL_ADDR) {
		/* Remove it from the lock waiting list. */
		LK_list_rm((plist_t*)task_tcb->lock_ptr, task);
	}

	/* The task is either in the ready list, if not certainly in
	   the wait list */
	if (task_tcb->flags & TASK_WAIT) 
		osint_rmtask_from_wtlist(task);
	else
		RDY_list_rm(task);

	rc = SUCCESS;

	/* We have to free the TCB slot */
	task_tcb->ctxt_ptr = ILLEGAL_STACK;

	/* We may need a context switch */
	if (running_task() != RDY_list_head() )
		(void) osint_ctxt_switch();

 end_critical:
	OS_CRITICAL_END();
 quit:
	return rc;
}




/** Suspend a task indefinitely.
 * The task is removed from any lock wait list and from its current
 * queue, then put to sleep with no timeout; only os_task_resume()
 * brings it back.  Tasks holding a mutex cannot be suspended.  The
 * running task may suspend itself.
 * @param task  Id of the task to suspend.
 * @return SUCCESS, ERR_NOSUCHTASK, or ERR_TASKBLOCKED. */
int8_t os_task_suspend(tid_t task)
{
	int8_t rc = ERR_NOSUCHTASK;

	OS_CRITICAL_ENABLE();

	if (task >= TIROS_MAX_PROCS) 
		goto quit;


	OS_CRITICAL_BEGIN();

	if ( TCBarray[task].ctxt_ptr == ILLEGAL_STACK)
		goto end_critical;
	
	if (TCBarray[task].num_mutexes) {
		/* Not allowed to suspend tasks holding a mutex */
		rc = ERR_TASKBLOCKED;
		goto end_critical;
	}

	
	/* At this point the only outcome is success.*/
	rc = SUCCESS;


	/* If the task is waiting on a lock, remove it from the lock
	   list */
	if (TCBarray[task].lock_ptr != TR_ILLEGAL_ADDR) {
		/* Remove it from the lock waiting list. */
		LK_list_rm((plist_t*)TCBarray[task].lock_ptr, task);
		TCBarray[task].lock_ptr = TR_ILLEGAL_ADDR;
	}


	/* The task is either in the ready list, if not certainly in
	   the wait list */
	if (TCBarray[task].flags & TASK_WAIT) 
		WT_list_rm(task); /* Don't need to call
				     osint_rmtask_from_wtlist() */
	else
		RDY_list_rm(task);

	/* Put task to sleep for an infinite time.  
	   osint_task_puttosleep will take    care of setting the wait
	   flag. */ 
	(void)osint_task_puttosleep(task, 0, 0);

	/* Context switch is not necessary.  If it
	   were the current task that was suspended,  the context will
	   automatically be switched within osint_task_puttosleep()  */



 end_critical:
	OS_CRITICAL_END();

 quit:
	return rc;
}





/** Resume (forcibly wake) a suspended or waiting task.
 * The task is flagged TASK_RESUMED (so a blocked task wakes with
 * ERR_RESUMED), removed from any lock wait list and from the wait
 * queue, and made ready.  A context switch follows if the woken task
 * now heads the ready list.
 * @param task  Id of the task to resume.
 * @return SUCCESS (also when the task was not waiting at all), or
 *         ERR_NOSUCHTASK. */
int8_t os_task_resume(tid_t task)
{
	int8_t rc = ERR_NOSUCHTASK;
	OS_CRITICAL_ENABLE();


	if (task >= TIROS_MAX_PROCS)
		goto quit;

	OS_CRITICAL_BEGIN();

	/* Is this a valid task */
	if (TCBarray[task].ctxt_ptr == ILLEGAL_STACK) {
		goto end_critical;
	}

	rc = SUCCESS;

	/* Is the task in wait state? os_task_suspend should have done
	   this.  If not, then nothing further to do. */
	if ((TCBarray[task].flags & TASK_WAIT) == 0) 
		goto end_critical;

	/* Mark the task flag to indicate that it has been timed out
	   forcibly */
	TCBarray[task].flags |= TASK_RESUMED;

	/* If the task is waiting for a lock, remove it forcibly */
	if (TCBarray[task].lock_ptr != TR_ILLEGAL_ADDR) {
		/* Remove it from the lock waiting list. */
		LK_list_rm((plist_t*)TCBarray[task].lock_ptr, task);
		TCBarray[task].lock_ptr = TR_ILLEGAL_ADDR;
	}

	/* Move task from wait list to ready list */
	osint_rmtask_from_wtlist(task);
	RDY_list_add(task);

	/* Context switch if necessary */
	if (RDY_list_head() != running_task() )
		(void) osint_ctxt_switch();
	
 end_critical:
	OS_CRITICAL_END();

 quit:
	return rc;
}




/** Read the current OS time.
 * The hal read is wrapped in a critical section so the caller gets a
 * consistent snapshot.
 * @param curr_time  Out: receives the current time. */
void os_time_get(trtime_t *curr_time) 
{
	OS_CRITICAL_ENABLE();

	OS_CRITICAL_BEGIN();
	hal_time_get(curr_time);
	OS_CRITICAL_END();
}





/** Set the OS time.
 * May fail if the hal cannot change the clock.  On success, any waits
 * that the jump makes due are expired immediately.
 * @param new_time  New absolute time.
 * @return SUCCESS, or ERR_FAILED if the hal refused the change. */
int8_t os_time_set(trtime_t const *new_time)
{
	int8_t rc = ERR_FAILED;
	OS_CRITICAL_ENABLE();

	OS_CRITICAL_BEGIN();
	if (hal_time_set(new_time)) {
		rc = SUCCESS;
		/* The clock jumped: time out any waiting tasks that
		   are now due.  osint_alarm_reached() returns nonzero
		   when the wakeups require a context switch, which we
		   then perform explicitly. */
		if (osint_alarm_reached(new_time))
			(void) osint_ctxt_switch();
	}
	OS_CRITICAL_END();

	return rc;
}




/** Put the calling task to sleep until a given time.
 * Disallowed from an ISR, and (unless TIROS_ALLOW_SLEEP_W_MUTEX is 1)
 * while holding a mutex.
 * @param wake_time  Wakeup time; absolute, or relative to now when
 *                   O_RELATIVE_TIME is set in options.
 * @param options    Option flags (O_RELATIVE_TIME).
 * @return SUCCESS on a normal wakeup, ERR_RESUMED if woken early by
 *         os_task_resume(), ERR_WOULDBLOCK_MUTEX or ERR_WOULDBLOCK_ISR
 *         if sleeping is not permitted. */
int8_t os_wake_at(trtime_t const *wake_time, uint8_t options)
{
	int8_t rc = SUCCESS;
	struct TCB * my_tcb;  /* This does not have to be volatile
			       * since, it always points at the task
			       * that is running this code, even after
			       * it has been waken from sleep */

	/* The flags field in the TCB for  a specific task can be
	 * changed when a task is sleeping. It may  awake with a
	 * different value for this field.  Therefore we force this to
	 * be volatile to prevent using the cached value if any. */  
	uint8_t volatile *my_flags;



	OS_CRITICAL_ENABLE();


	my_tcb = running_task_TCB();

#if (TIROS_ALLOW_SLEEP_W_MUTEX != 1)	
	if (my_tcb->num_mutexes) {
		/* This section does not have to be inside the
		   critical section. This syscall is actively invoked
		   by the current task.  No other task can therefore
		   change the num_mutexes for this
		   task. running_task_TCB() is valid even if this task
		   is interrupted within this call */
		TR_MSG_INFO("Cannot sleep due to held mutex");
		rc = ERR_WOULDBLOCK_MUTEX;
		goto quit;
	}
#endif

	/* This call is disallowed from an ISR */
	if (os_isr_nesting) {
		/* This does not have to be inside the critical
		   section.  If this is called from a user task, an
		   interrupting ISR does not change the perceived value by the
		   task .  If this is called from an ISR,
		   os_isr_nesting value must be non-zero */
		TR_MSG_CRIT("os_wake_at from ISR");
		rc = ERR_WOULDBLOCK_ISR;
		goto quit;
	}

	/*  Remove from readylist and move to wait queue. */
	OS_CRITICAL_BEGIN();


	/* Clear the error flags */
	my_tcb->flags &= ~(TASK_BLOCK_ERRMASK);

	TR_ASSERTwTCB(RDY_list_head() == running_task(), "RDY != RUN",
		      running_task(), 1);

	/* Remove self from readyQ and put on waitQ.  Execution
	   resumes here after the wakeup. */
	(void)osint_task_puttosleep( RDY_list_pop(), wake_time, options);
	my_flags = &my_tcb->flags;   
	
	if ( (*my_flags) & TASK_RESUMED) 
		rc = ERR_RESUMED;

	OS_CRITICAL_END();


 quit:

	return rc; 
}





#if (TIROS_ENABLE_MUTEX == 1)
/*@access mutex_t@*/
/** Prepare a mutex for use: no owner, zero recursion depth, empty
 * wait list, and the supplied priority ceiling. */
void mutex_init(mutex_t *m, tid_t prio_ceiling) 
{
	m->owner = ILLEGAL_ELEM;
	m->recursion = 0;
	m->prio_ceiling = prio_ceiling;
	LK_list_init(&m->waitlist);
}


/** Report which task currently owns the mutex.
 * The result is an instantaneous snapshot, so no critical section is
 * taken; it may be stale by the time the caller inspects it. */
tid_t mutex_owner(mutex_t const *m)
{
	tid_t holder = m->owner;
	return holder;
}



/** Acquire a mutex, blocking until *timeout if it is already owned.
 *
 * Uses the immediate priority-ceiling protocol: on acquisition the
 * owner's effective priority is set to the mutex's priority ceiling.
 * The owner may lock recursively; each lock counts against the task's
 * TIROS_MAX_MUTEXES budget and needs a matching unlock.
 *
 * @param m       [inout] Mutex to lock.
 * @param timeout [in]    Timeout used if the call has to block.
 * @param options         Blocking options (e.g. O_NONBLOCKING).
 * @return SUCCESS if acquired;
 *         ERR_LOCK_ISR       if called from an ISR;
 *         ERR_LOCK_PRIO_CEIL if the caller's effective priority does
 *                            not permit locking at this ceiling;
 *         ERR_FULL           if the task already holds the maximum
 *                            number of mutexes;
 *         ERR_WOULDBLOCK     if O_NONBLOCKING is set and the mutex is
 *                            owned by another task;
 *         otherwise the status delivered by the blocking wait.
 */
int8_t mutex_lock(mutex_t *m, 
		  trtime_t const *timeout, 
		  uint8_t options) 
{
	int8_t rc;
	struct TCB * my_tcb;
	osptr_t tmp_ptr;       /* Tmp variables for explicit casting */


	OS_CRITICAL_ENABLE();


	OS_CRITICAL_BEGIN();
	if (os_isr_nesting) {   /* Not allowed from ISR */
		TR_MSG_CRIT("mutex_lock from ISR");
		rc = ERR_LOCK_ISR;
		goto endcritical;
	}

	my_tcb = running_task_TCB();

	/* Requirement is that the effective priority of a task be
	 * lower or equal to the  priority ceiling */
	if ( my_tcb->eff_priority < m->prio_ceiling) {
		TR_MSG_WARN("attempted mutex_lock with high prio");
		rc = ERR_LOCK_PRIO_CEIL;
		goto endcritical;
	}



	/* If locking this mutex would increase our held mutexes
	 * beyond TIROS_MAX_MUTEXES, give an error */
	if (my_tcb->num_mutexes == TIROS_MAX_MUTEXES) {
		rc = ERR_FULL;
		goto endcritical;
	}


	/* If the mutex does not have an owner, it now belongs to us */
	if (m->owner == ILLEGAL_ELEM) {
		m->owner = running_task();
		m->recursion++;
		my_tcb->num_mutexes++;

		/* Increase the priority of this task.  
		 * Note that we don't have to
		 * resort this task into the running queue since by
		 * definition, for this section of the code to run, this
		 * task is the highest priority task. */
		my_tcb->eff_priority = m->prio_ceiling;

		rc = SUCCESS;
		goto endcritical;
	} else if (m->owner == running_task()) {  /* Recursive call */
		m->recursion++;
		my_tcb->num_mutexes++;
		/* Already the owner */
		/* Priority should already be set correctly */
		rc = SUCCESS;
		goto endcritical;
	}

	/* We have to wait for the mutex */
	/* Is blocking allowed? */
	if (options & O_NONBLOCKING) {
		rc = ERR_WOULDBLOCK;
		goto endcritical;
	}

	/* Locking is permitted; block on the mutex waitlist.  The
	 * status (set by the eventual unlocker or a timeout) comes
	 * back as a generic pointer and is narrowed to int8_t. */
	tmp_ptr = osint_wait_on_lock(&m->waitlist, timeout, options);
	rc = (int8_t)(osptrword_t)tmp_ptr;



 endcritical:
	OS_CRITICAL_END();
	return rc;
}





/** Release a mutex owned by the calling task.
 *
 * Decrements the recursion count; the mutex is actually released only
 * when the outermost lock is undone.  On real release, the caller's
 * effective priority is restored to its base priority if it no longer
 * holds any mutex, ownership passes to the highest-priority waiter
 * (the waitlist is kept in priority order), and a context switch is
 * performed if a higher-priority task became runnable.
 *
 * @param m [inout] Mutex to unlock.
 * @return SUCCESS, or ERR_NOTOWNER if the caller does not own the
 *         mutex or the call was made from an ISR.
 */
int8_t mutex_unlock(mutex_t *m) 
{
	int8_t rc;
	tid_t myid;
	tid_t nexttask;
	tid_t base_prio;
	struct TCB * my_tcb;
	OS_CRITICAL_ENABLE();

	/* If we are within an ISR then return error */

	OS_CRITICAL_BEGIN();
	/* We can only do this if we are the owner */
	if ((m->owner != running_task()) || (os_isr_nesting != 0) ) {
		TR_MSG_CRIT("mutex_unlock by NOT owner");
		rc = ERR_NOTOWNER;
		goto endcritical;
	}

	/* Decrement the mutex count */
	my_tcb = running_task_TCB();
	my_tcb->num_mutexes--;

	/* Decrement the mutex recursion */
	m->recursion--;
	
	/* If recursive locks have not been unlocked, exit.
	 * no change in priority */
	if (m->recursion != 0) {
		rc = SUCCESS;
		goto endcritical;
	}


	/* If mutex count is zero (and thus recursion is also zero), 
	 * get back to our original priority */
	if (   my_tcb->num_mutexes == 0) {
		base_prio = my_tcb->priority ;
		if ( my_tcb->eff_priority != base_prio) {

			TR_ASSERTwTCB(RDY_list_head() == running_task(), "RDY != RUN",
				      running_task(), 1);

			/* Since our priority has changed, remove ourselves
			   and reinsert by new priority. */
			myid = RDY_list_pop();
			my_tcb->eff_priority = base_prio;
			RDY_list_add(myid);
			/* Leave context switching for next section */
		}
	}


	/* Is there anyone waiting?  Pop the head of the prioritized
	 * waitlist; ILLEGAL_ELEM means nobody is waiting and the
	 * mutex simply becomes free. */
	nexttask = LK_list_pop(&m->waitlist);
	m->owner = nexttask;
	if ( nexttask  != ILLEGAL_ELEM) {
		TCBarray[nexttask].num_mutexes++;
		/* This task would have to be elevated in
		 * priority. This happens automatically as the
		 * waitlist is in prioritized order and the task that
		 * is woken up is the highest priority task that is
		 * waiting. */ 
		TCBarray[nexttask].eff_priority = m->prio_ceiling;

		/* Context switch will happen if needed */
		osint_make_task_ready(nexttask, (osptr_t)SUCCESS);
	} 

	
	rc = SUCCESS;

	/* Explicitly perform a  context switch if necessary */
	if (running_task() != RDY_list_head() )
		(void) osint_ctxt_switch();


 endcritical:
	OS_CRITICAL_END();
	return rc;
}
/*@noaccess mutex_t@*/
#endif /* TIROS_ENABLE_MUTEX */









#if (TIROS_ENABLE_CSEM == 1)
/*@access csem_t, mutex_t@*/
/** Initialize a counting semaphore.
 *
 * The maximum count is capped at TIROS_CSEMVAL_MAX, and the initial
 * count is clamped to that maximum.  Without the clamp, an initial
 * value above the maximum would make csem_V()'s full test
 * (count == max_count) unreachable, letting the count grow without
 * bound.
 *
 * @param cs       [out] Semaphore to initialize.
 * @param init_val       Initial count (clamped to the maximum).
 * @param max_val        Requested maximum count (capped at
 *                       TIROS_CSEMVAL_MAX).
 */
void csem_init(csem_t *cs, csemval_t init_val, csemval_t max_val) 
{
	LK_list_init(&cs->waitlist);

	if (max_val < TIROS_CSEMVAL_MAX)
		cs->max_count = max_val;
	else 
		cs->max_count = TIROS_CSEMVAL_MAX;

	/* Clamp the initial count so it never exceeds the maximum */
	if (init_val > cs->max_count)
		init_val = cs->max_count;
	cs->count = init_val;
}

/** Return an instantaneous snapshot of the semaphore count.
 *
 * No critical section is needed: the caller observes either the value
 * from before or after any concurrent update, which is no different
 * from being interrupted just before or after a critical section. */
csemval_t csem_count(csem_t const *cs)
{
	csemval_t snapshot = cs->count;
	return snapshot;
}



/** Signal ("V") a counting semaphore.
 *
 * Increments the count unless it is already at max_count.  If a task
 * is waiting, the token is handed straight to the highest-priority
 * waiter (the count is decremented again) and a context switch is
 * performed if that task now heads the ready list.
 *
 * @param cs [inout] Semaphore to signal.
 * @return the incremented count, or ERR_FULL if the semaphore was
 *         already at its maximum.
 */
csemval_t csem_V(csem_t *cs)  
{
	csemval_t rc;
	tid_t nexttask;
	OS_CRITICAL_ENABLE();

	OS_CRITICAL_BEGIN();

	/* Check that the count is not at maximum */
	if (cs->count == cs->max_count) {
		rc = ERR_FULL;
		goto endcritical;
	}
		
	cs->count++;
	rc = cs->count;

	/* Is there anyone waiting */
	nexttask = LK_list_pop(&cs->waitlist);
	if (nexttask  != ILLEGAL_ELEM) {
		/* Decrease count: the token goes directly to the
		 * woken task rather than staying in the semaphore. */
		cs->count--;

		/* Pass count to the new task as a generic pointer */
		/*lint -e{571} The cast is necessary */
		osint_make_task_ready(nexttask, (osptr_t)(osptrword_t)cs->count );

		/* Explicitly perform a  context switch if necessary */
		if (running_task() != RDY_list_head() )
			(void) osint_ctxt_switch();
	}

 endcritical:
	OS_CRITICAL_END();
	return rc;
	
}





/** Wait ("P") on a counting semaphore, blocking until *timeout if no
 * token is available.
 *
 * @param cs      [inout] Semaphore to wait on.
 * @param timeout [in]    Timeout used if the call has to block.
 * @param options         Blocking options (e.g. O_NONBLOCKING).
 * @return the remaining count when a token was taken without
 *         blocking; if blocking was required, the value delivered by
 *         the signalling task or a blocking error status; or the
 *         error from osint_allow_blocking() when blocking is not
 *         permitted.
 */
csemval_t csem_P(csem_t *cs,  trtime_t const *timeout, 
		 uint8_t options) 
{
	csemval_t rc;
	osptr_t tmp_ptr;       /* Tmp variables for explicit casting */

	OS_CRITICAL_ENABLE();

	OS_CRITICAL_BEGIN();

	if (cs->count) { /* No need to block */
		cs->count--;
		rc = cs->count;		
		goto endcritical;
	}

	/* Is blocking allowed ? */
	rc = (csemval_t)osint_allow_blocking(options);
	if (rc != (csemval_t)SUCCESS) {
		TR_MSG_INFO("csem_P: cannot block");
		goto endcritical;
	}

	/* Wait on the lock; the woken-up status (or the handed-over
	 * count from csem_V) comes back as a generic pointer */

	tmp_ptr = osint_wait_on_lock(&cs->waitlist, 
				     timeout, options);




	rc = (csemval_t)(osptrword_t)tmp_ptr;


 endcritical:
	OS_CRITICAL_END();
	return rc;
}
/*@noaccess csem_t, mutex_t@*/
#endif /* TIROS_ENABLE_CSEM */









#if (TIROS_ENABLE_MSGQ == 1)
/*@access msgQ_t, mutex_t@*/
/** Set up an empty message queue with room for qlen entries. */
void msgQ_init(msgQ_t *mq, uint8_t qlen) 
{
	mq->maxlen = qlen;
	mq->currlen = 0;
	mq->head = 0;
	LK_list_init(&mq->waitlist);
}


/** Return the number of queued messages.
 * Instantaneous snapshot; no critical section required. */
uint8_t msgQ_count(/*@shared@*/ msgQ_t const *mq)
{
	uint8_t n_queued = mq->currlen;
	return n_queued;
}


/** Get the msgQ tail, i.e., the insertion point for the next message.
 *
 * Equivalent to (head + currlen) mod maxlen, but written without the
 * modulo so that no intermediate value ever exceeds maxlen-1 in the
 * (narrow) mqind_t type.
 *
 * @param mq [in] Pointer to message queue.
 * @return Index of the tail slot in the message array. */
static inline mqind_t osint_msgQtail(msgQ_t const *mq)
{
	/* Free slots between head and the physical end of the array */
	mqind_t room_to_end = (mq->maxlen - 1) - mq->head;

	if (mq->currlen > room_to_end)             /* tail wraps around */
		return (mq->currlen - 1) - room_to_end;

	return mq->head + mq->currlen;             /* tail fits in place */
}



/** Post a message (an osptr_t value) to a message queue.
 *
 * Never blocks: if the queue is full, ERR_FULL is returned.  If a
 * task is waiting for a message, the oldest queued message is handed
 * to it immediately and a context switch is performed if needed.
 *
 * @param mq       [inout] Destination queue.
 * @param tx_value         Value to enqueue.
 * @return SUCCESS, or ERR_FULL if the queue is full.
 */
int8_t msgQ_send(msgQ_t *mq,  
		 osptr_t tx_value) 
{
	int8_t rc;
	tid_t nexttask;
	mqind_t tmp;

	OS_CRITICAL_ENABLE();


	OS_CRITICAL_BEGIN();
	
	if (mq->currlen == mq->maxlen)  { 
		/* Queue full.  Have to block */
		TR_MSG_WARN("msgQ_send: msgQ Full");
		rc = ERR_FULL;
		goto endcritical;
	} 

	/* Add value to the tail end */
	tmp = osint_msgQtail(mq);
	mq->msgs[tmp] = tx_value;
	mq->currlen++;
	rc = SUCCESS;
	

	/* Is there anyone waiting */
	nexttask = LK_list_pop(&mq->waitlist);
	if (nexttask != ILLEGAL_ELEM) {
		/* Remove value from the head end and deliver it to
		 * the woken task (FIFO order is preserved even when
		 * the value just enqueued is not the one delivered) */
		tmp = mq->head;
		mq->currlen--;
		mq->head++;
		if (mq->head == mq->maxlen)   /* Wrap around */
			mq->head = 0;

		osint_make_task_ready(nexttask, 
				      mq->msgs[tmp] );


		/* Explicitly perform a  context switch if necessary */
		if (running_task() != RDY_list_head() )
			(void) osint_ctxt_switch();
		
	}


 endcritical:

	OS_CRITICAL_END();
	return rc;
}




/** Receive a message from a queue, blocking until *timeout if empty.
 *
 * @param mq       [inout] Queue to read from.
 * @param timeout  [in]    Timeout used if the call has to block.
 * @param options          Blocking options (e.g. O_NONBLOCKING).
 * @param rx_value [out]   Receives the message; left unchanged when a
 *                         blocking error occurs.
 * @return SUCCESS when a message was delivered; otherwise a blocking
 *         error status.
 */
int8_t msgQ_recv(msgQ_t *mq, trtime_t const *timeout, uint8_t options, 
		 osptr_t *rx_value) 
{
	int8_t rc;
	osptr_t /*@null@*/ rcp;
	OS_CRITICAL_ENABLE();


	OS_CRITICAL_BEGIN();


	if (mq->currlen) {  /* There is data in the queue */
		*rx_value = mq->msgs[mq->head];
		rc = SUCCESS;
		mq->currlen--;
		mq->head++;
		if (mq->head == mq->maxlen )  /* Wrap around */
			mq->head = 0;

		goto endcritical;
	}

	/* Have to block */
	

	/* Check if this task is allowed to block */
	rc = (int8_t) osint_allow_blocking(options);
	if (rc != SUCCESS) { /* Blocking not allowed */
		TR_MSG_INFO("msgQ_recv: Block failed");
		goto endcritical;
	}
	
	
	/* Task has to block and blocking is allowed  */
	rcp = osint_wait_on_lock(&mq->waitlist, 
				 timeout, options);
	/* The flags may change during the above call.  If
	running_task_TCB() is later added before this
	osint_wait_on_lock() call, then use the appropriate volatile
	designation to access the flags field */

	/* At this time rc is set to SUCCESS.  The wake-up value rcp is
	 * either the delivered message or an error code; the TCB error
	 * flags disambiguate the two. */
	if ( running_task_TCB()->flags & TASK_BLOCK_ERRMASK) {
		rc = (int8_t) (osptrword_t)rcp;
		/* rx_value is not changed */
	} else {
		*rx_value = rcp;
	}
	


 endcritical:
	OS_CRITICAL_END();
	return rc;
}
/*@noaccess msgQ_t, mutex_t@*/
#endif /* TIROS_ENABLE_MSGQ */







#if (TIROS_ENABLE_EFLAG == 1)
/*@access eflag_t, mutex_t@*/
/** Initialize an event flag object with the given starting bits. */
void eflag_init(eflag_t *ef, flag_t initval) 
{
	ef->status = initval;
	LK_list_init(&ef->waitlist);
}

/** Return a snapshot of the current event flag bits. */
flag_t eflag_get(eflag_t const *ef)
{
	flag_t snapshot = ef->status;
	return snapshot;
}


/** Decide whether a flag pattern satisfies a waiter's condition.
 *
 * @param eflag_status  Flag bits to test against.
 * @param checkbits     Bits the waiting task is interested in.
 * @param task_options  Waiter options; only the O_EFLAG_OR /
 *                      O_EFLAG_AND combine-mode bits are examined.
 * @return 1 on a match, 0 otherwise (including when the combine mode
 *         is neither exactly OR nor exactly AND). */
static inline 
int eflag_is_match(flag_t eflag_status, flag_t checkbits, 
		   uint8_t task_options ) 
{
	uint8_t combine_mode = task_options & (O_EFLAG_OR | O_EFLAG_AND);

	if (combine_mode == O_EFLAG_OR) {
		/* OR: any single requested bit being set suffices */
		return (eflag_status & checkbits) ? 1 : 0;
	}

	if (combine_mode == O_EFLAG_AND) {
		/* AND: every requested bit must be set.  Negating the
		 * status and ANDing with the request yields zero
		 * exactly when all requested bits are present. */
		return (((~eflag_status) & checkbits) == 0) ? 1 : 0;
	}

	return 0;
}




/** Set or clear bits in an event flag and wake every waiter whose
 * condition is now satisfied.
 *
 * With O_EFLAG_CLEAR the given bits are cleared instead of set.  With
 * O_EFLAG_TRIGGER the new value is used only to evaluate waiters and
 * is not stored back into the flag.
 *
 * @param ef      [inout] Event flag object.
 * @param setbits         Bits to set (or clear).
 * @param options         O_EFLAG_CLEAR and/or O_EFLAG_TRIGGER.
 * @return SUCCESS always.
 */
int8_t eflag_set(eflag_t *ef, flag_t setbits, 
		 uint8_t options) 
{
	tid_t  waiting_task, next_waiting_task;
	flag_t efstatus_tmp, wtask_bits;
	uint8_t wtask_options;  /* Waiting task options */
	OS_CRITICAL_ENABLE();
	

	OS_CRITICAL_BEGIN();
	efstatus_tmp = ef->status;
	
	/* Are the bits getting set or cleared ? */
	if (options & O_EFLAG_CLEAR) 
		efstatus_tmp &= ~setbits;
	else 
		efstatus_tmp |= setbits;
       

	/* Is this a trigger? -> Temporary value, not stored */
	if ( (options & O_EFLAG_TRIGGER) == 0)
		ef->status = efstatus_tmp;


	/* The bits have been set appropriately.
	   Now we need to go through the waiting list */
	waiting_task = LK_list_head(ef->waitlist);
	while (waiting_task != ILLEGAL_ELEM) {
		/* Retrieve the next waiting task BEFORE possibly
		 * removing the current one from the list. */
		next_waiting_task = LK_list_next(&ef->waitlist, 
						 waiting_task);

		/* Check eventflag condition for this task. We need to
		 * examine two things within the tasks TCB.
		 * 1. The bits that it cares about.
		 * 2. The combining Option .i.e. O_AND, O_OR etc. */
		wtask_bits = TCBarray[waiting_task].event_bits;
		wtask_options = TCBarray[waiting_task].flags & EFLAG_OPTS;

		/* If this task matches, make it ready */
		if (eflag_is_match(efstatus_tmp, wtask_bits,
				   wtask_options)) {
			LK_list_rm(&ef->waitlist, waiting_task);
			osint_make_task_ready(waiting_task, (osptr_t)SUCCESS);
		}

		waiting_task = next_waiting_task;
	}

	/* Explicitly perform a  context switch if necessary */
	if (running_task() != RDY_list_head() )
		(void) osint_ctxt_switch();

	OS_CRITICAL_END();

	return SUCCESS;
}






/** Wait until an event flag satisfies the requested bit condition.
 *
 * The combine mode in options (O_EFLAG_OR / O_EFLAG_AND) decides
 * whether any or all of checkbits must be set.  Returns immediately
 * if the condition already holds; otherwise blocks until eflag_set()
 * satisfies it or the timeout expires.
 *
 * @param ef        [inout] Event flag object.
 * @param checkbits         Bits of interest.
 * @param timeout   [in]    Timeout used if the call has to block.
 * @param options           Combine mode plus blocking options.
 * @return SUCCESS if the condition was met; otherwise a blocking
 *         error status.
 */
int8_t eflag_wait(eflag_t *ef, flag_t checkbits, trtime_t const *timeout,
		  uint8_t options) 
{
	int8_t rc;
	struct TCB * my_tcb;
	osptr_t tmp_ptr;       /* Tmp variable for explicit casting */


	OS_CRITICAL_ENABLE();


	OS_CRITICAL_BEGIN();
	
	/* Is there a flag match ? */
	if (eflag_is_match(ef->status, checkbits, options)) {
		rc = SUCCESS;
		goto endcritical;
	}

	/* Have to block */
	rc = (int8_t) osint_allow_blocking(options);
	if (rc != SUCCESS) { /* Not allowed to block */
		TR_MSG_INFO("eflag_wait: Block failed");
		goto endcritical;
	}

	
	/* Record what we are waiting for in our TCB so that
	 * eflag_set() can evaluate our condition while we sleep. */
	my_tcb = running_task_TCB();
	my_tcb->event_bits = checkbits;
	my_tcb->flags |= (options & EFLAG_OPTS);
	
	tmp_ptr = osint_wait_on_lock(&ef->waitlist,
				     timeout, options);

	rc = (int8_t) (osptrword_t) tmp_ptr;

	/* After being done, clear the flag bits */
	my_tcb->flags &= ~EFLAG_OPTS;

 endcritical:
	OS_CRITICAL_END();
	return rc;
}
/*@noaccess eflag_t, mutex_t@*/
#endif /* TIROS_ENABLE_EFLAG */










#ifdef TIROSINT_DATA_DEBUG
/** Debug hook: expose the raw TCB array as a byte pointer. */
unsigned char * osint_get_datastruct(void)
{
	unsigned char *raw = (unsigned char*) TCBarray;
	return raw;
}

/** Debug hook: snapshot scheduler state into *trdata.
 * Does nothing when trdata is NULL. */
void osint_gettrdata(trintdata_t* trdata)
{
	if (!trdata)
		return;

	trdata->running_task = running_task();
	trdata->isr_nesting = os_isr_nesting;
	trdata->ctxt_switch_cnt = ctxt_switch_cnt;
	trdata->tcblist = TCBarray;
}

/** Debug hook: number of context switches performed so far. */
osword_t osint_get_ctxt_switch_cnt(void)
{
	osword_t count = ctxt_switch_cnt;
	return count;
}

#endif
