#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <string.h>

#include "list.h"
#include "types.h"
#include "sys_main.h"
#include "core_main.h"
#include "inter_main.h"

extern int  line_number;
extern char line_buffer[MAX_LINE_LEN];

//#define DEBUG_VERBOSE	1

/*
 * after parsing UPDATE_SCREEN_LINES, vsa will print a "." to the screen.
 */
#define UPDATE_SCREEN_LINES		1000000
#define UPDATE_SCREEN_EVENTS	10000

/* list of per-physical-CPU bookkeeping nodes, keyed by pcpu_id */
struct list_head pcpu_list;
/* domain_wake events collected during parsing, annotated afterwards */
struct list_head wake_event_list;
/*
 * There may exist the "do_yield" records, or may not. This depends on whether Xen
 * supports tracing the yielding event.
 * (A duplicate tentative definition of this list was removed here.)
 */
struct list_head yield_event_list;

/*
 * Note: we treat running_to_blocked and running_to_offline with the 
 * same method, this may be inappropriate.
 * 	2013-1-17
 */
/*
 * Dispatch table: maps the record-name token found in tokens[3] of a
 * trace line to its record-type id and handler function. The trailing 0
 * initializes the per-type occurrence counter printed by inter_working().
 */
struct rec_types type_array[MAX_TYPES] = {
	{"__enter_scheduler", TRACE_REC_SCHED, handle_enter_scheduler, 0},
	{"domain_wake", TRACE_REC_WAKE, handle_domain_wake, 0},
	{"do_yield", TRACE_REC_YIELD, handle_do_yield, 0},
	{"running_to_blocked", TRACE_REC_BLOCK, handle_running_to_blocked, 0},
	//running_to_offline shares the running_to_blocked handler (see note above)
	{"running_to_offline", TRACE_REC_OFFLINE, handle_running_to_blocked, 0},
	{"running_to_runnable", TRACE_REC_SCHED_OUT, handle_running_to_runnable, 0},
	{"lost_records", TRACE_REC_LOST_RECORDS, handle_lost_records, 0}
};

int total_wake_events = 0;	//number of domain_wake records queued (drives the progress display)
int total_yield_events = 0;	//number of do_yield records queued (drives the progress display)

/*
 * The following global variables record the possible warnings and errors
 * during parsing the Xen trace record file. After parsing (intermediate
 * part finished), all warnings and errors will be printed and shown
 * to the user.
 * Warnings pertain to the mistakes that will NOT affect the final analysis,
 * e.g., not annotated wake up events.
 * Errors pertain to the mistakes that will affect the final analysis.
 * 
 * Such warnings and errors will be very useful in parsing buggy trace files.
 */
int lost_record_number = 0;			//warning: "lost_records" lines seen in the trace file
int lost_begin_part_time_slice = 0;	//error: incomplete time slices dropped from further analysis
int failed_wake_events = 0;			//warning: wake events that found no matching time slice
int failed_yield_events = 0;		//warning: yield events that found no matching time slice

/*
 * inter_init: Initialize the intermediate module. 
 * It's about init the pcpu_list
 */
void inter_init( void )
{
	//give every module-global list a valid empty state before parsing starts
	INIT_LIST_HEAD( &yield_event_list );
	INIT_LIST_HEAD( &wake_event_list );
	INIT_LIST_HEAD( &pcpu_list );
}

/*
 * inter_exit: reclaim all the resources used in this intermediate part.
 * The wake/yield event lists are normally drained by the annotate_*
 * routines, so only the pcpu list is reclaimed here.
 */
void inter_exit( void )
{
	reclaim_pcpu_list();
//	reclaim_wake_event_list();
	//message typo fixed ("Resouces" -> "Resources")
	printf( "Resources used in the INTERMEDIATE module are reclaimed.\n" );
}

/*
 * inter_working: the main working routine of the intermediate module.
 * Parses the Xen trace file line by line, dispatches each recognized
 * record to its handler, assembles the leftover time slices, annotates
 * wake/yield events and prints the parsing statistics.
 * Returns 0 if the user chooses to continue analyzing, -1 to quit.
 */
int inter_working( void )
{
	int res, i;
	unsigned long long line_count;
	struct pcpu* ppcpu;
	char input[20], *ret;

	line_count=0;
	printf( "Parsing Xen trace file." );	//message typo fixed ("Paring")
	fflush( stdout );

	while( (res=read_line()) != TRACE_FILE_EOF ){
		//NOTE(review): ppcpu is NULL for pad lines (TSC 0); such lines are
		//presumed not to match any token below — confirm against trace format
		ppcpu = inter_update_pcpu_list();

		//tokens[3] carries the record name; dispatch to the matching handler
		for(i=0; i<MAX_TYPES; i++ )
			if( !strcmp( tokens[3], type_array[i].token_name ) )
				(*type_array[i].func)( i, ppcpu );

		line_count++;
		//print a progress "." every UPDATE_SCREEN_LINES lines
		if( (line_count % UPDATE_SCREEN_LINES) == 0 ){
			printf( "." );
			fflush( stdout );
		}
	}
	printf( "\n" );

	final_assemble_inter_time_slice();
	browse_pcpu_list();
	brief_core_data();

	annotate_wake_events();
	annotate_yield_events();

	//intermediate module finished working. give the statics on analyzing now.
	printf( "============================	Trace data parsing results stat begins	============================\n" );
	printf( "Total lines in the trace file: %llu\n", line_count );
	printf( "TYPE\t\tCOUNT\n" );
	//print all interested lines
	for( i=0; i<=TRACE_REC_LOST_RECORDS; i++ ){
		if( type_array[i].token_name != NULL )
			printf( "%s\t%d\n", type_array[i].token_name, type_array[i].count );
	}

	printf( "\nWarnings:\n" );
	printf( "Lost Records (lost_records line in Xen trace file): %d\n", 
		lost_record_number );
	printf( "Unannotated Wake Events: %d\n", failed_wake_events );
	printf( "Unannotated Yield Events: %d\n", failed_yield_events );

	printf( "\nErrors:\n" );
	printf( "Incomplete time slices (They will be missing in futher analysis): %d\n", 
		lost_begin_part_time_slice );

	printf( "\nLogical Checking\n" );
	//The core data struct should be established, verify it now
	switch( verify_core_data() ){
		case 0: 
			printf( "Core data structure is verified to correct!\n" );
			break;
		case -1:	//minor errors
			printf( "Core data structure contains minor errors that will not damage further analysis.\n" );
			break;
		case -2:
			printf( "Core data structure has serious error, your analysis will be INCORRECT.\n" );
			break;
	}		
	printf( "============================	Trace data parsing results stat ends	============================\n" );
	printf( "Continue analyzing? (Y/N)" );

	//fgets may fail (EOF/error on stdin); the old code ignored the return
	//value and then read an uninitialized buffer — treat failure as "quit"
	ret = fgets( input, 20, stdin );
	if( ret == NULL ) return -1;

	if( (input[0]=='Y') || (input[0]=='y') ) return 0;
	else return -1;	//quit
}

/*
 * inter_update_pcpu_list: update the pcpu list according to tokens[0]
 * (the "CPUxx" column) and track per-PCPU max/min TSC from tokens[1].
 * Returns the pcpu node for this line, or NULL for a pad line (TSC 0).
 * Exits the program when the line does not start with a CPU column.
 */
struct pcpu* inter_update_pcpu_list( void )
{
	unsigned long long temp_tsc;
	unsigned int pcpu_id;
	struct pcpu* ppcpu=NULL;
	struct list_head *iter;

	//the first token must look like "CPUxx"; otherwise this is not a Xen trace
	if( (tokens[0][0]!='C') || (tokens[0][1]!='P') || (tokens[0][2]!='U') ){
		printf( "Error when parsing the %d line:\n%s\n", line_number, line_buffer );
		printf( "Wrong Xen trace record file!\n" );
		exit( -1 );
	}

	//parse the CPU id in place; the previous fixed 3-byte strcpy buffer
	//overflowed for CPU ids with three or more digits
	pcpu_id = atoi( (char*)(tokens[0]+3) );

	temp_tsc = strtoull( tokens[1], NULL, 0 );

	//omit the pad line
	if( temp_tsc == 0L ) return NULL;

	//now find the PCPU node by pcpu_id, if cannot find, new one pcpu node
	list_for_each( iter, &pcpu_list )
	{
		ppcpu = list_entry( iter, struct pcpu, pcpu_list_elem );
		if (ppcpu->pcpu_id == pcpu_id ) break;
	}

	if( (ppcpu == NULL) || (iter==&pcpu_list) ){
		ppcpu = malloc( sizeof( struct pcpu ) );
		ASSERT( ppcpu );
		memset( ppcpu, 0, sizeof( struct pcpu ) );
		INIT_LIST_HEAD( &ppcpu->pcpu_list_elem );
		INIT_LIST_HEAD( &ppcpu->inter_time_slice_list );
		ppcpu->pcpu_id = pcpu_id;
		ppcpu->max_tsc = temp_tsc;
		ppcpu->min_tsc = temp_tsc;
		list_add_tail( &ppcpu->pcpu_list_elem, &pcpu_list );
	}else{
		//existing node: widen the observed TSC range
		if( temp_tsc > ppcpu->max_tsc )
			ppcpu->max_tsc = temp_tsc;

		if( temp_tsc < ppcpu->min_tsc )
			ppcpu->min_tsc = temp_tsc;
	}
	return ppcpu;
}

/*
 * browse_pcpu_list: dump every pcpu node (id plus observed TSC range)
 * to stdout. Inspection/debugging helper.
 */
void browse_pcpu_list( void )
{
	struct pcpu *cur;
	struct list_head *pos;

	printf( "PCPU list\n" );
	printf( "PCPU_ID\tMin_tsc\t\tMax_tsc\n" );
	list_for_each( pos, &pcpu_list ){
		cur = list_entry( pos, struct pcpu, pcpu_list_elem );
		printf( "%d\t%llu\t%llu\n", cur->pcpu_id, cur->min_tsc, cur->max_tsc );
	}
}

/*
 * reclaim_pcpu_list: free every pcpu node and leave pcpu_list empty.
 */
void reclaim_pcpu_list( void )
{
	struct list_head *pos, *tmp;
	struct pcpu *cur;

	//safe iteration: nodes are unlinked and freed while walking
	list_for_each_safe( pos, tmp, &pcpu_list ){
		cur = list_entry( pos, struct pcpu, pcpu_list_elem );
		list_del( pos );
		free( cur );
	}
	ASSERT( list_empty( &pcpu_list ) );
}

/*
 *
 * 	Below are the routines handling each specific record.
 *
 */
/*
 * The idea of records processing in VSA 2.0 is as follows:
 * --> domain_wake record: 
 *  A "wake_event" structure will be allocated to record
 *  this domain wake event, and this new structure will be added in wake_event_list.
 *  Note, after finished reading the trace record file (i.e., EOF encountered),
 *  the system will invoke a procedure to help all remaining "wake_event"s in
 *  the wake_event_list to find their corresponding time slices, and change the 
 *  schedule-in type from default SCHED_IN_NORMAL to SCHED_IN_WAKE.
 *
 * --> __enter_scheduler record: 
 *  At this line, there should be at least two 
 *  things happened: 1) one VCPU (say it <domi, vcpui>) is scheduled out; 
 *  2) another VCPU (say it <domj, vcpuj> is scheduled in.
 *  Therefore, two actions will be done for this __enter_scheduler record:
 *  1) for <domi, vcpui>, search the inter_time_slice_list of the PCPU, 
 *  where the record is found, for INTER_TIME_SLICE_BEGIN_HALF time slice record. 
 *  If not found and the inter_time_slice_list is NULL, create one time slice 
 *  (INTER_TIME_SLICE_FIRST status, this time slice will NEVER be complete anyway). 
 *  If found, the schedule-out time (TSC of __enter_scheduler record) is recorded
 *  in the intermediate time slice. And the status of this intermediate time slice
 *  is changed to INTER_TIME_SLICE_END_HALF status.
 *  2) for <domj, vcpuj>, create one intermediate time slice, record its start time
 *  (still the TSC of __enter_scheduler record) and set it's status as 
 *  INTER_TIME_SLICE_BEGIN_HALF. 
 *  For this new intermediate time slice, the reason for its scheduled-in should be
 *  SCHED_IN_NORMAL by default. 
 *
 *  --> running_to_runnable and running_to_blocked: 
 *  The appearance of this two records actually tell us how the time slice
 *  actually ends (i.e., a VCPU <dom_id, vcpu_id> is scheduled out).
 *  For running_to_runnable, the VCPU should be scheduled out for it running
 *  out of time slice (SCHED_OUT_NORMAL), yielding (SCHED_OUT_YIELD) or been
 *  preempted (SCHED_OUT_PREEMPT). 
 *  For running_to_blocked, the VCPU was scheduled out since it blocked at that
 *  moment (SCHED_OUT_BLOCK).
 *
 *  Therefore, the records of these two types are used to mark the reasons of 
 *  scheduled out for the VCPUs. For running_to_blocked, the time slice will have 
 *  the scheduled out type of SCHED_OUT_BLOCK. 
 *  Simple solution for running_to_runnable is that: we set the scheduled out
 *  reason for the time slice as SCHED_OUT_NORMAL. 
 *  After marking the reasons of VCPU scheduled out, the final version of time slice
 *  will be passed to the core data structure.
 *
 *  Here, marking the reason of VCPU scheduled out as SCHED_OUT_PREEMPT 
 *  is the most challenging! I leave the 
 *  consideration on this to the next stage of development in VSA 2.0.
 */

/*
 * find_inter_time_slice_by_id: scan a PCPU's intermediate time slice
 * list for the slice belonging to <domain_id, vcpu_id>.
 * Returns the matching slice, or NULL when none exists.
 */
struct inter_time_slice* find_inter_time_slice_by_id( struct pcpu*ppcpu, unsigned domain_id, unsigned vcpu_id )
{
	struct list_head *pos;
	struct inter_time_slice *slice;

	list_for_each( pos, &ppcpu->inter_time_slice_list ){
		slice = list_entry( pos, struct inter_time_slice, inter_time_slice_elem );
		if( slice->domain_id != domain_id )
			continue;
		if( slice->vcpu_id == vcpu_id )
			return slice;
	}
	return NULL;
}

/*
 * browse_inter_time_slice_records: debugging helper that dumps every
 * intermediate time slice currently queued on the given PCPU.
 */
void browse_inter_time_slice_records( struct pcpu *ppcpu )
{
	int sn = 0;	//running serial number for the printout
	struct inter_time_slice *slice;
	struct list_head *pos;

	printf( "in browse_inter_time_slice_records\n" );
	printf( "SN\tSCHED_IN_TSC\tSCHED_OUT_TSC\tSCHED_IN_TYPE\tSCHED_OUT_TYPE\tDOM_ID\tVCPU_ID\n" );
	list_for_each( pos, &ppcpu->inter_time_slice_list ){
		slice = list_entry( pos, struct inter_time_slice, inter_time_slice_elem );
		printf( "%d\t%llu\t%llu\t%d\t%d\t%d\t%d\n", sn++,
			slice->sched_in_tsc,
			slice->sched_out_tsc,
			slice->sched_in_type,
			slice->sched_out_type,
			slice->domain_id,
			slice->vcpu_id );
	}
	printf( "out browse_inter_time_slice_records\n" );
}

/*
 * create_time_slice_and_add: convert a finished intermediate time slice
 * into a final struct time_slice and hand it to the core data structure.
 * Ownership of the newly allocated slice passes to insert_time_slice().
 * Returns whatever insert_time_slice() returns.
 */
int create_time_slice_and_add( struct inter_time_slice* pinter_time_slice )
{
	struct time_slice * ptime_slice;

	//calloc zero-initializes in one step (replaces malloc + memset)
	ptime_slice = calloc( 1, sizeof *ptime_slice );
	ASSERT( ptime_slice );

	ptime_slice->domain_id = pinter_time_slice->domain_id;
	ptime_slice->vcpu_id = pinter_time_slice->vcpu_id;
	ptime_slice->start_tsc = pinter_time_slice->sched_in_tsc;
	ptime_slice->term_tsc = pinter_time_slice->sched_out_tsc;
	ptime_slice->pcpu_number = pinter_time_slice->pcpu_id;
	ptime_slice->sched_in_type = pinter_time_slice->sched_in_type;
	ptime_slice->sched_out_type = pinter_time_slice->sched_out_type;
	INIT_LIST_HEAD( &ptime_slice->time_slice_elem );

	//insert the time slice to core data structure now
	return insert_time_slice( pinter_time_slice->domain_id, pinter_time_slice->vcpu_id, ptime_slice );
}

/*
 * final_assemble_inter_time_slice: called once after all record lines are
 * processed. For each PCPU, the last intermediate time slice still queued
 * is closed at the PCPU's max observed TSC and committed to the core data
 * structure; any additional leftovers are counted as incomplete slices in
 * lost_begin_part_time_slice.
 */
void final_assemble_inter_time_slice( void )
{
	struct list_head *iter_pcpu, *iter_inter_time_slice;
	struct pcpu *ppcpu;
	struct inter_time_slice *pinter_time_slice = NULL;
	int inter_time_slice_count;

	//browse the PCPU list to finish all intermediate time slices
    list_for_each( iter_pcpu, &pcpu_list )
    {
        ppcpu = list_entry( iter_pcpu, struct pcpu, pcpu_list_elem );
		inter_time_slice_count = 0;
		//count the leftovers; after this loop pinter_time_slice points at the LAST one
		list_for_each( iter_inter_time_slice, &ppcpu->inter_time_slice_list ){
			inter_time_slice_count ++;
			pinter_time_slice = list_entry( iter_inter_time_slice, struct inter_time_slice, inter_time_slice_elem );
		}
		if( inter_time_slice_count == 0 ){
//			printf( "No remaining intermediate time slice on PCPU %d.\n", ppcpu->pcpu_id );
			continue;
		}

		/*
		 * if there are more than one intermediate time slice remaining
		 * in the list after parsing the whole trace file, such errors will
		 * be recorded in the global var of lost_begin_part_time_slice.
		 * NOTE(review): only the last slice is completed and freed below;
		 * the earlier leftover slices appear to stay on the list — confirm
		 * whether they are reclaimed elsewhere.
		 */
		if( inter_time_slice_count > 1 ){
			lost_begin_part_time_slice += (inter_time_slice_count - 1);
#ifdef DEBUG_VERBOSE
			browse_inter_time_slice_records( ppcpu );
#endif
		}
		//close the last slice at the latest TSC observed on this PCPU
		pinter_time_slice->sched_out_tsc = ppcpu->max_tsc;
		pinter_time_slice->sched_out_type = SCHED_OUT_NORMAL;

#ifdef DEBUG_VERBOSE
		printf( "%llu\t%llu\t%d\t%d\t%d\t%d\n",
			pinter_time_slice->sched_in_tsc, 
			pinter_time_slice->sched_out_tsc, 
			pinter_time_slice->sched_in_type, 
			pinter_time_slice->sched_out_type, 
			pinter_time_slice->domain_id, 
			pinter_time_slice->vcpu_id );
#endif

		//create and add the time slice now
		create_time_slice_and_add( pinter_time_slice );

		//delete the intermediate time slice
		list_del( &pinter_time_slice->inter_time_slice_elem );
		free( pinter_time_slice );
    }
}

/*
 * annotate the wake_event records of wake_event list to the time slices.
 * Called at the final stage: successfully annotated wake events are freed
 * immediately, failures are counted in failed_wake_events and reclaimed
 * at the end. A textual progress percentage is printed while working.
 */
void annotate_wake_events( void )
{
	struct list_head *iter, *iter_next;
	struct wake_event *pwake_event;
	int annotated_wake_events;
	double percentage;

	if( total_wake_events == 0 ) return;
	printf( "Annotating wake events (there are %d wake events):     0%%", total_wake_events );
	fflush( stdout );
	failed_wake_events = 0;
	annotated_wake_events = 0;

	//safe iteration: annotated events are unlinked and freed inside the loop
    list_for_each_safe( iter, iter_next, &wake_event_list )
    {
		pwake_event = list_entry( iter, struct wake_event, wake_event_list_elem );
		if( annotate_one_wake_event( pwake_event ) == 0 ){
			list_del( iter );
			free( pwake_event );
		}else
			failed_wake_events ++;

		annotated_wake_events ++;
		//refresh the on-screen percentage every UPDATE_SCREEN_EVENTS events
		if( (annotated_wake_events % UPDATE_SCREEN_EVENTS) == 0 ) {
			percentage = (double)((double)annotated_wake_events/(double)total_wake_events);
			printf( "\b\b\b\b\b" );	//back up over the previous "xx.x%" field
			if (percentage < 0.1) printf( " " );
			printf( "%3.1f%%", percentage * 100 );
			fflush( stdout );
		}
	}
	printf( "\b\b\b\b\b\b" );
	printf( "100.0%%" );
	printf( "\n" );

	//events that never found their time slice are dumped and freed here
	if( failed_wake_events ) reclaim_wake_event_list();
}

/*
 * annotate one wake_event record to the time slice.
 * A wake before the very first slice, or between two slices, sets the next
 * slice's sched_in_type to SCHED_IN_WAKE (keeping only the latest wake);
 * a wake inside a slice only marks is_woken_inside.
 * Return: 
 *      -1 means the wake event not used (either missing or duplicated).
 *      0 means succeed.
 */
int annotate_one_wake_event( struct wake_event *pwake_event )
{
    struct domain *pdomain;
    struct vcpu* pvcpu;
    struct list_head *iter, *iter_next;
    struct time_slice *ptime_slice, *ptime_slice_next=NULL;

    //resolve the target <domain, vcpu>; bail out if either is unknown
    pdomain = find_domain_by_id( pwake_event->domain_id );
    if( !pdomain ) goto failed;

    pvcpu = find_vcpu_by_id( pdomain, pwake_event->vcpu_id );
    if( !pvcpu ) goto failed;

	//see if it is the wake event for the very first time slice
	if( !list_empty( &pvcpu->time_slice_list ) ){
		ptime_slice = list_entry( pvcpu->time_slice_list.next, struct time_slice, time_slice_elem );
		if (pwake_event->wake_tsc < ptime_slice->start_tsc){
				ptime_slice->sched_in_type = SCHED_IN_WAKE;
	            ptime_slice->sched_in.wake_data.wake_tsc = pwake_event->wake_tsc;
        	    ptime_slice->sched_in.wake_data.wake_PCPU = pwake_event->pcpu_id;
				return 0;
		}
	}

    //browse the time slice list to find the annotation object
    //NOTE(review): when iter is the last element, iter_next is the list head
    //sentinel, so ptime_slice_next then decodes bogus fields from the head;
    //the comparisons below may read garbage in that case — confirm.
    list_for_each_safe( iter, iter_next, &pvcpu->time_slice_list )
    {
        ptime_slice = list_entry( iter, struct time_slice, time_slice_elem );
        ptime_slice_next = list_entry( iter_next, struct time_slice, time_slice_elem );

        //see if the wake event resides between these two time slices
        if( (pwake_event->wake_tsc > ptime_slice->term_tsc) && (pwake_event->wake_tsc < ptime_slice_next->start_tsc) ){
            //if the VCPU was woken multiple times, only use the last wake event.
            if( (ptime_slice_next->sched_in_type == SCHED_IN_NORMAL) || 
				((ptime_slice_next->sched_in_type == SCHED_IN_WAKE ) &&
				(ptime_slice_next->sched_in.wake_data.wake_tsc < pwake_event->wake_tsc)) ){
                ptime_slice_next->sched_in_type = SCHED_IN_WAKE;
                ptime_slice_next->sched_in.wake_data.wake_tsc =
                    pwake_event->wake_tsc;
                ptime_slice_next->sched_in.wake_data.wake_PCPU =
                    pwake_event->pcpu_id;
#ifdef DEBUG_VERBOSE
    			printf( "Wake event <DomID:%d, VCPUID:%d> at TSC:%llu annotating.\n", 
					pwake_event->domain_id,
		            pwake_event->vcpu_id,
		            pwake_event->wake_tsc );
#endif
                return 0;
			}
        }

		//wake inside? (the event falls within the next slice's own run time)
        if( (pwake_event->wake_tsc > ptime_slice_next->start_tsc) && (pwake_event->wake_tsc < ptime_slice_next->term_tsc) ){
#ifdef DEBUG_VERBOSE
    		printf( "Wake event <DomID:%d, VCPUID:%d> at TSC:%llu wake inside at time slice <%llu, %llu>.\n", 
				pwake_event->domain_id,
            	pwake_event->vcpu_id,
	            pwake_event->wake_tsc,
				ptime_slice_next->start_tsc,
				ptime_slice_next->term_tsc );
#endif
            ptime_slice_next->is_woken_inside = 1;
            return 0;
        }
    }
//cannot find the annotate object
    return -1;

failed:
    printf( "Wake event <DomID:%d, VCPUID:%d> at TSC:%llu cannot find domain or VCPU struct.\n", pwake_event->domain_id,
            pwake_event->vcpu_id,
            pwake_event->wake_tsc );
    return -1;
}

/*
 * reclaim_wake_event_list: free every remaining wake event, optionally
 * dumping each one first when DEBUG_VERBOSE is enabled.
 */
void reclaim_wake_event_list( void )
{
	struct list_head *pos, *tmp;
	struct wake_event *ev;

#ifdef DEBUG_VERBOSE
	int sn = 0;
	printf( "SN\tPCPU_ID\tDOM_ID\tVCPU_ID\tWAKE_TSC\n" );
#endif

	list_for_each_safe( pos, tmp, &wake_event_list ){
		ev = list_entry( pos, struct wake_event, wake_event_list_elem );
#ifdef DEBUG_VERBOSE
		printf( "%d\t%d\t%d\t%d\t%llu\n", sn++,
				ev->pcpu_id,
				ev->domain_id,
				ev->vcpu_id,
				ev->wake_tsc );
#endif
		list_del( pos );
		free( ev );
	}
}

/*
 * annotate_yield_events: annotate the queued do_yield events onto their
 * time slices, mirroring annotate_wake_events(). Successfully annotated
 * events are freed immediately; failures are counted in
 * failed_yield_events and the leftover list is reclaimed at the end.
 */
void annotate_yield_events( void )
{
	struct list_head *iter, *iter_next;
	struct yield_event *pyield_event;
	int annotated_yield_events;
	double percentage;

	if( total_yield_events == 0 ) return;
	printf( "Annotating yield events (there are %d yield events):     0%%", total_yield_events );
	//flush so the progress header appears immediately (consistent with
	//annotate_wake_events, which flushed here; this one did not)
	fflush( stdout );
	failed_yield_events = 0;
	annotated_yield_events = 0;

	//safe iteration: annotated events are unlinked and freed inside the loop
    list_for_each_safe( iter, iter_next, &yield_event_list )
    {
		pyield_event = list_entry( iter, struct yield_event, yield_event_list_elem );
		if( annotate_one_yield_event( pyield_event ) == 0 ){
			list_del( iter );
			free( pyield_event );
		}else
			failed_yield_events ++;

		annotated_yield_events ++;
		//refresh the on-screen percentage every UPDATE_SCREEN_EVENTS events
		if( (annotated_yield_events % UPDATE_SCREEN_EVENTS) == 0 ){
			percentage = (double)((double)annotated_yield_events/(double)total_yield_events);
			printf( "\b\b\b\b\b" );
			if( percentage < 0.1 ) printf( " " );

			printf( "%3.1f%%", percentage * 100 );
			fflush( stdout );
		}
	}
	printf( "\b\b\b\b\b" );
	printf( "100.0%%" );
	printf( "\n" );

	if( failed_yield_events ) reclaim_yield_event_list();
}

/*
 * annotate one yield_event record to the time slice it falls inside.
 * The first yield turns a SCHED_OUT_NORMAL slice into SCHED_OUT_YIELD
 * (count 1); further yields inside the same slice bump the count and
 * keep the latest yield TSC.
 * Return:
 *      -1 means the yield event could not be placed.
 *      0 means succeed.
 */
int annotate_one_yield_event( struct yield_event *pyield_event )
{
    struct domain *pdomain;
    struct vcpu* pvcpu;
    struct list_head *iter;
    struct time_slice *ptime_slice = NULL;

    //resolve the target <domain, vcpu>; bail out if either is unknown
    pdomain = find_domain_by_id( pyield_event->domain_id );
    if( !pdomain ) goto failed;

    pvcpu = find_vcpu_by_id( pdomain, pyield_event->vcpu_id );
    if( !pvcpu ) goto failed;

	//see if it is the wake event for the very first time slice
	if( list_empty( &pvcpu->time_slice_list ) ) return -1;

    //browse the time slice list to find the annotation object
    list_for_each( iter, &pvcpu->time_slice_list )
    {
        ptime_slice = list_entry( iter, struct time_slice, time_slice_elem );

        //see if the yield event resides in this time slice
        if( (pyield_event->yield_tsc > ptime_slice->start_tsc) && 
			(pyield_event->yield_tsc < ptime_slice->term_tsc) ){
			if( ptime_slice->sched_out_type == SCHED_OUT_NORMAL ){
				//first yield in this slice: switch the scheduled-out reason
				ptime_slice->sched_out_type = SCHED_OUT_YIELD;
				ptime_slice->sched_out.yield_data.yield_count = 1;
				ptime_slice->sched_out.yield_data.yield_tsc = pyield_event->yield_tsc;
			}else if( ptime_slice->sched_out_type == SCHED_OUT_YIELD ){
				ptime_slice->sched_out.yield_data.yield_count ++;
				if( pyield_event->yield_tsc > ptime_slice->sched_out.yield_data.yield_tsc )	//update yield_tsc
					ptime_slice->sched_out.yield_data.yield_tsc = pyield_event->yield_tsc;
			}
			return 0;
		}
	}
//cannot find the annotate object
    return -1;

failed:
    printf( "Yield event <DomID:%d, VCPUID:%d> at TSC:%llu cannot find domain or VCPU struct.\n", pyield_event->domain_id,
            pyield_event->vcpu_id,
            pyield_event->yield_tsc );
    return -1;
}

/*
 * reclaim_yield_event_list: free every remaining yield event, optionally
 * dumping each one first when DEBUG_VERBOSE is enabled.
 */
void reclaim_yield_event_list( void )
{
	struct list_head *iter, *iter_safe;
	struct yield_event *pyield_event;

#ifdef DEBUG_VERBOSE
	int sn = 0;
	printf( "SN\tPCPU_ID\tDOM_ID\tVCPU_ID\tYIELD_TSC\n" );
#endif

    list_for_each_safe( iter, iter_safe, &yield_event_list )
    {
		pyield_event = list_entry( iter, struct yield_event, yield_event_list_elem );
#ifdef DEBUG_VERBOSE
		//was pyield_event->wake_tsc, which is a wake_event field (yield events
		//carry yield_tsc — see annotate_one_yield_event) and broke this build
		printf( "%d\t%d\t%d\t%d\t%llu\n", sn++,
				pyield_event->pcpu_id,
				pyield_event->domain_id,
				pyield_event->vcpu_id,
				pyield_event->yield_tsc );
#endif
		list_del( iter );
		free( pyield_event );
	}
}

/*
 * The following functions process Xen trace lines.
 */
/*
 * handle_enter_scheduler: process one __enter_scheduler trace line.
 * Closes the BEGIN_HALF intermediate time slice of the scheduled-out VCPU
 * (or fabricates a first dummy slice when the PCPU has none yet) and then
 * opens a new BEGIN_HALF slice for the scheduled-in VCPU. See the long
 * record-processing comment earlier in this file for the overall scheme.
 */
void handle_enter_scheduler( int rec_type, struct pcpu* ppcpu )
{
	unsigned sched_out_domain_id, sched_out_vcpu_id;
	unsigned sched_in_domain_id, sched_in_vcpu_id;
	struct inter_time_slice *pinter_time_slice = NULL;
	unsigned long long current_tsc;

	type_array[rec_type].count++;

	//token layout: [5]/[7] = outgoing dom/vcpu, [9]/[11] = incoming dom/vcpu, [1] = TSC
	sched_out_domain_id = strtoul( tokens[5], NULL, 16 );
	sched_out_vcpu_id = strtoul( tokens[7], NULL, 16 )& 0xff;

	sched_in_domain_id = strtoul( tokens[9], NULL, 16 );
	sched_in_vcpu_id = strtoul( tokens[11], NULL, 16 ) & 0xff;
	current_tsc = strtoull( tokens[1], NULL, 0 );

	/*
 	 * handle the schedule-out part
 	 */
	if( list_empty( &ppcpu->inter_time_slice_list ) ){
		//create the first dummy time slice
		//(starts at the PCPU's min TSC; it can never be complete)
		pinter_time_slice = malloc( sizeof(struct inter_time_slice) );
		ASSERT( pinter_time_slice );
		memset( pinter_time_slice, 0, sizeof(struct inter_time_slice) );
		pinter_time_slice->inter_time_slice_status = INTER_TIME_SLICE_END_HALF;
		pinter_time_slice->sched_in_tsc = ppcpu->min_tsc;
		pinter_time_slice->sched_in_type = SCHED_IN_NORMAL;
		pinter_time_slice->pcpu_id = ppcpu->pcpu_id;

		pinter_time_slice->sched_out_tsc = current_tsc;
		pinter_time_slice->domain_id = sched_out_domain_id;
		pinter_time_slice->vcpu_id = sched_out_vcpu_id;
		list_add_tail( &pinter_time_slice->inter_time_slice_elem, &ppcpu->inter_time_slice_list );
		goto sched_in_part;
	}

	//else, should find the BEGIN PART intermediate time slice
	pinter_time_slice = find_inter_time_slice_by_id( ppcpu, sched_out_domain_id, sched_out_vcpu_id );
	if( pinter_time_slice == NULL ){
#ifdef DEBUG_VERBOSE
		printf( "in handle_enter_scheduler: cannot find the BEGIN PART intermediate time slice\n" );
#endif
		//what if the BEGIN PART cannot find?
		//the schedule-out half is silently dropped in that case
		goto sched_in_part;
	}
	if( pinter_time_slice->inter_time_slice_status != INTER_TIME_SLICE_BEGIN_HALF ){
#ifdef DEBUG_VERBOSE
		printf( "in handle_enter_scheduler: inter time slice status is not INTER_TIME_SLICE_BEGIN_HALF\n" );
#endif
		goto sched_in_part;
	}

	//close the outgoing VCPU's slice at this record's TSC
	pinter_time_slice->sched_out_tsc = current_tsc;
	pinter_time_slice->inter_time_slice_status = INTER_TIME_SLICE_END_HALF;
	
	/*
 	 * handle the succeeding schedule-in part
 	 */
sched_in_part:
	pinter_time_slice = malloc( sizeof(struct inter_time_slice) );
	ASSERT( pinter_time_slice );
	memset( pinter_time_slice, 0, sizeof(struct inter_time_slice) );
	pinter_time_slice->pcpu_id = ppcpu->pcpu_id;
	pinter_time_slice->inter_time_slice_status = INTER_TIME_SLICE_BEGIN_HALF;
	pinter_time_slice->sched_in_tsc = current_tsc;
	pinter_time_slice->domain_id = sched_in_domain_id;
	pinter_time_slice->vcpu_id = sched_in_vcpu_id;

	//by default, the reason for the VCPU was schedule in is SCHED_IN_NORMAL.
	//Such sched_in_type will be changed when annotating the wake events.
	pinter_time_slice->sched_in_type = SCHED_IN_NORMAL;

	list_add_tail( &pinter_time_slice->inter_time_slice_elem, &ppcpu->inter_time_slice_list );
}

/*
 * handle_running_to_blocked: mark the END_HALF intermediate time slice of
 * <domain_id, vcpu_id> as scheduled out for blocking (SCHED_OUT_BLOCK),
 * then finalize it into the core data structure.
 * Also used for running_to_offline records (see type_array).
 */
void handle_running_to_blocked( int rec_type, struct pcpu* ppcpu )
{
	unsigned domain_id, vcpu_id;
	struct inter_time_slice *pinter_time_slice = NULL;

	type_array[rec_type].count++;

	//tokens[5] packs the domain id in the high 16 bits, the vcpu id in the low byte
	domain_id = (unsigned int)(strtoul(tokens[5], NULL, 16)>>16);
	vcpu_id = (unsigned int)(strtoul(tokens[5], NULL, 16)&0xFF);

	pinter_time_slice = find_inter_time_slice_by_id( ppcpu, domain_id, vcpu_id );
	//no matching slice: nothing to finalize. The old guard only returned when
	//the slice list was non-empty, so an empty list dereferenced NULL below.
	if ( pinter_time_slice == NULL ){
#ifdef DEBUG_VERBOSE
		if( !list_empty( &ppcpu->inter_time_slice_list ) ){
			printf( "in handle_running_to_blocked, Cannot find intermediate time slice, at line number:%d.\n", line_number );
			browse_inter_time_slice_records( ppcpu );
		}
#endif
		return;
	}
	//unexpected status only warns in verbose builds; the slice is finalized anyway
	if( pinter_time_slice->inter_time_slice_status != INTER_TIME_SLICE_END_HALF ){
#ifdef DEBUG_VERBOSE
		printf( "in handle_running_to_blocked: the intermediate time slice is not in INTER_TIME_SLICE_END_HALF status, at linenumber:%d.\n", line_number );
		browse_inter_time_slice_records( ppcpu );
#endif
	}
	pinter_time_slice->sched_out_type = SCHED_OUT_BLOCK;
	
	//Now, the intermediate time slice is completed. Print it out.
#ifdef DEBUG_VERBOSE
	printf( "%llu\t%llu\t%d\t%d\t%d\t%d\n", 
		pinter_time_slice->sched_in_tsc, 
		pinter_time_slice->sched_out_tsc, 
		pinter_time_slice->sched_in_type, 
		pinter_time_slice->sched_out_type, 
		pinter_time_slice->domain_id, 
		pinter_time_slice->vcpu_id );
#endif

	//create and add the time slice now
	create_time_slice_and_add( pinter_time_slice );

	//drop the intermediate time slice now
	list_del( &pinter_time_slice->inter_time_slice_elem );
	free( pinter_time_slice );
}

/*
 * handle_running_to_runnable: mark the END_HALF intermediate time slice
 * of <domain_id, vcpu_id> as scheduled out normally (SCHED_OUT_NORMAL),
 * then finalize it into the core data structure.
 */
void handle_running_to_runnable( int rec_type, struct pcpu* ppcpu )
{
	unsigned domain_id, vcpu_id;
	struct inter_time_slice *pinter_time_slice = NULL;

	type_array[rec_type].count++;

	//tokens[5] packs the domain id in the high 16 bits, the vcpu id in the low byte
	domain_id = (unsigned int)(strtoul(tokens[5], NULL, 16)>>16);
	vcpu_id = (unsigned int)(strtoul(tokens[5], NULL, 16)&0xFF);

	pinter_time_slice = find_inter_time_slice_by_id( ppcpu, domain_id, vcpu_id );
	//no matching slice: nothing to finalize. The old guard only returned when
	//the slice list was non-empty, so an empty list dereferenced NULL below.
	if ( pinter_time_slice == NULL ){
#ifdef DEBUG_VERBOSE
		if( !list_empty( &ppcpu->inter_time_slice_list ) ){
			printf( "in handle_running_to_runnable, Cannot find intermediate time slice, at line number:%d.\n", line_number );
			browse_inter_time_slice_records( ppcpu );
		}
#endif
		return;
	}
	//unexpected status only warns in verbose builds; the slice is finalized anyway
	if( pinter_time_slice->inter_time_slice_status != INTER_TIME_SLICE_END_HALF ){
#ifdef DEBUG_VERBOSE
		printf( "in handle_running_to_runnable: the intermediate time slice is not in INTER_TIME_SLICE_END_HALF status, at line number:%d.\n", line_number );
		browse_inter_time_slice_records( ppcpu );
#endif
	}
	pinter_time_slice->sched_out_type = SCHED_OUT_NORMAL;
	
	//Now, the intermediate time slice is completed. Print it out.
#ifdef DEBUG_VERBOSE
	printf( "%llu\t%llu\t%d\t%d\t%d\t%d\n", 
		pinter_time_slice->sched_in_tsc, 
		pinter_time_slice->sched_out_tsc, 
		pinter_time_slice->sched_in_type, 
		pinter_time_slice->sched_out_type, 
		pinter_time_slice->domain_id, 
		pinter_time_slice->vcpu_id );
#endif

	//create and add the time slice now
	create_time_slice_and_add( pinter_time_slice );

	//drop the intermediate time slice now
	list_del( &pinter_time_slice->inter_time_slice_elem );
	free( pinter_time_slice );
}

/*
 * handle_domain_wake: process a "domain_wake" trace line by queueing a
 * wake_event on wake_event_list for later annotation onto time slices.
 */
void handle_domain_wake( int rec_type, struct pcpu* ppcpu )
{
	struct wake_event* ev;

	type_array[rec_type].count++;

	ev = malloc( sizeof(struct wake_event) );
	ASSERT( ev );
	memset( ev, 0, sizeof(struct wake_event) );

	//column layout: tokens[5] = domain id, tokens[7] = vcpu id, tokens[1] = TSC
	ev->domain_id = (unsigned int)strtoul( tokens[5], NULL, 16 );
	ev->vcpu_id = (unsigned int)strtoul( tokens[7], NULL, 16 )&0xff;
	ev->wake_tsc = strtoull( tokens[1], NULL, 0 );
	ev->pcpu_id = ppcpu->pcpu_id;

	INIT_LIST_HEAD( &ev->wake_event_list_elem );
	list_add_tail( &ev->wake_event_list_elem, &wake_event_list );
	total_wake_events ++;
}

/*
 * handle_do_yield: process a "do_yield" trace line, for example:
 * CPU0  33344662285649 (+    2642)  do_yield          [ domid = 0x00000008, edomid = 0x00000000 ]
 * Rather similar to handle_domain_wake: build a yield_event and queue it
 * on yield_event_list for later annotation.
 */
void handle_do_yield( int rec_type, struct pcpu* ppcpu )
{
	struct yield_event* ev;

	type_array[rec_type].count++;

	ev = malloc( sizeof(struct yield_event) );
	ASSERT( ev );
	memset( ev, 0, sizeof(struct yield_event) );

	//column layout: tokens[5] = domain id, tokens[7] = vcpu id, tokens[1] = TSC
	ev->domain_id = (unsigned int)strtoul( tokens[5], NULL, 16 );
	ev->vcpu_id = (unsigned int)strtoul( tokens[7], NULL, 16 )&0xff;
	ev->yield_tsc = strtoull( tokens[1], NULL, 0 );
	ev->pcpu_id = ppcpu->pcpu_id;

	INIT_LIST_HEAD( &ev->yield_event_list_elem );
	list_add_tail( &ev->yield_event_list_elem, &yield_event_list );
	total_yield_events ++;
}

/*
 * handle_lost_records: account for a "lost_records" line in the trace.
 * Only the per-type counter and the global lost_record_number are bumped;
 * the warning total is reported after parsing finishes.
 * VSA 2.0 will compress such warnings when the core data structure is
 * correct and verifiable.
 */
void handle_lost_records( int rec_type, struct pcpu* ppcpu )
{
	type_array[rec_type].count++;
	lost_record_number++;
}

