#include "processor.h"


// Dispatch Stage of a trace processor.
//
// Attempts to dispatch the current trace (the "dispatch bundle" held in
// DISPATCH[0..trace_width-1]) as a single unit into one free processing
// element (PE). The whole trace either dispatches this cycle or stalls in
// place; a stall is implemented simply by returning from this function
// before any architectural state is modified.
//
// Resources that must ALL be available before any state is changed:
//   * a valid dispatch bundle (DISPATCH[0].valid),
//   * a free processing element (with, by assertion, a free Trace Queue slot),
//   * enough integer AND floating-point Active List entries for the whole
//     trace (the two Active Lists are kept redundant),
//   * enough LQ/SQ entries for the trace's loads and stores.
void processor::trace_dispatch() {
	unsigned int i;
	unsigned int bundle_load, bundle_store;	// LQ/SQ entries required by this trace
	unsigned int index;			// payload-buffer index of the current instruction
	bool load_flag;
	bool store_flag;
	bool A_ready;				// source A value available at dispatch?
	bool B_ready;				// source B value available at dispatch?
	db_t *actual;				// functional-simulator (oracle) record for a store
	unsigned int oracle_address;
	bool A_live_in, B_live_in;		// source comes from a global (trace live-in) register?
	unsigned int A_tag, B_tag;		// wakeup tag: global phys reg if live-in, else local reg

	int trace_width;			// number of instructions in this trace
	int processor_to_serve;			// index of the free PE chosen for this trace
	unsigned int starting_pc;

	// To model delay in syncing of register files, we update these fields BEFORE dispatch.
	REN_INT->updateTimeToGo();
	REN_FP->updateTimeToGo();

	// First stall condition: There isn't a dispatch bundle.
	if (!DISPATCH[0].valid)
		return;

	index = DISPATCH[0].index;
	trace_width = PAY.buf[index].trace_length;

	// Sanity: the per-instruction trace length must agree with the trace buffer's record.
	assert(trace_width == PAY.trace_buf[PAY.buf[index].trace_ID].length);
	starting_pc = PAY.buf[index].pc;

	// Second stall condition: not enough resources for the whole bundle.
	// Each check below stalls the Dispatch Stage by returning early, leaving
	// the bundle in DISPATCH[] untouched so it is retried next cycle.

	// Condition 2.a: No processing element is free (and hence no space in the
	// trace-level structures). Scan for the first free PE.

	processor_to_serve = -1;
	for(i=0; i<num_pe; i++){
		if (PE[i]->is_free()){
			processor_to_serve = i;
			break;
		}
	}


	if(processor_to_serve == -1){
		// If every PE is busy, the Trace Queue must also be full (one trace per PE).
		assert(TQ.trace_queue_full());
		return;		//no processing elements are free
	}

	// Condition 2.b: Either Active List lacks entries for the whole trace.
	if(REN_INT->stall_dispatch(trace_width) || REN_FP->stall_dispatch(trace_width))
		return;

	// Condition 2.c: Count the LQ/SQ entries this trace needs, then check the LSU.
	bundle_load = 0;
	bundle_store = 0;
	for (i = 0; i < trace_width; i++) {
	
		assert(DISPATCH[i].valid);
		index = DISPATCH[i].index;
		// Check LQ/SQ requirement.
		if (IS_LOAD(PAY.buf[index].flags))
		{
			bundle_load++;
		}
		else if (IS_STORE(PAY.buf[index].flags))
		{
			// Special cases:
			// S_S and S_D are split-stores, i.e., they are split into an addr-op and a value-op.
			// The two ops share a SQ entry to "rejoin". Therefore, only the first op should check
			// for and allocate a SQ entry; the second op should inherit the same entry.
			if (!PAY.buf[index].split_store || PAY.buf[index].upper)
				bundle_store++;
		}
	}

	if (LSU.stall(bundle_load, bundle_store))
		return;



	//
	// Making it this far means we have all the required resources to dispatch the dispatch bundle.
	//

	// Allocate a Trace Queue entry for the whole trace and claim the chosen PE.
	index = DISPATCH[0].index;
	unsigned int tq_index = TQ.push(index, trace_width, processor_to_serve, starting_pc);
	PE[processor_to_serve]->clear_free();	//make processing element busy


	// Per-instruction dispatch: allocate Active List entries, compute source
	// readiness/wakeup tags, steer into an Issue Queue (or complete immediately),
	// and dispatch memory ops into the LQ/SQ.
	for(i=0; i<trace_width; i++){
		assert(DISPATCH[i].valid);
		index = DISPATCH[i].index;

		load_flag = IS_LOAD(PAY.buf[index].flags);
		// NOTE(review): this condition, !(split_store && upper), is the inversion of the
		// SQ-allocation condition used above and below (!split_store || upper). Presumably
		// the AL store flag is deliberately carried by the *lower* (second) half of a
		// split-store so the store commits when the pair completes -- TODO confirm.
		store_flag = IS_STORE(PAY.buf[index].flags) && !(PAY.buf[index].split_store && PAY.buf[index].upper);

		// An instruction's destination is "global" if it is a trace live-out mapped in
		// the global (shared) register file; only those consume a rename/AL destination slot.
		bool global_int_dst = PAY.buf[index].C_valid && PAY.buf[index].C_int && (PAY.buf[index].C_hierarchy & GLOBAL_REGISTER);
		bool global_float_dst = PAY.buf[index].C_valid && !PAY.buf[index].C_int && (PAY.buf[index].C_hierarchy & GLOBAL_REGISTER);

		// Allocate redundant entries in both Active Lists (same instruction, same PE).
		PAY.buf[index].AL_index_int = REN_INT->dispatch_inst(global_int_dst, PAY.buf[index].C_log_reg, PAY.buf[index].C_phys_reg, load_flag, store_flag, false, PAY.buf[index].pc, processor_to_serve);

		PAY.buf[index].AL_index_fp = REN_FP->dispatch_inst(global_float_dst, PAY.buf[index].C_log_reg, PAY.buf[index].C_phys_reg, load_flag, store_flag, false, PAY.buf[index].pc, processor_to_serve);

		// Default for invalid (unused) sources: ready, so they never block issue.
		A_ready = true;
		B_ready = true;
		// NOTE(review): these use '==' while every other hierarchy test in this function
		// uses a bitmask '&'. If *_hierarchy can have multiple bits set, a source that is
		// both global and local would not be treated as a live-in here -- verify intent.
		A_live_in = (PAY.buf[index].A_hierarchy == GLOBAL_REGISTER);
		B_live_in = (PAY.buf[index].B_hierarchy == GLOBAL_REGISTER);
		// Wakeup tag: global physical register for live-ins, local register otherwise.
		A_tag = A_live_in ? PAY.buf[index].A_phys_reg : PAY.buf[index].A_loc_reg;
		B_tag = B_live_in ? PAY.buf[index].B_phys_reg : PAY.buf[index].B_loc_reg;

		// Source A readiness: global sources consult the rename map's ready bits;
		// local (intra-trace) sources are always produced later within the PE, so not ready.
		if (PAY.buf[index].A_valid)
		{
			if(PAY.buf[index].A_hierarchy & GLOBAL_REGISTER){
				if(PAY.buf[index].A_int)
					A_ready = REN_INT->is_ready(PAY.buf[index].A_phys_reg);
				else
					A_ready = REN_FP->is_ready(PAY.buf[index].A_phys_reg);
			}
			else{
				assert(PAY.buf[index].A_hierarchy & LOCAL_REGISTER);
				A_ready = false;
			}
		}

		// Source B readiness: same policy as source A.
		if (PAY.buf[index].B_valid)
		{
			if(PAY.buf[index].B_hierarchy & GLOBAL_REGISTER){
				if(PAY.buf[index].B_int)
					B_ready = REN_INT->is_ready(PAY.buf[index].B_phys_reg);
				else
					B_ready = REN_FP->is_ready(PAY.buf[index].B_phys_reg);
			}
			else{
				assert(PAY.buf[index].B_hierarchy & LOCAL_REGISTER);
				B_ready = false;
			}
		}

		// A global destination is now in flight: clear its ready bit so
		// consumers dispatched later wait for this instruction to execute.
		if (PAY.buf[index].C_valid)
		{
			if(PAY.buf[index].C_hierarchy & GLOBAL_REGISTER){
				if (PAY.buf[index].C_int)
					REN_INT->clear_ready(PAY.buf[index].C_phys_reg);

				else
					REN_FP->clear_ready(PAY.buf[index].C_phys_reg);
			}
		}


		// Steer the instruction by its pre-computed Issue Queue selection.
		switch (PAY.buf[index].iq) {
		case SEL_IQ_INT:
			// Integer instruction: into the chosen PE's integer Issue Queue.
			PE[processor_to_serve]->IQ_INT.dispatch( index,
													DISPATCH[i].branch_mask,
													PAY.buf[index].A_valid,
													A_ready,
													A_live_in,
													A_tag,
													PAY.buf[index].B_valid,
													B_ready,
													B_live_in,
													B_tag);

			break;

		case SEL_IQ_FP:
			// Floating-point instruction: into the chosen PE's FP Issue Queue.
			PE[processor_to_serve]->IQ_FP.dispatch(index,
													DISPATCH[i].branch_mask,
													PAY.buf[index].A_valid,
													A_ready,
													A_live_in,
													A_tag,
													PAY.buf[index].B_valid,
													B_ready,
													B_live_in,
													B_tag);

			break;

		case SEL_IQ_NONE:
			// Instruction needs no execution (e.g. NOP-like): mark it complete
			// in both Active Lists and credit the trace's completion count.
			REN_INT->set_complete(PAY.buf[index].AL_index_int, 0);
			REN_FP->set_complete(PAY.buf[index].AL_index_fp, 0);
			TQ.inc_completed_inst(processor_to_serve);


			break;

		case SEL_IQ_NONE_EXCEPTION:
			// Instruction was identified as excepting before dispatch: complete it
			// and record the exception in both Active Lists and the Trace Queue.
			REN_INT->set_complete(PAY.buf[index].AL_index_int, 0);
			REN_FP->set_complete(PAY.buf[index].AL_index_fp, 0);

			REN_INT->set_exception(PAY.buf[index].AL_index_int);
			REN_FP->set_exception(PAY.buf[index].AL_index_fp);
			TQ.set_exception(processor_to_serve, PAY.buf[index].pc, i);
			TQ.inc_completed_inst(processor_to_serve);

			break;

		default:
			assert(0);
			break;
		}

		// Dispatch loads and stores into the LQ/SQ and record their LQ/SQ indices.
		if (IS_MEM_OP(PAY.buf[index].flags)) {
			// Only the first (upper) op of a split-store allocates; see the
			// matching condition in the resource-counting loop above.
			if (!PAY.buf[index].split_store || PAY.buf[index].upper) {
				LSU.dispatch(IS_LOAD(PAY.buf[index].flags),
						PAY.buf[index].size,
						PAY.buf[index].left,
						PAY.buf[index].right,
						PAY.buf[index].is_signed,
						index,
						PAY.buf[index].LQ_index, PAY.buf[index].LQ_phase,
						PAY.buf[index].SQ_index, PAY.buf[index].SQ_phase);
				// The lower part of a split-store should inherit the same LSU indices.
				// (NOTE(review): the assert checks '.split' while the branch tests
				// '.split_store' -- presumably split implies split_store here; confirm.)
				if (PAY.buf[index].split_store) {
					assert(PAY.buf[index+1].split && !PAY.buf[index+1].upper);
					PAY.buf[index+1].LQ_index = PAY.buf[index].LQ_index;
					PAY.buf[index+1].LQ_phase = PAY.buf[index].LQ_phase;
					PAY.buf[index+1].SQ_index = PAY.buf[index].SQ_index;
					PAY.buf[index+1].SQ_phase = PAY.buf[index].SQ_phase;
				}

				// Oracle memory disambiguation support.
				if (ORACLE_DISAMBIG && PAY.buf[index].good_instruction && IS_STORE(PAY.buf[index].flags)) {
					// Get pointer to the corresponding instruction in the functional simulator.
					actual = THREAD[Tid]->peek(PAY.buf[index].db_index);

					// Determine the oracle store address.
					if (SS_OPCODE(PAY.buf[index].inst) == DSW) {
						// DSW (double-word store) is split into two word stores:
						// upper half writes a_addr, lower half writes a_addr + 4.
						assert(PAY.buf[index].split);
						oracle_address = (PAY.buf[index].upper ?  actual->a_addr : actual->a_addr + 4);
					}
					else {
						oracle_address = actual->a_addr;
					}

					// Place oracle store address into SQ before all subsequent loads are dispatched.
					// This policy ensures loads only stall on truly-dependent stores.
					LSU.store_addr(cycle, oracle_address, PAY.buf[index].SQ_index, PAY.buf[index].LQ_index, PAY.buf[index].LQ_phase);
				}
			}
		}


	}
	
	// Remove the dispatch bundle from the Dispatch Stage.
   for (i = 0; i < trace_width; i++) {
      assert(DISPATCH[i].valid);
      DISPATCH[i].valid = false;
   }
}


