
#ifndef  SIMPLE_PROC_CC_INC
#define  SIMPLE_PROC_CC_INC

#include	"simple-proc.h"
#include	"kernel/component.h"
#include	<stdlib.h>
#include	<assert.h>

#ifdef DBG_SIMPLE_PROC
#include "kernel/manifold.h"
using namespace manifold::kernel;
#endif

using namespace std;
using namespace manifold::simple_cache;

namespace manifold {
namespace simple_proc {

//! Configuration settings for a SimpleProcessor.
//! @param \c cl_size  cache line size in bytes; must be a power of 2 (asserted
//!                    in the SimpleProcessor constructor).
//! @param \c sack    whether or not the cache model sends back ACK for Stores.
//! @param \c msize   MSHR size; once the number of requests for different cache lines reaches this number, the processor is stalled.
//! @param \c ninst   stored in max_inst; presumably the maximum number of
//!                   instructions to issue — confirm against SimpleProcessor users.
SimpleProc_Settings :: SimpleProc_Settings(unsigned cl_size, bool sack, int msize, int ninst) :
cache_line_size(cl_size), store_ack(sack), mshrsize(msize), max_inst(ninst)
{
}


//! Construct a processor with the given ID and configuration.
//! The cache-line size is converted into a shift amount (cache_offset_bits)
//! so addresses can later be compared at cache-line granularity; the line
//! size must therefore be a power of 2.
SimpleProcessor::SimpleProcessor (int id, const SimpleProc_Settings& settings)
{
	processor_id = id;
	max_outstanding_cache_lines = settings.mshrsize;
	max_instructions = settings.max_inst;

	thread_state = SP_THREAD_READY;
	no_outstanding_requests = 0;
	pending_insn = 0;

	//compute log2 of the cache line size
	unsigned shift = 0;
	while ( (int)settings.cache_line_size > (0x1 << shift) )
	    shift++;
	cache_offset_bits = shift;

	//verify the line size really was a power of 2
	assert((0x1 << cache_offset_bits) == (int)settings.cache_line_size);

	//statistics counters
	n_stalled_cycles = 0;
	no_loads = 0;
	no_stores = 0;
	total_issued_insn = 0;
}


//! Destructor: release any cache requests still awaiting a response.
SimpleProcessor::~SimpleProcessor ()
{
	for ( unsigned i = 0; i < outstanding_requests.size(); ++i )
		delete outstanding_requests[i];
}





//! This is called every tick. It checks if there are spaces in the MSHRs. If so,
//! fetch the next instruction. If it is a memory instruction, sends a request to cache.
//! Otherwise do nothing.
void
SimpleProcessor::tick ( void )
{
#ifdef DBG_SIMPLE_PROC
	cout << "####### " << manifold::kernel::Manifold::NowTicks() << " tick() ... " << endl;
#endif

        // stalled means (no_outstanding_request == max) AND there's a pending mem instruction

	//if stalled or exited, return right away
	if ( is_suspended() || is_exited()) {
	        if(is_suspended())
		    n_stalled_cycles++;
		return;
	}


	//If there's a pending instruction, we process the pending instruction first.
	if ( pending_insn )
	{
		//If we get here, it means we're not stalled, but there is a pending instruction.
		//So no_outstanding_requests must be < max
		//This could happen like this: after being stalled (out standing requests maxed out, and
		//there's a pending instruciton), a response from cache comes back and decrements no. of
		//outstanding requests. It's not stalled anymore, but there's a pending instruction.

		assert(no_outstanding_requests < max_outstanding_cache_lines);
		assert(pending_insn->is_memop());

		dispatch ( pending_insn );
		delete pending_insn;
		pending_insn = 0;
		total_issued_insn++;
		return;
	}

	//normal operation
	Instruction* insn = fetch();
	if ( insn )
	{
		if ( insn->is_memop () )
		{
			if(no_outstanding_requests >= max_outstanding_cache_lines)
			{
				//there mustn't be a pending_insn.
				assert(pending_insn == 0);

				pending_insn = insn;
				thread_state = SP_THREAD_STALLED;
				return; //must return here, otherwise insn will be deleted twice.
			}
			else
				dispatch ( insn );

		}
		total_issued_insn++;
		delete insn;
	}
	else { //no more instruction
	    //probably should set status to exited
	}

}


//! Event handler for cache response.
//! All outstanding requests that fall in the same cache line as the response
//! are retired together, because they were counted as a single outstanding
//! request when dispatched. Retiring a line un-stalls a stalled processor.
//! @param request  the response from the cache; ownership is taken here and
//!                 the object is deleted.
void
SimpleProcessor::handle_cache_response (int, cache_req* request )
{
#ifdef DBG_SIMPLE_PROC
	cout << "@@@ " << Manifold::NowTicks() << " proc: handle_cache_response " << ", req= " << request
	     << " addr= " << request->addr;

	switch(request->msg) {
		case CACHE_HIT:
			cout << " cache hit" << endl;
			break;
		case LD_RESPONSE:
			cout << " load miss complete" << endl;
			break;
		default:
			assert(0);
	}
#endif

	//Remove all requests for the same cache line, compacting the vector
	//in place; "found" counts the matches removed so far.
	unsigned found = 0;
	for(unsigned i=0; i<outstanding_requests.size(); i++)
	{
		cache_req* req = outstanding_requests[i];

		if ( same_cache_line(req->addr, request->addr) )
		{
			found++;
			assert(req != request); //the response is never a stored request object
			delete req;
		}
		else
			outstanding_requests[i-found] = req; //move the item "found" positions to the low end
	}

	//now resize the vector
	outstanding_requests.resize(outstanding_requests.size() - found);

	if ( found == 0 ) {
		//a response must match at least one outstanding request
		cout << " Received response from cache but entry not in LDQ " << endl;
		exit(1);
	}

	//all requests for the same cache line are counted as one outstanding request
	no_outstanding_requests--; // this variable is the number of different lines that are requested
	if(thread_state == SP_THREAD_STALLED)
	    thread_state = SP_THREAD_READY;
	delete request;

	assert ( no_outstanding_requests >= 0 && "outstanding_requests count <0 ");
}


//! Dispatch a memory instruction to the cache.
//! Stores are always sent out. For a Load, if another request for the same
//! cache line is already in flight (an MSHR hit), the request is recorded in
//! outstanding_requests but no duplicate message is sent to the cache; all
//! requests for one line count as a single outstanding request.
//! @param insn  the memory instruction; the caller retains ownership.
void
SimpleProcessor::dispatch ( Instruction* insn)
{
    //stats first
    if(insn->opcode == Instruction :: OpMemLd) {
	no_loads++;
    }
    else {
	assert(insn->opcode == Instruction :: OpMemSt);
	no_stores++;
    }

    //Stores always dispatch; req_id is not used, so just set it to 0.
    if(insn->opcode == Instruction :: OpMemSt) {
	cache_req* request = new cache_req (0, processor_id, insn->addr, OpMemSt, INITIAL );
	Send(OUT_TO_CACHE, request);
	return;
    }

    //Load
    assert(insn->opcode == Instruction :: OpMemLd);
    assert ( no_outstanding_requests < max_outstanding_cache_lines);

    //Don't send duplicate requests for the same cache line: store them in
    //outstanding_requests but count them as a single outstanding request.
    bool mshr_hit = false;
    for (unsigned i=0; i<outstanding_requests.size(); i++) {
	if ( same_cache_line(outstanding_requests[i]->addr, insn->addr) ) {
	    mshr_hit = true;
	    break;
	}
    }

    cache_req* request = new cache_req (0, processor_id, insn->addr, OpMemLd, INITIAL );
    outstanding_requests.push_back(request);

    if ( !mshr_hit )
    {
	no_outstanding_requests++;
	//Send a copy: the request sent out is deleted by the event handler
	//in the cache, while ours stays in outstanding_requests.
	Send( OUT_TO_CACHE, new cache_req(*request) );
    }
}



	bool
SimpleProcessor::is_suspended ( void )
{
	if ( thread_state == SP_THREAD_STALLED )
		return true;
	else
		return false;
}		/* -----  end of method SimpleProcessor::is_suspended  ----- */

	bool
SimpleProcessor::is_exited ( void )
{
	if ( thread_state == SP_THREAD_EXITED )
		return true;
	else
		return false;
}		/* -----  end of method SimpleProcessor::is_exited  ----- */


//! Check whether two physical addresses fall in the same cache line.
//! @return true if \c a1 and \c a2 map to the same line.
bool
SimpleProcessor :: same_cache_line(paddr_t a1, paddr_t a2)
{
    //discard the offset bits; what remains identifies the line
    const paddr_t line1 = a1 >> cache_offset_bits;
    const paddr_t line2 = a2 >> cache_offset_bits;
    return line1 == line2;
}





//! Write this processor's statistics to the given output stream.
void SimpleProcessor :: print_stats(ostream& out)
{
    out << "********** SimpleProcessor " << processor_id << " stats **********" << endl;
    out << std::dec; //ensure counters print in decimal
    out << "Total stalled cycles: " << n_stalled_cycles << endl;
    out << std::dec << "Total issued instructions: " << total_issued_insn << endl;
    out << "    total issued LOADs: " << no_loads << endl;
    out << "    total issued STOREs: " << no_stores << endl;
}





} //namespace simple_proc
} //namespace manifold


#endif   /* ----- #ifndef SIMPLE_PROC_CC_INC  ----- */
