/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Description:  This module simulates a basic DDR-style memory controller
 * (and can easily be extended to do FB-DIMM as well).
 *
 * This module models a single channel, connected to any number of
 * DIMMs with any number of ranks of DRAMs each.  If you want multiple
 * address/data channels, you need to instantiate multiple copies of
 * this module.
 *
 * Each memory request is placed in a queue associated with a specific
 * memory bank.  This queue is of finite size; if the queue is full
 * the request will back up in an (infinite) common queue and will
 * effectively throttle the whole system.  This sort of behavior is
 * intended to be closer to real system behavior than if we had an
 * infinite queue on each bank.  If you want the latter, just make
 * the bank queues unreasonably large.
 *
 * The head item on a bank queue is issued when all of the
 * following are true:
 *   the bank is available
 *   the address path to the DIMM is available
 *   the data path to or from the DIMM is available
 *
 * Note that we are not concerned about fixed offsets in time.  The bank
 * will not be used at the same moment as the address path, but since
 * there is no queue in the DIMM or the DRAM it will be used at a constant
 * number of cycles later, so it is treated as if it is used at the same
 * time.
 *
 * We are assuming closed bank policy; that is, we automatically close
 * each bank after a single read or write.  Adding an option for open
 * bank policy is for future work.
 *
 * We are assuming "posted CAS"; that is, we send the READ or WRITE
 * immediately after the ACTIVATE.  This makes scheduling the address
 * bus trivial; we always schedule a fixed set of cycles.  For DDR-400,
 * this is a set of two cycles; for some configurations such as
 * DDR-800 the parameter tRRD forces this to be set to three cycles.
 *
 * We assume a four-bit-time transfer on the data wires.  This is
 * the minimum burst length for DDR-2.  This would correspond
 * to (for example) a memory where each DIMM is 72 bits wide
 * and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
 * This gives us the same occupancy on the data wires as on the
 * address wires (for the two-address-cycle case).
 *
 * The only non-trivial scheduling problem is the data wires.
 * A write will use the wires earlier in the operation than a read
 * will; typically one cycle earlier as seen at the DRAM, but earlier
 * by a worst-case round-trip wire delay when seen at the memory controller.
 * So, while reads from one rank can be scheduled back-to-back
 * every two cycles, and writes (to any rank) scheduled every two cycles,
 * when a read is followed by a write we need to insert a bubble.
 * Furthermore, consecutive reads from two different ranks may need
 * to insert a bubble due to skew between when one DRAM stops driving the
 * wires and when the other one starts.  (These bubbles are parameters.)
 *
 * This means that when some number of reads and writes are at the
 * heads of their queues, reads could starve writes, and/or reads
 * to the same rank could starve out other requests, since the others
 * would never see the data bus ready.
 * For this reason, we have implemented an anti-starvation feature.
 * A group of requests is marked "old", and a counter is incremented
 * each cycle as long as any request from that batch has not issued.
 * If the counter reaches twice the bank busy time, we hold off any
 * newer requests until all of the "old" requests have issued.
 *
 * We also model tFAW.  This is an obscure DRAM parameter that says
 * that no more than four activate requests can happen within a window
 * of a certain size.  For most configurations this does not come into play,
 * or has very little effect, but it could be used to throttle the power
 * consumption of the DRAM.  In this implementation (unlike in a DRAM
 * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16
 * then no more than four activates may happen within any 16 cycle window.
 * Refreshes are included in the activates.
 *
 */

#include "base/cast.hh"
#include "base/cprintf.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/RubyMemoryControl.hh"
#include "mem/ruby/system/System.hh"

//Praveen added
#include "sim/praveen_header.hh"
#include <cmath>
#include <vector>
#include <list>
#define NUM_COLUMNS_IN_ROW 64
#define MEM_REQ_PERIOD 100000
#define MEM_MAX_INPUT_Q_SIZE 100
#define MAX_OUTSTANDING_INSTRS 48

using namespace std;

// Bookkeeping record for one in-flight memory request, kept in the global
// outstanding_mem_requests list so latency/commit statistics can be derived.
class mem_transaction{
	public:
	// Line address of the request.
	Address mem_address;
	// Memory-controller cycle at which the request entered the queue.
	long long enter_time;
	// Cycle at which the request may be popped; -1 means not yet scheduled.
	long long pop_at_time;
	long long enter_instruction; //instruction count at which this transaction was sent
	// Set once the transaction's result is ready to retire.
	bool ready_to_commit;
	mem_transaction(Address address, long long enter, long long pop_at, long long instruction):mem_address(address), enter_time(enter), pop_at_time(pop_at), enter_instruction(instruction), ready_to_commit(false)
	{

	}

};
// ---- Global bookkeeping shared across all controller instances ----
// NOTE(review): these are file-global (not per-controller); with multiple
// memory controllers instantiated they are shared state.
long insns;
long gpu_cycles;
//long long current_cycle;
long  total_insns;
long  total_gpuinsns;
// All requests currently in flight, oldest first.
std::vector<mem_transaction> outstanding_mem_requests;
Address oldest_seen_address(0);
int ip_master;  //Who is the master injecting packets into the memory controller?
// Frame-pacing state for the display/CPU/GPU frame hand-off model.
int cpu_frames_displayed=-1;
int gpu_frames_displayed=-1;
int cpu_frame_id_last_written=-1;
int cpu_frame_being_processed=-1;
long fps_stall_cycles = 0;
long total_fps_stall_cycles = 0;

// Per-controller aggregate delay/latency accumulators.
long total_mem_ctrl0_delay = 0;
long total_mem_ctrl1_delay = 0;

long total_mem_ctrl0_latency = 0;
long total_mem_ctrl1_latency = 0;

// Frame-drop classification counters (CPU ahead of / behind the display).
int frames_dropped_cpu_faster=0;
int frames_dropped_cpu_slower=0;
int frames_dropped_cpu_ipmem[7];
float avg_fps= 0.0;
//Core = 3 times the clock of MemFreq.
// NOTE(review): comment says 3x but the value is 4 — confirm intended ratio.
int core_to_mem_freq=4;

//Display IP Options
long long display_tick=1;  // Display IP's clock ticks
long long cpu_display_tick=1;  // Display IP's clock ticks
long long display_frame_id=0;  // Display IP's next requested frame id

// Forward declarations for file-local helpers.
int bitSelect(physical_address_t address, int small, int big); // rips bits inclusive
int search_addr(Address addr);
std::vector<mem_transaction>::iterator search_ready(long long m_cycleCount);
bool isCPURequest(MemoryNode req);
//std::vector<long> outstanding_cpu_requests;
void enqueue_to_slave(MemoryMsg& msg);

// Hand-off queue of messages routed to the slave (separate IP) memory path.
std::list<MemoryMsg> msg_to_slave_queue;

// Append a message to the slave-controller hand-off queue.  The queue is
// drained elsewhere (the consumer is not visible in this file).
void enqueue_to_slave(MemoryMsg& msg)
{
	msg_to_slave_queue.push_back(msg);
}


// Locate addr in the global outstanding-request list.
// Returns the index of the first matching entry, -2 when the list is
// empty, and -1 when the list is non-empty but addr is not present.
// (Callers treat -1 and -2 identically as "not found".)
int search_addr(Address addr)
{
	// Hoisted empty check: equivalent to the original post-loop test,
	// since an empty vector never enters the loop.
	if (outstanding_mem_requests.empty())
		return -2;

	// size_t counter fixes the signed/unsigned comparison against
	// vector::size() in the original int loop.
	for (size_t i = 0; i < outstanding_mem_requests.size(); i++) {
		if (addr == outstanding_mem_requests[i].mem_address)
			return static_cast<int>(i);
	}
	return -1;
}

// Sample bank-level parallelism (BLP) and queue-occupancy statistics for
// this cycle: count banks with an in-flight access, accumulate per-bank
// queue depths, and record input-queue sizes.
void RubyMemoryControl::BLP_Counter()
{
	int busy_banks = 0;

	for (int bank = 0; bank < m_total_banks; bank++) {
		m_avgBankQSize[bank] += m_bankQueues[bank].size();
		if (m_bankBusyCounter[bank] > 0)
			busy_banks++;
	}

	// Only cycles with at least one busy bank contribute to the BLP average.
	if (busy_banks > 0) {
		m_avgBanksBusy += busy_banks;
		m_timesBanksBusy++;
		m_profiler_ptr->profileMemBLP(busy_banks, m_timesBanksBusy);
	}

	// With the two-queue configuration, CPU and IP queues are sampled
	// separately; otherwise sample the single shared input queue.
	if (m_mem_two_queues) {
		m_avgMemQSize_cpu += m_input_queue_cpu.size();
		m_avgMemQSize_ip += m_input_queue_ip.size();
	} else {
		m_avgMemQSize += m_input_queue.size();
	}
}

// Scan the outstanding-request list for the first transaction whose
// scheduled pop time has been set (pop_at_time != -1) and has already
// elapsed by m_cycleCount.  Returns end() when nothing is ready yet.
std::vector<mem_transaction>::iterator search_ready(long long m_cycleCount)
{
	std::vector<mem_transaction>::iterator cur = outstanding_mem_requests.begin();
	std::vector<mem_transaction>::iterator last = outstanding_mem_requests.end();

	while (cur != last) {
		if (cur->pop_at_time != -1 && cur->pop_at_time <= m_cycleCount)
			break;
		++cur;
	}
	return cur;
}
//************  End of Nachi/Praveen added *********************

class Consumer;

// Value to reset watchdog timer to.
// If we're idle for this many memory control cycles,
// shut down our clock (our rescheduling of ourselves).
// Refresh shuts down as well.
// When we restart, we'll be in a different phase
// with respect to ruby cycles, so this introduces
// a slight inaccuracy.  But it is necessary or the
// ruby tester never terminates because the event
// queue is never empty.
#define IDLECOUNT_MAX_VALUE 10000000

// Output operator definition
// Stream-insertion helper: delegates to RubyMemoryControl::print() and
// flushes the stream before returning it.
ostream&
operator<<(ostream& out, const RubyMemoryControl& obj)
{
    obj.print(out);
    return out << flush;
}



//-------------------------------------------------------------------------------------------------




// ****************************************************************

// Drive synthetic CPU traffic into the controller.  For each of the 7 IP
// slots, inject at most one write and one read this cycle (draining the
// shadow counters), then refill the shadow counters with a quarter of the
// live per-IP counters, which are zeroed.  Requests go either to the slave
// queue (separate-memory mode) or are split ~50/50 at random between this
// controller and the slave.  Returns true iff at least one request was
// injected.
bool
RubyMemoryControl::cpu_driver_execute()
{
	//long long diff = (curTick() - start_tick);

	// double current_microsecs = (double) (diff/ (1000*1000*1.0));
	//1 clock in micro seconds = (diff/current_cycle*1.0) / (1000*1000);
	bool issued = false;

	for(int i=0; i<7; i++) {
		// Pending shadow writes for this IP slot: inject one write-back.
		if(m_shadowcpuIPWriteCount[i] > 0)
		{
	       MemoryMsg msg(curTick());
	       Address address(0);
	       Cycles cycles(0);
	       MachineID machineID;
	       //We are assigning each request to be of a DMA type, by default. We change to READ, WRITE later for CPU requests.
	       machineID.type = MachineType_DMA;
	       machineID.num = 0;

	       msg.setType(MemoryRequestType_MEMORY_WB);
	       msg.setMessageSize(MessageSizeType_Request_Control);
	       msg.setSender(machineID);

	       address.setAddress(m_reqAddr);
	       msg.setAddress(address);

  			msg.setIPid(CPU);

			// Separate-memory mode always routes to the slave; otherwise
			// coin-flip between local enqueue and the slave queue.
      		if(m_separate_mem)
			   enqueue_to_slave(msg);
		   else
		   {
			   m_rand_enqueue = rand();
			   if(m_rand_enqueue < RAND_MAX/2)
			   {
				   enqueue(msg, cycles);
				   m_periodicCount[CPU] += 64;
			   }
			   else
				   enqueue_to_slave(msg);
		   }
			// Advance by one 64-byte line and consume one shadow credit.
      		m_reqAddr += 64;
      		m_shadowcpuIPWriteCount[i]--;

			   // Track the request unless an identical address is already
			   // outstanding (-1/-2 both mean "not found").
			   mem_transaction tmp_transaction(address,m_cycleCount,-1,total_insns);
			   int index = search_addr(address);
			   if(index == -1 || index == -2)
				   outstanding_mem_requests.push_back(tmp_transaction);
			   issued = true;
		}

		// Pending shadow reads for this IP slot: inject one read.
		// Mirrors the write path above except for the request type.
		if(m_shadowcpuIPReadCount[i] > 0)
		{
		       MemoryMsg msg(curTick());
		       Address address(0);
		       Cycles cycles(0);
		       MachineID machineID;
		       //We are assigning each request to be of a DMA type, by default. We change to READ, WRITE later for CPU requests.
		       machineID.type = MachineType_DMA;
		       machineID.num = 0;

		       msg.setType(MemoryRequestType_MEMORY_READ);
		       msg.setMessageSize(MessageSizeType_Request_Control);
		       msg.setSender(machineID);

		       address.setAddress(m_reqAddr);
		       msg.setAddress(address);

	  			msg.setIPid(CPU);

	      		if(m_separate_mem)
				   enqueue_to_slave(msg);
			   else
			   {
				   m_rand_enqueue = rand();
				   if(m_rand_enqueue < RAND_MAX/2)
				   {
					   enqueue(msg, cycles);
					   m_periodicCount[CPU] += 64;
				   }
				   else
					   enqueue_to_slave(msg);
			   }
	      		m_reqAddr += 64;
	      		m_shadowcpuIPReadCount[i]--;

				   mem_transaction tmp_transaction(address,m_cycleCount,-1,total_insns);
				   int index = search_addr(address);
				   if(index == -1 || index == -2)
					   outstanding_mem_requests.push_back(tmp_transaction);
				   issued = true;
		}
	}


	//Is it poll time ?
	//if(current_microsecs > (((cpu_display_tick)/240.0) * (1.0*1000*1000)))
	// NOTE(review): the poll-time gate above is commented out, so this
	// refill block runs unconditionally every call.
	{
		for(int i=0; i<7; i++) {
			// Transfer a quarter of the live counters into the shadow
			// counters and clear the live ones.
			m_shadowcpuIPWriteCount[i] += m_cpuIPWriteCount[i]/4;
			m_shadowcpuIPReadCount[i] += m_cpuIPReadCount[i]/4;

			// m_shadowcpuIPWriteCount[i] = 0;
			// m_shadowcpuIPReadCount[i] = 0;


			m_cpuIPWriteCount[i] = 0;
			m_cpuIPReadCount[i] = 0;
		}

		//m_reqAddr = 540595840;
		cpu_display_tick++;
	}

	if (issued)
		return true;
	else
		return false;
}

// Model the display IP polling for a rendered frame at 60 Hz (in simulated
// wall-clock time).  When the poll fires: if the CPU has written the frame
// the display wants, advance; otherwise count a dropped frame (classified
// by whether the CPU is ahead or behind), release any frame-rate stall on
// the core, and cancel any pending frame-buffer injection (m_reqCount = 0).
void
RubyMemoryControl::display_driver_execute()
{
	// Elapsed simulated time since start, converted to microseconds.
	long long diff = (curTick() - start_tick);
	long double time_micro =  (diff/ (1.0*1000*1000L));

	double current_microsecs = (double) (diff/ (1000*1000*1.0));
	//1 clock in micro seconds = (diff/current_cycle*1.0) / (1000*1000);

	//Is it poll time ?
/*	if(cpu_frames_displayed < display_frame_id)
		fps_stall_cycles=0;
	if(cpu_frame_being_processed == display_frame_id)
		fps_stall_cycles=0;*/

	// 60 Hz poll: fire when we cross the next 1/60-second boundary.
	if(current_microsecs > (((display_tick)/60.0) * (1.0*1000*1000)))
	{
		cout<<"Current Time in micro  = "<<time_micro<<endl;
		cout<<"\n Timestamp Display is requesting: \t\t"<<display_frame_id;
		cout<<"\n Frame Core has placed in memory: \t\t"<<cpu_frames_displayed;
		cout<<"\n Frame Core is still processing: \t\t"<<cpu_frame_being_processed;
		cout<<"\n FrameId of what Core placed or skipped last(processed): "<<cpu_frame_id_last_written<<endl;

		if(display_frame_id == cpu_frame_id_last_written)
		{
			// The frame the display wants is ready: consume it.
			cout<<"Yaay! Frame found. Frame "<<display_frame_id <<" rendered on the screen"<<endl;
			display_frame_id++;
		}
		else
		{
			// Frame missed: classify the drop relative to the CPU's progress.
			cout<<"Oops couldnt find the frame "<<display_frame_id<< " at "<<current_cycle<<"; Last written frame / last skipped frame is "<<cpu_frame_id_last_written<<endl;
			cout<<"\t More info: CPU frame_being_processed: "<<cpu_frame_being_processed<<endl;
			if(display_frame_id < (cpu_frame_id_last_written-1))
				frames_dropped_cpu_faster++;
			else if(display_frame_id == (cpu_frame_id_last_written-1))
			{
				frames_dropped_cpu_slower++;
				cpu_frame_id_last_written++;
			}
			// NOTE(review): this branch duplicates the one above; both
			// "==" and ">" cases take the same action.
			else if(display_frame_id > (cpu_frame_id_last_written-1))
			{
				frames_dropped_cpu_slower++;
				cpu_frame_id_last_written++;
			}	
			display_frame_id++;
			//Make the core not stall anymore, if it was stalling (i.e. android controlling the rate is removed, and the core is allowed to work on the next frame.
			cout<<"fps_stall_cycles made from "<<fps_stall_cycles<<" to 0";
			fps_stall_cycles=0;
			//If we are dropping the frame, then, we wont be injecting the packets in FB packets into the memory.
			m_reqCount = 0;
		}
		cout<<"Frames Dropped:"<<frames_dropped_cpu_slower<<"\t + \t"<<frames_dropped_cpu_faster<<endl<<endl;
		display_tick++;
	}

	/*if(current_microsecs > (((display_tick)/60.0) * (1.0*1000*1000)))
	{
		//cout<<"\n Current Time Diff = "<<diff<<endl;
		//cout<<"Current Time in micro  = "<<time_micro<<endl;
		//cout<<"\n Display CLock Ticks = "<<((display_tick/60.0) * (1.0*1000*1000))<<endl;

		//Okay, lets poll if CPU has rendered a frame.
		cout<<"Display polling. "<<display_tick<<"; My poll id = "<<display_frame_id<<endl;
		if(display_frame_id == cpu_frames_displayed-1)
		{
			cout<<"Yaay! Frame found. Frame "<<display_frame_id <<" rendered on the screen"<<endl;
			display_frame_id++;
		}
		else
		{
			cout<<"Oops couldnt find the frame "<<display_frame_id<< " at "<<current_cycle<<"; Last sent Frame by core is "<<cpu_frames_displayed<<endl;
			if(display_frame_id < (cpu_frames_displayed-1))
				frames_dropped_cpu_faster++;
			else if(display_frame_id == (cpu_frames_displayed-1))
				frames_dropped_cpu_slower++;
			else if(display_frame_id > (cpu_frames_displayed-1))
				frames_dropped_cpu_slower++;
			display_frame_id++;
			//Make the core not stall anymore, if it was stalling (i.e. android controlling the rate is removed, and the core is allowed to work on the next frame.
			cout<<"fps_stall_cycles made from "<<fps_stall_cycles<<" to 0";
			fps_stall_cycles=0;
			//If we are dropping the frame, then, we wont be injecting the packets in FB packets into the memory.
			m_reqCount = 0;
			cpu_frames_displayed++;
		}
		cout<<"Frames Dropped:"<<frames_dropped_cpu_slower<<"\t + \t"<<frames_dropped_cpu_faster<<endl<<endl;
		display_tick++;
	}*/

	//avg_fps = ((double)cpu_frames_displayed/time_micro)*1000;
}

// Nachi
// Nachi
// Drive GPU trace traffic into the controller.  First drains any pending
// frame-buffer burst (m_reqCount > 0, one 64-byte line per call), then —
// if the input queues have room and the GPU compute-skip counter has
// elapsed — reads the next record from the GPU trace file:
//   GPU <n>    : skip n GPU cycles of compute
//   GMU_ld/st  : inject a memory request at the traced address
//   Rendered   : a frame completed; start a full-frame (1080x1920) burst
//   END        : trace exhausted; stop fetching and dump counters
void
RubyMemoryControl::gpu_driver_execute()
{
    //Inserting GPU requests
    //Below if condition is borrowed from CPU's entry loop

	//Each memory cycle, a GPU cycle is reduced. //Assuming memory freq = GPU freq.
	m_gpu_cyclesToSkip--;

    if(m_reqCount > 0)   //If there are more requests to be sent with the previous IP call.. (An IP call has 100's of requests to be injected in consecutive cycles
    {
    	string op;
		MemoryMsg msg(curTick());
		Address address(0);
		Cycles cycles(0);
		MachineID machineID;
		//We are assigning each request to be of a DMA type, by default. We change to READ, WRITE later for CPU requests.
		machineID.type = MachineType_DMA;
		machineID.num = 0;

		msg.setType(MemoryRequestType_MEMORY_READ);
		msg.setMessageSize(MessageSizeType_Request_Control);
		msg.setSender(machineID);


		address.setAddress(m_reqAddr);
		msg.setAddress(address);
		// cout << msg << endl;
		msg.setIPid(ip_master);

		// Tag the final request of a frame-buffer burst so downstream
		// logic can detect frame completion.
		if(m_reqCount == 1 && ip_master == FB)
			msg.setIPIsLast(FB);

		if(m_separate_mem)
		   enqueue_to_slave(msg);
		else
		{
			// Coin-flip between local enqueue and the slave queue.
			m_rand_enqueue = rand();
		   if(m_rand_enqueue < RAND_MAX/2)
		   {
			   enqueue(msg, cycles);
			   m_periodicCount[ip_master] += m_reqCount * 64;
		   }
		   else
			   enqueue_to_slave(msg);
		}
		m_reqAddr += 64;
		m_reqCount--;
    }

	if(1 && m_input_queue.size() <= MEM_MAX_INPUT_Q_SIZE && m_input_queue_ip.size() <= MEM_MAX_INPUT_Q_SIZE && m_gpu_cyclesToSkip <= 0)
	{
		string op;
		// Bug fix: addr was uninitialized; the "Rendered" branch reads it
		// via "if (addr == 0)" before any trace line assigns it.  Zero is
		// the sentinel that branch already expects.
		physical_address_t addr = 0;
		MemoryMsg msg(curTick());
		Address address(0);
		Cycles cycles(0);
		MachineID machineID;
		//We are assigning each request to be of a DMA type, by default. We change to READ, WRITE later for CPU requests.
		machineID.type = MachineType_DMA;
		machineID.num = 0;

		msg.setType(MemoryRequestType_MEMORY_READ);
		msg.setMessageSize(MessageSizeType_Request_Control);
		msg.setSender(machineID);

		em_gputrace_file>>op;
		if (op == "GPU")
		{ // GPU line: pure compute, just skip cycles.
			em_gputrace_file>>m_gpu_cyclesToSkip;
			total_gpuinsns+=m_gpu_cyclesToSkip;
			//cout << op << "  " << m_gpu_cyclesToSkip<< endl;
		}
		else if (op == "GMU_ld")
		{
			em_gputrace_file>>hex>>addr>>dec;
			//em_gputrace_file>>size;

			msg.setType(MemoryRequestType_MEMORY_WB);
			address.setAddress(addr);
			msg.setAddress(address);
			msg.setIPid(GPU);

			if(m_separate_mem)
			   enqueue_to_slave(msg);
		   else
		   {
			   m_rand_enqueue = rand();
			   if(m_rand_enqueue < RAND_MAX/2)
			   {
				   enqueue(msg, cycles);
					m_periodicCount[GPU] += 64;
					ip_master=GPU;
			   }
			   else
				   enqueue_to_slave(msg);
		   }

			//cout<<"LD Added to Q: "<< total_gpuinsns<<" Number of reqs in Q:"<<m_input_queue.size()<<endl;

		}
		else if (op == "GMU_st")
		{
			em_gputrace_file>>hex>>addr>>dec;
			//em_gputrace_file>>size;

			msg.setType(MemoryRequestType_MEMORY_READ);
			address.setAddress(addr);
			msg.setAddress(address);
			msg.setIPid(GPU);
			if(m_separate_mem)
			   enqueue_to_slave(msg);
			else
			{
				 m_rand_enqueue = rand();
			   if(m_rand_enqueue < RAND_MAX/2)
			   {
				   enqueue(msg, cycles);
					m_periodicCount[GPU] += 64;
					ip_master =GPU;
			   }
			   else
				   enqueue_to_slave(msg);
			}

			//cout<<"ST Added to Q: "<< total_gpuinsns<<" Number of reqs in Q:"<<m_input_queue.size()<<endl;

		}
		else if (op == "Rendered")
		{
			string frame_txt;
			em_gputrace_file>>frame_txt;
			string frame_id;
			em_gputrace_file>>frame_id;
			gpu_frames_displayed++;
			cout<<"\n JOOMLA! GPU Frame rendered!";


			// Start a frame-buffer burst: one request per 64 pixels of a
			// 1080x1920 frame, issued on subsequent calls.
			m_reqCount = ceil((double) (1080*1920)/ 64);
		   if (addr == 0)
			   addr = 540595840;   // default frame-buffer base address
			ip_master = FB;

		   address.setAddress(addr);
		   msg.setAddress(address);
		   msg.setIPid(ip_master);
		   if(m_separate_mem)
			   enqueue_to_slave(msg);
		   else
		   {
			   m_rand_enqueue = rand();
			   if(m_rand_enqueue < RAND_MAX/2)
			   {
				   enqueue(msg, cycles);
				   m_periodicCount[ip_master] += m_reqCount * 64;
			   }
			   else
				   enqueue_to_slave(msg);
		   }

		   // cout << msg << endl;
			m_reqCount--;
			m_reqAddr = addr + 64;
		}
		else if (op == "END")
		{
			cout << "Read END from trace file" << endl;
			true_fetch = false;

			cout << "Memory Controller Cycles: " << m_cycleCount << endl;
			for (int i=0; i<IPS; i++)
				cout << i << "\t" << m_periodicCount[i] << endl;
		}
	}
}
// End Nachi
// End Nachi

//Nachi
//Nachi
// Dump per-interval statistics (queue depths, BLP, IPC, frame counters)
// for this controller to stdout, then reset the interval accumulators.
// Labels are suffixed _1 for controller 0 and _2 for controller 1.
void
RubyMemoryControl::interval_stat_printer()
{
	cout << "\nReq Count @ " << m_cycleCount << endl;

	for (int i=0; i<IPS; i++)
		cout << i << "    " << m_periodicCount[i] << endl;

	// m_profiler_ptr->profileMemBLP(m_avgBanksBusy, m_timesBanksBusy);

	if(m_mem_controller_id==0)
	{
		cout<<"core_stalls_1: "<<m_core_stalls<<endl;
		cout<<"IPC_1: "<<(double)total_insns/(m_cycleCount*core_to_mem_freq)<<endl;
		cout<<"AvgBLP_1: "<<m_avgBanksBusy*1.0/m_timesBanksBusy<<endl;
		cout<<"InstantMemQ_Size_1: "<<m_input_queue.size()<<endl;
		cout<<"InstantOutStandingReqQ_Size_1: "<<outstanding_mem_requests.size()<<endl;
		cout<<"AvgMemQ_Size_1: "<<(double)m_avgMemQSize/(double)MEM_REQ_PERIOD<<endl;
		cout<<"AvgMemQ_CPU_Size_1: "<<(double)m_avgMemQSize_cpu*1.0<<endl;
		cout<<"AvgMemQ_IP_Size_1: "<<(double)m_avgMemQSize_ip*1.0<<endl;
		cout<<"AvgOutStandingReqQ Size_1: "<<(double)m_avgOutStandQSize/(double)MEM_REQ_PERIOD<<endl;
		cout<<"Total_fps_stall_cycles_1: "<<total_fps_stall_cycles<<endl;
		cout<<"Avg_fps_1: "<<avg_fps<<endl;
		cout<<"Tot_gpu_fps_1: "<<gpu_frames_displayed<<endl;
		cout<<"Fdps_1: "<<frames_dropped_cpu_slower<<endl;
		cout<<"ExeSeconds_1: "<<simTicks.value()/simFreq.value()<<endl;
		cout<<"Lines_read_cpu_1: "<<lines_read_cpu<<endl;
		cout<<"AvgBankQ_1_Size: ";
		for (int bank=0; bank < m_total_banks; bank++) {
			cout<<(m_avgBankQSize[bank]*1.0)/MEM_REQ_PERIOD<<"    ";
		}
		cout<<endl;
		cout << "Frames dropped due to cpu inserting to ip mem  ";
		for(int i=0; i<7; i++)
			cout << frames_dropped_cpu_ipmem[i] << "  ";
		cout<<endl;
	}


	if(m_mem_controller_id==1)
	{
		cout<<"core_stalls_2: "<<m_core_stalls<<endl;
		cout<<"IPC_2: "<<(double)total_insns/(m_cycleCount*core_to_mem_freq)<<endl;
		cout<<"AvgBLP_2: "<<m_avgBanksBusy*1.0/m_timesBanksBusy<<endl;
		cout<<"InstantMemQ_Size_2: "<<m_input_queue.size()<<endl;
		cout<<"InstantOutStandingReqQ Size_2: "<<outstanding_mem_requests.size()<<endl;
		cout<<"AvgMemQ_Size_2: "<<(double)m_avgMemQSize/(double)MEM_REQ_PERIOD<<endl;
		cout<<"AvgMemQ_CPU_Size_2: "<<(double)m_avgMemQSize_cpu*1.0/(double)MEM_REQ_PERIOD<<endl;
		cout<<"AvgMemQ_IP_Size_2: "<<(double)m_avgMemQSize_ip*1.0/(double)MEM_REQ_PERIOD<<endl;
		cout<<"AvgOutStandingReqQ_Size_2: "<<(double)m_avgOutStandQSize/(double)MEM_REQ_PERIOD<<endl;
		cout<<"Total_fps_stall_cycles_2: "<<total_fps_stall_cycles<<endl;
		cout<<"Avg_fps_2: "<<avg_fps<<endl;
		cout<<"Tot_gpu_fps_2: "<<gpu_frames_displayed<<endl;
		cout<<"Fdps_2: "<<frames_dropped_cpu_slower<<endl;
		cout<<"ExeSeconds_2: "<<simTicks.value()/simFreq.value()<<endl;

		cout<<"AvgBankQ_2_Size: ";
		for (int bank=0; bank < m_total_banks; bank++) {
			cout<<(m_avgBankQSize[bank]*1.0)/MEM_REQ_PERIOD<<"    ";
		}
		cout<<endl;

	}

	// Reset interval accumulators for the next MEM_REQ_PERIOD window.
	m_avgBanksBusy = 0;
	m_avgMemQSize = 0;
	m_avgMemQSize_cpu = 0;
	m_avgMemQSize_ip = 0;
	m_avgOutStandQSize = 0;
	m_avgBankQSize.clear();
	m_avgBankQSize.resize(m_total_banks);
	m_timesBanksBusy = 0;

	//cout<<"core_busy_cycles: "<<m_corebusy_stalls<<endl;
	m_core_stalls = 0;
	m_corebusy_cycles = 0;

	for (int i=0; i<IPS; i++)
		m_periodicCount[i] = 0;
}
//End Nachi
//End Nachi

// Praveen
void
RubyMemoryControl::enqueue(MemoryMsg& memMess, Cycles latency)
{
    Cycles arrival_time = curCycle() + latency;
    physical_address_t addr = memMess.getAddress().getAddress();
    MemoryRequestType type = memMess.getType();
    bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
    MemoryNode thisReq(arrival_time, NULL, addr, is_mem_read, !is_mem_read);

    //Set IP Characteristics
    thisReq.IPIsLast = memMess.getIPIsLast();
    thisReq.IPid = memMess.getIPid();

    enqueueMemRef(thisReq, 0);
    // cout << "Enqueued memory request to addr " << hex << addr  << dec << endl;
}
// End Praveen

// enqueue new request from directory
void
RubyMemoryControl::enqueue(const MsgPtr& message, Cycles latency)
{
    Cycles arrival_time = curCycle() + latency;
    const MemoryMsg* memMess = safe_cast<const MemoryMsg*>(message.get());
    physical_address_t addr = memMess->getAddress().getAddress();
    MemoryRequestType type = memMess->getType();
    bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
    MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
    enqueueMemRef(thisReq, 0);
}

// CONSTRUCTOR
RubyMemoryControl::RubyMemoryControl(const Params *p)
    : MemoryControl(p)
{
	//praveen added
	m_mem_controller_id = global_mem_controller_count++;

    m_banks_per_rank = p->banks_per_rank;
    m_ranks_per_dimm = p->ranks_per_dimm;
    m_dimms_per_channel = p->dimms_per_channel;
    m_bank_bit_0 = p->bank_bit_0;
    m_rank_bit_0 = p->rank_bit_0;
    m_dimm_bit_0 = p->dimm_bit_0;

    //Praveen added
    //Hard coded cache line interleaving for MC 1 to get more parallel requests
    /*if(p->mem_blp_aware_ip_mem==1 &&  m_mem_controller_id >= 1)
    {
        m_bank_bit_0 = 6;
		m_rank_bit_0 = 8;
		m_dimm_bit_0 = 9;
    } */
    m_openpage_policy = true;

    m_bank_queue_size = p->bank_queue_size;
    m_bank_busy_time = p->bank_busy_time;
    m_rank_rank_delay = p->rank_rank_delay;
    m_read_write_delay = p->read_write_delay;
    m_basic_bus_busy_time = p->basic_bus_busy_time;
    m_mem_ctl_latency = p->mem_ctl_latency;
    m_refresh_period = p->refresh_period;
    m_tFaw = p->tFaw;
    m_mem_random_arbitrate = p->mem_random_arbitrate;
    m_mem_fixed_delay = p->mem_fixed_delay;
    m_separate_mem = mem_separate_mem;
    m_optimized_mem = mem_optimized;
	m_mem_prioritize_cpu = false;
	m_mem_two_queues = false;
	m_frfcfs_policy = p->mem_frfcfs_policy;


    if(m_optimized_mem)
    {
    	m_separate_mem = true;
    }


    //praveen added
    if(m_optimized_mem && m_mem_controller_id >= 1) {
    	m_bank_bit_0 = 6;
    	m_rank_bit_0 = 8;
    	m_dimm_bit_0 = 9;

    	m_mem_prioritize_cpu = true;
    	m_mem_two_queues = true;
    	m_frfcfs_policy = true;
    }


    m_mem_rowbuffer_hit_latency = p->m_mem_rowbuffer_hit_latency;
    m_mem_rowbuffer_conflict_latency = p->m_mem_rowbuffer_conflict_latency;
    m_profiler_ptr = new MemCntrlProfiler(name(), m_mem_controller_id,
                                          m_banks_per_rank,
                                          m_ranks_per_dimm,
                                          m_dimms_per_channel);

    for(int i=0; i<7; i++)
    	frames_dropped_cpu_ipmem[i] = 0;
}

void
RubyMemoryControl::init()
{
    m_msg_counter = 0;

    assert(m_tFaw <= 62); // must fit in a uint64 shift register

    m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
    m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
    m_refresh_period_system = m_refresh_period / m_total_banks;

    m_bankQueues = new list<MemoryNode> [m_total_banks];
    assert(m_bankQueues);

    m_bankBusyCounter = new int [m_total_banks];
    assert(m_bankBusyCounter);

    m_oldRequest = new int [m_total_banks];
    assert(m_oldRequest);

    for (int i = 0; i < m_total_banks; i++) {
        m_bankBusyCounter[i] = 0;
        m_oldRequest[i] = 0;
    }

    //praveen added
    m_bankRowBuffer = new int [m_total_banks];
    assert(m_bankRowBuffer);


    m_busBusyCounter_Basic = 0;
    m_busBusyCounter_Write = 0;
    m_busBusyCounter_ReadNewRank = 0;
    m_busBusy_WhichRank = 0;

    m_roundRobin = 0;
    m_refresh_count = 1;
    m_need_refresh = 0;
    m_refresh_bank = 0;
    m_idleCount = 0;
    m_ageCounter = 0;
    // Praveen
    m_cyclesToSkip = 0;
    m_cycleCount = 0;
	m_reqCount = 0;
	m_core_stalls = 0;
    for (int i=0; i<IPS; i++)
        m_periodicCount[i] = 0;
	m_avgBankQSize.clear();
	m_avgBankQSize.resize(m_total_banks);

    // Each tfaw shift register keeps a moving bit pattern
    // which shows when recent activates have occurred.
    // m_tfaw_count keeps track of how many 1 bits are set
    // in each shift register.  When m_tfaw_count is >= 4,
    // new activates are not allowed.
    m_tfaw_shift = new uint64[m_total_ranks];
    m_tfaw_count = new int[m_total_ranks];
    for (int i = 0; i < m_total_ranks; i++) {
        m_tfaw_shift[i] = 0;
        m_tfaw_count[i] = 0;
    }
}
// Re-initialize all scheduling state.  Mirrors init() but reuses the
// arrays allocated there (asserted non-null) instead of reallocating.
void
RubyMemoryControl::reset()
{
    m_msg_counter = 0;

    assert(m_tFaw <= 62); // must fit in a uint64 shift register

    m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
    m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
    m_refresh_period_system = m_refresh_period / m_total_banks;

    // Arrays must already have been allocated by init().
    assert(m_bankQueues);
    assert(m_bankBusyCounter);
    assert(m_oldRequest);

    for (int i = 0; i < m_total_banks; i++) {
        m_bankBusyCounter[i] = 0;
        m_oldRequest[i] = 0;
        //praveen added
        m_bankRowBuffer[i] = -1;   // -1 == no row open in this bank
    }

    m_busBusyCounter_Basic = 0;
    m_busBusyCounter_Write = 0;
    m_busBusyCounter_ReadNewRank = 0;
    m_busBusy_WhichRank = 0;

    m_roundRobin = 0;
    m_refresh_count = 1;
    m_need_refresh = 0;
    m_refresh_bank = 0;
    m_idleCount = 0;
    m_ageCounter = 0;
    // Praveen
    m_cyclesToSkip = 0;
    m_gpu_cyclesToSkip = 0;
    m_cycleCount = 0;
    m_reqCount = 0;   // fix: original assigned m_reqCount = 0 twice
    m_reqAddr = 0;

    for (int i=0; i<IPS; i++)
        m_periodicCount[i] = 0;
    m_avgBankQSize.clear();
    m_avgBankQSize.resize(m_total_banks);

    // Each tfaw shift register keeps a moving bit pattern
    // which shows when recent activates have occurred.
    // m_tfaw_count keeps track of how many 1 bits are set
    // in each shift register.  When m_tfaw_count is >= 4,
    // new activates are not allowed.
    for (int i = 0; i < m_total_ranks; i++) {
        m_tfaw_shift[i] = 0;
        m_tfaw_count[i] = 0;
    }
}
// Destructor: release everything allocated in init() and the constructor.
RubyMemoryControl::~RubyMemoryControl()
{
    delete [] m_bankQueues;
    delete [] m_bankBusyCounter;
    delete [] m_oldRequest;
    // Fix: these arrays are allocated in init() but were never freed,
    // leaking on every controller destruction.
    delete [] m_bankRowBuffer;
    delete [] m_tfaw_shift;
    delete [] m_tfaw_count;
    delete m_profiler_ptr;
}
// Alternate entry point used when we already have a MemoryNode
// structure built.
void
RubyMemoryControl::enqueueMemRef(MemoryNode& memRef, int param)
{
    // Stamp the request with a unique arrival number.
    m_msg_counter++;
    memRef.m_msg_counter = m_msg_counter;

    const physical_address_t addr = memRef.m_addr;
    const int bank = getBank(addr);

    // Praveen
    // if(memRef.getMsgPtr() == NULL)
    DPRINTF(RubyMemory, "New memory request%7d: %llu %c arrived at %zu bank = %3x sched %c\n",
            m_msg_counter, addr, memRef.m_is_mem_read ? 'R':'W',
            memRef.m_time * g_system_ptr->clockPeriod(),
            bank, m_event.scheduled() ? 'Y':'N');

    m_profiler_ptr->profileMemReq(bank);

    // Route the request: separate CPU/IP input queues when two-queue
    // mode is enabled, otherwise a single shared input queue.
    if (!m_mem_two_queues) {
        m_input_queue.push_back(memRef);
    } else if (memRef.IPid == CPU) {
        m_input_queue_cpu.push_back(memRef);
    } else {
        m_input_queue_ip.push_back(memRef);
    }

    // Kick the controller unless it is already scheduled or the caller
    // asked us not to (param == 1).
    if (param != 1 && !m_event.scheduled()) {
        schedule(m_event, nextCycle());
    }
}
// dequeue, peek, and isReady are used to transfer completed requests
// back to the directory
void
RubyMemoryControl::dequeue()
{
    // Remove the completed request at the head of the response queue.
    // Callers must check isReady() first; the assert enforces that.
    assert(isReady());
    m_response_queue.pop_front();
}
const Message*
RubyMemoryControl::peek()
{
    // Inspect the head response without removing it and return the raw
    // message pointer the directory expects.
    MemoryNode head = peekNode();
    Message* msg = head.m_msgptr.get();
    assert(msg != NULL);
    return msg;
}
MemoryNode
RubyMemoryControl::peekNode()
{
    // A response must be ready before it can be inspected.
    assert(isReady());

    MemoryNode head = m_response_queue.front();
    DPRINTF(RubyMemory, "Peek: memory request%7d: %#08x %c sched %c\n",
            head.m_msg_counter, head.m_addr, head.m_is_mem_read ? 'R':'W',
            m_event.scheduled() ? 'Y':'N');
    return head;
}
bool
RubyMemoryControl::isReady()
{
    // Ready when a completed response exists and its delivery time has
    // been reached.
    if (m_response_queue.empty())
        return false;
    return m_response_queue.front().m_time <= g_system_ptr->curCycle();
}
void
RubyMemoryControl::setConsumer(Consumer* consumer_ptr)
{
    // Remember whom to wake when a response becomes available
    // (used by enqueueToDirectory).
    m_consumer_ptr = consumer_ptr;
}
void
RubyMemoryControl::print(ostream& out) const
{
    // Intentionally empty: this controller prints no state.
}
void
RubyMemoryControl::clearStats() const
{
    // Delegate statistics reset to the attached profiler.
    m_profiler_ptr->clearStats();
}
void
RubyMemoryControl::printStats(ostream& out) const
{
    // Delegate statistics output to the attached profiler.
    m_profiler_ptr->printStats(out);
}
// Queue up a completed request to send back to directory
void
RubyMemoryControl::enqueueToDirectory(MemoryNode req, Cycles latency)
{
    // Convert the requested latency into an absolute tick, stamp the
    // request with the equivalent Ruby cycle, and queue it for return.
    const Tick when = clockEdge(latency);
    req.m_time = g_system_ptr->ticksToCycles(when);
    m_response_queue.push_back(req);

    DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n",
            req.m_addr, req.m_is_mem_read ? 'R':'W', when);

    // Wake the consumer when the response is due.
    m_consumer_ptr->scheduleEventAbsolute(when);
}
// getBank returns an integer that is unique for each
// bank across this memory controller.
const int
RubyMemoryControl::getBank(const physical_address_t addr) const
{
    // Decode the DIMM, rank and bank fields from the address and fold
    // them into a single flat bank index for this channel.
    const int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
    const int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
    const int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);

    int flat = dimm;
    flat = flat * m_ranks_per_dimm + rank;
    flat = flat * m_banks_per_rank + bank;
    return flat;
}
const int
RubyMemoryControl::getRank(const physical_address_t addr) const
{
    // The rank is the flat bank index divided by banks-per-rank.
    const int rank = getBank(addr) / m_banks_per_rank;
    assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
    return rank;
}
// getRank returns an integer that is unique for each rank
// and independent of individual bank.
const int
RubyMemoryControl::getRank(int bank) const
{
    // Convert a flat bank index back to its owning rank.
    const int rank = bank / m_banks_per_rank;
    assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
    return rank;
}
// Not used!
const int
RubyMemoryControl::getChannel(const physical_address_t addr) const
{
    // Single-channel model: channel decoding is deliberately not
    // implemented; reaching this is a programming error.
    assert(false);
    return -1;
}
// Not used!
/*const int
RubyMemoryControl::getRow(const physical_address_t addr) const
{
    assert(false);
    return -1;
}*/
// queueReady determines if the head item in a bank queue
// can be issued this cycle
bool
RubyMemoryControl::queueReady(int bank)
{
    // The bank itself must be free (fixed-delay mode ignores bank
    // timing altogether).
    if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) {
        m_profiler_ptr->profileMemBankBusy();

        DPRINTF(RubyMemory, "bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
        return false;
    }

    // Optional random arbitration: refuse to issue with probability
    // m_mem_random_arbitrate percent.
    if (m_mem_random_arbitrate >= 2) {
        if ((random() % 100) < m_mem_random_arbitrate) {
            m_profiler_ptr->profileMemRandBusy();
            return false;
        }
    }

    // Fixed-delay mode skips every remaining bus/rank hazard check.
    if (m_mem_fixed_delay)
        return true;

    // Age-based batching: once the batch has aged past twice the bank
    // busy time, only requests marked "old" may issue.
    if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
        m_profiler_ptr->profileMemNotOld();
        return false;
    }

    if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
        // Another bank must have issued this same cycle.  For
        // profiling, we count this as an arb wait rather than a bus
        // wait.  This is a little inaccurate since it MIGHT have also
        // been blocked waiting for a read-write or a read-read
        // instead, but it's pretty close.
        m_profiler_ptr->profileMemArbWait(1);
        return false;
    }

    // Address/data bus still occupied by an earlier issue.
    if (m_busBusyCounter_Basic > 0) {
        m_profiler_ptr->profileMemBusBusy();
        return false;
    }

    // tFAW limit: at most ACTIVATE_PER_TFAW activates per rank within
    // the tracked window.
    int rank = getRank(bank);
    if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
        m_profiler_ptr->profileMemTfawBusy();
        return false;
    }

    // Bus turnaround: a write cannot follow a recent read too closely.
    bool write = !m_bankQueues[bank].front().m_is_mem_read;
    if (write && (m_busBusyCounter_Write > 0)) {
        m_profiler_ptr->profileMemReadWriteBusy();
        return false;
    }

    // Rank-to-rank delay: a read targeting a different rank must wait
    // for the data bus to clear.
    if (!write && (rank != m_busBusy_WhichRank)
        && (m_busBusyCounter_ReadNewRank > 0)) {
        m_profiler_ptr->profileMemDataBusBusy();
        return false;
    }

    return true;
}
// issueRefresh checks to see if this bank has a refresh scheduled
// and, if so, does the refresh and returns true
bool
RubyMemoryControl::issueRefresh(int bank)
{
    // Only refresh when one is pending and this bank is next in the
    // rotating refresh order.
    if (!m_need_refresh || (m_refresh_bank != bank))
        return false;
    // The bank must currently be idle.
    if (m_bankBusyCounter[bank] > 0)
        return false;
    // Note that m_busBusyCounter will prevent multiple issues during
    // the same cycle, as well as on different but close cycles:
    if (m_busBusyCounter_Basic > 0)
        return false;
    // A refresh counts as an activate against the tFAW limit.
    int rank = getRank(bank);
    if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW)
        return false;

    // Issue it:
    DPRINTF(RubyMemory, "Refresh bank %3x\n", bank);

    m_profiler_ptr->profileMemRefresh();
    m_need_refresh--;
    // Advance to the next bank for the following refresh.
    m_refresh_bank++;
    if (m_refresh_bank >= m_total_banks)
        m_refresh_bank = 0;
    // Occupy the bank and every bus resource for the refresh duration.
    m_bankBusyCounter[bank] = m_bank_busy_time;
    m_busBusyCounter_Basic = m_basic_bus_busy_time;
    m_busBusyCounter_Write = m_basic_bus_busy_time;
    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
    markTfaw(rank);
    return true;
}
// Mark the activate in the tFaw shift register
void
RubyMemoryControl::markTfaw(int rank)
{
    if (m_tFaw) {
        m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
        m_tfaw_count[rank]++;
    }
}
unsigned int
RubyMemoryControl::drain(DrainManager *dm)
{
    // Cancel any pending tick event so the controller goes quiet.
    DPRINTF(RubyMemory, "MemoryController drain\n");
    if (m_event.scheduled())
        deschedule(m_event);
    // Nothing further is outstanding: report drained immediately.
    return 0;
}
// wakeup:  This function is called once per memory controller clock cycle.
void
RubyMemoryControl::wakeup()
{
    DPRINTF(RubyMemory, "MemoryController wakeup\n");
    // Simulate one controller cycle's worth of work.
    executeCycle();

    m_idleCount--;
    // Reschedule for the next cycle.  A previous change commented out
    // the "m_idleCount > 0" part of this guard (so the controller now
    // ticks even when idle); the assert that remained inside the guard
    // was vacuously true and has been removed.
    if (!m_event.scheduled()) {
        schedule(m_event, clockEdge(Cycles(1)));
    }
}
/**
 * This function reads the different buffers that exist in the Ruby Memory
 * Controller, and figures out if any of the buffers hold a message that
 * contains the data for the address provided in the packet. True is returned
 * if any of the messages was read, otherwise false is returned.
 *
 * I think we should move these buffers to being message buffers, instead of
 * being lists.
 */
bool
RubyMemoryControl::functionalReadBuffers(Packet *pkt)
{
    // Scan the input queue, the response queue and every bank queue in
    // turn; stop at the first message that can satisfy the functional
    // read.
    std::list<MemoryNode>::iterator node;

    for (node = m_input_queue.begin(); node != m_input_queue.end(); ++node) {
        if (node->m_msgptr.get()->functionalRead(pkt))
            return true;
    }

    for (node = m_response_queue.begin(); node != m_response_queue.end();
         ++node) {
        if (node->m_msgptr.get()->functionalRead(pkt))
            return true;
    }

    for (uint32_t bank = 0; bank < m_total_banks; ++bank) {
        for (node = m_bankQueues[bank].begin();
             node != m_bankQueues[bank].end(); ++node) {
            if (node->m_msgptr.get()->functionalRead(pkt))
                return true;
        }
    }

    // No buffered message held the requested address.
    return false;
}
/**
 * This function reads the different buffers that exist in the Ruby Memory
 * Controller, and figures out if any of the buffers hold a message that
 * needs to functionally written with the data in the packet.
 *
 * The number of messages written is returned at the end. This is required
 * for debugging purposes.
 */
uint32_t
RubyMemoryControl::functionalWriteBuffers(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // NOTE: functional writes into the input queue and the per-bank
    // queues were disabled by a previous change ("Praveen"), but the
    // original code still walked those lists doing nothing for each
    // element.  The dead iteration is removed here; the disabled body
    // is kept below for reference:
    //
    //   Message* msg_ptr = (*it).m_msgptr.get();
    //   if (msg_ptr->functionalWrite(pkt)) {
    //       num_functional_writes++;
    //   }

    // Only the response queue is still functionally written.
    for (std::list<MemoryNode>::iterator it = m_response_queue.begin();
         it != m_response_queue.end(); ++it) {
        Message* msg_ptr = (*it).m_msgptr.get();
        if (msg_ptr->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    return num_functional_writes;
}
RubyMemoryControl *
RubyMemoryControlParams::create()
{
    // SimObject factory hook: build the controller from its parameter
    // object.
    return new RubyMemoryControl(this);
}




//-------------------------------------------------------------------------------------------------



// Added by Praveen
// Extract the inclusive bit field [small, big] from an address.
int bitSelect(physical_address_t address, int small, int big)
{
  assert(big >= small);

  if (big >= ADDRESS_WIDTH - 1) {
    // The field runs to the top of the address: no masking needed.
    return (address >> small);
  }

  // Keep bits [0, big], then drop the low `small` bits.
  // FIXME - this is slow to manipulate a 64-bit number using 32-bits
  physical_address_t keep_mask = ~((physical_address_t)~0 << (big + 1));
  physical_address_t kept = address & keep_mask;
  return (kept >> small);
}

// Using higher order bits in address to determine row
const int
RubyMemoryControl::getRow(const physical_address_t addr) const
{
    // The row index sits above the cache-line offset (6 bits for a
    // 64-byte line), the column bits and the bank bits.
    const int bank_bits = (int) floorLog2(m_total_banks);
    const int column_bits = (int) floorLog2(NUM_COLUMNS_IN_ROW);
    const int row_low_bit = 6 + column_bits + bank_bits;

    return bitSelect(addr, row_low_bit, ADDRESS_WIDTH);
}

// True when the request originated from the CPU (as opposed to an IP
// block).
bool isCPURequest(MemoryNode req)
{
    return req.IPid == CPU;
}

// True when the request targets the row currently open in this bank's
// row buffer.
bool RubyMemoryControl::isRBHit(int bank, MemoryNode req)
{
    return m_bankRowBuffer[bank] == getRow(req.m_addr);
}

// Issue a memory request: Activate the bank, reserve the address and
// data buses, and queue the request for return to the requesting
// processor after a fixed latency.
void
RubyMemoryControl::issueRequest(int bank)
{
	  // Select exactly one request from this bank's queue into
	  // tempQueue, then charge bank/bus timing and hand it onward.
	  list<MemoryNode>::iterator it;
	  list<MemoryNode> tempQueue;
	  bool foundHit = false;
	  bool foundCPU = false;

	  // Pass 1: when CPU prioritization is on, the oldest CPU request
	  // wins regardless of row-buffer state.
	  if(m_mem_prioritize_cpu)
	  {
//		  if (m_openpage_policy)
		  {  // FR-FCFS
		    for(it = m_bankQueues[bank].begin(); it != m_bankQueues[bank].end(); ++it) {  // looking for rowbuffer hit
		        if (isCPURequest(*it)) {
		            tempQueue.push_front(*it);
		            it = m_bankQueues[bank].erase(it);
		            foundCPU = true;
		            break;
		        }
		    }
		  }
	  }

	  // Pass 2: FR-FCFS (prefer a row-buffer hit, else the oldest
	  // request) or plain FCFS, depending on configuration.
	  if (!foundCPU) {
		  if (m_frfcfs_policy) {  // FR-FCFS
			for(it = m_bankQueues[bank].begin(); it != m_bankQueues[bank].end(); ++it) {  // looking for rowbuffer hit
				if (isRBHit(bank, *it)) {
					tempQueue.push_front(*it);
					it = m_bankQueues[bank].erase(it);
					foundHit = true;
					break;
				}
			}

			if(!foundHit) { // picking up oldest row buffer conflict
				tempQueue.push_front(m_bankQueues[bank].front());
				m_bankQueues[bank].pop_front();
			}
		  }
		  else {  // FCFS  for  requests if m_mem_prioritize_cpu is set and no CPU reqs found
			  tempQueue.push_front(m_bankQueues[bank].front());
			  m_bankQueues[bank].pop_front();
		  }
	  }

	  //MemoryNode req = m_bankQueues[bank].front();
      //m_bankQueues[bank].pop_front();

	  // NOTE(review): front() is evaluated before the empty() check;
	  // this relies on the selection passes above always having moved
	  // exactly one request into tempQueue -- confirm.
	  MemoryNode req = tempQueue.front();
	  if (!tempQueue.empty())
	      tempQueue.pop_front();

	int rank = getRank(bank);
	int currRowNo = getRow(req.m_addr);
	int latency;

	// Profile bank-level parallelism: count currently-busy banks.
	//if (m_mem_controller_id == 1)
	{
		int counter = 0;
		for(int i=0; i<m_total_banks; i++)
			if(m_bankBusyCounter[i] > 0)
				counter++;

		m_profiler_ptr->profileMemBLP(counter, 0);

		// cout<<"-- Bank: "<<bank<<"Addr: " << hex << req.m_addr << dec << " cycle count: " << m_cycleCount <<"BLP: " << counter <<endl;
	}

	  // Added by Praveen
	  // Charge bank occupancy and pick the request latency according
	  // to the configured row-buffer policy.
	  if (m_openpage_policy) { // Open Page policy is enabled
	      if (m_bankRowBuffer[bank] == currRowNo) {  //Row Buffer Hit
	          //g_system_ptr->getProfiler()->profileMemRowBufferHit(m_version, bank, core_id);

	          m_bankBusyCounter[bank] = m_mem_rowbuffer_hit_latency;
	          latency = m_mem_rowbuffer_hit_latency;
	          m_profiler_ptr->profileMemRBHit();
	      }
	      else { //Row Buffer Conflict
	          Address address = Address(req.m_addr);
	          address.makePageAddress();
	          //g_system_ptr->getProfiler()->profileMemRowBufferConflict(m_version, bank, core_id, address.getAddress());

	          m_bankBusyCounter[bank] = m_mem_rowbuffer_conflict_latency;
	          latency = m_mem_rowbuffer_conflict_latency;
	          m_profiler_ptr->profileMemRBConflict();
	          m_bankRowBuffer[bank] = currRowNo; // Close Row
	      }
	  }
	  else { // Closed Page Policy
	      m_bankBusyCounter[bank] = m_bank_busy_time;
	      latency = m_mem_ctl_latency;
	  }


    DPRINTF(RubyMemory, "Mem issue request%7d: %#08x %c "
            "bank=%3x sched %c\n", req.m_msg_counter, req.m_addr,
            req.m_is_mem_read? 'R':'W',
            bank, m_event.scheduled() ? 'Y':'N');

//    // Praveen: msgptr will be null for IP mem requests

    // Queueing delay seen by this request, in controller cycles,
    // clamped to [1, 1000000).  The upper clamp presumably guards
    // against bogus/uninitialized timestamps -- TODO confirm.
    int  delay = (curCycle()).getInt() - (req.m_time).getInt();
    if (delay <= 0)
    	delay = 1;
    if (delay >= 1000000)
       delay = 1;

    // Per-controller aggregate delay and latency statistics.
    if(m_mem_controller_id == 0)
    	total_mem_ctrl0_delay+= delay;
    else
    	total_mem_ctrl1_delay+= delay;

    if(m_mem_controller_id == 0)
        	total_mem_ctrl0_latency+= latency;
        else
        	total_mem_ctrl1_latency+= latency;

//    cout << delay << endl;
    m_profiler_ptr->profileMemLatency(latency, delay);

//    cout<<"\n\n "<<m_mem_controller_id<<" "<<delay <<" "<<latency<<" "<<endl;

    if (req.m_msgptr) {  // don't enqueue L3 writebacks
        enqueueToDirectory(req, Cycles(latency + m_mem_fixed_delay));  //praveen changed
	}
    else  //Nachi added
    {
    	// Trace-driven (IP) request: record when it should leave the
    	// outstanding-request table instead of replying to a directory.
    	//m_popat = currentcycle + m_mem_ctl_latency + m_mem_fixed_delay;
    	int index = search_addr(Address(req.m_addr));
    	if(index >= 0)
    		outstanding_mem_requests[index].pop_at_time = m_cycleCount + latency + m_mem_fixed_delay;

    	// Final framebuffer write of a frame: count the frame as done.
    	if (req.getIPIsLast() == true && req.getIPid() == FB)
		{
			// std::cout<<"JOOMLA : End of Frame\n";
    		cpu_frames_displayed++;
    		cout<<"Frame "<<cpu_frames_displayed<<" written to memory at cycle"<<current_cycle<<endl<<endl;
    		cpu_frame_id_last_written++;
		}

    	//FIXXX Instead of 0 above, find for the address that was returned from memory here, and use that index
//
//    	if(!outstanding_cpu_requests.empty())
//        	outstanding_cpu_requests.pop_front();
        //cout<<"Front is: "<< outstanding_cpu_requests.front()<<" Number of reqs in Q:"<<outstanding_cpu_requests.size()<<endl;
    }

    // Reserve bus resources: a read additionally delays a following
    // write (bus turnaround) and a read to a different rank.
    m_oldRequest[bank] = 0;
    markTfaw(rank);
    // m_bankBusyCounter[bank] = m_bank_busy_time;
    m_busBusy_WhichRank = rank;
    if (req.m_is_mem_read) {
        m_profiler_ptr->profileMemRead();
        m_busBusyCounter_Basic = m_basic_bus_busy_time;
        m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
        m_busBusyCounter_ReadNewRank =
            m_basic_bus_busy_time + m_rank_rank_delay;
    } else {
        m_profiler_ptr->profileMemWrite();
        m_busBusyCounter_Basic = m_basic_bus_busy_time;
        m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
    }


}

// executeCycle:  This function is called once per memory clock cycle
// to simulate all the periodic hardware.
void
RubyMemoryControl::executeCycle()
{
    // Drive the shared front-end, then the per-controller trace logic:
    // controller 0 runs the system (master) controller, controller 1
    // the slave controller.  Both print interval stats periodically.
    common_controller_to_mcs();// cout << "execute Cycle" << endl;

	if(m_mem_controller_id == 0)
	{
	    if (m_cycleCount % MEM_REQ_PERIOD == 0)
		{
			cout<<"\n MC0:"<<endl;
			interval_stat_printer();
		}
		system_controller();
	}

	if(m_mem_controller_id == 1)
	{
	    if (m_cycleCount % MEM_REQ_PERIOD == 0)
	    {
	    	cout<<"\n MC1:"<<endl;
	    	interval_stat_printer();
	    }
		slave_controller();
	}

	int counter = 0;
    // Keep track of time by counting down the busy counters:
    for (int bank=0; bank < m_total_banks; bank++) {
        if (m_bankBusyCounter[bank] > 0)
        {
        	m_bankBusyCounter[bank]--;
        	counter++;
        }
    }

    if (m_busBusyCounter_Write > 0)
        m_busBusyCounter_Write--;
    if (m_busBusyCounter_ReadNewRank > 0)
        m_busBusyCounter_ReadNewRank--;
    if (m_busBusyCounter_Basic > 0)
        m_busBusyCounter_Basic--;

    // Count down the tFAW shift registers:
    for (int rank=0; rank < m_total_ranks; rank++) {
        if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
        m_tfaw_shift[rank] >>= 1;
    }

    // After time period expires, latch an indication that we need a refresh.
    // Disable refresh if in mem_fixed_delay mode.
    if (!m_mem_fixed_delay) m_refresh_count--;
    if (m_refresh_count == 0) {
        m_refresh_count = m_refresh_period_system;

        // Are we overrunning our ability to refresh?
        assert(m_need_refresh < 10);
        m_need_refresh++;
    }

    // If this batch of requests is all done, make a new batch:
    m_ageCounter++;
    int anyOld = 0;
    for (int bank=0; bank < m_total_banks; bank++) {
        anyOld |= m_oldRequest[bank];
    }
    if (!anyOld) {
        // Start a new batch: everything currently queued becomes "old".
        for (int bank=0; bank < m_total_banks; bank++) {
            if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
        }
        m_ageCounter = 0;
    }

    // If randomness desired, re-randomize round-robin position each cycle
    if (m_mem_random_arbitrate) {
        m_roundRobin = random() % m_total_banks;
    }

    // For each channel, scan round-robin, and pick an old, ready
    // request and issue it.  Treat a refresh request as if it were at
    // the head of its bank queue.  After we issue something, keep
    // scanning the queues just to gather statistics about how many
    // are waiting.  If in mem_fixed_delay mode, we can issue more
    // than one request per cycle.
    int queueHeads = 0;
    int banksIssued = 0;
    for (int i = 0; i < m_total_banks; i++) {
        m_roundRobin++;
        if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
        issueRefresh(m_roundRobin);
        int qs = m_bankQueues[m_roundRobin].size();
        if (qs > 1) {
            m_profiler_ptr->profileMemBankQ(qs-1);
        }
        if (qs > 0) {
            // we're not idle if anything is queued
            m_idleCount = IDLECOUNT_MAX_VALUE;
            queueHeads++;
            if (queueReady(m_roundRobin)) {
            	issueRequest(m_roundRobin);
				//DEBUG - Nachi
				if(m_mem_controller_id == 1)
				{
					// cout<<"-- Bank: "<<m_roundRobin<<" cycle count: " << m_cycleCount <<endl;
				}
                banksIssued++;
                if (m_mem_fixed_delay) {
                    m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay);
                }
            }
        }
    }

    // memWaitCycles is a redundant catch-all for the specific
    // counters in queueReady
    m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued);

    // Check input queue and move anything to bank queues if not full.
    // Since this is done here at the end of the cycle, there will
    // always be at least one cycle of latency in the bank queue.  We
    // deliberately move at most one request per cycle (to simulate
    // typical hardware).  Note that if one bank queue fills up, other
    // requests can get stuck behind it here.

    // In two-queue mode the CPU and IP input queues are drained
    // independently (one request each per cycle); otherwise the single
    // shared queue is drained.
    if(m_mem_two_queues)
	{
			if (!m_input_queue_cpu.empty()) {
				// we're not idle if anything is pending
				m_idleCount = IDLECOUNT_MAX_VALUE;
				MemoryNode req = m_input_queue_cpu.front();
				int bank = getBank(req.m_addr);

				if (m_bankQueues[bank].size() < m_bank_queue_size) {
					m_input_queue_cpu.pop_front();
					m_bankQueues[bank].push_back(req);
				}
				m_profiler_ptr->profileMemInputQ(m_input_queue_cpu.size());
			}
			if (!m_input_queue_ip.empty()) {
				// we're not idle if anything is pending

				m_idleCount = IDLECOUNT_MAX_VALUE;
				MemoryNode req = m_input_queue_ip.front();
				int bank = getBank(req.m_addr);


				if (m_bankQueues[bank].size() < m_bank_queue_size) {
					m_input_queue_ip.pop_front();
					m_bankQueues[bank].push_back(req);
				}
				m_profiler_ptr->profileMemInputQ(m_input_queue_ip.size());
			}
	}
    else
    {
		if (!m_input_queue.empty()) {
			// we're not idle if anything is pending
			m_idleCount = IDLECOUNT_MAX_VALUE;
			MemoryNode req = m_input_queue.front();
			int bank = getBank(req.m_addr);

//			if(m_mem_controller_id == 1){
//				cout << hex << req.m_addr << dec << "  " << bank << " " <<endl;
//			}

			if (m_bankQueues[bank].size() < m_bank_queue_size) {
				m_input_queue.pop_front();
				m_bankQueues[bank].push_back(req);

				//DEBUG - Nachi
				// if(m_mem_controller_id == 1)
				// {
					// cout<<"-- Bank: "<<bank<<" Addr: "<<req.m_addr<<"  " << req.getIPid()<< "  "<< m_cycleCount <<endl;
				// }
			}
			m_profiler_ptr->profileMemInputQ(m_input_queue.size());
		}
    }
}

void RubyMemoryControl::system_controller()
{ // Called by MC0 only

   // Praveen: enqueue ip memory requests here

    current_cycle++;
    //initializing early because of use of "goto" after it
    std::vector<mem_transaction>::iterator it;
	long instr_count_of_front_mem_request= 0;

    if(!cpu_trace_enable)
    	goto gpu_execute;

    it = search_ready(m_cycleCount);
	//Avg Outstanding Req Q.
	m_avgOutStandQSize += outstanding_mem_requests.size();

    if(fps_stall_cycles > 0)
    {
    	fps_stall_cycles--;
    	goto gpu_execute;
    }


    if(it!=outstanding_mem_requests.end())
        	it->ready_to_commit = true;

	if (!outstanding_mem_requests.empty())
	{
		instr_count_of_front_mem_request = outstanding_mem_requests.front().enter_instruction;

		if(outstanding_mem_requests.front().ready_to_commit == true)
			outstanding_mem_requests.erase(outstanding_mem_requests.begin());
	}

	//(I saw junk total_insn value each time. // So adding this extra condition.
	if(instr_count_of_front_mem_request == 0)
	{
		m_cyclesToSkip--;   // committing instructions
		m_cyclesToSkip--;   // committing instructions  // 2 - issue
	}
	else // there is some load/store in progress. When it was issued, its istruction count (id) was outstanding_cpu_requests.front().
		//That plus upto ROB size number of instructions can be allowed to progress
	{
		if(total_insns < instr_count_of_front_mem_request + MAX_OUTSTANDING_INSTRS)
		{
			m_cyclesToSkip--;   // committing instructions
			m_cyclesToSkip--;   // committing instructions  // 2 - issue
		}
		else
		{
			// THis has to be enabled when --> if instructions have to committed when a number like 5000 instructions is read..
/*			if(insns - m_cyclesToSkip*core_to_mem_freq < MAX_OUTSTANDING_INSTRS)
			{
				m_cyclesToSkip--;   // committing instructions
			}
			else*/
			m_core_stalls++;//stall till the front load/store is comitted
		}
	}


    // While enough CPU cycles skipped from the trace read AND when the memory controller queue size is
    // lesser than the MEM_MAX_INPUT_Q_SIZE defined by Praveen&Nachi, read the next trace line and do corresponding
    // operations
    if(1 && m_cyclesToSkip <= 0 && m_input_queue.size() <= MEM_MAX_INPUT_Q_SIZE && m_input_queue_cpu.size() <= MEM_MAX_INPUT_Q_SIZE)
    {
       //std::cout<<"\n Current total_insn:"<<total_insns<<" Front element in Q: "<<outstanding_cpu_requests.front()<<" Read from file.."<<endl;
       //std::cout<<"\n Input queue length :"<< m_input_queue.size()<<endl;
       string op;
       string dummy;
       physical_address_t addr;
       uint32_t size;
       MemoryMsg msg(curTick());
       Address address(0);
       Cycles cycles(0);
       MachineID machineID;
       //We are assigning each request to be of a DMA type, be default. We change to READ, WRITE later for CPU requests.
       machineID.type = MachineType_DMA;
       machineID.num = 0;

       msg.setType(MemoryRequestType_MEMORY_READ);
       msg.setMessageSize(MessageSizeType_Request_Control);
       msg.setSender(machineID);
	   bool issued = false;

	   // if (m_reqCount == 0)
			issued = cpu_driver_execute();

	   if (issued)
		   goto display_execute;

       if(m_reqCount > 0)   //If there are more requests tpo be sent with the previous IP call.. (An IP call has 100's of requests to be injected in consecutive cycles
       {
           		address.setAddress(m_reqAddr);
           		msg.setAddress(address);
           		// cout << msg << endl;
           		//outstanding_cpu_requests.push_back(total_insns);
       			msg.setIPid(ip_master);

           		if(m_reqCount == 1 && ip_master == FB)
           			msg.setIPIsLast(FB);

           		//enqueue(msg, cycles);
           		// enqueue_to_slave(msg);
           		if(m_separate_mem)
				   enqueue_to_slave(msg);
			   else
			   {
				   m_rand_enqueue = rand();
				   if(m_rand_enqueue < RAND_MAX/2)
				   {
					   enqueue(msg, cycles);
				   }
				   else
					   enqueue_to_slave(msg);
			   }
           		m_reqAddr += 64;
           		m_reqCount--;
       }
	   else
	   {
		   // cout << msg << endl;
		   em_trace_file>>op;
		   lines_read_cpu++;
		   // cout << op << endl;
		   if (op == "CPU")
		   { // CPU line
			   // em_trace_file>>dummy;
			   em_trace_file>>insns;
			   // cpu freq / memory freq = 4
			   m_cyclesToSkip = ceil((double)insns/core_to_mem_freq);
			   total_insns+=insns;
			   // m_cyclesToSkip = 1;
			   //cout << op << "  " << insns << endl;
		   }
		   else if (op == "MMU_ld")
		   {
			   em_trace_file>>hex>>addr>>dec;
			   em_trace_file>>size;

			   msg.setType(MemoryRequestType_MEMORY_READ);
			   address.setAddress(addr);
			   msg.setAddress(address);
			   ip_master = CPU;
			   msg.setIPid(ip_master);


			   if(m_separate_mem)
			   {
				   enqueue(msg, cycles);
				   m_periodicCount[ip_master] += 64;
			   }
			   else
			   {
				   m_rand_enqueue = rand();
				   if(m_rand_enqueue < RAND_MAX/2)
				   {
					   enqueue(msg, cycles);
					   m_periodicCount[ip_master] += 64;
				   }
				   else
					   enqueue_to_slave(msg);
			   }
			   //Insert into outstanding memory request queue. // FIXXX : Once finished, this has to be removed from the queue.
			   //outstanding_mem_requests.push_back(address);
			   //outstanding_cpu_requests.push_back(total_insns);
			   mem_transaction tmp_transaction(address,m_cycleCount,-1,total_insns);
			   int index = search_addr(address);
			   if(index == -1 || index == -2)
				   outstanding_mem_requests.push_back(tmp_transaction);
			   //cout<<"Added to Q: "<< total_insns <<" Number of reqs in Q:"<<outstanding_cpu_requests.size()<<endl;

			   //cout << op << "  " << hex << addr << dec << "  " << size << endl;
		   }
		   else if (op == "MMU_st")
		   {
			   em_trace_file>>hex>>addr>>dec;
			   em_trace_file>>size;

			   msg.setType(MemoryRequestType_MEMORY_WB);
			   address.setAddress(addr);
			   msg.setAddress(address);
//			   enqueue(msg, cycles);
			   ip_master = CPU;
			   msg.setIPid(ip_master);

          		if(m_separate_mem)
          		{
          			enqueue(msg, cycles);
					m_periodicCount[ip_master] += 64;
          		}
			   else
			   {
				   m_rand_enqueue = rand();
				   if(m_rand_enqueue < RAND_MAX/2)
				   {
					   enqueue(msg, cycles);
					   m_periodicCount[ip_master] += 64;
				   }
				   else
					   enqueue_to_slave(msg);
			   }
			   //Insert into outstanding memory request queue. // FIXXX : Once finished, this has to be removed from the queue.
			   //outstanding_mem_requests.push_back(address);
			   //outstanding_cpu_requests.push_back(total_insns);
			   mem_transaction tmp_transaction(address,m_cycleCount,-1,total_insns);
			   int index = search_addr(address);
   			   if(index == -1 || index == -2)
   				   outstanding_mem_requests.push_back(tmp_transaction);
			   //cout<<"Added to Q: "<< total_insns <<" Number of reqs in Q:"<<outstanding_cpu_requests.size()<<endl;

			   //cout << op << "  " << hex << addr << dec << "  " << size << endl;
		   }
		   else if (op == "END")
		   {
				cout << "Read END from trace file" << endl;
				true_fetch = false;

				cout << "Memory Controller Cycles: " << m_cycleCount << endl;
				for (int i=0; i<IPS; i++)
					cout << i << "\t" << m_periodicCount[i] << endl;

				outstanding_mem_requests.clear();
		   }
		   else if (op == "CPUSummary")
		   {
			   em_trace_file>>dummy;
			   em_trace_file>>dummy;
			   em_trace_file>>dummy;
			   em_trace_file>>dummy;
		   }
		   else
		   { // IP line
			   em_trace_file>>hex>>addr>>dec;
			   em_trace_file>>size;
			  
			//	if(0)		 //To enable if we need to remove all IP injections
			{
				 m_reqCount = ceil((double) size / 64);           
				 // cout<<"IP "<<op<<"  "<<hex<<addr<<dec<<"  "<<size<<"  "<<m_reqCount<<endl;


				if (op.find("FB-UP") != string::npos)
				{
					// std::cout<<"StartTick = "<< start_tick<<endl;
					// std::cout<<"CurTick = "<< curTick()<<endl;
					long long diff = (curTick() - start_tick);
					long double time_micro =  (diff/ (1000*1000*1000L));
					avg_fps = ((double)(cpu_frames_displayed+frames_dropped_cpu_faster)/time_micro)*1000;

					if(1 && op.find("UP") != string::npos)
					{
						cpu_frame_being_processed++;
						std::cout<<"SHLD: Current FPS = "<<avg_fps<<endl;
						std::cout<<"SHLD: Frames read from trace = "<<cpu_frame_being_processed<<endl;

						/*if(avg_fps > 59.00)
						{
							double supposed_tobe_time = (cpu_frames_displayed/59.0);
							cout << "Supposed time: " << supposed_tobe_time << endl;
							cout << "Current cycles: " << current_cycle << endl;
							cout << "Current tick: " << curTick() << endl;
							fps_stall_cycles = supposed_tobe_time * ((current_cycle * 1000.0 * 1000) / diff) * 1000.0 * 1000 - current_cycle;
							if(fps_stall_cycles < 0)
								fps_stall_cycles =0;

							total_fps_stall_cycles += fps_stall_cycles;
							cout << "SHLD: Stalling CPU for " << fps_stall_cycles << " cycles to reduce FPS" << endl;

	//						if (fps_stall_cycles < 0)
	//					        m_periodicCount[FB] += m_reqCount * 64;
						}*/

					}

//					else
//					m_periodicCount[FB] += m_reqCount * 64;

					//Tiger - Run-2 Setup
					if(0 && op.find("UP") != string::npos)
					{
						//std::size_t found = em_trace_file_name.find("camera-cpu");
						if (em_trace_file_name.find("camera")!=std::string::npos) //then substring was found
							fps_stall_cycles=(long) ((double)416438589/81.0);//+(416438589*10.0/100);
						else if (em_trace_file_name.find("youtube")!=std::string::npos) //then substring was found
							fps_stall_cycles=(long)((double)197199616/45.0);
						else if (em_trace_file_name.find("browser")!=std::string::npos) //then substring was found
							fps_stall_cycles=(long)((double)935803290/188.0);
						/*else if (em_trace_file_name.find("camera-cpu")!=std::string::npos) //then substring was found
							fps_stall_cycles=416438589/81;
						else if (em_trace_file_name.find("antutu-gfx-cpu")!=std::string::npos) //then substring was found
							fps_stall_cycles=416438589/81;*/
						if(cpu_frame_being_processed < display_frame_id)
							fps_stall_cycles=0;

						cout<<"Stalling the frame " <<cpu_frame_being_processed<< "for "<<fps_stall_cycles<< " cycles!"<<endl;

					}

				   if (addr == 0)
					   addr = 540595840;
					ip_master = FB;
					if (m_shadowcpuIPWriteCount[FB] > 0) {
						frames_dropped_cpu_ipmem[FB]++;
						m_cpuIPWriteCount[FB] = 0;
					}
					else
						m_cpuIPWriteCount[FB] += m_reqCount;
				}
				else if (op.find("NW") != string::npos)
				{
//				   m_periodicCount[NW] += m_reqCount * 64;
					msg.setType(MemoryRequestType_MEMORY_WB);
				   if (addr == 0)
					   addr = 340595840;
				   ip_master = NW;
					if (m_shadowcpuIPReadCount[NW] > 0) {
						frames_dropped_cpu_ipmem[NW]++;
						m_cpuIPReadCount[NW] = 0;
					}
					else
						m_cpuIPReadCount[NW] += m_reqCount;
				}
				else if (op.find("SND") != string::npos)
				{
//				   m_periodicCount[SND] += m_reqCount * 64;
				   if (addr == 0)
					   addr = 280595840;
				   ip_master=SND;
				   if (m_shadowcpuIPWriteCount[SND] > 0) {
					   frames_dropped_cpu_ipmem[SND]++;
					   m_cpuIPWriteCount[SND] = 0;
				   }
				   else
					   m_cpuIPWriteCount[SND] += m_reqCount;
				}
				else if (op.find("CAM") != string::npos)
				{
//				   m_periodicCount[CAM] += m_reqCount * 64;
					msg.setType(MemoryRequestType_MEMORY_WB);
				   if (addr == 0)
					   addr = 140595840;
				   ip_master=CAM;
				   if (m_shadowcpuIPReadCount[CAM] > 0) {
					   frames_dropped_cpu_ipmem[CAM]++;
					   m_cpuIPReadCount[CAM] = 0;
				   }
				   else
				       m_cpuIPReadCount[CAM] += m_reqCount;
				}
				else if (op.find("VD") != string::npos)
				{
//					m_periodicCount[VD] += m_reqCount * 64;
					 ip_master=VD;
					 if (m_shadowcpuIPWriteCount[VD] > 0) {
						 frames_dropped_cpu_ipmem[VD]++;
						 m_cpuIPWriteCount[VD] = 0;
					 }
					 else
						 m_cpuIPWriteCount[VD] += m_reqCount;
				}
				else {
					msg.setType(MemoryRequestType_MEMORY_WB);
					if (addr == 0)
						addr = 340595840;
					ip_master = FB;
				}


			   //for(int i=0; i<m_reqCount; i++) {
			   address.setAddress(addr);
			   msg.setAddress(address);
			   msg.setIPid(ip_master);
			   //enqueue(msg, cycles);
			   if(m_separate_mem)
				   enqueue_to_slave(msg);
			   else
			   {
				   m_rand_enqueue = rand();
				   if(m_rand_enqueue < RAND_MAX/2)
				   {
					   enqueue(msg, cycles);
					   m_periodicCount[ip_master] += m_reqCount * 64;
				   }
				   else
					   enqueue_to_slave(msg);
			   }

			   // cout << msg << endl;
				m_reqCount--;
				m_reqAddr = addr + 64;
			   //Insert into outstanding memory request queue. // FIXXX : Once finished, this has to be removed from the queue.
			   //outstanding_mem_requests.push_back(address);
			   //outstanding_cpu_requests.push_back(total_insns);
			   //cout<<"Added to Q: "<< total_insns <<" Number of reqs in Q:"<<outstanding_cpu_requests.size()<<endl;
			   // }
			  }
			}
       }
    }
    else
    {
        // cout << "Skipped Cycle" << endl;
    }

    gpu_execute:

    if(gpu_trace_enable) { // GPU was enabled. So, driver executed / trace reading required.
        gpu_driver_execute();
    }

    display_execute:
    display_driver_execute();

    // End Praveen
}


void RubyMemoryControl::common_controller_to_mcs()
{
    // Per-tick bookkeeping shared by both memory controllers:
    // advance this controller's local cycle counter, then refresh the
    // bank-level-parallelism / queue-occupancy statistics.
    ++m_cycleCount;   // cycle counter local to this controller object
    BLP_Counter();    // BLP counting and average memory-queue size stats
}

void RubyMemoryControl::slave_controller()
{//Called by MC1
	Cycles cycles(0);
	if(msg_to_slave_queue.size() > 0)
	{
		if(m_mem_two_queues)
		{
			/* if(msg_to_slave_queue.front().getIPid() == CPU)// CPU and IP memory is separated. CPU requests should not be in slave MC
			{
				printf("\n\n\n\n Separate mem was present, and request was a  CPU request coming to slave controller!!!\n\n");
					assert(msg_to_slave_queue.front().getIPid() != CPU);
			} */
			if(msg_to_slave_queue.front().getIPid() == CPU && m_input_queue_cpu.size() <= MEM_MAX_INPUT_Q_SIZE)
			{
				enqueue(msg_to_slave_queue.front(), cycles);
			}

			else if(msg_to_slave_queue.front().getIPid() != CPU && m_input_queue_ip.size() <= MEM_MAX_INPUT_Q_SIZE) //its an an request)
			{
				enqueue(msg_to_slave_queue.front(), cycles);
			}
			else return;
		}
		else
		{
			if(m_input_queue.size() <= MEM_MAX_INPUT_Q_SIZE) //its an an request
				enqueue(msg_to_slave_queue.front(), cycles);
			else
				return;
		}

		/* if(!mem_separate_mem && m_mem_two_queues)  // we have 2 controllers, but, any CPU/IP request can go to any controller, but have 2 queues.
		{
			if(msg_to_slave_queue.front().getIPid() == CPU && m_input_queue_cpu.size() <= MEM_MAX_INPUT_Q_SIZE)
			{
				enqueue(msg_to_slave_queue.front(), cycles);
			}
			else if(msg_to_slave_queue.front().getIPid() != CPU && m_input_queue_ip.size() <= MEM_MAX_INPUT_Q_SIZE) //its an an request)
			{
				enqueue(msg_to_slave_queue.front(), cycles);
			}
			else return;

		}

		if(!mem_separate_mem && !m_mem_two_queues) // we have 2 controllers, any CPU/IP request can go to any controller, and have only one queue
		{
			if(m_input_queue.size() <= MEM_MAX_INPUT_Q_SIZE)
				enqueue(msg_to_slave_queue.front(), cycles);
			else return;
		} */
		m_periodicCount[msg_to_slave_queue.front().getIPid()] += 64;
		msg_to_slave_queue.pop_front();
	}
}
