/* 
 * dram_sched.cc
 *
 * Copyright (c) 2009 by Tor M. Aamodt, Wilson W. L. Fung, Ali Bakhoda, 
 * George L. Yuan and the 
 * University of British Columbia
 * Vancouver, BC  V6T 1Z4
 * All Rights Reserved.
 * 
 * THIS IS A LEGAL DOCUMENT BY DOWNLOADING GPGPU-SIM, YOU ARE AGREEING TO THESE
 * TERMS AND CONDITIONS.
 * 
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * 
 * NOTE: The files libcuda/cuda_runtime_api.c and src/cuda-sim/cuda-math.h
 * are derived from the CUDA Toolset available from http://www.nvidia.com/cuda
 * (property of NVIDIA).  The files benchmarks/BlackScholes/ and 
 * benchmarks/template/ are derived from the CUDA SDK available from 
 * http://www.nvidia.com/cuda (also property of NVIDIA).  The files from 
 * src/intersim/ are derived from Booksim (a simulator provided with the 
 * textbook "Principles and Practices of Interconnection Networks" available 
 * from http://cva.stanford.edu/books/ppin/). As such, those files are bound by 
 * the corresponding legal terms and conditions set forth separately (original 
 * copyright notices are left in files from these sources and where we have 
 * modified a file our copyright notice appears before the original copyright 
 * notice).  
 * 
 * Using this version of GPGPU-Sim requires a complete installation of CUDA 
 * which is distributed seperately by NVIDIA under separate terms and 
 * conditions.  To use this version of GPGPU-Sim with OpenCL requires a
 * recent version of NVIDIA's drivers which support OpenCL.
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * 
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * 
 * 3. Neither the name of the University of British Columbia nor the names of
 * its contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 * 
 * 4. This version of GPGPU-SIM is distributed freely for non-commercial use only.  
 *  
 * 5. No nonprofit user may place any restrictions on the use of this software,
 * including as modified by the user, by any other authorized user.
 * 
 * 6. GPGPU-SIM was developed primarily by Tor M. Aamodt, Wilson W. L. Fung, 
 * Ali Bakhoda, George L. Yuan, at the University of British Columbia, 
 * Vancouver, BC V6T 1Z4
 */

#include "dram_sched.h"
#include "gpu-misc.h"
#include "gpu-sim.h"
#include "../util.h"
#include<iostream>
#include <sstream>
#include<stdlib.h>
#include<stdio.h>

extern unsigned long long  gpu_sim_cycle;
extern signed long long gpu_tot_sim_cycle;
extern unsigned max_mrq_latency;
extern unsigned long long total_mrq_latency;
extern unsigned long long num_mrq;
extern unsigned mrq_lat_table[32];
extern int gpgpu_memlatency_stat;
extern int gpgpu_dram_sched_queue_size;
extern unsigned int **concurrent_row_access; //concurrent_row_access[dram chip id][bank id]
extern unsigned int **row_access; //row_access[dram chip id][bank id]
extern unsigned int **num_activates; //num_activates[dram chip id][bank id]
extern unsigned int **max_conc_access2samerow; //max_conc_access2samerow[dram chip id][bank id]
extern unsigned int **min_conc_access2samerow; //min_conc_access2samerow[dram chip id][bank id]
extern unsigned int **max_servicetime2samerow;
extern unsigned int **dram_se_prefetch;
extern unsigned int **dram_se_actual;
extern unsigned int **dram_se_original;
extern unsigned int **prefetch_stopper;
extern unsigned int **prefetch_stopper_same_row;
extern unsigned int **curr_bank_requests;
extern int l2_prefetch_all;
extern int gpu_threshold;
extern int gpu_sched_size;
extern int gpu_prefetch_degree;
extern int gpu_prefetch_degree_lower;
extern int gpu_prefetch_degree_higher;
extern int gpu_prefetch_stopper;
extern int gpu_avg;

ideal_dram_scheduler::ideal_dram_scheduler( dram_t *dm )
{
   // Build per-bank scheduling state for one DRAM chip:
   //   m_queue[b]         : FIFO of pending demand requests for bank b (newest at front)
   //   m_bins[b][row]     : iterators into m_queue[b] grouped by row, so every
   //                        request to the open row can be drained first
   //   m_queue_remain /
   //   m_bins_remain      : same structure, but for generated prefetch requests
   //   m_last_row[b]      : pointer to the bin (row group) currently being serviced
   m_num_pending = 0;
   m_dram = dm;
   m_queue = new std::list<dram_req_t*>[dm->nbk];
   m_queue_remain = new std::list<dram_req_t*>[dm->nbk];
   m_bins = new std::map<unsigned,std::list<std::list<dram_req_t*>::iterator> >[ dm->nbk ];
   m_bins_remain = new std::map<unsigned,std::list<std::list<dram_req_t*>::iterator> >[ dm->nbk ];
   m_last_row = new std::list<std::list<dram_req_t*>::iterator>*[ dm->nbk ];
   // Per-bank row-timing statistics (cycles a row stays open, etc.).
   curr_row_service_time = new unsigned[dm->nbk];
   row_service_timestamp = new unsigned[dm->nbk];
   // NOTE(review): curr_bank_requests is declared at the top of this file as
   // unsigned int**, but it is allocated here as a flat per-bank array and
   // indexed 1-D in schedule() -- confirm the extern declaration is wrong.
   curr_bank_requests = new unsigned[dm->nbk]; 
 
   // Zero-initialize all per-bank state.
   for ( unsigned i=0; i < dm->nbk; i++ ) {
      m_queue[i].clear(); 
      m_queue_remain[i].clear();
      m_bins[i].clear();
      m_bins_remain[i].clear();
      m_last_row[i] = NULL;
      curr_row_service_time[i] = 0;
      row_service_timestamp[i] = 0;
      prefetch_necessary[i] = 0;
      curr_bank_requests[i] = 0;
   }
}

// Enqueue one demand request: append it to its bank's FIFO and index it
// by row so later row hits can be located without scanning the queue.
void ideal_dram_scheduler::add_req( dram_req_t *req )
{
   ++m_num_pending;

   // Newest requests live at the head of the per-bank list...
   std::list<dram_req_t*> &bank_queue = m_queue[req->bk];
   bank_queue.push_front(req);
   // ...and the per-row bin records an iterator to that head element.
   m_bins[req->bk][req->row].push_front( bank_queue.begin() );
}

// Per-activate bookkeeping: record how long the previous row stayed open
// and how many concurrent accesses it serviced, then reset the counters
// and count one more activate for this bank.
inline void ideal_dram_scheduler::data_collection(unsigned int bank)
{
   const unsigned chip = m_dram->id;

   // Service time of the row that is closing = cycles since it was opened.
   if (gpu_sim_cycle > row_service_timestamp[bank]) {
      const unsigned service = gpu_sim_cycle - row_service_timestamp[bank];
      curr_row_service_time[bank] = service;
      if (service > max_servicetime2samerow[chip][bank])
         max_servicetime2samerow[chip][bank] = service;
   }
   curr_row_service_time[bank] = 0;
   row_service_timestamp[bank] = gpu_sim_cycle;

   // Track max/min number of back-to-back accesses to a single row.
   const unsigned conc = concurrent_row_access[chip][bank];
   if (conc > max_conc_access2samerow[chip][bank])
      max_conc_access2samerow[chip][bank] = conc;
   if (conc < min_conc_access2samerow[chip][bank])
      min_conc_access2samerow[chip][bank] = conc;
   concurrent_row_access[chip][bank] = 0;

   num_activates[chip][bank]++;
}

// Build a minimal, zero-initialized mem_fetch carrying only the address,
// tagged as an L2 prefetch.
mem_fetch_t *create_mf(unsigned long long int addr) 
{
   mem_fetch_t *mf = (mem_fetch_t*) calloc(1, sizeof(mem_fetch_t));
   mf->addr = addr;
   mf->l2_prefetched = 1; // generated by the prefetcher, not a demand access
   return mf;
}

// Allocate a synthetic DRAM read request used for L2 prefetching; the
// caller owns the returned packet.
dram_req_t *create_dram_packet(unsigned int col, unsigned int row, unsigned int bank, unsigned int nbytes, unsigned long long int addr) {

   dram_req_t *pkt = (dram_req_t *) malloc(sizeof(dram_req_t));

   // Target location within the DRAM chip.
   pkt->bk = bank;
   pkt->row = row;
   pkt->col = col;

   // Transfer accounting: nothing has moved across the bus yet.
   pkt->nbytes = nbytes;
   pkt->txbytes = 0;
   pkt->dqbytes = 0;

   // Timestamps used by the latency statistics.
   pkt->timestamp = gpu_tot_sim_cycle + gpu_sim_cycle;
   //pkt->cache_hits_waiting = cache_hits_waiting;
   pkt->insertion_time = (unsigned) gpu_sim_cycle;

   pkt->addr = addr;
   pkt->rw = READ;          // prefetches are always reads
   pkt->l2_prefetched = 1;  // flag the request as prefetcher-generated
   pkt->data = (mem_fetch_t *)create_mf(addr);

   return pkt;
}

// m_last_row contains requests which belong to same row (same bank)
// Select the next request for `bank`, given the row `curr_row` currently
// open in that bank.  Row hits are serviced first via m_last_row (the
// group of queued requests targeting the open row).  On a row miss the
// oldest queued request decides the next row.  When l2_prefetch_all is
// set and a row is about to close, prefetch packets are synthesized for
// the row's untouched cache blocks and serviced before switching rows.
// Returns the chosen request, or NULL when nothing can be issued.
dram_req_t *ideal_dram_scheduler::schedule( unsigned bank, unsigned curr_row)
{
   int row_hit = 0;
   
   if ( m_last_row[bank] == NULL || m_last_row[bank]->empty()  ) { // no requests of my row is opened
	  prefetch_necessary[bank] = 0;
      std::map<unsigned,std::list<std::list<dram_req_t*>::iterator> >::iterator bin_ptr = m_bins[bank].find( curr_row );

	  
      if ( bin_ptr == m_bins[bank].end()) { // if no request found in my bank queue which belongs to the same row as activated one  
	     // prefetch mode should kick in here for sure!!!!!!!!!!!!!
	     // find cache blocks in row which are not accessed...prefetch them to L2
		 // NOTE(review): this guard fires only when curr_row == 0 and then
		 // records prev_row = 0; the comment suggests it should run when the
		 // row *changes* -- confirm whether `if(curr_row)` was intended.
		 if(!curr_row) { // now definitely I have to change my row, note what was the previous row activated.
			m_dram->bk[bank]->prev_row = curr_row; // new row will be found later
		 }
		 
  		  int current_size = fast_scheduler_queue_length(m_dram); 
		  // NOTE(review): current_size is unused -- the old gating condition
		  // survives only in the comment on the next line.
		  if(l2_prefetch_all) { //> gpu_threshold) && (current_size < gpu_sched_size))
			 if(curr_row && (col_access[bank].size() > gpu_threshold)){  // to make sure this is not first activate...prefetch should happen only if row is closing..not opening for the first time
				//printf("col_access[bank].size() = %d, bank = %d, mem = %d\n", col_access[bank].size(), bank, m_dram->id);
				//printf("scheduler_size = %d, mem = %d \n", fast_scheduler_queue_length(m_dram), m_dram->id);
				//printf("m_bins[bank].size() = %d, mem = %d, bank = %d \n", m_bins[bank].size(), m_dram->id, bank);
			
				// NOTE(review): the expression below subtracts a value from
				// itself, so k always starts at 0 -- if a row-relative base
				// column was intended, this is a bug.
				unsigned int k = col_access[bank].at(0)->hex_address - col_access[bank].at(0)->hex_address;
				unsigned int l = k + 64*32;  // cache block size * number of blocks = row_size 
				int prefetch_sent = 0; 
				 //printf("ALARM 1 \n");
				 // Walk every cache-block-sized column of the row and prefetch
				 // the blocks that no recorded demand access touched.
				 for(; k < l; k=k+64){
					int found_hex_me = 0;
					for(int i=0; i < (int)col_access[bank].size(); i++) { 	
					  if(col_access[bank].at(i)->hex_address == k){
						  found_hex_me = 1;
						  //printf("ALARM 2 \n");
					  }
					}
				   if(!found_hex_me) {
				   		prefetch_necessary[bank] = 1;
						prefetch_sent++;
						//printf("PF bank = %d, col = %x, addr = %x, diff = %x \n", bank, k, col_access[bank].at(0)->address, (col_access[bank].at(0)->address - k));	
						// Cap prefetches per closing row, either by the fixed global
						// degree or by the dynamically adapted per-chip degree.
						if(gpu_prefetch_degree) { // if just one limit -- use this
							if(prefetch_sent < gpu_prefetch_degree) {
								m_queue_remain[bank].push_front(create_dram_packet(k, curr_row, bank, 64, (col_access[bank].at(0)->address - k)));
							}
						}
						else {
							if(prefetch_sent < m_dram->gpu_prefetch_degree_local) {
								m_queue_remain[bank].push_front(create_dram_packet(k, curr_row, bank, 64, (col_access[bank].at(0)->address - k)));
							}
						}
				   }
				 }
					
				 // NOTE(review): the col_hex_access_t records were calloc'd when the
				 // row was serviced and are not freed before clear() -- confirm this
				 // leak is acceptable for the simulator.
				 col_access[bank].clear();
				 if(prefetch_necessary[bank]) {
					 // Register the generated prefetches in the remain-bins and make
					 // them the active work for this bank.
					 std::list<dram_req_t*>::iterator it;
					 for ( it=m_queue_remain[bank].begin() ; it != m_queue_remain[bank].end(); it++ ) {
						m_bins_remain[bank][curr_row].push_front( it ); //newest reqs to the front
						//printf("prefetching = %d, bank = %d, mem = %d\n", m_bins_remain[bank][curr_row].size(), bank, m_dram->id);
					 }
						  //printf("ALARM 3 \n");
				
					 bin_ptr = m_bins_remain[bank].find( curr_row );
					 m_last_row[bank] = &(bin_ptr->second); // got the necessary prefetch requests...process them now
					
				 }
				 // NOTE(review): if every block of the row was already accessed,
				 // no prefetch is generated and m_last_row[bank] stays NULL/empty;
				 // the dereference further below would then fault -- confirm this
				 // path is unreachable.
			 }
			 else { //(ROW MISS) -- find a new row	     
				 if(col_access[bank].size()) {
					col_access[bank].clear();
				 }
				 if ( m_queue[bank].empty() ) // there are no requests in my bank (moved frm top)
					 return NULL;

				 dram_req_t *req = m_queue[bank].back(); // pick new request 
				 bin_ptr = m_bins[bank].find( req->row ); // find the new row 
				 assert( bin_ptr != m_bins[bank].end() ); // where did the request go???
				 m_last_row[bank] = &(bin_ptr->second); // get ALL requests pointers which belong to this newly opened row
				 dram_se_actual[m_dram->id][bank] += m_last_row[bank]->size();
				 data_collection(bank); // reinit the counters
			 }
		 }
		 else { // original code (ROW MISS)
      		if ( m_queue[bank].empty() ) // there are no requests in my bank (moved frm top)
			      return NULL;
			 // if curr_row is NULL initially open the row with the request first queued up
			 dram_req_t *req = m_queue[bank].back(); // pick new request 
			 bin_ptr = m_bins[bank].find( req->row ); // find the new row 
			 assert( bin_ptr != m_bins[bank].end() ); // where did the request go???
			 m_last_row[bank] = &(bin_ptr->second); // get ALL requests pointers which belong to this newly opened row
			 dram_se_original[m_dram->id][bank] += m_last_row[bank]->size();
			 data_collection(bank); // reinit the counters
		 }
      } else { // ROW HIT BY ACTUAL REQUESTS
	  	 
		 m_last_row[bank] = &(bin_ptr->second); // get ALL requests pointers which belong to this row // original code
		 
		 if(l2_prefetch_all) {
		     dram_se_actual[m_dram->id][bank] += m_last_row[bank]->size();
		 }
		 else {
		     dram_se_original[m_dram->id][bank] += m_last_row[bank]->size();
		 }
		 
		 if(l2_prefetch_all) {
		 	// Record which columns (cache blocks) of this row were touched so
		 	// the prefetcher can later fill in the untouched ones.
			 //std::list<dram_req_t*>::iterator new_it;
			 std::list<std::list<dram_req_t*>::iterator>::iterator testit;
			 
			 for ( testit = m_last_row[bank]->begin() ; testit != m_last_row[bank]->end(); testit++ ) {
					dram_req_t *req = *(*testit);
					//printf("N: bank = %d, col = %x \n", bank, req->col);	

					int found_hex = 0;
				    for(int i=0; i < (int)col_access[bank].size(); i++) { // of same row---imp
						if(col_access[bank].at(i)->hex_address == req->col) {
							found_hex = 1;
							//printf("F: bank = %d, col = %x \n", bank, col_access[bank].at(i)->hex_address);
							break;
						}
					}
					if(!found_hex) {
						// First touch of this column: remember its column id and address.
						col_hex_access_t *col_i;
						col_i = (col_hex_access_t*) calloc(1,sizeof(col_hex_access_t));
						col_i->hex_address = req->col;
						col_i->address = req->addr;
						col_i->access++;
						col_access[bank].push_back(col_i);
						//printf("A: bank = %d, col = %x \n", bank, col_i->hex_address);
					}
			 }
		 }
      }
}

   // initially: curr_row = curr_row, prev_row = NULL
   row_hit=1;
   
   //printf("prefetch_necessary = %d, bank = %d, mem = %d\n", prefetch_necessary[bank], bank, m_dram->id);
   //printf("m_last_row_size = %d, bank = %d, mem = %d\n", m_last_row[bank]->size(), bank, m_dram->id);
   
   //if requests came to same bank..different row..stop fetching
   
   //(1) prefetching is done when there is no request pending for the same row. 
   //(2) prefetching is done for limited blocks when there are some requests pending for the same bank but different row
   // Adapt the per-chip prefetch degree to current scheduler occupancy.
   // NOTE(review): the (int) cast binds to gpu_avg alone (already an int),
   // not to the product gpu_avg*get_avg_queue_length() -- confirm whether
   // truncating the product was intended.
	if(fast_scheduler_queue_length(m_dram) > (int) gpu_avg*get_avg_queue_length(m_dram)) {
			m_dram->gpu_prefetch_degree_local = gpu_prefetch_degree_lower; // lot of pending request -- lower the degree
	}
	else {
			m_dram->gpu_prefetch_degree_local = gpu_prefetch_degree_higher; // not many requests ( < avg) -- increase the degree.
	}
   

if(gpu_prefetch_stopper) {
	   // if very high above gpu_avg then -- stop the prefetching totally.
	   if (prefetch_necessary[bank] &&  (fast_scheduler_queue_length(m_dram) > (int) gpu_avg*get_avg_queue_length(m_dram))) {
			//|| !dq_empty(m_dram->mrqq))) { // || !dq_empty(m_dram->rwq)) ){//!controller_occ){ //!m_queue[bank].empty()){ //m_bins[bank].size()) {
			prefetch_stopper[m_dram->id][bank]++;
			// want to see if this stoppage is because a demand request came for the same row which was being prefetched
			// or different row:
			//printf("STOPPING\n");
			if(m_queue[bank].size()) {
				dram_req_t *req_check = m_queue[bank].back(); // pick new request 
				if(req_check->row == curr_row) { // BAD: No use of prefetching: (Such cases should be less in number)
					prefetch_stopper_same_row[m_dram->id][bank]++;
				}
			}
			// Abandon the generated prefetches for this bank.
			// NOTE(review): the dram_req_t / mem_fetch_t packets allocated by
			// create_dram_packet() are not freed before clear() -- confirm the
			// leak is acceptable.
			prefetch_necessary[bank] = 0;
			m_bins_remain[bank].clear();
			m_queue_remain[bank].clear();
			m_last_row[bank] = NULL;
			return NULL;
	   }
}
   // Issue the oldest request of the active row (demand or prefetch).
   std::list<dram_req_t*>::iterator next = m_last_row[bank]->back();
   dram_req_t *req = (*next);

   concurrent_row_access[m_dram->id][bank]++;
   row_access[m_dram->id][bank]++;
   m_last_row[bank]->pop_back();
 

// Remove the issued request from whichever queue owns it.
if (!prefetch_necessary[bank]) {
			m_queue[bank].erase(next);
}
   
if(prefetch_necessary[bank]) {
			dram_se_prefetch[m_dram->id][bank]++;	
		    m_queue_remain[bank].erase(next);
}
   

// If the active row's group is exhausted, drop its bin as well.
if ( m_last_row[bank]->empty() ) {
		  m_last_row[bank] = NULL;
		  if(prefetch_necessary[bank]) {
				//printf("ALARM 4 \n");
				m_bins_remain[bank].erase( req->row );
		  }
		  else {
				m_bins[bank].erase( req->row );
				curr_bank_requests[req->bk]--;
		  }
}
   
   
#ifdef DEBUG_FAST_IDEAL_SCHED
   if ( req )
      printf("%08u : DRAM(%u) scheduling memory request to bank=%u, row=%u\n", 
             (unsigned)gpu_sim_cycle, m_dram->id, req->bk, req->row );
#endif
   
// Prefetch packets were never counted in m_num_pending, so only demand
// requests decrement it.
if(!prefetch_necessary[bank]) {
   	assert( req != NULL && m_num_pending != 0 ); 
   	m_num_pending--;
}

   return req;
}


// Dump per-bank queue occupancy to the given stream.
// BUGFIX: output now goes to `fp` -- the original called printf and
// silently ignored the FILE* parameter.
void ideal_dram_scheduler::print( FILE *fp )
{
   for ( unsigned b=0; b < m_dram->nbk; b++ ) {
      fprintf(fp, " %u: queue length = %u\n", b, (unsigned)m_queue[b].size() );
   }
}

// Factory: heap-allocate a scheduler for DRAM chip `dm`; the caller
// stores the opaque pointer in dm->m_fast_ideal_scheduler.
void* alloc_fast_ideal_scheduler(dram_t *dm)
{
   ideal_dram_scheduler *sched = new ideal_dram_scheduler(dm);
   return sched;
}

// Per-cycle driver for one DRAM chip:
//  1. Drain the incoming memory-request queue (mrqq) into the scheduler,
//     optionally staging each request in a per-bank delay queue when
//     queue_delay modeling is enabled, respecting the scheduler size limit.
//  2. When delay queues are in use, pop at most one delayed request per
//     bank into the scheduler.
//  3. Scan banks round-robin from dm->prio and issue at most one request,
//     updating memory-request latency statistics.
void fast_scheduler_ideal(dram_t* dm)
{
   unsigned mrq_latency;
   // replacement for scheduler_ideal()
   ideal_dram_scheduler *sched = reinterpret_cast<ideal_dram_scheduler*>( dm->m_fast_ideal_scheduler );

   while ( !dq_empty(dm->mrqq) && (!gpgpu_dram_sched_queue_size || sched->num_pending() < (unsigned) gpgpu_dram_sched_queue_size)) {
      dram_req_t *req = (dram_req_t*)dq_pop(dm->mrqq); // request queue from L2 to DRAM
      if (!req) {
         // Queue drained unexpectedly; nothing more to add this cycle.
         break;
      }
      // BUGFIX: read req->bk only after the NULL check -- the original
      // dereferenced req before testing it for NULL.
      int bkn = req->bk;
      if (dm->queue_delay) {
         // Model extra queueing latency: stage the request in its bank's
         // delay queue instead of handing it to the scheduler directly.
         switch (bkn) {
         case 0: dq_push(dm->bank_0, req); break;
         case 1: dq_push(dm->bank_1, req); break;
         case 2: dq_push(dm->bank_2, req); break;
         case 3: dq_push(dm->bank_3, req); break;
         case 4: dq_push(dm->bank_4, req); break;
         case 5: dq_push(dm->bank_5, req); break;
         case 6: dq_push(dm->bank_6, req); break;
         case 7: dq_push(dm->bank_7, req); break;
         }
      } else {
         sched->add_req(req);
      }
      //printf("adding req (bank = %d) for mem = %d @ %lld\n", req->bk,dm->id,gpu_sim_cycle+gpu_tot_sim_cycle);
   }

   unsigned i;
   if (dm->queue_delay) {
      // Move at most one delayed request per bank into the scheduler,
      // re-checking the scheduler size limit for each bank.
      for ( i=0; i < dm->nbk; i++ ) {
         if (gpgpu_dram_sched_queue_size && sched->num_pending() >= (unsigned) gpgpu_dram_sched_queue_size)
            continue;
         dram_req_t *req_delayed = NULL;
         switch (i) {
         case 0: req_delayed = (dram_req_t*)dq_pop(dm->bank_0); break;
         case 1: req_delayed = (dram_req_t*)dq_pop(dm->bank_1); break;
         case 2: req_delayed = (dram_req_t*)dq_pop(dm->bank_2); break;
         case 3: req_delayed = (dram_req_t*)dq_pop(dm->bank_3); break;
         case 4: req_delayed = (dram_req_t*)dq_pop(dm->bank_4); break;
         case 5: req_delayed = (dram_req_t*)dq_pop(dm->bank_5); break;
         case 6: req_delayed = (dram_req_t*)dq_pop(dm->bank_6); break;
         case 7: req_delayed = (dram_req_t*)dq_pop(dm->bank_7); break;
         }
         if (req_delayed)
            sched->add_req(req_delayed);
      }
   }

   // Issue phase: first free bank (round-robin from dm->prio) with a
   // schedulable request wins; only one request issues per call.
   dram_req_t *req;
   for ( i=0; i < dm->nbk; i++ ) {
      unsigned b = (i+dm->prio)%dm->nbk;
      if ( !dm->bk[b]->mrq ) { // bank is free only once the previous transfer finished
         req = sched->schedule(b, dm->bk[b]->curr_row);
         if ( req ) {
            dm->prio = (dm->prio+1)%dm->nbk;
            dm->bk[b]->mrq = req;
            if (gpgpu_memlatency_stat) {
               // Queueing latency: cycles between enqueue and issue.
               mrq_latency = gpu_sim_cycle + gpu_tot_sim_cycle - dm->bk[b]->mrq->timestamp;
               dm->bk[b]->mrq->timestamp = gpu_tot_sim_cycle + gpu_sim_cycle;
               mrq_lat_table[LOGB2(mrq_latency)]++;
               total_mrq_latency = total_mrq_latency + mrq_latency;
               num_mrq++;
               if (mrq_latency > max_mrq_latency) {
                  max_mrq_latency = mrq_latency;
               }
            }
            break;
         }
      }
   }
}

// Print the scheduler's per-bank queue lengths to stdout.
void dump_fast_ideal_scheduler( dram_t *dm )
{
   ideal_dram_scheduler *sched = static_cast<ideal_dram_scheduler*>( dm->m_fast_ideal_scheduler );
   sched->print( stdout );
}

// Number of demand requests currently pending in the chip's scheduler.
// Aborts the simulation if the scheduler was never allocated.
unsigned fast_scheduler_queue_length(dram_t *dm)
{
   if ( !dm->m_fast_ideal_scheduler ) {
      printf("fast_scheduler_queue_length(): Where did the scheduler go?\n");
      exit(1);
   }
   ideal_dram_scheduler *sched = static_cast<ideal_dram_scheduler*>( dm->m_fast_ideal_scheduler );
   return sched->num_pending();
}

// Return the maximum per-bank fraction of all pending requests -- a
// measure of how unevenly load is spread across banks (1.0 means every
// pending request targets a single bank).
// BUGFIX: the original staged values in fixed stress[8] /
// pending_requests_bank[8] arrays indexed up to dm->nbk (stack overflow
// for chips with more than 8 banks) and divided by num_pending() without
// checking for zero.
float stress_scheduler(dram_t *dm)
{
   ideal_dram_scheduler *sched = reinterpret_cast<ideal_dram_scheduler*>( dm->m_fast_ideal_scheduler );
   unsigned total = sched->num_pending();
   if (total == 0)
      return 0.0f; // no pending requests anywhere -> no stress
   float max_stress = 0.0f;
   for (unsigned j = 0; j < dm->nbk; j++) {
      float stress = (float) sched->m_queue[j].size() / total;
      if (stress > max_stress)
         max_stress = stress;
   }
   return max_stress;
}

// Average memory-request-queue occupancy per DRAM command issued.
// BUGFIX: guard against division by zero before any command has issued
// (the original divided by dm->n_cmd unconditionally).
float get_avg_queue_length(dram_t *dm) 
{
	if (dm->n_cmd == 0)
		return 0.0f;
	return (float)dm->ave_mrqs / dm->n_cmd;
}

// Count how many banks currently have at least one queued request
// (waiting bank-level parallelism).
// BUGFIX: the original staged flags in a fixed int waitblp[8] indexed up
// to dm->nbk, overflowing the stack for chips with more than 8 banks;
// the two-pass flag/sum scheme is also collapsed into one loop.
int wait_blp(dram_t *dm)
{
   ideal_dram_scheduler *sched = reinterpret_cast<ideal_dram_scheduler*>( dm->m_fast_ideal_scheduler );
   int sumblp = 0;
   for (unsigned j = 0; j < dm->nbk; j++) {
      if (!sched->m_queue[j].empty())
         sumblp++;
   }
   return sumblp;
}

