

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <list>
#include <set>

#include "hardware_model.h"
#include "option_parser.h"
#include "dram.h"
#include "cache.h"
#include "pe.h"
#include "simulator_sim.h"
#include "l2cache.h"

// Build a new MemFetch for a write(-back) packet headed to this partition.
// This allocator is only ever used for writes, hence the assertion.
MemFetch *PartitionMfAllocator::alloc(addr_t addr,
                                      MemAccessType type, uint32_t size,
                                      bool wr,
                                      uint64_t cycle) const
{
  assert(wr);
  MemAccess acc(type, addr, size, wr);
  return new MemFetch(acc, WRITE_PACKET_SIZE, NULL, -1,
                      m_memory_config, cycle);
}

// Construct one memory partition: its simple DRAM model, queue-size limits
// for the Ramulator wrapper, and one sub-partition (L2 slice) per channel
// slice configured.
MemoryPartitionUnit::MemoryPartitionUnit(uint32_t partition_id,
                                         MemoryConfig *config,
                                         SIMULATOR *simulator,
                                         class Ramulator *ramulator_wrapper)
    : m_id(partition_id),
      m_config(config),
      m_arbitration_metadata(config),
      m_ramulator_wrapper(ramulator_wrapper),
      m_simulator(simulator)
{
  m_dram = new Dram(m_id, m_config, this, simulator);

  // Forwarded to the Ramulator wrapper on every push (see dram_cycle()).
  return_q_size = m_config->simulator_dram_return_queue_size;
  sched_q_size = config->simulator_frfcfs_dram_sched_queue_size;

  const uint32_t n_sub = m_config->m_n_sub_partition_per_memory_channel;
  m_sub_partition = new MemorySubPartition *[n_sub];
  for (uint32_t i = 0; i < n_sub; ++i)
  {
    // Sub-partition ids are globally numbered across partitions.
    const uint32_t global_spid = m_id * n_sub + i;
    m_sub_partition[i] = new MemorySubPartition(global_spid, m_config, simulator);
  }
}

// Release the DRAM model and every owned sub-partition.
MemoryPartitionUnit::~MemoryPartitionUnit()
{
  delete m_dram;
  const uint32_t n_sub = m_config->m_n_sub_partition_per_memory_channel;
  for (uint32_t i = 0; i < n_sub; ++i)
    delete m_sub_partition[i];
  delete[] m_sub_partition;
}

// Credit bookkeeping for DRAM arbitration among sub-partitions.
MemoryPartitionUnit::ArbitrationMetadata::ArbitrationMetadata(
    const MemoryConfig *config)
    : m_last_borrower(config->m_n_sub_partition_per_memory_channel - 1),
      m_private_credit(config->m_n_sub_partition_per_memory_channel, 0),
      m_shared_credit(0)
{
  // Each sub-partition keeps one private credit so it can always make
  // forward progress; the remaining queue capacity forms a shared pool.
  m_private_credit_limit = 1;
  m_shared_credit_limit = config->simulator_frfcfs_dram_sched_queue_size +
                          config->simulator_dram_return_queue_size -
                          (config->m_n_sub_partition_per_memory_channel - 1);
  if (config->seperate_write_queue_enabled)
    m_shared_credit_limit += config->simulator_frfcfs_dram_write_queue_size;

  // A queue size of 0 means "unbounded"; mirror that by disabling the
  // shared-credit cap (limit of 0 == no limit in has_credits()).
  const bool unbounded_queues =
      (config->simulator_frfcfs_dram_sched_queue_size == 0) ||
      (config->simulator_dram_return_queue_size == 0);
  if (unbounded_queues)
    m_shared_credit_limit = 0;
  assert(m_shared_credit_limit >= 0);
}

// A sub-partition may issue if it still has private credit, or if the
// shared pool has room (a shared limit of 0 means the pool is unbounded).
bool MemoryPartitionUnit::ArbitrationMetadata::has_credits(
    int inner_sub_partition_id) const
{
  const int spid = inner_sub_partition_id;
  if (m_private_credit[spid] < m_private_credit_limit)
    return true;
  return (m_shared_credit_limit == 0) ||
         (m_shared_credit < m_shared_credit_limit);
}

// Consume one credit for an issue from the given sub-partition: private
// credit first, then the shared pool. Asserts if neither has room
// (callers must check has_credits() first).
void MemoryPartitionUnit::ArbitrationMetadata::borrow_credit(
    int inner_sub_partition_id)
{
  const int spid = inner_sub_partition_id;
  if (m_private_credit[spid] < m_private_credit_limit)
    m_private_credit[spid] += 1;
  else if (m_shared_credit_limit == 0 ||
           m_shared_credit < m_shared_credit_limit)
    m_shared_credit += 1;
  else
    assert(0 && "DRAM arbitration error: Borrowing from depleted credit!");
  // Remember who issued last so arbitration round-robins from here.
  m_last_borrower = spid;
}

// Give back one credit for a completed request: the reverse of
// borrow_credit() -- private credit is restored before the shared pool.
void MemoryPartitionUnit::ArbitrationMetadata::return_credit(
    int inner_sub_partition_id)
{
  const int spid = inner_sub_partition_id;
  if (m_private_credit[spid] > 0)
    m_private_credit[spid] -= 1;
  else
    m_shared_credit -= 1;
  assert((m_shared_credit >= 0) &&
         "DRAM arbitration error: Returning more than available credits!");
}

// Dump the per-sub-partition and shared credit state for debugging.
void MemoryPartitionUnit::ArbitrationMetadata::print(FILE *fp) const
{
  fprintf(fp, "private_credit = ");
  for (size_t i = 0; i < m_private_credit.size(); ++i)
    fprintf(fp, "%d ", m_private_credit[i]);
  fprintf(fp, "(limit = %d)\n", m_private_credit_limit);
  fprintf(fp, "shared_credit = %d (limit = %d)\n", m_shared_credit,
          m_shared_credit_limit);
}

// The partition is busy while any sub-partition still tracks in-flight
// requests (MemorySubPartition::busy() is a pure query, so the early
// return is observationally identical to scanning them all).
bool MemoryPartitionUnit::busy() const
{
  const uint32_t n_sub = m_config->m_n_sub_partition_per_memory_channel;
  for (uint32_t i = 0; i < n_sub; ++i)
  {
    if (m_sub_partition[i]->busy())
      return true;
  }
  return false;
}

// Advance the L2/cache pipeline of every sub-partition by one cycle.
void MemoryPartitionUnit::cache_cycle(uint32_t cycle)
{
  const uint32_t n_sub = m_config->m_n_sub_partition_per_memory_channel;
  for (uint32_t i = 0; i < n_sub; ++i)
    m_sub_partition[i]->cache_cycle(cycle);
}

// Determine whether a given sub-partition can issue to DRAM this cycle:
// the return path (dram->L2 queue) must have room and the sub-partition
// must still hold arbitration credit.
bool MemoryPartitionUnit::can_issue_to_dram(int inner_sub_partition_id)
{
  const int spid = inner_sub_partition_id;
  const bool return_path_blocked = m_sub_partition[spid]->dram_L2_queue_full();
  const bool has_credit = m_arbitration_metadata.has_credits(spid);
  return has_credit && !return_path_blocked;
}

// Strip this partition's base offset from a global sub-partition id to
// get the index into m_sub_partition.
int MemoryPartitionUnit::global_sub_partition_id_to_local_id(
    int global_sub_partition_id) const
{
  const int base = m_id * m_config->m_n_sub_partition_per_memory_channel;
  return global_sub_partition_id - base;
}

// One cycle of the simple fixed-latency DRAM model: return at most one
// completed request to its sub-partition's dram->L2 queue, then arbitrate
// at most one new request from an L2->DRAM queue into the latency queue.
void MemoryPartitionUnit::simple_dram_model_cycle()
{
  // Pop a completed memory request from the DRAM latency queue and push it
  // to the dram-to-L2 queue of the sub-partition that issued it.
  if (!m_dram_latency_queue.empty() &&
      ((m_simulator->simulator_sim_cycle) >=
       m_dram_latency_queue.front().ready_cycle))
  {
    MemFetch *mf_return = m_dram_latency_queue.front().req;
    if (mf_return->get_access_type() != L1_WRBK_ACC &&
        mf_return->get_access_type() != L2_WRBK_ACC)
    {
      // NOTE(review): if the destination queue is full below, the request
      // stays queued and set_reply() runs again next cycle -- confirm
      // set_reply() is idempotent.
      mf_return->set_reply();

      uint32_t dest_global_spid = mf_return->get_sub_partition_id();
      int dest_spid = global_sub_partition_id_to_local_id(dest_global_spid);
      assert(m_sub_partition[dest_spid]->get_id() == dest_global_spid);
      if (!m_sub_partition[dest_spid]->dram_L2_queue_full())
      {
        // BUGFIX: an inner `if (type == L1_WRBK_ACC)` branch here was
        // unreachable -- the enclosing condition already excludes both
        // L1_WRBK_ACC and L2_WRBK_ACC -- so it has been removed.
        m_sub_partition[dest_spid]->dram_L2_queue_push(mf_return);
        mf_return->set_status(
            IN_PARTITION_DRAM_TO_L2_QUEUE,
            m_simulator->simulator_sim_cycle);
        // Give back the credit borrowed when the request was issued.
        m_arbitration_metadata.return_credit(dest_spid);
        m_dram_latency_queue.pop_front();
      }
    }
    else
    {
      // Writeback acknowledgements terminate here; they never travel back
      // through the dram-to-L2 queue. set_done() also returns their credit.
      this->set_done(mf_return);
      delete mf_return;
      m_dram_latency_queue.pop_front();
    }
  }

  // L2->DRAM queue to DRAM latency queue.
  // Arbitrate among the L2 sub-partitions, round-robin starting just after
  // the last sub-partition that issued.
  int last_issued_partition = m_arbitration_metadata.last_borrower();
  for (uint32_t p = 0; p < m_config->m_n_sub_partition_per_memory_channel;
       p++)
  {
    int spid = (p + last_issued_partition + 1) %
               m_config->m_n_sub_partition_per_memory_channel;
    if (!m_sub_partition[spid]->L2_dram_queue_empty() &&
        can_issue_to_dram(spid))
    {
      MemFetch *mf = m_sub_partition[spid]->L2_dram_queue_top();
      if (m_dram->full(mf->is_write()))
        break;

      m_sub_partition[spid]->L2_dram_queue_pop();
      // Model the fixed DRAM service latency before the request completes.
      dram_delay_t d;
      d.req = mf;
      d.ready_cycle = m_simulator->simulator_sim_cycle +
                      m_config->dram_latency;
      m_dram_latency_queue.push_back(d);
      mf->set_status(IN_PARTITION_DRAM_LATENCY_QUEUE,
                     m_simulator->simulator_sim_cycle);
      m_arbitration_metadata.borrow_credit(spid);
      break; // the DRAM should only accept one request per cycle
    }
  }
}

// One DRAM-side cycle when the Ramulator backend is in use. Per cycle:
// (1) return at most one completed access from Ramulator to its
// sub-partition, (2) advance the wrapper's bookkeeping, (3) arbitrate one
// new L2->DRAM issue into the latency queue, (4) push one latency-expired
// request from the latency queue into Ramulator.
void MemoryPartitionUnit::dram_cycle()
{
  // pop completed memory request from dram and push it to dram-to-L2 queue
  // of the original sub partition
  // if this memory partition has any requests completed and ready to send back to L2
  if (!m_ramulator_wrapper->to_simulator_empty(m_id))
  {

    // fifo - find the oldest completed access for this m_id
    auto &pkt_q = m_ramulator_wrapper->to_simulator.find(m_id)->second;
    MemFetch *mf_return = pkt_q.front();
    if (mf_return)
    {
      // Route the reply back to the sub-partition that issued it.
      unsigned dest_global_spid = mf_return->get_sub_partition_id();
      int dest_spid = global_sub_partition_id_to_local_id(dest_global_spid);
      assert(m_sub_partition[dest_spid]->get_id() == dest_global_spid);
      if (!m_sub_partition[dest_spid]->dram_L2_queue_full())
      {
        if (mf_return->get_access_type() == L1_WRBK_ACC || mf_return->get_access_type() == L2_WRBK_ACC)
        {
          // Writeback acks terminate here; the partition-level set_done()
          // also returns their arbitration credit.
          // NOTE(review): set_done() already forwards to the
          // sub-partition's set_done(), so the explicit call below repeats
          // it (harmless for a set erase, but looks redundant) -- confirm.
          set_done(mf_return);
          m_sub_partition[dest_spid]->set_done(mf_return);
          delete mf_return;
        }
        else
        {
          // Normal completion: mark as a reply, queue on the dram->L2
          // path, and return the credit borrowed at issue time.
          mf_return->set_reply();
          m_sub_partition[dest_spid]->dram_L2_queue_push(mf_return);
          mf_return->set_status(IN_PARTITION_DRAM_TO_L2_QUEUE, m_simulator->simulator_sim_cycle);
          m_arbitration_metadata.return_credit(dest_spid);
          // MEMPART_DPRINTF("mem_fetch request %p return from dram to sub partition %d\n", mf_return, dest_spid);
        }
        pkt_q.pop_front();
        // Remove this partition's entry once its return queue drains.
        if (!pkt_q.size())
        {
          m_ramulator_wrapper->to_simulator.erase(m_id);
          m_ramulator_wrapper->returned[m_id]--;
        }
      }
    }
    else
    { // buble
      // A null entry is a placeholder ("bubble"); discard it.
      pkt_q.pop_front();
      if (!pkt_q.size())
      {
        m_ramulator_wrapper->to_simulator.erase(m_id);
        m_ramulator_wrapper->returned[m_id]--;
      }
    }
  }
  m_ramulator_wrapper->advance_time(m_id); // it manages the completion of accesses recieved from Ramulator or sending new access to Ramulator but doesn't tick the Ramulator

  // Only issue new work while the scheduler queue has room.
  if (m_ramulator_wrapper->pending[m_id] < sched_q_size)
  {
    // L2->DRAM queue to DRAM latency queue
    // Arbitrate among multiple L2 subpartitions
    // (round-robin starting just after the last sub-partition that issued)
    int last_issued_partition = m_arbitration_metadata.last_borrower();
    for (unsigned p = 0; p < m_config->m_n_sub_partition_per_memory_channel; p++)
    {
      int spid = (p + last_issued_partition + 1) % m_config->m_n_sub_partition_per_memory_channel;
      if (!m_sub_partition[spid]->L2_dram_queue_empty() && can_issue_to_dram(spid))
      {
        MemFetch *mf = m_sub_partition[spid]->L2_dram_queue_top();
        m_sub_partition[spid]->L2_dram_queue_pop();
        // MEMPART_DPRINTF("Issue mem_fetch request %p from sub partition %d to dram\n", mf, spid);

        // Model the fixed dram_latency before handing off to Ramulator.
        dram_delay_t d;
        d.req = mf;
        d.ready_cycle = m_simulator->simulator_sim_cycle + m_config->dram_latency;
        m_dram_latency_queue.push_back(d);

        mf->set_status(IN_PARTITION_DRAM_LATENCY_QUEUE, m_simulator->simulator_sim_cycle);
        m_arbitration_metadata.borrow_credit(spid);
        break; // the DRAM should only accept one request per cycle
      }
    }
  }

  // DRAM latency queue
  // Once the head request's modeled latency has elapsed, hand it to the
  // Ramulator wrapper; if the push is rejected it stays queued and is
  // retried next cycle.
  if (!m_dram_latency_queue.empty() && ((m_simulator->simulator_sim_cycle) >= m_dram_latency_queue.front().ready_cycle) && m_ramulator_wrapper->pending[m_id] < sched_q_size)
  {
    MemFetch *mf = m_dram_latency_queue.front().req;
    mf->set_mid(m_id);
    bool done = false;
    // for now just send the return_q and sched_q size
    done = m_ramulator_wrapper->FromGpusimDram_push(m_id, mf, return_q_size, sched_q_size);
    if (done)
    {
      m_dram_latency_queue.pop_front();
    }
  }
}

// Retire a completed fetch: map it back to the issuing sub-partition and
// drop it from that sub-partition's request tracker. Writeback acks never
// take the dram->L2 return path, so their arbitration credit is returned
// here instead.
void MemoryPartitionUnit::set_done(MemFetch *mf)
{
  uint32_t global_spid = mf->get_sub_partition_id();
  const int spid = global_sub_partition_id_to_local_id(global_spid);
  assert(m_sub_partition[spid]->get_id() == global_spid);

  const bool is_writeback_ack = (mf->get_access_type() == L1_WRBK_ACC ||
                                 mf->get_access_type() == L2_WRBK_ACC);
  if (is_writeback_ack)
    m_arbitration_metadata.return_credit(spid);

  m_sub_partition[spid]->set_done(mf);
}

// Construct one memory sub-partition: its L2 bank (unless disabled) and
// the four queues linking interconnect, L2 and DRAM.
MemorySubPartition::MemorySubPartition(uint32_t sub_partition_id,
                                       MemoryConfig *config,
                                       class SIMULATOR *simulator)
{
  m_id = sub_partition_id;
  m_config = config;
  m_simulator = simulator;
  m_memcpy_cycle_offset = 0;

  assert(m_id < m_config->m_n_mem_sub_partition);

  // Name the L2 bank after the global sub-partition id.
  char L2c_name[32];
  // BUGFIX: m_id is uint32_t; use %u instead of %d so an unsigned value is
  // not passed through a signed conversion specifier.
  snprintf(L2c_name, 32, "L2_bank_%03u", m_id);
  m_L2interface = new L2Interface(this);
  m_mf_allocator = new PartitionMfAllocator(config);

  // BUGFIX: ensure m_L2cache is well-defined even when the L2 is disabled,
  // so the destructor's unconditional delete is safe.
  m_L2cache = NULL;
  if (!m_config->m_l2_config.disabled())
    m_L2cache =
        new L2Cache(L2c_name, m_config->m_l2_config, -1, -1, m_L2interface,
                    m_mf_allocator, IN_PARTITION_L2_MISS_QUEUE, simulator);

  // Queue capacities come from the "icnt:L2dram:dramL2:L2icnt" config string.
  uint32_t icnt_L2;
  uint32_t L2_dram;
  uint32_t dram_L2;
  uint32_t L2_icnt;
  const int n_parsed = sscanf(m_config->simulator_L2_queue_config, "%u:%u:%u:%u",
                              &icnt_L2, &L2_dram, &dram_L2, &L2_icnt);
  // BUGFIX: the parse result was previously unchecked; a malformed config
  // string would have left the queue sizes uninitialized.
  assert(n_parsed == 4 && "malformed simulator_L2_queue_config string");
  m_icnt_L2_queue = new FifoPipeline<MemFetch>("icnt-to-L2", 0, icnt_L2);
  m_L2_dram_queue = new FifoPipeline<MemFetch>("L2-to-dram", 0, L2_dram);
  m_dram_L2_queue = new FifoPipeline<MemFetch>("dram-to-L2", 0, dram_L2);
  m_L2_icnt_queue = new FifoPipeline<MemFetch>("L2-to-icnt", 0, L2_icnt);
  wb_addr = -1;
}

// Release the queues, L2 bank, interface and allocator owned by this
// sub-partition.
MemorySubPartition::~MemorySubPartition()
{
  delete m_icnt_L2_queue;
  delete m_L2_dram_queue;
  delete m_dram_L2_queue;
  delete m_L2_icnt_queue;
  delete m_L2cache;
  delete m_L2interface;
  // BUGFIX: m_mf_allocator is allocated in the constructor but was never
  // released here, leaking one PartitionMfAllocator per sub-partition.
  delete m_mf_allocator;
}

// Advance this sub-partition's cache pipeline by one cycle. Stage order:
// (1) drain one ready L2 fill response toward the interconnect, (2) move
// the head of the dram->L2 queue into the L2 (fill) or on to the icnt
// queue, (3) cycle the L2 cache itself, (4) probe the L2 with the next
// interconnect request.
void MemorySubPartition::cache_cycle(uint32_t cycle)
{
  // L2 fill responses
  if (!m_config->m_l2_config.disabled())
  {
    if (m_L2cache->access_ready() && !m_L2_icnt_queue->full())
    {
      MemFetch *mf = m_L2cache->next_access();
      if (mf->get_access_type() != L2_WR_ALLOC_R)
      { // Don't pass write allocate read request back to
        // upper level cache
        mf->set_reply();
        mf->set_status(IN_PARTITION_L2_TO_ICNT_QUEUE, m_simulator->simulator_sim_cycle);
        m_L2_icnt_queue->push(mf);
      }
      else
      {
        // A completed write-allocate read: under FETCH_ON_WRITE the
        // original write is acknowledged now; the internal read itself is
        // retired and freed.
        if (m_config->m_l2_config.m_write_alloc_policy == FETCH_ON_WRITE)
        {
          MemFetch *original_wr_mf = mf->get_original_wr_mf();
          assert(original_wr_mf);
          original_wr_mf->set_reply();
          original_wr_mf->set_status( IN_PARTITION_L2_TO_ICNT_QUEUE, m_simulator->simulator_sim_cycle);
          m_L2_icnt_queue->push(original_wr_mf);
        }
        m_request_tracker.erase(mf);
        delete mf;
      }
    }
  }

  // DRAM to L2 and icnt
  if (!m_dram_L2_queue->empty())
  {
    MemFetch *mf = m_dram_L2_queue->top();
    if (!m_config->m_l2_config.disabled() && m_L2cache->waiting_for_fill(mf))
    {
      // An L2 miss is waiting on this data: fill the cache when the fill
      // port is free; otherwise retry next cycle.
      if (m_L2cache->fill_port_free())
      {
        mf->set_status(IN_PARTITION_L2_FILL_QUEUE, m_simulator->simulator_sim_cycle);
        m_L2cache->fill(mf, m_simulator->simulator_sim_cycle + m_memcpy_cycle_offset);
        m_dram_L2_queue->pop();
      }
    }
    else if (!m_L2_icnt_queue->full())
    {
      // Not destined for the L2 (or L2 disabled): forward straight to the
      // interconnect.
      if (mf->is_write() && mf->get_type() == WRITE_ACK)
        mf->set_status(IN_PARTITION_L2_TO_ICNT_QUEUE, m_simulator->simulator_sim_cycle);
      m_L2_icnt_queue->push(mf);
      m_dram_L2_queue->pop();
    }
  }

  // prior L2 misses inserted into m_L2_dram_queue here
  if (!m_config->m_l2_config.disabled())
    m_L2cache->cycle();

  // new L2 accesses
  if (!m_L2_dram_queue->full() && !m_icnt_L2_queue->empty())
  {
    MemFetch *mf = m_icnt_L2_queue->top();
    if (!m_config->m_l2_config.disabled())
    {
      // L2 is enabled and access is for L2
      bool output_full = m_L2_icnt_queue->full();
      bool port_free = m_L2cache->data_port_free();
      if (!output_full && port_free)
      {
        std::list<CacheEvent> events;
        enum CacheRequestState state = m_L2cache->access(mf->get_addr(), mf, m_simulator->simulator_sim_cycle + m_memcpy_cycle_offset, events);
        bool write_sent = was_write_sent(events);
        bool read_sent = was_read_sent(events);
        // MEM_SUBPART_DPRINTF("Probing L2 cache Address=%llx, status=%u\n",
        // mf->get_addr(), status);

        if (state == HIT)
        {
          if (!write_sent)
          {
            // L2 cache replies
            assert(!read_sent);
            if (mf->get_access_type() == L1_WRBK_ACC)
              {
              // An L1 writeback that hit in L2 needs no reply: retire it.
              m_request_tracker.erase(mf);
              delete mf;
            }
            else
            {
              mf->set_reply();
              mf->set_status(IN_PARTITION_L2_TO_ICNT_QUEUE, m_simulator->simulator_sim_cycle);
              m_L2_icnt_queue->push(mf);
            }
            m_icnt_L2_queue->pop();
          }
          else
          {
            // A hit that generated a write (e.g. a writeback): the
            // request itself is complete; just dequeue it.
            assert(write_sent);
            m_icnt_L2_queue->pop();
          }
        }
        else if (state != RESERVATION_FAIL)
        {
          // Miss accepted by the L2. Under fetch/lazy-fetch write-allocate
          // policies a write that sent no allocate request can be
          // acknowledged to the interconnect immediately.
          if (mf->is_write() && (m_config->m_l2_config.m_write_alloc_policy == FETCH_ON_WRITE 
              || m_config->m_l2_config.m_write_alloc_policy == LAZY_FETCH_ON_READ) 
              && !was_writeallocate_sent(events))
          {
            mf->set_reply();
            mf->set_status(IN_PARTITION_L2_TO_ICNT_QUEUE, m_simulator->simulator_sim_cycle);
            m_L2_icnt_queue->push(mf);
          }
          // L2 cache accepted request
          m_icnt_L2_queue->pop();
        }
        else
        {
          assert(!write_sent);
          assert(!read_sent);
          // L2 cache lock-up: will try again next cycle
        }
      }
    }
    else
    {
      // L2 is disabled or non-texture access to texture-only L2
      mf->set_status(IN_PARTITION_L2_TO_DRAM_QUEUE, m_simulator->simulator_sim_cycle);
      m_L2_dram_queue->push(mf);
      m_icnt_L2_queue->pop();
    }
  }

  // ROP delay queue
  // if (!m_rop.empty() && (cycle >= m_rop.front().ready_cycle) &&
  //! m_icnt_L2_queue->full()) {
  // MemFetch *mf = m_rop.front().req;
  // m_rop.pop();
  // m_icnt_L2_queue->push(mf);
  // mf->set_status(IN_PARTITION_ICNT_TO_L2_QUEUE,
  // m_simulator->simulator_sim_cycle);
  //}
}

// True when the icnt->L2 input queue cannot accept another request.
bool MemorySubPartition::full() const { return m_icnt_L2_queue->full(); }

// Size-aware variant of full().
// NOTE(review): this forwards FifoPipeline::is_avilable_size(size)
// directly -- confirm against FifoPipeline whether that predicate means
// "would overflow" (i.e. full) or "has room"; the name suggests the
// latter, which would invert the meaning of full().
bool MemorySubPartition::full(uint32_t size) const
{
  return m_icnt_L2_queue->is_avilable_size(size);
}

// True when no L2 miss is waiting to be issued to DRAM.
bool MemorySubPartition::L2_dram_queue_empty() const
{
  return m_L2_dram_queue->empty();
}

// Head of the L2->DRAM queue (next request to issue to DRAM).
class MemFetch *MemorySubPartition::L2_dram_queue_top() const
{
  return m_L2_dram_queue->top();
}

// Remove the head of the L2->DRAM queue after it has been issued.
void MemorySubPartition::L2_dram_queue_pop() { m_L2_dram_queue->pop(); }

// True when the DRAM->L2 return queue cannot accept another reply.
bool MemorySubPartition::dram_L2_queue_full() const
{
  return m_dram_L2_queue->full();
}

// Enqueue a completed DRAM reply on the DRAM->L2 return path.
void MemorySubPartition::dram_L2_queue_push(class MemFetch *mf)
{
  m_dram_L2_queue->push(mf);
}

// Flush the L2 bank if one is configured for this sub-partition.
uint32_t MemorySubPartition::flushL2()
{
  if (!m_config->m_l2_config.disabled())
    m_L2cache->flush();
  return 0; // TODO: write the flushed data to the main memory
}

// Invalidate every line in the L2 bank if one is configured.
uint32_t MemorySubPartition::invalidateL2()
{
  if (!m_config->m_l2_config.disabled())
    m_L2cache->invalidate();
  return 0;
}
// Split a 64/128-byte request into one MemFetch per SECTOR_SIZE-byte
// sector for the sector L2 cache. Single-sector requests pass through
// unchanged. Returns the (possibly new) fetches; callers own them.
std::vector<MemFetch *>
MemorySubPartition::breakdown_request_to_sector_requests(MemFetch *mf)
{
  std::vector<MemFetch *> result;

  if (mf->get_data_size() == SECTOR_SIZE &&
      mf->get_access_sector_mask().count() == 1)
  {
    // Already a single-sector request: forward as-is.
    result.push_back(mf);
  }
  else if (mf->get_data_size() == 128 || mf->get_data_size() == 64)
  {
    // We only accept 32, 64 and 128 bytes reqs
    // Determine the inclusive sector range [start, end] covered.
    unsigned start = 0, end = 0;
    if (mf->get_data_size() == 128)
    {
      start = 0;
      end = 3;
    }
    else if (mf->get_data_size() == 64 &&
             mf->get_access_sector_mask().to_string() == "1100")
    {
      start = 2;
      end = 3;
    }
    else if (mf->get_data_size() == 64 &&
             mf->get_access_sector_mask().to_string() == "0011")
    {
      start = 0;
      end = 1;
    }
    else if (mf->get_data_size() == 64 &&
             (mf->get_access_sector_mask().to_string() == "1111" ||
              mf->get_access_sector_mask().to_string() == "0000"))
    {
      // Full/empty mask: pick the half of the 128B line by alignment.
      if (mf->get_addr() % 128 == 0)
      {
        start = 0;
        end = 1;
      }
      else
      {
        start = 2;
        end = 3;
      }
    }
    else
    {
      printf(
          "Invalid sector received, address = 0x%06lx, sector mask = %s, data "
          "size = %d",
          mf->get_addr(), mf->get_access_sector_mask().to_string().c_str(), mf->get_data_size());
      assert(0 && "Undefined sector mask is received");
    }

    // Byte mask covering sector `start`; shifted by one sector per
    // iteration of the loop below.
    std::bitset<SECTOR_SIZE * SECTOR_CHUNCK_SIZE> byte_sector_mask;
    byte_sector_mask.reset();
    // BUGFIX: the loop bound was `k < SECTOR_SIZE`, which left the mask
    // empty whenever start == 2 (the "1100" and upper-half cases). The
    // mask must cover bytes [start*SECTOR_SIZE, (start+1)*SECTOR_SIZE).
    for (unsigned k = start * SECTOR_SIZE; k < (start + 1) * SECTOR_SIZE; ++k)
      byte_sector_mask.set(k);

    for (unsigned j = start, i = 0; j <= end; ++j, ++i)
    {
      // BUGFIX: the MemAccess was heap-allocated and never freed; MemFetch
      // copies the access (cf. the stack-allocated MemAccess in
      // PartitionMfAllocator::alloc), so build it on the stack instead.
      const MemAccess ma(
          mf->get_access_type(), mf->get_addr() + SECTOR_SIZE * i, mf->is_write(),
          SECTOR_SIZE,
          mf->get_access_byte_mask() & byte_sector_mask,
          std::bitset<SECTOR_CHUNCK_SIZE>().set(j));

      MemFetch *n_mf =
          new MemFetch(ma, mf->get_ctrl_size(), mf->get_request_id(),
                       mf->get_mem_config(),
                       m_simulator->simulator_sim_cycle, mf);

      result.push_back(n_mf);
      byte_sector_mask <<= SECTOR_SIZE;
    }
  }
  else
  {
    printf(
        "Invalid sector received, address = 0x%06lx, sector mask = %lu, byte "
        "mask = , data size = %u",
        mf->get_addr(), mf->get_access_sector_mask().count(),
        mf->get_data_size());
    assert(0 && "Undefined data size is received");
  }

  return result;
}
// The sub-partition is busy while any in-flight request is still tracked.
bool MemorySubPartition::busy() const
{
  // if(m_simulator->m_config->m_debug_timing)
  //   printf("memory request tracker size:%lu\n", m_request_tracker.size());
  return !m_request_tracker.empty();
}

// Accept a request from the interconnect. Sector caches operate on
// SECTOR_SIZE chunks, so wide requests are first broken into one MemFetch
// per sector; every resulting piece is tracked and queued toward the L2.
void MemorySubPartition::push(MemFetch *m_req, uint64_t cycle)
{
  if (!m_req)
    return;

  // m_stats->memlatstat_icnt2mem_pop(m_req);
  std::vector<MemFetch *> reqs;
  if (m_config->m_l2_config.m_cache_type == SECTOR)
    reqs = breakdown_request_to_sector_requests(m_req);
  else
    reqs.push_back(m_req);

  for (MemFetch *req : reqs)
  {
    m_request_tracker.insert(req);
    m_icnt_L2_queue->push(req);
    req->set_status(IN_PARTITION_ICNT_TO_L2_QUEUE, m_simulator->simulator_sim_cycle);
  }
}

// Dequeue the next reply for the interconnect. Writeback acknowledgements
// are consumed here (freed, NULL returned) rather than sent upstream.
MemFetch *MemorySubPartition::pop()
{
  MemFetch *mf = m_L2_icnt_queue->pop();
  m_request_tracker.erase(mf);
  const bool is_wrbk_ack =
      mf && (mf->get_access_type() == L2_WRBK_ACC ||
             mf->get_access_type() == L1_WRBK_ACC);
  if (is_wrbk_ack)
  {
    delete mf;
    mf = NULL;
  }
  return mf;
}

// Peek at the next reply for the interconnect. Writeback acknowledgements
// never reach the interconnect: they are dequeued, untracked and freed on
// the spot, and NULL is returned instead.
MemFetch *MemorySubPartition::top()
{
  MemFetch *mf = m_L2_icnt_queue->top();
  const bool is_wrbk_ack =
      mf && (mf->get_access_type() == L2_WRBK_ACC ||
             mf->get_access_type() == L1_WRBK_ACC);
  if (is_wrbk_ack)
  {
    m_L2_icnt_queue->pop();
    m_request_tracker.erase(mf);
    delete mf;
    mf = NULL;
  }
  return mf;
}

// Remove a completed request from the in-flight tracker (so busy() no
// longer counts it). The fetch itself is not freed here.
void MemorySubPartition::set_done(MemFetch *mf)
{
  m_request_tracker.erase(mf);
}
