
#include "cache.h"
#include "pe.h"
#include "simulator_sim.h"
#include "hashing.h"

/// Map a byte address to its set index using the configured hashing scheme.
uint32_t CacheConfig::set_index(addr_t addr) const
{
  return hash_function(addr, m_num_set, m_line_size_log2, m_num_set_log2,
                       m_set_index_function);
}

// Compute the set index for `addr` using one of several hashing schemes.
//
// Parameters (note: named with an m_ prefix although they are plain
// arguments, not members — presumably to mirror the member names they are
// passed from; see set_index() above):
//   m_nset           - number of sets in the cache
//   m_line_sz_log2   - log2 of the line size (bits below this are the offset)
//   m_nset_log2      - log2 of the number of sets
//   m_index_function - which hashing scheme to apply (see cases below)
//
// Returns a set index guaranteed (by the trailing assert) to be < m_nset.
uint32_t CacheConfig::hash_function(addr_t addr, uint32_t m_nset,
                                    uint32_t m_line_sz_log2,
                                    uint32_t m_nset_log2,
                                    uint32_t m_index_function) const
{
  uint32_t set_index = 0;

  switch (m_index_function)
  {
  case FERMI_HASH_SET_FUNCTION:
  {
    /*
     * Set Indexing function from "A Detailed GPU Cache Model Based on Reuse
     * Distance Theory" Cedric Nugteren et al. HPCA 2014
     * Only valid for 32 or 64 sets (16KB / 48KB Fermi L1 configurations).
     */
    uint32_t lower_xor = 0;
    uint32_t upper_xor = 0;

    if (m_nset == 32 || m_nset == 64)
    {
      // Lower xor value is bits 7-11
      lower_xor = (addr >> m_line_sz_log2) & 0x1F;

      // Upper xor value is bits 13, 14, 15, 17, and 19
      upper_xor = (addr & 0xE000) >> 13;   // Bits 13, 14, 15
      upper_xor |= (addr & 0x20000) >> 14; // Bit 17
      upper_xor |= (addr & 0x80000) >> 15; // Bit 19

      set_index = (lower_xor ^ upper_xor);

      // 48KB cache prepends the set_index with bit 12
      if (m_nset == 64)
        set_index |= (addr & 0x1000) >> 7;
    }
    else
    { /* Else incorrect number of sets for the hashing function */
      assert(
          "\nsimulator cache configuration error: The number of sets should "
          "be "
          "32 or 64 for the hashing set index function.\n" &&
          0);
    }
    break;
  }

  case BITWISE_XORING_FUNCTION:
  {
    // XOR the tag bits into the linear index (helper from hashing.h).
    addr_t higher_bits = addr >> (m_line_sz_log2 + m_nset_log2);
    uint32_t index = (addr >> m_line_sz_log2) & (m_nset - 1);
    set_index = bitwise_hash_function(higher_bits, index, m_nset);
    break;
  }
  case HASH_IPOLY_FUNCTION:
  {
    // Irreducible-polynomial hash of the tag bits (helper from hashing.h).
    addr_t higher_bits = addr >> (m_line_sz_log2 + m_nset_log2);
    uint32_t index = (addr >> m_line_sz_log2) & (m_nset - 1);
    set_index = ipoly_hash_function(higher_bits, index, m_nset);
    break;
  }
  case CUSTOM_SET_FUNCTION:
  {
    /* No custom set function implemented — set_index stays 0, which the
     * bounds assert below will tolerate but is almost certainly not the
     * intended mapping. */
    break;
  }

  case LINEAR_SET_FUNCTION:
  {
    // Plain modulo indexing: take m_nset_log2 bits above the line offset.
    set_index = (addr >> m_line_sz_log2) & (m_nset - 1);
    break;
  }

  default:
  {
    assert("\nUndefined set index function.\n" && 0);
    break;
  }
  }

  // Linear function selected or custom set index function not implemented
  assert((set_index < m_nset) &&
         "\nError: Set index out of bounds. This is caused by "
         "an incorrect or unimplemented custom set index function.\n");

  return set_index;
}

/// Initialize the L2 configuration and remember the linear-to-raw address
/// mapping used to strip partition bits in set_index().
void L2CacheConfig::init(linear_to_raw_address_translation *address_mapping)
{
  m_address_mapping = address_mapping;
  CacheConfig::init();
}

uint32_t L2CacheConfig::set_index(addr_t addr) const
{
  // Hash on the partition-local address (when a mapping is available) so
  // that memory-partition bits do not cause set camping.
  const addr_t effective_addr =
      m_address_mapping ? m_address_mapping->partition_address(addr) : addr;

  return CacheConfig::set_index(effective_addr);
}

/// Checks if there is a pending request to the lower memory level already
/// Checks if there is a pending request to the lower memory level already
bool MshrTable::probe(addr_t line_addr) const
{
  return m_data.find(line_addr) != m_data.end();
}

/// Checks if there is space for tracking a new memory access
/// Checks if there is space for tracking a new memory access
bool MshrTable::full(addr_t line_addr) const
{
  // An existing entry is full when no further access can merge into it;
  // otherwise the table is full when every MSHR entry is in use.
  table::const_iterator entry = m_data.find(line_addr);
  if (entry == m_data.end())
    return m_data.size() >= m_num_entries;
  return entry->second.m_list.size() >= m_max_merged;
}

/// Add or merge this access
/// Add or merge this access: append mf to the (possibly newly created)
/// entry for line_addr, honouring the entry-count and merge-width limits.
void MshrTable::add(addr_t line_addr, MemFetch *mf)
{
  // Look the entry up once instead of three times; operator[] creates the
  // entry on first use, exactly as the original triple-lookup did.
  table::mapped_type &entry = m_data[line_addr];
  entry.m_list.push_back(mf);
  assert(m_data.size() <= m_num_entries);
  assert(entry.m_list.size() <= m_max_merged);
}

/// check is_read_after_write_pending
/// Returns true when the pending list for line_addr contains a read queued
/// behind an earlier write (a "read after write" hazard in the MSHR).
bool MshrTable::is_read_after_write_pending(addr_t line_addr)
{
  // BUG FIX: the original used m_data[line_addr], which default-inserts an
  // empty entry when the address is absent (silently growing the table),
  // and then copied the entire std::list by value. Use find() and iterate
  // the stored list by const reference instead.
  table::const_iterator entry = m_data.find(line_addr);
  if (entry == m_data.end())
    return false;

  const std::list<MemFetch *> &pending = entry->second.m_list;
  bool write_found = false;
  for (std::list<MemFetch *>::const_iterator it = pending.begin();
       it != pending.end(); ++it)
  {
    if ((*it)->is_write()) // Pending Write Request
      write_found = true;
    else if (write_found) // Pending Read Request behind an earlier Write
      return true;
  }

  return false;
}

/// Accept a new cache fill response: mark entry ready for processing
/// Accept a new cache fill response: mark entry ready for processing
void MshrTable::mark_ready(addr_t line_addr)
{
  assert(!busy());
  // The fill must correspond to an entry created by add().
  table::iterator entry = m_data.find(line_addr);
  assert(entry != m_data.end());
  m_current_response.push_back(line_addr);
  // Never more ready responses than tracked entries.
  assert(m_current_response.size() <= m_data.size());
}

/// Returns next ready access
/// Returns the next ready access: pops one merged fetch from the entry at
/// the head of the response queue; releases the entry once it drains.
MemFetch *MshrTable::next_access()
{
  assert(access_ready());
  addr_t line_addr = m_current_response.front();
  // Look the entry up once instead of performing four separate
  // m_data[line_addr] lookups as the original did.
  table::iterator entry = m_data.find(line_addr);
  assert(entry != m_data.end());
  assert(!entry->second.m_list.empty());
  MemFetch *result = entry->second.m_list.front();
  entry->second.m_list.pop_front();
  if (entry->second.m_list.empty())
  {
    // All merged accesses serviced: release the MSHR entry and retire the
    // response that marked it ready.
    m_data.erase(entry);
    m_current_response.pop_front();
  }
  return result;
}

TagArray::~TagArray()
{
  // Release each line object, then the pointer array itself.
  const uint32_t num_lines = m_config.get_num_lines();
  for (uint32_t line = 0; line < num_lines; ++line)
    delete m_lines[line];
  delete[] m_lines;
}

// Construct a tag array that adopts an externally allocated line array.
// new_lines is owned by this object from here on (freed in ~TagArray).
TagArray::TagArray(const CacheConfig &config, uint32_t core_id,
                   CacheLine **new_lines)
    : m_config(config), m_core_id(core_id), m_lines(new_lines)
{
}

// Construct a tag array that allocates its own lines: one CacheLine object
// per line in the configuration (freed in ~TagArray).
TagArray::TagArray(const CacheConfig &config, uint32_t core_id)
    : m_config(config), m_core_id(core_id)
{
  const uint32_t num_lines = config.get_num_lines();
  m_lines = new CacheLine *[num_lines];
  for (uint32_t line = 0; line < num_lines; ++line)
    m_lines[line] = new CacheLine();
}

/// Reset the access statistics and mark the array as untouched.
void TagArray::Init()
{
  is_used = false;
  m_access = 0;
  m_miss = 0;
  m_pending_hit = 0;
}

// Probe using the sector mask carried by the fetch itself.
enum CacheRequestState TagArray::probe(addr_t addr, uint32_t &idx,
                                       MemFetch *mf) const
{
  return probe(addr, idx, mf->get_access_sector_mask(), mf);
}

// Probe the set for `addr` without modifying any line state.
//
// On a hit (or pending hit) `idx` is set to the matching way's global line
// index. On a miss `idx` is set to the victim line chosen by the
// replacement policy (invalid lines first, then LRU/FIFO). Returns
// RESERVATION_FAIL when every way in the set is reserved and no victim can
// be chosen.
enum CacheRequestState TagArray::probe(addr_t addr, uint32_t &idx,
                                       MemAccessSectorMask mask,
                                       MemFetch *mf) const
{
  // assert( m_config.m_write_policy == READ_ONLY );
  uint32_t set_index = m_config.set_index(addr);
  addr_t tag = m_config.tag(addr);

  uint32_t invalid_line = (uint32_t)-1;
  uint32_t valid_line = (uint32_t)-1;
  // NOTE(review): sentinel is (uint32_t)-1 although the variable is 64-bit,
  // so lines whose timestamp exceeds UINT32_MAX would never be selected as
  // replacement candidates — confirm timestamps stay below 2^32.
  uint64_t valid_timestamp = (uint32_t)-1;

  bool all_reserved = true;

  // check for hit or pending hit
  for (uint32_t way = 0; way < m_config.m_assoc; way++)
  {
    // Lines are stored flat: set_index * assoc + way.
    uint32_t index = set_index * m_config.m_assoc + way;
    CacheLine *line = m_lines[index];
    if (line->m_tag == tag)
    {
      if (line->get_state(mask) == RESERVED)
      {
        // Line already reserved by an in-flight miss to the same address.
        idx = index;
        return HIT_RESERVED;
      }
      else if (line->get_state(mask) == VALID)
      {
        idx = index;
        return HIT;
      }
      else if (line->get_state(mask) == MODIFIED)
      {
        // A modified line only counts as a hit while it is readable.
        if (line->is_readable())
        {
          idx = index;
          return HIT;
        }
      }
      else
      {
        assert(line->get_state(mask) == INVALID);
      }
    }
    if (!line->is_reserved_line())
    {
      all_reserved = false;
      if (line->is_invalid_line())
      {
        invalid_line = index;
      }
      else
      {
        // valid line : keep track of most appropriate replacement candidate
        if (m_config.m_replacement_policy == LRU)
        {
          // LRU: evict the line with the oldest last-access time.
          if (line->get_last_access_time() < valid_timestamp)
          {
            valid_timestamp = line->get_last_access_time();
            valid_line = index;
          }
        }
        else if (m_config.m_replacement_policy == FIFO)
        {
          // FIFO: evict the line with the oldest allocation time.
          if (line->get_alloc_time() < valid_timestamp)
          {
            valid_timestamp = line->get_alloc_time();
            valid_line = index;
          }
        }
      }
    }
  }
  if (all_reserved)
  {
    assert(m_config.m_alloc_policy == ON_MISS);
    return RESERVATION_FAIL; // miss and not enough space in cache to allocate
                             // on miss
  }

  // Prefer an invalid line over evicting a valid one.
  if (invalid_line != (uint32_t)-1)
  {
    idx = invalid_line;
  }
  else if (valid_line != (uint32_t)-1)
  {
    idx = valid_line;
  }
  else
    abort(); // if an unreserved block exists, it is either invalid or
             // replaceable
  return MISS;
}

// Convenience overload for callers that must never trigger a write-back:
// delegates to the full access() and asserts no eviction of dirty data.
enum CacheRequestState TagArray::access(addr_t addr, cycle_t time, uint32_t &idx, MemFetch *mf)
{
  bool wb = false;
  EvictedLineInfo dummy_evicted;
  const enum CacheRequestState state =
      access(addr, time, idx, wb, dummy_evicted, mf);
  assert(!wb);
  return state;
}

// Access the tag array for `addr`, updating statistics and line state.
//
// Outputs: `idx` (line touched or allocated), `wb`/`evicted` (set when a
// modified victim must be written back). Returns the probe outcome.
enum CacheRequestState TagArray::access(addr_t addr, cycle_t time,
                                        uint32_t &idx, bool &wb,
                                        EvictedLineInfo &evicted,
                                        MemFetch *mf)
{
  m_access++;
  is_used = true;
  enum CacheRequestState status;
  status = probe(addr, idx, mf);
  switch (status)
  {
  case HIT_RESERVED:
    m_pending_hit++;
    // intentional fall-through: a pending hit still refreshes LRU state
  case HIT:
    m_lines[idx]->set_last_access_time(time, mf->get_access_sector_mask());
    break;
  case MISS:
    m_miss++;
    // probe() chose the victim in idx; a dirty victim needs a write-back.
    if (m_lines[idx]->is_modified_line())
    {
      wb = true;
      evicted.set_info(m_lines[idx]->m_line_addr,
                       m_lines[idx]->get_modified_size());
    }
    // Allocate-on-miss: reserve the line immediately.
    m_lines[idx]->allocate(m_config.tag(addr), m_config.line_addr(addr), time);
    break;

  case RESERVATION_FAIL:
    break;
    // default:
    // fprintf(stderr,
    //"TagArray::access - Error: Unknown, CacheRequestState %d\n",
    // status);
    // abort();
  }
  return status;
}

// Fill using the sector mask carried by the fetch.
void TagArray::fill(addr_t addr, cycle_t time, MemFetch *mf)
{
  MemAccessSectorMask mask = mf->get_access_sector_mask();
  fill(addr, time, mask);
}

// Allocate-on-fill path: the victim line is allocated only when the data
// returns from the lower level, then marked filled.
void TagArray::fill(addr_t addr, cycle_t time, MemAccessSectorMask mask)
{
  assert(m_config.m_alloc_policy == ON_FILL);
  uint32_t idx;
  const enum CacheRequestState state = probe(addr, idx, mask);
  // A true miss means the chosen line has not been allocated yet; the MSHR
  // is expected to have filtered redundant requests.
  if (state == MISS)
    m_lines[idx]->allocate(m_config.tag(addr), m_config.line_addr(addr), time);
  m_lines[idx]->fill(time);
}

// Fill a line selected by index rather than address (allocate-on-miss: the
// line was already allocated/reserved when the miss was issued).
// NOTE(review): fill_by_index is unused — presumably it only exists to
// disambiguate this overload; confirm before removing.
void TagArray::fill(bool fill_by_index, uint32_t index, cycle_t time)
{
  // ON_MISS
  m_lines[index]->fill(time);
}

void TagArray::invalidate()
{
  if (!is_used)
    return;

  for (uint32_t i = 0; i < m_config.get_num_lines(); i++)
    for (uint32_t j = 0; j < SECTOR_CHUNCK_SIZE; j++)
      m_lines[i]->set_state(INVALID, MemAccessSectorMask().set(j));

  is_used = false;
}

// TODO: we need write back the flushed data to the upper level
void TagArray::flush()
{
  if (!is_used)
    return;

  for (uint32_t i = 0; i < m_config.get_num_lines(); i++)
    if (m_lines[i]->is_modified_line())
    {
      for (uint32_t j = 0; j < SECTOR_CHUNCK_SIZE; j++)
        m_lines[i]->set_state(INVALID, MemAccessSectorMask().set(j));
    }

  is_used = false;
}

bool was_write_sent(const std::list<CacheEvent> &events)
{
  for (std::list<CacheEvent>::const_iterator e = events.begin();
       e != events.end(); e++)
  {
    if ((*e).m_cache_event_type == WRITE_REQUEST_SENT)
      return true;
  }
  return false;
}

/// Scan the event list for a WRITE_BACK_REQUEST_SENT entry. When found,
/// copy the event into wb_event and return true; otherwise return false.
bool was_writeback_sent(const std::list<CacheEvent> &events,
                        CacheEvent &wb_event)
{
  for (std::list<CacheEvent>::const_iterator e = events.begin();
       e != events.end(); e++)
  {
    // BUG FIX: the original `return true;` sat outside the if-body (missing
    // braces), so the function returned true for ANY non-empty event list
    // and only ever inspected the first element. The sibling was_*_sent
    // helpers show the intended pattern.
    if ((*e).m_cache_event_type == WRITE_BACK_REQUEST_SENT)
    {
      wb_event = *e;
      return true;
    }
  }
  return false;
}

bool was_read_sent(const std::list<CacheEvent> &events)
{
  for (std::list<CacheEvent>::const_iterator e = events.begin();
       e != events.end(); e++)
  {
    if ((*e).m_cache_event_type == READ_REQUEST_SENT)
      return true;
  }
  return false;
}

bool was_writeallocate_sent(const std::list<CacheEvent> &events)
{
  for (std::list<CacheEvent>::const_iterator e = events.begin();
       e != events.end(); e++)
  {
    if ((*e).m_cache_event_type == WRITE_ALLOCATE_SENT)
      return true;
  }
  return false;
}

// Both ports start idle (zero occupied cycles).
BaseCache::BandwidthManagement::BandwidthManagement(const CacheConfig &config)
    : m_config(config)
{
  m_data_port_occupied_cycles = 0;
  m_fill_port_occupied_cycles = 0;
}

/// use the data port based on the outcome and events generated by the MemFetch
/// request
/// use the data port based on the outcome and events generated by the MemFetch
/// request: charge the port for however many cycles the transfer occupies.
void BaseCache::BandwidthManagement::use_data_port(
    MemFetch *mf, enum CacheRequestState outcome,
    const std::list<CacheEvent> &events)
{
  uint32_t data_size = mf->get_data_size();
  uint32_t port_width = m_config.m_data_port_width;
  switch (outcome)
  {
  case HIT:
  {
    // Read the requested data out of the array: round up to whole cycles.
    uint32_t data_cycles =
        data_size / port_width + ((data_size % port_width > 0) ? 1 : 0);
    m_data_port_occupied_cycles += data_cycles;
  }
  break;
  case HIT_RESERVED:
  case MISS:
  {
    // the data array is accessed to read out the entire line for write-back
    // in case of sector cache we need to write bank only the modified sectors
    CacheEvent ev(WRITE_BACK_REQUEST_SENT);
    if (was_writeback_sent(events, ev))
    {
      // NOTE(review): unlike the HIT case this division does not round up a
      // partial-width remainder — confirm modified_size is always a
      // multiple of the port width.
      uint32_t data_cycles = ev.m_evicted_block.m_modified_size / port_width;
      m_data_port_occupied_cycles += data_cycles;
    }
  }
  break;
  case RESERVATION_FAIL:
    // Does not consume any port bandwidth
    break;
  default:
    assert(0);
    break;
  }
}

/// use the fill port
/// Charge the fill port for one whole-line (atom) transfer; the particular
/// fetch does not affect the cost.
void BaseCache::BandwidthManagement::use_fill_port(MemFetch *mf)
{
  const uint32_t cycles_needed =
      m_config.get_atom_sz() / m_config.m_data_port_width;
  m_fill_port_occupied_cycles += cycles_needed;
}

/// called every cache cycle to free up the ports
/// called every cache cycle to free up the ports: each pass releases one
/// cycle of occupancy on the data port and one on the fill port.
void BaseCache::BandwidthManagement::replenish_port_bandwidth()
{
  if (m_data_port_occupied_cycles > 0)
    --m_data_port_occupied_cycles;
  assert(m_data_port_occupied_cycles >= 0);

  if (m_fill_port_occupied_cycles > 0)
    --m_fill_port_occupied_cycles;
  assert(m_fill_port_occupied_cycles >= 0);
}

/// query for data port availability
/// query for data port availability
bool BaseCache::BandwidthManagement::data_port_free() const
{
  return m_data_port_occupied_cycles == 0;
}

/// query for fill port availability
/// query for fill port availability
bool BaseCache::BandwidthManagement::fill_port_free() const
{
  return m_fill_port_occupied_cycles == 0;
}
/// A cache is active while it still has misses to forward downstream.
bool BaseCache::active()
{
  return !m_miss_queue.empty();
}
/// Sends next request to lower level of memory
void BaseCache::cycle()
{
  if (!m_miss_queue.empty())
  {
    MemFetch *mf = m_miss_queue.front();
    if (!m_memport->full(mf->size(), mf->get_is_write()))
    {
      m_miss_queue.pop_front();

    //if(m_pe->m_simulator->m_config->m_debug_timing)
      //  printf("%s push req to inct, req addr:%ld\n", m_name.c_str(), mf->get_addr());
      m_memport->push(mf);
    }
  }
  bool data_port_busy = !m_bandwidth_management.data_port_free();
  bool fill_port_busy = !m_bandwidth_management.fill_port_free();
  m_bandwidth_management.replenish_port_bandwidth();
}

/// Interface for response from lower memory level (model bandwidth restictions
/// in caller)
void BaseCache::fill(MemFetch *mf, cycle_t time)
{
  //if (m_config.m_mshr_type == SECTOR_ASSOC) {
    //assert(mf->get_original_mf());
    //extra_mf_fields_lookup::iterator e = m_extra_mf_fields.find(mf->get_original_mf());
    //assert(e != m_extra_mf_fields.end());
    //e->second.pending_read--;

    //if (e->second.pending_read > 0) {
      //// wait for the other requests to come back
      //delete mf;
      //return;
    //} else {
      //MemFetch *temp = mf;
      //mf = mf->get_original_mf();
      //delete temp;
    //}
  //}
  extra_mf_fields_lookup::iterator e = m_extra_mf_fields.find(mf);
  assert(e != m_extra_mf_fields.end());
  assert(e->second.m_valid);
  mf->set_data_size(e->second.m_data_size);
  mf->set_addr(e->second.m_addr);

  if (m_config.m_alloc_policy == ON_MISS)
    m_tag_array->fill(e->second.m_cache_index, time, mf);
  else if (m_config.m_alloc_policy == ON_FILL)
  {
    m_tag_array->fill(e->second.m_line_addr, time, mf);
    // if (m_config.is_streaming()) m_tag_array->remove_pending_line(mf);
  }
  else
    abort();
  m_mshrs.mark_ready(e->second.m_line_addr);

  m_extra_mf_fields.erase(mf);
  m_bandwidth_management.use_fill_port(mf);
}

/// Checks if mf is waiting to be filled by lower memory level
/// Checks if mf is waiting to be filled by lower memory level
bool BaseCache::waiting_for_fill(MemFetch *mf)
{
  return m_extra_mf_fields.find(mf) != m_extra_mf_fields.end();
}

/// Read miss handler without writeback
void BaseCache::send_read_request(addr_t addr, addr_t linear_addr,
                                  uint32_t cache_index, MemFetch *mf,
                                  cycle_t time, bool &do_miss,
                                  std::list<CacheEvent> &events,
                                  bool read_only, bool wa)
{
  bool wb = false;
  EvictedLineInfo e;
  send_read_request(addr, linear_addr, cache_index, mf, time, do_miss, wb, e,
                    events, read_only, wa);
}

/// Read miss handler. Check MSHR hit or MSHR available
void BaseCache::send_read_request(addr_t addr, addr_t line_addr,
                                  uint32_t cache_index, MemFetch *mf, cycle_t time,
                                  bool &do_miss, bool &wb, EvictedLineInfo &evicted,
                                  std::list<CacheEvent> &events,
                                  bool read_only, bool wa)
{
  addr_t mshr_addr = m_config.mshr_addr(mf->get_addr());
  bool mshr_hit = m_mshrs.probe(mshr_addr);
  bool mshr_avail = !m_mshrs.full(mshr_addr);
  if (mshr_hit && mshr_avail)
  {
    if (read_only)
      m_tag_array->access(line_addr, time, cache_index, mf);
    else
      m_tag_array->access(line_addr, time, cache_index, wb, evicted, mf);

    m_mshrs.add(mshr_addr, mf);
    do_miss = true;
  }
  else if (!mshr_hit && mshr_avail &&
           (m_miss_queue.size() < m_config.m_miss_queue_size))
  {
    if (read_only)
      m_tag_array->access(line_addr, time, cache_index, mf);
    else
      m_tag_array->access(line_addr, time, cache_index, wb, evicted, mf);

    m_mshrs.add(mshr_addr, mf);
    // if (m_config.is_streaming() && m_config.m_cache_type == SECTOR) {
    // m_tag_array->add_pending_line(mf);
    //}
    m_extra_mf_fields[mf] = extra_mf_fields(mshr_addr, mf->get_addr(), cache_index, mf->get_data_size(), m_config);
    mf->set_data_size(m_config.get_atom_sz());
    mf->set_addr(mshr_addr);
    m_miss_queue.push_back(mf);
    mf->set_status(m_miss_queue_state, time);
    if (!wa)
      events.push_back(CacheEvent(READ_REQUEST_SENT));

    do_miss = true;
  }
  else if (mshr_hit && !mshr_avail)
  {
  }
  // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_MERGE_ENRTY_FAIL);
  else if (!mshr_hit && !mshr_avail)
  {
  }
  // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_ENRTY_FAIL);
  else
    assert(0);
}

/// Access cache for read_only_cache: returns RESERVATION_FAIL if
// request could not be accepted (for any reason)
/// Access cache for read_only_cache: returns RESERVATION_FAIL if
// request could not be accepted (for any reason)
enum CacheRequestState ReadOnlyCache::access(
    addr_t addr, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events)
{
  assert(mf->get_data_size() <= m_config.get_atom_sz());
  // assert(m_config.m_write_policy == READ_ONLY);
  // Read-only cache never accepts writes.
  assert(!mf->get_is_write());
  addr_t line_addr = m_config.line_addr(addr);
  uint32_t cache_index = (uint32_t)-1;
  enum CacheRequestState state =
      m_tag_array->probe(line_addr, cache_index, mf);
  enum CacheRequestState cache_state = RESERVATION_FAIL;

  if (state == HIT)
  {
    cache_state = m_tag_array->access(line_addr, time, cache_index, mf); // update LRU state
  }
  else if (state != RESERVATION_FAIL)
  {
    // Miss (or pending hit): issue/merge a read if the miss queue has room.
    if (!miss_queue_full(0))
    {
      bool do_miss = false;
      send_read_request(addr, line_addr, cache_index, mf, time, do_miss,
                        events, true, false);
      // do_miss stays false when the MSHR could not accept the request.
      if (do_miss)
        cache_state = MISS;
      else
        cache_state = RESERVATION_FAIL;
    }
    else
    {
      cache_state = RESERVATION_FAIL;
      // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
    }
  }
  else
  {
    // Probe failed outright: no line could be reserved this cycle.
    // m_stats.inc_fail_stats(mf->get_access_type(), LINE_ALLOC_FAIL);
  }

  // m_stats.inc_stats(mf->get_access_type(),
  // m_stats.select_stats_status(status, cache_status));
  // m_stats.inc_stats_pw(mf->get_access_type(),
  // m_stats.select_stats_status(status, cache_status));
  return cache_state;
}

/// Sends write request to lower level memory (write or writeback)
void DataCache::send_write_request(MemFetch *mf, CacheEvent request,
                                   cycle_t time,
                                   std::list<CacheEvent> &events)
{
  events.push_back(request);
  m_miss_queue.push_back(mf);
  mf->set_status(m_miss_queue_state, time);
}

/****** Write-hit functions (Set by config file) ******/

/// Write-back hit: Mark block as modified
/// Write-back hit: touch the line to update replacement state and mark the
/// written sector dirty. No downstream traffic is generated.
CacheRequestState DataCache::wr_hit_wb(addr_t addr,
                                       uint32_t cache_index, MemFetch *mf,
                                       cycle_t time,
                                       std::list<CacheEvent> &events,
                                       CacheRequestState state)
{
  const addr_t block_addr = m_config.line_addr(addr);
  m_tag_array->access(block_addr, time, cache_index, mf); // update LRU state
  CacheLine *hit_line = m_tag_array->get_line(cache_index);
  hit_line->set_state(MODIFIED, mf->get_access_sector_mask());
  return HIT;
}

/// Write-through hit: Directly send request to lower level memory
/// Write-through hit: update the line like a write-back hit, then also
/// forward the write to the lower memory level immediately.
CacheRequestState DataCache::wr_hit_wt(addr_t addr,
                                       uint32_t cache_index, MemFetch *mf,
                                       cycle_t time,
                                       std::list<CacheEvent> &events,
                                       enum CacheRequestState status)
{
  // Need one miss-queue slot for the write-through request.
  if (miss_queue_full(0))
  {
    // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
    return RESERVATION_FAIL; // cannot handle request this cycle
  }

  const addr_t block_addr = m_config.line_addr(addr);
  m_tag_array->access(block_addr, time, cache_index, mf); // update LRU state
  CacheLine *hit_line = m_tag_array->get_line(cache_index);
  hit_line->set_state(MODIFIED, mf->get_access_sector_mask());

  // generate a write-through
  send_write_request(mf, CacheEvent(WRITE_REQUEST_SENT), time, events);

  return HIT;
}

/// Write-evict hit: Send request to lower level memory and invalidate
/// corresponding block
/// Write-evict hit: forward the write to the lower level and invalidate the
/// corresponding sector of the hit line.
CacheRequestState DataCache::wr_hit_we(addr_t addr,
                                           uint32_t cache_index, MemFetch *mf,
                                           cycle_t time,
                                           std::list<CacheEvent> &events,
                                           enum CacheRequestState status) {
  // Need one miss-queue slot for the write request.
  if (miss_queue_full(0)) {
    return RESERVATION_FAIL;  // cannot handle request this cycle
  }

  // generate a write-through/evict
  CacheLine *hit_line = m_tag_array->get_line(cache_index);
  send_write_request(mf, CacheEvent(WRITE_REQUEST_SENT), time, events);

  // Invalidate block
  hit_line->set_state(INVALID, mf->get_access_sector_mask());

  return HIT;
}

/// Global write-evict, local write-back: Useful for private caches
/// Global write-evict, local write-back: Useful for private caches.
/// Global-memory writes evict the line; all other writes mark it dirty.
CacheRequestState  DataCache::wr_hit_global_we_local_wb(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status) {
  if (mf->get_access_type() == GLOBAL_ACC_W)
    return wr_hit_we(addr, cache_index, mf, time, events, status);
  return wr_hit_wb(addr, cache_index, mf, time, events, status);
}


/****** Write-miss functions (Set by config file) ******/

/// Write-allocate miss: Send write request to lower level memory
// and send a read request for the same block
/// Write-allocate miss (naive): send the write to the lower level memory
/// AND issue a read for the same block so it can be allocated locally.
/// Returns MISS when both were issued, RESERVATION_FAIL otherwise.
enum CacheRequestState DataCache::wr_miss_wa_naive(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status)
{
  addr_t block_addr = m_config.line_addr(addr);
  addr_t mshr_addr = m_config.mshr_addr(mf->get_addr());

  // Write allocate, maximum 3 requests (write miss, read request, write back
  // request) Conservatively ensure the worst-case request can be handled this
  // cycle
  bool mshr_hit = m_mshrs.probe(mshr_addr);
  bool mshr_avail = !m_mshrs.full(mshr_addr);
  if (miss_queue_full(2) ||
      (!(mshr_hit && mshr_avail) &&
       !(!mshr_hit && mshr_avail &&
         (m_miss_queue.size() < m_config.m_miss_queue_size))))
  {
    // check what is the exactly the failure reason
    // if (miss_queue_full(2))
    // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
    // else if (mshr_hit && !mshr_avail)
    // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_MERGE_ENRTY_FAIL);
    // else if (!mshr_hit && !mshr_avail)
    // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_ENRTY_FAIL);
    // else
    // assert(0);

    return RESERVATION_FAIL;
  }

  send_write_request(mf, CacheEvent(WRITE_REQUEST_SENT), time, events);
  // Tries to send write allocate request, returns true on success and false on
  // failure
  // if(!send_write_allocate(mf, addr, block_addr, cache_index, time, events))
  //    return RESERVATION_FAIL;

  // NOTE(review): `ma` is heap-allocated and passed by value into MemFetch;
  // confirm MemFetch takes ownership, otherwise this allocation leaks.
  const MemAccess *ma =
      new MemAccess(m_wr_alloc_type, mf->get_addr(), false, m_config.get_atom_sz(),
                    // Now performing a read
                    mf->get_access_byte_mask(),
                    mf->get_access_sector_mask());

  MemFetch *n_mf =
      new MemFetch(*ma, mf->get_ctrl_size(), mf->get_request_id(),
                   mf->get_mem_config(),
                   m_simulator->simulator_sim_cycle);

  bool do_miss = false;
  bool wb = false;
  EvictedLineInfo evicted;

  // Send read request resulting from write miss
  send_read_request(addr, block_addr, cache_index, n_mf, time, do_miss, wb,
                    evicted, events, false, true);

  events.push_back(CacheEvent(WRITE_ALLOCATE_SENT));

  if (do_miss)
  {
    // If evicted block is modified and not a write-through
    // (already modified lower level)
    if (wb && (m_config.m_write_policy != WRITE_THROUGH))
    {
      assert(status ==
             MISS); // SECTOR_MISS and HIT_RESERVED should not send write back
      // BUG-PRONE SHADOWING FIX: the original named this MemFetch* `wb`,
      // shadowing the bool `wb` flag tested just above; renamed wb_fetch.
      MemFetch *wb_fetch = m_memfetch_creator->alloc(
          evicted.m_line_addr, m_wrbk_type, evicted.m_modified_size, true,
          m_simulator->simulator_sim_cycle);
      // the evicted block may have wrong chip id when advanced L2 hashing  is
      // used, so set the right chip address from the original mf
      wb_fetch->set_chip(mf->get_tlx_addr().chip);
      wb_fetch->set_parition(mf->get_tlx_addr().sub_partition);
      send_write_request(wb_fetch, CacheEvent(WRITE_BACK_REQUEST_SENT, evicted),
                         time, events);
    }
    return MISS;
  }

  return RESERVATION_FAIL;
}

// Write-allocate miss, fetch-on-write policy.
//
// Full-line writes are completed locally without any read: the line is
// allocated and marked MODIFIED. Partial writes first fetch the line from
// the lower level, marking it modified-on-fill.
enum CacheRequestState DataCache::wr_miss_wa_fetch_on_write(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status)
{
  addr_t block_addr = m_config.line_addr(addr);
  addr_t mshr_addr = m_config.mshr_addr(mf->get_addr());

  if (mf->get_access_byte_mask().count() == m_config.get_atom_sz())
  {
    // if the request writes to the whole cache line/sector, then, write and set
    // cache line Modified. and no need to send read request to memory or
    // reserve mshr

    if (miss_queue_full(0))
    {
      // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
      return RESERVATION_FAIL; // cannot handle request this cycle
    }

    bool wb = false;
    EvictedLineInfo evicted;

    // NOTE(review): this local `status` shadows the parameter of the same
    // name — everything below refers to the access() result, not the
    // caller-supplied status.
    CacheRequestState status =
        m_tag_array->access(block_addr, time, cache_index, wb, evicted, mf);
    assert(status != HIT);
    CacheLine *line = m_tag_array->get_line(cache_index);
    line->set_state(MODIFIED, mf->get_access_sector_mask());
    if (status == HIT_RESERVED)
      line->set_ignore_on_fill(true, mf->get_access_sector_mask());

    if (status != RESERVATION_FAIL)
    {
      // If evicted block is modified and not a write-through
      // (already modified lower level)
      if (wb && (m_config.m_write_policy != WRITE_THROUGH))
      {
        // NOTE(review): this MemFetch* `wb` shadows the bool `wb` flag
        // tested just above.
        MemFetch *wb = m_memfetch_creator->alloc(
            evicted.m_line_addr, m_wrbk_type, evicted.m_modified_size, true,
            m_simulator->simulator_sim_cycle);
        // the evicted block may have wrong chip id when advanced L2 hashing  is
        // used, so set the right chip address from the original mf
        wb->set_chip(mf->get_tlx_addr().chip);
        wb->set_parition(mf->get_tlx_addr().sub_partition);
        send_write_request(wb, CacheEvent(WRITE_BACK_REQUEST_SENT, evicted),
                           time, events);
      }
      return MISS;
    }
    return RESERVATION_FAIL;
  }
  else
  {
    // Partial write: must fetch the rest of the line from the lower level.
    bool mshr_hit = m_mshrs.probe(mshr_addr);
    bool mshr_avail = !m_mshrs.full(mshr_addr);
    if (miss_queue_full(1) ||
        (!(mshr_hit && mshr_avail) &&
         !(!mshr_hit && mshr_avail &&
           (m_miss_queue.size() < m_config.m_miss_queue_size))))
    {
      // check what is the exactly the failure reason
      // if (miss_queue_full(1))
      // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
      // else if (mshr_hit && !mshr_avail)
      // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_MERGE_ENRTY_FAIL);
      // else if (!mshr_hit && !mshr_avail)
      // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_ENRTY_FAIL);
      // else
      // assert(0);

      return RESERVATION_FAIL;
    }

    // prevent Write - Read - Write in pending mshr
    // allowing another write will override the value of the first write, and
    // the pending read request will read incorrect result from the second write
    if (m_mshrs.probe(mshr_addr) &&
        m_mshrs.is_read_after_write_pending(mshr_addr) && mf->is_write())
    {
      // assert(0);
      // m_stats.inc_fail_stats(mf->get_access_type(), MSHR_RW_PENDING);
      return RESERVATION_FAIL;
    }

    // NOTE(review): `ma` is heap-allocated and passed by value into
    // MemFetch; confirm MemFetch takes ownership, otherwise this leaks.
    const MemAccess *ma = new MemAccess(
        m_wr_alloc_type, mf->get_addr(), false, m_config.get_atom_sz(),
        // Now performing a read
        mf->get_access_byte_mask(),
        mf->get_access_sector_mask());

    MemFetch *n_mf = new MemFetch(
        *ma, mf->get_ctrl_size(), mf->get_request_id(),
        mf->get_mem_config(),
        m_simulator->simulator_sim_cycle, NULL, mf);

    // NOTE(review): shadows the outer block_addr (same value).
    addr_t block_addr = m_config.line_addr(addr);
    bool do_miss = false;
    bool wb = false;
    EvictedLineInfo evicted;
    send_read_request(addr, block_addr, cache_index, n_mf, time, do_miss, wb,
                      evicted, events, false, true);

    // The fetched line must be marked dirty when the read returns, since
    // this write already updated part of it.
    CacheLine *line = m_tag_array->get_line(cache_index);
    line->set_modified_on_fill(true, mf->get_access_sector_mask());

    events.push_back(CacheEvent(WRITE_ALLOCATE_SENT));

    if (do_miss)
    {
      // If evicted block is modified and not a write-through
      // (already modified lower level)
      if (wb && (m_config.m_write_policy != WRITE_THROUGH))
      {
        // NOTE(review): this MemFetch* `wb` shadows the bool `wb` flag
        // tested just above.
        MemFetch *wb = m_memfetch_creator->alloc(
            evicted.m_line_addr, m_wrbk_type, evicted.m_modified_size, true,
            m_simulator->simulator_sim_cycle);
        // the evicted block may have wrong chip id when advanced L2 hashing  is
        // used, so set the right chip address from the original mf
        wb->set_chip(mf->get_tlx_addr().chip);
        wb->set_parition(mf->get_tlx_addr().sub_partition);
        send_write_request(wb, CacheEvent(WRITE_BACK_REQUEST_SENT, evicted),
                           time, events);
      }
      return MISS;
    }
    return RESERVATION_FAIL;
  }
}

/// Write-allocate miss, lazy fetch on read: mark the written sector MODIFIED
/// immediately without sending a read to memory or reserving an MSHR; a
/// later read of bytes this write did not cover will trigger the fetch
/// (the sector stays non-readable unless fully overwritten).
enum CacheRequestState DataCache::wr_miss_wa_lazy_fetch_on_read(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status)
{
  addr_t block_addr = m_config.line_addr(addr);

  // if the request writes to the whole cache line/sector, then, write and set
  // cache line Modified. and no need to send read request to memory or reserve
  // mshr

  if (miss_queue_full(0))
  {
    // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
    return RESERVATION_FAIL; // cannot handle request this cycle
  }

  bool wb = false;
  EvictedLineInfo evicted;

  // access() updates replacement state and may reserve a line, possibly
  // evicting a victim; `wb`/`evicted` report a dirty victim to write back.
  CacheRequestState m_state =
      m_tag_array->access(block_addr, time, cache_index, wb, evicted, mf);
  assert(m_state != HIT);
  CacheLine *line = m_tag_array->get_line(cache_index);
  line->set_state(MODIFIED, mf->get_access_sector_mask());
  if (m_state == HIT_RESERVED)
  {
    // Line is still being filled by an earlier miss: ignore that fill for
    // this sector and re-mark it modified when the fill arrives.
    line->set_ignore_on_fill(true, mf->get_access_sector_mask());
    line->set_modified_on_fill(true, mf->get_access_sector_mask());
  }

  // The sector is readable only if this write covers every byte of it;
  // otherwise a subsequent read must first fetch the missing bytes.
  if (mf->get_access_byte_mask().count() == m_config.get_atom_sz())
  {
    line->set_m_readable(true, mf->get_access_sector_mask());
  }
  else
  {
    line->set_m_readable(false, mf->get_access_sector_mask());
  }

  if (m_state != RESERVATION_FAIL)
  {
    // If evicted block is modified and not a write-through
    // (already modified lower level)
    if (wb && (m_config.m_write_policy != WRITE_THROUGH))
    {
      // Renamed from `wb` to stop shadowing the bool write-back flag above.
      MemFetch *wb_fetch = m_memfetch_creator->alloc(
          evicted.m_line_addr, m_wrbk_type, evicted.m_modified_size, true,
          m_simulator->simulator_sim_cycle);
      // the evicted block may have wrong chip id when advanced L2 hashing is
      // used, so set the right chip address from the original mf
      wb_fetch->set_chip(mf->get_tlx_addr().chip);
      wb_fetch->set_parition(mf->get_tlx_addr().sub_partition);
      send_write_request(wb_fetch, CacheEvent(WRITE_BACK_REQUEST_SENT, evicted),
                         time, events);
    }
    return MISS;
  }
  return RESERVATION_FAIL;
}

/// No write-allocate miss: Simply send write request to lower level memory.
enum CacheRequestState DataCache::wr_miss_no_wa(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status)
{
  // Without write-allocate nothing is installed in the cache: the write is
  // simply forwarded to the next level (no write buffering -- too many
  // threads for that).
  if (miss_queue_full(0))
    return RESERVATION_FAIL; // out of miss-queue slots this cycle

  send_write_request(mf, CacheEvent(WRITE_REQUEST_SENT), time, events);
  return MISS;
}

/****** Read hit functions (Set by config file) ******/

/// Baseline read hit: update the replacement (LRU) state of the block.
// NOTE(review): the original comment mentions marking the line modified for
// atomic instructions, but no such handling is visible here -- confirm.
enum CacheRequestState DataCache::rd_hit_base(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status)
{
  // Touch the line so the replacement policy records this access.
  m_tag_array->access(m_config.line_addr(addr), time, cache_index, mf);
  return HIT;
}

/****** Read miss functions (Set by config file) ******/

/// Baseline read miss: Send read request to lower level memory,
/// perform write-back as necessary.
enum CacheRequestState DataCache::rd_miss_base(
    addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events, enum CacheRequestState status)
{
  if (miss_queue_full(1))
  {
    // cannot handle request this cycle
    // (might need to generate two requests: the read plus a write-back)
    // m_stats.inc_fail_stats(mf->get_access_type(), MISS_QUEUE_FULL);
    return RESERVATION_FAIL;
  }

  addr_t line_addr = m_config.line_addr(addr);
  bool do_miss = false;
  bool wb = false;
  EvictedLineInfo evicted;
  send_read_request(addr, line_addr, cache_index, mf, time, do_miss, wb,
                    evicted, events, false, false);

  if (do_miss)
  {
    // If evicted line is modified and not a write-through
    // (already modified lower level)
    if (wb && (m_config.m_write_policy != WRITE_THROUGH))
    {
      // Renamed from `wb` to stop shadowing the bool write-back flag above.
      MemFetch *wb_fetch = m_memfetch_creator->alloc(
          evicted.m_line_addr, m_wrbk_type, evicted.m_modified_size, true,
          m_simulator->simulator_sim_cycle);
      // the evicted line may have wrong chip id when advanced L2 hashing is
      // used, so set the right chip address from the original mf
      wb_fetch->set_chip(mf->get_tlx_addr().chip);
      wb_fetch->set_parition(mf->get_tlx_addr().sub_partition);
      // Attach the evicted-line info to the event, consistent with the other
      // write-back sites in this file (previously a bare enum was passed and
      // the evicted info was dropped from the event).
      send_write_request(wb_fetch, CacheEvent(WRITE_BACK_REQUEST_SENT, evicted),
                         time, events);
    }
    return MISS;
  }
  return RESERVATION_FAIL;
}
//! A general function that takes the result of a tag_array probe
//  and performs the correspding functions based on the cache configuration
//  The access fucntion calls this function
enum CacheRequestState DataCache::process_tag_probe(
    bool wr, enum CacheRequestState probe_status, addr_t addr,
    uint32_t cache_index, MemFetch *mf, cycle_t time,
    std::list<CacheEvent> &events)
{
  // Each function pointer ( m_[rd/wr]_[hit/miss] ) is set in the
  // DataCache constructor to reflect the corresponding cache configuration
  // options. Function pointers were used to avoid many long conditional
  // branches resulting from many cache configuration options.
  CacheRequestState access_status = probe_status;
  if (wr)
  { // Write
    if (probe_status == HIT)
    {
      access_status =
          (this->*m_wr_hit)(addr, cache_index, mf, time, events, probe_status);
    }
    else if ((probe_status != RESERVATION_FAIL) ||
             (probe_status == RESERVATION_FAIL &&
              m_config.m_write_alloc_policy == NO_WRITE_ALLOCATE))
    {
      access_status =
          (this->*m_wr_miss)(addr, cache_index, mf, time, events, probe_status);
    }
    else
    {
      // the only reason for reservation fail here is LINE_ALLOC_FAIL (i.e all
      // lines are reserved)
      // m_stats.inc_fail_stats(mf->get_access_type(), LINE_ALLOC_FAIL);
    }
  }
  else
  { // Read
    if (probe_status == HIT)
    {
      access_status =
          (this->*m_rd_hit)(addr, cache_index, mf, time, events, probe_status);
    }
    else if (probe_status != RESERVATION_FAIL)
    {
      access_status =
          (this->*m_rd_miss)(addr, cache_index, mf, time, events, probe_status);
    }
    else
    {
      // the only reason for reservation fail here is LINE_ALLOC_FAIL (i.e all
      // lines are reserved)
      // m_stats.inc_fail_stats(mf->get_access_type(), LINE_ALLOC_FAIL);
    }
  }

  m_bandwidth_management.use_data_port(mf, access_status, events);
  return access_status;
}

// Both the L1 and L2 currently use the same access function.
// Differentiation between the two caches is done through configuration
// of caching policies.
// Both the L1 and L2 override this function to provide a means of
// performing actions specific to each cache when such actions are implemented.
enum CacheRequestState DataCache::access(addr_t addr, MemFetch *mf,
                                         cycle_t time,
                                         std::list<CacheEvent> &events)
{
  assert(mf->get_data_size() <= m_config.get_atom_sz());
  uint32_t idx = (uint32_t)-1; // filled in by the tag-array probe
  const addr_t line_addr = m_config.line_addr(addr);
  const enum CacheRequestState probe_result =
      m_tag_array->probe(line_addr, idx, mf);
  // Dispatch to the configured hit/miss handler for this probe result.
  return process_tag_probe(mf->get_is_write(), probe_result, addr, idx, mf,
                           time, events);
}
/// This is meant to model the first level data cache in Fermi.
/// It is write-evict (global) or write-back (local) at the
/// granularity of individual blocks (Set by  configuration file)
enum CacheRequestState L1Cache::access(addr_t addr, MemFetch *mf,
                                       cycle_t time,
                                       std::list<CacheEvent> &events)
{
  // Delegate to the shared data-cache implementation; L1-specific behavior
  // is expressed entirely through configuration.
  return DataCache::access(addr, mf, time, events);
}

// The l2 cache access function calls the base DataCache access
// implementation.  When the L2 needs to diverge from L1, L2 specific
// changes should be made here.
enum CacheRequestState L2Cache::access(addr_t addr, MemFetch *mf,
                                       cycle_t time,
                                       std::list<CacheEvent> &events)
{
  const enum CacheRequestState state =
      DataCache::access(addr, mf, time, events);
  return state;
}