#ifndef _CACHE_H_
#define _CACHE_H_

#include <iostream>
#include <string>
#include <vector>
#include <list>
#include <set>
#include <map>
#include <bitset>
#include <cstring>
#include <cassert>
#include <unordered_map>

#include "types.h"
#include "simulator_misc.h"
#include "hardware_model.h"

class PE;

// Reason codes explaining why a cache access came back RESERVATION_FAIL:
// the access could not reserve the resources needed to proceed.
// (NOTE: the "ENRTY" spelling is a typo, but the identifiers are part of the
// public interface and must not be renamed here.)
enum CacheReservationFailReason
{
  LINE_ALLOC_FAIL = 0, // all lines are reserved; no victim line available
  MISS_QUEUE_FULL,     // MISS queue (i.e. interconnect or DRAM) is full
  MSHR_ENRTY_FAIL,     // no free MSHR entry for a new miss
  MSHR_MERGE_ENRTY_FAIL, // MSHR entry exists but its merge limit is reached
  MSHR_RW_PENDING,     // read-after-write pending on this line in the MSHR
  NUM_CACHE_RESERVATION_FAIL_STATUS // count sentinel - keep last
};

// Set-index hashing schemes; selected by the 'sif' character of the cache
// config string (see CacheConfig::init: 'L', 'P', 'H', 'C').
enum SetIndexFunction
{
  LINEAR_SET_FUNCTION = 0, // plain (addr >> line_bits) & (nset-1)
  BITWISE_XORING_FUNCTION, // XOR-fold higher address bits into the index
  HASH_IPOLY_FUNCTION,     // irreducible-polynomial hash
  FERMI_HASH_SET_FUNCTION, // Fermi-style hashed index
  CUSTOM_SET_FUNCTION      // user-provided function (see hash_function)
};

// MSHR organization; selected by the mshr_type character of the config
// string ('A' or 'S' in CacheConfig::init).
enum MshrType
{
  ASSOC = 0,   // normal cache
  SECTOR_ASSOC // normal cache sends requests to high-level sector cache
};

// Life cycle of a cache line (or of a single sector in a sector cache):
// INVALID -> RESERVED (allocated, awaiting fill) -> VALID / MODIFIED.
enum CacheLineState
{
  INVALID = 0,
  RESERVED, // allocated for a pending miss; data not yet filled
  VALID,
  MODIFIED  // dirty: must be written back on eviction (write-back policy)
};

// Line granularity of the cache; 'N' or 'S' in the config string.
enum CacheType
{
  NORMAL = 0, // whole-line allocation/fill
  SECTOR      // per-sector (SECTOR_SIZE) allocation/fill
};

// Outcome of probing/accessing the tag array for one request.
enum CacheRequestState
{
  HIT = 0,
  HIT_RESERVED,    // hit on a line that is allocated but not yet filled
  MISS,
  RESERVATION_FAIL // could not reserve resources; see CacheReservationFailReason
};

// Kinds of downstream traffic a cache access can generate; collected in the
// std::list<CacheEvent> passed to access() (see was_*_sent helpers below).
enum CacheEventType
{
  WRITE_BACK_REQUEST_SENT, // eviction of a modified line
  READ_REQUEST_SENT,
  WRITE_REQUEST_SENT,
  WRITE_ALLOCATE_SENT
};

struct EvictedLineInfo
{
  addr_t m_line_addr;
  uint32_t m_modified_size;
  EvictedLineInfo()
  {
    m_line_addr = 0;
    m_modified_size = 0;
  }
  void set_info(addr_t line_addr, uint32_t modified_size)
  {
    m_line_addr = line_addr;
    m_modified_size = modified_size;
  }
};
struct CacheEvent
{
  enum CacheEventType m_cache_event_type;
  EvictedLineInfo m_evicted_block; // if it was write_back event, fill the
                                   // the evicted block info

  CacheEvent(enum CacheEventType m_CacheEvent)
  {
    m_cache_event_type = m_CacheEvent;
  }

  CacheEvent(enum CacheEventType CacheEvent,
             EvictedLineInfo evicted_block)
  {
    m_cache_event_type = CacheEvent;
    m_evicted_block = evicted_block;
  }
};

// Victim selection policy; 'L' or 'F' in the config string.
enum ReplacementPolicy
{
  LRU,
  FIFO
};

// Write-hit policy; 'R'/'B'/'T'/'E'/'L' in the config string.
enum WritePolicy
{
  READ_ONLY,         // writes are illegal (use ReadOnlyCache)
  WRITE_BACK,        // mark line MODIFIED; write out on eviction
  WRITE_THROUGH,     // forward every write to the lower level
  WRITE_EVICT,       // invalidate the line and forward the write
  LOCAL_WB_GLOBAL_WT // write-back for local accesses, write-through for global
};

// When a line is allocated for a miss; 'm' or 'f' in the config string.
enum AllocationPolicy
{
  ON_MISS, // reserve the line when the miss is issued
  ON_FILL, // allocate only when the fill response returns
  STREAMING
};

// Write-miss policy; 'N'/'W'/'F'/'L' in the config string
// (see DataCache::init for the handler each one selects).
enum WriteAllocatePolicy
{
  NO_WRITE_ALLOCATE,
  WRITE_ALLOCATE,
  FETCH_ON_WRITE,
  LAZY_FETCH_ON_READ
};

class CacheLine
{

public:
  CacheLine()
  {
    m_alloc_time = 0;
    m_fill_time = 0;
    m_last_access_time = 0;
    m_state = INVALID;
    m_set_modified_on_fill = false;
    m_readable = true;
  }

  void allocate(addr_t tag, addr_t line_addr, cycle_t time)
  {
    m_tag = tag;
    m_line_addr = line_addr;
    m_alloc_time = time;
    m_last_access_time = time;
    m_fill_time = 0;
    m_state = RESERVED;
    m_set_modified_on_fill = false;
  }

  void fill(cycle_t time)
  {
    m_state = m_set_modified_on_fill ? MODIFIED : VALID;
    m_fill_time = time;
  }

  bool is_invalid_line() { return m_state == INVALID; }
  bool is_valid_line() { return m_state == VALID; }
  bool is_reserved_line() { return m_state == RESERVED; }
  bool is_modified_line() { return m_state == MODIFIED; }

  virtual enum CacheLineState get_state(MemAccessSectorMask sector_mask)
  {
    return m_state;
  }

  virtual void set_state(enum CacheLineState state, MemAccessSectorMask sector_mask)
  {
    m_state = state;
  }
  virtual uint64_t get_last_access_time()
  {
    return m_last_access_time;
  }
  virtual void set_last_access_time(uint32_t long long time,
                                    MemAccessSectorMask sector_mask)
  {
    m_last_access_time = time;
  }
  virtual uint64_t get_alloc_time() { return m_alloc_time; }

  virtual void set_modified_on_fill(bool m_modified, MemAccessSectorMask mask)
  {
    m_set_modified_on_fill = m_modified;
  }
  virtual void set_ignore_on_fill(bool m_ignore,
                                  MemAccessSectorMask sector_mask)
  {
    m_ignore_on_fill_state = m_ignore;
  }

  virtual void set_m_readable(bool readable,
                              MemAccessSectorMask sector_mask)
  {
    m_readable = readable;
  }

  virtual bool is_readable()
  {
    return m_readable;
  }
  virtual void print_status()
  {
    printf("m_line_addr is %lu, state = %u\n", m_line_addr, m_state);
  }
  virtual uint32_t get_modified_size()
  {
    return 4 * 32; // i.e. cache line size
  }

public:
  uint32_t m_alloc_time;
  uint32_t m_last_access_time;
  uint32_t m_fill_time;
  CacheLineState m_state;
  cycle_t m_status_change_time;
  bool m_ignore_on_fill_state;
  bool m_set_modified_on_fill;
  bool m_readable;
  addr_t m_tag;
  addr_t m_line_addr;
};

struct SectorCacheLine : public CacheLine
{
  SectorCacheLine() { init(); }

  void init()
  {
    for (uint32_t i = 0; i < SECTOR_CHUNCK_SIZE; ++i)
    {
      m_sector_alloc_time[i] = 0;
      m_sector_fill_time[i] = 0;
      m_last_sector_access_time[i] = 0;
      m_state[i] = INVALID;
      m_ignore_on_fill_status[i] = false;
      m_set_modified_on_fill[i] = false;
      m_readable[i] = true;
    }
    m_line_alloc_time = 0;
    m_line_last_access_time = 0;
    m_line_fill_time = 0;
  }

  virtual void allocate(addr_t tag, addr_t block_addr,
                        cycle_t time, MemAccessSectorMask sector_mask)
  {
    allocate_line(tag, block_addr, time, sector_mask);
  }

  void allocate_line(addr_t tag, addr_t line_addr, cycle_t time,
                     MemAccessSectorMask sector_mask)
  {
    // allocate a new line
    // assert(m_block_addr != 0 && m_block_addr != block_addr);
    init();
    m_tag = tag;
    m_line_addr = line_addr;

    uint32_t sidx = get_sector_index(sector_mask);

    // set sector stats
    m_sector_alloc_time[sidx] = time;
    m_last_sector_access_time[sidx] = time;
    m_sector_fill_time[sidx] = 0;
    m_state[sidx] = RESERVED;
    m_ignore_on_fill_status[sidx] = false;
    m_set_modified_on_fill[sidx] = false;

    // set line stats
    m_line_alloc_time = time; // only set this for the first allocated sector
    m_line_last_access_time = time;
    m_line_fill_time = 0;
  }

  void allocate_sector(cycle_t time, MemAccessSectorMask sector_mask)
  {
    // allocate invalid sector of this allocated valid line
    assert(is_valid_line());
    uint32_t sidx = get_sector_index(sector_mask);

    // set sector stats
    m_sector_alloc_time[sidx] = time;
    m_last_sector_access_time[sidx] = time;
    m_sector_fill_time[sidx] = 0;
    if (m_state[sidx] == MODIFIED) // this should be the case only for
                                   // fetch-on-write policy //TO DO
      m_set_modified_on_fill[sidx] = true;
    else
      m_set_modified_on_fill[sidx] = false;

    m_state[sidx] = RESERVED;
    m_ignore_on_fill_status[sidx] = false;
    // m_set_modified_on_fill[sidx] = false;
    m_readable[sidx] = true;

    // set line stats
    m_line_last_access_time = time;
    m_line_fill_time = 0;
  }

  virtual void fill(cycle_t time, MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);

    //	if(!m_ignore_on_fill_status[sidx])
    //	         assert( m_state[sidx] == RESERVED );

    m_state[sidx] = m_set_modified_on_fill[sidx] ? MODIFIED : VALID;

    m_sector_fill_time[sidx] = time;
    m_line_fill_time = time;
  }
  virtual bool is_invalid_line()
  {
    // all the sectors should be invalid
    for (uint32_t i = 0; i < SECTOR_CHUNCK_SIZE; ++i)
    {
      if (m_state[i] != INVALID)
        return false;
    }
    return true;
  }
  virtual bool is_valid_line() { return !(is_invalid_line()); }
  virtual bool is_reserved_line()
  {
    // if any of the sector is reserved, then the line is reserved
    for (uint32_t i = 0; i < SECTOR_CHUNCK_SIZE; ++i)
    {
      if (m_state[i] == RESERVED)
        return true;
    }
    return false;
  }
  virtual bool is_modified_line()
  {
    // if any of the sector is modified, then the line is modified
    for (uint32_t i = 0; i < SECTOR_CHUNCK_SIZE; ++i)
    {
      if (m_state[i] == MODIFIED)
        return true;
    }
    return false;
  }

  virtual enum CacheLineState get_state(
      MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);

    return m_state[sidx];
  }

  virtual void set_state(enum CacheLineState state,
                         MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);
    m_state[sidx] = state;
  }

  virtual uint64_t get_last_access_time()
  {
    return m_line_last_access_time;
  }

  virtual void set_last_access_time(cycle_t time,
                                    MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);

    m_last_sector_access_time[sidx] = time;
    m_line_last_access_time = time;
  }

  virtual uint64_t get_alloc_time()
  {
    return m_line_alloc_time;
  }

  virtual void set_ignore_on_fill(bool m_ignore,
                                  MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);
    m_ignore_on_fill_status[sidx] = m_ignore;
  }

  virtual void set_modified_on_fill(bool m_modified,
                                    MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);
    m_set_modified_on_fill[sidx] = m_modified;
  }

  virtual void set_m_readable(bool readable,
                              MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);
    m_readable[sidx] = readable;
  }

  virtual bool is_readable(MemAccessSectorMask sector_mask)
  {
    uint32_t sidx = get_sector_index(sector_mask);
    return m_readable[sidx];
  }

  virtual uint32_t get_modified_size()
  {
    uint32_t modified = 0;
    for (uint32_t i = 0; i < SECTOR_CHUNCK_SIZE; ++i)
    {
      if (m_state[i] == MODIFIED)
        modified++;
    }
    return modified * SECTOR_SIZE;
  }

  virtual void print_status()
  {
    printf("m_block_addr is %lu, state = %u %u %u %u\n", m_line_addr,
           m_state[0], m_state[1], m_state[2], m_state[3]);
  }

private:
  uint32_t m_sector_alloc_time[SECTOR_CHUNCK_SIZE];
  uint32_t m_last_sector_access_time[SECTOR_CHUNCK_SIZE];
  uint32_t m_sector_fill_time[SECTOR_CHUNCK_SIZE];
  uint32_t m_line_alloc_time;
  uint32_t m_line_last_access_time;
  uint32_t m_line_fill_time;
  CacheLineState m_state[SECTOR_CHUNCK_SIZE];
  bool m_ignore_on_fill_status[SECTOR_CHUNCK_SIZE];
  bool m_set_modified_on_fill[SECTOR_CHUNCK_SIZE];
  bool m_readable[SECTOR_CHUNCK_SIZE];

  uint32_t get_sector_index(MemAccessSectorMask sector_mask)
  {
    assert(sector_mask.count() == 1);
    for (uint32_t i = 0; i < SECTOR_CHUNCK_SIZE; ++i)
    {
      if (sector_mask.to_ulong() & (1 << i))
        return i;
    }
  }
};

class CacheConfig
{
public:
  CacheConfig()
  {
    m_valid = false;
    m_config_string = NULL; // set by option parser
    m_config_stringPrefL1 = NULL;
    m_config_stringPrefShared = NULL;
  }

  void init()
  {
    assert(m_config_string);
    char ct, rp, wp, ap, wap, sif, mshr_type;

    int ntok = sscanf(m_config_string,
                      "%c,%u:%u:%u,%c:%c:%c:%c:%c,%c:%u:%u,%u,%u",
                      &ct,
                      &m_num_set, &m_line_size, &m_assoc,
                      &rp, &wp, &ap, &wap, &sif,
                      &mshr_type, &m_mshr_entries, &m_mshr_max_merge,
                      &m_miss_queue_size, &m_data_port_width);

    if (ntok < 13)
    {
      if (!strcmp(m_config_string, "none"))
      {
        m_disabled = true;
        return;
      }
      exit_parse_error();
    }
    switch (ct)
    {
    case 'N':
      m_cache_type = NORMAL;
      break;
    case 'S':
      m_cache_type = SECTOR;
      break;
    default:
      exit_parse_error();
    }
    switch (mshr_type)
    {
    case 'A':
      m_mshr_type = ASSOC;
      break;
    case 'S':
      m_mshr_type = SECTOR_ASSOC;
      break;
    default:
      exit_parse_error();
    }
    switch (rp)
    {
    case 'L':
      m_replacement_policy = LRU;
      break;
    case 'F':
      m_replacement_policy = FIFO;
      break;
    default:
      exit_parse_error();
    }
    switch (wp)
    {
    case 'R':
      m_write_policy = READ_ONLY;
      break;
    case 'B':
      m_write_policy = WRITE_BACK;
      break;
    case 'T':
      m_write_policy = WRITE_THROUGH;
      break;
    case 'E':
      m_write_policy = WRITE_EVICT;
      break;
    case 'L':
      m_write_policy = LOCAL_WB_GLOBAL_WT;
      break;
    default:
      exit_parse_error();
    }
    switch (ap)
    {
    case 'm':
      m_alloc_policy = ON_MISS;
      break;
    case 'f':
      m_alloc_policy = ON_FILL;
      break;
    }
    switch (wap)
    {
    case 'N':
      m_write_alloc_policy = NO_WRITE_ALLOCATE;
      break;
    case 'W':
      m_write_alloc_policy = WRITE_ALLOCATE;
      break;
    case 'F':
      m_write_alloc_policy = FETCH_ON_WRITE;
      break;
    case 'L':
      m_write_alloc_policy = LAZY_FETCH_ON_READ;
      break;
    default:
      exit_parse_error();
    }
    switch (sif)
    {
    case 'H':
      m_set_index_function = FERMI_HASH_SET_FUNCTION;
      break;
    case 'P':
      m_set_index_function = HASH_IPOLY_FUNCTION;
      break;
    case 'C':
      m_set_index_function = CUSTOM_SET_FUNCTION;
      break;
    case 'L':
      m_set_index_function = LINEAR_SET_FUNCTION;
      break;
    default:
      exit_parse_error();
    }
    m_line_size_log2 = LOG2(m_line_size);
    m_num_set_log2 = LOG2(m_num_set);
    m_atom_size = (m_cache_type == SECTOR) ? SECTOR_SIZE : m_line_size;
    m_sector_size_log2 = LOG2(SECTOR_SIZE);
    m_valid = true;
  }
  bool disabled() const { return m_disabled; }
  uint32_t get_line_size() const
  {
    return m_line_size;
  }

  uint32_t get_num_lines() const
  {
    return m_num_set * m_assoc;
  }

  void print(FILE *fp) const
  {
    fprintf(fp, "Size = %d B (%d Set x %d-way x %d byte line)\n",
            m_line_size * m_num_set * m_assoc, m_num_set, m_assoc, m_line_size);
  }
  uint32_t hash_function(addr_t addr, uint32_t m_nset,
                         uint32_t m_line_sz_log2, uint32_t m_nset_log2,
                         uint32_t m_index_function) const;
  virtual uint32_t set_index(addr_t addr) const;

  addr_t tag(addr_t addr) const
  {
    // For generality, the tag includes both index and tag. This allows for more
    // complex set index calculations that can result in different indexes
    // mapping to the same set, thus the full tag + index is required to check
    // for hit/miss. Tag is now identical to the line address.

    // return addr >> (m_line_sz_log2+m_num_set_log2);
    return addr & ~(addr_t)(m_line_size - 1);
  }

  addr_t line_addr(addr_t addr) const
  {
    return addr & ~(addr_t)(m_line_size - 1);
  }

  void set_assoc(uint32_t n)
  {
    // set new assoc. L1 cache dynamically resized in Volta
    m_assoc = n;
  }

  uint32_t get_nset() const
  {
    return m_num_set;
  }

  uint32_t get_total_size_inKB() const
  {
    return (m_assoc * m_num_set * m_line_size) / 1024;
  }
  uint32_t get_atom_sz() const
  {
    assert(m_valid);
    return m_atom_size;
  }
  addr_t mshr_addr(addr_t addr) const
  {
    return addr & ~(addr_t)(m_atom_size - 1);
  }
  char *m_config_string;
  char *m_config_stringPrefL1;
  char *m_config_stringPrefShared;

public:
  void exit_parse_error()
  {
    printf("cache configuration parsing error (%s)\n", m_config_string);
    abort();
  }

  uint32_t m_line_size;
  uint32_t m_line_size_log2;
  uint32_t m_num_set;
  uint32_t m_num_set_log2;
  uint32_t m_assoc;
  uint32_t m_data_port_width; //< number of byte the cache can access per cycle
  bool m_valid;
  uint32_t m_atom_size;
  uint32_t m_sector_size_log2;
  uint32_t m_miss_queue_size;
  uint32_t m_mshr_entries;
  uint32_t m_mshr_max_merge;
  bool m_disabled;

  CacheType m_cache_type;
  MshrType m_mshr_type;

  SetIndexFunction
      m_set_index_function; // Hash, linear, or custom set index function

  enum ReplacementPolicy m_replacement_policy;   // 'L' = LRU, 'F' = FIFO
  enum WritePolicy m_write_policy;               // 'T' = write through, 'B' = write back, 'R' = read only
  enum AllocationPolicy m_alloc_policy;          // 'm' = allocate on miss, 'f' = allocate on fill
  enum WriteAllocatePolicy m_write_alloc_policy; // 'W' = Write allocate, 'N' = No write allocate
};

/// L2 variant of CacheConfig whose set index is computed through the
/// linear-to-raw address translation (partition-aware indexing).
class L2CacheConfig : public CacheConfig
{
public:
  L2CacheConfig() : CacheConfig() {}
  // Defined out of line: parses the config string and stores address_mapping.
  void init(linear_to_raw_address_translation *address_mapping);
  virtual uint32_t set_index(addr_t addr) const;

private:
  linear_to_raw_address_translation *m_address_mapping;
};

/// Miss Status Holding Register table: a finite, fully associative table that
/// merges outstanding misses to the same (atom-granularity) address.
class MshrTable
{
public:
  /// @param num_entries max distinct outstanding addresses
  /// @param max_merged  max requests merged into one entry
  MshrTable(uint32_t num_entries, uint32_t max_merged)
      : m_num_entries(num_entries),
        m_max_merged(max_merged),
        m_current_response_ready(false) {} // was never initialized

  /// Checks if there is a pending request to the lower memory level already
  bool probe(addr_t line_addr) const;
  /// Checks if there is space for tracking a new memory access
  bool full(addr_t line_addr) const;
  /// Add or merge this access
  void add(addr_t line_addr, MemFetch *mf);
  /// Returns true if cannot accept new fill responses
  bool busy() const { return false; }
  /// Accept a new cache fill response: mark entry ready for processing
  void mark_ready(addr_t line_addr);
  /// Returns true if ready accesses exist
  bool access_ready() const { return !m_current_response.empty(); }
  /// Returns next ready access
  MemFetch *next_access();
  bool is_read_after_write_pending(addr_t line_addr);

private:
  // finite sized, fully associative table, with a finite maximum number of
  // merged requests
  const uint32_t m_num_entries;
  const uint32_t m_max_merged;

  struct mshr_entry
  {
    std::list<MemFetch *> m_list; // requests merged onto this address
    mshr_entry() {}
  };
  typedef std::unordered_map<addr_t, mshr_entry> table;
  typedef std::unordered_map<addr_t, mshr_entry> line_table;
  table m_data;
  line_table pending_lines;

  // it may take several cycles to process the merged requests
  bool m_current_response_ready;
  std::list<addr_t> m_current_response; // addresses whose fills have returned
};

/// Tag array of one cache: owns the CacheLine objects and implements
/// probe/access/fill plus replacement bookkeeping (definitions out of line).
class TagArray
{
public:
  // Use this constructor
  TagArray(const CacheConfig &config, uint32_t core_id);
  ~TagArray();

  // Probe without updating replacement state; idx returns the way found.
  enum CacheRequestState probe(addr_t addr, uint32_t &idx,
                               MemFetch *mf = NULL) const;
  enum CacheRequestState probe(addr_t addr, uint32_t &idx,
                               MemAccessSectorMask mask,
                               MemFetch *mf = NULL) const;
  // Access updates stats/LRU; the 4-arg overload also reports an eviction
  // (wb = write-back needed, evicted = victim info).
  enum CacheRequestState access(addr_t addr, cycle_t time,
                                uint32_t &idx, MemFetch *mf);
  enum CacheRequestState access(addr_t addr, cycle_t time,
                                uint32_t &idx, bool &wb,
                                EvictedLineInfo &evicted, MemFetch *mf);

  void fill(addr_t addr, cycle_t time, MemFetch *mf);
  void fill(addr_t addr, cycle_t time, MemAccessSectorMask mask);
  void fill(bool fill_by_index, uint32_t index, cycle_t time);

  uint32_t size() const { return m_config.get_num_lines(); }
  CacheLine *get_line(uint32_t idx) { return m_lines[idx]; }

  void flush();      // flush all written entries
  void invalidate(); // invalidate all entries
  void new_window();

  void add_pending_line(MemFetch *mf);
  void remove_pending_line(MemFetch *mf);
  void Init();

protected:
  // This constructor is intended for use only from derived classes that wish to
  // avoid unnecessary memory allocation that takes place in the
  // other tag_array constructor
  TagArray(const CacheConfig &config, uint32_t core_id,
           CacheLine **new_lines);

protected:
  const CacheConfig &m_config;

  CacheLine **m_lines; /* nbanks x nset x assoc lines in total */

  // Aggregate statistics, updated by access().
  uint32_t m_access;
  uint32_t m_miss;
  uint32_t m_pending_hit; // number of cache miss that hit a line that is
                          // allocated but not filled
  uint32_t m_res_fail;

  uint32_t m_core_id; // which shader core is using this

  bool is_used; // a flag if the whole cache has ever been accessed before

  typedef std::unordered_map<addr_t, uint32_t> line_table;
  line_table pending_lines;
};

/// Abstract cache interface: one access entry point plus port-availability
/// queries used for bandwidth modeling.
class Cache
{
public:
  virtual ~Cache() {}
  // Process one request at the given cycle; downstream traffic generated by
  // the access is appended to events.
  virtual enum CacheRequestState access(addr_t addr, MemFetch *mf,
                                        cycle_t time,
                                        std::list<CacheEvent> &events) = 0;

  // accessors for cache bandwidth availability
  virtual bool data_port_free() const = 0;
  virtual bool fill_port_free() const = 0;

};

/// Common machinery shared by all concrete caches: tag array, MSHRs, miss
/// queue toward the lower memory level, and port-bandwidth accounting.
class BaseCache : public Cache
{

public:
  /// Owning constructor: allocates its own TagArray from config.
  BaseCache(const char *name, const CacheConfig &config, uint32_t core_id,
            MemFetchInterface *memport,
            enum MemFetchState state)
      : m_tag_array(new TagArray(config, core_id)),
        m_mshrs(config.m_mshr_entries, config.m_mshr_max_merge),
        m_config(config),
        m_bandwidth_management(config)
  {
    init(name, config, memport, state);
  }

  /// Variant for derived classes that supply a pre-built (possibly derived)
  /// TagArray to avoid the allocation above; BaseCache takes ownership.
  BaseCache(const char *name, CacheConfig &config, uint32_t core_id,
            MemFetchInterface *memport,
            MemFetchState state,
            TagArray *new_tag_array)
      : m_tag_array(new_tag_array),
        m_mshrs(config.m_mshr_entries, config.m_mshr_max_merge),
        m_memport(memport),
        m_config(config),
        m_bandwidth_management(config)
  {
    init(name, config, memport, state);
  }

  /// m_tag_array is allocated (or adopted) in the constructors and was never
  /// released; delete it here to fix the leak.
  virtual ~BaseCache() { delete m_tag_array; }

  // Owning a raw TagArray* makes shallow copies a double-delete hazard.
  BaseCache(const BaseCache &) = delete;
  BaseCache &operator=(const BaseCache &) = delete;

  void init(const char *name, const CacheConfig &config,
            MemFetchInterface *memport, enum MemFetchState state)
  {
    m_name = name;
    // assert(config.m_mshr_type == ASSOC);
    m_memport = memport;
    m_miss_queue_state = state;
  }
  virtual enum CacheRequestState access(addr_t addr, MemFetch *mf,
                                        cycle_t time,
                                        std::list<CacheEvent> &events) = 0;

  bool active();

  /// Sends next request to lower level of memory
  void cycle();
  /// Interface for response from lower memory level (model bandwidth
  /// restictions in caller)
  void fill(MemFetch *mf, cycle_t time);
  /// Checks if mf is waiting to be filled by lower memory level
  bool waiting_for_fill(MemFetch *mf);
  /// Are any (accepted) accesses that had to wait for memory now ready? (does
  /// not include accesses that "HIT")
  bool access_ready() const { return m_mshrs.access_ready(); }
  /// Pop next ready access (does not include accesses that "HIT")
  MemFetch *next_access() { return m_mshrs.next_access(); }

  void invalidate() { m_tag_array->invalidate(); }

  // flash invalidate all entries in cache
  void flush() { m_tag_array->flush(); }
  /// Read miss handler without writeback
  void send_read_request(addr_t addr, addr_t line_addr,
                         uint32_t cache_index, MemFetch *mf, cycle_t time,
                         bool &do_miss,
                         std::list<CacheEvent> &events,
                         bool read_only, bool wa);
  /// Read miss handler. Check MSHR hit or MSHR available
  void send_read_request(addr_t addr, addr_t line_addr,
                         uint32_t cache_index, MemFetch *mf, cycle_t time,
                         bool &do_miss, bool &wb, EvictedLineInfo &evicted,
                         std::list<CacheEvent> &events,
                         bool read_only,
                         bool wa);
  /// Checks whether this request can be handled on this cycle. num_miss equals
  /// max # of misses to be handled on this cycle
  bool miss_queue_full(uint32_t num_miss)
  {
    return ((m_miss_queue.size() + num_miss) >= m_config.m_miss_queue_size);
  }

protected:
  std::string m_name;
  TagArray *m_tag_array; // owned; released in the destructor
  MshrTable m_mshrs;
  std::list<MemFetch *> m_miss_queue; // requests waiting to go downstream
  MemFetchInterface *m_memport;       // not owned
  CacheConfig m_config;               // by-value copy of the config
  enum MemFetchState m_miss_queue_state;
  PE *m_pe;

  /// Per-MemFetch bookkeeping attached while a request is in flight.
  struct extra_mf_fields
  {
    extra_mf_fields() { m_valid = false; }
    // The config parameter is currently unused; kept for interface
    // compatibility with existing call sites.
    extra_mf_fields(addr_t a, addr_t ad, uint32_t i, uint32_t d,
                    const CacheConfig & /*config*/)
    {
      m_valid = true;
      m_line_addr = a;
      m_addr = ad;
      m_cache_index = i;
      m_data_size = d;
      pending_read = 0;
    }
    bool m_valid;
    addr_t m_line_addr;
    addr_t m_addr;
    uint32_t m_cache_index;
    uint32_t m_data_size;
    // this variable is used when a load request generates multiple load
    // transactions For example, a read request from non-sector L1 request sends
    // a request to sector L2
    uint32_t pending_read;
  };

  typedef std::map<MemFetch *, extra_mf_fields> extra_mf_fields_lookup;

  extra_mf_fields_lookup m_extra_mf_fields;
  /// Sub-class containing all metadata for port bandwidth management

  class BandwidthManagement
  {
  public:
    BandwidthManagement(const CacheConfig &config);

    /// use the data port based on the outcome and events generated by the
    /// MemFetch request
    void use_data_port(MemFetch *mf, enum CacheRequestState outcome,
                       const std::list<CacheEvent> &events);

    /// use the fill port
    void use_fill_port(MemFetch *mf);

    /// called every cache cycle to free up the ports
    void replenish_port_bandwidth();

    /// query for data port availability
    bool data_port_free() const;
    /// query for fill port availability
    bool fill_port_free() const;

  protected:
    const CacheConfig &m_config;

    int m_data_port_occupied_cycles; //< Number of cycle that the data port
                                     // remains used
    int m_fill_port_occupied_cycles; //< Number of cycle that the fill port
                                     // remains used
  };

  BandwidthManagement m_bandwidth_management;

public:
  // accessors for cache bandwidth availability
  bool data_port_free() const
  {
    return m_bandwidth_management.data_port_free();
  }
  bool fill_port_free() const
  {
    return m_bandwidth_management.fill_port_free();
  }
};

/// Read only cache
class ReadOnlyCache : public BaseCache
{
public:
  ReadOnlyCache(const char *name, const CacheConfig &config, uint32_t core_id,
                MemFetchInterface *memport, MemFetchState state, PE *pe)
      : BaseCache(name, config, core_id, memport, state) 
  {
    m_pe=pe;
  }


  /// Access cache for read_only_cache: returns RESERVATION_FAIL if request
  /// could not be accepted (for any reason)
  virtual enum CacheRequestState access(addr_t addr, MemFetch *mf,
                                        cycle_t time,
                                        std::list<CacheEvent> &events);

  virtual ~ReadOnlyCache() {}
};

/// Data cache - Implements common functions for L1 and L2 data cache
class DataCache : public BaseCache
{
public:
  DataCache(const char *name, CacheConfig &config, int core_id, int type_id,
            MemFetchInterface *memport, MemFetchAllocator *mfcreator,
            MemFetchState state, MemAccessType wr_alloc_type,
            MemAccessType wrbk_type, class SIMULATOR *simulator)
      : BaseCache(name, config, core_id, memport, state)
  {
    init(mfcreator);
    m_wr_alloc_type = wr_alloc_type;
    m_wrbk_type = wrbk_type;
    m_simulator = simulator;
  }

  virtual ~DataCache() {}

  virtual void init(MemFetchAllocator *mfcreator)
  {
    m_memfetch_creator = mfcreator;

    // Set read hit function
    m_rd_hit = &DataCache::rd_hit_base;

    // Set read miss function
    m_rd_miss = &DataCache::rd_miss_base;

    // Set write hit function
    switch (m_config.m_write_policy)
    {
    // READ_ONLY is now a separate cache class, config is deprecated
    case READ_ONLY:
      assert(0 && "Error: Writable DataCache set as READ_ONLY\n");
      break;
    case WRITE_BACK:
      m_wr_hit = &DataCache::wr_hit_wb;
      break;
    case WRITE_THROUGH:
      m_wr_hit = &DataCache::wr_hit_wt;
      break;
    case LOCAL_WB_GLOBAL_WT:
      m_wr_hit = &DataCache::wr_hit_global_we_local_wb;
      break;
    default:
      assert(0 && "Error: Must set valid cache write policy\n");
      break; // Need to set a write hit function
    }

    // Set write miss function
    switch (m_config.m_write_alloc_policy)
    {
    case NO_WRITE_ALLOCATE:
      m_wr_miss = &DataCache::wr_miss_no_wa;
      break;
    case WRITE_ALLOCATE:
      m_wr_miss = &DataCache::wr_miss_wa_naive;
      break;
    case FETCH_ON_WRITE:
      m_wr_miss = &DataCache::wr_miss_wa_fetch_on_write;
      break;
    case LAZY_FETCH_ON_READ:
      m_wr_miss = &DataCache::wr_miss_wa_lazy_fetch_on_read;
      break;
    default:
      assert(0 && "Error: Must set valid cache write miss policy\n");
      break; // Need to set a write miss function
    }
  }

  virtual enum CacheRequestState access(addr_t addr, MemFetch *mf,
                                        cycle_t time,
                                        std::list<CacheEvent> &events);

protected:
  DataCache(const char *name, CacheConfig &config, int core_id, int type_id,
            MemFetchInterface *memport, MemFetchAllocator *mfcreator,
            MemFetchState state, TagArray *new_tag_array,
            MemAccessType wr_alloc_type, MemAccessType wrbk_type,
            class SIMULATOR *simulator)
      : BaseCache(name, config, core_id, memport, state,
                  new_tag_array)
  {
    init(mfcreator);
    m_wr_alloc_type = wr_alloc_type;
    m_wrbk_type = wrbk_type;
    m_simulator = simulator;
  }

  MemAccessType m_wr_alloc_type; // Specifies type of write allocate request
                                 // (e.g., L1 or L2)
  MemAccessType
      m_wrbk_type; // Specifies type of writeback request (e.g., L1 or L2)
  SIMULATOR *m_simulator;

  //! A general function that takes the result of a tag_array probe
  //  and performs the correspding functions based on the cache configuration
  //  The access fucntion calls this function
  enum CacheRequestState process_tag_probe(bool wr,
                                           CacheRequestState state,
                                           addr_t addr,
                                           uint32_t cache_index,
                                           MemFetch *mf, cycle_t time,
                                           std::list<CacheEvent> &events);

protected:
  MemFetchAllocator *m_memfetch_creator;

  // Functions for data cache access
  /// Sends write request to lower level memory (write or writeback)
  void send_write_request(MemFetch *mf, CacheEvent request, cycle_t time,
                          std::list<CacheEvent> &events);

  // Member Function pointers - Set by configuration options
  // to the functions below each grouping
  /******* Write-hit configs *******/
  enum CacheRequestState (DataCache::*m_wr_hit)(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events, enum CacheRequestState state);
  /// Marks block as MODIFIED and updates block LRU
  enum CacheRequestState wr_hit_wb(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState state); // write-back
  enum CacheRequestState wr_hit_wt(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState state); // write-through

  /// Marks block as INVALID and sends write request to lower level memory
  enum CacheRequestState wr_hit_we(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState state); // write-evict
  enum CacheRequestState wr_hit_global_we_local_wb(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events, enum CacheRequestState state);
  // global write-evict, local write-back

  /******* Write-miss configs *******/
  enum CacheRequestState (DataCache::*m_wr_miss)(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events, enum CacheRequestState state);
  /// Sends read request, and possible write-back request,
  //  to lower level memory for a write miss with write-allocate
  enum CacheRequestState wr_miss_wa_naive(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState
          state); // write-allocate-send-write-and-read-request
  enum CacheRequestState wr_miss_wa_fetch_on_write(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState
          state); // write-allocate with fetch-on-every-write
  enum CacheRequestState wr_miss_wa_lazy_fetch_on_read(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState state); // write-allocate with read-fetch-only
  enum CacheRequestState wr_miss_wa_write_validate(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState
          state); // write-allocate that writes with no read fetch
  enum CacheRequestState wr_miss_no_wa(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events,
      enum CacheRequestState state); // no write-allocate

  // Currently no separate functions for reads
  /******* Read-hit configs *******/
  enum CacheRequestState (DataCache::*m_rd_hit)(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events, enum CacheRequestState state);
  enum CacheRequestState rd_hit_base(addr_t addr,
                                     uint32_t cache_index, MemFetch *mf,
                                     cycle_t time,
                                     std::list<CacheEvent> &events,
                                     enum CacheRequestState state);

  /******* Read-miss configs *******/
  enum CacheRequestState (DataCache::*m_rd_miss)(
      addr_t addr, uint32_t cache_index, MemFetch *mf, cycle_t time,
      std::list<CacheEvent> &events, enum CacheRequestState state);
  enum CacheRequestState rd_miss_base(addr_t addr,
                                      uint32_t cache_index, MemFetch *mf,
                                      cycle_t time,
                                      std::list<CacheEvent> &events,
                                      enum CacheRequestState state);
};

/// Per-core L1 data cache; tags its downstream traffic with the
/// L1-specific write-allocate and write-back access types.
class L1Cache : public DataCache
{
public:
  L1Cache(const char *name, CacheConfig &config, int core_id, int type_id,
          MemFetchInterface *memport, MemFetchAllocator *mfcreator,
          enum MemFetchState state, PE *pe, SIMULATOR *simulator)
      : DataCache(name, config, core_id, type_id, memport, mfcreator, state,
                  L1_WR_ALLOC_R, L1_WRBK_ACC, simulator)
  {
    m_pe = pe;
  }

  virtual ~L1Cache() {}

  virtual enum CacheRequestState access(addr_t addr, MemFetch *mf,
                                        cycle_t time,
                                        std::list<CacheEvent> &events);
};

/// Models second level shared cache with global write-back
/// and write-allocate policies
/// Models second level shared cache with global write-back
/// and write-allocate policies
class L2Cache : public DataCache
{
public:
  // Tags downstream traffic with the L2-specific access types.
  L2Cache(const char *name, CacheConfig &config, int core_id, int type_id,
          MemFetchInterface *memport, MemFetchAllocator *mfcreator,
          enum MemFetchState state, class SIMULATOR *simulator)
      : DataCache(name, config, core_id, type_id, memport, mfcreator, state,
                  L2_WR_ALLOC_R, L2_WRBK_ACC, simulator) {}

  virtual ~L2Cache() {}

  virtual enum CacheRequestState access(addr_t addr, MemFetch *mf,
                                        cycle_t time,
                                        std::list<CacheEvent> &events);
};

// Helpers to scan an access's event list for a given CacheEventType
// (presumably WRITE_REQUEST_SENT / READ_REQUEST_SENT / WRITE_ALLOCATE_SENT
// respectively — defined out of line; verify against the .cc).
bool was_write_sent(const std::list<CacheEvent> &events);
bool was_read_sent(const std::list<CacheEvent> &events);
bool was_writeallocate_sent(const std::list<CacheEvent> &events);

#endif