/////////////////////////////////////////////////////////////////////////
// $Id: icache.h 10613 2011-08-21 16:44:02Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2007-2011 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#ifndef BX_ICACHE_H
#define BX_ICACHE_H

///@------------------------------------------------------------------ ///
// @                                                                    //
// @ Instruction Cache Lock Design                                      //
// @                                                                    //
// @ The shared icache is protected by an icache read/write lock, and  //
// @ every entry is protected by a read/write lock as well. To         //
// @ guarantee that deadlock is impossible, the following must hold:   //
// @                                                                   //
// @ In the serveICacheMiss code the write lock of the icache is held, //
// @ and then the write lock on one of the entries must be acquired as //
// @ well. No deadlock is possible here: while the icache write lock   //
// @ is held, no other processor can look up any entry, and the entry  //
// @ currently in use will finish executing and eventually be          //
// @ released. If the entry is queued as an SMC target while its write //
// @ lock is held by the processor currently in serveICacheMiss, it    //
// @ will be invalidated.                                              //
// @                                                                   //
// @ When promoting entries into the private cache, no icache lock     //
// @ needs to be held: only the owning processor performs lookups in   //
// @ that cache, and any other processor that wants to delete an entry //
// @ in the private cache must hold the write lock on it, which        //
// @ guarantees that only one modifying processor exists.              //
///@------------------------------------------------------------------ ///

extern void handleSMC(bx_phy_address pAddr, Bit32u mask);

class bxPageWriteStampTable
{
public:
  struct physical_page_c
  {
    Bit32u code_xlation;
    pthread_rwlock_t page_lock;
  };

public:
#define PHY_MEM_PAGES (1024*1024)
  struct physical_page_c *fineGranularityMapping;
  // locks to prevent races on the fineGranularityMapping array
  pthread_mutex_t stampTableMutex;

public:
  bxPageWriteStampTable() 
  {
    fineGranularityMapping = new struct physical_page_c[PHY_MEM_PAGES];
    resetWriteStamps();
    // access to bxPageWriteStampTable needs to be protected.
    pthread_mutex_init(&stampTableMutex, NULL);
  }
 ~bxPageWriteStampTable() { delete [] fineGranularityMapping; }

  BX_CPP_INLINE Bit32u hash(bx_phy_address pAddr) const 
  {
    // can share writeStamps between multiple pages if >32 bit phy address
    return ((Bit32u) pAddr) >> 12;
  }

  /// Used to implement exclusive access to this page.
  BX_CPP_INLINE void acquirePageExclusiveAccess(bx_phy_address pAddr) 
  {
    pthread_rwlock_wrlock(&fineGranularityMapping[hash(pAddr)].page_lock);
  }
  BX_CPP_INLINE void releasePageExclusiveAccess(bx_phy_address pAddr) 
  {
    pthread_rwlock_unlock(&fineGranularityMapping[hash(pAddr)].page_lock);
  }
  BX_CPP_INLINE void acquirePageSharedAccess(bx_phy_address pAddr) 
  {
    pthread_rwlock_rdlock(&fineGranularityMapping[hash(pAddr)].page_lock);
  }
  BX_CPP_INLINE void releasePageSharedAccess(bx_phy_address pAddr) 
  {
    pthread_rwlock_unlock(&fineGranularityMapping[hash(pAddr)].page_lock);
  }

  BX_CPP_INLINE Bit32u getFineGranularityMapping(bx_phy_address pAddr) const
  {
    return fineGranularityMapping[hash(pAddr)].code_xlation;
  }

  BX_CPP_INLINE void markICache(bx_phy_address pAddr, unsigned len)
  {
    Bit32u mask  = 1 << (PAGE_OFFSET((Bit32u) pAddr) >> 7);
           mask |= 1 << (PAGE_OFFSET((Bit32u) pAddr + len - 1) >> 7);

    // need to ensure only one processor is updating this.
    pthread_mutex_lock(&stampTableMutex);
    fineGranularityMapping[hash(pAddr)].code_xlation |= mask;
    pthread_mutex_unlock(&stampTableMutex);
  }

  BX_CPP_INLINE void markICacheMask(bx_phy_address pAddr, Bit32u mask)
  {
    // need to ensure only one processor is updating this.
    pthread_mutex_lock(&stampTableMutex);
    fineGranularityMapping[hash(pAddr)].code_xlation |= mask;
    pthread_mutex_unlock(&stampTableMutex);
  }

  // whole page is being altered
  BX_CPP_INLINE void decWriteStamp(bx_phy_address pAddr)
  {
    Bit32u index = hash(pAddr);

    if (fineGranularityMapping[index].code_xlation) 
    {
       handleSMC(pAddr, 0xffffffff); // one of the CPUs might be running trace from this page
       // need to ensure only one processor is updating this.
       pthread_mutex_lock(&stampTableMutex);
       fineGranularityMapping[index].code_xlation = 0;
       pthread_mutex_unlock(&stampTableMutex);
    }
  }

  // assumption: write does not split 4K page
  BX_CPP_INLINE void decWriteStamp(bx_phy_address pAddr, unsigned len)
  {
    Bit32u index = hash(pAddr);

    if (fineGranularityMapping[index].code_xlation) 
    {
       Bit32u mask  = 1 << (PAGE_OFFSET((Bit32u) pAddr) >> 7);
              mask |= 1 << (PAGE_OFFSET((Bit32u) pAddr + len - 1) >> 7);

       if (fineGranularityMapping[index].code_xlation & mask) 
       {
          // one of the CPUs might be running trace from this page
          handleSMC(pAddr, mask);
          // need to ensure only one processor is updating this.
          pthread_mutex_lock(&stampTableMutex);
          fineGranularityMapping[index].code_xlation &= ~mask;
          pthread_mutex_unlock(&stampTableMutex);
       }       
    }
  }

  BX_CPP_INLINE void resetWriteStamps(void)
  {
    pthread_mutex_lock(&stampTableMutex);
    for(Bit32u i=0; i<PHY_MEM_PAGES; i++) 
    {
       fineGranularityMapping[i].code_xlation = 0;
    }
    pthread_mutex_unlock(&stampTableMutex);
  }

  BX_CPP_INLINE void sanityCheck(void)
  {
    // Check all the locks are released.
    for(Bit32u i=0; i<PHY_MEM_PAGES; i++) 
    {
      pthread_rwlock_wrlock(&fineGranularityMapping[i].page_lock);
      pthread_rwlock_unlock(&fineGranularityMapping[i].page_lock);
    }
  }

};

#define BxICacheEntries (1024 * 1024)  // Must be a power of 2.
#define BX_MAX_TRACE_LENGTH  8 

#define BX_ICACHE_INVALID_PHY_ADDRESS (bx_phy_address(-1))

struct bxICacheEntry_c
{
public:
   bx_phy_address pAddr;      // Physical address of the instruction
   bx_address  raddress;      // Compiled code run address.
   Bit32u traceMask;          // Use for SMC
   Bit32u fetchModeMask;      // Fetch mode mask for the entry.
   pthread_rwlock_t entryLock;// Lock to hold when using entry.
   Bit32u bx_cpuid;         // The processor that builds this entry.
   Bit32u init;               // Entry initialized.
   Bit32u priv;               // Entry is a private copy.
 
   Bit32u tlen;               // Entry length in instructions.
   bxInstruction_c *i;        // Instructions in entry.
   Bit32u icount;             // Interpretation count.
   Bit32u submitted;          // Entry submitted for compilation.

   Bit32u *hotness;           // The hotness of this entry for each processor.

   // METADATA FLAGS:
   // 0...0 repeat instruction used.
   // 1...1 block cross page boundary.  
   Bit8u metaInfo;      

public:
  /// constructor and destructor.
  bxICacheEntry_c(void) : pAddr(BX_INVALID_PHY_ADDRESS), traceMask(0), 
                          bx_cpuid(-1), init(0), priv(0), icount(0), 
                          submitted(0), tlen(0), fetchModeMask(0), metaInfo(0)
  {
    pthread_rwlock_init(&entryLock, NULL);
    hotness = new Bit32u[BX_SMP_PROCESSORS];
    for(int i=0; i<BX_SMP_PROCESSORS; i++)
    {
      hotness[i] = 0;
    }
  }

  BX_CPP_INLINE Bit32u get_private(void) { return priv; }
  BX_CPP_INLINE void   set_private(void) { priv = 1; }

  BX_CPP_INLINE void initialize(void)
  {
    // Write lock of this entry should be held prior to calling initialize.
    if (!init)
    {
       init = 1;
       i = new bxInstruction_c[BX_MAX_TRACE_LENGTH+1]; 
    }
    return;
  }


  // ========================================
  // Functions to guarantee thread safety on the entry.
  // ========================================
  BX_CPP_INLINE void acquire_entry_rdlock(void) 
  {
    pthread_rwlock_rdlock(&entryLock);
  }
  BX_CPP_INLINE void acquire_entry_wrlock(void)
  {
    pthread_rwlock_wrlock(&entryLock);
  }
  BX_CPP_INLINE void release_entry_rwlock(void)
  {
    pthread_rwlock_unlock(&entryLock);
  }

  // ========================================
  // metaInfo getters and setters.
  // ========================================
  BX_CPP_INLINE bx_bool repeat_instruction(void)  { return metaInfo & 0x1; }
  BX_CPP_INLINE void set_repeat_instruction(void) {metaInfo |= 0x1;        }
  BX_CPP_INLINE bx_bool cross_page_boundary(void) {return metaInfo & 0x2;  }
  BX_CPP_INLINE void set_cross_page_boundary(void){ metaInfo |= 0x2;       }

  BX_CPP_INLINE void copy(bxICacheEntry_c *src)
  {
    initialize();

    // The read lock on this entry should have been 
    // held prior to calling the copy method.
    pAddr = src->pAddr;
    traceMask = src->traceMask;
    fetchModeMask = src->fetchModeMask;
    tlen = src->tlen;
    bx_cpuid = src->bx_cpuid;
    metaInfo = src->metaInfo;
    raddress = src->raddress;
    submitted = src->submitted;

    // Copy the instructions as well.
    for(int idx=0;idx<tlen;idx++)
    {
      i[idx] = src->i[idx];
    }
  }

  BX_CPP_INLINE bx_bool isvalid(void)
  {
    for(int idx=0;idx<tlen;idx++)
    {
      if (!i[idx].execute) return 0;
    }
    return 1;
  }

  void annul(void);
  void private_promote(unsigned bx_cpuid);
};

/// bxICache_c - the instruction cache of the emulator.
class BOCHSAPI bxICache_c 
{
public:
  // This is private icache.
  Bit32u          priv;
  bxICacheEntry_c entry[BxICacheEntries];

  // Lock to hold while looking up or modifying entries.
  pthread_rwlock_t icacheLock;

  bxPageWriteStampTable *pageWriteStampTable;

#define BX_ICACHE_PAGE_SPLIT_ENTRIES 128 /* must be power of two */
  struct pageSplitEntryIndex {
    bx_phy_address ppf; // Physical address of 2nd page of the trace 
    bxICacheEntry_c *e; // Pointer to icache entry
  } pageSplitIndex[BX_ICACHE_PAGE_SPLIT_ENTRIES];
  int nextPageSplitIndex;

#define BX_ICACHE_VICTIM_ENTRIES 128 /* must be power of two */
  struct bxVictimCacheEntry {
    bxICacheEntry_c vc_entry;
  } victimCache[BX_ICACHE_VICTIM_ENTRIES];

public:
  /// @ constructor and destructor.
  bxICache_c(Bit32u priv) : priv(priv),  pageWriteStampTable(0)
  { 
    pthread_rwlock_init(&icacheLock, NULL);
    // Only shared icache needs bxPageWriteStampTable.
    if (!priv)
    {
       pageWriteStampTable = new bxPageWriteStampTable; 
    }
    else 
    {
      for (unsigned i=0; i<BxICacheEntries; i++)
      {
         entry[i].set_private();
      }
    }

    // initialize all victim entries.
    for (int i=0; i < BX_ICACHE_VICTIM_ENTRIES;i++)
    { 
      bxVictimCacheEntry *e = &victimCache[i];
      e->vc_entry.initialize();
    }
  
    flushICacheEntriesNoCpuRunning(); 
  }

  // Guarantee thread safety of the shared icache.
  BX_CPP_INLINE void acquire_icache_rdlock(void) 
  {
    pthread_rwlock_rdlock(&icacheLock);
  }
  BX_CPP_INLINE void acquire_icache_wrlock(void)
  {
    pthread_rwlock_wrlock(&icacheLock);
  }
  BX_CPP_INLINE void release_icache_rwlock(void)
  {
    pthread_rwlock_unlock(&icacheLock);
  }

  BX_CPP_INLINE unsigned hash(bx_phy_address pAddr, unsigned fetchModeMask) const
  {
    return ((pAddr) & (BxICacheEntries-1)) ^ fetchModeMask;
  }

  /// alloc_trace, commit_trace and commit_page_split_trace are only called
  /// by serveICacheMiss which is guarded by the write lock of this icache.
  BX_CPP_INLINE void alloc_trace(bxICacheEntry_c *e, unsigned bx_cpuid)
  {
    // The write lock on e has been held prior to calling alloc_trace.
    e->initialize();
    e->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;
    e->traceMask = 0;
    e->tlen = 0;
    e->bx_cpuid = bx_cpuid;
    e->raddress = 0;
    e->icount = 0;
    e->submitted = 0;
    e->fetchModeMask = 0;
    e->metaInfo = 0;
    // Clear the instructions...
    for(int idx=0; idx<BX_MAX_TRACE_LENGTH+1; idx++)
    {
      e->i[idx].reset(); 
    }
  }

  void commit_page_split_trace(bx_phy_address paddr, bxICacheEntry_c *entry);

  void handleSMC(bx_phy_address pAddr, Bit32u mask);

  void flushICacheEntries(void);
  void flushICacheEntriesNoCpuRunning(void);

  BX_CPP_INLINE bxICacheEntry_c *get_victim_entry(bx_phy_address pAddr, Bit32u fetchModeMask)
  {
    for (int i=0; i < BX_ICACHE_VICTIM_ENTRIES;i++) 
    {
      bxVictimCacheEntry *e = &victimCache[i];
      if (e->vc_entry.pAddr == pAddr && e->vc_entry.fetchModeMask == fetchModeMask) 
      {
        return &e->vc_entry;
      }
    }
    return NULL;
  }

  BX_CPP_INLINE Bit32u get_replacement_index(void)
  {
    return rand()%BX_ICACHE_VICTIM_ENTRIES;
  }

  BX_CPP_INLINE void victimize_entry(bxICacheEntry_c *entry)
  {
    // random replacement.
    bxVictimCacheEntry *e  = &victimCache[get_replacement_index()];
    
    // annul this entry first and then replace it.
    e->vc_entry.annul();
    e->vc_entry.copy(entry);
  }

  BX_CPP_INLINE bxICacheEntry_c* get_entry(bx_phy_address pAddr, unsigned fetchModeMask)
  {
    return &(entry[hash(pAddr, fetchModeMask)]);
  }
  BX_CPP_INLINE bxICacheEntry_c* get_entry_and_use(bx_phy_address pAddr, unsigned fetchModeMask)
  {
    acquire_icache_rdlock();
    bxICacheEntry_c* entry = get_entry(pAddr, fetchModeMask);
    entry->acquire_entry_rdlock();
    release_icache_rwlock();
    return entry;
  }
};

extern void flushICaches(void);

#endif
