/////////////////////////////////////////////////////////////////////////
// $Id: icache.cc 11090 2012-03-13 15:18:21Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2007-2011 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#include "compiler.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "param_names.h"

/// Called when the processor is reset, or when a WBINVD or INVD instruction is executed.
void flushICaches(void)
{
  // Calling processor flushes the shared cache. This will also
  // trigger flush of private instruction caches.
  BX_CPU_C *myself = BX_CPU(bx_pc_system.getCurrentCpuID());
  bx_CPU_event *event=myself->create_cpu_event(BX_ASYNC_EVENT_ICACHE_FLUSH, 
                                               0,
                                               BX_ASSIST_EVENT_NONE);
  myself->assist_events.push_event(event);

  // also set the async_event. async_event is checked by the JITed code.
  myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
}

void handleSMC(bx_phy_address pAddr, Bit32u mask)
{
  // Self-modifying code detected: let the shared icache of the writing
  // CPU invalidate the traces overlapping the touched lines.
  BX_CPU_C *writer = BX_CPU(bx_pc_system.getCurrentCpuID());
  writer->siCache->handleSMC(pAddr, mask);

  // Nothing needs to be done for the other processors. If some of them
  // are currently executing a trace built from the modified bytes, that
  // is acceptable: only the writing processor is required to observe its
  // own modification immediately. The other processors may not assume
  // any particular propagation time for this modification, and before
  // the writing processor can do anything to signal them, it must
  // already have removed the stale entry from the instruction caches.
}

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS

// Synthetic end-of-trace handler, appended to every trace by
// genDummyICacheEntry() when handler chaining is enabled. Executing it
// does nothing, which lets control fall back to the main cpu_loop.
// NOTE(review): no value is returned here — assumes BX_INSF_TYPE expands
// to void in this configuration; confirm against the build macros.
BX_INSF_TYPE BX_CPU_C::BxEndTrace(bxInstruction_c *i)
{
  // do nothing, return to main cpu_loop
}

// Turn 'i' into a synthetic (inserted) instruction of zero length whose
// handler is 'execute'. Used to append the end-of-trace opcode to a trace.
void genDummyICacheEntry(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  i->setIaOpcode(BX_INSERTED_OPCODE);
  i->setILen(0);
  i->execute = execute;
}

#endif

// Invalidate this icache entry and free the machine code attached to it.
//
// For a shared-cache entry (priv == 0) the corresponding entries in every
// CPU's private icache are annulled as well; a private entry only tears
// itself down. The entry's write lock is held for the whole operation so
// no processor can be executing the entry while it is destroyed.
void bxICacheEntry_c::annul(void)
{
  // Make sure no one is using me.
  acquire_entry_wrlock();
  if (!priv)
  {
    // This is an entry in the shared icache. Invalidate the private copies.
    // BUG FIX: the loop body previously tested and indexed with 'bx_cpuid'
    // instead of the loop index, so it looked at the same CPU on every
    // iteration; use 'i' so every CPU's private cache is visited.
    for (unsigned i=0; i < BX_SMP_PROCESSORS; i++)
    {
       // There could be false positive. But it is ok for now.
       if (i < bx_pc_system.ncpus && BX_CPU(i)->piCache)
       {
          BX_CPU(i)->piCache->get_entry(pAddr, 0)->annul();
       }
    }
  }
  pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;
  traceMask = 0;
  fetchModeMask = 0;
  tlen = 0;
  icount = 0;
  submitted = 0;
  metaInfo = 0;
  // Free the machine code through the JIT compiler's invalidation callback.
  if (raddress)
  {
    ((void(*)(void*))BX_COMP(0)->j_callbacks.invl_func)((void*)raddress);
    raddress = 0;
  }
  release_entry_rwlock();
}

void bxICacheEntry_c::private_promote(unsigned bx_cpuid)
{
  // Promote a private copy for it.
  bxICacheEntry_c *priv_entry = BX_CPU(bx_cpuid)->piCache->get_entry(pAddr, 0);
  priv_entry->acquire_entry_wrlock();
  priv_entry->initialize();
  priv_entry->copy(this);
  priv_entry->release_entry_rwlock();
}

void bxICache_c::flushICacheEntries(void)
{
  bxICacheEntry_c* e = entry;
  unsigned i;

  BX_CPU_C *myself = BX_CPU(bx_pc_system.getCurrentCpuID());
  acquire_icache_wrlock();
  for (i=0; i<BxICacheEntries; i++, e++) 
  {
    // flushICacheEntries is called when BX_ASYNC_EVENT_ICACHE_FLUSH event
    // is processed by the current CPU. wrapping all entries in an 
    // BX_ASYNC_EVENT_SMC_STOP assist event and they will be processed
    // as soon as BX_ASYNC_EVENT_ICACHE_FLUSH is finished. 
    bx_CPU_event *event=myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP, 
                                                 e,
                                                 BX_ASSIST_EVENT_NONE);
    myself->assist_events.push_event(event);

    // also set the async_event. async_event is checked by the JITed code.
    myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
  }

  nextPageSplitIndex = 0;
  for (i=0;i<BX_ICACHE_PAGE_SPLIT_ENTRIES;i++)
  {
    // Does not need to do anything for page crossing entries. 
    // as all entries are flushed.
    pageSplitIndex[i].ppf = BX_ICACHE_INVALID_PHY_ADDRESS;
  }

  // 2. victim entries.
  for (int i=0; i < BX_ICACHE_VICTIM_ENTRIES;i++)
  { 
    bxVictimCacheEntry *e = &victimCache[i];
    e->vc_entry.pAddr = BX_ICACHE_INVALID_PHY_ADDRESS; 
    bx_CPU_event * event=myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP, 
                                   &e->vc_entry,
                                   BX_ASSIST_EVENT_NONE);
    myself->assist_events.push_event(event);

    // also set the async_event. async_event is checked by 
    // the JITed code.
    myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
  } 

  release_icache_rwlock();
}

// Flush every entry of this icache by annulling it in place.
//
// Unlike flushICacheEntries() this queues no assist events; it is only
// usable when no CPU can currently be executing cached traces, hence the
// name.
void bxICache_c::flushICacheEntriesNoCpuRunning(void)
{
  for (unsigned n = 0; n < BxICacheEntries; n++)
  {
    entry[n].annul();
  }

  // All traces are gone, so the page-split index only needs clearing.
  nextPageSplitIndex = 0;
  for (unsigned n = 0; n < BX_ICACHE_PAGE_SPLIT_ENTRIES; n++)
  {
    pageSplitIndex[n].ppf = BX_ICACHE_INVALID_PHY_ADDRESS;
  }
}

// Register a page-crossing trace in the round-robin pageSplitIndex table.
void bxICache_c::commit_page_split_trace(bx_phy_address paddr, bxICacheEntry_c *entry)
{
  unsigned slot = nextPageSplitIndex;

  // If the slot is occupied by a *different* entry, that entry must be
  // invalidated. (When it holds this same entry nothing is needed: the
  // old instructions in the entry are already invalidated.)
  if (pageSplitIndex[slot].ppf != BX_ICACHE_INVALID_PHY_ADDRESS &&
      pageSplitIndex[slot].e != entry)
  {
    BX_CPU_C *myself = BX_CPU(bx_pc_system.getCurrentCpuID());

    // Make the displaced entry unreachable first, then queue an
    // SMC_STOP assist event to tear it down.
    pageSplitIndex[slot].e->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;
    bx_CPU_event *event = myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP,
                                                   pageSplitIndex[slot].e,
                                                   BX_ASSIST_EVENT_NONE);
    myself->assist_events.push_event(event);

    // also set the async_event. async_event is checked by the JITed code.
    myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
  }

  pageSplitIndex[slot].ppf = paddr;
  pageSplitIndex[slot].e = entry;

  nextPageSplitIndex = (slot + 1) & (BX_ICACHE_PAGE_SPLIT_ENTRIES - 1);
}

// Self-modifying-code handler for this icache.
//
// pAddr - physical address of the store that hit code (reduced to its
//         page frame below).
// mask  - bitmask of the lines within the page that were written; it is
//         matched against each entry's traceMask.
//
// The affected entries must be invalidated, but it can not be done here
// directly: we will be holding the icache write lock, and annulling an
// entry requires holding the write lock on the entry. That could
// potentially lead to deadlock. Therefore the following procedure is
// followed instead:
// 1. Invalidate the pAddr of the entry so that it can never be looked
//    up and used again. Processors currently executing the entry may
//    continue to do so; that is ok because the entry will be invalidated
//    before this processor executes its next instruction, see step 2.
// 2. Wrap the entry in an SMC invalidation (BX_ASYNC_EVENT_SMC_STOP)
//    assist event and push it onto this processor's assist_events queue.
//    This guarantees that the processor returns to process the assist
//    events right after the execution of the current instruction.
// The fact that the invalidation is not done atomically — entries are
// invalidated one after another — is ok, as the writing itself is not
// atomic either (e.g. REP MOVS).
void bxICache_c::handleSMC(bx_phy_address pAddr, Bit32u mask)
{
  pAddr = LPFOf(pAddr);
  unsigned i;
  bx_CPU_event *event = 0;

  // Who is SMCing ?
  BX_CPU_C *myself = BX_CPU(bx_pc_system.getCurrentCpuID());

  // (disabled) Eager invalidation of the entry this CPU is currently
  // executing. There is a deadlock here: imagine this CPU holds the read
  // lock on its current entry and tries to take the icache write lock,
  // while, at the same time, serveICacheMiss holds the icache write lock
  // and tries to take the write lock on that same entry. Deadlock — the
  // locks are not acquired in a total order. One possible solution is to
  // release the read lock on the current entry before trying to acquire
  // the icache write lock, make a copy of the currently executing
  // instruction, and force the processor to exit after it.
#if 0
  if (myself->current_entry)
  {
     myself->current_entry->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS; 
     event=myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP, 
                                    myself->current_entry,
                                    BX_ASSIST_EVENT_NONE);
     myself->assist_events.push_event(event);
     myself->current_entry->release_entry_rwlock();
     myself->current_entry = 0;
  } 
#endif

  acquire_icache_wrlock();

  // 1. page split entries.
  if (mask & 0x1) 
  {
    // the store touched 1st cache line in the page, check for
    // page split traces to invalidate.
    for (i=0;i<BX_ICACHE_PAGE_SPLIT_ENTRIES;i++) 
    {
      if (pAddr == pageSplitIndex[i].ppf) 
      // if (pageSplitIndex[i].e) 
      {
        pageSplitIndex[i].ppf = BX_ICACHE_INVALID_PHY_ADDRESS;
        pageSplitIndex[i].e->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS; 
        event=myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP, 
                                       pageSplitIndex[i].e,
                                       BX_ASSIST_EVENT_NONE);
        myself->assist_events.push_event(event);

        // also set the async_event. async_event is checked by 
        // the JITed code.
        myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
      }
    }
  }

  // 2. victim entries.
  for (int i=0; i < BX_ICACHE_VICTIM_ENTRIES;i++)
  { 
      bxVictimCacheEntry *e = &victimCache[i];
      if (LPFOf(e->vc_entry.pAddr) == pAddr && (e->vc_entry.traceMask & mask)!= 0)
      { 
        e->vc_entry.pAddr = BX_ICACHE_INVALID_PHY_ADDRESS; 
        event=myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP, 
                                       &e->vc_entry,
                                       BX_ASSIST_EVENT_NONE);
        myself->assist_events.push_event(event);

        // also set the async_event. async_event is checked by 
        // the JITed code.
        myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
      }
  } 

  // 3. icache entries.
  // Scan the hash neighborhood of pAddr for traces overlapping the
  // written lines. NOTE(review): 'e' advances continuously across all 32
  // outer iterations (up to 32*128 consecutive entries) — presumably this
  // mirrors how get_entry hashes per-line addresses; confirm against the
  // icache geometry.
  bxICacheEntry_c *e = get_entry(pAddr, 0);

  for (unsigned n=0; n < 32; n++) 
  {
    Bit32u line_mask = (1 << n);
    if (line_mask > mask) break;
    for (unsigned index=0; index < 128; index++, e++) 
    {
      if (pAddr == LPFOf(e->pAddr) && (e->traceMask & mask) != 0) 
      {
        e->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS; 
        event=myself->create_cpu_event(BX_ASYNC_EVENT_SMC_STOP, 
                                       e,
                                       BX_ASSIST_EVENT_NONE);
        myself->assist_events.push_event(event);

        // also set the async_event. async_event is checked by 
        // the JITed code.
        myself->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
      }
    }
  }
  release_icache_rwlock();
}

// Build a new trace-cache entry for the code at physical address pAddr.
// eipBiased is RIP relative to the current fetch window.
//
// Locking protocol (in order):
//  - the shared icache write lock is held for the whole build, so only
//    one processor constructs a shared entry at a time;
//  - the entry's own write lock is held so no processor can execute the
//    half-built trace;
//  - the code page (and, for a page-split trace, the adjacent page) is
//    held exclusively in the TLB until pageWriteStampTable is updated,
//    so a racing store to the page is handled correctly as SMC.
//
// Returns the now-valid entry (also recorded in current_entry).
bxICacheEntry_c* BX_CPU_C::serveICacheMiss(Bit32u eipBiased, bx_phy_address pAddr)
{
  // Only one processor can build an icache entry on the shared
  // icache at a time.
  BX_CPU_THIS_PTR siCache->acquire_icache_wrlock();
  // Record that this processor is currently building entry.
  // use to release icache wrlock if boundaryFetch faults.
  BX_CPU_THIS_PTR what_am_i_doing |= BX_BUILD_ICACHE_ENTRY; 

  BX_CPU_THIS_PTR current_entry = BX_CPU_THIS_PTR siCache->get_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);
  // Acquire the entry's write lock so that this entry is not affected by the following.
  // it can not be executed, as read lock on the entry is required if the entry is
  // to be executed.
  BX_CPU_THIS_PTR current_entry->acquire_entry_wrlock();

  // Is it in the victim cache ?
  bxICacheEntry_c* victim_entry = BX_CPU_THIS_PTR siCache->get_victim_entry(pAddr, fetchModeMask);
  if (victim_entry)
  {
    // evict current_entry to victim cache and bring in victim entry.
    // NOTE(review): function-local static allocated once with new; only
    // safe because the icache write lock serializes all callers — confirm.
    static bxICacheEntry_c* temp_entry = new bxICacheEntry_c;  
    temp_entry->copy(current_entry);
    current_entry->copy(victim_entry); 
    victim_entry->copy(temp_entry);
    // About to return, release the icache entry wrlock to allow 
    // other processors to build entries.
    BX_CPU_THIS_PTR current_entry->release_entry_rwlock();
    BX_CPU_THIS_PTR what_am_i_doing &= ~BX_BUILD_ICACHE_ENTRY; 
    BX_CPU_THIS_PTR siCache->release_icache_rwlock();

    return current_entry;
  }
  else 
  {
    // victimize the current entry.
    BX_CPU_THIS_PTR siCache->victimize_entry(current_entry);
  }

  
  BX_CPU_THIS_PTR siCache->alloc_trace(BX_CPU_THIS_PTR current_entry, bx_cpuid);
  BX_CPU_THIS_PTR stats->entry_count++;

  // Cache miss. We weren't so lucky, but let's be optimistic - try to build 
  // trace from incoming instruction bytes stream !
  BX_CPU_THIS_PTR current_entry->pAddr = pAddr;
  bx_phy_address entry_pAddr = pAddr;
  BX_CPU_THIS_PTR current_entry->traceMask = 0;
  BX_CPU_THIS_PTR current_entry->fetchModeMask = 0;

  unsigned remainingInPage = BX_CPU_THIS_PTR eipPageWindowSize - eipBiased;
  const Bit8u *fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;
  int ret;

  bxInstruction_c *i = BX_CPU_THIS_PTR current_entry->i;

  Bit32u pageOffset = PAGE_OFFSET((Bit32u) pAddr);
  Bit32u traceMask = 0;

  // About to translate from this page, lock the page. Locking the page will make
  // any write to the page blocked until the page exclusive lock is released. 
  // the page exclusive lock is released after the translation is completed and
  // pageWriteStampTable updated. Once the pageWriteStampTable is updated, the write
  // will be processed correctly in case of SMC.
  BX_CPU_THIS_PTR bx_tlb_acquire_exclusive_access(pAddr, 1);
  BX_CPU_THIS_PTR addr_locked.paddress = pAddr;

  // Don't allow traces longer than cpu_loop can execute
  static unsigned quantum = 
#if BX_SUPPORT_SMP
    (BX_SMP_PROCESSORS > 1) ? SIM->get_param_num(BXPN_SMP_QUANTUM)->get() :
#endif
    BX_MAX_TRACE_LENGTH;

  if (quantum > bx_pc_system.instrmax) quantum = bx_pc_system.instrmax;
 
  for (unsigned n=0;n < quantum;n++)
  {
#if BX_SUPPORT_X86_64
    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
      ret = fetchDecode64(fetchPtr, i, remainingInPage);
    else
#endif
      ret = fetchDecode32(fetchPtr, i, remainingInPage);

    if (ret < 0) {
      // Fetching instruction on segment/page boundary
      if (n > 0) {
         // The trace is already valid, it has several instructions inside,
         // in this case just drop the boundary instruction and stop
         // tracing.
         break;
      }

      // First instruction is boundary fetch, leave the trace cache entry 
      // invalid for now because boundaryFetch() can fault
      BX_CPU_THIS_PTR current_entry->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;
      BX_CPU_THIS_PTR current_entry->tlen = 1;
      boundaryFetch(fetchPtr, remainingInPage, i);

      // Add the instruction to trace cache
      BX_CPU_THIS_PTR current_entry->pAddr = entry_pAddr;
      BX_CPU_THIS_PTR current_entry->traceMask = 0x80000000; /* last line in page */
      BX_CPU_THIS_PTR siCache->pageWriteStampTable->markICacheMask(BX_CPU_THIS_PTR current_entry->pAddr, 
                                                                   BX_CPU_THIS_PTR current_entry->traceMask);
      BX_CPU_THIS_PTR siCache->pageWriteStampTable->markICacheMask(BX_CPU_THIS_PTR pAddrFetchPage, 0x1);

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
      BX_CPU_THIS_PTR current_entry->tlen++; /* Add the inserted end of trace opcode */
      genDummyICacheEntry(++i, &BX_CPU_C::BxEndTrace);
#endif

      BX_CPU_THIS_PTR siCache->commit_page_split_trace(BX_CPU_THIS_PTR pAddrFetchPage, BX_CPU_THIS_PTR current_entry);
      BX_CPU_THIS_PTR current_entry->fetchModeMask = fetchModeMask;
      BX_CPU_THIS_PTR current_entry->set_cross_page_boundary();

      // Finished translating from this page and the adjacent page, unlock the pages.
      BX_CPU_THIS_PTR bx_tlb_release_exclusive_access(BX_CPU_THIS_PTR current_entry->pAddr, 1);
      BX_CPU_THIS_PTR bx_tlb_release_exclusive_access(BX_CPU_THIS_PTR pAddrFetchPage, 1);
      BX_CPU_THIS_PTR addr_locked.paddress = 0;


      // About to return, release the icache entry wrlock to allow 
      // other processors to build entries.
      BX_CPU_THIS_PTR current_entry->release_entry_rwlock();

      // Make the entry valid.
      BX_CPU_THIS_PTR siCache->release_icache_rwlock();
      BX_CPU_THIS_PTR what_am_i_doing &= ~BX_BUILD_ICACHE_ENTRY; 
      return BX_CPU_THIS_PTR current_entry;
    }

    // add instruction to the trace
    unsigned iLen = i->ilen();
    BX_CPU_THIS_PTR current_entry->tlen++;

#ifndef BX_INSTR_STORE_OPCODE_BYTES
    i->set_opcode_bytes(fetchPtr);
#endif
    if (i->repUsedL())  BX_CPU_THIS_PTR current_entry->set_repeat_instruction();
    BX_INSTR_OPCODE(BX_CPU_ID, i, fetchPtr, iLen,
       BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, long64_mode());

    i++;

    // mark the 128-byte lines spanned by this instruction
    traceMask |= 1 <<  (pageOffset >> 7);
    traceMask |= 1 << ((pageOffset + iLen - 1) >> 7);

    // continue to the next instruction
    remainingInPage -= iLen;
    if (ret != 0 /* stop trace indication */ || remainingInPage == 0) 
    {
      break;
    }
    pAddr += iLen;
    pageOffset += iLen;
    fetchPtr += iLen;
  }

  BX_CPU_THIS_PTR current_entry->traceMask |= traceMask;

  BX_CPU_THIS_PTR siCache->pageWriteStampTable->markICacheMask(pAddr, BX_CPU_THIS_PTR current_entry->traceMask);

  BX_CPU_THIS_PTR current_entry->fetchModeMask = fetchModeMask;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
  // BUG FIX: this previously incremented 'entry->tlen', but no 'entry' is
  // declared in this function (a leftover from before the refactor to the
  // current_entry member). The page-split path above correctly uses
  // current_entry; do the same here.
  BX_CPU_THIS_PTR current_entry->tlen++; /* Add the inserted end of trace opcode */
  genDummyICacheEntry(i, &BX_CPU_C::BxEndTrace);
#endif

  // Finished translating from this page, unlock the page.
  BX_CPU_THIS_PTR bx_tlb_release_exclusive_access(BX_CPU_THIS_PTR current_entry->pAddr, 1);
  BX_CPU_THIS_PTR addr_locked.paddress = 0;
  
  // About to return, release the icache entry wrlock to allow 
  // other processors to build entries.
  BX_CPU_THIS_PTR current_entry->release_entry_rwlock();
  BX_CPU_THIS_PTR what_am_i_doing &= ~BX_BUILD_ICACHE_ENTRY; 
  BX_CPU_THIS_PTR siCache->release_icache_rwlock();

  return BX_CPU_THIS_PTR current_entry;
}

// Decode a single instruction whose bytes straddle a page boundary.
//
// fetchPtr        - first byte of the instruction, in the current page
// remainingInPage - instruction bytes available in the current page;
//                   must be < 15 (x86 caps an instruction at 15 bytes),
//                   otherwise #GP(0) is raised
// i               - decode target
//
// Raises #GP(0) on over-long/undecodable instructions, and prefetch()
// may fault, so callers must be able to unwind (see BX_BUILD_ICACHE_ENTRY
// bookkeeping in serveICacheMiss).
void BX_CPU_C::boundaryFetch(const Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
{
  unsigned j, k;
  // Holds up to remainingInPage (<15) + fetchBufferLimit (<=15) bytes.
  Bit8u fetchBuffer[32];
  int ret;

  if (remainingInPage >= 15) {
    BX_ERROR(("boundaryFetch #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // Read all leftover bytes in current page up to boundary.
  for (j=0; j<remainingInPage; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

  // The 2nd chunk of the instruction is on the next page.
  // Set RIP to the 0th byte of the 2nd page, and force a
  // prefetch so direct access of that physical page is possible, and
  // all the associated info is updated.
  RIP += remainingInPage;

  // Prefetch could fault.
  prefetch();

  unsigned fetchBufferLimit = 15;
  if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
    BX_DEBUG(("boundaryFetch: small window size after prefetch=%d bytes, remainingInPage=%d bytes", BX_CPU_THIS_PTR eipPageWindowSize, remainingInPage));
    fetchBufferLimit = BX_CPU_THIS_PTR eipPageWindowSize;
  }

  // We can fetch straight from the 0th byte, which is eipFetchPtr;
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;

  // Before translating from this page. lock the page.
  // NOTE(review): acquired as *shared* here, but serveICacheMiss releases
  // pAddrFetchPage with bx_tlb_release_exclusive_access — confirm the TLB
  // lock API pairs these two calls correctly.
  bx_tlb_acquire_shared_access(BX_CPU_THIS_PTR pAddrFetchPage, 1);

  // read leftover bytes in next page
  for (k=0; k<fetchBufferLimit; k++, j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    ret = fetchDecode64(fetchBuffer, i, remainingInPage+fetchBufferLimit);
  else
#endif
    ret = fetchDecode32(fetchBuffer, i, remainingInPage+fetchBufferLimit);

  if (ret < 0) {
    BX_INFO(("boundaryFetch #GP(0): failed to complete instruction decoding"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // Restore EIP since we fudged it to start at the 2nd page boundary.
  RIP = BX_CPU_THIS_PTR prev_rip;

  // Since we cross an instruction boundary, note that we need a prefetch()
  // again on the next instruction.  Perhaps we can optimize this to
  // eliminate the extra prefetch() since we do it above, but have to
  // think about repeated instructions, etc.
  // invalidate_prefetch_q();

#ifndef BX_INSTR_STORE_OPCODE_BYTES
  i->set_opcode_bytes(fetchBuffer);
#endif
  if (i->repUsedL())  BX_CPU_THIS_PTR current_entry->set_repeat_instruction();

  BX_INSTR_OPCODE(BX_CPU_ID, i, fetchBuffer, i->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, long64_mode());
}
