/*
 * Copyright (c) 2009-2010 HIT Microelectronic Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Authors: Gou Pengfei
 *          Jin Yinghan
 *
 * Date:    Dec. 2009
 *
 */

#include <algorithm>
#include <cstring>

#include "arch/isa_traits.hh"
#include "arch/utility.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "config/use_checker.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/exetrace.hh"
#include "cpu/edge/fetch.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "params/DerivEdgeCPU.hh"
#include "sim/byteswap.hh"
#include "sim/core.hh"

#include "cpu/edge/atomic/impl.hh"
#include "cpu/edge/atomic/atomic.hh"
#include "cpu/edge/atomic/atomic_dyn_inst.hh"
#include "cpu/edge/atomic/atomic_block.hh"

using namespace std;

template<class Impl>
void
SimpleEdgeFetch<Impl>::IcachePort::setPeer(Port *port)
{
    // Record the peer in the base class first.
    Port::setPeer(port);

    // A peer now exists, so fetch can interrogate the icache side
    // (block size, per-thread line buffers).
    fetch->setIcache();
}

template<class Impl>
Tick
SimpleEdgeFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
{
    // Fetch only issues timing accesses; an atomic callback is a bug.
    panic("SimpleEdgeFetch doesn't expect recvAtomic callback!");

    // Not reached -- panic() aborts -- but satisfies the return type.
    return curTick;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
{
    // Functional accesses are deliberately ignored: the line buffered in
    // cacheData[] may go stale, but it will be refetched through the
    // timing path if it is ever needed again.
    // Fix: the trace message was missing its terminating newline, unlike
    // every other DPRINTF in this file.
    DPRINTF(EdgeFetch, "SimpleEdgeFetch doesn't update its state from a "
            "functional call.\n");
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::IcachePort::recvStatusChange(Status status)
{
    // Only range changes are expected on the instruction port; anything
    // else is a protocol violation.
    if (status != RangeChange)
        panic("SimpleEdgeFetch doesn't expect recvStatusChange callback!");

    // Announce our (empty) snoop range exactly once.
    if (!snoopRangeSent) {
        snoopRangeSent = true;
        sendStatusChange(Port::RangeChange);
    }
}

template<class Impl>
bool
SimpleEdgeFetch<Impl>::IcachePort::recvTiming(PacketPtr pkt)
{
    DPRINTF(EdgeFetch, "Received timing\n");

    // A response completes an outstanding cache-line fetch.  A snooped
    // coherence request needs no action beyond acknowledging it.
    if (pkt->isResponse())
        fetch->processCacheCompletion(pkt);

    return true;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::IcachePort::recvRetry()
{
    // The cache can accept a request again; let fetch resend retryPkt.
    fetch->recvRetry();
}

template<class Impl>
SimpleEdgeFetch<Impl>::SimpleEdgeFetch(CPU *_cpu, DerivEdgeCPUParams *params)
    : cpu(_cpu),
      branchPred(params),
      predecoder(NULL),
      mapToFetchDelay(params->mapToFetchDelay),
      executeToFetchDelay(params->executeToFetchDelay),
      commitToFetchDelay(params->commitToFetchDelay),
      fetchWidth(params->fetchWidth),
      cacheBlocked(false),
      retryPkt(NULL),
      retryTid(InvalidThreadID),
      numThreads(params->numThreads),
      numFetchingThreads(params->smtNumFetchingThreads),
      interruptPending(false),
      drainPending(false),
      switchedOut(false),
      preExecuteMode(params->preExecuteMode),
      isPerfectBPred(false),
      isPerfectBTypePred(false),
      isPerfectBTypeOnlyPred(false),
      isPerfectSeqentialPred(false),
      isTargetOnly(false),
      isPerfectMemDep(false),
      isPerfectPredication(false)
{
    // Sanity-check the thread count against the compile-time limit.
    // NOTE(review): the message points at src/cpu/o3/impl.hh; for this
    // EDGE CPU the limit presumably lives under src/cpu/edge/ -- confirm
    // and update the path in the message.
    if (numThreads > Impl::MaxThreads)
        fatal("numThreads (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
              numThreads, static_cast<int>(Impl::MaxThreads));

    // Set fetch stage's status to inactive.
    _status = Inactive;

    std::string policy = params->smtFetchPolicy;

    // Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    // Figure out fetch policy (SMT thread-selection policy).
    if (policy == "singlethread") {
        fetchPolicy = SingleThread;
        if (numThreads > 1)
            panic("Invalid Fetch Policy for a SMT workload.");
    } else if (policy == "roundrobin") {
        fetchPolicy = RoundRobin;
        DPRINTF(EdgeFetch, "Fetch policy set to Round Robin\n");
    } else if (policy == "branch") {
        fetchPolicy = Branch;
        DPRINTF(EdgeFetch, "Fetch policy set to Branch Count\n");
    } else if (policy == "iqcount") {
        fetchPolicy = IQ;
        DPRINTF(EdgeFetch, "Fetch policy set to IQ count\n");
    } else if (policy == "lsqcount") {
        fetchPolicy = LSQ;
        DPRINTF(EdgeFetch, "Fetch policy set to LSQ count\n");
    } else {
        fatal("Invalid Fetch Policy. Options Are: {SingleThread,"
              " RoundRobin,LSQcount,IQcount}\n");
    }

    // Get the size of an instruction.
    instSize = sizeof(TheISA::MachInst);

    // Name is finally available, so create the port.
    icachePort = new IcachePort(this);

    icachePort->snoopRangeSent = false;

    // Set the branch predictor type.  Every "perfect"/oracle mode below
    // requires preExecuteMode, since oracle information is only available
    // when blocks are pre-executed.
    if (params->exitPredType == "Perfect") {

        if (!preExecuteMode) {
            fatal("Perfect branch predictor should work"
                  " with preExecuteMode.\n");
        }

        isPerfectBPred = true;
    }

    // Oracle branch-type prediction (exit target still predicted).
    if (params->btypePredType == "Perfect") {

        if (!preExecuteMode) {
            fatal("Perfect type predictor should work"
                  " with preExecuteMode.\n");
        }

        isPerfectBTypePred = true;
    }

    // Oracle branch-type only (no target information).
    if (params->btypePredType == "PerfectTypeOnly") {

        if (!preExecuteMode) {
            fatal("Perfect type predictor should work"
                  " with preExecuteMode.\n");
        }

        isPerfectBTypeOnlyPred = true;
    }


    // Two-stage type predictor with a perfect sequential/hysteresis first
    // stage.  (Member name "Seqential" is a historical typo kept for ABI.)
    if (params->btypePredType == "Twostage" &&
            params->btypeTwostagePredictionMode == "PerfectSeqAndHyst") {

        if (!preExecuteMode) {
            fatal("Perfect type predictor should work"
                  " with preExecuteMode.\n");
        }

        isPerfectSeqentialPred = true;
    }

    // Target-only exit prediction.
    if (params->exitPredType == "TargetOnly") {

        if (!preExecuteMode) {
            fatal("TargetOnly should work"
                  " with preExecuteMode.\n");
        }

        isTargetOnly = true;
    }

    // Set the memory dependence predictor type
    if (params->memDepMode == "Perfect") {

        if (!preExecuteMode) {
            fatal("Perfect memory dependence predictor should "
                  "work with preExecuteMode.\n");
        }

        isPerfectMemDep = true;
    }

    // Set the predication mode
    if (params->predicationMode == "Perfect") {

        if (!preExecuteMode) {
            fatal("Perfect predication should "
                  "work with preExecuteMode.\n");
        }

        isPerfectPredication = true;
    }
}

template <class Impl>
std::string
SimpleEdgeFetch<Impl>::name() const
{
    // Fetch is named as a sub-object of the owning CPU, e.g. "cpu.fetch".
    const std::string suffix = ".fetch";
    return cpu->name() + suffix;
}

template <class Impl>
void
SimpleEdgeFetch<Impl>::regStats()
{
    // Register all fetch-stage statistics with the stats package.
    // Layout: simple counters first, then per-block ratio CDFs, then
    // size/consumer distributions, cycle counters, and derived formulas.

    icacheStallCycles
        .name(name() + ".icacheStallCycles")
        .desc("Number of cycles fetch is stalled on an Icache miss")
        .prereq(icacheStallCycles);

    // --- Instruction/block counters -----------------------------------

    fetchedInsts
        .name(name() + ".INST:Insts")
        .desc("Number of instructions fetch has processed")
        .prereq(fetchedInsts);

    fetchedBlocks
        .name(name() + ".InstBLOCK:blocks")
        .desc("Number of inst blocks fetch has processed")
        .prereq(fetchedBlocks);

    fetchedNopBlocks
        .name(name() + ".InstBLOCK:NOPblocks")
        .desc("Number of NOP inst blocks fetch has processed")
        .prereq(fetchedNopBlocks);

    fetchedChunks
        .name(name() + ".InstBLOCK:chunks")
        .desc("Number of inst chunks fetch has processed")
        .prereq(fetchedChunks);

    fetchedConstInsts
        .name(name() + ".INST:Consts")
        .desc("Number of Insts that generate constants")
        .prereq(fetchedConstInsts);

    fetchedMovInsts
        .name(name() + ".INST:Movs")
        .desc("Number of movs that fetch has encountered")
        .prereq(fetchedMovInsts);

    fetchedTestInsts
        .name(name() + ".INST:Tests")
        .desc("Number of tests that fetch has encountered")
        .prereq(fetchedTestInsts);

    fetchedControls
        .name(name() + ".INST:Controls")
        .desc("Number of Controls that fetch has encountered")
        .prereq(fetchedControls);

    fetchedDirectControls
        .name(name() + ".INST:DirectControls")
        .desc("Number of Direct Controls that fetch has encountered")
        .prereq(fetchedDirectControls);

    fetchedIndirectControls
        .name(name() + ".INST:IndirectControls")
        .desc("Number of Indirect Controls that fetch has encountered")
        .prereq(fetchedIndirectControls);

    fetchedNullifies
        .name(name() + ".INST:Nullifies")
        .desc("Number of Nullifies that fetch has encountered")
        .prereq(fetchedNullifies);

    fetchedMemRefs
        .name(name() + ".INST:MemRefs")
        .desc("Number of Memory References that fetch has encountered")
        .prereq(fetchedMemRefs);

    fetchedLoads
        .name(name() + ".INST:Loads")
        .desc("Number of Loads that fetch has encountered")
        .prereq(fetchedLoads);

    fetchedStores
        .name(name() + ".INST:Stores")
        .desc("Number of Stores that fetch has encountered")
        .prereq(fetchedStores);

    fetchedWrites
        .name(name() + ".INST:Writes")
        .desc("Number of Writes that fetch has encountered")
        .prereq(fetchedWrites);

    fetchedReads
        .name(name() + ".INST:Reads")
        .desc("Number of Reads that fetch has encountered")
        .prereq(fetchedReads);

    fetchedReadWrites
        .name(name() + ".INST:ReadWrites")
        .desc("Number of ReadWrites that fetch has encountered")
        .prereq(fetchedReadWrites);

    fetchedPredOnTrues
        .name(name() + ".INST:PredOnTrues")
        .desc("Number of Predication-on-true that fetch has encountered")
        .prereq(fetchedPredOnTrues);

    fetchedPredOnFalses
        .name(name() + ".INST:PredOnFalses")
        .desc("Number of Predication-on-false that fetch has encountered")
        .prereq(fetchedPredOnFalses);

    // --- Per-block ratio distributions (CDF over [0,1], 0.1 buckets) --

    fetchedConstInstsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:ConstsRatio")
        .desc("Number of Insts that generate constants")
        .precision(4)
        .flags(Stats::cdf);

    fetchedMovInstsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:MovsRatio")
        .desc("Number of movs that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedTestInstsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:TestsRatio")
        .desc("Number of tests that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedControlsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:ControlsRatio")
        .desc("Number of Controls that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedDirectControlsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:DirectControlsRatio")
        .desc("Number of Direct Controls that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedIndirectControlsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:IndirectControlsRatio")
        .desc("Number of Indirect Controls that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedNullifiesRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:NullifiesRatio")
        .desc("Number of Nullifies that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedMemRefsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:MemRefsRatio")
        .desc("Number of Memory References that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedLoadsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:LoadsRatio")
        .desc("Number of Loads that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedStoresRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:StoresRatio")
        .desc("Number of Stores that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedWritesRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:WritesRatio")
        .desc("Number of Writes that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedReadsRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:ReadsRatio")
        .desc("Number of Reads that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedReadWritesRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:ReadWritesRatio")
        .desc("Number of ReadWrites that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedPredOnTruesRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:PredOnTruesRatio")
        .desc("Number of Predication-on-true that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    fetchedPredOnFalsesRatio
        .init(0,1,0.1)
        .name(name() + ".InstBLOCK:PredOnFalsesRatio")
        .desc("Number of Predication-on-false that fetch has encountered")
        .precision(4)
        .flags(Stats::cdf);

    // --- Per-instruction / per-block size distributions ---------------

    fetchedNumConsumers
        .init(0,EDGEStaticInst::MaxInstConsumers,1)
        .name(name() + ".INST:NumConsumers")
        .desc("Number of consumers of insts  that fetch has encountered")
        .flags(Stats::cdf);

    fetchedNumOperands
        .init(0,EDGEStaticInst::MaxInstOperands,1)
        .name(name() + ".INST:NumOperands")
        .desc("Number of operands of insts  that fetch has encountered")
        .flags(Stats::cdf);

    fetchedInstBlockSize
        .init(0,TheISA::MaxInstsInBlock,32)
        .name(name() + ".InstBLOCK:InstBlockSize")
        .desc("Number of insts of each block")
        .flags(Stats::cdf);

    fetchedInstBlockSizeInChunks
        .init(0,TheISA::MaxBlockSize,1)
        .name(name() + ".InstBLOCK:InstBlockSizeInChunks")
        .desc("Number of chunks of each block")
        .flags(Stats::cdf);

    // --- Cycle counters -----------------------------------------------

    fetchCycles
        .name(name() + ".Cycles")
        .desc("Number of cycles fetch has run and was not squashing or"
              " blocked")
        .prereq(fetchCycles);

    fetchSquashCycles
        .name(name() + ".SquashCycles")
        .desc("Number of cycles fetch has spent squashing")
        .prereq(fetchSquashCycles);

    fetchSquashBlocks
        .name(name() + ".SquashBlocks")
        .desc("Number of squashed inst blocks.")
        .prereq(fetchSquashBlocks);

    fetchMisAlignedTimes
        .name(name() + ".MisalignTimes")
        .desc("Number of block-misalign times")
        .prereq(fetchMisAlignedTimes);

    fetchIdleCycles
        .name(name() + ".IdleCycles")
        .desc("Number of cycles fetch was idle")
        .prereq(fetchIdleCycles);

    fetchBlockedCycles
        .name(name() + ".BlockedCycles")
        .desc("Number of cycles fetch has spent blocked")
        .prereq(fetchBlockedCycles);

    fetchedCacheLines
        .name(name() + ".CacheLines")
        .desc("Number of cache lines fetched")
        .prereq(fetchedCacheLines);

    fetchMiscStallCycles
        .name(name() + ".MiscStallCycles")
        .desc("Number of cycles fetch has spent waiting on interrupts, or "
              "bad addresses, or out of MSHRs")
        .prereq(fetchMiscStallCycles);

    fetchIcacheSquashes
        .name(name() + ".IcacheSquashes")
        .desc("Number of outstanding Icache misses that were squashed")
        .prereq(fetchIcacheSquashes);

    fetchNisnDist
        .init(/* base value */ 0,
              /* last value */ fetchWidth,
              /* bucket size */ 1)
        .name(name() + ".rateDist")
        .desc("Number of instructions fetched each cycle (Total)")
        .flags(Stats::pdf);

    // --- Derived formulas ---------------------------------------------

    idleRate
        .name(name() + ".idleRate")
        .desc("Percent of cycles fetch was idle")
        .prereq(idleRate);
    idleRate = fetchIdleCycles * 100 / cpu->numCycles;

    // NOTE(review): usefulFetchRate claims to exclude NOPs but uses the
    // same formula as fetchRate below (fetchedInsts / numCycles).  Either
    // the description or the formula looks wrong -- confirm intent.
    usefulFetchRate
        .name(name() + ".usefulFetchRate")
        .desc("Number of instructions fetched without NOPs per cycle")
        .flags(Stats::total);
    usefulFetchRate = fetchedInsts / cpu->numCycles;

    fetchRate
        .name(name() + ".rate")
        .desc("Number of inst fetches per cycle")
        .flags(Stats::total);
    fetchRate = fetchedInsts / cpu->numCycles;

    fetchInflightInstBlocks
        .init(0, Impl::MaxInFlightBlockNum, 1)
        .name(name() + ".fetchInflightInstBlocks")
        .desc("Number of inflight inst blocks observed by fetch stage");

    // The branch predictor registers its own statistics.
    branchPred.regStats();
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
{
    timeBuffer = time_buffer;

    // Each wire reads backwards in time by that stage's signalling delay,
    // so fetch sees information the producing stage wrote N cycles ago.
    fromMap = timeBuffer->getWire(-mapToFetchDelay);
    fromExecute = timeBuffer->getWire(-executeToFetchDelay);
    fromCommit = timeBuffer->getWire(-commitToFetchDelay);
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::setActiveThreads(std::list<ThreadID> *at_ptr)
{
    // Keep a pointer to the CPU's list of currently-active threads.
    activeThreads = at_ptr;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::setFetchQueue(TimeBuffer<Fetch2Map> *fq_ptr)
{
    fetch2mapQueue = fq_ptr;

    // Fetch writes into the current (delay 0) slot of the fetch->map queue.
    toMap = fetch2mapQueue->getWire(0);
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::initStage()
{
    // One-time initialization run after construction, once the CPU's
    // thread state exists.

    // Setup PC and nextPC with initial state.
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        PC[tid] = cpu->readPC(tid);
        nextPC[tid] = cpu->readNextPC(tid);
    }

    // Per-thread status, scheduling priority and stall bookkeeping.
    for (ThreadID tid = 0; tid < numThreads; tid++) {

        fetchStatus[tid] = Running;
        edgeFetchStatus[tid] = Head;   // start by fetching a block header

        priorityList.push_back(tid);

        memReq[tid] = NULL;            // no outstanding icache request

        stalls[tid].fetch = false;
        stalls[tid].map = false;
        stalls[tid].execute = false;
        stalls[tid].commit = false;
    }

    // Initial chunk related variables (EDGE blocks are fetched in chunks).
    chunkSize = TheISA::ChunkSize;
    chunkOffset = TheISA::ChunkOffset;
    maxBlockSize = TheISA::MaxBlockSize;

    // Position within the block currently being fetched.
    instID = 0;
    chunkID = 0;
    blockID = 1; // Default to 1

    currFrameID = -1; // Initialize it to a negative value
    currInflightInstBlocks = 0;

    // Initialize current block ptr. Add it into cpu list.
    curEdgeBlockPtr = new Block(blockID, cpu); // Initial block ID as 1
    curEdgeBlockPtr->setStartPC(PC[0]);
//    curEdgeBlockPtr->setBlockListIt(cpu->addInstBlock(curEdgeBlockPtr));

    headerInfo.reserve(TheISA::HeaderInfoSize); // Reserve space for header info
    headerInfo.resize(TheISA::HeaderInfoSize);
    clearHeaderInfo();

    // Schedule fetch to get the correct PC from the CPU
    // scheduleFetchStartupEvent(1);

    // Fetch needs to start Running instructions at the very beginning,
    // so it must start up in active state.
    switchToActive();
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::setIcache()
{
    // Query the peer (cache side) for its line size, then derive the
    // mask used to strip the intra-line offset from fetch addresses.
    cacheBlkSize = icachePort->peerBlockSize();
    cacheBlkMask = cacheBlkSize - 1;

    // Give every thread its own (initially invalid) line buffer.
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        cacheData[tid] = new uint8_t[cacheBlkSize];
        cacheDataPC[tid] = 0;
        cacheDataValid[tid] = false;
    }
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::processCacheCompletion(PacketPtr pkt)
{
    // Called from IcachePort::recvTiming when an icache response arrives.
    // Takes ownership of pkt (and pkt->req) and deletes both before
    // returning on every path.

    ThreadID tid = pkt->req->threadId();

    DPRINTF(EdgeFetch, "[tid:%u] Waking up due to cache completion.\n",tid);

    assert(!pkt->wasNacked());

    // Only change the status if it's still waiting on the icache access
    // to return.  A stale response (thread was squashed/redirected, or
    // the CPU switched out) is counted and discarded.
    if (fetchStatus[tid] != IcacheWaitResponse ||
        pkt->req != memReq[tid] ||
        isSwitchedOut()) {
        ++fetchIcacheSquashes;
        delete pkt->req;
        delete pkt;
        return;
    }

    // Copy the returned line into this thread's line buffer.
    memcpy(cacheData[tid], pkt->getPtr<uint8_t>(), cacheBlkSize);
    cacheDataValid[tid] = true;

    if (!drainPending) {
        // Wake up the CPU (if it went to sleep and was waiting on
        // this completion event).
        cpu->wakeCPU();

        DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n",
                tid);

        switchToActive();
    }

    // Only switch to IcacheAccessComplete if we're not stalled as well.
    if (checkStall(tid)) {
        fetchStatus[tid] = Blocked;
    } else {
        fetchStatus[tid] = IcacheAccessComplete;
    }

    // Reset the mem req to NULL.
    delete pkt->req;
    delete pkt;
    memReq[tid] = NULL;
}

template <class Impl>
bool
SimpleEdgeFetch<Impl>::drain()
{
    // Fetch has no state that needs flushing, so it can drain at once:
    // acknowledge immediately, then remember that a drain is in progress.
    cpu->signalDrained();
    drainPending = true;
    return true;
}

template <class Impl>
void
SimpleEdgeFetch<Impl>::resume()
{
    // Leaving the drained state; fetch may activate again.
    drainPending = false;
}

template <class Impl>
void
SimpleEdgeFetch<Impl>::switchOut()
{
    switchedOut = true;

    // Branch predictor needs to have its state cleared.
    // Fix me!
    branchPred.switchOut();
}

template <class Impl>
void
SimpleEdgeFetch<Impl>::takeOverFrom()
{
    // Reset every thread slot up to the compile-time maximum, pulling
    // the architectural PCs back from the CPU.
    for (ThreadID tid = 0; tid < Impl::MaxThreads; ++tid) {
        stalls[tid].fetch = false;
        stalls[tid].map = false;
        stalls[tid].execute = false;
        stalls[tid].commit = false;
        PC[tid] = cpu->readPC(tid);
        nextPC[tid] = cpu->readNextPC(tid);
        fetchStatus[tid] = Running;
    }

    // Reset stage-wide state and hand control to the branch predictor.
    numInst = 0;
    wroteToTimeBuffer = false;
    _status = Inactive;
    switchedOut = false;
    interruptPending = false;
    branchPred.takeOverFrom();
}

template <class Impl>
void
SimpleEdgeFetch<Impl>::wakeFromQuiesce()
{
    DPRINTF(EdgeFetch, "Waking up from quiesce\n");

    // Hopefully this is safe
    // @todo: Allow other threads to wake from quiesce.
    fetchStatus[0] = Running;
}

template <class Impl>
inline void
SimpleEdgeFetch<Impl>::switchToActive()
{
    // Already active: nothing to do.
    if (_status != Inactive)
        return;

    DPRINTF(Activity, "Activating stage.\n");

    cpu->activateStage(CPU::FetchIdx);

    _status = Active;
}

template <class Impl>
inline void
SimpleEdgeFetch<Impl>::switchToInactive()
{
    // Already inactive: nothing to do.
    if (_status != Active)
        return;

    DPRINTF(Activity, "Deactivating stage.\n");

    cpu->deactivateStage(CPU::FetchIdx);

    _status = Inactive;
}

template <class Impl>
bool
SimpleEdgeFetch<Impl>::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC,
                                          Addr &next_NPC)
{
    // Advance next_PC/next_NPC past the given instruction.  Despite the
    // name, no actual branch prediction is performed here: the original
    // prediction code is commented out and both paths simply fall
    // through sequentially.  Always returns false (i.e. "not taken").

    if (!inst->isControl()) {
        next_PC  = next_NPC;
        next_NPC = next_NPC + instSize;
        return false;
    }

    // Do branch prediction check here.
    // A bit of a misnomer...next_PC is actually the current PC until
    // this function updates it.
    //bool predict_taken;

    //ThreadID tid = inst->threadNumber;
    //Addr pred_PC = next_PC;

#if ISA_HAS_DELAY_SLOT
    // With a delay slot, the delay-slot instruction executes first.
    next_PC = next_NPC;
    next_NPC += instSize;
#else
    // No delay slot: fall through sequentially.
    next_PC += instSize;
    next_NPC = next_PC + instSize;
#endif

    DPRINTF(EdgeFetch, "[tid:%i]: [sn:%i] Branch predicted to go to %#x and then %#x.\n",
            inst->threadNumber, inst->seqNum, next_PC, next_NPC);

    //if (predict_taken) {
        //++predictedBranches;
    //}

    return false;
}

template <class Impl>
bool
SimpleEdgeFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, ThreadID tid)
{
    // Try to bring the cache line containing fetch_PC into this thread's
    // line buffer.  Returns false when the access could not even be
    // attempted (cache blocked, switched out, interrupt pending, or out
    // of MSHRs); returns true otherwise, with any translation fault
    // reported through ret_fault.

    Fault fault = NoFault;

    //AlphaDep
    if (cacheBlocked) {
        DPRINTF(EdgeFetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
                tid);
        return false;
    } else if (isSwitchedOut()) {
        DPRINTF(EdgeFetch, "[tid:%i] Can't fetch cache line, switched out\n",
                tid);
        return false;
    } else if (interruptPending && !(fetch_PC & 0x3)) {
        // Hold off fetch from getting new instructions when:
        // Cache is blocked, or
        // while an interrupt is pending and we're not in PAL mode, or
        // fetch is switched out.
        DPRINTF(EdgeFetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
                tid);
        return false;
    }

    // Align the fetch PC so it's at the start of a cache block.
    Addr block_PC = icacheBlockAlignPC(fetch_PC);

    // If we've already got the block, no need to try to fetch it again.
    if (cacheDataValid[tid] && block_PC == cacheDataPC[tid]) {
        return true;
    }

    // Setup the memReq to do a read of the first instruction's address.
    // Set the appropriate read size and flags as well.
    // Build request here.
    RequestPtr mem_req =
        new Request(tid, block_PC, cacheBlkSize, Request::INST_FETCH,
                    fetch_PC, cpu->thread[tid]->contextId(), tid);

    memReq[tid] = mem_req;

    // Translate the instruction request.
    fault = cpu->itb->translateAtomic(mem_req, cpu->thread[tid]->getTC(),
                                      BaseTLB::Execute);

    // In the case of faults, the fetch stage may need to stall and wait
    // for the ITB miss to be handled.

    // If translation was successful, attempt to read the first
    // instruction.
    if (fault == NoFault) {
        // Build packet here.  The request is deleted together with the
        // packet when the response is processed (or squashed).
        PacketPtr data_pkt = new Packet(mem_req,
                                        MemCmd::ReadReq, Packet::Broadcast);
        data_pkt->dataDynamicArray(new uint8_t[cacheBlkSize]);

        // Invalidate the buffered line until the new one arrives.
        cacheDataPC[tid] = block_PC;
        cacheDataValid[tid] = false;

        DPRINTF(EdgeFetch, "Fetch: Doing instruction read.\n");

        fetchedCacheLines++;

        // Now do the timing access to see whether or not the instruction
        // exists within the cache.
        if (!icachePort->sendTiming(data_pkt)) {
            // Cache refused the request: stash the packet and retry when
            // the port's recvRetry callback fires.
            assert(retryPkt == NULL);
            assert(retryTid == InvalidThreadID);
            DPRINTF(EdgeFetch, "[tid:%i] Out of MSHRs!\n", tid);
            fetchStatus[tid] = IcacheWaitRetry;
            retryPkt = data_pkt;
            retryTid = tid;
            cacheBlocked = true;
            return false;
        }

        DPRINTF(EdgeFetch, "[tid:%i]: Doing cache access.\n", tid);

        lastIcacheStall[tid] = curTick;

        DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
                "response.\n", tid);

        fetchStatus[tid] = IcacheWaitResponse;
    } else {
        // Translation faulted: the request was never sent, so free it here.
        delete mem_req;
        memReq[tid] = NULL;
    }

    ret_fault = fault;
    return true;
}

template <class Impl>
inline void
SimpleEdgeFetch<Impl>::doSquash(const Addr &new_PC,
        const Addr &new_NPC, ThreadID tid)
{
    // Redirect fetch for this thread to new_PC/new_NPC, resetting all
    // in-progress block-fetch state and cancelling any outstanding or
    // retrying icache access.

    DPRINTF(EdgeFetch, "[tid:%i]: Squashing, setting PC to: %#x, NPC to: %#x.\n",
            tid, new_PC, new_NPC);

    PC[tid] = new_PC;
    nextPC[tid] = new_NPC;

    // Update the current block ptr.  Restart block fetch from the header.
    edgeFetchStatus[tid] = Head;
    clearHeaderInfo();
    chunkID = 0;
    instID = 0;
    //blockID++;

    // Update stats
    fetchSquashBlocks++;

    // Clear the icache miss if it's outstanding.  Only the pointer is
    // dropped here; the request/packet themselves are freed by
    // processCacheCompletion when the now-stale response arrives and no
    // longer matches memReq[tid].
    if (fetchStatus[tid] == IcacheWaitResponse) {
        DPRINTF(EdgeFetch, "[tid:%i]: Squashing outstanding Icache miss.\n",
                tid);
        memReq[tid] = NULL;
    }

    // Get rid of the retrying packet if it was from this thread.  A
    // retrying packet was never accepted by the cache, so it (and its
    // request) must be freed here.
    if (retryTid == tid) {
        assert(cacheBlocked);
        if (retryPkt) {
            delete retryPkt->req;
            delete retryPkt;
        }
        retryPkt = NULL;
        retryTid = InvalidThreadID;
    }

    fetchStatus[tid] = Squashing;

    ++fetchSquashCycles;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::squashFromMap(const Addr &new_PC, const Addr &new_NPC,
                                     const BlockID &seq_num, ThreadID tid)
{
    // Squashes initiated by the map stage are not supported; only
    // commit-initiated squashes (see squash()) are implemented.
    panic("Unimplemented func: squashFromMap\n");
}

template<class Impl>
bool
SimpleEdgeFetch<Impl>::checkStall(ThreadID tid) const
{
    // A thread is stalled when the CPU is context switching or when any
    // downstream stage has raised a block signal for it.  Only the first
    // matching reason is traced, as before.
    if (cpu->contextSwitch) {
        DPRINTF(EdgeFetch,"[tid:%i]: Stalling for a context switch.\n",tid);
        return true;
    }

    if (stalls[tid].fetch) {
        DPRINTF(EdgeFetch,"[tid:%i]: Stall from Fetch stage detected.\n",tid);
        return true;
    }

    if (stalls[tid].map) {
        DPRINTF(EdgeFetch,"[tid:%i]: Stall from Map stage detected.\n",tid);
        return true;
    }

    if (stalls[tid].execute) {
        DPRINTF(EdgeFetch,"[tid:%i]: Stall from Execute stage detected.\n",tid);
        return true;
    }

    if (stalls[tid].commit) {
        DPRINTF(EdgeFetch,"[tid:%i]: Stall from Commit stage detected.\n",tid);
        return true;
    }

    return false;
}

template<class Impl>
typename SimpleEdgeFetch<Impl>::FetchStatus
SimpleEdgeFetch<Impl>::updateFetchStatus()
{
    // The stage is active as long as at least one active thread can make
    // progress (running, squashing, or with a completed icache access).
    for (list<ThreadID>::iterator it = activeThreads->begin();
         it != activeThreads->end(); ++it) {
        ThreadID tid = *it;

        bool runnable = fetchStatus[tid] == Running ||
            fetchStatus[tid] == Squashing ||
            fetchStatus[tid] == IcacheAccessComplete;

        if (!runnable)
            continue;

        // Transitioning from inactive to active: tell the CPU.
        if (_status == Inactive) {
            DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid);

            if (fetchStatus[tid] == IcacheAccessComplete) {
                DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache"
                        "completion\n",tid);
            }

            cpu->activateStage(CPU::FetchIdx);
        }

        return Active;
    }

    // No runnable thread: if we were active, notify the CPU of the
    // transition to inactive.
    if (_status == Active) {
        DPRINTF(Activity, "Deactivating stage.\n");

        cpu->deactivateStage(CPU::FetchIdx);
    }

    return Inactive;
}

template <class Impl>
void
SimpleEdgeFetch<Impl>::squash(const Addr &new_PC, const Addr &new_NPC,
                           const BlockID &seq_num, ThreadID tid)
{
    // Commit-initiated squash: redirect fetch and roll the frame-id /
    // in-flight block counters back by the number of squashed blocks.

    DPRINTF(EdgeFetch, "[tid:%u]: Squash from commit.\n",tid);

    doSquash(new_PC, new_NPC, tid);

    // If we're still squashing this cycle, we need to decrease
    // the frame id.  Count blocks squashed by commit plus any blocks the
    // CPU still tracks that never reached the ROB.
    int num_squashed = fromCommit->commitInfo[tid].numSquashedInstBlocks;
    num_squashed += cpu->removeBlocksNotInROB(tid);

    DPRINTF(EdgeFetch, "Need to decrease frame id %i times."
            " Current frame id is %i\n",
            num_squashed, 
            currFrameID);

    // Unwind one frame id and one in-flight block per squashed block.
    while (num_squashed > 0) {
        decCurrFrameID();
        num_squashed--;
        currInflightInstBlocks--;
    }
}

/**
 * Per-cycle entry point of the fetch stage.
 *
 * Samples the inter-stage signals for every active thread, runs the
 * actual fetch for each fetching thread, records statistics, and
 * updates the stage's activity status / informs the CPU of activity.
 */
template <class Impl>
void
SimpleEdgeFetch<Impl>::tick()
{
    bool status_change = false;

    wroteToTimeBuffer = false;

    // Check the signals for each thread to determine the proper status
    // for each thread.
    for (list<ThreadID>::iterator it = activeThreads->begin();
         it != activeThreads->end(); ++it) {
        if (checkSignalsAndUpdate(*it))
            status_change = true;
    }

    DPRINTF(EdgeFetch, "Running stage.\n");

    // Reset the number of the instruction we're Running.
    numInst = 0;

    // Fetch each of the actively Running threads.
    for (threadFetched = 0; threadFetched < numFetchingThreads;
         threadFetched++) {
        fetch(status_change);
    }

    // Record number of instructions fetched this cycle for distribution.
    fetchNisnDist.sample(numInst);
    fetchInflightInstBlocks.sample(currInflightInstBlocks);

    DPRINTF(EdgeFetch, "Currently there're %i blocks inflight.\n",
            currInflightInstBlocks);

    // Change the fetch stage status if there was a status change.
    if (status_change)
        _status = updateFetchStatus();

    // If there was activity this cycle, inform the CPU of it.
    if (wroteToTimeBuffer || cpu->contextSwitch) {
        DPRINTF(Activity, "Activity this cycle.\n");

        cpu->activityThisCycle();
    }
}

/**
 * Sample the backwards-travelling signals from the map, execute and
 * commit stages for one thread and update that thread's fetch status.
 *
 * Handled, in order: stall/unblock flags from the later stages, a full
 * squash from commit (which also rebuilds the in-progress block and
 * updates/squashes the branch predictor), a committed-block
 * notification, an ongoing ROB squash, and finally local status
 * transitions (Idle, Blocked, back to Running).
 *
 * @param tid  Thread to examine.
 * @return true if this thread's fetch status changed this cycle.
 */
template <class Impl>
bool
SimpleEdgeFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
{
    // Update the per thread stall statuses.
    if (fromMap->mapBlock[tid]) {
        stalls[tid].map = true;
    }

    if (fromMap->mapUnblock[tid]) {
        // A stage may not block and unblock in the same cycle.
        assert(stalls[tid].map);
        assert(!fromMap->mapBlock[tid]);
        stalls[tid].map = false;
    }

    if (fromExecute->executeBlock[tid]) {
        stalls[tid].execute = true;
    }

    if (fromExecute->executeUnblock[tid]) {
        assert(stalls[tid].execute);
        assert(!fromExecute->executeBlock[tid]);
        stalls[tid].execute = false;
    }

    if (fromCommit->commitBlock[tid]) {
        stalls[tid].commit = true;
    }

    if (fromCommit->commitUnblock[tid]) {
        assert(stalls[tid].commit);
        assert(!fromCommit->commitBlock[tid]);
        stalls[tid].commit = false;
    }

    // Check squash signals from commit.
    if (fromCommit->commitInfo[tid].squash) {

        DPRINTF(EdgeFetch, "[tid:%u]: Squashing instructions due to squash "
                "from commit.\n",tid);

        // In any case, squash.
        squash(fromCommit->commitInfo[tid].nextPC,
               fromCommit->commitInfo[tid].nextNPC,
               fromCommit->commitInfo[tid].doneBlockID,
               tid);

        // If this is a valid block pointer, it hasn't been added into
        // the cpu block list. As a result, we should remove the insts
        // manually.
        if (curEdgeBlockPtr) {
            curEdgeBlockPtr->removeAllInsts();
        }

        // Create a new block, add it into list.  Fetch restarts at the
        // PC commit redirected us to.
        curEdgeBlockPtr = new Block(blockID, cpu);

        curEdgeBlockPtr->setStartPC(fromCommit->commitInfo[tid].nextPC);

//        curEdgeBlockPtr->setBlockListIt(cpu->addInstBlock(curEdgeBlockPtr));

        // Also check if there's a mispredict that happened.  On a
        // mispredict the predictor is squashed with the corrected exit
        // information; otherwise it is simply squashed up to the
        // offending block id (syscall block id if execute reported a
        // syscall).
        if (fromCommit->commitInfo[tid].branchMispredict) {
            DPRINTF(EdgeFetch,"Inst block[Bid:%lli] commit, update BP. NPC = %#x, Exit =%i,\n",
                fromCommit->commitInfo[tid].doneBlockID,
                fromCommit->commitInfo[tid].nextPC,
                fromCommit->commitInfo[tid].exitID);

            branchPred.squash(fromCommit->commitInfo[tid].doneBlockID, 0,
                              fromCommit->commitInfo[tid].mispredPC,
                              fromCommit->commitInfo[tid].nextPC,
                              fromCommit->commitInfo[tid].exitType,
                              fromCommit->commitInfo[tid].predExitType,
                              fromCommit->commitInfo[tid].seqMispredict,
                              fromCommit->commitInfo[tid].exitID,
                              fromCommit->commitInfo[tid].predExitID,
                              fromCommit->commitInfo[tid].exitMispredict,
                              tid);
        } else {

            if (fromCommit->executeInfo[tid].syscall) {
                branchPred.squash(fromCommit->executeInfo[tid].syscallBlockID,
                                  tid);
            } else {
                branchPred.squash(fromCommit->commitInfo[tid].doneBlockID,
                                  tid);
            }
        }

        // If we're stalled by frame id limitation before, we can
        // restore here.
        if (stalls[tid].fetch) {
            unstallStage(tid);
        }

        return true;

    } else if (fromCommit->commitInfo[tid].doneBlockID
    /*&& !fromCommit->commitInfo[tid].needSyscall*/) {
        // If there's a committed block last cycle with no squash signal,
        // it means we should wake fetch stage and fetch from the correct
        // address.

        // A block can only commit after it was fetched, so its id must
        // be older than the one currently being assembled.
        assert(fromCommit->commitInfo[tid].doneBlockID < blockID);

        if (!fromCommit->commitInfo[tid].branchMispredict) {

            DPRINTF(EdgeFetch, "[Tid:%i] Inst block[Bid:%lli] commit, current fetch PC is"
                    "%#x\n",
                    tid,
                    fromCommit->commitInfo[tid].doneBlockID,
                    PC[tid]);

            // Correctly-predicted block: train the predictor with the
            // resolved exit information.
            branchPred.update(fromCommit->commitInfo[tid].doneBlockID,
                              tid,
                              fromCommit->commitInfo[tid].blockPC,
                              // This address space id
                              0,
                              fromCommit->commitInfo[tid].nextPC,
                              fromCommit->commitInfo[tid].exitType,
                              fromCommit->commitInfo[tid].predExitType,
                              fromCommit->commitInfo[tid].seqMispredict,
                              fromCommit->commitInfo[tid].exitID,
                              fromCommit->commitInfo[tid].predExitID,
                              fromCommit->commitInfo[tid].exitMispredict);
        }

        // If we're stalled by frame id limitation before, we can
        // restore here.
        if (stalls[tid].fetch) {
            unstallStage(tid);
        }

        // The committed block no longer occupies an in-flight slot.
        currInflightInstBlocks--;
    }

    // Check ROB squash signals from commit.
    if (fromCommit->commitInfo[tid].robSquashing) {
        DPRINTF(EdgeFetch, "[tid:%u]: ROB is still squashing.\n", tid);

        // Continue to squash.
        fetchStatus[tid] = Squashing;

        // If we're still squashing this cycle, we need to decrease
        // the frame id.
        int num_squashed = fromCommit->commitInfo[tid].numSquashedInstBlocks;

        DPRINTF(EdgeFetch, "Need to decrease frame id %i times."
                " Current frame id is %i\n",
                num_squashed, 
                currFrameID);

        while (num_squashed > 0) {
            decCurrFrameID();
            num_squashed--;
            currInflightInstBlocks--;
        }

        // If we're stalled by frame id limitation before, we can
        // restore here.
        if (stalls[tid].fetch) {
            unstallStage(tid);
        }

        return true;
    }

    // Check squash signals from the map stage; map is never expected to
    // request a squash in this design.
    if (fromMap->mapInfo[tid].squash) {
        panic("Why does MAP stage tell me to squash\n");
    }

    //
    // This if for Atomic-Edge only ...
    //
    if (fetchStatus[tid] == Idle) {
        DPRINTF(EdgeFetch, "Still Idle ... \n");

        return true;
    }

    // A later-stage stall blocks fetch, unless we are in the middle of
    // an outstanding icache transaction.
    if (checkStall(tid) &&
        fetchStatus[tid] != IcacheWaitResponse &&
        fetchStatus[tid] != IcacheWaitRetry) {
        DPRINTF(EdgeFetch, "[tid:%i]: Setting to blocked\n",tid);

        fetchStatus[tid] = Blocked;

        return true;
    }

    if (fetchStatus[tid] == Blocked ||
        fetchStatus[tid] == Squashing) {
        // Switch status to running if fetch isn't being told to block or
        // squash this cycle.
        DPRINTF(EdgeFetch, "[tid:%i]: Done squashing, switching to running.\n",
            tid);

        fetchStatus[tid] = Running;

        return true;
    }

    // If we've reached this point, we have not gotten 
    // any signals that cause fetch to change its status.
    // Fetch remains the same as before.
    return false;
}

/**
 * Build one dynamic instruction from a raw machine word and attach it
 * to the block currently being assembled.
 *
 * @param tid        Thread owning the instruction.
 * @param inst_code  Raw instruction bits from the cache line.
 * @param pc         PC of the instruction.
 * @param inst_id    Index of the instruction inside the block.
 * @param chunk_id   Index of the chunk the instruction came from.
 * @param status     Normal body inst, or a header read/write entry.
 */
template<class Impl>
void 
SimpleEdgeFetch<Impl>::generateInst(ThreadID tid, TheISA::MachInst &inst_code, Addr pc, 
    int inst_id, int chunk_id, TheISA::BlockStatus status)
{
    // Decode the raw bits into a static instruction.
    EDGEStaticInstPtr static_inst =
        EDGEStaticInstPtr(inst_code, pc, status);

    // Each dynamic instruction gets a fresh, globally unique sequence
    // number from the CPU.
    InstSeqNum seq_num = cpu->getAndIncrementInstSeq();

    DynInstPtr dyn_inst = new DynInst(static_inst,
                                      pc, pc + instSize,
                                      pc, pc + instSize,
                                      seq_num, cpu,
                                      status);

    dyn_inst->setEDGEInstStatus(inst_id, chunk_id, blockID);

    // Wire the instruction to its block and thread context.
    dyn_inst->setBlockPtr(curEdgeBlockPtr);
    dyn_inst->setTid(tid);
    dyn_inst->setASID(tid);
    dyn_inst->setThreadState(cpu->thread[tid]);

    // Header read/write entries and normal body instructions are filed
    // into different slots of the block and tracked separately.
    if (status == TheISA::HeaderRead ||
        status == TheISA::HeaderWrite) {

        curEdgeBlockPtr->addHead(dyn_inst, inst_id);
        updateHeadStats(dyn_inst);

    } else if (status == TheISA::Normal) {

        curEdgeBlockPtr->addInst(dyn_inst, inst_id);
        updateInstStats(dyn_inst);

    } else {
        panic("Unrecogonized inst status.\n");
    }

#if TRACING_ON
    dyn_inst->traceData =
         cpu->getEdgeTracer()->getEdgeInstRecord(curTick, cpu->tcBase(tid),
                 dyn_inst->staticInst, dyn_inst->readPC());
#else
    dyn_inst->traceData = NULL;
#endif
}

/**
 * Main per-thread fetch routine.
 *
 * Picks a thread, accesses the icache, then walks the fetched cache
 * line decoding EDGE header entries and body instructions into the
 * block under construction.  When a chunk boundary is crossed it
 * either depacks the block header or, at the end of the block,
 * predicts / pre-executes the block and hands it to the map stage.
 * On a fault the current block is turned into a nop block carrying the
 * fault and sent to map so commit can handle it.
 *
 * @param status_change  Set to true when this thread's fetch status
 *                       changed as a side effect of fetching.
 */
template<class Impl>
void
SimpleEdgeFetch<Impl>::fetch(bool &status_change)
{
    //////////////////////////////////////////
    // Start actual fetch
    //////////////////////////////////////////
    
    assert(currInflightInstBlocks >= 0 &&
           currInflightInstBlocks <= Impl::MaxInFlightBlockNum);

    ThreadID tid = getFetchingThread(fetchPolicy);

    if (tid == InvalidThreadID || drainPending) {
        DPRINTF(EdgeFetch,"There are no more threads available to fetch from.\n");

        // Breaks looping condition in tick()
        threadFetched = numFetchingThreads;
        return;
    }

    DPRINTF(EdgeFetch, "Attempting to fetch from [tid:%i]\n", tid);

    // The current PC.
    Addr fetch_PC = PC[tid];
    Addr fetch_NPC = nextPC[tid];
    
    // Fault code for memory access.
    Fault fault = NoFault;

    //bool cache_complete = false;

    // If returning from the delay of a cache miss, 
    // then update the status to running, 
    // otherwise do the cache access.  Possibly move this up
    // to tick() function.
    if (fetchStatus[tid] == IcacheAccessComplete) {

        DPRINTF(EdgeFetch, "[tid:%i]: Icache miss is complete.\n",
                tid);

        fetchStatus[tid] = Running;
        status_change = true;

    } else if (fetchStatus[tid] == Running) {

        DPRINTF(EdgeFetch, "[tid:%i]: Attempting to translate"
                " and read instruction, starting at PC %08p.\n",
                tid, fetch_PC);

        // A failed access either means the cache port is blocked
        // (icache stall) or some other condition stopped us.
        bool fetch_success = fetchCacheLine(fetch_PC, fault, tid);
        if (!fetch_success) {
            if (cacheBlocked) {
                ++icacheStallCycles;
            } else {
                ++fetchMiscStallCycles;
            }
            return;
        }
        
    } else {
        // Account the stall cycle to the matching statistic.
        if (fetchStatus[tid] == Idle) {
            ++fetchIdleCycles;
            DPRINTF(EdgeFetch, "[tid:%i]: EdgeFetch is idle!\n", tid);
        } else if (fetchStatus[tid] == Blocked) {
            ++fetchBlockedCycles;
            DPRINTF(EdgeFetch, "[tid:%i]: EdgeFetch is blocked!\n", tid);
        } else if (fetchStatus[tid] == Squashing) {
            ++fetchSquashCycles;
            DPRINTF(EdgeFetch, "[tid:%i]: EdgeFetch is squashing!\n", tid);
        } else if (fetchStatus[tid] == IcacheWaitResponse) {
            ++icacheStallCycles;
            DPRINTF(EdgeFetch, "[tid:%i]: EdgeFetch is waiting"
                    " cache response!\n", tid);
        }

        // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
        // fetch should do nothing.
        return;
    }

    ++fetchCycles;

    // If we had a stall due to an icache miss, then return.
    if (fetchStatus[tid] == IcacheWaitResponse) {
        ++icacheStallCycles;
        status_change = true;
        return;
    }

    Addr next_PC = fetch_PC;
    Addr next_NPC = fetch_NPC;

    MachInst inst;

    // @todo: Fix this hack.
    // Byte offsets of the fetch PC within the cache line and within the
    // current chunk, aligned down to an instruction boundary.
    unsigned offset = (fetch_PC & cacheBlkMask) & ~3;
    unsigned chunk_offset = (fetch_PC & chunkOffset) & ~3;

    EDGEStaticInstPtr staticInst = NULL;

    if (fault == NoFault) {

        DPRINTF(EdgeFetch,"tid[%i]: Adding insts from "
            "cache block to inst block.\n",tid);

        //
        // Currently maybe I should use fetchWidth 
        // as a chunk identifier
        //
        // Decode instructions until we run off the cache line, hit the
        // fetch bandwidth limit, or finish the current chunk.
        while (offset < cacheBlkSize &&
              numInst < fetchWidth &&
              chunk_offset < chunkSize ) {

            // Make sure this is a valid index.
            assert(offset <= cacheBlkSize - instSize);

            // Get the instruction from the array of the cache line.
            inst = TheISA::gtoh(*reinterpret_cast<TheISA::MachInst *>
                            (&cacheData[tid][offset]));

            //predecoder.setTC(cpu->thread[tid]->getTC());
            //predecoder.moreBytes(fetch_PC, fetch_PC, inst);

            //ext_inst = predecoder.getExtMachInst();

            if (edgeFetchStatus[tid] == Head) {

                // Accumulate raw header words; the full header is
                // depacked once the whole chunk has been fetched.
                constructHeader(inst, chunk_offset, tid);

                // Clear the top nibble before classifying the header
                // entry (bits 31:28 are not part of the entry payload).
                replaceBits(inst, 31, 28, 0);

                int header_status = TheISA::checkHeader(inst);

                if (header_status == TheISA::HeaderNop) {
                    DPRINTF(EdgeFetch, "Nop in header encountered. "
                        "Never convert it into a real inst class.\n");

                } else if (header_status == TheISA::ReadAndWriteValid) {
                    // Generate register read.
                    generateInst(tid,
                                 inst,
                                 fetch_PC, 
                                 instID, 
                                 chunkID,
                                 TheISA::HeaderRead);
                    // Generate register write.
                    generateInst(tid,
                                 inst, 
                                 fetch_PC + (instSize>>1), // Inst pc plus the half of the inst code size 
                                 instID, 
                                 chunkID,
                                 TheISA::HeaderWrite);
                } else if (header_status == TheISA::ReadValid) {
                    // Generate register read.
                    generateInst(tid,
                                 inst, 
                                 fetch_PC, 
                                 instID, 
                                 chunkID,
                                 TheISA::HeaderRead);
                } else if (header_status == TheISA::WriteValid) {
                    // Generate register write.
                    generateInst(tid,
                                 inst, 
                                 fetch_PC, 
                                 instID, 
                                 chunkID,
                                 TheISA::HeaderWrite);
                } else {
                    panic("Unrecoganized header status.\n");
                }

            } else if (edgeFetchStatus[tid] == Normal) {

                // Body instruction: only non-nops become real dynamic
                // instructions.
                bool not_nop = TheISA::checkBody(inst);

                if (not_nop) {
                    generateInst(tid,
                                 inst, 
                                 fetch_PC, 
                                 instID - 32, // Exclude header 
                                 chunkID,
                                 TheISA::Normal);
                }

             } else { // Unrecogonized status

                panic("Unknown edgeFetchStatus");

            }

            // No branch operations inside blocks, update PC directly.
            // next_PC is actually current fetch PC until this updating.
            next_PC  = next_NPC;
            next_NPC = next_NPC + instSize;

            // Move to the next instruction.
            fetch_PC = next_PC;
            fetch_NPC = next_NPC;

            ++numInst;
            offset += instSize;
            chunk_offset += instSize;
            ++instID;
        }// End of the fetch loop

        DPRINTF(EdgeFetch,"chunk_offset = %d, chunkSize = %d,"
                " instNum = %d.\n",
                chunk_offset, 
                chunkSize,
                curEdgeBlockPtr->getBlockSize());

        // Build the block.  A completed chunk either finishes the
        // header (Head -> Normal) or may finish the whole block
        // (Normal -> Head, block handed to map).
        if (chunk_offset >= chunkSize) { 

            // Head, then depack it.
            if (edgeFetchStatus[tid] == Head) { 

                DPRINTF(EdgeFetch, "EDGE status change from"
                        " Head to Normal\n");

                edgeFetchStatus[tid] = Normal;

                // The header chunk must start at the block's start PC.
                assert(((fetch_PC - instSize) & TheISA::ChunkMask) ==
                        curEdgeBlockPtr->getStartPC());

                curEdgeBlockPtr->setTid(tid);

                if (!curEdgeBlockPtr->depackHeader(headerInfo)) {

                    DPRINTF(EdgeFetch, "Not header of a inst block,"
                            " go to idle status until commit stage"
                            " tell us to squash.\n");

                    // Misaligned fetch: stay idle until commit squashes
                    // us back onto a proper block boundary.
                    fetchStatus[tid] = Idle;
                    status_change = true;
                    curEdgeBlockPtr->removeAllInsts();

                    fetchMisAlignedTimes ++;
                }

                clearHeaderInfo();
                chunkID++;

                fetchedChunks++;

            } else if (edgeFetchStatus[tid] == Normal) { 

                DPRINTF(EdgeFetch,"chunkID = %d, chunkNum =%d.\n",
                        chunkID, 
                        curEdgeBlockPtr->getChunkNum());

                chunkID++;

                fetchedChunks++;

                // Last chunk of the block fetched: predict/pre-execute
                // the block and pass it to the map stage.
                if (chunkID > (curEdgeBlockPtr->getChunkNum())) {

                    DPRINTF(EdgeFetch, "EDGE status change from Normal to Head. ");

                    Addr pred_NPC = curEdgeBlockPtr->getStartPC();
                    Addr pre_exe_npc = curEdgeBlockPtr->getStartPC();

                    // If expected to execute the block in fetch,
                    // we execute it.
                    // And we can only execute this block if this
                    // block is in the correct path.
                    if (preExecuteMode) {
                        if (curEdgeBlockPtr->isInCorrectPath()) {
                            status_change = preExecute(pre_exe_npc, tid);
                        }
                    }

                    // If we have a non-perfect branch predictor, then
                    // we predict this block here.
                    if (!isPerfectBPred) {

                        BlockID oldest_blockID = cpu->readHeadInstBlockID(tid);

                        TheISA::ExitType exit_type = TheISA::InvalidExitType;
                        ExitID pred_exitID = branchPred.predict(oldest_blockID,
                                                                curEdgeBlockPtr,
                                                                pred_NPC,
                                                                exit_type,
                                                                // This address space id
                                                                0,
                                                                tid);

                        curEdgeBlockPtr->setPredInfo(pred_exitID,
                                                     exit_type,
                                                     pred_NPC);

                        DPRINTF(EdgeFetch, "Predicted exit[%i], predicted target"
                                " @%#x.\n",
                                pred_exitID, 
                                pred_NPC);
                    }

                    bool is_next_block_in_correct_path = true;

                    // Decide whether the NEXT block will be fetched
                    // down the correct path, based on the pre-execute
                    // result vs. the prediction.
                    if (preExecuteMode) {

                        if (curEdgeBlockPtr->isInCorrectPath()) {

                            if (!isPerfectBPred) {
                                if (status_change) {

                                    // If status changed after this
                                    // block has been executed, it means
                                    // this block will get a fault or a
                                    // syscall, so the next block will
                                    // definitely be in the wrong path.
                                    is_next_block_in_correct_path =false;

                                } else {
                                    // If this is a correct-path block, then
                                    // it should have been executed before. As
                                    // a result, we should compare the next
                                    // block address of the pre-executed
                                    // result and the predicted result to
                                    // determine if the next block will be in
                                    // the correct path.
                                    if (curEdgeBlockPtr->getPredBranchTarget()
                                        != cpu->preExecuteCPU->getBranchTarget()) {

                                        is_next_block_in_correct_path = false;
                                    }
                                }
                            } else {
                                pred_NPC = pre_exe_npc;
                            }
                        } else {
                            // If we have a perfect branch predictor,
                            // we should never reach this point.
                            assert(!isPerfectBPred);

                            // If this block is in wrong path, then
                            // the next block will be in the wrong
                            // path as well.
                            is_next_block_in_correct_path = false;
                        }
                    }

                    // Update next_PC and next_NPC
                    next_PC = pred_NPC;
                    next_NPC = next_PC + instSize;

                    edgeFetchStatus[tid] = Head;

                    fetchedBlocks++;
                    updateInstBlockStats(curEdgeBlockPtr);
                    
                    // Register the finished block with the CPU, give it
                    // a frame, and queue it for the map stage.
                    curEdgeBlockPtr->setBlockListIt(cpu->addInstBlock(curEdgeBlockPtr));
                    curEdgeBlockPtr->setFrameID(getCurrFrameID());

                    toMap->instBlocks[toMap->size] = curEdgeBlockPtr;
   
                    toMap->size++;

                    currInflightInstBlocks++;

                    if (currInflightInstBlocks >= Impl::MaxInFlightBlockNum) {
                        DPRINTF(EdgeFetch, "Hit max frame limitation,"
                                " stall fetch next cycle.\n");
                        stallStage(tid);
                    }

                    assert(toMap->size <= Impl::MaxFetchWidth);

                    DPRINTF(EdgeFetch, "[tid:%i]: Adding "
                            "inst block[Bid:%lli][Fid:%i]@%#x to queue to "
                            "map. Refcount = %i\n",tid,
                            curEdgeBlockPtr->getBlockID(),
                            curEdgeBlockPtr->getFrameID(),
                            curEdgeBlockPtr->getStartPC(),
                            curEdgeBlockPtr->getCount());

                    // Start assembling the next block at the predicted
                    // target.
                    chunkID = 0;
                    instID = 0;
                    blockID++;

                    curEdgeBlockPtr = new Block(blockID, cpu);

                    curEdgeBlockPtr->setStartPC(next_PC);

//                    curEdgeBlockPtr->setBlockListIt(cpu->addInstBlock(curEdgeBlockPtr));

                    if (!is_next_block_in_correct_path) {
                        curEdgeBlockPtr->clearInCorrectPath();
                    }

                    DPRINTF(EdgeFetch, "Block[Bid:%lli] got RefCount = %i\n",
                            curEdgeBlockPtr->getBlockID(),
                            curEdgeBlockPtr->getCount());
                }
            }
        }

        if (chunk_offset >= chunkSize ) {
            DPRINTF(EdgeFetch, "[tid:%i]: Done Running,"
                    " reached inst chunk end \n", tid);
        } else if (numInst >= fetchWidth) {
            DPRINTF(EdgeFetch, "[tid:%i]: Done Running,"
                    " reached fetch bandwidth "
                    "for this cycle.\n", tid);
        } else if (offset >= cacheBlkSize) {
            DPRINTF(EdgeFetch, "[tid:%i]: Done Running,"
                    " reached the end of cache "
                    "block.\n", tid);
        }
    }

    if (numInst > 0) {
        wroteToTimeBuffer = true;
    }

    // Now that Running is completed, update the PC to signify what the next
    // cycle will be.
    if (fault == NoFault) {
        if (fetchStatus[tid] != Idle) {

            PC[tid] = next_PC;
            nextPC[tid] = next_NPC;

            DPRINTF(EdgeFetch, "[tid:%i]: Setting PC to %08p.\n", 
                    tid, next_PC);

            // If we are RUNNING and fetch stage will not be stalled
            // next cycle, we can fetch!
            //
            if (fetchStatus[tid] == Running &&
               !stalls[tid].fetch) {

                bool fetch_success = fetchCacheLine(next_PC, fault, tid);

                if (!fetch_success) {
                    if (cacheBlocked) {
                        ++icacheStallCycles;
                    } else {
                        ++fetchMiscStallCycles;
                    }
                }
            }
        }
    }

    // Fault path: convert the current block into a nop block that
    // carries the fault, so it flows down the pipeline and commit can
    // handle the fault in order.
    if (fault != NoFault) {
        
        DPRINTF(EdgeFetch, "[tid:%i]: Fault detected to %#x.\n", 
                tid, next_PC);

        edgeFetchStatus[tid] = Head;
        clearHeaderInfo();

        curEdgeBlockPtr->setTid(tid);
        //Clear insts in block and make it a nop inst block
        curEdgeBlockPtr->removeAllInsts();
        curEdgeBlockPtr->setNop();

        fetchedNopBlocks++;

        //Set fault to block so it can be handled when commit
        curEdgeBlockPtr->setFault(fault);

        curEdgeBlockPtr->setBlockListIt(cpu->addInstBlock(curEdgeBlockPtr));
        curEdgeBlockPtr->setFrameID(getCurrFrameID());

        // If we have a non-perfect branch predictor, then
        // we predict this block here.
        // Actually, this prediction here is useless. I'm just wanna
        // keep the predictor tracking the status.
        if (!isPerfectBPred) {

            Addr pred_NPC = curEdgeBlockPtr->getStartPC();
            BlockID oldest_blockID = cpu->readHeadInstBlockID(tid);

            TheISA::ExitType exit_type = TheISA::InvalidExitType;
            ExitID pred_exitID = branchPred.predict(oldest_blockID,
                                                    curEdgeBlockPtr,
                                                    pred_NPC,
                                                    exit_type,
                                                    // This address space id
                                                    0,
                                                    tid);

            curEdgeBlockPtr->setPredInfo(pred_exitID,
                                         exit_type,
                                         pred_NPC);

            DPRINTF(EdgeFetch, "Predicted exit[%i], predicted target"
                    " @%#x.\n",
                    pred_exitID, 
                    pred_NPC);
        }

        toMap->instBlocks[toMap->size] = curEdgeBlockPtr;
        toMap->size++;

        currInflightInstBlocks++;

        if (currInflightInstBlocks >= Impl::MaxInFlightBlockNum) {
            DPRINTF(EdgeFetch, "Hit max frame limitation,"
                    " stall fetch next cycle.\n");
            stallStage(tid);
        }

        assert(toMap->size <= Impl::MaxFetchWidth);

        DPRINTF(EdgeFetch, "Inst block[Bid:%lli][Fid:%i] send to map with"
                " fault directly. Refcount = %i\n",
                curEdgeBlockPtr->getBlockID(),
                curEdgeBlockPtr->getFrameID(),
                curEdgeBlockPtr->getCount());

//        Addr cur_block_pc = curEdgeBlockPtr->getStartPC();

        chunkID = 0;
        instID = 0;
        blockID++;

//        curEdgeBlockPtr = new Block(blockID, cpu);

//        curEdgeBlockPtr->setStartPC(cur_block_pc);

        //fetchStatus[tid] = TrapPending;
        // Go idle until commit handles the fault and redirects us.
        fetchStatus[tid] = Idle;
        status_change = true;
    }
}


/**
 * Pre-execute the just-fetched block on the helper pre-execute CPU.
 *
 * On success, the oracle results are copied into the block according
 * to the enabled "perfect" modes (branch prediction, exit type/id,
 * memory dependences, predication).  If the block is expected to fault
 * or raise a syscall, fetch is idled so no wrong-path blocks follow.
 *
 * @param pred_NPC  In/out: updated with the oracle branch target when
 *                  the perfect branch predictor is enabled.
 * @param tid       Thread owning the block.
 * @return true if this thread's fetch status was changed (idled).
 */
template<class Impl>
bool
SimpleEdgeFetch<Impl>::preExecute(Addr &pred_NPC, ThreadID tid)
{
    // Only correct-path blocks may be pre-executed.
    assert(curEdgeBlockPtr->isInCorrectPath());

    bool status_change = false;

    Fault fault = cpu->preExecuteCPU->execute(pred_NPC);

    if (fault == NoFault) {

        // If we have a perfect branch predictor, then set the
        // branch information here.
        if (isPerfectBPred) {

            pred_NPC = cpu->preExecuteCPU->getBranchTarget();

            ExitID pred_exit_ID = cpu->preExecuteCPU->getExitID();
            ExitType pred_exit_type = cpu->preExecuteCPU->getExitType();

            curEdgeBlockPtr->setPredInfo(pred_exit_ID,
                                         pred_exit_type,
                                         pred_NPC);

            DPRINTF(EdgeFetch, "This block get exit[%i], target"
                    " @%#x. Type:%i\n",
                    pred_exit_ID, 
                    pred_NPC,
                    pred_exit_type);
        }

        // Partially-perfect prediction modes: only exit id/type and
        // the test-instruction path are taken from the oracle.
        if (isPerfectBTypePred || isPerfectSeqentialPred || isTargetOnly
                || isPerfectBTypeOnlyPred) {

            // Both exit id and exit type are expected to be ideal.
            ExitID pred_exit_ID = cpu->preExecuteCPU->getExitID();
            ExitType pred_exit_type = cpu->preExecuteCPU->getExitType();
            std::vector<bool> test_inst_path =
                cpu->preExecuteCPU->getTestInstPath();

            curEdgeBlockPtr->setPredExitType(pred_exit_type);
            curEdgeBlockPtr->setPredExitID(pred_exit_ID);
            curEdgeBlockPtr->setTestInstPath(test_inst_path);

            DPRINTF(EdgeFetch, "This block get exit ID: %i, exit type:%i,"
                    " test inst path: %#x, size: %i\n",
                    pred_exit_ID, pred_exit_type,
                    curEdgeBlockPtr->getTestInstPath(),
                    test_inst_path.size());
        }

        // Set the ldstEntry status for the execute stage if we have a
        // perfect memory dependence predictor.
        if (isPerfectMemDep) {

            // Copy every valid load/store queue entry the oracle
            // produced into the block's ldst entries.
            for (int idx = 0; idx < TheISA::MaxLdstsInBlock; idx++) {

                Addr eff_addr = 0;
                int acc_size = 0;
                int inst_id = -1;
                bool nullified = false;
                bool fault_flag = false;

                bool entry_valid =
                    cpu->preExecuteCPU->getLsqEntry(idx,
                                                    eff_addr,
                                                    acc_size,
                                                    inst_id,
                                                    nullified,
                                                    fault_flag);
                if (entry_valid) {
                    curEdgeBlockPtr->setLdstEntry(idx,
                                                  eff_addr,
                                                  acc_size,
                                                  inst_id,
                                                  nullified,
                                                  fault_flag);

                    DPRINTF(EdgeFetch, "LdstEntry[%i] get eff addr:%#x,"
                            " acc size:%i inst id:%i and nullified:%i.\n",
                            idx,
                            eff_addr, 
                            acc_size,
                            inst_id,
                            nullified ? 1:0);
                }
            }
        }

        // If we are in perfect predication mode, we need to copy the
        // predication status given by pre-execute cpu.
        if (isPerfectPredication) {
            std::bitset<TheISA::MaxInstsInBlock> pred_status =
                cpu->preExecuteCPU->getPredicationStatus();

            curEdgeBlockPtr->setPredicationStatus(pred_status);

            DPRINTF(EdgeFetch, "Set predication status of this block"
                    " to %s\n", pred_status.to_string());
        }

        bool is_syscall = cpu->preExecuteCPU->isNeedSyscall();

        // If this is a syscall block, all the
        // blocks following it will be squashed in
        // execute stage, so we should stop the
        // pipeline to avoid executing blocks in
        // the wrong path. However, as we actually
        // know exactly the branch target of this
        // block, so we still set the branch
        // information in this block to say that we
        // really have a perfect predictor.
        if (is_syscall) {

            fetchStatus[tid] = Idle;
            status_change = true;

            DPRINTF(EdgeFetch, "This block[Bid:%lli] @%#x is"
                    " expected to generate a syscall in"
                    " execute stage, so stop the"
                    " pipeline.\n",
                    curEdgeBlockPtr->getBlockID(),
                    curEdgeBlockPtr->getStartPC());
        }
    } else {

        // The oracle faulted: idle fetch and leave the block's
        // prediction info empty; commit will sort the fault out.
        fetchStatus[tid] = Idle;
        status_change = true;

        DPRINTF(EdgeFetch, "This block[Bid:%lli] @%#x is"
                " expected to generate a fault/syscall in"
                " execute stage, so leave the predict"
                " status to be blank.\n",
                curEdgeBlockPtr->getBlockID(),
                curEdgeBlockPtr->getStartPC());
    }

    return status_change;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::recvRetry()
{
    // No packet is pending: the access was squashed after it was
    // sent out, so just clear the cache-blocked condition.
    if (retryPkt == NULL) {
        assert(retryTid == InvalidThreadID);
        cacheBlocked = false;
        return;
    }

    // A retry is outstanding; sanity-check the recorded state.
    assert(cacheBlocked);
    assert(retryTid != InvalidThreadID);
    assert(fetchStatus[retryTid] == IcacheWaitRetry);

    // Re-send the fetch request.  On success, go back to waiting
    // for the I-cache response and forget the retry bookkeeping.
    if (icachePort->sendTiming(retryPkt)) {
        fetchStatus[retryTid] = IcacheWaitResponse;
        retryPkt = NULL;
        retryTid = InvalidThreadID;
        cacheBlocked = false;
    }
}

template<class Impl>
int
SimpleEdgeFetch<Impl>::getCurrFrameID()
{
    // Advance to the next frame slot, wrapping back to 0 once all
    // in-flight block slots have been handed out.
    currFrameID++;
    if (currFrameID >= Impl::MaxInFlightBlockNum)
        currFrameID = 0;

    return currFrameID;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::decCurrFrameID()
{
    // Step back one frame slot, wrapping to the last slot when the
    // counter would go below zero.
    currFrameID--;
    if (currFrameID < 0)
        currFrameID = Impl::MaxInFlightBlockNum - 1;
}

///////////////////////////////////////
//                                   //
//  SMT FETCH POLICY MAINTAINED HERE //
//                                   //
///////////////////////////////////////
template<class Impl>
ThreadID
SimpleEdgeFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
{
    // Single-threaded case: the lone active thread may fetch only
    // while it is Running or has a completed I-cache access.
    if (numThreads <= 1) {
        list<ThreadID>::iterator thread = activeThreads->begin();
        if (thread == activeThreads->end())
            return InvalidThreadID;

        ThreadID tid = *thread;

        // Count cycles the thread spends sitting idle.
        if (fetchStatus[tid] == Idle)
            ++fetchIdleCycles;

        bool can_fetch = fetchStatus[tid] == Running ||
                         fetchStatus[tid] == IcacheAccessComplete;

        return can_fetch ? tid : InvalidThreadID;
    }

    // SMT case: dispatch to the configured fetch policy.
    switch (fetch_priority) {

      case SingleThread:
        return 0;

      case RoundRobin:
        return roundRobin();

      case IQ:
        return iqCount();

      case LSQ:
        return lsqCount();

      case Branch:
        return branchCount();

      default:
        return InvalidThreadID;
    }
}

template<class Impl>
ThreadID
SimpleEdgeFetch<Impl>::roundRobin()
{
    // Walk the priority list front to back; the first thread that is
    // able to fetch wins and is rotated to the back of the list so
    // the others get priority next time.
    for (list<ThreadID>::iterator it = priorityList.begin();
         it != priorityList.end(); ++it) {

        ThreadID candidate = *it;

        assert(candidate <= numThreads);

        bool fetchable = fetchStatus[candidate] == Running ||
                         fetchStatus[candidate] == IcacheAccessComplete ||
                         fetchStatus[candidate] == Idle;

        if (fetchable) {
            // Rotate: drop from the current position, re-append at
            // the tail.  Safe because we return immediately after
            // the erase invalidates 'it'.
            priorityList.erase(it);
            priorityList.push_back(candidate);

            return candidate;
        }
    }

    return InvalidThreadID;
}

template<class Impl>
ThreadID
SimpleEdgeFetch<Impl>::iqCount()
{
    // IQ-count (ICOUNT-style) SMT fetch policy: among the threads
    // that are currently able to fetch, pick the one with the fewest
    // instructions in the issue queue.
    //
    // Bug fix: the previous implementation pushed per-thread IQ
    // occupancy *counts* into a priority_queue<ThreadID> and then
    // used PQ.top() -- a count, not a thread ID -- to index
    // fetchStatus[], reading out of bounds whenever a count exceeded
    // the number of threads.  (The same defect existed in early gem5
    // O3 fetch and was fixed upstream by mapping counts back to
    // thread IDs; here we simply select the minimum directly.)
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    ThreadID high_pri = InvalidThreadID;
    unsigned lowest_count = 0;

    while (threads != end) {
        ThreadID tid = *threads++;

        // Only threads that can actually fetch this cycle compete.
        if (fetchStatus[tid] != Running &&
            fetchStatus[tid] != IcacheAccessComplete &&
            fetchStatus[tid] != Idle)
            continue;

        unsigned iq_count = fromExecute->executeInfo[tid].iqCount;

        if (high_pri == InvalidThreadID || iq_count < lowest_count) {
            high_pri = tid;
            lowest_count = iq_count;
        }
    }

    return high_pri;
}

template<class Impl>
ThreadID
SimpleEdgeFetch<Impl>::lsqCount()
{
    // LSQ-count SMT fetch policy: among the threads that are
    // currently able to fetch, pick the one with the fewest entries
    // in the load/store queue.
    //
    // Bug fix: as in iqCount(), the previous implementation used
    // PQ.top() -- a load/store queue *count* -- as a thread ID when
    // indexing fetchStatus[], which is out of bounds for any count
    // larger than the number of threads.  Select the minimum-count
    // eligible thread directly instead.
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    ThreadID high_pri = InvalidThreadID;
    unsigned lowest_count = 0;

    while (threads != end) {
        ThreadID tid = *threads++;

        // Only threads that can actually fetch this cycle compete.
        if (fetchStatus[tid] != Running &&
            fetchStatus[tid] != IcacheAccessComplete &&
            fetchStatus[tid] != Idle)
            continue;

        unsigned lsq_count = fromExecute->executeInfo[tid].ldstqCount;

        if (high_pri == InvalidThreadID || lsq_count < lowest_count) {
            high_pri = tid;
            lowest_count = lsq_count;
        }
    }

    return high_pri;
}

template<class Impl>
ThreadID
SimpleEdgeFetch<Impl>::branchCount()
{
    // The branch-count SMT fetch policy has not been implemented;
    // selecting it aborts the simulation via panic().
    panic("Branch Count Fetch policy unimplemented\n");
    // Unreachable, but satisfies compilers that require a return.
    return InvalidThreadID;
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::updateHeadStats(DynInstPtr &inst)
{
    // Update fetch statistics for a block-header instruction, which
    // must be a register read or write belonging to the block
    // currently being fetched.
    assert(inst->getBlockID() == curEdgeBlockPtr->getBlockID());
    assert(inst->isRead() || inst->isWrite());
    assert(!inst->isNop());

    fetchedInsts++;

    if (inst->isWrite()) {
        fetchedWrites++;
        curEdgeBlockPtr->numWrites++;
    } else if (inst->isRead()) {
        fetchedReads++;
        curEdgeBlockPtr->numReads++;
    } else {
        // Unreachable given the assert above; kept as a defensive
        // check.  (Fixed typo in the panic message.)
        panic("Unrecognized head inst type.\n");
    }
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::updateInstStats(DynInstPtr &inst)
{
    // Update per-instruction fetch statistics for a non-header
    // (useful) instruction of the block currently being fetched.
    // Header reads/writes go through updateHeadStats() instead.
    assert(inst->getBlockID() == curEdgeBlockPtr->getBlockID());
    assert(!inst->isRead() && !inst->isWrite());
    assert(!inst->isNop());
    
    fetchedInsts++;

    curEdgeBlockPtr->numUsefulInsts++;
    
    // Classify the instruction; the branches below are mutually
    // exclusive: memory reference, control, then other types.
    if (inst->isMemRef()) {
    
        fetchedMemRefs++;
        curEdgeBlockPtr->numMemRefs++;

        // A memory reference is either a store or a load.
        if (inst->isStore()) {
            fetchedStores++;
            curEdgeBlockPtr->numStores++;
        } else {
            fetchedLoads++;
            curEdgeBlockPtr->numLoads++;
        }

    } else if (inst->isControl()) {
        fetchedControls++;
        curEdgeBlockPtr->numControlInsts++;
        
        // Track direct vs. indirect control transfers separately.
        if (inst->isDirectCtrl()) {
            fetchedDirectControls++;
            curEdgeBlockPtr->numDirectControls++;
        } else if(inst->isIndirectCtrl()) {
            fetchedIndirectControls++;
            curEdgeBlockPtr->numIndirectControls++;
        }
    } else {
        // Remaining categories: nullify, move, test, and constant
        // instructions.  Anything else is counted only in the
        // aggregate fetchedInsts/numUsefulInsts totals.
        if (inst->isNullify()) {
            fetchedNullifies++;
            curEdgeBlockPtr->numNullifies++;
        } else if(inst->isMove()) {
            fetchedMovInsts++;
            curEdgeBlockPtr->numMoves++;
        } else if (inst->isTest()) {
            fetchedTestInsts++;
            curEdgeBlockPtr->numTestInsts++;
        } else if (inst->isConst()) {
            fetchedConstInsts++;
            curEdgeBlockPtr->numConstInsts++;
        }
    }

    // Independently of the class above, count predicated
    // instructions by their predication polarity.
    if (inst->staticInst->getPredication()
        == TheISA::PredUponFalse) {
        
        fetchedPredOnFalses++;
        curEdgeBlockPtr->numPredOnFalses++;

    } else if (inst->staticInst->getPredication()
        == TheISA::PredUponTrue) {
        
        fetchedPredOnTrues++;
        curEdgeBlockPtr->numPredOnTrues++;
    }

    // Sample dataflow fan-out/fan-in distributions.
    fetchedNumConsumers.sample(inst->getNumConsumers());
    fetchedNumOperands.sample(inst->getNumOperands());
}

template<class Impl>
void
SimpleEdgeFetch<Impl>::updateInstBlockStats(BlockPtr &inst_block)
{
    // Sample per-block statistics once the whole block has been
    // fetched: absolute sizes first, then the per-category ratios.
    int block_size_f = inst_block->numUsefulInsts;
    int block_size_c = inst_block->getChunkNum();

    fetchedInstBlockSize.sample(block_size_f);
    fetchedInstBlockSizeInChunks.sample(block_size_c);

    // Bug fix: guard against empty blocks.  Previously a block with
    // zero useful instructions divided by zero below and sampled
    // inf/NaN values into every ratio histogram.
    if (block_size_f == 0)
        return;

    double move_ratio = 
        ((double)inst_block->numMoves) / block_size_f;
    double test_ratio = 
        ((double)inst_block->numTestInsts) / block_size_f;
    double const_ratio = 
        ((double)inst_block->numConstInsts) / block_size_f;
    double nullify_ratio = 
        ((double)inst_block->numNullifies) / block_size_f;
    double control_ratio = 
        ((double)inst_block->numControlInsts) / block_size_f;
    double direct_control_ratio = 
        ((double)inst_block->numDirectControls) / block_size_f;
    double indirect_control_ratio = 
        ((double)inst_block->numIndirectControls) / block_size_f;
    // Reads/writes are header entries, so they are normalized by the
    // header size rather than by the useful-instruction count.
    double write_ratio = 
        ((double)inst_block->numWrites) / (double)TheISA::HeaderSize;
    double read_ratio = 
        ((double)inst_block->numReads) / (double)TheISA::HeaderSize;
    double mem_ref_ratio = 
        ((double)inst_block->numMemRefs) / block_size_f;
    double store_ratio = 
        ((double)inst_block->numStores) / block_size_f;
    double load_ratio = 
        ((double)inst_block->numLoads) / block_size_f;
    double pred_on_false_ratio = 
        ((double)inst_block->numPredOnFalses) / block_size_f;
    double pred_on_true_ratio = 
        ((double)inst_block->numPredOnTrues) / block_size_f;
    
    fetchedMovInstsRatio.sample(move_ratio);
    fetchedTestInstsRatio.sample(test_ratio);
    fetchedConstInstsRatio.sample(const_ratio);
    fetchedNullifiesRatio.sample(nullify_ratio);
    fetchedControlsRatio.sample(control_ratio);
    fetchedDirectControlsRatio.sample(direct_control_ratio);
    fetchedIndirectControlsRatio.sample(indirect_control_ratio);
    fetchedWritesRatio.sample(write_ratio);
    fetchedReadsRatio.sample(read_ratio);
    fetchedMemRefsRatio.sample(mem_ref_ratio);
    fetchedStoresRatio.sample(store_ratio);
    fetchedLoadsRatio.sample(load_ratio);
    fetchedPredOnFalsesRatio.sample(pred_on_false_ratio);
    fetchedPredOnTruesRatio.sample(pred_on_true_ratio);
}

