/*
 * Copyright (c) 2009-2010 Microelectronic Center,
 * Harbin Institute of Technology
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gou Pengfei
 */

#include <cstring>

#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/edge/exetrace.hh"
#include "cpu/edge/atomic/impl.hh"
#include "cpu/edge/atomic/atomic.hh"
#include "cpu/edge/atomic/atomic_dyn_inst.hh"
#include "cpu/edge/atomic/atomic_block.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicEdgeCPU.hh"
#include "sim/system.hh"
#include "sim/sim_events.hh"

using namespace std;
using namespace TheISA;

#ifndef NDEBUG
// Debug-build-only comparators used by checking containers.

// Strict weak ordering of instructions by their first mapped
// destination register index.
bool
AtomicEdgeCPU::registerIdxComp::operator() (const DynInstPtr &lhs,
        const DynInstPtr &rhs) const
{
    return (lhs->getMappedDestReg(0) < rhs->getMappedDestReg(0));
}

// Strict weak ordering of instructions by effective address.
bool
AtomicEdgeCPU::effAddrComp::operator() (const DynInstPtr &lhs,
        const DynInstPtr &rhs) const
{
    return (lhs->effAddr < rhs->effAddr);
}
#endif

// Construct the CPU's tick event at the standard CPU tick priority,
// remembering the owning CPU so process() can call back into it.
AtomicEdgeCPU::TickEvent::TickEvent(AtomicEdgeCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

// Event callback: advance the CPU by one tick.
void
AtomicEdgeCPU::TickEvent::process()
{
    cpu->tick();
}

// Human-readable event description for event-queue debugging output.
const char *
AtomicEdgeCPU::TickEvent::description() const
{
    return "AtomicEdgeCPU tick";
}

// Look up one of the CPU's ports by interface name. The idx argument
// is unused because each interface has exactly one port. Connecting
// "physmem_port" also records that a direct physical-memory port
// exists, so later accesses in its address range can bypass the cache.
Port *
AtomicEdgeCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "icache_port")
        return &icachePort;

    if (if_name == "dcache_port")
        return &dcachePort;

    if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }

    panic("No Such Port\n");
}
// One-time initialization after construction and port binding:
// initializes thread contexts (full-system only), caches the physical
// memory address range, stamps the fixed request contexts, and records
// the peers' cache block sizes.
void
AtomicEdgeCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        // Cache the (single) address range served by the direct
        // physical-memory port; accesses hitting it skip the cache ports.
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too

    // Block sizes are fetched once here rather than per access.
    dCacheBlockSize = dcachePort.peerBlockSize();
    iCacheBlockSize = icachePort.peerBlockSize();
}

// Timing-mode callbacks are impossible for an atomic CPU; any such
// packet indicates a misconfigured memory system.
bool
AtomicEdgeCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicEdgeCPU doesn't expect recvTiming callback!");
    return true;
}

// Atomic snoops of coherence requests are ignored; the CPU has no
// caches of its own to update, so report zero added latency.
Tick
AtomicEdgeCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}
// Functional accesses have nothing to update on this side: the CPU
// port carries no internal storage.
void
AtomicEdgeCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
}

// Port status-change callback. An address-range change from the peer
// is answered exactly once with our own (empty) range announcement;
// any other status change is unexpected for an atomic CPU.
void
AtomicEdgeCPU::CpuPort::recvStatusChange(Status status)
{
    if (status != RangeChange)
        panic("AtomicEdgeCPU doesn't expect recvStatusChange callback!");

    if (!snoopRangeSent) {
        snoopRangeSent = true;
        sendStatusChange(Port::RangeChange);
    }
}

// Retries only occur in timing mode, which this CPU never uses.
void
AtomicEdgeCPU::CpuPort::recvRetry()
{
    panic("AtomicEdgeCPU doesn't expect recvRetry callback!");
}

// Hook called when the dcache port is connected; in full-system mode
// the thread context's functional/virtual memory ports must be
// re-wired to reflect the new connection.
void
AtomicEdgeCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

// Construct the atomic EDGE CPU from its generated parameter struct.
// Allocates the simpoint tracker when simpoint simulation is enabled
// (and requires it to be valid), and starts the CPU in the Idle state.
AtomicEdgeCPU::AtomicEdgeCPU(AtomicEdgeCPUParams *p)
    : BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>(p),
      simPointSim(p->simPointSim),
      preExecuteMode(p->preExecuteMode),
      tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      // Give each port a distinct name suffix; previously all three were
      // constructed as "-iport", which made port debug output ambiguous.
      icachePort(name() + "-iport", this),
      dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pmemport", this),
      hasPhysMemPort(false)
{
    if (simPointSim) {
        // We should guarantee that we get a valid simpoint.
        simPoint = new
            EdgeSimPoint<AtomicEdgeCPUImpl>(p->simPointStartFunctionName,
                                            p->simPointEndFunctionName,
                                            p->simPointCallNum,
                                            p->simPointRetNum,
                                            p->simPointMode);
        assert(simPoint->isValid());
    } else {
        simPoint = NULL;
    }

    _status = Idle;

    // No snoop range has been announced to our peers yet; see
    // CpuPort::recvStatusChange.
    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}

// Tear down the CPU: pull any pending tick event off the event queue
// and release the simpoint tracker if one was allocated.
AtomicEdgeCPU::~AtomicEdgeCPU()
{
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    // simPoint is only allocated when simpoint simulation is enabled.
    if (simPointSim) {
        assert(simPoint);
        delete simPoint;
    }
}

// Checkpoint the CPU state: SimObject state enum, LL/SC lock flag,
// base-class state, simpoint tracker (if enabled), and finally the
// tick event under its own section name.
void
AtomicEdgeCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::serialize(os);
    if (simPointSim) {
        simPoint->serialize(os);
    }
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

// Restore the CPU from a checkpoint; must mirror serialize() exactly,
// in the same order.
void
AtomicEdgeCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::unserialize(cp, section);

    if (simPointSim) {
        // Simpoint state lives under the thread-context subsection.
        simPoint->unserialize(cp, csprintf("%s.xc.0", section));
    }

    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

// Register this CPU's statistics with the stats package. Each counter
// uses itself as its prereq so it is only printed when nonzero.
void
AtomicEdgeCPU::regStats()
{
    BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::regStats();

    atomicExecutedInsts
        .name(name() + ".EXEC:atomicExecutedInsts")
        .desc("Number of instructions executed.")
        .prereq(atomicExecutedInsts);

    atomicExecutedNormals
        .name(name() + ".EXEC:atomicExecutedNormals")
        .desc("Number of normal instructions executed.")
        .prereq(atomicExecutedNormals);

    atomicExecutedNullifiedNormals
        .name(name() + ".EXEC:atomicExecutedNullifiedNormals")
        .desc("Number of normal instructions nullified.")
        .prereq(atomicExecutedNullifiedNormals);

    atomicExecutedLoads
        .name(name() + ".EXEC:atomicExecutedLoads")
        .desc("Number of loads executed.")
        .prereq(atomicExecutedLoads);

    atomicExecutedNullifiedLoads
        .name(name() + ".EXEC:atomicExecutedNullifiedLoads")
        .desc("Number of loads nullified.")
        .prereq(atomicExecutedNullifiedLoads);

    atomicExecutedStores
        .name(name() + ".EXEC:atomicExecutedStores")
        .desc("Number of stores executed.")
        .prereq(atomicExecutedStores);

    atomicExecutedNullifiedStores
        .name(name() + ".EXEC:atomicExecutedNullifiedStores")
        .desc("Number of stores nullified.")
        .prereq(atomicExecutedNullifiedStores);

    atomicExecutedControls
        .name(name() + ".EXEC:atomicExecutedControls")
        .desc("Number of controls executed.")
        .prereq(atomicExecutedControls);

    atomicExecutedNullifiedControls
        .name(name() + ".EXEC:atomicExecutedNullifiedControls")
        .desc("Number of controls nullified.")
        .prereq(atomicExecutedNullifiedControls);

    atomicExecutedReads
        .name(name() + ".EXEC:atomicExecutedReads")
        .desc("Number of reads executed.")
        .prereq(atomicExecutedReads);

    atomicExecutedWrites
        .name(name() + ".EXEC:atomicExecutedWrites")
        .desc("Number of writes executed.")
        // Bug fix: was .prereq(atomicExecutedNullifiedNormals), a
        // copy-paste error that tied this stat's visibility to an
        // unrelated counter.
        .prereq(atomicExecutedWrites);

    atomicExecutedNullifiedWrites
        .name(name() + ".EXEC:atomicExecutedNullifiedWrites")
        .desc("Number of writes nullified.")
        .prereq(atomicExecutedNullifiedWrites);

    atomicCommitCommittedBlocks
        .name(name() + ".commit.COM:count")
        .desc("Number of committed instruction blocks.")
        .prereq(atomicCommitCommittedBlocks);

    atomicCommitCommittedInsts
        .name(name() + ".commit.COM:insts")
        .desc("Number of committed instructions.")
        .prereq(atomicCommitCommittedInsts);
}

// Resume simulation after a drain/pause. Requires the system to be in
// atomic memory mode; reschedules the tick event only if the thread is
// active and the event isn't already pending.
void
AtomicEdgeCPU::resume()
{
    // Nothing to do if the CPU was never running or has been switched out.
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(AtomicEdgeCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

// Switch this CPU out (e.g. handing execution to another CPU model):
// mark it SwitchedOut and squash any pending tick event.
void
AtomicEdgeCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


// Take over execution state from another CPU (CPU switching). Marks
// this CPU Running and schedules a tick if any thread context is
// active; otherwise goes Idle. Request contexts must be re-stamped
// because _cpuId may differ from the old CPU's.
void
AtomicEdgeCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    // This model only supports a single thread context.
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


// Activate the (single) thread context after `delay` cycles. Credits
// the cycles the thread spent suspended to the cycle counter and, when
// not in pre-execute mode, schedules the tick event on a cycle
// boundary. The CPU must currently be Idle.
void
AtomicEdgeCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(AtomicEdgeCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    // Account for the time the thread was suspended.
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    if (!preExecuteMode) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    }

    _status = Running;
}


// Suspend the (single) thread context: deschedule the tick event if
// pending and transition Running -> Idle. A no-op if already Idle.
void
AtomicEdgeCPU::suspendContext(int thread_num)
{
    DPRINTF(AtomicEdgeCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


// Perform an atomic-mode data read for the load sitting at load/store
// queue index `lq_idx`. The access is split across cache lines when it
// crosses a line boundary, and after the memory access each byte is
// checked against earlier (smaller-LSID) stores in the ldstQueue so
// that in-flight store data is forwarded to this load. Returns the
// fault (currently always NoFault; the fault plumbing is reserved for
// future use). The result is converted guest-to-host endian before
// returning.
template <class T>
Fault
AtomicEdgeCPU::read(RequestPtr &req, T &data, int lq_idx)
{
    assert(lq_idx >= 0);

    // Map the LSQ slot back to the instruction-queue entry for the load.
    int load_iq_id = ldstQueue[lq_idx].instQueueIdx;
    assert(ldstQueue[lq_idx].valid &&
        (load_iq_id >= 0 && load_iq_id < instQueue.size()));
    
    DynInstPtr load_inst = instQueue[load_iq_id];

    // Effective address must have been computed before read() is called.
    assert(load_inst->effAddrValid);

    Addr addr = load_inst->effAddr;
    
    //The block size of our peer.
    //unsigned dCacheBlockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int data_size = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + data_size - 1, dCacheBlockSize);

    // If the access crosses a line, shrink the first access to stop at
    // the line boundary; the loop below issues the second part.
    if (secondAddr > addr)
        data_size = secondAddr - addr;

    //dcache_latency = 0;

    // Note: There is no fault will be generated in the 
    // following codes. I reserve the fault code segment 
    // for future use.
    Fault fault = NoFault;

    // Access the memory (one iteration per cache line touched).
    while(1) {

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);
            
            // Use the direct physical-memory port when the address falls
            // in its range; otherwise go through the dcache port.
            if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                dcache_latency += physmemPort.sendAtomic(&pkt);
            else
                dcache_latency += dcachePort.sendAtomic(&pkt);
                    
            dcache_access = true;

            assert(!pkt.isError());

        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            break;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += data_size;
        //Adjust the size to get the remaining bytes.
        data_size = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }

    if (fault != NoFault) {
        return fault;
    }

    // Only stores at LSQ indices strictly below this load can forward.
    int store_index_limit = lq_idx;

    // No store is added prior this load,
    // thereby no forward will happen.
    // This is because load/store is added 
    // according to the LSID order.
    if (store_index_limit <= 0) {
        data = gtoh(data);
        return fault;
    }
    
    //Check whether data can be forwarded from store for each byte.
    // NOTE(review): after a split (line-crossing) access, data_size and
    // dataPtr refer to the SECOND segment only, not to the full sizeof(T)
    // buffer -- so this loop would cover fewer bytes than the load read
    // and index into the wrong part of the buffer. Harmless for accesses
    // that fit in one line; verify intent for split accesses.
    for (int offset = 0; offset < data_size; ++offset) {

        int idx = store_index_limit;
        // Index to the first prior store entry.
        idx--;
        
        // Walk prior LSQ entries from youngest to oldest; the first
        // store covering this byte wins.
        while (idx >= 0) {

            assert(ldstQueue[idx].valid && 
                (!ldstQueue[idx].store || ldstQueue[idx].executed));

            if (!ldstQueue[idx].store || ldstQueue[idx].nullified) {
            
                // Multiple stores might have the same LSID in TRIPS and
                // they may have been nullified or not at this point.
                // Thereby we should rule out all these stores.
                DPRINTF(AtomicEdgeCPU, "Loads or nullfied store is not"
                        " able to forward. Skip it.\n");
                idx--;
                continue;
            }

            int store_iq_id = ldstQueue[idx].instQueueIdx;
            assert(store_iq_id >= 0 && store_iq_id < instQueue.size());
            
            DynInstPtr store_inst = instQueue[store_iq_id];

            DPRINTF(AtomicEdgeCPU, "Checking store[LSID:%i] to"
                " see whether it can forward to this load[LSID:%i] or not.\n",
                store_inst->staticInst->getLSID(),
                load_inst->staticInst->getLSID());

            // The store is supposed to be executed,
            // because all loads wait until stores that
            // have smaller LSID executed.
            // Further more, a store with fault is not 
            // allowed to appear, because once a store
            // generate a fault, the instruction block
            // will skip executing immediately.
            assert(!store_inst->fault);
            assert(store_inst->effAddrValid &&
                store_inst->eaCalcDone &&
                store_inst->memData);
            
            // Figure out the forwarding boudaries: does the store's
            // [effAddr, effAddr + memAccSize) window cover this byte?
            bool store_has_lower_limit =
                (req->getVaddr() + offset) >=
                store_inst->effAddr;
                
            bool store_has_upper_limit =
                (req->getVaddr() + offset) <
                (store_inst->effAddr + store_inst->memAccSize);

            if(store_has_lower_limit&&store_has_upper_limit) {
                //This Byte can be forwarded.
                //Set the forward flag and data of this byte.
                int position = req ->getVaddr()
                               + offset - store_inst ->effAddr;
                uint8_t forward_data = store_inst->memData[position];
                //load_inst ->setForward(offset , data);
                dataPtr[offset] = forward_data;

                DPRINTF(AtomicEdgeCPU, "Data[%i] of "
                        "load[Bid:%lli][Iid:%lli] is "
                        "forwarded from data %i of "
                        "store[Bid:%lli][Iid:%lli]. "
                        "Forwarded data is %#x\n",
                        offset,
                        load_inst->getBlockID(),
                        load_inst->getInstID(),
                        position,
                        store_inst->getBlockID(),
                        store_inst->getInstID(),
                        forward_data);
                break;
            }
            idx--;
        }
    }

    // Convert from guest to host endianness before handing back.
    data = gtoh(data);

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of the read() template for every access
// width the ISA can issue, so the definitions in this translation unit
// are emitted for external callers.

template
Fault
AtomicEdgeCPU::read(RequestPtr &req, Twin32_t &data, int lq_idx);


template
Fault
AtomicEdgeCPU::read(RequestPtr &req, Twin64_t &data, int lq_idx);


template
Fault
AtomicEdgeCPU::read(RequestPtr &req, uint64_t &data, int lq_idx);


template
Fault
AtomicEdgeCPU::read(RequestPtr &req, uint32_t &data, int lq_idx);


template
Fault
AtomicEdgeCPU::read(RequestPtr &req, uint16_t &data, int lq_idx);


template
Fault
AtomicEdgeCPU::read(RequestPtr &req, uint8_t &data, int lq_idx);

#endif //DOXYGEN_SHOULD_SKIP_THIS


// Read a double by delegating to the 64-bit integer read and punning
// the bits with memcpy; the previous *(uint64_t*)&data cast violated
// strict aliasing.
template<>
Fault
AtomicEdgeCPU::read(RequestPtr &req, double &data, int lq_idx)
{
    uint64_t bits;
    // Copy in as well so a no-access request leaves data unchanged.
    std::memcpy(&bits, &data, sizeof(bits));
    Fault fault = read(req, bits, lq_idx);
    std::memcpy(&data, &bits, sizeof(bits));
    return fault;
}


// Read a float by delegating to the 32-bit integer read and punning
// the bits with memcpy; the previous *(uint32_t*)&data cast violated
// strict aliasing.
template<>
Fault
AtomicEdgeCPU::read(RequestPtr &req, float &data, int lq_idx)
{
    uint32_t bits;
    // Copy in as well so a no-access request leaves data unchanged.
    std::memcpy(&bits, &data, sizeof(bits));
    Fault fault = read(req, bits, lq_idx);
    std::memcpy(&data, &bits, sizeof(bits));
    return fault;
}


// Read a signed 32-bit value via the unsigned overload. Aliasing a
// signed type through its unsigned counterpart is well-defined; use a
// named cast to make the reinterpretation explicit.
template<>
Fault
AtomicEdgeCPU::read(RequestPtr &req, int32_t &data, int lq_idx)
{
    return read(req, reinterpret_cast<uint32_t &>(data), lq_idx);
}


// Perform an atomic-mode data write for the store at store-queue index
// `sq_idx`. Like read(), the access is split in two when it crosses a
// cache line boundary. Returns the fault (currently always NoFault;
// the fault plumbing is reserved for future use).
template <class T>
Fault
AtomicEdgeCPU::write(RequestPtr &req, T data, int sq_idx)
{
    assert(sq_idx >= 0);
    
    //The block size of our peer.
    //unsigned dCacheBlockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int data_size = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    Addr addr = req->getVaddr();

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + data_size - 1, dCacheBlockSize);

    // Shrink the first access to the line boundary when splitting.
    if(secondAddr > addr)
        data_size = secondAddr - addr;

    //dcache_latency = 0;
    
    // Note: There is no fault will be generated in the 
    // following codes. I reserve the fault code segment 
    // for future use.
    Fault fault = NoFault;
    
    // One iteration per cache line touched.
    while(1) {
        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                //XXX This needs to be outside of the loop in order to
                //work properly for cache line boundary crossing
                //accesses in transendian simulations.
                //data = htog(data);
                // Route to the direct physical-memory port when possible.
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
                    
                dcache_access = true;
                assert(!pkt.isError());

            }

        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (secondAddr <= addr)
        {
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += data_size;
        //Adjust the size to get the remaining bytes.
        data_size = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of the write() template for every access
// width the ISA can issue.

template
Fault
AtomicEdgeCPU::write(RequestPtr &req, Twin32_t data, int sq_idx);
                       

template
Fault
AtomicEdgeCPU::write(RequestPtr &req, Twin64_t data, int sq_idx);


template
Fault
AtomicEdgeCPU::write(RequestPtr &req, uint64_t data, int sq_idx);


template
Fault
AtomicEdgeCPU::write(RequestPtr &req, uint32_t data, int sq_idx);


template
Fault
AtomicEdgeCPU::write(RequestPtr &req, uint16_t data, int sq_idx);


template
Fault
AtomicEdgeCPU::write(RequestPtr &req, uint8_t data, int sq_idx);

#endif //DOXYGEN_SHOULD_SKIP_THIS


// Write a double by punning its bits into a uint64_t with memcpy and
// delegating to the 64-bit integer write; the previous
// *(uint64_t*)&data cast violated strict aliasing.
template<>
Fault
AtomicEdgeCPU::write(RequestPtr &req, double data, int sq_idx)
{
    uint64_t bits;
    std::memcpy(&bits, &data, sizeof(bits));
    return write(req, bits, sq_idx);
}


// Write a float by punning its bits into a uint32_t with memcpy and
// delegating to the 32-bit integer write; the previous
// *(uint32_t*)&data cast violated strict aliasing.
template<>
Fault
AtomicEdgeCPU::write(RequestPtr &req, float data, int sq_idx)
{
    uint32_t bits;
    std::memcpy(&bits, &data, sizeof(bits));
    return write(req, bits, sq_idx);
}


// Write a signed 32-bit value via the unsigned overload. This is a
// plain value conversion, so use static_cast rather than a C-style
// cast.
template<>
Fault
AtomicEdgeCPU::write(RequestPtr &req, int32_t data, int sq_idx)
{
    return write(req, static_cast<uint32_t>(data), sq_idx);
}

// Fetch one instruction chunk starting at start_addr into
// rawInstBuff[buffer_idx], splitting the access when it crosses an
// icache line boundary. Accumulates the fetch latency into
// icache_latency and returns any translation fault.
Fault
AtomicEdgeCPU::fetchChunk(Addr start_addr, int buffer_idx, Tick &icache_latency)
{
    // Address should be aligned to chunk size.
    assert((start_addr & TheISA::ChunkOffset) == 0);

    DPRINTF(AtomicEdgeCPU, "Fetch chunk@%#x "
        "for instruction block[Bid:%lli].\n", 
        start_addr, 
        curEdgeBlockPtr->getBlockID());

    Fault fault = NoFault;

    int access_size = TheISA::ChunkSize;
    // Bug fix: this was declared `int`, truncating the (wider) Addr
    // type for addresses beyond the int range and mixing
    // signed/unsigned in the comparisons below.
    Addr addr = start_addr;

    Addr second_addr = 
        roundDown(addr + TheISA::ChunkSize - 1, iCacheBlockSize);

    // Shrink the first access to the icache line boundary if the chunk
    // straddles two lines; the loop issues the remainder.
    if (second_addr > addr)
        access_size = second_addr - addr;
        
    while(1) {
        // Offset (in instructions) of this segment within the chunk buffer.
        int internal_idx = (addr - start_addr) / instSize;
        // We will get a chunk per fetch
        ifetch_req.setVirt(0, addr, access_size, 
            Request::INST_FETCH, addr);
        
        fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                             BaseTLB::Execute);
                                             
        if (fault != NoFault) {
            DPRINTF(AtomicEdgeCPU, "Fetch fault!\n");
            return fault;
        }
            
        Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                   Packet::Broadcast);
                                   
        // Fetch into the buffer at this segment's offset.
        ifetch_pkt.dataStatic(rawInstBuff[buffer_idx]+internal_idx);

        // Route to the direct physical-memory port when possible.
        if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
            icache_latency += physmemPort.sendAtomic(&ifetch_pkt);
        else
            icache_latency += icachePort.sendAtomic(&ifetch_pkt);

        // Done when no second line remains.
        if (second_addr <= addr) {
            return fault;
        }

        // Set up the second (remaining) segment.
        access_size = start_addr + TheISA::ChunkSize - second_addr;
        addr = second_addr;
    }
}

// Fetch a complete instruction block: the header chunk first (buffer
// slot 0), then one chunk per body chunk as indicated by the decoded
// header. Any fetch fault is recorded on the current block and aborts
// the remaining fetches.
void
AtomicEdgeCPU::fetch(Tick &icache_latency)
{
    DPRINTF(AtomicEdgeCPU, "\nFetch!\n");
    
    // The fetch part of this tick.
    Fault fault = NoFault;
    
    // Fetch the header
    //setupFetchRequest(&ifetch_req);
    Addr block_pc = thread->getBlockPC();

    curEdgeBlockPtr->setStartPC(block_pc);

    // Fetch the header.
    fault = fetchChunk(block_pc, 0, icache_latency);
                                         
    if (fault == NoFault) {
        
        // The gtoh operation will be taken by constructHeader func
        // but it will not change the content of the inst buffer.
        // As a result, gtoh operation must be taken again
        // before converting inst codes to StaticInst class.
        constructHeader(rawInstBuff[0]);

        // Header fetched; continue with the body chunks, which follow
        // the header contiguously in memory.
        Addr chunk_pc = block_pc + TheISA::ChunkSize;
        
        for (int chunk_num = 0;chunk_num < curEdgeBlockPtr->getChunkNum();
            chunk_num++) {

            fault = fetchChunk(chunk_pc, chunk_num + 1, icache_latency);

            if (fault != NoFault) {
                curEdgeBlockPtr->setFault(fault);
                return;
            }  

            chunk_pc += TheISA::ChunkSize;
        }
    } else {
        curEdgeBlockPtr->setFault(fault);
        return;
    }

    return;
}

// Decode one raw instruction word into a dynamic instruction and route
// it into the appropriate queue: register-read queue, register-write
// queue, or the general instruction queue (where memory references are
// also registered in the load/store queue by LSID, zero-operand
// ready instructions are enqueued on the ready list, and test
// instructions are assigned a predicate-path slot).
void
AtomicEdgeCPU::generateInst(TheISA::MachInst &inst_code, Addr pc, 
    int inst_id, TheISA::BlockStatus status)
{
    EDGEStaticInstPtr staticInst = 
        EDGEStaticInstPtr(inst_code,pc,status);

    // Global, monotonically increasing sequence number for dyn insts.
    globalInstSeq++;

    DynInstPtr instruction = new DynInst(staticInst,
                                         pc,pc + instSize,
                                         pc,pc + instSize,
                                         globalInstSeq,this,
                                         status);
                                                                                                 
    // A fake chunk id ...
    instruction->setEDGEInstStatus(inst_id,0,globalBlockID);

    //curEdgeBlockPtr->addInst(instruction);
    instruction->setThreadState(thread);
    instruction->setBlockPtr(curEdgeBlockPtr);
    
    if (instruction->isRead()) {
        // Register reads live in the (header-derived) read queue and
        // are immediately ready to issue.
        assert(status == TheISA::HeaderRead);
        assert(inst_id < readQueueSize);
        assert(!readQueue[inst_id]);
        
        readQueue[inst_id] = instruction;

        readyReads.push_back(inst_id);
        
    } else if(instruction->isWrite()) {
        // Register writes wait in the write queue until their data arrives.
        assert(status == TheISA::HeaderWrite);
        assert(inst_id < writeQueueSize);
        assert(!writeQueue[inst_id]);
        
        writeQueue[inst_id] = instruction;
        
    } else {
        assert(status == TheISA::Normal);
        assert(inst_id < instQueueSize);
        assert(!instQueue[inst_id]);

        instQueue[inst_id] = instruction;

        // Instructions with no operands (and no pending predicate) are
        // ready as soon as they are decoded; nops never execute.
        if (instruction->getNumOperands() == 0 &&
            (!instruction->isNop()) &&
            (instruction->staticInst->getPredication() == TheISA::Disable ||
            instruction->staticInst->getPredication() == TheISA::Reserved)) {
                            
            readyList.push_back(inst_id);
        }

        if (instruction->isMemRef()) {
        
            // Register the memory reference in the LSQ slot given by
            // its load/store ID so reads can find prior stores.
            TheISA::LsID inst_lsid = instruction->staticInst->getLSID();
            
            ldstQueue[inst_lsid].valid = true;
            ldstQueue[inst_lsid].instQueueIdx = inst_id;
            
            if (instruction->isStore()) {
                ldstQueue[inst_lsid].store = true;
            }
        }

        // Test (predicate-producing) instructions get a slot in the
        // test-path vector, initialized to "not taken".
        if (instruction->staticInst->isTest()) {
            int8_t test_path_id = testInstPath.size();
            instruction->setTestInstPathID(test_path_id);
            testInstPath.push_back(false);
        }
    }

    updateInstStats(instruction);
    map(instruction);

#if TRACING_ON
    instruction->traceData =
         getEdgeTracer()->getEdgeInstRecord(curTick, tc,
                 instruction->staticInst, instruction->readPC());
#else
    instruction->traceData = NULL;
#endif
}

void
AtomicEdgeCPU::preExecute(int chunk_num)
{
    assert(globalBlockID == curEdgeBlockPtr->getBlockID());
    assert(chunk_num > 0);

    DPRINTF(AtomicEdgeCPU, "\nPreExecute!\n");

    // This is the base addr of the whole 
    // inst block. Each instruction in 
    // this block will get an address
    // according to the base addr and
    // the offset of this inst itself.
    Addr inst_block_pc = thread->getBlockPC();

    // check for instruction-count-based events
    comInstEventQueue[0]->serviceEvents(numInst);

    // Decode raw bits in rawInstBuff to get
    // a real inst block. Start from the header.
    
    fetchedChunks++;
    
    TheISA::InstID header_id = 0;
    for (int num_inst = 0;num_inst < TheISA::ChunkSizeInWords;num_inst++) {
         // This is just a transform from guest ending 
        // to host ending.
        rawInstBuff[0][num_inst] = 
            gtoh(rawInstBuff[0][num_inst]);

        Addr inst_pc = inst_block_pc + (num_inst * instSize);
        
        // Replace the head nibble to 0 to avoid false decoding from
        // some resemble insts to nop.
        replaceBits(rawInstBuff[0][num_inst], 31, 28, 0);

        int header_status = TheISA::checkHeader(rawInstBuff[0][num_inst]);

        if (header_status == TheISA::HeaderNop) {
            //DPRINTF(AtomicEdgeCPU, "Nop in header encountered. "
                //"Never convert it into a real inst class.\n");

            header_id++;
            continue;
        } else if (header_status == TheISA::ReadAndWriteValid) {
            // Generate register read.
            generateInst(rawInstBuff[0][num_inst], 
                         inst_pc, 
                         header_id, 
                         TheISA::HeaderRead);
            // Generate register write.
            generateInst(rawInstBuff[0][num_inst], 
                         inst_pc + (instSize>>1), // Inst pc plus the half of the inst code size 
                         header_id, 
                         TheISA::HeaderWrite);
        } else if (header_status == TheISA::ReadValid) {
            // Generate register read.
            generateInst(rawInstBuff[0][num_inst], 
                         inst_pc, 
                         header_id, 
                         TheISA::HeaderRead);
        } else if (header_status == TheISA::WriteValid) {
            // Generate register write.
            generateInst(rawInstBuff[0][num_inst], 
                         inst_pc, 
                         header_id, 
                         TheISA::HeaderWrite);
        } else {
            panic("Unrecoganized header status.\n");
        }

        header_id++;
    }
    
    TheISA::InstID inst_id = 0;    
    
    // Then the body. Start from chunk id 1 because 
    // chunk id 0 belongs to header.
    for (int num_chunk = 1; num_chunk < chunk_num + 1; num_chunk++) {

        fetchedChunks++;
    
        Addr chunk_pc = 
            inst_block_pc + (num_chunk * TheISA::ChunkSize);
            
        for (int num_inst = 0;num_inst < TheISA::ChunkSizeInWords;num_inst++) {
            // This is just a transform from guest ending 
            // to host ending.
            rawInstBuff[num_chunk][num_inst] = 
                gtoh(rawInstBuff[num_chunk][num_inst]);
        
            Addr inst_pc = 
                chunk_pc + (num_inst * instSize);

            bool not_nop = TheISA::checkBody(rawInstBuff[num_chunk][num_inst]);

            if (not_nop) {
                generateInst(rawInstBuff[num_chunk][num_inst], 
                             inst_pc, 
                             inst_id, 
                             TheISA::Normal);
            }
            
            inst_id++;
        }
    }

    DPRINTF(EdgeBlock, "This block [PC:%#x] has %i TEST insts\n",
            inst_block_pc, testInstPath.size());

    fetchedBlocks++;

    updateInstBlockStats(curEdgeBlockPtr);
}

void
AtomicEdgeCPU::map(DynInstPtr &inst)
{
    // Map architectural register indices to physical ones and decode
    // the raw consumer targets of this instruction.
    //
    // Mapping strategy comes from the trips doc: the instruction id
    // (its position in the reg queue) selects one of four banks, and
    // the architectural index selects the row within the bank.
    assert(curEdgeBlockPtr);
    assert(!inst->isNop());

    // Register-write instructions only need their destination(s)
    // renamed; they have no consumers to decode.
    if (inst->isWrite()) {
        const int bank = inst->getInstID() % 4;

        for (int reg = 0; reg < inst->numDestRegs(); reg++) {

            PhysRegIndex phys_idx = bank + inst->destRegIdx(reg) * 4;

            inst->mapDestReg(reg, phys_idx);

            DPRINTF(EdgeBlock, "Mapping dest reg from %i to %i\n",
                inst->destRegIdx(reg), phys_idx );
        }

        return;
    }

    // Register-read instructions rename their sources the same way,
    // then fall through to consumer decoding below.
    if (inst->isRead()) {
        const int bank = inst->getInstID() % 4;

        for (int reg = 0; reg < inst->numSrcRegs(); reg++) {

            PhysRegIndex phys_idx = bank + inst->srcRegIdx(reg) * 4;

            inst->mapSrcReg(reg, phys_idx);

            DPRINTF(EdgeBlock, "Mapping src reg from %i to %i\n",
                inst->srcRegIdx(reg), phys_idx );
        }
    }

    DPRINTF(EdgeBlock, "Mapping consumers for inst[Iid:%lli]"
            " in inst block[Bid:%lli].\n",
            inst->getInstID(),
            inst->getBlockID());

    // Decode each raw consumer bitfield into a (type, subtype, id)
    // triple on the dynamic instruction.
    for (int idx = 0; idx < inst->getNumConsumers(); idx++) {

        ConsumerBitfield raw_bits = inst->getRawConsumerID(idx);

        inst->setConsumerType(idx, raw_bits.type);

        if (raw_bits.type != TheISA::WriteSlotOrNoTarget) {
            // Plain target: the encoded id is used as-is.
            inst->setConsumerID ( idx, raw_bits.id );
        } else if (raw_bits.subtype == TheISA::WriteSlot) {
            // Target is a register-write slot.
            inst->setConsumerSubType( idx, TheISA::WriteSlot);
            inst->setConsumerID ( idx, raw_bits.write_id );
        } else if (raw_bits.subtype == TheISA::NoTarget) {
            // No target at all; the id is forced to zero.
            inst->setConsumerSubType( idx, TheISA::NoTarget );
            inst->setConsumerID ( idx, 0 );
        } else {
            panic("Unrecogonized consumer subtype\n");
        }

        DPRINTF(EdgeBlock, "Consumer[idx:%i][Type:%i][ID:%i]\n",
                idx,
                raw_bits.type,
                raw_bits.id);
    }

    return;
}

void
AtomicEdgeCPU::execute()
{
    // Execute the current instruction block in dataflow order.
    // Register reads fire first and wake their consumers; then each
    // pass of the outer loop drains the ready list, executes those
    // instructions, wakes the consumers of everything that completed,
    // and re-queues loads that were blocked behind an older store
    // (memory blocker).  Execution stops when the block faults or its
    // completion condition is satisfied.
    assert(curEdgeBlockPtr);

    DPRINTF(AtomicEdgeCPU, "\nExecute!\n");

    if (curEdgeBlockPtr->getStoreMask() != 0) {
        // Initialize the memory blocker LSID.  Loads whose LSID is
        // not below this value must wait (be replayed) until the
        // blocking store has executed.
        TheISA::LsID blocker_lsid = curEdgeBlockPtr->getMemBlockerLSID();

        DPRINTF(AtomicEdgeCPU, "Initialize memory blocker to [LSID:%i].\n",
                blocker_lsid);
        memBlockerLSID = blocker_lsid;
    }

    // All register reads of the block must be ready up front.
    assert(readyReads.size() == curEdgeBlockPtr->numReads);
    
    Fault fault = NoFault;
    
    // Execute register reads at first.
    for (int i = 0; i < readyReads.size(); i++) {
    
        int rq_id = readyReads[i];
        assert(rq_id >= 0 && rq_id < readQueue.size());
        
        DynInstPtr reg_read = readQueue[rq_id];
        
        // Reg read will never be nullified, no need to handle it.
        DPRINTF(AtomicEdgeCPU, "----------------"
                "Executing reg-read[Rid:%i].----------------\n", 
                reg_read->getInstID() );
        
        fault = reg_read->execute();
       
        // Mark reg read as executed.
        //reg_read->setReadRegExecuted();
        reg_read->setExecuted();

        atomicExecutedReads++;

        if (fault != NoFault) {
           // A faulting read still produces a (zero) result tagged as
           // an exception token so its consumers can be woken.
           DPRINTF(AtomicEdgeCPU, "Reg-read generate a fault.!\n");
           reg_read->setDataflowTokenType(TheISA::Exception);
           reg_read->setIntResult(0);
        }

        wakeDependences(reg_read);
    }

    readyReads.clear();
    
    dcache_latency = 0;

    // Bound the number of dataflow passes; a well-formed block must
    // complete (or fault) before maxPass passes.
    int passes = 0;
    
    while (passes < maxPass) {
    
        passes++;
        
        // Execute
        DPRINTF(AtomicEdgeCPU, "%s insts are ready.\n",
            readyList.size());
            
        for (int i = 0; i < readyList.size(); i++) {

            int iq_id = readyList[i];
            assert(iq_id >= 0 && iq_id < instQueue.size());

            // Get instructions from IQ to execute.
            DynInstPtr inst = instQueue[iq_id];

            // Can never execute nop inst.
            assert(!inst->isNop());

            DPRINTF(AtomicEdgeCPU, "----------------"
                    "Execute: Processing PC %#x, [Tid:%i][Bid:%i] [Iid:%i]."
                    "----------------\n",
                    inst->readPC(),
                    inst->threadNumber,
                    inst->getBlockID(),
                    inst->getInstID());

            // Insts are from inst blocks, so maybe they can never
            // be set as squashed..
            // But I still reserve this codes for consistency with
            // M5 and future use.
            if (inst->isSquashed()) {
                DPRINTF(AtomicEdgeCPU, "Execute: Instruction was squashed.\n");
                continue;
            }

            if (inst->isBlockCompleted()) {
                DPRINTF(AtomicEdgeCPU,"Execute: Instruction was block-completed.\n");
                continue;
            }

            // Cleared only when a load must be replayed; everything
            // else that executes joins the completed list so its
            // consumers get woken at the end of the pass.
            bool add_to_completed_list = true;
            fault = NoFault;

            // Execute instructions.
            // Note that if the instruction faults, it will be handled
            // at the commit stage.

            // Note that the data token type will be set during wakeup
            // if the inst is the propagator rather than the token
            // generator.

            if (inst->isMemRef()) {
                DPRINTF(AtomicEdgeCPU, "Execute: Mem-ref instructions,"
                        " calculating address for memory "
                        "reference.\n");

                if (inst->isLoad()) {
                    TheISA::LsID load_lsid = inst->staticInst->getLSID();
                    
                    assert(ldstQueue[load_lsid].valid && 
                           !ldstQueue[load_lsid].store &&
                           !ldstQueue[load_lsid].executed);
                    
                    // Set the lsq entry.
                    ldstQueue[load_lsid].instQueueIdx = inst->getInstID();
                    
                    // If load inst has been nullified or it
                    // is traped by the memory blocker, ignore it.
                    if (!inst->isNullified()) {
                    
                        DPRINTF(AtomicEdgeCPU,"Execute load[LSID:%i] while"
                                " memory blocker is [LSID:%i].\n", 
                                inst->staticInst->getLSID(),
                                memBlockerLSID);
                            
                        // Only loads older than the blocking store may
                        // proceed; younger ones are replayed later.
                        if (load_lsid < memBlockerLSID) {
                        
                            fault = inst->execute();
                            inst->setExecuted();

                            // Set flags in lsq entry.
                            ldstQueue[load_lsid].executed = true;

                            // Set these two variables so that a
                            // detailed model can take advantage of
                            // this to implement a perfect memory
                            // dependence unit.
                            ldstQueue[load_lsid].effAddr = inst->effAddr;
                            ldstQueue[load_lsid].accSize = inst->memAccSize;
     
                            atomicExecutedLoads++;       
                            
                        } else {
                            DPRINTF(AtomicEdgeCPU,"Load waits for"
                                    " memory dependence.\n");

                            // Queue for replay on the next pass (the
                            // blocker may have completed by then).
                            replayList.push_back(inst->getInstID());

                            add_to_completed_list = false;
                        }
                        
                    } else {
                        DPRINTF(AtomicEdgeCPU,"Load is nullified.\n");
                        inst->setIntResult(0);
                        inst->setExecuted();

                        ldstQueue[load_lsid].nullified = true;
                        ldstQueue[load_lsid].executed = true;
                        
                        atomicExecutedNullifiedLoads++;
                    }

                    // If load caused a fault, we should set its output token
                    // to exception and set a fake result to wake up dependence.
                    if (fault != NoFault) {
                        DPRINTF(AtomicEdgeCPU, "Load generate a fault!\n");

                        // An exception token generated.
                        inst->setDataflowTokenType(TheISA::Exception);
                        inst->setIntResult(0);

                        // If this load generate or propagate a fault,
                        // we will set the status in the ldstEntry so
                        // that a detailed model can be notified
                        // through this. And once there is a fault,
                        // the effAddr and accSize will be set to 0,
                        // meaning that it is not a valid entry
                        // anymore.
                        ldstQueue[load_lsid].fault = true;
                        ldstQueue[load_lsid].effAddr = 0;
                        ldstQueue[load_lsid].accSize = 0;
                    } else {
                    
                        // @TODO: This may not be necessary ...
                        fault = inst->getFault();
                        
                        if (fault != NoFault) {
                            DPRINTF(AtomicEdgeCPU, "Load get a fault from its"
                                    " producer!\n");

                            // An exception token generated.
                            inst->setDataflowTokenType(TheISA::Exception);
                        }
                    }

                } else if (inst->isStore()) {
                    TheISA::LsID store_lsid = inst->staticInst->getLSID();
                    
                    assert(ldstQueue[store_lsid].valid && 
                        ldstQueue[store_lsid].store &&
                        !ldstQueue[store_lsid].executed);
                    
                    // Set the lsq entry.
                    ldstQueue[store_lsid].instQueueIdx = inst->getInstID();
                    
                    if (!inst->isNullified()) {

                        // Store will write back in commit.
                        // Here we just calculate the effective 
                        // address.
                        fault = inst->eaComp();

                        // Set these two variables so that a
                        // detailed model can take advantage of
                        // this to implement a perfect memory
                        // dependence unit.
                        ldstQueue[store_lsid].effAddr = inst->effAddr;
                        ldstQueue[store_lsid].accSize = inst->memAccSize;

                        //updateMemBlockerLSID();

                        atomicExecutedStores++;
                        
                    } else {
                        DPRINTF(AtomicEdgeCPU, "Store is nullified.\n");

                        ldstQueue[store_lsid].nullified = true;

                        atomicExecutedNullifiedStores++;
                    }
                    
                    // Mark store inst as executed.
                    // Although the store hasn't been executed,
                    // it has got the effective address and 
                    // the value to be written. Thereby we 
                    // consider it as executed here.
                    inst->setExecuted();

                    // Set the lsq entry
                    ldstQueue[store_lsid].executed = true;

                    // Increment executed number of stores in this block
                    curEdgeBlockPtr->incReceivedStore();

                    DPRINTF(AtomicEdgeCPU, "Set store mask with lsid %i\n", 
                            inst->staticInst->getLSID());

                    // Set store mask for block completion conditions
                    curEdgeBlockPtr->setStoreMask(inst->staticInst->getLSID());
                    
                    // Only update the blocker LSID when the current
                    // blocker is completed.
                    if (store_lsid == memBlockerLSID) {
                        updateMemBlockerLSID();
                    }

                    if (fault != NoFault) {
                        // If the instruction faulted, 
                        // then we need to set the fault
                        // to its block class so that commit can handle it.

                        // There's no need for changing the store data token type
                        // to exception because it's the leaf of whole graph.
                        DPRINTF(AtomicEdgeCPU, "Store has fault %s!"
                                " [Bid:%lli][Iid:%lli]\n",
                                fault->name(),
                                inst->getBlockID(),
                                inst->getInstID() );

                        curEdgeBlockPtr->setFault(fault);

                        // If this store generate or receive a fault,
                        // we will set the status in the ldstEntry so
                        // that a detailed model can be notified
                        // through this. And once there is a fault,
                        // the effAddr and accSize will be set to 0,
                        // meaning that it is not a valid entry
                        // anymore.
                        ldstQueue[store_lsid].fault = true;
                        ldstQueue[store_lsid].effAddr = 0;
                        ldstQueue[store_lsid].accSize = 0;

                    } else {
                        // @TODO: this may not be necessary ...
                        // Store might receive fault from other insts.

                        fault = inst->getFault();

                        if (fault != NoFault) {

                            curEdgeBlockPtr->setFault(fault);

                            DPRINTF(AtomicEdgeCPU, "Store receive fault %s"
                                    " [Bid%lli][Iid%lli], set it to block.\n",
                                    fault->name(), 
                                    inst->getBlockID(), 
                                    inst->getInstID() );
                        }
                    }
                } else {
                    panic("Unexpected memory type!\n");
                }

            } else if (inst->isControl()) {

                if (!inst->isNullified()) {
                    // Execute pc control inst
                    fault = inst->execute();

                    atomicExecutedControls++;

                    // branch target has been set in execute of this control inst
                    curEdgeBlockPtr->incReceivedExit();
                    curEdgeBlockPtr->recordExitID(inst->staticInst->getEXIT());
                    curEdgeBlockPtr->recordExitType(inst->staticInst->isCall(),
                                                    inst->staticInst->isReturn(),
                                                    inst->staticInst->isIndirectCtrl());

                    if (inst->isSyscall()) {
                        DPRINTF(AtomicEdgeCPU, "Syscall inst, mark block"
                                " as need syscall.\n");

                        // The syscall itself is invoked at commit.
                        curEdgeBlockPtr->setNeedSyscall();
                    }
                    
                } else {
                    // Nullified control inst, ignore it.
                     DPRINTF(AtomicEdgeCPU, "Control inst has been nullified\n");
                     
                     inst->setIntResult(0);
                     
                     atomicExecutedNullifiedControls++;
                }

                inst->setExecuted();

                if (fault != NoFault) {
                
                    curEdgeBlockPtr->setFault(fault);

                    DPRINTF(AtomicEdgeCPU, "Control generate fault %s"
                            " [Bid%lli][Iid%lli], set it to block.\n",
                            fault->name(),
                            inst->getBlockID(),
                            inst->getInstID() );
                } else {

                    fault = inst->getFault();
                    
                    if (fault != NoFault) {

                        // Control inst receive fault from others, mark it in block
                        curEdgeBlockPtr->setFault(fault);

                        DPRINTF(AtomicEdgeCPU, "Control receive fault %s"
                                " [Bid%lli][Iid%lli], set it to block.\n",
                                fault->name(),
                                inst->getBlockID(),
                                inst->getInstID() );
                    }
                }
            } else {

                // Ordinary (non-memory, non-control) instruction.
                if (!inst->isNullified()) {

                    fault = inst->execute();

                    atomicExecutedNormals++;

                    if (fault != NoFault) {
                        DPRINTF(AtomicEdgeCPU, "Normal inst generate a fault!\n");
                        inst->setDataflowTokenType(TheISA::Exception);
                        inst->setIntResult(0);
                    } 

                } else {
                
                    // For consistency
                    inst->setIntResult(0);
                    DPRINTF(AtomicEdgeCPU, "Normal inst nullified\n");

                    atomicExecutedNullifiedNormals++;

                }
                
                // @todo: Is this necessary to check the fault status
                // again here?
                fault = inst->getFault();

                if (fault != NoFault) {
                    DPRINTF(AtomicEdgeCPU, "Normal inst receive a fault"
                            " from its producer!\n");

                    inst->setDataflowTokenType(TheISA::Exception);                        
                }

                inst->setExecuted();

                // TEST instructions record the outcome of their
                // predicate in testInstPath (true when the result is
                // positive) for later predication bookkeeping.
                if (inst->isTest()) {
                    if (inst->getIntResult() > 0) {
                        int8_t test_path_id = inst->getTestInstPathID();

                        assert(test_path_id >= 0 &&
                                test_path_id < testInstPath.size());

                        testInstPath[test_path_id] = true;

                        DPRINTF(EdgeBlock, "Set pos[%i] in test path inst.\n",
                                test_path_id);
                    }
                }
            }

            // A fault recorded on the block aborts the rest of this
            // pass; commit will handle it.
            if (curEdgeBlockPtr->getFault() != NoFault) {
                DPRINTF(AtomicEdgeCPU, "Fault detected in the leaf node,"
                        " skip execution.\n");
                break;
            }

            // Wakeup consumers and propagate token with respect to
            // different situations.
            if (add_to_completed_list) {
                completedList.push_back(inst->getInstID());
            }
            
            atomicExecutedInsts ++;

            countInst();
        }

        if (curEdgeBlockPtr->getFault() != NoFault) {
            DPRINTF(AtomicEdgeCPU, "Instruction block[Bid:%lli] has fault."
                " Go to commit directly!\n",
                curEdgeBlockPtr->getBlockID());
            break;
        }

        if (curEdgeBlockPtr->isCompletionCondSatisfied()) {
            DPRINTF(AtomicEdgeCPU,"Instruction block[Bid:%lli] is"
                    " ready for committing.\n",
                    curEdgeBlockPtr->getBlockID());

            curEdgeBlockPtr->setCompleted();

            break;
        }

        // Ready list should have been emptied.
        readyList.clear();

        // Wakeup consumers.
        for (int i = 0; i < completedList.size(); i++) {

            int completed_id = completedList[i];

            assert(completed_id >= 0 && completed_id < instQueue.size());

            DynInstPtr completed_inst = instQueue[completed_id];
            
            wakeDependences(completed_inst);

            TheISA::Predication inst_pred = 
                completed_inst->staticInst->getPredication();

            // Predicated instructions that completed update the global
            // predication status for their inst-queue slot.
            if (inst_pred == TheISA::PredUponTrue ||
                inst_pred == TheISA::PredUponFalse) {

                assert(!completed_inst->isGRegWR());
                assert(completed_inst->isPredMatched());

                setPredicationStatus(completed_inst->getInstID());
            }
        }

        completedList.clear();

        // Check the replay list to see whether 
        // there are loads need to be replayed.
        for (int i = 0; i < replayList.size(); i++) {

            int replay_id = replayList[i];
            
            assert(replay_id >=0 && replay_id < instQueue.size());
            
            readyList.push_back(replay_id);
        }
        replayList.clear();
    }

    DPRINTF(AtomicEdgeCPU, "Finish execution using %i passes.\n", passes);

    // If this fires, the loop exhausted maxPass passes without the
    // block completing or faulting (dataflow deadlock/livelock).
    assert(passes < maxPass);
}

void
AtomicEdgeCPU::postExecute()
{
    DPRINTF(AtomicEdgeCPU, "\nPostExecute!\n");
    // Right now there's actually nothing to do.
    traceFunctions(thread->readPC());
}

void
AtomicEdgeCPU::commit()
{
    // Commit the current instruction block.  On a fault, the fault is
    // invoked and the block PC is left untouched so the block will be
    // replayed.  Otherwise: write back stores, commit register writes,
    // handle a pending syscall, advance the block PC, and mark the
    // block committed.  The per-block scratch lists are reset on both
    // paths.
    assert(curEdgeBlockPtr);
    // An instruction block without satisfy the completion
    // condition can reach here just because it has got a 
    // fault.
    assert(curEdgeBlockPtr->isCompletionCondSatisfied() || 
        (curEdgeBlockPtr->getFault() != NoFault));

    DPRINTF(AtomicEdgeCPU, "\nCommit!\n");

    Fault fault = curEdgeBlockPtr->getFault();

    if (fault != NoFault) {
    
        DPRINTF(AtomicEdgeCPU, "Fault %s detected for "
                "block[Bid:%lli].\n",
                fault->name(), 
                curEdgeBlockPtr->getBlockID());
            
        fault->invoke(tc);

        // If a fault is encountered, this instruction 
        // block should be replayed. So we just return
        // here without updating the instruction block PC.
    } else {

        DPRINTF(AtomicEdgeCPU, "Committing instruction "
                "block[Bid:%lli].\n",
                curEdgeBlockPtr->getBlockID());

        // SimPoint mode: stop simulation once the start point is hit.
        // The exit event is scheduled for the next tick and the CPU is
        // idled immediately.
        if (simPointSim && !preExecuteMode) {
            if (simPoint->checkStartPoint(curEdgeBlockPtr)) {
                Event *limit_event =
                        new SimLoopExitEvent("simulate() limit reached", 0);
                // Assume to exit this cycle.
                schedule(limit_event, curTick + 1);
                // Stop the cpu
                _status = Idle;
                return;
            }
        }

#ifndef NDEBUG
        // Debug builds dump the block's architectural effects (sorted
        // for deterministic output) to the AtomicEdgeOutput trace.
        std::string sym_str;
        Addr sym_addr;
        debugSymbolTable->findNearestSymbol(curEdgeBlockPtr->getStartPC(),
                                            sym_str, sym_addr);

        DPRINTFR(AtomicEdgeOutput, "PC %#x\n.BLOCK %s\n", 
            curEdgeBlockPtr->getStartPC(), sym_str.c_str());

        std::vector<DynInstPtr> temp_stores;
        std::vector<DynInstPtr> temp_writes;
#endif

        bool need_syscall = curEdgeBlockPtr->isNeedSyscall();

        // Commit stores
        // Walk the store mask bit by bit; bit i set means LSID i holds
        // an executed store (possibly nullified) to write back now.
        uint32_t store_mask = curEdgeBlockPtr->getStoreMask();
        for (int i = 0; (i < ldstQueueSize) && (store_mask > 0); i++) {
        
            if (store_mask & 0x1) {

                assert(ldstQueue[i].valid && 
                    ldstQueue[i].executed && 
                    ldstQueue[i].store);    
                    
                int store_id = ldstQueue[i].instQueueIdx;
                assert(store_id >= 0 && store_id < instQueueSize);

                if (ldstQueue[i].nullified) {
                    DPRINTF(AtomicEdgeCPU, "Nullified"
                            " store[LSID:%i].\n", 
                            i);
                    store_mask >>= 1;
                    continue;
                }

                DynInstPtr store_inst = instQueue[store_id];
                assert(store_inst->isStore() && 
                       !store_inst->isNullified());
                
                // The effective address was already computed (and any
                // fault caught) at execute time, so the write-back is
                // expected to be fault-free here.
                Fault fault = store_inst->execute();
                assert(fault == NoFault);

#ifndef NDEBUG
                temp_stores.push_back(store_inst);
#endif
                
            }
            store_mask >>= 1;
        }

#ifndef NDEBUG
        std::sort(temp_stores.begin(), temp_stores.end(), effAddrComp());
        for (int idx = 0; idx < temp_stores.size(); idx++) {

            DPRINTFR(AtomicEdgeOutput, "M[%#x,%i] %#x\n", 
                    temp_stores[idx]->effAddr,
                    temp_stores[idx]->memAccSize,
                    temp_stores[idx]->getIntIQOperand(1));

        }
        temp_stores.clear();
#endif 

        // Commit writes.
        // The write queue is walked in slot order; empty slots and
        // nullified writes are skipped.
        for (int write_id = 0; write_id < writeQueueSize; write_id++) {
            //int write_id = readyWrites[i];
            //assert(write_id >= 0 && write_id < writeQueueSize);

            DynInstPtr write_inst = writeQueue[write_id];

            if (write_inst) {

                if (write_inst->isNullified()) {
                    DPRINTF(AtomicEdgeCPU, "Nullified write[Wid:%i].\n", 
                            write_id);
                    continue;
                }

                write_inst->execute();
                write_inst->setExecuted();

                // If this is a syscall block, record the branch target of
                // the syscall.
                if (need_syscall) {
                    if (write_inst->getMappedDestReg(0) ==
                        TheISA::ReturnAddressReg) {

                        Addr ret_addr = write_inst->getIntIQOperand(0);

                        curEdgeBlockPtr->setBranchTarget(ret_addr);
                        curEdgeBlockPtr->recordExitType(false,true,false);
                    }
                }

                // Dump exec trace
                if (write_inst->traceData) {
                    write_inst->traceData->dump();
                    delete write_inst->traceData;
                    write_inst->traceData = NULL;
                }

#ifndef NDEBUG
                temp_writes.push_back(write_inst);
#endif
            }
        }

#ifndef NDEBUG
        std::sort(temp_writes.begin(), temp_writes.end(), registerIdxComp());
        for (int idx = 0; idx < temp_writes.size(); idx++) {

            DPRINTFR(AtomicEdgeOutput, "G[%i] %#x\n", 
                     temp_writes[idx]->getMappedDestReg(0),
                     temp_writes[idx]->getIntIQOperand(0));            

        }
        temp_writes.clear();
#endif

        // Handle system call.
        if (need_syscall) {
            // This block need syscall, handle it.

            DPRINTF(AtomicEdgeCPU, "Need syscall, handle it\n");

            // Exit syscall shouldn't be invoked because I expect the
            // real processor to finish simulation.
            if (preExecuteMode && 
                tc->readIntReg(TheISA::SyscallNumReg) == 1/*exit*/) {

                DPRINTF(AtomicEdgeCPU, "The exit syscall for pre"
                        " exuecte mode! No need to invoke.\n");
            } else {
                fault = TheISA::genSyscallFault();
                fault->invoke(tc);
            }
        } 

        // Move the block PC on to the next block.
        advancePC();

        atomicCommitCommittedBlocks++;
        atomicCommitCommittedInsts += curEdgeBlockPtr->executedInsts;

        curEdgeBlockPtr->setCommitted();
    }

    //resetQueues();
    
    // Reset the per-block scratch state (done on both the fault and
    // the commit path).
    readyList.clear();
    completedList.clear();
    replayList.clear();
    readyReads.clear();
    readyWrites.clear();
    
    // 0xFFFF acts as the "no memory blocker" sentinel until the next
    // block installs its own blocker LSID.
    memBlockerLSID = 0xFFFF;
}

void
AtomicEdgeCPU::tick()
{
    DPRINTF(AtomicEdgeCPU, "\n--------\n"
        "Tick\n--------\n");

    // Total simulated latency (in ticks) accumulated by the blocks
    // executed during this tick event.
    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {

        resetQueues();

        predicationStatus.reset();

        testInstPath.clear();
        testInstPath.resize(0);
    
        numCycles++;
        
        // Increment block id.
        globalBlockID++;

        // A new block will be created.
        // This should be called in tick function
        // of atomic model in order to get a
        // new inst block each time it ticks.
        curEdgeBlockPtr = new EdgeBlock(globalBlockID,this);

        checkPcEventQueue();

        Tick icache_latency = 0;
        bool icache_access = true;
        
        fetch(icache_latency);
        
        if (curEdgeBlockPtr->getFault() == NoFault) {
            assert(curEdgeBlockPtr);
            
            // In preExecute, inst block will be built
            // and some conditions will be checked.
            preExecute(curEdgeBlockPtr->getChunkNum());

            dcache_latency = 0;
            // Execute the instruction block.
            execute();

            postExecute();

            // Fold the simulated i-/d-cache latencies into this
            // tick's latency, rounded up to a whole-cycle boundary.
            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                // BUGFIX: round a partial cycle up by a full cycle
                // (ticks(1)), not by a single tick.  Adding 1 left
                // the stall misaligned with the clock whenever
                // stall_ticks was not a multiple of the clock period.
                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }

        // Commit the executed instruction block
        commit();

        curEdgeBlockPtr = NULL;
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}

Fault
AtomicEdgeCPU::execute(Addr block_addr)
{
    DPRINTF(AtomicEdgeCPU, "\n--------\n"
        "Pre Execute Tick\n--------\n");

    assert(block_addr > 0);

    // Start from a clean per-block state.
    resetQueues();
    predicationStatus.reset();
    testInstPath.clear();
    testInstPath.resize(0);

    // Redirect the thread to the requested block before fetching.
    thread->setBlockPC(block_addr);

    numCycles++;

    // Every fetched block gets a fresh, unique id.
    globalBlockID++;

    // Allocate the container that will hold the instructions of the
    // block we are about to fetch and build.
    curEdgeBlockPtr = new EdgeBlock(globalBlockID, this);

    // This mode does not simulate stalls, so the fetch latency is
    // collected but otherwise ignored.
    Tick unused_icache_latency = 0;
    fetch(unused_icache_latency);

    if (curEdgeBlockPtr->getFault() == NoFault) {
        assert(curEdgeBlockPtr);

        // Build the inst block and run the usual pre-checks, then
        // execute the block and post-process it.
        preExecute(curEdgeBlockPtr->getChunkNum());
        execute();
        postExecute();
    }

    // Commit whatever was executed (or fault-handled) above.
    commit();

    return curEdgeBlockPtr->getFault();
}

Addr
AtomicEdgeCPU::getBranchTarget()
{
    // The branch target is only meaningful once the current block
    // has gone through commit().
    assert(curEdgeBlockPtr);
    assert(curEdgeBlockPtr->isCommitted());

    return curEdgeBlockPtr->getBranchTarget();
}

TheISA::ExitID
AtomicEdgeCPU::getExitID()
{
    // Query the committed block for the id of the exit it took.
    assert(curEdgeBlockPtr);
    assert(curEdgeBlockPtr->isCommitted());

    return curEdgeBlockPtr->getExitID();
}

TheISA::ExitType
AtomicEdgeCPU::getExitType()
{
    // Query the committed block for the kind of exit it took.
    assert(curEdgeBlockPtr);
    assert(curEdgeBlockPtr->isCommitted());

    return curEdgeBlockPtr->getExitType();
}

bool
AtomicEdgeCPU::isNeedSyscall()
{
    // Report whether the committed block requested a system call.
    assert(curEdgeBlockPtr);
    assert(curEdgeBlockPtr->isCommitted());

    return curEdgeBlockPtr->isNeedSyscall();
}

bool
AtomicEdgeCPU::getLsqEntry(TheISA::LsID lsid, Addr &eff_addr,
        int &acc_size, int &inst_id, bool &nullified, bool &fault)
{
    LsqEntry &entry = ldstQueue[lsid];

    // An invalid slot has nothing to report.
    if (!entry.valid) {
        DPRINTF(EdgeFetch, "LdstEntry[%i] is not valid\n",
                lsid);
        return false;
    }

    // Any store found here must already have executed.
    // (!store || executed) is the De Morgan form of the original
    // (store && executed) || !store.
    assert(!entry.store || entry.executed);

    if (!entry.store && !entry.executed) {
        // A load that never executed sits on a false-predicated path
        // and cannot affect the architectural output of this block.
        DPRINTF(EdgeFetch, "LdstEntry[%i] has got a load"
                " that has not been executed\n",
                lsid);
        return false;
    }

    DPRINTF(EdgeFetch, "LdstEntry[%i] is valid\n",
            lsid);

    assert(entry.instQueueIdx >= 0);

    // Valid entry: hand its fields back through the out-parameters.
    eff_addr  = entry.effAddr;
    acc_size  = entry.accSize;
    inst_id   = entry.instQueueIdx;
    nullified = entry.nullified;
    fault     = entry.fault;

    return true;
}

void
AtomicEdgeCPU::printAddr(Addr a)
{
    // Delegate address printing to the data-cache port.
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicEdgeCPU Simulation Object
//
AtomicEdgeCPU *
AtomicEdgeCPUParams::create()
{
    // This CPU model is always instantiated with exactly one thread.
    numThreads = 1;

    // The params object (this) is handed to the CPU constructor.
    return new AtomicEdgeCPU(this);
}
