/*
 * Copyright (c) 2009-2010 Microelectronic Center,
 * Harbin Institute of Technology
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gou Pengfei
 */

#ifndef __CPU_EDGE_ATOMIC_HH__
#define __CPU_EDGE_ATOMIC_HH__

#include "cpu/edge/atomic/base.hh"
#include "params/AtomicEdgeCPU.hh"
#include "cpu/edge/edge_sim_point.hh"

class AtomicEdgeCPUImpl;
template<class Impl> class BaseAtomicEdgeCPU;

/**
 * Atomic-mode CPU model for the EDGE architecture.  Each step executes a
 * whole instruction block (fetch / map / execute / commit, see below).
 * It can also act as a pre-execution engine for a detailed timing model
 * (preExecuteMode): the detailed model calls execute(Addr) to run a block
 * early, during its own fetch stage.
 */
class AtomicEdgeCPU : public BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>
{
  public:

    // Shorthand aliases pulled from the CRTP base class and the target ISA.
    typedef BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::DynInst DynInst;
    typedef BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::DynInstPtr DynInstPtr;
    typedef BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::EdgeBlock EdgeBlock;
    typedef BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::EdgeBlockPtr EdgeBlockPtr;
    typedef BaseAtomicEdgeCPU<AtomicEdgeCPUImpl>::ImplState ImplState;
    typedef TheISA::ConsumerBitfield ConsumerBitfield;

    typedef TheISA::ExtMachInst ExtMachInst;
    typedef TheISA::BlockID BlockID;

    /** Construct from the generated parameter struct (params/AtomicEdgeCPU.hh). */
    AtomicEdgeCPU(AtomicEdgeCPUParams *params);
    virtual ~AtomicEdgeCPU();

    /** Post-construction initialization hook. */
    virtual void init();

    /** Execute an instruction block of the given address. This is
     * used by a detailed model to execute an instruction block in the
     * early stage of its pipeline. */
    Fault execute(Addr block_addr);

    /** Get the branch target of the current block. */
    Addr getBranchTarget();

    /** Get the branch exit ID of the current block. */
    TheISA::ExitID getExitID();

    /** Get the branch exit type of the current block. */
    TheISA::ExitType getExitType();

    /** Return whether the current block needs a system call. */
    bool isNeedSyscall();

    /**
     * Get the status of an lsqEntry, including the effective address,
     * access size and the instruction id of the executed instruction
     * for this lsqEntry.  The results are delivered through the
     * reference arguments; the return value indicates whether those
     * values are valid.
     */
    bool getLsqEntry(TheISA::LsID lsid, Addr &eff_addr, int &acc_size,
            int &inst_id, bool &nullified, bool &fault);

#ifndef NDEBUG

    /** Debug-only strict-weak-ordering comparator for DynInstPtr
     * (by register index, judging from the name -- confirm in the .cc). */
    struct registerIdxComp
    {
        bool operator() (const DynInstPtr &lhs, const DynInstPtr &rhs) const;
    };

    /** Debug-only strict-weak-ordering comparator for DynInstPtr
     * (by effective address, judging from the name -- confirm in the .cc). */
    struct effAddrComp
    {
        bool operator() (const DynInstPtr &lhs, const DynInstPtr &rhs) const;
    };

#endif

  private:

    /** SimPoint class for building checkpoints for EDGE. */
    EdgeSimPoint<AtomicEdgeCPUImpl> *simPoint;

    /** Indicate if simpoint mode is enabled. */
    bool simPointSim;

    /**
     * Indicate if this model works in preExecute mode, which means it
     * will be used to execute a block in the fetch stage of a
     * detailed model.
     */
    bool preExecuteMode;

    /** Event that drives the per-cycle simulation loop (tick()). */
    struct TickEvent : public Event
    {
        AtomicEdgeCPU *cpu;    // owning CPU, invoked from process()

        TickEvent(AtomicEdgeCPU *c);
        void process();
        const char *description() const;
    };

    TickEvent tickEvent;

    // NOTE(review): the members below are undocumented here; meanings
    // inferred from names -- confirm against the .cc.
    const int width;                   // presumably insts processed per tick
    bool locked;                       // presumably LL/SC (load-locked) state
    const bool simulate_data_stalls;   // presumably: account data-access latency
    const bool simulate_inst_stalls;   // presumably: account inst-fetch latency

    /** main simulation loop (one cycle) */
    void tick();

    // Although this is an atomic model,
    // the per-stage functions below keep
    // the whole process clear enough.

    /** Fetch instructions and form inst blocks. */
    void fetch(Tick & icache_latency);

    /** Map instructions of the current instruction block. */
    void map(DynInstPtr &inst);

    /** Execute inst blocks. */
    void execute();

    /** Commit inst blocks to machine state. */
    void commit();

    /** Build edge inst block according to block size. */
    void preExecute(int block_size);

    /** Post-execute step (counterpart of preExecute); see the .cc. */
    void postExecute();

    /** Fetch an instruction chunk with respect to cache block size. */
    Fault fetchChunk(Addr start_addr, int buffer_idx, Tick &icache_latency);

    /** Form a dynamic instruction class and add it into the curEdgeBlock. */
    void generateInst(TheISA::MachInst &inst_code, Addr pc,
        int inst_id, TheISA::BlockStatus status);

    /** Memory-system port for this CPU.  Which recv* callbacks are
     * actually usable in atomic mode is defined in the .cc. */
    class CpuPort : public Port
    {
      public:

        CpuPort(const std::string &_name, AtomicEdgeCPU *_cpu)
            : Port(_name, _cpu), cpu(_cpu)
        { }

        // presumably tracks whether snoop ranges have been announced
        // to the peer -- confirm in the .cc
        bool snoopRangeSent;

      protected:

        AtomicEdgeCPU *cpu;    // back-pointer to the owning CPU

        virtual bool recvTiming(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual void recvStatusChange(Status status);

        virtual void recvRetry();

        /** This CPU exports no device address ranges, but does snoop. */
        virtual void getDeviceAddressRanges(AddrRangeList &resp,
            bool &snoop)
        { resp.clear(); snoop = true; }

    };
    CpuPort icachePort;

    /** Block size of icache of cpu. */
    int iCacheBlockSize;

    /** Data-side port; overrides setPeer (implementation in the .cc). */
    class DcachePort : public CpuPort
    {
      public:
        DcachePort(const std::string &_name, AtomicEdgeCPU *_cpu)
            : CpuPort(_name, _cpu)
        { }

        virtual void setPeer(Port *port);
    };
    DcachePort dcachePort;

    /** Cache block size of this cpu. */
    int dCacheBlockSize;

    CpuPort physmemPort;    // direct port to physical memory
    bool hasPhysMemPort;    // presumably set when physmemPort is connected -- confirm
    Request ifetch_req;     // reusable request object for instruction fetches
    Request data_read_req;  // reusable request object for data reads
    Request data_write_req; // reusable request object for data writes

    bool dcache_access;     // presumably: last access touched the dcache -- confirm
    Tick dcache_latency;    // presumably: latency of the last dcache access -- confirm

    Range<Addr> physMemAddr; // address range served directly via physmemPort

  public:
    /** Register the simulation statistics declared below. */
    virtual void regStats();

    /** Look up one of this CPU's ports by name (icache/dcache/physmem). */
    virtual Port *getPort(const std::string &if_name, int idx = -1);

    // Checkpointing support.
    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);
    virtual void resume();

    // CPU-switching support.
    void switchOut();
    void takeOverFrom(BaseCPU *oldCPU);

    virtual void activateContext(int thread_num, int delay);
    virtual void suspendContext(int thread_num);

    /** Atomic memory read into data; lq_idx is presumably the
     * load-queue slot of the requesting instruction -- confirm. */
    template <class T>
    Fault read(RequestPtr &req, T &data, int lq_idx);

    /** Atomic memory write of data; sq_idx is presumably the
     * store-queue slot of the requesting instruction -- confirm. */
    template <class T>
    Fault write(RequestPtr &req, T data, int sq_idx);

    /**
     * Print state of address in memory system via PrintReq (for
     * debugging).
     */
    void printAddr(Addr a);

    /** Simulation statistics.  The "Nullified" variants presumably
     * count predicated-off instructions -- confirm in regStats(). */
    Stats::Scalar atomicExecutedInsts;
    Stats::Scalar atomicExecutedNormals;
    Stats::Scalar atomicExecutedNullifiedNormals;
    Stats::Scalar atomicExecutedLoads;
    Stats::Scalar atomicExecutedNullifiedLoads;
    Stats::Scalar atomicExecutedStores;
    Stats::Scalar atomicExecutedNullifiedStores;
    Stats::Scalar atomicExecutedControls;
    Stats::Scalar atomicExecutedNullifiedControls;
    Stats::Scalar atomicExecutedReads;
    Stats::Scalar atomicExecutedWrites;
    Stats::Scalar atomicExecutedNullifiedWrites;

    Stats::Scalar atomicCommitCommittedBlocks;
    Stats::Scalar atomicCommitCommittedInsts;
};

#endif // __CPU_EDGE_ATOMIC_HH__
