//! @file iris_ring.cc
//! @brief This program builds a simple simulator of multicore systems. Its purpose is to
//! demonstrate how to build such a simulator using the Manifold framework.
//! The multicore system is illustrated as follows:
//!
//! @verbatim
//!      -----------       -----------     ----------------      ----------
//!     | processor |     | processor |   | mem controller |    | L2 cache |
//!      -----------       -----------     ----------------      ----------
//!           |                 |                 |                  |
//!        -------           -------              |                  |
//!       | cache |         | cache |             |                  |
//!        -------           -------              |                  |
//!           |                 |                 |                  |
//!           |                 |                 |                  |
//!  ---------------------------------------------------------------------------
//!     ------------      ------------       ------------      ------------ 
//!    | NetIntface |    | NetIntface |     | NetIntface |    | NetIntface |  
//!     ------------      ------------       ------------      ------------ 
//!  ---------------------------------------------------------------------------
//! @endverbatim
//!
//! In this program we use the ring network of Iris.
//!
//! The components used in this program are:
//! - qsimlib_core_t: a processor model of Zesto.
//!                  It is built with the QSim library and uses QSim to get instructions.
//! - CaffDRAM: a memory controller model.
//! - mcp-cache: a coherence cache model.
//! - Iris: this program uses a ring network.
//!
//! NOTE:
//! Since the processor model is built with the Qsim library, and all processors should
//! share the same Qsim OS domain object, in this program all the processors must be
//! in the same LP. In fact, we only allow this program to be run with 1 LP.
//!
//! In this program the configuration for the components is provided by a configuration
//! file which is parsed with libconfig++. An example can be found in conf4x1.cfg.
//! 
//! To run this program, type:
//! @code
//! mpirun -np 1 iris_ring <conf_file>
//! @endcode
//! where <conf_file> is the name of the configuration file.
//!
//!
//! With the Iris ring network, the IDs of the network interfaces are 0 to N-1 where N is X*Y.
//! In this program the IDs of the nodes are also 0 to N-1, so the mapping from node ID
//! to network interface ID is straightforward.
//!
#include "kernel/clock.h"
#include "kernel/component.h"
#include "kernel/manifold.h"
#include "iris/genericTopology/genericTopoCreator.h"
#include "mcp-cache/MESI_L1_cache.h"
#include "mcp-cache/MESI_L2_cache.h"
#include "zesto/qsimlib-core.h"
#include "CaffDRAM/Controller.h"
#include "CaffDRAM/McMap.h"
#include "qsim.h"
#include <stdlib.h>
#include <cassert>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <set>
#include <vector>
#include <libconfig.h++>
#include "mpi.h"


using namespace std;
using namespace manifold::kernel;
using namespace manifold::mcp_cache_namespace;
using namespace manifold::iris;
using namespace manifold::zesto;
using namespace manifold::caffdram;
using namespace libconfig;


// use a data structure to represent either a core node or a MC node or an L2 node.
// use a data structure to represent either a core node or a MC node or an L2 node.
// The enum is named (rather than anonymous) so the role tags can be referred to
// as a type; values still convert implicitly to int, so existing uses compile.
enum Node_type {EMPTY_NODE=0, CORE_NODE, MC_NODE, L2_NODE};

//! @brief Per-node record of the Manifold component IDs placed at one
//! network node. Exactly one role is populated, selected by 'type'.
struct Node_cid {
    int type;           //one of Node_type: EMPTY_NODE, CORE_NODE, MC_NODE, or L2_NODE
    CompId_t proc_cid;  //processor component ID; valid only for CORE_NODE (could use a union here)
    CompId_t cache_cid; //cache component ID: L1 cache for CORE_NODE, L2 cache for L2_NODE
    CompId_t mc_cid;    //memory-controller component ID; valid only for MC_NODE
};




int main(int argc, char** argv)
{
    if(argc != 2) {
        cerr << "Usage: mpirun -np 1 " << argv[0] << " <config_file>" << endl;
	exit(1);
    }

    Config config;
    try {
	config.readFile(argv[1]);
    }
    catch (FileIOException e) {
        cerr << "Cannot read configuration file " << argv[1] << endl;
	exit(1);
    }
    catch (ParseException e) {
        cerr << "Cannot parse configuration file " << argv[1] << endl;
	exit(1);
    }

    int temp_x_dimension;
    int temp_y_dimension;

    try {
	temp_x_dimension = config.lookup("network.x_dimension");
	temp_y_dimension = config.lookup("network.y_dimension");
    }
    catch (SettingNotFoundException e) {
	cout << e.getPath() << " not set." << endl;
	exit(1);
    }
    catch (SettingTypeException e) {
	cout << e.getPath() << " has incorrect type." << endl;
	exit(1);
    }

    const int X_DIMENSION = temp_x_dimension;
    const int Y_DIMENSION = temp_y_dimension;

    assert(Y_DIMENSION == 1);

    assert(X_DIMENSION > 0 && Y_DIMENSION > 0);
    assert(X_DIMENSION * Y_DIMENSION > 1);

    const int MAX_NODES = X_DIMENSION * Y_DIMENSION; //max no. of nodes

    Manifold::Init(argc, argv);

    int N_LPs = 1; //number of LPs
    MPI_Comm_size(MPI_COMM_WORLD, &N_LPs);
    cout << "Number of LPs = " << N_LPs << endl;
    if(N_LPs != 1) {
        cerr << "Number of LPs must be 1 !" << endl;
	exit(1);
    }

    //==========================================================================
    // Configuration parameters
    //==========================================================================
    int FREQ;
    Ticks_t STOP;
    cache_settings l1_cache_settings;
    cache_settings l2_cache_settings;
    unsigned L1_MSHR_SIZE;
    unsigned L2_MSHR_SIZE;

    vector<int> mc_node_idx_vec;
    set<int> mc_node_idx_set; //set is used to ensure each index is unique

    vector<int> l2_node_idx_vec;
    set<int> l2_node_idx_set; //set is used to ensure each index is unique

    vector<int> proc_node_idx_vec;
    set<int> proc_node_idx_set; //set is used to ensure each index is unique

    ring_init_params ring_params;

    try {
	FREQ = config.lookup("clock_frequency");
	assert(FREQ > 0);
	STOP = config.lookup("simulation_stop");

	// No configuration for Processor

	//cache parameters
	l1_cache_settings.name = config.lookup("l1_cache.name");
	l1_cache_settings.size = config.lookup("l1_cache.size");
	l1_cache_settings.assoc = config.lookup("l1_cache.assoc");
	l1_cache_settings.block_size = config.lookup("l1_cache.block_size");
	l1_cache_settings.hit_time = config.lookup("l1_cache.hit_time");
	l1_cache_settings.lookup_time = config.lookup("l1_cache.lookup_time");
	l1_cache_settings.replacement_policy = RP_LRU;
	L1_MSHR_SIZE = config.lookup("l1_cache.mshr_size");

	l2_cache_settings.name = config.lookup("l2_cache.name");
	l2_cache_settings.size = config.lookup("l2_cache.size");
	l2_cache_settings.assoc = config.lookup("l2_cache.assoc");
	l2_cache_settings.block_size = config.lookup("l2_cache.block_size");
	l2_cache_settings.hit_time = config.lookup("l2_cache.hit_time");
	l2_cache_settings.lookup_time = config.lookup("l2_cache.lookup_time");
	l2_cache_settings.replacement_policy = RP_LRU;
	L2_MSHR_SIZE = config.lookup("l2_cache.mshr_size");


	//network parameters
	// x and y dimensions already specified
	ring_params.no_nodes = MAX_NODES;
	ring_params.no_vcs = config.lookup("network.num_vcs");
	ring_params.credits = config.lookup("network.credits");
	ring_params.link_width = config.lookup("network.link_width");
	ring_params.rc_method = RING_ROUTING;

	//processor configuration
	//the node indices of processors are in an array, each value between 0 and MAX_NODES-1
	Setting& setting_proc = config.lookup("processor.node_idx");
	int num_proc = setting_proc.getLength(); //number of processors
	assert(num_proc >=1 && num_proc < MAX_NODES);

	proc_node_idx_vec.resize(num_proc);

	for(int i=0; i<num_proc; i++) {
	    assert((int)setting_proc[i] >=0 && (int)setting_proc[i] < MAX_NODES);
	    proc_node_idx_set.insert((int)setting_proc[i]);
	    proc_node_idx_vec[i] = (int)setting_proc[i];
	}
	assert(proc_node_idx_set.size() == (unsigned)num_proc); //verify no 2 indices are the same


	//memory controller configuration
	//the node indices of MC are in an array, each value between 0 and MAX_NODES-1
	Setting& setting_mc = config.lookup("mc.node_idx");
	int num_mc = setting_mc.getLength(); //number of mem controllers
	assert(num_mc >=1 && num_mc < MAX_NODES);

	mc_node_idx_vec.resize(num_mc);

	for(int i=0; i<num_mc; i++) {
	    assert((int)setting_mc[i] >=0 && (int)setting_mc[i] < MAX_NODES);
	    mc_node_idx_set.insert((int)setting_mc[i]);
	    mc_node_idx_vec[i] = (int)setting_mc[i];
	}
	assert(mc_node_idx_set.size() == (unsigned)num_mc); //verify no 2 indices are the same

	//verify MC indices are not used by processors
	for(int i=0; i<num_mc; i++) {
	    for(int j=0; j<num_proc; j++) {
	        assert(mc_node_idx_vec[i] != proc_node_idx_vec[j]);
	    }
	}

	//L2 configuration
	//the node indices of L2 are in an array, each value between 0 and MAX_NODES-1
	Setting& setting_l2 = config.lookup("l2_cache.node_idx");
	int num_l2 = setting_l2.getLength(); //number of L2 nodes
	assert(num_l2 >=1 && num_l2 < MAX_NODES);

	l2_node_idx_vec.resize(num_l2);

	for(int i=0; i<num_l2; i++) {
	    assert((int)setting_l2[i] >=0 && (int)setting_l2[i] < MAX_NODES);
	    l2_node_idx_set.insert((int)setting_l2[i]);
	    l2_node_idx_vec[i] = (int)setting_l2[i];
	}
	assert(l2_node_idx_set.size() == (unsigned)num_l2); //verify no 2 indices are the same

	//verify L2 indices are not used by processors
	for(int i=0; i<num_l2; i++) {
	    for(int j=0; j<num_proc; j++) {
	        assert(l2_node_idx_vec[i] != proc_node_idx_vec[j]);
	    }
	}
	//verify L2 indices are not used by MCs
	for(int i=0; i<num_l2; i++) {
	    for(int j=0; j<num_mc; j++) {
	        assert(l2_node_idx_vec[i] != mc_node_idx_vec[j]);
	    }
	}

    }
    catch (SettingNotFoundException e) {
	cout << e.getPath() << " not set." << endl;
	exit(1);
    }
    catch (SettingTypeException e) {
	cout << e.getPath() << " has incorrect type." << endl;
	exit(1);
    }

    Dsettings dram_settings;  //use default values


    //==========================================================================
    // create all the components.
    //==========================================================================
    Clock myClock(FREQ);

    // need storage for component IDs for connecting components

    Node_cid node_cids[MAX_NODES];

    //Terminal address to network address mapping
    //Using Simple_terminal_to_net_mapping means node ids must be 0 to NUM_NODES-1.
    Simple_terminal_to_net_mapping* mapping = new Simple_terminal_to_net_mapping();

    Ring<Coh_mem_req>* myNetwork = topoCreator<Coh_mem_req>::create_ring(myClock, &ring_params, mapping, 0, 0); //network on LP 0




#define REDIRECT_COUT

#ifdef REDIRECT_COUT
    // create a file into which to write debug/stats info.
    int Mytid;
    MPI_Comm_rank(MPI_COMM_WORLD, &Mytid);
    char buf[10];
    sprintf(buf, "DBG_LOG%d", Mytid);
    ofstream DBG_LOG(buf);

    //redirect cout to file.
    std::streambuf* cout_sbuf = std::cout.rdbuf(); // save original sbuf
    std::cout.rdbuf(DBG_LOG.rdbuf()); // redirect cout
#endif


    const int NPROC = proc_node_idx_set.size();

    Qsim::OSDomain* qsim_osd = new Qsim::OSDomain(NPROC, "linux/bzImage");


    CaffDramMcMap* mc_map = new CaffDramMcMap(mc_node_idx_vec, dram_settings);
    PageBasedMap* l2_map = new PageBasedMap(l2_node_idx_vec, 12);


    //if(N_LPs <= 2)
    {
	LpId_t node_lp = 0;

	//Node ID is the same as its node index: between 0 and MAX_NODES-1
	int cpuid=0;
	for(int i=0; i<MAX_NODES; i++) {
	    if(mc_node_idx_set.find(i) != mc_node_idx_set.end()) { //MC node
		node_cids[i].type = MC_NODE;
		node_cids[i].mc_cid = Component :: Create<Controller>(node_lp, i, dram_settings);
	    }
	    else if(proc_node_idx_set.find(i) != proc_node_idx_set.end()) { //proc node
		node_cids[i].type = CORE_NODE;
		node_cids[i].proc_cid = Component :: Create<qsimlib_core_t>(node_lp, i, (char*)"6.cfg", qsim_osd, cpuid++);
		node_cids[i].cache_cid = Component :: Create<MESI_L1_cache>(node_lp, i, l1_cache_settings, l2_map, L1_MSHR_SIZE);
	    }
	    else if(l2_node_idx_set.find(i) != l2_node_idx_set.end()) { //L2 node
		node_cids[i].type = L2_NODE;
		node_cids[i].cache_cid = Component :: Create<MESI_L2_cache>(node_lp, i, l2_cache_settings, mc_map, L2_MSHR_SIZE);
	    }

	    else {
		node_cids[i].type = EMPTY_NODE;
	    }
	}
    }


    qsimlib_core_t :: Start_qsim(qsim_osd);


    for(int i=0; i<MAX_NODES; i++) {
    if(node_cids[i].type == CORE_NODE)
    cout << i << "  core node" << endl;
    else if(node_cids[i].type == MC_NODE)
    cout << i << "  mc node" << endl;
    else if(node_cids[i].type == L2_NODE)
    cout << i << "  l2 node" << endl;
    else
    cout << i << "  empty node" << endl;
    }


    const std::vector<CompId_t>& ni_cids = myNetwork->get_interface_id();


    //==========================================================================
    //Now connect the components
    //==========================================================================
    const std::vector<GenNetworkInterface<Coh_mem_req>*>& nis = myNetwork->get_interfaces();

    for(int i=0; i<MAX_NODES; i++) {
        if(node_cids[i].type == CORE_NODE) {
	    //some sanity check
	    qsimlib_core_t* proc = Component :: GetComponent<qsimlib_core_t>(node_cids[i].proc_cid);
	    if(proc != 0) {
		MESI_L1_cache* cache = Component :: GetComponent<MESI_L1_cache>(node_cids[i].cache_cid);
		assert(proc->id == cache->get_node_id());

		if(nis[i] != 0) { //only true when there is only 1 LP
		    assert(cache->get_node_id() == (int)nis[i]->get_id());
		}
	    }
	

	    //proc to cache
	    Manifold :: Connect(node_cids[i].proc_cid, qsimlib_core_t::Output0,
				node_cids[i].cache_cid, MESI_L1_cache::PORT_PROC,
				&MESI_L1_cache::handle_processor_request, 1);
	    //cache to proc
	    Manifold :: Connect(node_cids[i].cache_cid, MESI_L1_cache::PORT_PROC,
				node_cids[i].proc_cid, qsimlib_core_t::Input0,
				&qsimlib_core_t::cache_response_handler, 1);
	    //cache to interface
	    Manifold :: Connect(node_cids[i].cache_cid, MESI_L1_cache::PORT_L2,
				ni_cids[i], GenNetworkInterface<Coh_mem_req>::DATAFROMTERMINAL,
				&GenNetworkInterface<Coh_mem_req>::handle_new_packet_event, 1);
	    //interface to cache
	    Manifold :: Connect(ni_cids[i], GenNetworkInterface<Coh_mem_req>::DATATOTERMINAL,
				node_cids[i].cache_cid, MESI_L1_cache::PORT_L2,
				&MESI_L1_cache::handle_peer_and_manager_request, 1);

	    //Inside the network, routing is based on the interface IDs. The adapters have
	    //their own IDs. When one adapter (or its client to be more specific) sends
	    //data to another adapter (client), it uses adapter IDs. But it needs to know
	    //the destination's network interface ID so the network can deliver the packet.
	    //Therefore, the adapter layer should maintain a mapping between adapter IDs
	    //and network interface IDs.
	    //This is similar to the mapping between IP addresses and MAC addresses, except
	    //here the mapping is static and nothing like ARP is involved.

	    //In this simple network, we simplify things further such that the network interface
	    //IDs are the same as the adapter IDs, therefore the mapping becomes M(X)=X. So
	    //we don't need to keep any mapping. Sending to adapter X would be sending to
	    //network interface X.
	}
	else if(node_cids[i].type == MC_NODE) {
	    Controller* mc = Component :: GetComponent<Controller>(node_cids[i].mc_cid);
	    if(mc != 0 ) {
		if(nis[i] != 0) { //only true when both are in the same LP
		    assert(mc->get_nid() == (int)nis[i]->get_id());
		}
	    }
	    //mc to interface
	    Manifold :: Connect(node_cids[i].mc_cid, Controller::OUT,
				ni_cids[i], GenNetworkInterface<Coh_mem_req>::DATAFROMTERMINAL,
				&GenNetworkInterface<Coh_mem_req>::handle_new_packet_event, 1);
	    //interface to mc
	    Manifold :: Connect(ni_cids[i], GenNetworkInterface<Coh_mem_req>::DATATOTERMINAL,
				node_cids[i].mc_cid, Controller::IN,
				&Controller::handle_request, 1);
	}
	else if(node_cids[i].type == L2_NODE) {
	    //l2 to interface
	    Manifold :: Connect(node_cids[i].cache_cid, MESI_L2_cache::PORT_L1,
				ni_cids[i], GenNetworkInterface<Coh_mem_req>::DATAFROMTERMINAL,
				&GenNetworkInterface<Coh_mem_req>::handle_new_packet_event, 1);
	    //interface to l2
	    Manifold :: Connect(ni_cids[i], GenNetworkInterface<Coh_mem_req>::DATATOTERMINAL,
				node_cids[i].cache_cid, MESI_L2_cache::PORT_L1,
				&MESI_L2_cache::handle_incoming, 1);
	}
	else { //Empty node
	    //do nothing
	}

    }


    //==========================================================================
    // Register processors with clock to get things started
    //==========================================================================

    for(int i=0; i<MAX_NODES; i++) {
        if(node_cids[i].type == CORE_NODE) {
	    qsimlib_core_t* proc = Component :: GetComponent<qsimlib_core_t>(node_cids[i].proc_cid);
	    assert(proc);
	    Clock :: Register((core_t*)proc, &qsimlib_core_t::tick, (void(core_t::*)(void))0);
	}
    }

    for(int i=0; i<MAX_NODES; i++) {
        if(node_cids[i].type == CORE_NODE) {
	}
	else if(node_cids[i].type == MC_NODE) {
	    Controller* mc = Component :: GetComponent<Controller>(node_cids[i].mc_cid);
	    if(mc)
		mc->print_config(cout);
	}
    }


    Manifold::StopAt(STOP);
    Manifold::Run();

    for(int i=0; i<MAX_NODES; i++) {
        if(node_cids[i].type == CORE_NODE) {
	    //qsimlib_core_t* proc = Component :: GetComponent<qsimlib_core_t>(node_cids[i].proc_cid);
	    //if(proc)
		//proc->print_stats(cout);
	}
	else if(node_cids[i].type == MC_NODE) {
	    Controller* mc = Component :: GetComponent<Controller>(node_cids[i].mc_cid);
	    if(mc)
		mc->print_stats(cout);
	}
    }

    myNetwork->print_stats(cout);

    Manifold :: print_stats(cout);


#ifdef REDIRECT_COUT
    std::cout.rdbuf(cout_sbuf);
#endif

    Manifold :: Finalize();

}
