

#include <queue>
#include "CentralController.h"

using namespace omnetpp;

namespace ai_sim {

Define_Module(CentralController);

void CentralController::initialize() {
    // Read cluster-shape parameters from the NED configuration.
    numMachine = par("numMachine");
    numSwitch = par("numSwitch");
    numOxc = par("numOxc");
    numGpusperMachine = par("numGpusperMachine");
    // Cluster flavor; selects the route algorithm in alloc_task().
    clustertype = ClusterType(par("clustertype").intValue());
    // Locate the sibling topology manager ("^" = parent module path).
    cModule * topo_module = getModuleByPath("^.topo_manager");
    topos = omnetpp::check_and_cast<TopoManager *> (topo_module);
    // Initialize random variable generator.
    // NOTE(review): fixed seed + C rand() bypasses OMNeT++'s managed RNG
    // streams; runs are deterministic but not controlled by the ini seed-set.
    srand(0);
}

void CentralController::alloc_gpu_resource(
    taskboard& tmp_taskboard) {
//////////////// Allocate gpu resources/////////////////////
// Greedy first-fit: scan all GPU nodes in index order and claim every
// enabled (i.e. free) one until the task's demand is met.
// Throws (via error()) if the cluster cannot satisfy the demand.
    const size_t needed = static_cast<size_t>(tmp_taskboard.the_task.numGpus);
    for (int i = 0; i < numMachine*numGpusperMachine; i++) {
        // Check the quota BEFORE claiming: the old post-push check handed
        // one GPU even to a task that asked for zero.
        if (tmp_taskboard.used_gpus.size() >= needed) {
            EV << "numGpusperMachine" << numGpusperMachine << endl
                << "numMachine" << numMachine << endl;
            EV << "Total gpus " << numGpusperMachine * numMachine << std::endl;
            EV << "Need gpus " << tmp_taskboard.the_task.numGpus << std::endl;
            break;
        }
        topoNode *node_ptr = topos->getWorkersTopo()->getNode(i);
        if (node_ptr->isEnabled()) {
            // Claim the GPU: record it on the task and mark it busy.
            tmp_taskboard.used_gpus.push_back(node_ptr);
            node_ptr->disable();
        }
    }
    if (tmp_taskboard.used_gpus.size() < needed) {
        EV << "Total gpus" << numGpusperMachine * numMachine  << std::endl;
        EV << "Need gpus" << tmp_taskboard.the_task.numGpus << std::endl;
        // error() throws cRuntimeError and never returns, so the old
        // `return` after it was unreachable.
        error("DONT HAVE ENOUGH GPUS");
    }
    EV << "Finished alloc gpu resources" << std::endl;
}

void CentralController::gen_comm_pair(
    taskboard& tmp_taskboard, AllreduceAlgorithm* algo) {
    // Delegate communication-pair construction to the chosen allreduce
    // algorithm (only RingAllReduce is supported at present).  The pairs
    // are inserted one by one into the taskboard's flow_pattern map, and
    // the ring runs in both directions.
    algo->generateCommPairs(tmp_taskboard, topos);
    EV << "Finished gen comm pairs" << std::endl;
}

void CentralController::gen_flows(
    taskboard& tmp_taskboard, AllreduceAlgorithm* algo) {
    // Let the allreduce algorithm materialize per-GPU flows for the task.
    // Per the original notes: the queue is sized to number-of-comm-pairs
    // plus one, and the flows generated depend on the task type.
    algo->generateFLows(tmp_taskboard, topos);
    EV << "Finished generate flows for gpus" << std::endl;
}


void CentralController::gen_route(
    taskboard& tmp_taskboard, RouteAlgorithm* algo) {
    // Compute and allocate routes for every comm pair, one at a time,
    // via shortest-path search.  The original author warns that the
    // time complexity of this step is quite high.
    algo->generateRoutes(tmp_taskboard, topos);
    EV << "Finished gen routes" << std::endl;
}

void CentralController::allocAllFlows(taskboard& tmp_taskboard) {
    // Merge the per-stage flow lists into one vector per GPU, so each GPU
    // receives a single TaskFlowVecMsg covering all stages of the task.
    map<int, FlowVector> allocing_flows;
    for (auto& stage : tmp_taskboard.stage_info) {
        for (auto& flows_on_gpu : stage.flows_on_gpu) {
            auto& target_vec = allocing_flows[flows_on_gpu.first];
            target_vec.insert(target_vec.end(),
                flows_on_gpu.second.begin(), flows_on_gpu.second.end());
        }
    }
    for (auto& flows_on_gpu : allocing_flows) {
        TaskFlowVecMsg *tf_msg = new TaskFlowVecMsg;
        // Log obviously corrupt outport ids before sending (debug aid).
        for (auto& flow : flows_on_gpu.second) {
            if (flow.outportId > 3670018 || flow.outportId < 0) {
                EV_ERROR << "outport" << flow.outportId << endl;
                EV_ERROR << "Src, Dest" << flow.srcIdx << " , " << flow.destIdx << endl;
            }
        }
        tf_msg->setNow_task(tmp_taskboard.the_task);
        tf_msg->setThe_flows(flows_on_gpu.second);
        tf_msg->setTargetId(flows_on_gpu.first);
        tf_msg->setKind(TASK_ALLOC_MSG);
        // Get the output port to the machine the Gpu is in.
        cModule *machine_mod = topos->getWorkersTopo()->
            getNode(flows_on_gpu.first)->getModule()->getParentModule();
        topoNode* machine_node =
            topos->getControlplaneTopo()->getNodeFor(machine_mod);
        // NOTE(review): asserting zero paths and then indexing path 0 below
        // looks contradictory -- presumably getNumPaths() counts a different
        // path set than getinPathfromDest(); confirm against topoNode.
        assert(machine_node->getNumPaths() == 0);
        // Reuse machine_node instead of re-running getNodeFor() three times
        // (the old code recomputed the same lookup for every log line).
        auto* in_path = machine_node->getinPathfromDest(0);
        int outport = in_path->getRemoteGateId();

        EV << "I am " << machine_mod->getFullPath() << std::endl;
        EV << "My local gate is "
            << in_path->getLocalGate()->getFullPath() << std::endl;
        EV << "The remote gate is "
            << in_path->getRemoteGate()->getFullPath() << std::endl;

        // Ownership of tf_msg transfers to the simulation kernel here.
        send(tf_msg, outport);
        EV << "Alloc a task to CPU" << flows_on_gpu.first << std::endl;
    }
    EV << "Finished alloc flows" << std::endl;
}

void CentralController::allocRoutes(const map<int, RouteVec>& routes_map) {
    int tempIndex = 0;
    for (auto tmp_routes : routes_map) {
        if (topos->getControlplaneTopo()->getNodeFor(
            getSimulation()->getModule(tmp_routes.first)) == nullptr) {
            // If the node is not the switch (for example nics)
            continue;
        }
        RouteVecMsg *ra_msg = new RouteVecMsg;
        ra_msg->setKind(ROUTE_ALLOC_MSG);
        ra_msg->setNew_route_table(tmp_routes.second);
        ra_msg->setTargetId(tmp_routes.first);
        // Get the output port to the machine the Gpu in.

        //route_algo = RouteAlgorithm::get(clustertype);
        // If cluster type is ElectricalCluster, controller will send route to adapter instead of oxc or switch directly
        if(clustertype == ClusterType::ElectricalCluster){
            EV<<"send to controller adapter"<<endl;
            ra_msg->setIndex(tempIndex);
            //std::cout<<"yaosi"<<tempIndex<<endl;
            send(ra_msg, "adapter_port");
            EV << "Alloc a route to adapter"<<endl; 
        }
        else{
            int outport = topos->getControlplaneTopo()->getNodeFor(
            getSimulation()->getModule(tmp_routes.first))->
            getinPathfromDest(0)->getRemoteGateId();

            EV << "The remote gate is " << topos->getControlplaneTopo()->getNodeFor(
                getSimulation()->getModule(tmp_routes.first))->
                getinPathfromDest(0)->getRemoteGate()->getFullPath() << std::endl;

            send(ra_msg, outport);
            EV << "Alloc a route to " << getSimulation()->
                getModule(tmp_routes.first)->getFullPath() << std::endl;

        }
        tempIndex += 1;
    }
}

void CentralController::alloc_task(const Task& tmp_task) {
    Enter_Method("alloc_task(const Task& tmp_task)");
    // Fresh bookkeeping record for the task; working_workers counts down
    // as FINISHED_MSGs arrive (see try_release_task()).
    taskboard tmp_taskboard = {
        .the_task = tmp_task,
        .flows_count = 0,
        .start_time = omnetpp::simTime(),
        .working_workers = tmp_task.numGpus
    };
    EV << "tasktype" << tmp_task.tasktype << endl;
    // Pick the collective algorithm from the task type and the routing
    // algorithm from the cluster type.
    allreduce_algo = AllreduceAlgorithm::get(tmp_task.tasktype);
    route_algo = RouteAlgorithm::get(clustertype);

    alloc_gpu_resource(tmp_taskboard);

    gen_comm_pair(tmp_taskboard, allreduce_algo);

    gen_flows(tmp_taskboard, allreduce_algo);

    gen_route(tmp_taskboard, route_algo);

    // Allocate routes for all stages.
    // const&: the old by-value loop copied every stage record.
    for (const auto& temp : tmp_taskboard.stage_info) {
        allocRoutes(temp.routes_on_switch);
    }

    // Allocate all flows to gpus at first
    allocAllFlows(tmp_taskboard);

    taskboards.insert(std::make_pair(tmp_task.taskId, tmp_taskboard));

    // NOTE(review): if any step above throws (error() does), these leak;
    // consider holding the algorithms in std::unique_ptr.
    delete allreduce_algo;
    delete route_algo;
}

void CentralController::try_release_task(const Task& tmp_task, TaskMsg* to_delete) {
    // Each worker reports FINISHED once; the task is actually released
    // only when the last worker of the task reports in.
    // Single find() replaces the old find + repeated operator[] lookups.
    auto it = taskboards.find(tmp_task.taskId);
    if (it == taskboards.end()) {
        error("NO SUCH taskboard, task id %d", tmp_task.taskId);
    } else if (it->second.working_workers > 1) {
        it->second.working_workers--;
        EV << "Remaining workers " <<
            it->second.working_workers<<" on task:"<<tmp_task.taskId << std::endl;
    } else {
        // Reference, not a copy: the old code duplicated the whole taskboard.
        taskboard& the_taskboard = it->second;
        get_statistics(the_taskboard);
        // Release the links claimed by every stage.
        for (auto& stage : the_taskboard.stage_info) {
            for (auto used_link_of_stage : stage.used_links) {
                used_link_of_stage->enable();
            }
        }
        // GPUs belong to the task, not to a stage: enable them exactly once
        // (the old code re-enabled the full GPU list once per stage).
        for (auto used_gpu : the_taskboard.used_gpus) {
            used_gpu->enable();
        }

        taskboards.erase(it);
        if (tmp_task.taskId == 0) {
            // endSimulation() throws cTerminationException, so the caller's
            // `delete ta_msg` never runs on this path -- free the msg here.
            delete to_delete;
            endSimulation();
        }
    }
}

void CentralController::get_statistics(const taskboard& the_taskboard) {
    EV << "The task info :" << std::endl;
    EV << "GPU number = " << the_taskboard.the_task.numGpus << std::endl;
    EV << "Batches number = " << the_taskboard.the_task.numBatches << std::endl;
    EV << "Model size = " << the_taskboard.the_task.modelsize << std::endl;
    EV << "Compute time = " << the_taskboard.the_task.compute_time << std::endl;
    EV << "The simulation time of the task is " <<
        omnetpp::simTime() - the_taskboard.start_time << std::endl;
    EV << "Congratulations!" << std::endl;
    recordScalar("GPU number", the_taskboard.the_task.numGpus);
    recordScalar("Batches number", the_taskboard.the_task.numBatches);
    recordScalar("Model size", the_taskboard.the_task.modelsize);
    recordScalar("Compute time", the_taskboard.the_task.compute_time);
    recordScalar("The simulation time",
        omnetpp::simTime() - the_taskboard.start_time);
}

void CentralController::finish() {
    // Intentionally empty: per-task statistics are already recorded in
    // get_statistics() when each task completes.
}

void CentralController::handleMessage(omnetpp::cMessage *msg) {
    // Dispatch on message kind.  check_and_cast (already used in
    // initialize()) raises a descriptive error instead of dereferencing
    // nullptr when an unexpected message subclass arrives, unlike the old
    // unchecked dynamic_cast.
    if (msg->getKind() == FINISHED_MSG) {
        TaskMsg *ta_msg = omnetpp::check_and_cast<TaskMsg *>(msg);
        const Task& tmp_task = ta_msg->getNow_task();
        // try_release_task() frees ta_msg only on its endSimulation() path,
        // which throws and therefore skips the delete below -- no double free.
        try_release_task(tmp_task, ta_msg);
        delete ta_msg;
    } else if (msg->getKind() == CHANGE_ROUTE_MSG) {
        // Special serve the HD communication pattern
        FlowMsg *f_msg = omnetpp::check_and_cast<FlowMsg *>(msg);
        allocRoutes(f_msg->getThe_flow().dependRoute);
        delete f_msg;
    } else if (msg->getKind() == SYNC_MSG) {
        error("DONT SUPPORT SYNC MSG YET");
    } else {
        delete msg;
        error("ERROR MSG ARRIVED ON CONTROLLER");
    }
}

}  // namespace ai_sim
