// Copyright 2021 Shengkai Lin


#include "resource_scheduler.h"


namespace ai_sim {

// Register ResourceScheduler with OMNeT++ so it can be instantiated from NED.
Define_Module(ResourceScheduler);

void ResourceScheduler::initialize() {
    numMachine = par("numMachine");
    numGpusperMachine = par("numGpusperMachine");
    cModule * topo_module = getModuleByPath("^.topo_manager");
    topos = omnetpp::check_and_cast<TopoManager *> (topo_module);
    // Initialize random variable generator.
    srand(0);
}


void ResourceScheduler::alloc_gpu_resource(taskboard& tmp_taskboard) {
    // Allocate GPU resources for the task attached to `tmp_taskboard`.
    // GPUs are claimed greedily in node-index order: each enabled node is
    // taken (and disabled so later tasks skip it) until the task's demand
    // is met. Raises a simulation error if the cluster cannot satisfy it.
    //
    // NOTE(review): assumes the_task.numGpus is non-negative — verify upstream.
    const std::size_t neededGpus =
        static_cast<std::size_t>(tmp_taskboard.the_task.numGpus);
    const int totalGpus = numMachine * numGpusperMachine;
    auto *workersTopo = topos->getWorkersTopo();  // loop-invariant, hoisted

    // Demand is checked BEFORE claiming a node: previously a task asking for
    // zero GPUs would still grab and disable the first enabled node, because
    // the check only ran after a push_back.
    for (int i = 0;
         i < totalGpus && tmp_taskboard.used_gpus.size() < neededGpus;
         ++i) {
        topoNode *node_ptr = workersTopo->getNode(i);
        if (node_ptr->isEnabled()) {
            tmp_taskboard.used_gpus.push_back(node_ptr);
            node_ptr->disable();  // mark busy so other tasks cannot reuse it
        }
    }

    EV << "numGpusperMachine " << numGpusperMachine << endl
        << "numMachine " << numMachine << endl;
    EV << "Total gpus " << totalGpus << std::endl;
    EV << "Need gpus " << neededGpus << std::endl;

    if (tmp_taskboard.used_gpus.size() < neededGpus) {
        // error() throws cRuntimeError, so no return is needed after it
        // (the original had an unreachable `return;` here).
        error("DONT HAVE ENOUGH GPUS");
    }
    EV << "Finished alloc gpu resources" << std::endl;
}


void ResourceScheduler::handleMessage(omnetpp::cMessage *msg) {
    // Intentionally a no-op: this module currently expects no incoming
    // messages, so `msg` is ignored.
    // NOTE(review): in OMNeT++ the receiving module owns a delivered message;
    // if a message ever does arrive here it would leak — confirm no sender
    // targets this module, or delete/forward the message.
}

void ResourceScheduler::finish() {
    // No end-of-simulation statistics or cleanup to record for this module.
}



}  // namespace ai_sim
