// "Copyright [2021] <Copyright Shengkai Lin>"

#include <vector>
#include <unordered_map>
#include "task_allocator.h"

namespace ai_sim {
Define_Module(TaskAllocator)
void TaskAllocator::initialize() {
    // Resolve the sibling TopoManager module: "^.^.topo_manager" walks two
    // levels up from this module, then down into the "topo_manager" submodule.
    cModule * topo_module = getModuleByPath("^.^.topo_manager");
    // check_and_cast<> stops the simulation with a descriptive error if the
    // module is absent or is not a TopoManager, so `topos` is valid afterwards.
    topos = omnetpp::check_and_cast<TopoManager *> (topo_module);
    EV_INFO << "INIT TaskAllocator FINISHED" << endl;
}

// Delegate generation of allreduce communication pairs to the supplied
// algorithm object, which fills `tmp_taskboard` using the cached topology.
// Enter_Method_Silent() must come first: it tells OMNeT++ this method is
// being invoked from another module's context.
void TaskAllocator::gen_comm_pair(
    taskboard& tmp_taskboard, AllreduceAlgorithm* algo) {
    Enter_Method_Silent();
//////////////// Generate communication pairs/////////////////////
// Only support RingAllReduce now. Below code insert the comm pair
// ONE BY ONE to the flow_pattern map
// The ring is bi-direction.
    algo->generateCommPairs(tmp_taskboard, topos);
    EV_INFO << "Finished gen comm pairs" << endl;
}

// Delegate flow generation for the GPUs to the supplied algorithm object,
// which populates `tmp_taskboard` using the cached topology.
// Enter_Method_Silent() must be the first statement: it marks this method
// as being called from another module's context.
void TaskAllocator::gen_flows(
    taskboard& tmp_taskboard, AllreduceAlgorithm* algo) {
    Enter_Method_Silent();
//////////////////// generate flows for gpus.//////////////////
// Set the size of the queue only number of comm_pair + 1
// Generate different flows to gpus according to the task type
    // NOTE(review): "generateFLows" looks like a typo for "generateFlows",
    // but the name is declared in AllreduceAlgorithm and cannot be fixed here.
    algo->generateFLows(tmp_taskboard, topos);
    EV_INFO << "Finished generate flows for gpus" << endl;
}

// Build one Flow record from a DetailFlow and register it with both the
// source GPU's tx queue and the destination GPU's rx queue.
//   tx_flows / rx_flows : per-GPU-index flow queues, keyed by GPU index
//   routes              : per-module-id route vectors; the source route's
//                         last hop supplies the outgoing port
//   flow                : the detailed flow; flow_path.front() is the source
//                         hop, flow_path.back() the destination hop
void TaskAllocator::gen_txt_flow(
    map<int, FlowVector>& tx_flows, map<int, FlowVector>& rx_flows,
    map<int, RouteVec>& routes, DetailFlow& flow) {
    // Outport of the flow comes from the last entry of the source's route.
    // NOTE(review): operator[] default-inserts an empty RouteVec when the
    // module id is missing, making the .back() below undefined — confirm
    // callers always populate `routes` first.
    RouteVec& src_route = routes[flow.flow_path.front().modid];
    int outport = src_route.back().outPort;
    // Endpoint GPU indices taken from the two ends of the flow path.
    int srcidx = flow.flow_path.front().index;
    int destidx = flow.flow_path.back().index;
    Flow tx_flow(flow.data_size, srcidx,
        destidx, outport);
    // The same record is queued on both endpoints.
    tx_flows[srcidx].push_back(tx_flow);
    rx_flows[destidx].push_back(tx_flow);
    EV << "Finished gen_txt_flow" << endl;
}

}  // namespace ai_sim
