// "Copyright [2021] <Copyright Shengkai Lin>"

#include <fstream>
#include <memory>
#include <utility>

#include "dispatcher.h"

namespace ai_sim {
Define_Module(Dispatcher)
void Dispatcher::initialize() {
    // routefile.open("./record_route.txt", ios::in);
    cModule * resourceScheduler_module =
        getModuleByPath("^.resource_scheduler");
    if (resourceScheduler_module != nullptr) {
        resourceScheduler =
            omnetpp::check_and_cast<ResourceScheduler *>
            (resourceScheduler_module);
    } else {
        error("dispatcher nullptr error");
    }

    cModule * taskAllocator_module =
        getModuleByPath("^.task_scheduler.task_allocator");
    if (taskAllocator_module != nullptr) {
        taskAllocator =
            omnetpp::check_and_cast<TaskAllocator *> (taskAllocator_module);
    } else {
        error("dispatcher nullptr error");
    }

    cModule * networkRouting_module =
        getModuleByPath("^.network_controller.network_routing");
    if (networkRouting_module != nullptr) {
        networkRouting =
            omnetpp::check_and_cast<NetworkRouting *> (networkRouting_module);
    } else {
        error("dispatcher nullptr error");
    }

    cModule * topo_module = getModuleByPath("^.topo_manager");
    if (topo_module != nullptr) {
        topos = omnetpp::check_and_cast<TopoManager *> (topo_module);
    } else {
        error("dispatcher nullptr error");
    }

    clustertype = ClusterType(getParentModule()->par("clustertype").intValue());

    inputType = (par("inputType").intValue());

    switch (inputType) {
    case 0:
        // gen tasks by task gen
        break;
    case 1:
    {
        // get flows from txt
        EV << "Use file input now" << endl;
        flowparser = new FlowParser(
            getParentModule()->par("file_path").stringValue());
        SelfMsg* s_msg = new SelfMsg;
        s_msg->setStr("waitingforflow");
        scheduleAt(omnetpp::simTime(), s_msg);
        break;
    }
    default:
        break;
    }
}

void Dispatcher::allocFlows(map<int, FlowVector> tx_flows,
    map<int, FlowVector> rx_flows) {
    // Builds one TaskFlowVecMsg per target GPU (carrying its tx and rx flow
    // vectors) and sends it out of the gate leading to the GPU's machine.
    // Nothing to allocate: guard against dereferencing begin() on an empty
    // map (reachable from alloc_flow_from_txt when no flow is due yet).
    if (tx_flows.empty() && rx_flows.empty()) {
        return;
    }
    // Assume that all the flows belongs to the same task; take it from the
    // first available flow (tx side preferred).
    Task the_task = (tx_flows.empty() ? rx_flows : tx_flows)
        .begin()->second.front().the_task;
    map<int, TaskFlowVecMsg*> flows_msg;
    // Set tx flows msgs
    for (auto tx_it = tx_flows.begin(); tx_it != tx_flows.end(); tx_it++) {
        TaskFlowVecMsg *tf_msg = new TaskFlowVecMsg;
        tf_msg->setNow_task(the_task);
        tf_msg->setTx_flows(tx_it->second);
        tf_msg->setTargetId(tx_it->first);
        tf_msg->setKind(TASK_ALLOC_MSG);
        flows_msg[tx_it->first] = tf_msg;
    }
    // Set rx flows msgs: merge into an existing message, or create a new one
    // when the GPU only receives.
    for (auto rx_it = rx_flows.begin(); rx_it != rx_flows.end(); rx_it++) {
        if (flows_msg.find(rx_it->first) != flows_msg.end()) {
            flows_msg[rx_it->first]->setRx_flows(rx_it->second);
        } else {
            TaskFlowVecMsg *tf_msg = new TaskFlowVecMsg;
            tf_msg->setNow_task(the_task);
            tf_msg->setRx_flows(rx_it->second);
            tf_msg->setTargetId(rx_it->first);
            tf_msg->setKind(TASK_ALLOC_MSG);
            flows_msg[rx_it->first] = tf_msg;
        }
    }
    for (auto msgs_it = flows_msg.begin();
        msgs_it != flows_msg.end(); msgs_it++) {
            // Get the output port to the machine the Gpu in.
            cModule *machine_mod = topos->getWorkersTopo()->
                getNode(msgs_it->first)->getModule()->getParentModule();
            topoNode* machine_node =
                topos->getControlTaskTopo()->getNodeFor(machine_mod);
            // BUG FIX: the original asserted getNumPaths() == 0 and then
            // immediately dereferenced path 0; at least one path must exist
            // for the send below to be valid.
            assert(machine_node->getNumPaths() > 0);
            int outport = machine_node->getinPathfromDest(0)->getRemoteGateId();

            EV << "I am " << machine_mod->getFullPath() << std::endl;
            // Reuse machine_node instead of repeating the topology lookups.
            EV << "My local gate is " << machine_node->getinPathfromDest(0)->
                getLocalGate()->getFullPath() << std::endl;
            EV << "The remote gate is " << machine_node->getinPathfromDest(0)->
                getRemoteGate()->getFullPath() << std::endl;

            send(msgs_it->second, outport);
            EV << "Alloc a task to CPU" << msgs_it->first << std::endl;
    }
}

void Dispatcher::allocAllTaskFlows(taskboard& tmp_taskboard) {
    Enter_Method_Silent();
    // Collect, per GPU, the flows it transmits and the flows it receives
    // across every stage of the task, then dispatch them in one batch.
    map<int, FlowVector> tx_flows;
    map<int, FlowVector> rx_flows;
    for (auto& stage : tmp_taskboard.stage_info) {
        for (auto& gpu_entry : stage.flows_on_gpu) {
            const int src_gpu = gpu_entry.first;
            auto& gpu_flows = gpu_entry.second;
            // Each flow is received by its destination GPU...
            for (auto& one_flow : gpu_flows) {
                rx_flows[one_flow.destIdx].push_back(one_flow);
            }
            // ...and the whole batch is transmitted by the source GPU.
            auto& src_vec = tx_flows[src_gpu];
            src_vec.insert(src_vec.end(), gpu_flows.begin(), gpu_flows.end());
        }
    }
    allocFlows(tx_flows, rx_flows);

    EV << "Finished alloc flows" << std::endl;
}

void Dispatcher::alloc_flow_from_txt() {
    // Alloc all flows of current time
    EV << "alloc_flow_from_txt" <<endl;
    map<int, RouteVec> route_in_switch;
    map<int, FlowVector> tx_flows;
    map<int, FlowVector> rx_flows;

    DetailFlow current_flow = flowparser->getCurrentFlow();

    while (current_flow.time != -1 &&
        (omnetpp::simtime_t)current_flow.time <= omnetpp::simTime()) {
        networkRouting->gen_txt_route(route_in_switch, current_flow);
        taskAllocator->gen_txt_flow(tx_flows, rx_flows,
            route_in_switch, current_flow);
        current_flow = flowparser->getNextFlow();
        EV << "Generated a flow" << endl;
    }

    networkRouting->allocRoutes(route_in_switch);
    allocFlows(tx_flows, rx_flows);
    if ((omnetpp::simtime_t)current_flow.time > omnetpp::simTime()) {
        SelfMsg* s_msg = new SelfMsg;
        s_msg->setStr("waitingforflow");
        scheduleAt(current_flow.time, s_msg);
    }
}

void Dispatcher::alloc_task(const Task& tmp_task) {
    Enter_Method("alloc_task(const Task& tmp_task)");
    taskboard tmp_taskboard = {
        .the_task = tmp_task,
        .flows_count = 0,
        .start_time = omnetpp::simTime(),
        .working_workers = tmp_task.numGpus
    };
    EV << "tasktype" << tmp_task.tasktype << endl;
    AllreduceAlgorithm* allreduce_algo =
        AllreduceAlgorithm::get(tmp_task.tasktype);
    // allreduce_algo = AllreduceAlgorithm::get(Butterfly);
    RouteAlgorithm* route_algo = RouteAlgorithm::get(clustertype);

    resourceScheduler->alloc_gpu_resource(tmp_taskboard);

    taskAllocator->gen_comm_pair(tmp_taskboard, allreduce_algo);

    taskAllocator->gen_flows(tmp_taskboard, allreduce_algo);

    // Allocate routes for all stages
    networkRouting->gen_route(tmp_taskboard, route_algo);
    if (clustertype == ClusterType::ElectricalCluster ||
    clustertype == ClusterType::HybridCluster) {
        for (auto& it : tmp_taskboard.stage_info) {
            networkRouting->allocRoutes(it.routes_on_switch);
        }
    } else {
            networkRouting->allocRoutes(
                tmp_taskboard.stage_info[0].routes_on_switch);
    }


    // Allocate all flows to gpus at first
    allocAllTaskFlows(tmp_taskboard);

    taskboards.insert(std::make_pair(tmp_task.taskId, tmp_taskboard));

    delete allreduce_algo;
    delete route_algo;
}

// void Dispatcher::alloc_task_from_http() {
//     char* content;
//     httprep reply = httpget(content);
//     reply.
// }

void Dispatcher::try_release_task(const Task& tmp_task) {
    Enter_Method_Silent();
    taskboard the_taskboard;
    // Find the taskboard and erase it
    if (taskboards.find(tmp_task.taskId) == taskboards.end()) {
        error("NO SUCH taskboard, task id %d", tmp_task.taskId);
    } else if (taskboards[tmp_task.taskId].working_workers > 1) {
        taskboards[tmp_task.taskId].working_workers--;
        EV << "Remaining workers " <<
            taskboards[tmp_task.taskId].working_workers << " on task:" <<
            tmp_task.taskId << std::endl;
    } else {
        the_taskboard = taskboards[tmp_task.taskId];
        get_statistics(the_taskboard);
        // Release links and Gpus
        for (auto& stage : the_taskboard.stage_info) {
            for (auto used_link_of_stage : stage.used_links) {
                used_link_of_stage->enable();
            }

            for (auto used_gpu : the_taskboard.used_gpus) {
                used_gpu->enable();
            }
        }

        taskboards.erase(tmp_task.taskId);
    }
}

void Dispatcher::get_statistics(const taskboard& the_taskboard) {
    std::cout << "\n*********************" << std::endl;
    std::cout << "The task info :" << std::endl;
    std::cout << "GPU number = " << the_taskboard.the_task.numGpus << std::endl;
    std::cout << "Batches number = " << the_taskboard.the_task.numBatches << std::endl;
    std::cout << "Model size = " << the_taskboard.the_task.modelsize << std::endl;
    std::cout << "Compute time = " << the_taskboard.the_task.compute_time << std::endl;
    std::cout << "Start time = " <<the_taskboard.start_time<<std::endl;
    std::cout << "Communication time = " <<  omnetpp::simTime() - the_taskboard.start_time<< std::endl;
    std::cout << "The simulation time of the task is " <<
        omnetpp::simTime() - the_taskboard.start_time << std::endl;
    std::cout << "Congratulations!" << std::endl;
    std::cout << "*********************\n" << std::endl;
    recordScalar("GPU number", the_taskboard.the_task.numGpus);
    recordScalar("Batches number", the_taskboard.the_task.numBatches);
    recordScalar("Model size", the_taskboard.the_task.modelsize);
    recordScalar("Compute time", the_taskboard.the_task.compute_time);
    recordScalar("The simulation time",
        omnetpp::simTime() - the_taskboard.start_time);
    // error("Stop");
}

void Dispatcher::handleMessage(omnetpp::cMessage *msg) {
    Enter_Method_Silent();
    SwitchAckMsg * oxc_ack_msg = dynamic_cast<SwitchAckMsg *> (msg);
    if (msg->isSelfMessage()) {
        // Self message: timer telling us to poll the flow trace file.
        // check_and_cast aborts with a clear error instead of a null deref.
        SelfMsg* s_msg = omnetpp::check_and_cast<SelfMsg*>(msg);
        // error(s_msg->getStr());
        string infoinmsg = s_msg->getStr();
        if (infoinmsg.find("waitingforflow") != string::npos) {
            alloc_flow_from_txt();
        }
        delete s_msg;
    } else if (oxc_ack_msg != nullptr) {
        // BUG FIX: the original condition (kind == SWITCH_ACK_MSG ||
        // oxc_ack_msg != nullptr) could enter this branch with a failed cast
        // and dereference null; the successful dynamic_cast is authoritative.
        Flow the_flow = oxc_ack_msg->getThe_flow();

        // Forward the switch ack to the machine hosting the source GPU.
        cModule *machine_mod = topos->getWorkersTopo()->
            getNode(the_flow.srcIdx)->getModule()->getParentModule();
        topoNode* machine_node =
            topos->getControlTaskTopo()->getNodeFor(machine_mod);
        int outport = machine_node->getinPathfromDest(0)->getRemoteGateId();
        msg->setKind(SWITCH_ACK_MSG);
        send(msg, outport);
    } else if (msg->getKind() == TASK_ALLOC_MSG) {
        auto task = omnetpp::check_and_cast<TaskMsg*>(msg)->getNow_task();
        alloc_task(task);
        delete msg;
    } else if (msg->getKind() == FINISHED_MSG) {
        FlowMsg* ta_msg = omnetpp::check_and_cast<FlowMsg*>(msg);
        const Flow& last_flow = ta_msg->getThe_flow();
        if (inputType == 1) {
            // File-driven run: just report completion of this flow batch.
            std::cout << "\n*********************" << std::endl;
            cout << "The flows alloced from " <<
            last_flow.srcIdx << " to " << last_flow.destIdx <<
            " finished!"<< endl;
            cout << "Current time is " << omnetpp::simTime() << endl;
            std::cout << "*********************\n" << std::endl;
            ofstream fout("resultFile.txt", ios::app);
            fout << "Finish time " << omnetpp::simTime() << endl;
            // error("Finished");
        } else {
            // Generator-driven run: a worker finished; maybe free the task.
            try_release_task(last_flow.the_task);
        }
        delete ta_msg;
    } else if (msg->getKind() == CHANGE_ROUTE_MSG) {
        FlowMsg* f_msg = omnetpp::check_and_cast<FlowMsg*>(msg);
        networkRouting->allocRoutes(f_msg);
        delete f_msg;
    } else {
        delete msg;
        error("unknown type of msg");
    }
}
}  // namespace ai_sim
