// "Copyright [2021] <Copyright Zhancheng Liang, Shengkai Lin>"

// The following CPU module functions as the controller of a node.
// It is responsible for receiving messages from the central controller
// and forwarding the route info to the App and router.

#include <string.h>
#include "cpu.h"

namespace ai_sim {

// Register Cpu with the OMNeT++ kernel as a simple-module implementation.
Define_Module(Cpu);

// Read the module parameters and locate the companion GPU module.
// gpu[my_index - 1] pairs with this cpu; cpu[0] is the master (see
// handleMessage) and is presumably not expected to have a paired GPU.
void Cpu::initialize() {
    my_global_index = par("my_global_index");
    my_index = par("my_index");
    omnetpp::cModule * gpu_module =
        getParentModule()->getSubmodule("gpu", my_index - 1);
    my_gpu = dynamic_cast<Gpu *> (gpu_module);
    // dynamic_cast returns nullptr on failure; verify before dereferencing
    // (the original asserted via my_gpu->getIndex(), which would crash first).
    assert(my_gpu != nullptr);
    assert(my_gpu->getIndex() == getIndex() - 1);
}

// Satisfy the outstanding requirements of a flow, clearing each flag as it
// is handled so the same requirement is not serviced twice.
void Cpu::handleReuqires(Flow& the_flow) {
    // A pending route change is forwarded to the central controller.
    auto coop = the_flow.requires.find(NeedControllerCooperate);
    if (coop != the_flow.requires.end() && coop->second) {
        EV << "handle controller cooperate" << std::endl;
        coop->second = false;
        FlowMsg *route_msg = new FlowMsg;
        route_msg->setThe_flow(the_flow);
        route_msg->setKind(CHANGE_ROUTE_MSG);
        send(route_msg, "controller_port$o");
    }
    // A pending computation is modeled as a self-message that arrives after
    // the task's compute time; a flow with nothing to compute goes straight
    // to the GPU for sending.
    auto compute = the_flow.requires.find(NeedCompute);
    if (compute != the_flow.requires.end() && compute->second) {
        EV << "handle compute" << std::endl;
        compute->second = false;
        FlowMsg *compute_msg = new FlowMsg;
        compute_msg->setThe_flow(the_flow);
        scheduleAt(omnetpp::simTime() + the_flow.the_task.compute_time,
            compute_msg);
    } else {
        my_gpu->handleSend(the_flow);
    }
}

// Dependency graph
void Cpu::alloc_flow(Flow the_flow) {
    // The communication between cpu and gpu is reduced to direct function
    // call
    int remainRequire = 0;
    for (auto require : the_flow.requires) {
        remainRequire += require.second;
    }
    if (the_flow.numUpstreamflows != 0) error("The flow still have upstreams!");
    if (remainRequire == 0) {
        // EV << "requirement is met" << std::endl;
        my_gpu->handleSend(the_flow);
    } else {
        // EV << "switch topology" << std::endl;
        handleReuqires(the_flow);
    }
}

// Called (as a direct method call) when a flow has finished transmission.
// Decrements the upstream counter of each downstream flow and releases any
// flow whose dependencies are now all satisfied.
void Cpu::handleReceive(const FlowMsg *msg) {
    Enter_Method("handleReceive(FlowMsg *msg)");
    Flow r_flow = msg->getThe_flow();
    std::vector<int> d_flows_id = r_flow.downstream_flows;
    // No pending flows left: the whole task is done, notify the controller.
    if (running_task.flows.empty()) {
        handleFinish(msg);
    }
    for (int d_flow_id : d_flows_id) {
        try {
            // BUGFIX: take a reference. The original copied the flow, so the
            // decrement below updated the copy and the stored upstream count
            // never changed — flows with more than one upstream would never
            // have been released.
            Flow& downstreamflow = running_task.flows.at(d_flow_id);
            if (--downstreamflow.numUpstreamflows <= 0) {
                // A downstream id of -1 marks the flow as not yet routable.
                // (Renamed the loop variable: the original shadowed
                // d_flow_id from the outer loop.)
                bool flowValid = true;
                for (int next_flow_id : downstreamflow.downstream_flows) {
                    EV << "Downstream flow id " << next_flow_id << std::endl;
                    if (next_flow_id == -1) flowValid = false;
                }
                if (flowValid) {
                    alloc_flow(downstreamflow);
                    // Safe: the reference is not used after the erase.
                    running_task.flows.erase(d_flow_id);
                }
            }
        } catch (const std::out_of_range&) {
            // Catch by const reference (the original caught by value).
            // error("No such flow in the CPU");
            EV_WARN << "No such flow in the CPU" << std::endl;
        }
    }
    EV << "Remaining flows count " << running_task.flows.size()<< std::endl;
}

void Cpu::handleFinish(const FlowMsg *msg) {
    Flow r_flow = msg->getThe_flow();
    TaskMsg *f_msg = new TaskMsg;
    f_msg->setNow_task(r_flow.the_task);
    f_msg->setKind(FINISHED_MSG);
    send(f_msg, "controller_port$o");
}

// Accept a newly allocated task: flows whose upstream dependencies are
// already satisfied are released immediately; the rest are parked in
// running_task.flows until handleReceive unblocks them.
void Cpu::handleNewTask(const TaskFlowVecMsg *msg) {
    ASSERT(msg->getTargetId() == getId());
    running_task.now_task = msg->getNow_task();

    const auto& new_flows = msg->getThe_flows();
    for (const auto& new_flow : new_flows) {
        const bool ready = (new_flow.numUpstreamflows == 0);
        if (ready) {
            alloc_flow(new_flow);
        } else {
            running_task.flows[new_flow.flowId] = new_flow;
        }
    }

    EV << "Recieved a TASK_ALLOC_MSG at cpu"
        << my_global_index << "\t The num flows is "
        << new_flows.size() << std::endl;
}

// Dispatch an incoming message according to this CPU's role: my_index == 0
// is the master CPU (relays between workers and the controller), any other
// index is a normal worker CPU.
void Cpu::handleMessage(omnetpp::cMessage *msg) {
    /////////////////// If the cpu is a normal cpu////////////////
    if (my_index != 0) {
        if (msg->getKind() == TASK_ALLOC_MSG) {
            // A new task arrived
            TaskFlowVecMsg *t_msg = dynamic_cast<TaskFlowVecMsg *>(msg);
            ASSERT(t_msg != nullptr);  // kind promised a TaskFlowVecMsg
            handleNewTask(t_msg);
            delete t_msg;
        } else if (msg->isSelfMessage()) {
            // Between batches
            // TODO(Lin) : Sync not done yet!
            // Self-messages are the compute-done timers from handleReuqires.
            FlowMsg *f_msg = dynamic_cast<FlowMsg *>(msg);
            ASSERT(f_msg != nullptr);
            alloc_flow(f_msg->getThe_flow());
            delete f_msg;
        } else {
            int i = msg->getKind();
            delete msg;
            error("DONT SUPPORT THE MSG TYPE %d ON NORMAL CPU", i);
        }
    } else {
        //////////////// Else this is a master cpu.///////////////
        switch (msg->getKind()) {
            case TASK_ALLOC_MSG: {
                TaskFlowVecMsg *t_msg = dynamic_cast<TaskFlowVecMsg *>(msg);
                ASSERT(t_msg != nullptr);
                ASSERT(!t_msg->getThe_flows().empty());  // [0] below
                // Get the srcIdx from the flow
                int srcIdx = t_msg->getThe_flows()[0].srcIdx;
                send(t_msg, "cpu_port$o", srcIdx % gateSize("cpu_port$o"));
                break;
            }
            case FINISHED_MSG : {
                send(msg, "controller_port$o");
                break;
            }
            case CHANGE_ROUTE_MSG : {
                send(msg, "controller_port$o");
                break;
            }
            default: {
                // BUGFIX: read the kind BEFORE deleting the message — the
                // original called msg->getKind() after `delete msg`, a
                // use-after-free (the normal-CPU branch already did this
                // correctly).
                int kind = msg->getKind();
                delete msg;
                error("DONOT SUPPORT THE MSG TYPE %d ON MASTER CPU", kind);
                break;
            }
        }
    }
}
}  // namespace ai_sim
