// "Copyright [2021] <Copyright Shengkai Lin>"

#include <queue>
#include "./allreduce.h"

using namespace omnetpp;

namespace ai_sim {

// Stages of HD with gpu index 0,1,2,3,4,5,6,7
// 0-1 2-3 4-5 6-7
// 0-2 1-3 4-6 5-7
// 0-4 1-5 2-6 3-7
//
// Builds tmp_taskboard.stage_info: one entry per stage, each holding the
// (first, second) topoNode comm pairs for that stage. Requires a
// power-of-two number of GPUs; throws cRuntimeError otherwise.
void ButterflyAlgorithm::generateCommPairs(
    taskboard& tmp_taskboard, TopoManager * topos) {
    const size_t num_gpus = tmp_taskboard.used_gpus.size();
    // Halving-Doubling only works for a power-of-two worker count.
    // NOTE: the previous guard `log2(n) == -1` could never fire for any
    // integer n >= 1, so non-power-of-two counts silently truncated the
    // stage count and produced wrong pairings. `n & (n - 1)` is zero
    // exactly when n is a power of two.
    if (num_gpus == 0 || (num_gpus & (num_gpus - 1)) != 0) {
        throw cRuntimeError("Dont support the task with gpu %d to use HD",
            static_cast<int>(num_gpus));
    }
    // Stages per batch: exact log2 since num_gpus is a power of two.
    const int power_of_two = static_cast<int>(log2(num_gpus));
    for (int stage = 0; stage < power_of_two *
        tmp_taskboard.the_task.numBatches; stage++) {
        // Loop over all stages of all batches.
        tmp_taskboard.stage_info.emplace_back();
        // The order of the stage within its batch.
        const int stageorderinbatch = stage % power_of_two;
        // Partner distance doubles each stage: stage 0 pairs neighbours
        // (0-1, 2-3, ...), the last stage pairs the two halves
        // (0-4, 1-5, ...).
        const int numGroup = 1 << (power_of_two - stageorderinbatch - 1);
        const int numWorkerinGroup = 1 << (stageorderinbatch + 1);
        for (int group = 0; group < numGroup; group++) {
            // Loop over the independent groups of this stage.
            for (int i = 0; i < numWorkerinGroup / 2; i++) {
                // Pair worker i of the lower half of the group with
                // worker i of the upper half.
                topoNode* first = topos->getNodeinTopo(
                    tmp_taskboard.used_gpus[group * numWorkerinGroup + i],
                    topos->getDataplaneTopo());
                topoNode* second = topos->getNodeinTopo(
                    tmp_taskboard.used_gpus[group * numWorkerinGroup +
                        numWorkerinGroup / 2 + i],
                    topos->getDataplaneTopo());
                tmp_taskboard.stage_info.back().
                    comm_pairs.push_back(make_pair(first, second));
                EV_INFO << "New comm pair " <<
                    first->getModule()->getFullPath() <<
                    "<-->" << second->getModule()->getFullPath() << endl;
            }
        }
    }
}

// Turns the comm pairs of every stage (built by generateCommPairs) into
// concrete flows: one forward and one reverse flow per pair, recorded in
// stage_info[stage].flows_on_gpu keyed by the flow's source gpu. Then it
// links each first-stage flow to its downstream successors stage by stage.
void ButterflyAlgorithm::generateFLows(
    taskboard& tmp_taskboard, TopoManager * topos) {
    // Stages per batch; numGpus is a power of two, so log2 is exact.
    const int numStageperBatch =
        static_cast<int>(log2(tmp_taskboard.the_task.numGpus));

    // Template flow reused for every pair; per-flow fields are overwritten
    // below before each push_back.
    Flow tmp_flow;
    tmp_flow.the_task = tmp_taskboard.the_task;
    tmp_flow.data_size = tmp_taskboard.the_task.modelsize;

    for (size_t stage = 0; stage < tmp_taskboard.stage_info.size(); stage++) {
        // Loop over all stages.
        auto& stage_info = tmp_taskboard.stage_info[stage];
        tmp_flow.requires.clear();
        if (stage == 0) {
            // The very first stage has no upstream dependency.
            tmp_flow.numUpstreamflows = 0;
        } else {
            // Every later stage waits for exactly one upstream flow and
            // needs the controller to coordinate the hand-off.
            tmp_flow.numUpstreamflows = 1;
            tmp_flow.requires[NeedControllerCooperate] = true;
        }
        if (stage % numStageperBatch == 0)
            // First stage of a batch: compute before communicating.
            tmp_flow.requires[NeedCompute] = true;
        for (auto& comm_pair : stage_info.comm_pairs) {
            // Loop over the comm pairs of this stage.
            const int pairsrcIdx =
                topos->getWorkersTopo()->getNodeId(comm_pair.first);
            const int pairdestIdx =
                topos->getWorkersTopo()->getNodeId(comm_pair.second);
            // Forward flow (first -> second).
            tmp_flow.flowId = tmp_taskboard.flows_count++;
            tmp_flow.srcIdx = pairsrcIdx;
            tmp_flow.destIdx = pairdestIdx;
            stage_info.flows_on_gpu[tmp_flow.srcIdx].push_back(tmp_flow);
            // Reference, not copy: the flow is only needed for logging.
            const auto& fwd_flow =
                stage_info.flows_on_gpu[tmp_flow.srcIdx].back();
            EV << "Generating flows:" << fwd_flow.srcIdx << ","
               << fwd_flow.destIdx;
            EV << " numUpstreamflows:" << fwd_flow.numUpstreamflows << endl;

            // Reverse flow (second -> first).
            tmp_flow.flowId = tmp_taskboard.flows_count++;
            tmp_flow.srcIdx = pairdestIdx;
            tmp_flow.destIdx = pairsrcIdx;
            stage_info.flows_on_gpu[tmp_flow.srcIdx].push_back(tmp_flow);
            const auto& rev_flow =
                stage_info.flows_on_gpu[tmp_flow.srcIdx].back();
            EV << "Generating flows:" << rev_flow.srcIdx << ","
               << rev_flow.destIdx;
            EV << "numUpstreamflows:" << rev_flow.numUpstreamflows << endl;
        }
    }
    EV << "Finish gen flows, start resolve downstream flows" << endl;
    // Resolve downstream flows with a depth-first walk: starting from each
    // first-stage flow, follow its destination gpu into the next stage and
    // link the single flow departing from that gpu.
    auto& first_stage_flows = tmp_taskboard.stage_info[0].flows_on_gpu;
    for (auto fir_flow_it = first_stage_flows.begin();
        fir_flow_it != first_stage_flows.end(); ++fir_flow_it) {
        Flow* upper_flow = &fir_flow_it->second.front();
        for (size_t stage = 1; stage < tmp_taskboard.stage_info.size();
            stage++) {
            auto& stage_info = tmp_taskboard.stage_info[stage];
            // The flow count of each stage per gpu should be 1, so
            // front() is the unique successor.
            Flow* downer_flow =
                &stage_info.flows_on_gpu[upper_flow->destIdx].front();
            upper_flow->downstream_flows.push_back(downer_flow->flowId);
            // Keep a pointer (not a reference) so reseating it here does
            // not assign downer_flow's contents into upper_flow.
            upper_flow = downer_flow;
        }
    }
}
}  // namespace ai_sim
