// "Copyright [2021] <Copyright Shengkai Lin>"

#include <queue>
#include <cmath>
#include <limits>
#include "task_scheduler/task_type/allreduce.h"

using namespace omnetpp;

namespace ai_sim {

void RingAllreduceAlgorithm::generateCommPairs(
    taskboard& tmp_taskboard, TopoManager * topos) {
    // Build the ring of communication pairs for this task with a greedy
    // nearest-neighbor walk over the dataplane topology: starting from the
    // first used GPU, repeatedly hop to the closest not-yet-visited GPU,
    // then close the ring back to the starting GPU.
    //
    // Create a new stage.
    // NOTE(review): pairs below are stored into stage_info[0] even though a
    // new stage was just emplaced — confirm this is intended when stage_info
    // was non-empty on entry.
    tmp_taskboard.stage_info.emplace_back();
    // Temporarily enable all used GPUs; a GPU is disabled once it has served
    // as the source of a pair, so each GPU is visited exactly once.
    for (auto& tmp_gpu : tmp_taskboard.used_gpus) {
        tmp_gpu->enable();
    }
    topoNode* tmp_first_gpu = tmp_taskboard.used_gpus[0];
    while (true) {
        // Compute weighted shortest paths from every node to the current
        // GPU so getDistanceToTarget() below is valid.
        topos->getDataplaneTopo()->calculateWBSSPathsTo(
            topos->getNodeinTopo(tmp_first_gpu, topos->getDataplaneTopo()));
        // Use the true "no candidate yet" sentinel instead of assuming
        // distances stay below an arbitrary bound (the old code used 99,
        // which would silently close the ring early on longer paths).
        int shortest_dist = std::numeric_limits<int>::max();
        Topology::Node* shortest_gpu = nullptr;
        for (auto& tmp_second_gpu : tmp_taskboard.used_gpus) {
            int dist = topos->getNodeinTopo(tmp_second_gpu,
                topos->getDataplaneTopo())->getDistanceToTarget();
            if (tmp_second_gpu->isEnabled() && dist < shortest_dist &&
                tmp_second_gpu != tmp_first_gpu) {
                shortest_dist = dist;
                shortest_gpu = tmp_second_gpu;
            }
        }
        tmp_first_gpu->disable();
        // No enabled GPU left: close the ring back to the starting GPU.
        const bool ring_closed = (shortest_gpu == nullptr);
        if (ring_closed) {
            shortest_gpu = tmp_taskboard.used_gpus[0];
        }
        // Convert to dataplane-topology nodes so routes can later be
        // allocated directly from the stored pair.
        tmp_taskboard.stage_info[0].comm_pairs.push_back(std::make_pair(
            topos->getNodeinTopo(tmp_first_gpu, topos->getDataplaneTopo()),
            topos->getNodeinTopo(shortest_gpu, topos->getDataplaneTopo())));
        EV_INFO << "New comm pair " <<
            tmp_first_gpu->getModule()->getFullPath() <<
            "<-->" << shortest_gpu->getModule()->getFullPath() << endl;
        if (ring_closed) {
            break;
        }
        tmp_first_gpu = shortest_gpu;
    }
}

void RingAllreduceAlgorithm::generateFLows(
    taskboard& tmp_taskboard, TopoManager * topos) {
    // Expand the ring comm pairs produced by generateCommPairs() into the
    // concrete flows of a ring allreduce: a forward pass walking the ring in
    // comm-pair order, then a backward pass walking it in reverse.  Flows
    // are chained through downstream_flows so each flow is released when its
    // single upstream flow finishes.

    std::queue<Flow> pairing_queue;
    // One allreduce batch takes 2*(numGpus-1) ring steps
    // (presumably scatter-reduce followed by allgather — matches the
    // classic ring-allreduce step count).
    int numStageperBatch = 2 * tmp_taskboard.the_task.numGpus - 2;
    int numBatches = tmp_taskboard.the_task.numBatches;

    // Template flow reused for every generated flow; per-flow fields are
    // overwritten inside the loops below.
    Flow tmp_flow;
    tmp_flow.the_task = tmp_taskboard.the_task;
    // Per-step chunk size.  NOTE(review): divisor is numGpus*2 rather than
    // the usual numGpus of ring allreduce — confirm the intended chunking.
    tmp_flow.data_size = static_cast<double>(
        tmp_taskboard.the_task.modelsize) /
        (tmp_taskboard.the_task.numGpus*2);
    tmp_flow.outportId = -1;

    // ---- Forward pass: numStageperBatch * numBatches ring steps ----
    for (int i = 0; i < numStageperBatch * numBatches; i++) {
        // Only the very first flow has no upstream and can run immediately.
        if (i == 0) {
            tmp_flow.numUpstreamflows = 0;
        } else {
            tmp_flow.numUpstreamflows = 1;
        }

        // For Ring the special requirement is Compute: the first step of
        // each batch must additionally wait for local computation.
        tmp_flow.requires.clear();
        if (i % numStageperBatch == 0) {
            tmp_flow.requires[NeedCompute] = true;
        }

        // const& avoids copying every pair per iteration.
        for (const auto& comm_pair : tmp_taskboard.stage_info[0].comm_pairs) {
            tmp_flow.srcIdx =
                topos->getWorkersTopo()->getNodeId(comm_pair.first);
            tmp_flow.destIdx =
                topos->getWorkersTopo()->getNodeId(comm_pair.second);
            tmp_flow.flowId = tmp_taskboard.flows_count++;
            tmp_flow.downstream_flows.clear();
            // The queue holds the previous ring step's flows.  Once it is
            // one full step deep, the front flow gets the current flow
            // (which starts on the GPU the front flow delivers to) as its
            // downstream, then is committed to its source GPU's list.
            if (pairing_queue.size() >=
                tmp_taskboard.stage_info[0].comm_pairs.size()) {
                Flow &alloc_flow = pairing_queue.front();
                assert(alloc_flow.destIdx == tmp_flow.srcIdx &&
                    alloc_flow.srcIdx != tmp_flow.destIdx);

                alloc_flow.downstream_flows.push_back(tmp_flow.flowId);
                tmp_taskboard.stage_info[0].flows_on_gpu
                    [alloc_flow.srcIdx].push_back(alloc_flow);
                EV << "Push back flow " << alloc_flow.srcIdx <<
                    "<-->" << alloc_flow.destIdx;
                EV << "Special requires: " <<
                    alloc_flow.requires.size() << endl;
                EV << "With downstream flow src " << tmp_flow.srcIdx <<
                    "; dest " << tmp_flow.destIdx << endl;
                pairing_queue.pop();
            }
            // The last flow of each ring step is committed immediately and
            // chained to the first flow of the following step.
            if ( (tmp_flow.flowId + 1) %
                tmp_taskboard.stage_info[0].comm_pairs.size() == 0 ) {
                tmp_flow.downstream_flows.push_back(tmp_flow.flowId + 1);
                tmp_taskboard.stage_info[0].flows_on_gpu
                    [tmp_flow.srcIdx].push_back(tmp_flow);
                EV << "Push back flow " << tmp_flow.srcIdx <<
                    "<-->" << tmp_flow.destIdx;
                EV << "Special requires: " <<
                    tmp_flow.requires.size() << endl;
            } else {
                pairing_queue.push(tmp_flow);
            }
        }
    }
    // Commit any flows still waiting in the pairing_queue (the final ring
    // step of the forward pass has no later flow to pair with).
    while (!pairing_queue.empty()) {
        auto remain_flow = pairing_queue.front();
        tmp_taskboard.stage_info[0].flows_on_gpu
            [remain_flow.srcIdx].push_back(remain_flow);
        EV << "Push back flow " << remain_flow.srcIdx <<
            "<-->" << remain_flow.destIdx << endl;
        EV << "Special requires: " << remain_flow.requires.size() << endl;
        pairing_queue.pop();
    }

    // Total flow count of one pass.  The backward pass emits the same
    // number, so flow id 2*flowNum-1 is the final flow of the task.
    // (Replaces a redundant nested loop that counted iterations just to
    // compute this product.)
    int flowNum = numStageperBatch * numBatches *
        static_cast<int>(tmp_taskboard.stage_info[0].comm_pairs.size());

    // ---- Backward pass: same schedule, ring traversed in reverse ----
    for (int i = 0; i < numStageperBatch * numBatches; i++) {
        // First backward flow still has one upstream: the forward pass.
        if (i == 0) {
            tmp_flow.numUpstreamflows = 0;
        } else {
            tmp_flow.numUpstreamflows = 1;
        }

        // For Ring the special requirement is Compute (first step of each
        // batch only), mirroring the forward pass.
        tmp_flow.requires.clear();
        if (i % numStageperBatch == 0) {
            tmp_flow.requires[NeedCompute] = true;
        }

        // Walk the comm pairs in reverse with src/dest swapped, i.e. send
        // against the ring direction.
        for (auto comm_pair = tmp_taskboard.stage_info[0].comm_pairs.rbegin();
            comm_pair != tmp_taskboard.stage_info[0].comm_pairs.rend();
                ++comm_pair) {
            tmp_flow.srcIdx =
                topos->getWorkersTopo()->getNodeId(comm_pair->second);
            tmp_flow.destIdx =
                topos->getWorkersTopo()->getNodeId(comm_pair->first);
            tmp_flow.flowId = tmp_taskboard.flows_count++;
            tmp_flow.downstream_flows.clear();
            // Same queue-based downstream chaining as the forward pass.
            // (Reference instead of the old copy: the front is modified,
            // committed, and popped immediately — identical behavior, one
            // fewer Flow copy, and consistent with the forward pass.)
            if (pairing_queue.size() >=
                tmp_taskboard.stage_info[0].comm_pairs.size()) {
                Flow &alloc_flow = pairing_queue.front();
                assert(alloc_flow.destIdx == tmp_flow.srcIdx &&
                    alloc_flow.srcIdx != tmp_flow.destIdx);

                alloc_flow.downstream_flows.push_back(tmp_flow.flowId);
                tmp_taskboard.stage_info[0].flows_on_gpu
                    [alloc_flow.srcIdx].push_back(alloc_flow);
                EV << "Push back flow " << alloc_flow.srcIdx <<
                    "<-->" << alloc_flow.destIdx;
                EV << "Special requires: " <<
                    alloc_flow.requires.size() << endl;
                EV << "With downstream flow src " << tmp_flow.srcIdx <<
                    "; dest " << tmp_flow.destIdx << endl;
                pairing_queue.pop();
            }
            if ( (tmp_flow.flowId + 1) %
                tmp_taskboard.stage_info[0].comm_pairs.size() == 0 ) {
                // The very last flow of the task gets the -1 sentinel
                // instead of a (nonexistent) downstream flow id.
                if (tmp_flow.flowId == 2*flowNum-1) {
                    tmp_flow.downstream_flows.push_back(-1);
                } else {
                    tmp_flow.downstream_flows.push_back(tmp_flow.flowId + 1);
                }
                tmp_taskboard.stage_info[0].flows_on_gpu
                    [tmp_flow.srcIdx].push_back(tmp_flow);
                EV << "Push back flow " << tmp_flow.srcIdx <<
                    "<-->" << tmp_flow.destIdx;
                EV << "Special requires: " <<
                    tmp_flow.requires.size() << endl;
            } else {
                pairing_queue.push(tmp_flow);
            }
        }
    }
    // Commit the backward pass's leftover flows as well.
    while (!pairing_queue.empty()) {
        auto remain_flow = pairing_queue.front();
        tmp_taskboard.stage_info[0].flows_on_gpu
            [remain_flow.srcIdx].push_back(remain_flow);
        EV << "Push back flow " << remain_flow.srcIdx <<
            "<-->" << remain_flow.destIdx << endl;
        EV << "Special requires: " << remain_flow.requires.size() << endl;
        pairing_queue.pop();
    }
}
}  // namespace ai_sim
