// "Copyright [2021] <Copyright Shengkai Lin>"

#include "task_generator.h"

#include <memory>
#include <utility>


namespace ai_sim {

Define_Module(TaskGenerator);

void TaskGenerator::initialize() {
    tmp_tasktype = TaskType(static_cast<int>(par("tmp_tasktype")));
    // error("tasktype %d", tmp_tasktype);
    maxnumGpus = par("maxnumGpus");
    maxnumBatches = par("maxnumBatches");
    modelsize = par("modelsize");
    compute_time = par("compute_time");
    task_interval = par("task_interval");
    total_tasks = 0;

    cModule * topo_module = getModuleByPath("^.topo_manager");
    topos = omnetpp::check_and_cast<TopoManager *> (topo_module);


    omnetpp::cModule *res_sch_modu = getParentModule()->getSubmodule("resourcescheduler");
    resource_schuler = dynamic_cast<ResourceScheduler*> (res_sch_modu);

    net_control = dynamic_cast<NetworkController*>(getParentModule()->getSubmodule("networkcontroller"));

    scheduleAt(omnetpp::simtime_t(0), new SelfMsg);
}


Task TaskGenerator::gen_fully_utilized_task() {
    // Build a task that uses the configured maximum number of GPUs and
    // batches; the running task counter doubles as the task id.
    const int gpu_count = maxnumGpus;
    const int batch_count = maxnumBatches;
    Task generated = {.taskId = total_tasks,
                      .tasktype = tmp_tasktype,
                      .numGpus = gpu_count,
                      .numBatches = batch_count,
                      .modelsize = modelsize,
                      .compute_time = compute_time};
    ++total_tasks;
    EV << "A TASK GENERATED" << omnetpp::endl;
    return generated;
}



void TaskGenerator::gen_comm_pair(
    taskboard& tmp_taskboard, AllreduceAlgorithm* algo) {
    // Delegate communication-pair creation to the selected allreduce
    // algorithm, which inserts the pairs one by one into the flow_pattern
    // map. Only RingAllReduce is supported so far; its ring is
    // bidirectional.
    algo->generateCommPairs(tmp_taskboard, topos);
    EV << "Finished gen comm pairs" << std::endl;
}



void TaskGenerator::gen_flows(
    taskboard& tmp_taskboard, AllreduceAlgorithm* algo) {
    // Ask the algorithm to emit the per-GPU flows for this task. The queue
    // is sized to the number of comm pairs plus one, and the flows produced
    // depend on the task type.
    algo->generateFLows(tmp_taskboard, topos);
    EV << "Finished generate flows for gpus" << std::endl;
}



void TaskGenerator::allocAllFlows(taskboard& tmp_taskboard) {
    // Merge the per-stage flow lists into one combined flow vector per GPU,
    // then hand each GPU's combined list to the network controller.
    map<int, FlowVector> flows_per_gpu;
    for (auto& stage : tmp_taskboard.stage_info) {
        for (auto& [gpu_id, stage_flows] : stage.flows_on_gpu) {
            auto& merged = flows_per_gpu[gpu_id];
            merged.insert(merged.end(), stage_flows.begin(), stage_flows.end());
        }
    }
    for (auto& [gpu_id, merged_flows] : flows_per_gpu) {
        net_control->flow_ctrl_msg_send(gpu_id, merged_flows,
                                        tmp_taskboard.the_task);
    }
    EV << "Finished alloc flows" << std::endl;
}




void TaskGenerator::handleMessage(omnetpp::cMessage *msg) {
    // Only the first self-message triggers generation: with total_tasks < 1
    // exactly one task is produced per run; later ticks fall through and
    // the message is discarded.
    if (msg->isSelfMessage() && total_tasks < 1) {
        Task temp_task_v = gen_fully_utilized_task();

        taskboard tmp_taskboard = {
            .the_task = temp_task_v,
            .flows_count = 0,
            .start_time = omnetpp::simTime(),
            .working_workers = temp_task_v.numGpus
        };

        // Own the algorithm object via unique_ptr so it is released even if
        // one of the setup calls below raises an error (the original raw
        // new/delete pair leaked on any early exit).
        std::unique_ptr<AllreduceAlgorithm> allreduce_algo(
            AllreduceAlgorithm::get(tmp_taskboard.the_task.tasktype));

        resource_schuler->alloc_gpu_resource(tmp_taskboard);

        gen_comm_pair(tmp_taskboard, allreduce_algo.get());
        gen_flows(tmp_taskboard, allreduce_algo.get());

        net_control->alloc_taskroutes(tmp_taskboard);

        // Allocate all flows to gpus at first
        allocAllFlows(tmp_taskboard);

        taskboards.insert(
            std::make_pair(tmp_taskboard.the_task.taskId, tmp_taskboard));

        // Reuse the self-message for the next generation tick.
        scheduleAt(omnetpp::simTime() + task_interval, msg);
    } else {
        delete msg;
    }
}



void TaskGenerator::release_task(TaskMsg *ta_msg)
{
    // Invoked directly from another module's context, hence Enter_Method.
    Enter_Method("release_task");
    try_release_task(ta_msg->getNow_task(), ta_msg);
}



void TaskGenerator::try_release_task(const Task& tmp_task, TaskMsg* to_delete) 
{
    taskboard the_taskboard;
    // Find the taskboard and erase it
    if (taskboards.find(tmp_task.taskId) == taskboards.end()) {
        error("NO SUCH taskboard, task id %d", tmp_task.taskId);
    } else if (taskboards[tmp_task.taskId].working_workers >1) {
        taskboards[tmp_task.taskId].working_workers--;
        EV << "Remaining workers " <<
            taskboards[tmp_task.taskId].working_workers<<" on task:"<<tmp_task.taskId << std::endl;
    } else {
        the_taskboard = taskboards[tmp_task.taskId];
        get_statistics(the_taskboard);
        // Release links and Gpus
        for (auto& stage : the_taskboard.stage_info) {
            for (auto used_link_of_stage : stage.used_links) {
                used_link_of_stage->enable();
            }

            for (auto used_gpu : the_taskboard.used_gpus) {
                used_gpu->enable();
            }
        }

        taskboards.erase(tmp_task.taskId);
        if (tmp_task.taskId == 0) {
        // error("FINISHED");
	    delete to_delete;
            endSimulation();
        }
    }
}


void TaskGenerator::get_statistics(const taskboard& the_taskboard) {
    std::cout << "\n*********************" << std::endl;
    std::cout << "The task info :" << std::endl;
    std::cout << "GPU number = " << the_taskboard.the_task.numGpus << std::endl;
    std::cout << "Batches number = " << the_taskboard.the_task.numBatches << std::endl;
    std::cout << "Model size = " << the_taskboard.the_task.modelsize << std::endl;
    std::cout << "Compute time = " << the_taskboard.the_task.compute_time << std::endl;
    std::cout << "The simulation time of the task is " <<
        omnetpp::simTime() - the_taskboard.start_time << std::endl;
    std::cout << "Congratulations!" << std::endl;
    std::cout << "*********************\n" << std::endl;
    recordScalar("GPU number", the_taskboard.the_task.numGpus);
    recordScalar("Batches number", the_taskboard.the_task.numBatches);
    recordScalar("Model size", the_taskboard.the_task.modelsize);
    recordScalar("Compute time", the_taskboard.the_task.compute_time);
    recordScalar("The simulation time",
        omnetpp::simTime() - the_taskboard.start_time);
}


// Module teardown hook; intentionally empty — per-task statistics are
// already recorded in get_statistics() when each task completes.
void TaskGenerator::finish() {
}


}  // namespace ai_sim
