#include "app.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <cassert>
#include <cstring>
#include <iostream>
#include <string>

#include "common.h"
#include "executor.h"
#include "function.h"

namespace sp{

// // 根据Fs_table和最大核数自动建立部署表从而建立App
// App::App(short id, std::vector<std::tuple<std::string, std::vector<int>, std::vector<int>, std::vector<std::string>, int>>& Fs_table, int max_cores, SharedMemoryManager* sharedMemory, Gpu_SharedMemoryManager* gpu_sharedMemory){
//     this->id = id;
//     /** 建立部署表 **/
//     std::map<int, std::vector<int>> D_table; // 部署表，每个stage负责哪几号task
//     /**
//         算法
//     **/

// /** 建立App **/
//     // 建立Qs
//     for(int task_id = 0; task_id < Fs_table.size(); task_id++){
//         for(auto j : std::get<2>(Fs_table[task_id])){
//             int next_task_id = j;
//             std::string name = "queues/app" + std::to_string(id) + "_q_" + std::to_string(task_id) + "_" + std::to_string(next_task_id);
//             BlockingQueue<Data*>* q = new BlockingQueue<Data*>(1000);
//             Qs[name] = q;
//         }
//     }
//     // 根据D_table建立function和functions_map，并建立executor，然后绑定executor，Queues，profile，设置batchsize
//     for(auto i : D_table){
//         // 建立Fucntion
//         for(auto j : i.second){
//             std::string function_name = std::get<0>(Fs_table[j]);
//             std::vector<std::string> params = std::get<3>(Fs_table[j]);
//             Function* function = Function::Create(function_name, params);
//             // bind Queues
//             std::vector<BlockingQueue<Data*>*> pre_Qs, next_Qs;
//             for(auto k : std::get<1>(Fs_table[j])){
//                 int pre_task_id = k;
//                 std::string name = "queues/app" + std::to_string(id) + "_q_" + std::to_string(pre_task_id) + "_" + std::to_string(j);
//                 assert(Qs.find(name) != Qs.end());
//                 pre_Qs.push_back(Qs.find(name)->second);
//             }
//             for(auto k : std::get<2>(Fs_table[j])){
//                 int next_task_id = k;
//                 std::string name = "queues/app" + std::to_string(id) + "_q_" + std::to_string(j) + "_" + std::to_string(next_task_id);
//                 assert(Qs.find(name) != Qs.end());
//                 next_Qs.push_back(Qs.find(name)->second);
//             }
//             function->bindQueues(pre_Qs, next_Qs);
//             // 设置batch_size
//             int batch_size = std::get<4>(Fs_table[j]);
//             function->setBatchSize(batch_size);
//             // 加入function_map
//             functions_map[i.first].push_back(function);
//         }
//         // 建立Executor
//         BlockingQueue<char>* col_cmd = new BlockingQueue<char>(1000);
//         controllers.push_back(col_cmd);
//         BlockingQueue<char>* rep_stu = new BlockingQueue<char>(1000);
//         reporters.push_back(rep_stu);
//         Executor* executor = new Executor(this, i.first, functions_map[i.first], col_cmd, rep_stu);
//         executor->bindSharedMemoryManager(sharedMemory);
//         executor->bindGpuSharedMemoryManager(gpu_sharedMemory);
//         executors.push_back(executor);
//         // bind Executor
//         for(auto function : functions_map[i.first]){
//             function->bindExecutor(executor);
//         }
//     }
//     sleep(5); // 确保管道文件描述符打开完毕
//     std::cout << "app " << id << " build." << std::endl;
// }

// App::App(short id, std::vector<std::tuple<std::string, std::vector<int>, std::vector<int>, std::vector<std::string>, int>>& Fs_table, std::map<int, std::vector<int>>& D_table, SharedMemoryManager* sharedMemory, Gpu_SharedMemoryManager* gpu_sharedMemory){
//     this->id = id;
//     /** 建立App **/
//     // 建立Qs
//     for(int task_id = 0; task_id < Fs_table.size(); task_id++){
//         for(auto j : std::get<2>(Fs_table[task_id])){
//             int next_task_id = j;
//             std::string name = "queues/app" + std::to_string(id) + "_q_" + std::to_string(task_id) + "_" + std::to_string(next_task_id);
//             BlockingQueue<Data*>* q = new BlockingQueue<Data*>(1000);
//             Qs[name] = q;
//         }
//     }
//     // 根据D_table建立function和functions_map，并建立executor，然后绑定executor，Queues，profile，设置batchsize
//     for(auto i : D_table){
//         // 建立Fucntion
//         for(auto j : i.second){
//             std::string function_name = std::get<0>(Fs_table[j]);
//             std::vector<std::string> params = std::get<3>(Fs_table[j]);
//             Function* function = Function::Create(function_name, params);
//             std::cout << function_name << " build. " << std::endl;
//             // bind Queues
//             std::vector<BlockingQueue<Data*>*> pre_Qs, next_Qs;
//             for(auto k : std::get<1>(Fs_table[j])){
//                 int pre_task_id = k;
//                 std::string name = "queues/app" + std::to_string(id) + "_q_" + std::to_string(pre_task_id) + "_" + std::to_string(j);
//                 assert(Qs.find(name) != Qs.end());
//                 pre_Qs.push_back(Qs.find(name)->second);
//             }
//             for(auto k : std::get<2>(Fs_table[j])){
//                 int next_task_id = k;
//                 std::string name = "queues/app" + std::to_string(id) + "_q_" + std::to_string(j) + "_" + std::to_string(next_task_id);
//                 assert(Qs.find(name) != Qs.end());
//                 next_Qs.push_back(Qs.find(name)->second);
//             }
//             function->bindQueues(pre_Qs, next_Qs);
//             // 设置batch_size
//             int batch_size = std::get<4>(Fs_table[j]);
//             function->setBatchSize(batch_size);
//             // 加入function_map
//             functions_map[i.first].push_back(function);
//         }
//         // 建立Executor
//         BlockingQueue<char>* col_cmd = new BlockingQueue<char>(1000);
//         controllers.push_back(col_cmd);
//         BlockingQueue<char>* rep_stu = new BlockingQueue<char>(1000);
//         reporters.push_back(rep_stu);
//         Executor* executor = new Executor(this, i.first, functions_map[i.first], col_cmd, rep_stu);
//         executor->bindSharedMemoryManager(sharedMemory);
//         executor->bindGpuSharedMemoryManager(gpu_sharedMemory);
//         executors.push_back(executor);
//         // bind Executor
//         for(auto function : functions_map[i.first]){
//             function->bindExecutor(executor);
//         }
//     }
//     sleep(5); // 确保管道文件描述符打开完毕
//     std::cout << "app " << id << " build." << std::endl;
// }

// Build an App from a ready-made executor-id -> functions mapping.
// One Executor (child process) is created per map entry; each gets a private
// command queue (app -> executor) and status queue (executor -> app).
App::App(short id, std::map<short, std::vector<Function*>>& Fs_map, SharedMemoryManager* smm, Gpu_SharedMemoryManager* gpu_smm){
    this->id = id;
    // Iterate by const reference: each mapped value is a std::vector<Function*>
    // and iterating by value would copy the whole pair every iteration.
    for(const auto& entry : Fs_map){
        short executor_id = entry.first;
        const std::vector<Function*>& functions = entry.second;
        functions_map[executor_id] = functions;
        // Create the control (command) and report (status) channels.
        BlockingQueue<char>* col_cmd = new BlockingQueue<char>(1000);
        controllers.push_back(col_cmd);
        BlockingQueue<char>* rep_stu = new BlockingQueue<char>(1000);
        reporters.push_back(rep_stu);
        // Create the Executor and hand it its functions and channels.
        Executor* executor = new Executor(this, executor_id, functions_map[executor_id], col_cmd, rep_stu);
        executor->bindSharedMemoryManager(smm);
        executor->bindGpuSharedMemoryManager(gpu_smm);
        executors.push_back(executor);
        // Give every function a back-pointer to the executor that runs it.
        for(auto function : functions){
            function->bindExecutor(executor);
        }
    }
    std::cout << "app " << id << " build." << std::endl;
}

// Same as the 4-argument constructor, but forwards `willBindLcore` to every
// Executor so it can (optionally) pin its process to a logical core.
App::App(short id, std::map<short, std::vector<Function*>>& Fs_map, SharedMemoryManager* smm, Gpu_SharedMemoryManager* gpu_smm, bool willBindLcore){
    this->id = id;
    // Iterate by const reference to avoid copying each (id, vector) pair.
    for(const auto& entry : Fs_map){
        short executor_id = entry.first;
        const std::vector<Function*>& functions = entry.second;
        functions_map[executor_id] = functions;
        // Per-executor control (command) and report (status) channels.
        BlockingQueue<char>* col_cmd = new BlockingQueue<char>(1000);
        controllers.push_back(col_cmd);
        BlockingQueue<char>* rep_stu = new BlockingQueue<char>(1000);
        reporters.push_back(rep_stu);
        Executor* executor = new Executor(this, executor_id, functions_map[executor_id], col_cmd, rep_stu, willBindLcore);
        executor->bindSharedMemoryManager(smm);
        executor->bindGpuSharedMemoryManager(gpu_smm);
        executors.push_back(executor);
        // Back-pointer from each function to its executor.
        for(auto function : functions){
            function->bindExecutor(executor);
        }
    }
    std::cout << "app " << id << " build." << std::endl;
}

// Build an App and additionally assign executors to GPUs.
// Gpus_map maps a gpu id -> the list of executor indexes running on that GPU.
App::App(short id, std::map<short, std::vector<Function*>>& Fs_map, std::map<short, std::vector<int>>& Gpus_map, SharedMemoryManager* smm, Gpu_SharedMemoryManager* gpu_smm){
    this->id = id;
    // Copy the whole maps once; the per-entry re-assignment the old code did
    // inside the loop was redundant after this.
    this->functions_map = Fs_map;
    this->gpus_map = Gpus_map;
    // Iterate by const reference to avoid copying each (id, vector) pair.
    for(const auto& entry : Fs_map){
        short executor_id = entry.first;
        const std::vector<Function*>& functions = entry.second;
        // Per-executor control (command) and report (status) channels.
        BlockingQueue<char>* col_cmd = new BlockingQueue<char>(1000);
        controllers.push_back(col_cmd);
        BlockingQueue<char>* rep_stu = new BlockingQueue<char>(1000);
        reporters.push_back(rep_stu);
        Executor* executor = new Executor(this, executor_id, functions_map[executor_id], col_cmd, rep_stu);
        executor->bindSharedMemoryManager(smm);
        executor->bindGpuSharedMemoryManager(gpu_smm);
        executors.push_back(executor);
        // Back-pointer from each function to its executor.
        for(auto function : functions){
            function->bindExecutor(executor);
        }
    }
    // Tag each executor with the GPU it is assigned to.
    // NOTE(review): renamed the loop variable (it used to shadow the member
    // `id`). The value is used as an index into `executors`, which assumes the
    // executor ids in Fs_map are dense 0..n-1 in order — confirm with callers.
    for(const auto& gpu_entry : Gpus_map){
        short gpu_id = gpu_entry.first;
        for(int executor_index : gpu_entry.second){
            executors[executor_index]->setGpuId(gpu_id);
        }
    }
    std::cout << "app " << id << " build." << std::endl;
}

void App::check(){
    for(auto executor : executors){
        executor->check();
    }
}

void App::printMsg(){
    for(auto executor : executors){
        executor->printMsg();
    }
}

// Start every executor, then block until each one has reported that its
// start-up finished (by sending "complete" on its reporter queue).
void App::init(){
    for(auto executor : executors){
        executor->start();
    }
    // Wait for an acknowledgement from every executor before returning.
    for(size_t i = 0; i < reporters.size(); i++){
        char status[10];
        reporters[i]->wait_dequeue(status, 10);
        // Only log when the status is unexpected (old code had an empty
        // then-branch and logged in the else).
        if(strcmp(status, "complete") != 0){
            std::cout << "app recv info: " << status << " expect: complete" << std::endl;
        }
    }
    std::cout << "app " << id << " init completed." << std::endl;
}

void App::run(){
    for(auto controller : controllers){
        char command[10] = "execute";
        controller->wait_enqueue(command, 10);
    }
    std::cout << "app " << id << " start." << std::endl;
}

App::~App(){
    // Intentionally empty (original note: free the memory of Qs, Functions
    // and Executors).
    // TODO(review): the BlockingQueues in controllers/reporters and the
    // Executors are heap-allocated in the constructors above and never freed,
    // so this leaks. The Function objects arrive via Fs_map and the ownership
    // of Qs/Functions is not visible in this file — confirm who owns them
    // before adding deletes here, to avoid double-frees.
}

// Wait for every executor to finish, trigger its profiling pass, and finally
// reap the child processes. Aborts (assert) on any unexpected status string.
void App::waitForComplete(){
    // Drive each executor through finished -> profile -> profiled, in order.
    for(size_t i = 0; i < reporters.size(); i++){
        char status[10];
        reporters[i]->wait_dequeue(status, 10);
        if(strcmp(status, "finished") != 0){
            std::cout << status << std::endl;
            assert(false);
        }
        char command[10] = "profile";
        controllers[i]->wait_enqueue(command, 10);
        // Reuse the same buffer (old code shadowed `status` with a second
        // declaration inside the branch).
        reporters[i]->wait_dequeue(status, 10);
        if(strcmp(status, "profiled") != 0){
            std::cout << status << std::endl;
            assert(false);
        }
    }
    // Reap every child: waitpid blocks until the child exits (it sits in the
    // zombie (Z) state until reaped here), then verify it exited normally.
    for(auto executor : executors){
        int exit_status = -1;
        waitpid(executor->getPid(), &exit_status, 0);
        assert(WIFEXITED(exit_status) != 0);
    }
}

// Id of the first executor in creation order.
short App::getFirstExecutorId(){
    assert(!executors.empty());
    return executors.front()->getId();
}

// Id of the last executor in creation order.
short App::getLastExecutorId(){
    assert(!executors.empty());
    return executors.back()->getId();
}

int App::getGpuExecutorCount(){
    int count = 0;
    for(int i = 0; i < executors.size(); i++)
        if(executors[i]->hasGpuFunction())
            count++;
    return count;
}

// True iff executor_id is the first element of some GPU's executor list in
// gpus_map; GPUs with an empty list are skipped.
bool App::isFirstGpuExecutor(short executor_id){
    // Iterate by const reference: iterating by value would copy each
    // std::vector<int> on every iteration.
    for(const auto& entry : gpus_map){
        const std::vector<int>& ids = entry.second;
        if(ids.empty())
            continue;
        if(executor_id == ids.front())
            return true;
    }
    return false;
}

// Deprecated: must never be called.
bool App::isLastGpuExecutor(short executor_id){
    assert(false);
    // With NDEBUG the assert compiles out; falling off the end of a non-void
    // function is UB, so return an explicit value.
    return false;
}

// Wait time accumulated before executor_id: every executor that precedes it
// in gpus_map iteration order contributes 10 (units not stated in this file).
// Asserts if executor_id does not appear in gpus_map.
int App::getBeforeWaitTime(short executor_id){
    int time = 0;
    // Const reference avoids copying each std::vector<int> per iteration.
    for(const auto& entry : gpus_map){
        for(int eid : entry.second){
            if(eid == executor_id)
                return time;
            time += 10;
        }
    }
    assert(false); // no matching executor in gpus_map
    // With NDEBUG the assert compiles out; returning avoids falling off the
    // end of a non-void function (UB).
    return -1;
}

// Wait time remaining after executor_id: total minus what precedes it.
int App::getAfterWaitTime(short executor_id){
    int before = getBeforeWaitTime(executor_id);
    int total = getTotalWaitTime();
    return total - before;
}

// Total wait time across all GPUs: 10 per executor listed in gpus_map.
int App::getTotalWaitTime(){
    int total = 0;
    for(const auto& entry : gpus_map){
        // Invariant: exactly two processes per GPU — one inference process
        // and one data-transfer process.
        assert(entry.second.size() == 2);
        total += 10 * static_cast<int>(entry.second.size());
    }
    return total;
}

};