#include <thread>
#include <chrono>
#include <memory>
#include <iomanip>
#include <unistd.h>
#include <iostream>
#include <pthread.h>
#include <sys/time.h>
#include <sched.h>
#include "data.h"
#include "executor.h"
#include "util.h"
#include "common.h"

namespace sp{

Executor::Executor(App* app, short id, std::vector<Function*> functions, BlockingQueue<char>* receiver, BlockingQueue<char>* sender){
    // Build an executor that is always pinned to a logical core derived
    // from its id. Takes non-owning pointers to the app and the two
    // command queues; owns the Profile object it may allocate.
    this->id = id;
    this->lcore_id = id%80;
    // This overload always derives a core id, so the child should be
    // pinned in start(). Previously willBindLcore was left uninitialized
    // here and start() read an indeterminate value.
    this->willBindLcore = true;
    this->gpu_id = -1;
    this->functions = functions;
    this->receiver = receiver;
    this->sender = sender;
    this->app = app;
    this->sharedMemory = nullptr;
    this->gpu_sharedMemory = nullptr;
    assert(functions.size() > 0);
    if(PROFILE){
        profile = new Profile();
        profile->bindExecutor(this);
    }else{
        profile = nullptr;
    }
    // Derive a human-readable name from the bound functions and record
    // whether any of them needs the Python interpreter or a GPU.
    name = "";
    Python_Executor = false;
    Gpu_Executor = false;
    for(auto function : functions){
        name += function->getName() + " ";
        if(function->isPythonFunction())
            Python_Executor = true;
        if(function->isGpuFunction())
            Gpu_Executor = true;
    }
    if(name != "")
        name = name.substr(0,name.size()-1); // drop trailing space
    // Open the per-executor log file when debugging.
    if(DEBUG)
        fout.open("logs/" + std::to_string(id) + ".log");
}

Executor::Executor(App* app, short id, std::vector<Function*> functions, BlockingQueue<char>* receiver, BlockingQueue<char>* sender, bool willBindLcore)
    : Executor(app, id, functions, receiver, sender){
    // Delegate all common setup to the 5-argument constructor, then apply
    // the caller's core-binding choice. Delegation also removes the
    // duplicated body and fixes gpu_id, which this overload previously
    // left uninitialized.
    this->willBindLcore = willBindLcore;
    if(!willBindLcore)
        this->lcore_id = -1; // no core pinning requested
}

Executor::~Executor(){
    // Release the profiler allocated in the constructor when PROFILE is
    // set — it was previously leaked. delete on nullptr is a no-op.
    delete profile;
    profile = nullptr;
    // Close the per-executor log file opened in the constructor.
    if(DEBUG)
        fout.close();
}

bool Executor::hasPythonFunction(){
    return Python_Executor;
}

bool Executor::hasGpuFunction(){
    return Gpu_Executor;
}

short Executor::getId(){
    return id;
}

pid_t Executor::getPid(){
    return pid;
}

std::string Executor::getName(){
    return name;
}

void Executor::setGpuId(short gpu_id){
    // Assign the GPU this executor will use; only meaningful for
    // executors that contain at least one GPU function.
    assert(Gpu_Executor == true);
    this->gpu_id = gpu_id;
    std::cout << "executor " << id <<" gpu id: " << gpu_id << std::endl;
}

void Executor::bindSharedMemoryManager(SharedMemoryManager* sharedMemory){
    // Attach the CPU shared-memory pool backing malloc()/free()/freeAll().
    this->sharedMemory = sharedMemory;
}

void Executor::bindGpuSharedMemoryManager(Gpu_SharedMemoryManager* gpu_sharedMemory){
    // Attach the GPU shared-memory pool backing the device-memory helpers.
    this->gpu_sharedMemory = gpu_sharedMemory;
}

void Executor::start(){
    /**
        Fork a worker process for this executor.
        Parent: records the child's pid and optionally pins it to a core.
        Child: sets up the Python/GPU environments, starts every function,
        runs the receive -> handle -> send loop until all functions finish,
        and coordinates with the main process over the sender/receiver
        queues ("complete"/"execute"/"finished"/"profile"/"profiled").
    **/
    pid_t pid = fork();
    if(pid){
        // Parent process: remember the child's pid.
        this->pid = pid;
        // Pin the child to its logical core, if requested.
        if(willBindLcore){
            cpu_set_t mask;
            CPU_ZERO(&mask);            // clear the affinity set
            CPU_SET(lcore_id, &mask);   // add the chosen core
            if(sched_setaffinity(pid, sizeof(mask), &mask) == -1){
                std::cout << "warning: could not set CPU affinity, continuing..." << std::endl;
            }
        }
    }else{
        // Child process.
        // Bring up the embedded Python interpreter when any function needs it.
        if(Python_Executor){
            Py_Initialize();
            if(DEBUG)
                fout << "init python env." << std::endl;
        }
        if(Gpu_Executor){
            // Full CUDA environment initialization for this executor's GPU.
            gpu_sharedMemory->cuda_env_init(gpu_id);
            // The first executor on each GPU allocates and broadcasts the
            // shared cache; every other executor on that GPU receives it.
            if(this->app->isFirstGpuExecutor(id)){
                sleep(this->app->getBeforeWaitTime(id));
                gpu_sharedMemory->allotSharedCache();
                gpu_sharedMemory->sendSharedCache();
                sleep(this->app->getAfterWaitTime(id));
            }else{
                sleep(this->app->getBeforeWaitTime(id));
                gpu_sharedMemory->recvSharedCache();
                sleep(this->app->getAfterWaitTime(id));
            }
        }else{
            sleep(this->app->getTotalWaitTime());
        }
        // Start every bound function.
        for(std::vector<Function*>::iterator it = functions.begin(); it != functions.end(); it++){
            Function* function = *it;
            function->start();
        }
        if(DEBUG)
            fout << "funcs init completes." << std::endl;
        // Tell the main process initialization is done.
        char signal[10] = "complete";
        sender->wait_enqueue(signal, 10);
        // The main process replies with the executor command.
        char cmd[10];
        receiver->wait_dequeue(cmd, 10);
        if(strcmp(cmd, "execute") == 0){
            if(DEBUG)
                fout << "start execute." << std::endl;
            // Working buffers, reused (cleared) across iterations.
            std::vector<std::vector<Data*>> data;
            std::vector<Data*> data_out;
            // Main processing loop.
            while(true){
                // Exit once every function has finished.
                if(functions.size() == 0)
                    break;
                // Round-robin over the remaining functions.
                for(std::vector<Function*>::iterator it = functions.begin(); it != functions.end();){
                    Function* function = *it;
                    data.clear();
                    data_out.clear();

                    // Receive. If nothing was available, try the next
                    // function after a short pause.
                    if(!function->recvFromQueues(data)){
                        it++;
                        // FIX: was sleep(0.001) — sleep() takes whole
                        // seconds, so the argument truncated to 0 and the
                        // loop busy-spun. usleep gives the intended 1 ms.
                        usleep(1000);
                        continue;
                    }
                    if(DEBUG){
                        fout << function->getName() << ": " << function->getCnt() << " after recv" << std::endl;
                        fout << function->getName() << " recv size: " << data.size() << std::endl;
                        for(size_t i = 0; i < data.size(); i++){
                            for(size_t j = 0; j < data[i].size(); j++){
                                fout << data[i][j] << " ";
                            }
                            fout << std::endl;
                        }
                    }
                    // A non-head function that received an empty batch is
                    // finished: run its final processing and drop it.
                    if(!function->isHeadFunction() && data.size() == 0){
                        function->finish();
                        delete function;
                        it = functions.erase(it);
                        continue;
                    }

                    // Process.
                    if(DEBUG)
                        fout << function->getName() << ": " << function->getCnt() << " before handle" << std::endl;
                    function->handle(data, data_out);
                    if(DEBUG)
                        fout << function->getName() << ": " << function->getCnt() << " after handle" << std::endl;

                    // Head function: DATA_END marks the end of the stream.
                    if(function->isHeadFunction() && data_out.size() != 0 && data_out[0] == DATA_END){
                        function->finish();
                        delete function;
                        it = functions.erase(it);
                        continue;
                    }

                    // Any other function that reports completion.
                    if(function->isEnd()){
                        function->finish();
                        delete function;
                        it = functions.erase(it);
                        continue;
                    }

                    // Send.
                    if(DEBUG){
                        fout << function->getName() << ": " << function->getCnt() << " before send" << std::endl;
                        fout << function->getName() << " send size: " << data_out.size() << std::endl;
                        for(size_t i = 0; i < data_out.size(); i++){
                            fout << data_out[i] << std::endl;
                        }
                    }
                    function->sendToQueues(data_out);
                    if(DEBUG)
                        fout << function->getName() << ": " << function->getCnt() << " after send" << std::endl;
                    it++;
                }
            }
        }
        if(DEBUG)
            fout << "execute ended." << std::endl;
        // Tell the main process execution is done.
        char status[10] = "finished";
        sender->wait_enqueue(status, 10);
        // Wait for the profile command.
        receiver->wait_dequeue(cmd, 10);
        if(strcmp(cmd, "profile") == 0){
            // One profile pass per process.
            if(profile){
                profile->profile();
            }
            // Tell the main process profiling is done.
            strcpy(status, "profiled");
            sender->wait_enqueue(status, 10);
        }else{
            std::cout << cmd << std::endl;
            assert(false);
        }
        // Release GPU resources.
        if(Gpu_Executor && app->isFirstGpuExecutor(id))
            gpu_sharedMemory->freeSharedCache();
        // Tear down the Python interpreter.
        // FIX: this was guarded by Gpu_Executor, so CPU-only Python
        // executors never finalized and non-Python GPU executors called
        // Py_Finalize without ever calling Py_Initialize.
        if(Python_Executor)
            Py_Finalize();
        std::cout << name << " exit." << std::endl;
        // NOTE(review): exits with status 1 on this normal path — confirm
        // the parent does not interpret nonzero status as failure before
        // changing it to exit(0).
        exit(1);
    }
}

void Executor::addFunction(Function* function){
    // Register one more function with this executor (takes ownership:
    // the function is deleted when it finishes or is removed).
    this->functions.push_back(function);
}

void Executor::delFunction(std::string function_name){
    // Remove (and delete) the first bound function whose name matches.
    // No-op when no function has that name.
    for(size_t i = 0; i < functions.size(); ++i){
        if(functions[i]->getName() == function_name){
            delete functions[i];
            functions.erase(functions.begin() + i);
            return;
        }
    }
}

char* Executor::malloc(uint8_t type){
    // Allocate a shared-memory block of the given pool type and log the
    // allocation. Requires a bound SharedMemoryManager.
    if(sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell off the end of a value-returning function
        // here — undefined behavior when asserts are compiled out.
        return nullptr;
    }
    char* addr = sharedMemory->malloc(type);
    sharedMemory->writeToFile("malloc", name + ": " + std::to_string(id), (void*)addr);
    return addr;
}

char* Executor::malloc(int width, int height){
    // Allocate a shared-memory block big enough for a width x height
    // image, rounded up to the smallest supported resolution tier
    // (960x540, 1280x720, 1920x1080, 3840x2160). Logs the allocation.
    if(sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell through and dereferenced the null manager
        // with an uninitialized addr when asserts were compiled out.
        return nullptr;
    }
    uint8_t type;
    if(width <= 960 && height <= 540){
        type = Cv_Mat_Data_960_540_Memory;
    }else if(width <= 1280 && height <= 720){
        type = Cv_Mat_Data_1280_720_Memory;
    }else if(width <= 1920 && height <= 1080){
        type = Cv_Mat_Data_1920_1080_Memory;
    }else if(width <= 3840 && height <= 2160){
        type = Cv_Mat_Data_3840_2160_Memory;
    }else{
        // Larger than 4K is unsupported.
        std::cout << "executor malloc error: " << width << " " << height << std::endl;
        assert(false);
        // FIX: previously fell through with uninitialized type/addr.
        return nullptr;
    }
    char* addr = sharedMemory->malloc(type);
    sharedMemory->writeToFile("malloc", name + ": " + std::to_string(id), (void*)addr);
    return addr;
}

char* Executor::malloc(size_t size){
    // Allocate a shared-memory block of at least `size` bytes and log the
    // allocation. Requires a bound SharedMemoryManager.
    if(sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell off the end of a value-returning function
        // here — undefined behavior when asserts are compiled out.
        return nullptr;
    }
    char* addr = sharedMemory->malloc(size);
    sharedMemory->writeToFile("malloc", name + ": " + std::to_string(id), (void*)addr);
    return addr;
}

void Executor::free(char* ptr){
    // Return a block to the shared memory pool, logging the release first.
    // Aborts (assert) when no manager is bound.
    assert(sharedMemory != nullptr);
    if(sharedMemory != nullptr){
        sharedMemory->writeToFile("free", name + ": " + std::to_string(id), (void*)ptr);
        sharedMemory->free(ptr);
    }
}

void Executor::freeAll(Data* ptr){
    /**
        Recursively release a Data node and everything it owns back to the
        shared memory pool. Aborts (assert) when no pool is bound or the
        node type is unknown.
    **/
    if(sharedMemory != nullptr){
        if(ptr->type == HEAD){
            // Free every node chained under the HEAD, then the HEAD itself.
            Data* p = ptr->next;
            while(p){
                // FIX: save the link before freeing p — the old code read
                // p->next after freeAll(p), a use-after-free.
                Data* next = p->next;
                freeAll(p);
                p = next;
            }
            free((char*)ptr);
        }else if(ptr->type == BATCH_TENSOR){
            // Free the payload and the chained tensor list, then the node.
            if(ptr->context.batch_tensor.data != nullptr)
                free((char*)ptr->context.batch_tensor.data);
            if(ptr->context.batch_tensor.tensor_head != nullptr)
                freeAll((Data*)(ptr->context.batch_tensor.tensor_head));
            free((char*)ptr);
        }else if(ptr->type == BATCH_GPU_TENSOR){
            // Free the chained tensor list, then the node itself.
            if(ptr->context.batch_gpu_tensor.tensor_head != nullptr)
                freeAll((Data*)(ptr->context.batch_gpu_tensor.tensor_head));
            // XXX: update here if BATCH_GPU_TENSOR gains other owned pointers
            free((char*)ptr);
        }else if(ptr->type == MAT){
            if(ptr->context.mat.data != nullptr)
                free((char*)(ptr->context.mat.data));
            free((char*)ptr);
        }else if(ptr->type == RECT){
            free((char*)ptr);
        }else if(ptr->type == TENSOR){
            if(ptr->context.tensor.data != nullptr)
                free((char*)(ptr->context.tensor.data));
            free((char*)ptr);
        }else if(ptr->type == GPU_TENSOR){
            // XXX: update here if GPU_TENSOR gains other owned pointers
            free((char*)ptr);
        }else if(ptr->type == STRING){
            free((char*)ptr);
        }else{
            assert(false); // unknown Data type
        }
    }else{
        assert(false); // no shared memory manager bound
    }
}

Profile* Executor::getProfile(){
    // profile is nullptr when profiling is disabled, so it can be
    // returned directly — the old if/else was redundant.
    return profile;
}
                       
App* Executor::getApp(){
    // Owning application, set at construction.
    return this->app;
}

short Executor::getStreamId(size_t pos){
    // Map a device-memory position to its stream id.
    if(gpu_sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell off the end of a value-returning function
        // here — undefined behavior when asserts are compiled out.
        return -1;
    }
    return gpu_sharedMemory->getStreamId(pos);
}

size_t Executor::getDeviceMemoryPos(short stream_id){
    // Start position of the given stream's device memory region.
    if(gpu_sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell off the end of a value-returning function
        // here — undefined behavior when asserts are compiled out.
        return 0;
    }
    return gpu_sharedMemory->getPos(stream_id);
}

size_t Executor::getDeviceMemorySize(short stream_id){
    // Size of the given stream's device memory region.
    if(gpu_sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell off the end of a value-returning function
        // here — undefined behavior when asserts are compiled out.
        return 0;
    }
    return gpu_sharedMemory->getSize(stream_id);
}

short Executor::mallocDeviceMemory(){
    // Acquire a free stream on this executor's GPU; returns -1 when the
    // underlying allocator has no free stream.
    if(gpu_sharedMemory == nullptr){
        assert(false);
        // FIX: previously fell off the end of a value-returning function
        // here — undefined behavior when asserts are compiled out.
        return -1;
    }
    return gpu_sharedMemory->malloc(gpu_id);
}

void Executor::freeDeviceMemory(short stream_id){
    // Release the stream slot handed out by mallocDeviceMemory().
    // Aborts (assert) when no GPU memory manager is bound.
    assert(gpu_sharedMemory != nullptr);
    if(gpu_sharedMemory != nullptr)
        gpu_sharedMemory->free(gpu_id, stream_id);
}

void Executor::check(){
    for(auto function : functions){
        function->check();
    }
}

void Executor::printMsg(){
    for(auto function : functions){
        function->printCurrentTypeMsg();
    }
}

};