#include <iostream>
#include <fstream>
#include <sstream>
#include <cassert>
#include <algorithm>
#include <unordered_map>
#include <set>
#include <queue>
#include "doca_preprocess_unit.h"
#include "doca_brpc_unit.h"
#include "common_unit.h"
#include "dmatest.pb.h"
#include "debug_unit.h"
#include "pipeline_unit.h"

class DAGBuilder { //由于所有操作都在图上，这个class也相当于一个调度器，搞成一个单例吧，方便brpc的时候找
public:
    class PromiseWrapper {
    public:
        PromiseWrapper() : promise_(make_unique<std::promise<int>>()) {}
            std::future<int> GetFuture() {
            return promise_->get_future();
        }
        void SetValue(int value) {
            promise_->set_value(value);
        }
        void Reset() {
            promise_ = make_unique<std::promise<int>>();
        }

    private:
        std::unique_ptr<std::promise<int>> promise_;
    };
public:
    PromiseWrapper promise_wrappers_[DEVICE_TYPE_NUM]; //用于查看某个task是否完成

public:
    std::map<std::string, std::shared_ptr<MemoryNode>> memory_nodes; //名字对应一个memory node
    std::vector<std::shared_ptr<OperatorNode>> operator_nodes;
    std::vector<size_t> memory_offsets; // 用来统计分配内存给memory_node的offset
    // TODO:更有规则的分配内存，现在暂时直接memory_node 的id，就对应memory_offsets
    int memory_node_counter = 0;
    int operator_node_counter = 0;
    int total_batch_num = 0;
    int total_feature_num = 0;
    int init_ele_num = 1;
    void * memory_region_ptr; // 一开始分配一块巨大内存
    device_type_t dispatch_device_type; // 当前调度器位于哪个device上面

    // 新增：为每个逻辑名称记录版本号
    std::unordered_map<std::string, int> name_version_map;

    // all other utils
    DpaPreProcessUnit * dpaprocessunit; // 分配operator node给DPA处理
    CpuPreProcessUnit * cpuprocessunit; // 分配给CPU、ARM处理（是否可以用同一套代码？）
    BrpcUnit * brpcunit; // cpu，arm之间通信用。
    std::unique_ptr<PipelineUnit> pipelineunit; // for pipeline

    // task_assign[device_type][layer_id] 表示该device在这一层应该执行哪些任务。
    std::vector< std::vector<std::shared_ptr<OperatorNode> > > task_assign[DEVICE_TYPE_NUM];
    //sequence execute list 
    std::vector<std::shared_ptr<OperatorNode> > sequencial_list;

    // 每一个处理一个device。
    std::thread worker_thread[DEVICE_TYPE_NUM];
    std::atomic_int32_t execute_layer[DEVICE_TYPE_NUM];
    bool exit_flag[DEVICE_TYPE_NUM];

    static DAGBuilder & Instance() { //单例函数
        static DAGBuilder instance;
        return instance;
    }

    /* 申请一大段内存，不光用于doca dma，而且用于dpa一起使用*/
    void set_dispatch_type(device_type_t cur_device_type) {this->dispatch_device_type = cur_device_type;}
    void set_feature_num(int feature_num) {total_feature_num = feature_num;}
    void set_batch_num(int batch_num) {total_batch_num = batch_num;}
    void set_ele_num(int ele_num) {init_ele_num = ele_num;}

    /* 计算每个mem-node的size信息，得到memory_offset*/
    void memory_assign();

    // 当作为调度器所在device，作为client端
    void client_init();

    /* cpu作为server接收远程init*/
    void server_init(const rap::DmaUnitInit * req, rap::DmaUnitResponse * resp);

    /*处理远端发来的task*/
    void server_task_execute(std::vector<int> & op_node_list);

    /* for cpu->arm , just send id */
    void server_task_execute(int layer_id);

    /* cpu 预处理算子初始化*/
    void ProcessUnitInit();

    /* dpa 预处理算子初始化*/
    void dpaProcessUnitInit();

    /* brpc单元初始化+memory offset交给DMA内存malloc */
    void memory_region_init();
    
    /* task_assign[device-type][layer-id]给每个设备 每一层 分配好需要执行的操作节点 */
    /* 每一层的操作必须全部执行完，才执行下一层操作节点 */
    /* 以下三个函数，都按照task_assign[device-type][layer-id] 执行*/

    /* arm核的，直接串行执行*/
    void worker_arm();

    /* cpu，得把input-mem dma打包发过去
       将op-node id-list brpc传输给CPU执行
    */
    void worker_cpu();

    /* DPA 串行执行*/
    void worker_dpa();

    /* 朴素的dag图的分配执行，具体分配到哪个device在memory assign就指定了。*/
    void task_dispatch();

    /* 给一个task_assign的策略（task_assign vector) */
    /* 把每一层执行哪些节点预先分配，把每一层每个节点分配给哪个设备预先分配。*/
    void task_assign_simple(int ArmRatio=10, int CpuRatio=0, int DpaRatio=0);

    /* sync layer_id 3-device execute */
    void task_dispatch_layer(int layer_id);

    /* 按照assign-simple的方案执行，用promise-future同步设备执行情况 */
    void task_dispatch_parallel();

    /* 回收每个设备的线程 */
    void finish_thread();

    void start_thread();
    
    // 输出所有feature的值
    void feature_extract();

    /* 从初始化到调度整个流程 */
    void main(const std::string & filename, int feature_num, int batch_num, int ele_num, device_type_t DeviceType);

    /* 普通的pipeline分图方案 */
    void pipeline_dispatch(
        std::vector<int> & stage_thread_num,
        std::vector<std::vector<std::shared_ptr<OperatorNode> > > & subdag_list, 
        std::vector<device_type_t> & device_type_list,
        std::vector<stage_type_t> & stage_type_list,
        int & if_cross_device
    );

    /* 只包含一个stage的cpu baseline版本 */
    void pipeline_dispatch_baseline(
        std::vector<int> & stage_thread_num,
        std::vector<std::vector<std::shared_ptr<OperatorNode> > > & subdag_list, 
        std::vector<device_type_t> & device_type_list,
        std::vector<stage_type_t> & stage_type_list,
        int & if_cross_device
    );

    void pipeline_init(const std::string & filename, int feature_num, int batch_num, int ele_num, int if_cross_device);

    void pipeline_main(const std::string & filename, int feature_num, int batch_num, int ele_num, int if_cross_device);

    /* 从文件建图 */
    void build_from_file(const std::string& filename);

    /* 画图 */
    void export_to_dot(const std::string& filename);

    /* 初始化远端kvstore，建立brpc联系*/
    void Init_Kvstore();

    void sequence_assign();

    void sequence_execute();

    void Memory_Init();


private:
    std::shared_ptr<MemoryNode> get_or_create_memory(const std::string& name) {
        if (memory_nodes.count(name)) return memory_nodes[name];

        auto mem = std::make_shared<MemoryNode>();
        mem->name = name;
        mem->id = memory_node_counter++;

        mem->feature_start_id = 0;
        mem->feature_end_id = 1;
        
        mem->batch_start_id = 0;
        mem->batch_end_id = total_batch_num;

        mem->element_per_feature = init_ele_num;

        memory_nodes[name] = mem;
        return mem;
    }

    // 获取当前最新版本（不递增）
    std::string get_latest_versioned_name(const std::string& base) {
        int version = name_version_map[base];
        if (version == 0) {
            // 如果还没出现过，默认创建 #0
            name_version_map[base] = 1;
            return base + "#0";
        }
        return base + "#" + std::to_string(version - 1);
    }

    // 创建新版本（递增）
    std::string create_new_versioned_name(const std::string& base) {
        int version = name_version_map[base]++;
        return base + "#" + std::to_string(version);
    }

    size_t get_type_size(const std::string& dtype) {
        if (dtype == "double") return 8;
        if (dtype == "float32") return 4;
        if (dtype == "float64") return 8;
        if (dtype == "float") return 4;
        if (dtype == "int64") return 8;
        if (dtype == "int32") return 4;
        if (dtype == "int8") return 1;
        return 4;
    }
};

/* Walk the operator DAG in topological order (Kahn's algorithm), pin every
 * operator to a device, propagate element counts forward through the graph,
 * then compute each memory node's byte size and its offset in the big region.
 * Must run after build_from_file() and before memory_region_init(). */
void DAGBuilder::memory_assign() {
    std::srand(std::time(nullptr)); // only needed by the commented-out random device assignment below
    std::queue<std::shared_ptr<OperatorNode>> DagQueue; // topological-sort work queue

    std::vector<int> rd; // in-degree per operator id
    rd.resize(operator_node_counter);

    for (auto operator_node : operator_nodes) {
        rd[operator_node->id] = (int)operator_node->prev_ops.size();
        if (rd[operator_node->id] == 0) {
            DagQueue.push(operator_node);
        }
    }

    while (!DagQueue.empty()) {
        auto operator_node = DagQueue.front();
        DagQueue.pop();
        for (auto neighbor : (operator_node->next_ops) ) {
            --rd[neighbor->id];
            if (rd[neighbor->id] == 0) {
                DagQueue.push(neighbor);
            }
        }

        operator_node->device_type = device_type_t::DEVICE_ARM;
        // operator_node->device_type = device_type_t::DEVICE_CPU; // put everything on CPU to check correctness
        // operator_node->device_type = device_type_t::DEVICE_DPA; // put everything on DPA to check correctness
        // operator_node->device_type = static_cast<device_type_t>(std::rand() % DEVICE_TYPE_NUM); // random placement for testing

        auto mem_node = operator_node->next_mem_nodes[0]; // every operator has exactly one output memory node
        // Fix the dtypes of the memories before/after this operator.
        CommonUtils::memory_dtype_assign(operator_node);
        for (auto prev_mem_node : operator_node->prev_mem_nodes) {// TODO: n-ary operators later, incl. batch split / dtype
            // NOTE(review): with several inputs only the LAST one decides the
            // output element count — confirm that is intended.
            mem_node->element_per_feature = 
                CommonUtils::calc_ele(prev_mem_node->element_per_feature, operator_node->op_type);
        }
    }

    std::vector<size_t> tmp_mem_node; // per-node byte size, indexed by memory node id
    tmp_mem_node.resize(memory_node_counter);

    for (auto& [name, mem] : memory_nodes) {
        mem->total_size = (mem->feature_end_id - mem->feature_start_id) *
                            (mem->batch_end_id - mem->batch_start_id) *
                            mem->element_per_feature *
                            get_type_size(mem->dtype);

        // embedding_fetch expands every element into a full embedding vector
        if ( (int)mem->prev_opt_nodes.size() > 0 && mem->prev_opt_nodes[0]->op_type == "embedding_fetch") {
            mem->total_size *= EMBEDDING_DIM;
        }
        tmp_mem_node[mem->id] = mem->total_size;
    }


    size_t total_used_space = 0;
    for (int i = 0; i < memory_node_counter; ++i) { // prefix sums: memory_offsets[id] is node id's start offset
        memory_offsets.push_back(total_used_space);
        total_used_space += tmp_mem_node[i];
    } 
    memory_offsets.push_back(total_used_space); // sentinel entry: total region size
}

// When this device hosts the scheduler it acts as the brpc client:
// bring up the channel, then push the local DMA export info to the server.
void DAGBuilder::client_init() {
    brpcunit->InitBrpc();
    brpcunit->SendDmaInfo();
}

/* CPU acting as server for a remote init: export the local DMA descriptor
 * info from the request into the response. */
void DAGBuilder::server_init(const rap::DmaUnitInit * req, rap::DmaUnitResponse * resp) {
    brpcunit->export_info(req, resp);
}

/* Handle a task list sent from the remote scheduler: execute the operators
 * locally on the CPU unit, then DMA their OUTPUT memory nodes back to the
 * client (ARM) side.
 * op_node_list holds ids into operator_nodes. An empty list is a no-op
 * (previously this indexed op_node_list[0] unconditionally — UB on empty). */
void DAGBuilder::server_task_execute(std::vector<int> & op_node_list) {
    if (op_node_list.empty()) return; // nothing to run; guards op_node_list[0] below

    std::vector<std::shared_ptr<OperatorNode> > op_dispatch_node_list;
    std::vector<int> mem_dispatch_list; // output memory node ids to DMA back

    for (auto & op_node_id : op_node_list) {
        auto operator_node = this->operator_nodes[op_node_id];
        op_dispatch_node_list.push_back(operator_node);

        // Unlike the client path, the CPU server ships next_mem_nodes (outputs).
        for (auto mem_node : operator_node->next_mem_nodes) {
            mem_dispatch_list.push_back(mem_node->id);
        }
    }

    // Timer label: first op's type + id, reused for the dma phase below.
    std::string task_execute_str = 
        this->operator_nodes[op_node_list[0]]->op_type 
        + std::to_string(this->operator_nodes[op_node_list[0]]->id);

    xmh::Timer task_execute_time(task_execute_str);
    cpuprocessunit->task_execute(op_dispatch_node_list);
    task_execute_time.end();

    xmh::Timer task_dma_time(task_execute_str + "dma");
    brpcunit->task_dispatch_mem(mem_dispatch_list); // send to the client ARM side
    task_dma_time.end();
}

/* 当CPU作为调度中心，ARM去执行传输过来的任务的专用函数 */
void DAGBuilder::server_task_execute(int layer_id) {
    std::vector<std::shared_ptr<OperatorNode> > cpu_task_node_list;
    std::vector<std::shared_ptr<OperatorNode> > dpa_task_node_list;
    std::vector<int> mem_dispach_list; // 要发哪些部分的内存过去

    this->task_dispatch_layer(layer_id);
            
    for (auto & operator_node : task_assign[device_type_t::DEVICE_CPU][layer_id]) {
        /* if next ops on same device, no need to dma transfer */
        bool if_need_transfer_to_cpu = false;
        if ( (int)operator_node->next_ops.size() == 0) if_need_transfer_to_cpu = true;
        for (auto next_op_node : operator_node->next_ops) {
            if (next_op_node->device_type == device_type_t::DEVICE_ARM) {
                if_need_transfer_to_cpu = true;
                break;
            }
        }
        if (if_need_transfer_to_cpu == false) continue;
        /* if next ops on same device, no need to dma transfer */

        for (auto mem_node : operator_node->next_mem_nodes) { //这里有不同，cpu server侧是发送next_mem_nodes
            // TODO: if operator_node->prev_ops[0]->deivce_type == operator_node->device_type, continue;
            mem_dispach_list.push_back(mem_node->id);
        }
    }

    for (auto & operator_node : task_assign[device_type_t::DEVICE_DPA][layer_id]) {

        bool if_need_transfer_to_cpu = false;
        if ( (int)operator_node->next_ops.size() == 0) if_need_transfer_to_cpu = true;
        for (auto next_op_node : operator_node->next_ops) {
            if (next_op_node->device_type == device_type_t::DEVICE_ARM) {
                if_need_transfer_to_cpu = true;
                break;
            }
        }
        if (if_need_transfer_to_cpu == false) continue;

        for (auto mem_node : operator_node->next_mem_nodes) {
            mem_dispach_list.push_back(mem_node->id);
        }
    }

    xmh::Timer task_dma_time("layer_" + std::to_string(layer_id) + "_dma_time");
    brpcunit->task_dispatch_mem(mem_dispach_list); //发往client arm
    task_dma_time.end();
}

/* Flatten the operator DAG into sequencial_list in topological order
 * (Kahn's algorithm) for serial execution. Also debug-initializes every
 * operator's first input memory node; the previous code dereferenced
 * prev_mem_nodes[0] unconditionally, which is UB for source operators
 * without an input memory node — now guarded. */
void DAGBuilder::sequence_assign(){
    std::queue<std::shared_ptr<OperatorNode>> DagQueue; // topological-sort work queue

    std::vector<int> rd; // in-degree per operator id
    rd.resize(operator_node_counter);

    for (auto operator_node : operator_nodes) {
        rd[operator_node->id] = (int)operator_node->prev_ops.size();
        if (rd[operator_node->id] == 0) {
            DagQueue.push(operator_node);
        }
        // for debug: seed the first input memory with test data
        if (!operator_node->prev_mem_nodes.empty()) {
            DebugUnit::mem_init(operator_node->prev_mem_nodes[0]);
        }
        // for debug
    }

    while (!DagQueue.empty()) {
        auto operator_node = DagQueue.front();
        DagQueue.pop();
        for (auto neighbor : (operator_node->next_ops) ) {
            --rd[neighbor->id];
            if (rd[neighbor->id] == 0) {
                DagQueue.push(neighbor);
            }
        }
        sequencial_list.push_back(operator_node);
    }
}

/* Execute the whole DAG serially on the CPU unit, in the topological order
 * prepared by sequence_assign(). */
void DAGBuilder::sequence_execute() {
    cpuprocessunit->task_execute(sequencial_list);
}

/* Create and initialize the CPU/ARM pre-processing unit with one worker thread.
 * NOTE(review): calling this twice leaks the previous CpuPreProcessUnit — the
 * member is a raw owning pointer that is never deleted; consider unique_ptr. */
void DAGBuilder::ProcessUnitInit() {
    // cpu
    cpuprocessunit = new CpuPreProcessUnit();
    cpuprocessunit->thread_init(1);
}

/* Create the DPA pre-processing unit, register the whole host region with
 * it, then start its worker threads (thread_init must come last — the DPA
 * needs the memory registered before it can initialize). */
void DAGBuilder::dpaProcessUnitInit() {
    dpaprocessunit = new DpaPreProcessUnit();
    // Last memory_offsets entry is the sentinel holding the total region size.
    const size_t region_bytes = memory_offsets[(int)memory_offsets.size() - 1];
    dpaprocessunit->register_host_memory(memory_region_ptr, region_bytes);
    dpaprocessunit->thread_init(16);
}

/* Obtain the single large host region — through the BRPC/DMA unit when
 * USING_BRPC is defined, otherwise via a plain local allocation — and then
 * hand every memory node its slice, computed from memory_offsets. */
void DAGBuilder::memory_region_init() {
#ifdef USING_BRPC
    // DMA path: the brpc unit owns registration and exposes the base pointer.
    brpcunit = new BrpcUnit();
    brpcunit->InitDmaUnit(memory_offsets);
    memory_region_ptr = brpcunit->get_ptr();
#else
    // Non-DMA path: last memory_offsets entry is the total region size.
    memory_region_ptr = CommonUtils::prepare_host_memory(memory_offsets[(int)memory_offsets.size() - 1]);
#endif
    char * region_base = static_cast<char*>(memory_region_ptr);
    for (auto & [name, mem] : memory_nodes) {
        mem->storage_ptr = static_cast<void*>(region_base + memory_offsets[mem->id]);
    }
}

/* Walk the DAG in topological order and immediately execute each operator on
 * its assigned device: ARM -> local cpuprocessunit, DPA -> local
 * dpaprocessunit, otherwise DMA the inputs to the CPU and send the op id via
 * brpc. Per-operator and per-dma timers are recorded along the way. */
void DAGBuilder::task_dispatch() { // traverse the DAG and dispatch every node
    std::queue<std::shared_ptr<OperatorNode>> DagQueue; // topological-sort work queue

    std::vector<int> rd; // in-degree per operator id
    rd.resize(operator_node_counter);

    for (auto operator_node : operator_nodes) {
        rd[operator_node->id] = (int)operator_node->prev_ops.size();
        if (rd[operator_node->id] == 0) {
            DagQueue.push(operator_node);
        }
        // for debug: seed the first input memory with test data
        // NOTE(review): assumes every operator has at least one input memory
        // node — confirm for source operators.
        DebugUnit::mem_init(operator_node->prev_mem_nodes[0]);
        // for debug
    }

    while (!DagQueue.empty()) {
        auto operator_node = DagQueue.front();
        DagQueue.pop();
        for (auto neighbor : (operator_node->next_ops) ) {
            --rd[neighbor->id];
            if (rd[neighbor->id] == 0) {
                DagQueue.push(neighbor);
            }
        }

        std::string cur_op_name = operator_node->op_type + 
            std::to_string(operator_node->id) + "_on_" + DebugUnit::device_type_str[operator_node->device_type];
        xmh::Timer operator_timer(cur_op_name);

        std::vector<std::shared_ptr<OperatorNode> > op_dispatch_node_list; // ops to hand to the CPU/DPU unit
        op_dispatch_node_list.push_back(operator_node);

        if (operator_node->device_type == device_type_t::DEVICE_ARM) {// local execution via CpuPreprocessUnit
            cpuprocessunit->task_execute(op_dispatch_node_list);
        } 
        else if (operator_node->device_type == device_type_t::DEVICE_DPA) {// local execution via DpaPreprocessUnit
            dpaprocessunit->task_execute(op_dispatch_node_list);
        } 
        else { // remote: ship to the CPU through brpcunit
            std::vector<int> mem_dispach_list; // input memory node ids to DMA over first
            for (auto mem_node : operator_node->prev_mem_nodes) {
                mem_dispach_list.push_back(mem_node->id);
            }

            xmh::Timer dma_timer(cur_op_name+"_dma");
            brpcunit->task_dispatch_mem(mem_dispach_list); // DMA the inputs first; includes the poll/wait
            dma_timer.end();

            // then send the operator's id for remote execution
            std::vector<int> op_dispatch_id_list;
            op_dispatch_id_list.push_back(operator_node->id);
            brpcunit->task_dispatch_op(op_dispatch_id_list);
        }

        operator_timer.end();

        // debug: dump values before and after each operator
        // DebugUnit::operator_debug(operator_node);
    }
}

/* Polling worker for the local ARM cores: waits for execute_layer[ARM] to be
 * published, runs that layer's ARM tasks through the CPU process unit, then
 * signals completion via the layer promise and resumes sleep-polling. */
void DAGBuilder::worker_arm() {
    timespec ts = {
        .tv_sec = 0,
        .tv_nsec = SLEEP_IN_NANOS,
    };

    std::string device_str = "_arm";
    device_type_t device_type = device_type_t::DEVICE_ARM;

    LOG(INFO) << "Start RunningArmWorker";

    while(!exit_flag[device_type]) {
        int layer = execute_layer[device_type].load();
        if (layer >= 0) {
            xmh::Timer layer_time("layer_" + std::to_string(layer) + device_str);

        #if defined(__aarch64__) // cpu->arm dispatch: ARM tasks were already handled remotely, nothing to do here
            if (this->dispatch_device_type == device_type_t::DEVICE_CPU) {
                layer_time.end();
                execute_layer[device_type] = -1; // mark idle
                promise_wrappers_[device_type].SetValue(1); // report layer done
                nanosleep(&ts, &ts);
                continue;
            }
        #endif
            if ( (int)task_assign[device_type][layer].size() > 0) {
                cpuprocessunit->task_execute(task_assign[device_type][layer]);
            }
            layer_time.end();
            execute_layer[device_type] = -1; // mark idle so the dispatcher may publish the next layer
            promise_wrappers_[device_type].SetValue(1);
        }
        nanosleep(&ts, &ts);
    }
}

/* Polling worker for the remote CPU. For each published layer it collects
 * the memory nodes that must be DMA'd over (inputs produced on another
 * device) plus the op ids, ships both via brpc, then signals completion.
 * On aarch64 builds with the CPU as dispatch center, the CPU slot instead
 * executes its tasks locally via the CPU process unit. */
void DAGBuilder::worker_cpu() {
    timespec ts = {
        .tv_sec = 0,
        .tv_nsec = SLEEP_IN_NANOS,
    };
    LOG(INFO) << "Start RunningCpuWorker";
    device_type_t device_type = device_type_t::DEVICE_CPU;

    while(!exit_flag[device_type]) {
        int layer = execute_layer[device_type].load();

        if (layer >= 0) {
            xmh::Timer layer_time("layer_" + std::to_string(layer) + "_cpu");
            #if defined(__aarch64__) // task from cpu->arm, execute task
                if (this->dispatch_device_type == device_type_t::DEVICE_CPU) {
                    if ( (int)task_assign[device_type][layer].size() > 0) {
                        cpuprocessunit->task_execute(task_assign[device_type][layer]);
                    }
                    layer_time.end();
                    execute_layer[device_type] = -1;
                    promise_wrappers_[device_type].SetValue(1);
                    nanosleep(&ts, &ts);
                    continue;
                }
            #endif


            std::vector<int> mem_dispach_list; // memory node ids to ship over (dma)
            std::vector<int> op_dispatch_id_list; // operator ids to ship over (brpc)

            for (auto & operator_node : task_assign[device_type][layer]) {

                op_dispatch_id_list.push_back(operator_node->id);

                /* if prev ops on same device, no need to dma transfer */
                // NOTE(review): an input is assumed to need transfer when its
                // producer ran on ARM (or on DPA when ARM is the dispatch
                // center), i.e. the data lives on the ARM host side — confirm.
                bool if_need_transfer = false;
                if ( (int)operator_node->prev_ops.size() == 0) if_need_transfer = true;
                for (auto prev_op_node : operator_node->prev_ops) {
                    if (prev_op_node->device_type == device_type_t::DEVICE_ARM) {
                        if_need_transfer = true;
                        break;
                    }
                    if (this->dispatch_device_type == device_type_t::DEVICE_ARM) {
                        if (prev_op_node->device_type == device_type_t::DEVICE_DPA) {
                            if_need_transfer = true;
                            break;
                        }
                    }
                }
                if (if_need_transfer == false) continue;
                /* if prev ops on same device, no need to dma transfer */

                for (auto mem_node : operator_node->prev_mem_nodes) {
                    mem_dispach_list.push_back(mem_node->id);
                }
            }

            #if defined(__x86_64__) // if dispatch from cpu, must send dpa too.
            // Reaching here means the CPU is the dispatch center assigning
            // work to ARM, so the DPA layer's inputs ride along as well.
            for (auto & operator_node : task_assign[device_type_t::DEVICE_DPA][layer]) {
                
                op_dispatch_id_list.push_back(operator_node->id);

                /* if prev ops on same device, no need to dma transfer */
                bool if_need_transfer = false;
                if ( (int)operator_node->prev_ops.size() == 0) if_need_transfer = true;
                for (auto prev_op_node : operator_node->prev_ops) {
                    if (prev_op_node->device_type == device_type_t::DEVICE_ARM) {
                        if_need_transfer = true;
                        break;
                    }
                }
                if (if_need_transfer == false) continue;
                /* if prev ops on same device, no need to dma transfer */

                for (auto mem_node : operator_node->prev_mem_nodes) {
                    mem_dispach_list.push_back(mem_node->id);
                }
            }
            #endif

            if ( (int)task_assign[device_type][layer].size() > 0 
            #if defined(__x86_64__)
                || (int)task_assign[device_type_t::DEVICE_DPA][layer].size() > 0
            #endif
            ){
                // deduplicate the memory id list
                std::unordered_set<int> unique_elements(mem_dispach_list.begin(), mem_dispach_list.end());
                mem_dispach_list.assign(unique_elements.begin(), unique_elements.end());
                // deduplication done
                brpcunit->task_dispatch_mem(mem_dispach_list); // DMA the needed memory first; includes the poll/wait
                brpcunit->task_dispatch_op(op_dispatch_id_list, layer);
            }
            layer_time.end();

            execute_layer[device_type] = -1; // mark idle
            promise_wrappers_[device_type].SetValue(1); // report layer done
        }
        nanosleep(&ts, &ts);
    }
}

void DAGBuilder::worker_dpa() {
    timespec ts = {
        .tv_sec = 0,
        .tv_nsec = SLEEP_IN_NANOS,
    };
    LOG(INFO) << "Start RunningDpaWorker";
    while(!exit_flag[device_type_t::DEVICE_DPA]) {
        int layer = execute_layer[device_type_t::DEVICE_DPA].load();
        if (layer >= 0) {
            xmh::Timer layer_time("layer_" + std::to_string(layer) + "_dpa");
        #if defined(__aarch64__)
            if ( (int)task_assign[device_type_t::DEVICE_DPA][layer].size() > 0) {
                dpaprocessunit->task_execute(task_assign[device_type_t::DEVICE_DPA][layer]);
            }
        #endif
            layer_time.end();
            execute_layer[device_type_t::DEVICE_DPA] = -1;
            promise_wrappers_[device_type_t::DEVICE_DPA].SetValue(1);
        }
        nanosleep(&ts, &ts);
    }
}

// Pre-assign the whole DAG: decide which nodes run in which layer (BFS
// levels of the topological order) and which device each node goes to,
// splitting every layer ArmRatio:CpuRatio:DpaRatio.
void DAGBuilder::task_assign_simple(int ArmRatio, int CpuRatio, int DpaRatio) {
    // init layer num
    task_assign[device_type_t::DEVICE_ARM].resize(MAX_LAYER_NUM);
    task_assign[device_type_t::DEVICE_CPU].resize(MAX_LAYER_NUM);
    task_assign[device_type_t::DEVICE_DPA].resize(MAX_LAYER_NUM);

    std::queue<std::shared_ptr<OperatorNode>> DagQueue; // topological-sort work queue

    std::vector<int> rd; // in-degree per operator id
    rd.resize(operator_node_counter);

    for (auto operator_node : operator_nodes) {
        rd[operator_node->id] = (int)operator_node->prev_ops.size();
        if (rd[operator_node->id] == 0) {
            DagQueue.push(operator_node);
        }
    }

    // Set up the per-device share of each layer from the given ratios.
    int layer_num = -1;
    std::vector<int> device_assign_port;
    device_assign_port.resize(DEVICE_TYPE_NUM);
    int total_device_port = 0;
    device_assign_port[device_type_t::DEVICE_ARM] = ArmRatio;
    device_assign_port[device_type_t::DEVICE_CPU] = CpuRatio;
    device_assign_port[device_type_t::DEVICE_DPA] = DpaRatio;
    for (auto & assign_port : device_assign_port) {
        total_device_port += assign_port;
    }

    while (!DagQueue.empty()) {
        ++ layer_num;
        std::vector<std::shared_ptr<OperatorNode> > layer_task_assign;

        // Drain the queue: everything currently queued forms this layer.
        while (!DagQueue.empty()) {
            auto operator_node = DagQueue.front();
            layer_task_assign.push_back(operator_node);
            DagQueue.pop();
        }

        int layer_task_num = (int)layer_task_assign.size();
        // Split this layer across the devices by device_assign_port ratios;
        // ARM takes the rounding remainder.
        std::vector<int> layer_task;
        layer_task.resize(DEVICE_TYPE_NUM);
        layer_task[device_type_t::DEVICE_CPU] = layer_task_num * device_assign_port[device_type_t::DEVICE_CPU] / total_device_port;
        layer_task[device_type_t::DEVICE_DPA] = layer_task_num * device_assign_port[device_type_t::DEVICE_DPA] / total_device_port;
        layer_task[device_type_t::DEVICE_ARM] = layer_task_num - layer_task[device_type_t::DEVICE_CPU] - layer_task[device_type_t::DEVICE_DPA];
        
        task_assign[device_type_t::DEVICE_CPU][layer_num].reserve(layer_task[device_type_t::DEVICE_CPU]);
        task_assign[device_type_t::DEVICE_DPA][layer_num].reserve(layer_task[device_type_t::DEVICE_DPA]);
        task_assign[device_type_t::DEVICE_ARM][layer_num].reserve(layer_task[device_type_t::DEVICE_ARM]);

        // Walk the layer's nodes, filling each device's quota in enum order.
        // When a device's quota is exhausted, advance to the next device and
        // re-process the same node (--i undoes the loop increment).
        int task_assign_id = 0;
        int task_assign_cnt = layer_task[task_assign_id];
        
        for (int i = 0; i < layer_task_num; ++i) {
            if (i < task_assign_cnt) {
                task_assign[task_assign_id][layer_num].push_back(layer_task_assign[i]);
                layer_task_assign[i]->device_type = static_cast<device_type_t>(task_assign_id);
            }
            else {
                task_assign_id ++;
                task_assign_cnt += layer_task[task_assign_id];
                --i;
            }
        }

        // Release this layer's successors; they form the next layer(s).
        for (auto & operator_node : layer_task_assign) {
            for (auto & neighbor : (operator_node->next_ops) ) {
                --rd[neighbor->id];
                if (rd[neighbor->id] == 0) {
                    DagQueue.push(neighbor);
                }
            }
        }
    }

    // init dpa task: the DPA needs its per-layer task lists up front
#if defined(__aarch64__)
    for (int i = 0; i < MAX_LAYER_NUM; ++i) {
        dpaprocessunit->init_dpa_thread_info(task_assign[device_type_t::DEVICE_DPA][i]);
    }
#endif

    // Reset the worker-loop control state for all devices.
    for (int i = 0; i < DEVICE_TYPE_NUM; ++i) {
        exit_flag[i] = false;
        execute_layer[i] = -1;
    }
}

/* Run one layer on all devices and block until each reports completion.
 * Every device's promise is re-armed BEFORE its layer id is published, so a
 * worker can never signal a stale promise. */
void DAGBuilder::task_dispatch_layer(int layer_id) {
    xmh::Timer layer_time("layer_" + std::to_string(layer_id));

    // Phase 1: arm every device — fresh promise, then publish the layer id.
    for (int dev = 0; dev < DEVICE_TYPE_NUM; ++dev) {
        promise_wrappers_[dev].Reset();
        execute_layer[dev] = layer_id;
    }

    // Phase 2: wait for every device to fulfil its promise for this layer.
    for (int dev = 0; dev < DEVICE_TYPE_NUM; ++dev) {
        promise_wrappers_[dev].GetFuture().get();
    }

    layer_time.end();
}

/* Execute the layered schedule, one layer at a time, synchronizing all three
 * devices per layer through task_dispatch_layer().
 * NOTE(review): the inner loop stops at total_layer_num - 5 and the outer
 * round loop runs exactly once — both look like leftover experiment knobs;
 * confirm they are intentional. */
void DAGBuilder::task_dispatch_parallel() { // walk the layered assignment and dispatch each layer
    int total_layer_num = (int)task_assign[0].size();
    for (int round = 0; round < 1; ++ round) {
        for (int i = 0; i < total_layer_num - 5; ++i) {
            task_dispatch_layer(i);
        }
    }
}

/* Debug helper: seed the first input memory node of every layer-0 operator
 * on every device, so the pipeline starts from deterministic data.
 * Guards added: task_assign[i] is empty before task_assign_simple() runs,
 * and source operators may have no input memory node — both previously
 * caused out-of-bounds access. */
void DAGBuilder::Memory_Init() {
    for (int i = 0; i < DEVICE_TYPE_NUM; ++i) {
        if (task_assign[i].empty()) continue; // schedule not built yet
        // for debug
        for (auto operator_node : task_assign[i][0]) {
            if (operator_node->prev_mem_nodes.empty()) continue; // source op without input
            DebugUnit::mem_init(operator_node->prev_mem_nodes[0]);
        }
        // for debug
    }
}

void DAGBuilder::start_thread() {
    worker_thread[device_type_t::DEVICE_ARM] = std::thread([this]() {this->worker_arm();} );
    worker_thread[device_type_t::DEVICE_DPA] = std::thread([this]() {this->worker_dpa();} );
    worker_thread[device_type_t::DEVICE_CPU] = std::thread([this]() {this->worker_cpu();} );
}

/* Ask every device worker to exit, then join the threads.
 * Fixes: all exit flags are raised before any join (workers only poll their
 * own flag, so this can only shorten shutdown), and join() is guarded with
 * joinable() — previously calling finish_thread() without a prior
 * start_thread() would std::terminate on a non-joinable thread. */
void DAGBuilder::finish_thread() {
    for (int i = 0; i < DEVICE_TYPE_NUM; ++i) {
        exit_flag[i] = true;
    }
    for (int i = 0; i < DEVICE_TYPE_NUM; ++i) {
        if (worker_thread[i].joinable()) {
            worker_thread[i].join();
        }
    }
}

// Dump (up to 10 elements of) every sink operator's output memory node —
// i.e. the final feature values of the DAG.
// Guard added: next_mem_nodes[0] was dereferenced without a size check.
void DAGBuilder::feature_extract() {
    for (auto & operator_node : operator_nodes) {
        if ( (int)operator_node->next_ops.size() != 0) continue; // only DAG sinks
        if (operator_node->next_mem_nodes.empty()) continue;     // defensive: sink without output memory
        DebugUnit::mem_debug(operator_node->next_mem_nodes[0], 10);
    }
}

void DAGBuilder::pipeline_dispatch_baseline(
        std::vector<int> & stage_thread_num,
        std::vector<std::vector<std::shared_ptr<OperatorNode> > > & subdag_list, 
        std::vector<device_type_t> & device_type_list,
        std::vector<stage_type_t> & stage_type_list,
        int & if_cross_device
    ){
        
    (void)if_cross_device;
    
    stage_thread_num = std::vector<int>{8};
    std::vector<std::shared_ptr<OperatorNode> > tmp_subdag_list;
    for (int i = 0; i < MAX_LAYER_NUM; ++i) {
        for (int j = 0; j < DEVICE_TYPE_NUM; ++j) {
            for (auto & op_node : task_assign[j][i]) {
                tmp_subdag_list.push_back(op_node);
            }
        }
    }

    subdag_list.push_back(tmp_subdag_list);

    device_type_list.push_back(device_type_t::DEVICE_CPU);
    stage_type_list.push_back(stage_type_t::NORMAL_STAGE);
}

/* Normal pipeline plan: split the layered assignment into stages — layers
 * 0-1 go to the CPU bucket, layer 2 to the DPA bucket, later layers to the
 * ARM bucket. With if_cross_device set, each build only instantiates the
 * stages that run on its own side (x86: one SEND stage; aarch64: a RECV
 * stage plus a NORMAL stage). */
void DAGBuilder::pipeline_dispatch(
        std::vector<int> & stage_thread_num,
        std::vector<std::vector<std::shared_ptr<OperatorNode> > > & subdag_list, 
        std::vector<device_type_t> & device_type_list,
        std::vector<stage_type_t> & stage_type_list,
        int & if_cross_device
    ){

    std::vector<std::vector<std::shared_ptr<OperatorNode> > > tmp_subdag_list;
    stage_thread_num = std::vector<int>{1, 1, 1};
    if (if_cross_device) {
    #if defined(__x86_64__)
        stage_thread_num = std::vector<int>{1};    // x86 side only runs the SEND stage
    #else
        stage_thread_num = std::vector<int>{1, 1}; // arm side runs RECV + NORMAL stages
    #endif
    }
    tmp_subdag_list.resize(DEVICE_TYPE_NUM); // one bucket per device type

    // Bucket every operator by layer: 0-1 -> CPU, 2 -> DPA, rest -> ARM.
    // NOTE(review): the layer cut points (2) are hard-coded — presumably
    // tuned for the current model; confirm.
    for (int i = 0; i < MAX_LAYER_NUM; ++i) {
        for (int j = 0; j < DEVICE_TYPE_NUM; ++j) {
            for (auto & op_node : task_assign[j][i]) {
                if (i < 2) {
                    tmp_subdag_list[device_type_t::DEVICE_CPU].push_back(op_node);
                }
                else if (i == 2) {
                    tmp_subdag_list[device_type_t::DEVICE_DPA].push_back(op_node);
                }
                else tmp_subdag_list[device_type_t::DEVICE_ARM].push_back(op_node);
            }
        }
    }

    // Emit the stage descriptors for each bucket.
    for (int i = 0; i < DEVICE_TYPE_NUM; ++i) {
        if (if_cross_device) {
    #if defined(__x86_64__)
            // host side: only the CPU bucket, as a SEND stage
            if (device_type_t(i) == device_type_t::DEVICE_CPU) {
                device_type_list.push_back(device_type_t::DEVICE_CPU);
                stage_type_list.push_back(stage_type_t::SEND_STAGE);
                subdag_list.push_back(tmp_subdag_list[i]);
            }
    #else 
            // DPU side: DPA bucket receives, ARM bucket runs normally
            if (device_type_t(i) == device_type_t::DEVICE_DPA) {
                device_type_list.push_back(device_type_t::DEVICE_DPA);
                stage_type_list.push_back(stage_type_t::RECV_STAGE);
                subdag_list.push_back(tmp_subdag_list[i]);
            }

            if (device_type_t(i) == device_type_t::DEVICE_ARM) {
                device_type_list.push_back(device_type_t::DEVICE_ARM);
                stage_type_list.push_back(stage_type_t::NORMAL_STAGE);
                subdag_list.push_back(tmp_subdag_list[i]);
            }
    #endif
        }
        else{
            // single-device pipeline: all buckets become NORMAL stages
            subdag_list.push_back(tmp_subdag_list[i]);
#if defined(__aarch64__)
            // NOTE(review): bucket index 1 is assumed to be the DPA bucket
            // here — confirm against the device_type_t enum order.
            if (i != 1) {
                device_type_list.push_back(device_type_t::DEVICE_CPU);
            }
            else {
                device_type_list.push_back(device_type_t::DEVICE_DPA);
            }
#else
            device_type_list.push_back(device_type_t::DEVICE_CPU);
#endif
            stage_type_list.push_back(stage_type_t::NORMAL_STAGE);
        }
    }

}

void DAGBuilder::pipeline_init(const std::string & filename, int feature_num, int batch_num, int ele_num, int if_cross_device) {
    // Record run-wide sizing parameters before the DAG is constructed.
    this->set_feature_num(feature_num);
    this->set_batch_num(batch_num);
    this->set_ele_num(ele_num);

    // Build the operator/memory DAG from the textual description file.
    build_from_file(filename);
#ifdef USING_KV
    Init_Kvstore();
#endif
    memory_assign();
    task_assign_simple();

    pipelineunit = std::make_unique<PipelineUnit>();

    // Per-stage dispatch plan, filled in by the baseline dispatcher below:
    // thread count, sub-DAG, device and stage kind for each pipeline stage.
    std::vector<int> threads_per_stage;
    std::vector<std::vector<std::shared_ptr<OperatorNode> > > stage_subdags;
    std::vector<device_type_t> stage_devices;
    std::vector<stage_type_t> stage_kinds;

    // NOTE: all of this is dispatch logic and should eventually live in a
    // dedicated dispatcher class.
    this->pipeline_dispatch_baseline(threads_per_stage, stage_subdags, stage_devices, stage_kinds, if_cross_device);

    LOG(INFO) << "Start init pipeline";

    // Hand the computed plan to the pipeline unit.
    pipelineunit->Init_stage_list(threads_per_stage, stage_subdags, stage_devices, stage_kinds, if_cross_device);
}

void DAGBuilder::pipeline_main(const std::string & filename, int feature_num, int batch_num, int ele_num, int if_cross_device) {
    // Configure sizes, build the DAG and set up the pipeline stages.
    this->pipeline_init(filename, feature_num, batch_num, ele_num, if_cross_device);

    LOG(INFO) << "Start pipeline";
    pipelineunit->start_pipeline();

    // Let the pipeline run for a fixed window before shutting it down.
    const auto run_window = std::chrono::seconds(20);
    std::this_thread::sleep_for(run_window);

    LOG(INFO) << "Start end pipeline";
    pipelineunit->end_pipeline();
}

void DAGBuilder::main(const std::string & filename, int feature_num, int batch_num, int ele_num, device_type_t DeviceType) {
    // Single-run entry point: records run parameters, builds the DAG from
    // `filename`, sets up memory/communication units, executes the DAG 20
    // times (sequentially or in parallel depending on USING_SEQUENCE) while
    // timing each run, then dumps debug output and a DOT visualization.
    
    // Record run parameters and which device this dispatcher runs on.
    this->set_feature_num(feature_num);
    this->set_batch_num(batch_num);
    this->set_ele_num(ele_num);
    this->set_dispatch_type(DeviceType);

    // Build the operator/memory DAG.
    ProcessUnitInit();
    build_from_file(filename);

    /* Initialize the kvstore's BRPC channel (optional). */
#ifdef USING_KV
    Init_Kvstore();
#endif
    LOG(INFO) << "finish kvstore init";
    
    // Using the op-node DAG, update memory-node info (occupied size, ele-num,
    // etc.) and compute memory_offsets.
    memory_assign();

    LOG(INFO) << "finish memory assign";

    // Initialize the BRPC communication unit (which contains the DMA unit).
    // doca_dma_unit allocates the memory region (posix) and returns a pointer.
    memory_region_init();

    LOG(INFO) << "finish memory region init";
    // After the memory allocation is done, init the DPA (ARM side only).
#if defined(__aarch64__)
    dpaProcessUnitInit();
#endif

    // Further BRPC initialization (optional).
#ifdef USING_BRPC
    client_init();
#endif

    // Execute op-node operations following the operator-node DAG topology,
    // calling the corresponding preprocess unit for each.
    
    // Start timing from here.
    

    // task_dispatch();

        /* parallel execute part end */

#ifdef USING_SEQUENCE
    /* sequence execute part start */

    sequence_assign();

    xmh::Reporter::StartReportThread();

    // 20 timed iterations of the full sequential DAG execution.
    for (int i = 0; i < 20; ++i) {
        xmh::Timer total_timer("total_time");
        sequence_execute();
        total_timer.end();
    }

    xmh::Reporter::StopReportThread();
    xmh::Reporter::Report();
    
    /* sequence execute part end */
#else
    /* parallel execute part start */

    // NOTE(review): the arguments (10, 5, 4) look like per-device task
    // counts/thread hints — confirm against task_assign_simple's signature.
    task_assign_simple(10, 5, 4);

    LOG(INFO) << "finish task assign";

    Memory_Init();
    start_thread();

    xmh::Reporter::StartReportThread();

    // 20 timed iterations of the parallel dispatch path.
    for (int i = 0; i < 20; ++i) {
        xmh::Timer total_timer("total_time");
        task_dispatch_parallel();
        total_timer.end();
    }
    
    LOG(INFO) << "finish task execute";

    finish_thread();

    xmh::Reporter::StopReportThread();
    xmh::Reporter::Report();
#endif
    // For debugging: extract all leaf nodes (i.e. all features).
    feature_extract();

    export_to_dot("/root/xeg/BenchBF3/rap/plots/picture/dag.dot");
}

void DAGBuilder::build_from_file(const std::string& filename) {
    // Parse a DAG description file, one operator per non-empty line:
    //   <op_type> <input_base_name> <output_base_name> [key=value ...]
    // Each line creates one OperatorNode plus versioned input/output
    // MemoryNodes and wires up operator<->memory, operator<->operator and
    // memory<->memory adjacency.
    std::ifstream infile(filename);
    if (!infile) {
        // Fix: previously a missing/unreadable file silently produced an
        // empty DAG; report the failure so misconfiguration is visible.
        std::cerr << "[Error] DAGBuilder::build_from_file: cannot open file: "
                  << filename << "\n";
        return;
    }

    std::string line;
    while (std::getline(infile, line)) {
        if (line.empty()) continue;

        std::istringstream iss(line);
        std::string op, input_base, output_base;
        iss >> op >> input_base >> output_base;

        // Remaining tokens are key=value operator parameters.
        std::map<std::string, std::string> params;
        std::string token;
        while (iss >> token) {
            auto pos = token.find('=');
            if (pos != std::string::npos) {
                params[token.substr(0, pos)] = token.substr(pos + 1);
            }
        }

        // === Core logic: versioned memory-node names ===
        // Reading uses the latest existing version of the input; writing
        // always creates a new version of the output.
        std::string input_name = get_latest_versioned_name(input_base);    // does not bump version
        std::string output_name = create_new_versioned_name(output_base);  // always bumps version
        // TODO: an input may contain multiple features; this section needs a rewrite.

        auto op_node = std::make_shared<OperatorNode>();
        op_node->op_type = op;
        op_node->input_name = input_name;
        op_node->output_name = output_name;
        op_node->params = params;

        auto input_node = get_or_create_memory(input_name);
        auto output_node = get_or_create_memory(output_name);

        // Operator <-> memory adjacency.
        op_node->prev_mem_nodes.push_back(input_node);
        op_node->next_mem_nodes.push_back(output_node);
        input_node->next_opt_nodes.push_back(op_node);
        output_node->prev_opt_nodes.push_back(op_node);

        // Operator <-> operator and memory <-> memory adjacency, both derived
        // from the producers of our input node. (Single pass; the original
        // iterated prev_opt_nodes twice with identical results.)
        for (auto& prev_op : input_node->prev_opt_nodes) {
            op_node->prev_ops.push_back(prev_op);
            prev_op->next_ops.push_back(op_node);

            for (auto& prev_input : prev_op->prev_mem_nodes) {
                // Deduplicate: link each upstream memory node at most once.
                if (std::find(output_node->prev_memory_nodes.begin(),
                              output_node->prev_memory_nodes.end(),
                              prev_input) == output_node->prev_memory_nodes.end()) {
                    output_node->prev_memory_nodes.push_back(prev_input);
                    prev_input->next_memory_nodes.push_back(output_node);
                }
            }
        }

        op_node->id = operator_node_counter++;
        op_node->thread_num = 0; // thread count; 0 means "not decided yet".
        operator_nodes.push_back(op_node);
    }
}

void DAGBuilder::export_to_dot(const std::string& filename) {
    // Write the current DAG as a Graphviz DOT file: memory nodes as filled
    // black circles (labelled with name and id), operator nodes coloured by
    // device_type, and an edge for every operator input/output memory node.
    std::ofstream dotfile(filename);
    if (!dotfile) {
        // Fix: previously an unwritable path failed silently; report it.
        std::cerr << "[Error] DOT export failed, cannot open: " << filename << "\n";
        return;
    }

    dotfile << "digraph DAG {\n";
    dotfile << "  rankdir=LR;\n";  // left-to-right layout
    dotfile << "  node [shape=circle, fontsize=10];\n";

    // Memory nodes (the unused `printed_nodes` bookkeeping set was removed —
    // it was populated but never read).
    for (const auto& [name, mem] : memory_nodes) {
        dotfile << "  \"" << mem->name << "\""
                << " [label=\"" << mem->name << "\\nID: " << mem->id << "\""
                << ", shape=circle, style=filled, fillcolor=black, fontcolor=white];\n";
    }

    // Operator nodes, colour-coded by device_type.
    for (size_t i = 0; i < operator_nodes.size(); ++i) {
        auto& op = operator_nodes[i];
        std::string op_id = "op_" + std::to_string(i);

        std::string fillcolor;
        switch (op->device_type) {
            case 0:
                fillcolor = "red";
                break;
            case 1:
                fillcolor = "green";
                break;
            case 2:
                fillcolor = "blue";
                break;
            default:
                fillcolor = "gray"; // default colour for unknown device types
                break;
        }

        dotfile << "  \"" << op_id << "\""
                << " [label=\"" << op->op_type << "\""
                << ", shape=circle, style=filled, fillcolor=" << fillcolor << ", fontcolor=black];\n";
    }

    // Edges: input memory -> operator -> output memory.
    for (size_t i = 0; i < operator_nodes.size(); ++i) {
        auto& op = operator_nodes[i];
        std::string op_id = "op_" + std::to_string(i);

        for (auto& input : op->prev_mem_nodes) {
            dotfile << "  \"" << input->name << "\" -> \"" << op_id << "\";\n";
        }
        for (auto& output : op->next_mem_nodes) {
            dotfile << "  \"" << op_id << "\" -> \"" << output->name << "\";\n";
        }
    }

    dotfile << "}\n";
    dotfile.close();
    std::cout << "[Info] DOT file written to: " << filename << "\n";
}


void DAGBuilder::Init_Kvstore(){
    // Initialize the BRPC key-value store and append an "embedding_fetch"
    // operator after every current DAG leaf, so each selected leaf feature is
    // followed by an embedding-lookup stage.

    // Initialize the channel to the kvstore service.
    BrpcKvstore & brpckvstore = BrpcKvstore::getInstance();
    brpckvstore.InitChannel();

    // Collect leaf operators (no successors) that should receive an
    // embedding-fetch operator.
    std::vector<std::shared_ptr<OperatorNode> > tmp;
    for (auto & operator_node : operator_nodes) {
        // NOTE(review): skips outputs whose name contains "int" but not
        // "_int" — presumably a naming-convention filter for integer-typed
        // features; confirm the intended rule.
        if (operator_node->output_name.find("_int") == std::string::npos && operator_node->output_name.find("int") != std::string::npos) continue;
        if ( (int)operator_node->next_ops.size() == 0) {
            tmp.push_back(operator_node);
        }
    }

    std::vector<std::string> table_name_list;
    std::vector<int>         table_num_embeddings;

    for (auto& operator_node : tmp) {
        // Create the new embedding-fetch OperatorNode.
        auto embedding_fetch_node = std::make_shared<OperatorNode>();
        embedding_fetch_node->op_type = "embedding_fetch";

        // Use the original node's first output memory node as this node's input.
        auto input_node = operator_node->next_mem_nodes[0];
        embedding_fetch_node->input_name = input_node->name;

        // Strip the version suffix to get the base name. (If there is no '#',
        // find() returns npos and substr keeps the whole name.)
        std::string base_name = input_node->name.substr(0, input_node->name.find('#'));
        std::string output_name = create_new_versioned_name(base_name);

        // Create the output node under the freshly versioned name.
        auto output_node = get_or_create_memory(output_name);
        embedding_fetch_node->output_name = output_name;

        // Wire up operator<->memory and operator<->operator adjacency.
        embedding_fetch_node->prev_mem_nodes.push_back(input_node);
        embedding_fetch_node->next_mem_nodes.push_back(output_node);
        input_node->next_opt_nodes.push_back(embedding_fetch_node);
        output_node->prev_opt_nodes.push_back(embedding_fetch_node);

        embedding_fetch_node->prev_ops.push_back(operator_node);
        operator_node->next_ops.push_back(embedding_fetch_node);

        embedding_fetch_node->id = operator_node_counter++;
        embedding_fetch_node->thread_num = 0; // thread count; 0 means "not decided yet".
        operator_nodes.push_back(embedding_fetch_node);

        table_name_list.push_back(base_name);
        // NOTE(review): per-table embedding count is hard-coded to 10 —
        // looks like a placeholder; confirm against real table sizes.
        table_num_embeddings.push_back(10);
    }
    brpckvstore.Init_kv_store(EMBEDDING_DIM, "float", table_name_list, table_num_embeddings);
}