#pragma once

#include "common_unit.h"
#include "feature_cross.h"
#include "cpu_feature_operator.h"
#include <cstdint>

#include "doca_dpa_unit.h"
#include <butil/logging.h>
#include <algorithm>

class PreProcessUnit {
public:
    virtual ~PreProcessUnit() = default;

    // Initialize the execution backend's worker threads (pure virtual).
    // TODO: except for one-hot, bucketize, ngram and firstx, the remaining ops
    // can probably be optimized to run in place (i.e. without allocating new
    // space) -- needs an experiment to confirm.
    /* An OperatorNode stores the information needed to compute the size of the
       next memory block; viewed as a graph, the node is the out-edge of a
       memhandler, and mem_reuse marks whether the memhandler's in-edge is a
       self-loop (i.e. the memory is reused). */
    virtual void thread_init(int num_thread) = 0;

    // Execute every operator in the given list (pure virtual).
    virtual void task_execute(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list) = 0;
    // Same, but input/output addresses are resolved through mem_addr_map
    // (memory-node id -> host address).
    virtual void task_execute(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list, std::map<int , void*> & mem_addr_map) = 0;

    // (Re)build the bidirectional op id <-> op name mapping.
    void map_init() {
        // The index of each name in this list IS its operator id, so the
        // order must never change. Function-local static: built once instead
        // of on every map_init call.
        static const std::vector<std::string> kOpNames = {
            "logit",
            "boxcox",
            "onehot",
            "bucketize",
            "sigrid_hash",
            "clamp_list",
            "ngram",
            "firstx",
            "mapid",
            "fillnull",
            "cast",
            "embedding_fetch"
        };
        operation_type_map.clear();
        rev_operation_type_map.clear();
        for (int i = 0; i < static_cast<int>(kOpNames.size()); ++i) {
            operation_type_map[i] = kOpNames[i];
            rev_operation_type_map[kOpNames[i]] = i;
        }
    }

public:
    // For now the pipeline is assumed to be a linear sequence of operators;
    // generalizing to a full graph is future work.
    std::map<int , std::string> operation_type_map;     // op id -> op name
    std::map<std::string, int> rev_operation_type_map;  // op name -> op id
};

// DPA support is deferred; focus on the CPU path first.
/* 
    Files involved in the change:
    doca_preprocess_unit.h: executes a task given an OperatorNode
    doca_dpa_unit: the memory-registration function needs changes so that it
        can share one function with doca-dma
    graph_utils.h: memory-pointer passing/registration etc.; add a DPA module
    feature_cross.h: design of the new structs

    Potential difficulties for DPA:
    1. Whether its memory is compatible with doca-dma (after ibv-reg);
       verify experimentally.
    2. The OperatorNode struct information cannot be used directly; either
       redesign the struct, or store ahead of time a second copy of the graph
       that contains no vectors.
*/
class DpaPreProcessUnit : public PreProcessUnit{
public:
    /* 初始化线程信息 */
    void thread_init(int thread_num) {
        (void)thread_num;
        this->thread_num = thread_num;
        this->map_init();
    }

    void task_execute(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list) override{
        
        for (auto operation_node : task_opnode_list) {
            // this->mem_debug(i);
            // xmh::Timer time_calcer(optype);

            int thread_num = 1;
            if (operation_node->thread_num == 0) thread_num = std::max(thread_num, this->thread_num);
            else {
                thread_num = operation_node->thread_num;
            }

            if (sdk_type == sdk_type_t::FLEXIO_SDK) {
                
                auto mem_input_node = operation_node->prev_mem_nodes[0];
                auto mem_output_node = operation_node->next_mem_nodes[0];

                void * input_addr  = mem_input_node->storage_ptr;
                void * output_addr = mem_output_node->storage_ptr;

                (dynamic_cast<DpaFlexioUnit *> (dpaunit) ) -> init_addr(input_addr, output_addr);

                for (int j = 0; j < thread_num; ++j) {
                    dpaunit->submit_task( (flexio_uintptr_t)(operation_node->id * thread_num + j) );
                }
            }

            else {
                dpaunit->submit_task( (flexio_uintptr_t)(operation_node->id * thread_num), thread_num);
            }

            dpaunit->poll_thread();
            // time_calcer.end();
        }
        // this->mem_debug(op_sequence_len);
    }

    void task_execute(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list, std::map<int , void*> & mem_addr_map) override{
        (void)mem_addr_map;
        for (auto operation_node : task_opnode_list) {
            int thread_num = 1;
            if (operation_node->thread_num == 0) thread_num = std::max(thread_num, this->thread_num);
            else {
                thread_num = operation_node->thread_num;
            }

            if (sdk_type == sdk_type_t::FLEXIO_SDK) {
                
                auto mem_input_node = operation_node->prev_mem_nodes[0];
                auto mem_output_node = operation_node->next_mem_nodes[0];

                void * input_addr  = mem_addr_map[mem_input_node->id];
                void * output_addr = mem_addr_map[mem_output_node->id];

                (dynamic_cast<DpaFlexioUnit *> (dpaunit) ) -> init_addr(input_addr, output_addr);
                for (int j = 0; j < thread_num; ++j) {
                    dpaunit->submit_task( (flexio_uintptr_t)(operation_node->id * thread_num + j) );
                }
            }

            else {
                dpaunit->submit_task( (flexio_uintptr_t)(operation_node->id * thread_num), thread_num);
            }
            dpaunit->poll_thread();
            // time_calcer.end();
        }
        // this->mem_debug(op_sequence_len);
    }

    void init_dpa_thread_info(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list, std::map<int , void*> * mem_addr_map = nullptr) {
        for (auto operation_node : task_opnode_list) {
            int thread_num = 1;
            if (operation_node->thread_num == 0) thread_num = std::max(thread_num, this->thread_num);
            else {
                thread_num = operation_node->thread_num;
            }

            // thread_num = 1;

            auto mem_input_node = operation_node->prev_mem_nodes[0];
            auto mem_output_node = operation_node->next_mem_nodes[0];

            int Start_batch   = mem_input_node->batch_start_id;
            int End_batch     = mem_input_node->batch_end_id;

            int batch_dim = End_batch - Start_batch;

            for (int j = 0; j < 1; ++j) { //FIXME:
                DpaThreadInfo T;

                T.size       = batch_dim;
                T.stream_id  = 0;
                T.thread_num = thread_num;

                if (mem_addr_map != nullptr) {
                    T.input_mem_addr  = static_cast<char*>(mem_addr_map->at(mem_input_node->id) );
                    T.output_mem_addr = static_cast<char*>(mem_addr_map->at(mem_output_node->id));
                }
                else {
                    T.input_mem_addr  = mem_input_node->storage_ptr;
                    T.output_mem_addr = mem_output_node->storage_ptr;
                }
                
                T.preprocess_type = this->rev_operation_type_map[operation_node->op_type];
                T.thread_index    = operation_node->id * thread_num + j;

                T.num_ele_per_feature = mem_input_node->element_per_feature;
                T.output_num_ele_per_feature = mem_output_node->element_per_feature;
                // 提交任务
                dpaunit->init_dpa_thread_info(T);
            }
        }
    }

    void init_control_info(void * ptr) {
        if (sdk_type == sdk_type_t::TEST_SDK) {
            ( (DpaTestUnit *)dpaunit)->init_control_info(ptr);
        }
    }

    void register_host_memory(void * ptr, size_t size) {
        dpaunit->register_host_memory(ptr, size);
    }

    DpaPreProcessUnit(sdk_type_t sdk_type = sdk_type_t::FLEXIO_SDK) {
        this->sdk_type = sdk_type;
        if (sdk_type == sdk_type_t::FLEXIO_SDK){
            dpaunit = new DpaFlexioUnit(); //TODO:
        }

        else if (sdk_type == sdk_type_t::TEST_SDK) {
            dpaunit = new DpaTestUnit();
        }

        else {
            dpaunit = new DpaDocasdkUnit();
        }
        LOG(INFO) << "finish dpaunit init";
    }

public:
    flexio_func_t * func;
    flexio_func_t * init_func;
    struct flexio_app * app;
    DpaUnit * dpaunit;
    std::vector<DpaThreadInfo> thread_infos;
    sdk_type_t sdk_type; 
    int thread_num = 0;
};

class CpuPreProcessUnit : public PreProcessUnit {
public:
    void thread_init(int num_thread) override{
        this->cpuopunit = new CpuOperatorUnit();
        this->thread_num = num_thread;
        cpuopunit->set_thread(this->thread_num);
        this->map_init();
    }

    void task_execute(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list) override{
        ExecuteNode T;
        for (auto & operation_node : task_opnode_list) {
            // this->mem_debug(i);
            int optype = this->rev_operation_type_map[operation_node->op_type];
            // xmh::Timer time_calcer(optype);

            // this->print_node(&vec);
            auto mem_input_node = operation_node->prev_mem_nodes[0];
            auto mem_output_node = operation_node->next_mem_nodes[0];

            T.input_addr  = mem_input_node->storage_ptr;
            T.output_addr = mem_output_node->storage_ptr;
            T.op_type     = optype;

            cpuopunit->cpu_operator_execute(&T, mem_input_node.get() );

            // time_calcer.end();
        }
        // this->mem_debug(op_sequence_len);
    }

    void task_execute(std::vector<std::shared_ptr<OperatorNode> > & task_opnode_list, std::map<int , void*> & mem_addr_map) override{
        ExecuteNode T;
        for (auto & operation_node : task_opnode_list) {
            int optype = this->rev_operation_type_map[operation_node->op_type];

            auto mem_input_node = operation_node->prev_mem_nodes[0];
            auto mem_output_node = operation_node->next_mem_nodes[0];

            T.input_addr  = mem_addr_map[mem_input_node->id];
            T.output_addr = mem_addr_map[mem_output_node->id];
            T.op_type     = optype;

            xmh::Timer execute_timer(operation_node->op_type);
            cpuopunit->cpu_operator_execute(&T, mem_input_node.get() );
            execute_timer.end();
        }
    }

    ~CpuPreProcessUnit() {
        delete cpuopunit;
    }

public:
    int thread_num;
    CpuOperatorUnit * cpuopunit;
};