// Benchmark operator execution speed and measure its performance on each device (CPU / DPA).
#include "doca_preprocess_unit.h"
#include "debug_unit.h"
#include "doca_dpa_unit.h"
#include "memory_unit.h"

// Command-line flags selecting the device/backend that runs the benchmark.
DEFINE_string(device_type, "cpu", "device to execute operator");
DEFINE_int32 (thread_num,  1, "thread for device to use");
DEFINE_string(op_type,  "fillnull", "test operator type");
// DPA SDK selector: 1 = DOCA_SDK, 2 = TEST_SDK, anything else = default (see main()).
DEFINE_int32(dpa_type, 0, "dpa sdk type"); 

// Workload shape: each input holds batch_size * feature_num * ele_num elements.
DEFINE_int32(batch_size, 1, "batch_size");
DEFINE_int32(feature_num, 1, "feature_num");
DEFINE_int32(ele_num, 1, "ele_num per feature");
// How many times task_execute() is invoked and timed.
DEFINE_int32(execute_time, 1, "execute times");

// Per-call mismatch counts; scanned at the end of main() for a pass/fail verdict.
std::vector<int> correct_list;

// Recompute the expected hash for every input element on the host and count
// positions where the device-produced output disagrees. The mismatch count is
// printed and appended to the global correct_list.
// NOTE: the formula below must stay bit-identical to the device kernel:
//   h = (x ^ 0); h = (h * 2654435761) ^ (h >> 16); h %= 10
void correctness_check(int64_t * input_ptr, int64_t * output_ptr, size_t size) {
    int mismatches = 0;
    for (int pos = 0; pos < (int)size; ++pos) {
        // Host reference: Knuth-style multiplicative hash folded with the
        // value's upper bits, reduced mod 10 (seed is 0, hence the no-op xor).
        uint64_t expected = ((uint64_t)input_ptr[pos]) ^ (uint64_t)(0);
        expected = (expected * 2654435761U) ^ (expected >> 16);
        expected = expected % (uint64_t)(10);

        uint64_t actual = (uint64_t)output_ptr[pos];
        if (expected != actual) {
            ++mismatches;
        }
    }
    printf ("incorrect position num = %d \n", mismatches);
    correct_list.push_back(mismatches);
}

// std::vector<std::shared_ptr<MemoryNode> > mem_input_node_list;
// std::vector<std::shared_ptr<MemoryNode> > mem_output_node_list;
// All operator nodes built by create_op_node(); consumed by the device
// task_execute() calls and the final correctness scan in main().
std::vector<std::shared_ptr<OperatorNode> > operation_node_list;
// Monotonically increasing source for OperatorNode::id (first node gets id 1).
int operation_num = 0;

void create_op_node() {

    MemoryUnit & memoryunit = MemoryUnit::Instance();

    std::shared_ptr<OperatorNode> operator_node = std::make_shared<OperatorNode>();
    operation_node_list.push_back(operator_node);

    size_t size_input  = (size_t)FLAGS_batch_size * (size_t)FLAGS_feature_num * (size_t)FLAGS_ele_num * (size_t)sizeof(double);
    std::shared_ptr<MemoryNode> input_mem_node  = std::make_shared<MemoryNode>();
    std::shared_ptr<MemoryNode> output_mem_node = std::make_shared<MemoryNode>();

    input_mem_node->feature_start_id    = 0;
    input_mem_node->feature_end_id      = FLAGS_feature_num;
    input_mem_node->batch_start_id      = 0;
    input_mem_node->batch_end_id        = FLAGS_batch_size;
    input_mem_node->element_per_feature = FLAGS_ele_num;
    input_mem_node->storage_ptr  = memoryunit.Malloc(size_input);
    input_mem_node->total_size = size_input;

    (*output_mem_node) = (*input_mem_node);
    output_mem_node->element_per_feature= CommonUtils::calc_ele(FLAGS_ele_num, FLAGS_op_type);
    output_mem_node->storage_ptr = memoryunit.Malloc(size_input);

    operator_node->prev_mem_nodes.push_back(input_mem_node);
    operator_node->next_mem_nodes.push_back(output_mem_node);
    operator_node->op_type    = FLAGS_op_type;
    operator_node->thread_num = FLAGS_thread_num;
    operator_node->id = ++ operation_num;

    input_mem_node->next_opt_nodes.push_back(operator_node);
    CommonUtils::memory_dtype_assign(operator_node);

    DebugUnit::mem_init(input_mem_node);
    DebugUnit::mem_debug(input_mem_node, 10);
}

int main(int argc, char **argv) {

    gflags::ParseCommandLineFlags(&argc, &argv, true);

    xmh::Reporter::StartReportThread();

    MemoryUnit & memoryunit = MemoryUnit::Instance();
    memoryunit.Init(DEFAULT_WINDOWS_SIZE);

    // std::shared_ptr<OperatorNode> operator_node = std::make_shared<OperatorNode>();
    // std::vector<std::shared_ptr<OperatorNode> > op_list;

    // // 先开内存
    // void * ptr = nullptr;
    // int ret = posix_memalign(&ptr, 64, ( (size_t)DEFAULT_WINDOWS_SIZE + 63) & (~63));
    // if (ret != 0) {
    //     printf ("error malloc host memory\n");
    // }

    // size_t size_input  = (size_t)FLAGS_batch_size * (size_t)FLAGS_feature_num * (size_t)FLAGS_ele_num * (size_t)sizeof(double);
    // std::shared_ptr<MemoryNode> input_mem_node  = std::make_shared<MemoryNode>();
    // std::shared_ptr<MemoryNode> output_mem_node = std::make_shared<MemoryNode>();

    // input_mem_node->feature_start_id    = 0;
    // input_mem_node->feature_end_id      = FLAGS_feature_num;
    // input_mem_node->batch_start_id      = 0;
    // input_mem_node->batch_end_id        = FLAGS_batch_size;
    // input_mem_node->element_per_feature = FLAGS_ele_num;
    // (*output_mem_node) = (*input_mem_node);
    // output_mem_node->element_per_feature= CommonUtils::calc_ele(FLAGS_ele_num, FLAGS_op_type);

    // input_mem_node->storage_ptr  = ptr;
    // output_mem_node->storage_ptr = ptr + size_input;
    // // output_mem_node->storage_ptr = ptr;

    // operator_node->prev_mem_nodes.push_back(input_mem_node);
    // operator_node->next_mem_nodes.push_back(output_mem_node);
    // operator_node->op_type    = FLAGS_op_type;
    // operator_node->thread_num = FLAGS_thread_num;
    // operator_node->id = 1;

    // input_mem_node->total_size = size_input;
    // input_mem_node->next_opt_nodes.push_back(operator_node);

    // std::vector<std::shared_ptr<OperatorNode> > task_node_list;
    // task_node_list.push_back(operator_node);

    // CommonUtils::memory_dtype_assign(operator_node);

    DpaPreProcessUnit * dpaprocessunit;
    CpuPreProcessUnit * cpuprocessunit;

    // DebugUnit::mem_init(input_mem_node);
    // DebugUnit::mem_debug(input_mem_node, 10);
    // DebugUnit::mem_init(output_mem_node);

    for (int i = 0; i < 1; ++i) {
        create_op_node();
        create_op_node(); 
    }

    void * task_dispatch_ptr = memoryunit.Malloc(sizeof(int64_t) * 2);

    if (FLAGS_device_type == "dpa") {
        if (FLAGS_dpa_type == 1) {
            dpaprocessunit = new DpaPreProcessUnit(sdk_type_t::DOCA_SDK);
        }
        else if(FLAGS_dpa_type == 2) {
            dpaprocessunit = new DpaPreProcessUnit(sdk_type_t::TEST_SDK); 
        } 
        else {
            dpaprocessunit = new DpaPreProcessUnit();
        }
        dpaprocessunit->register_host_memory(memoryunit.ptr, (size_t)DEFAULT_WINDOWS_SIZE );
        dpaprocessunit->thread_init(MAX_DPA_THREAD_NUM);// 依靠operator去调整，试一试。

        dpaprocessunit->init_dpa_thread_info(operation_node_list);

        if (FLAGS_dpa_type == 2) { 
            dpaprocessunit->init_control_info(task_dispatch_ptr);   
        }
        
        for (int i = 0; i < FLAGS_execute_time; ++i) { 
            xmh::Timer dpa_timer(FLAGS_op_type); 
            dpaprocessunit->task_execute(operation_node_list);
            dpa_timer.end();
        }

        if (FLAGS_device_type == "dpa") {
            // sleep(1);
            size_t max_execute_time_per_thread = (dpaprocessunit->dpaunit)->get_execute_cycle();
            double max_execute_time_us = (double)1.0 * max_execute_time_per_thread * 1000000.00 / 1800000000.00;
            double total_execute_time_us = xmh::Timer::ManualQuery(FLAGS_op_type) / 1000.00;

            std::cout << FLAGS_dpa_type << " task_execute info" << std::endl;
            printf ("max_execute_time_per_thread = %.6lf us\n", max_execute_time_us);
            printf ("total_execute_time = %.6lf us\n", total_execute_time_us);
        }
    }
    else {
        cpuprocessunit = new CpuPreProcessUnit();
        cpuprocessunit->thread_init(FLAGS_thread_num);
        for (int i = 0; i < FLAGS_execute_time; ++i) { 
            xmh::Timer cpu_timer(FLAGS_op_type);
            cpuprocessunit->task_execute(operation_node_list);
            cpu_timer.end();
        }
    } 

   
    for (auto op_node : operation_node_list) {
        correctness_check( (int64_t *) (op_node->prev_mem_nodes[0]->storage_ptr),
                     (int64_t *) (op_node->next_mem_nodes[0]->storage_ptr), 
                     FLAGS_batch_size);
        DebugUnit::mem_debug(op_node->next_mem_nodes[0], 256);
        printf ("\n");
    }

    for (auto incorrect_num : correct_list) {
        if (incorrect_num > 0) {
            LOG(INFO) << "ERRRRRRROORRRRROORORORRR FIND";
            return 0;
        }
    }

    LOG(INFO) << "SUCCCCESSSSSSSSSSSSSSSSSS"; 

    xmh::Reporter::StopReportThread();
    xmh::Reporter::Report();

    free(memoryunit.ptr); 

    return 0;
}

// ./../bin_dpu/test_operator --op_type="sigrid_hash" --thread_num=128 --device_type="dpa" --batch_size=8192 --dpa_type="0"