#include "libORCL/ORCL.h"
#include "libORCL/ORCL_size.h"
#include <getopt.h>

// Number of hardware threads on NUMA node 0 available for pinning.
#define NODE_CPU_COUNT 12
// Upper bound on sender threads (size of the per-thread result array).
#define MAX_THREAD_NUM 8

// Hardware-thread IDs belonging to NUMA node 0.
// NOTE(review): presumably 0-5 and 12-17 are the two SMT siblings of the
// same six physical cores — confirm against the machine topology.
int node_0_cpu[NODE_CPU_COUNT] = {0,1,2,3,4,5,12,13,14,15,16,17};

// Per-thread throughput results, indexed by orcl_ctx_id; written by
// run_thread and summed in main. Despite the name, the stored value is
// plain operations/second, not millions of ops/second.
double thread_mops[MAX_THREAD_NUM] = {};

/*
 * Benchmark worker: issues `iter_time` RDMA writes of random size and
 * 64-byte-aligned offset through the given context, then records the
 * achieved throughput (operations/second) in thread_mops[ctx_id].
 */
void run_thread(struct orcl_context* orcl_ctx){
    uint64_t seed = 0xdeadbeef;

    int i;
    // Warm up the PRNG a context-dependent number of steps so each
    // thread draws a different random sequence from the shared seed.
    for (i = 0; i < (orcl_ctx->orcl_ctx_id + 1) * 1000000; i++)
    {
        // orcl_fastrand advances `seed` in place and returns the next value.
        orcl_fastrand(&seed);
    }

    int iter_time = 1048576;            // 2^20 operations per thread
    int progress_step = iter_time / 10; // report progress every 10%

    struct timespec start, end;

    // CLOCK_MONOTONIC, not CLOCK_REALTIME: the wall clock can be stepped
    // by NTP, which would corrupt (even negate) the measured interval.
    clock_gettime(CLOCK_MONOTONIC, &start);

    for(i = 0; i < iter_time; i++){
        // Progress line at every 10% boundary (the original condition
        // `x >= 0 && x < 1` on a non-negative remainder is just `x == 0`).
        if (i % progress_step == 0)
        {
            printf("thread %d, %d0 %% has been finish \n", orcl_ctx->orcl_ctx_id, i / progress_step);
        }

        // Random payload size in [1, 1024] bytes.
        int _size = (int)(orcl_fastrand(&seed) % 1024 + 1);
        // Random offset rounded down to a 64-byte (cache-line) boundary
        // so [offset, offset + size) stays inside the buffer.
        // NOTE(review): assumes message_buffer_size > _size (true for the
        // 2 MiB buffers used here); otherwise the modulus is 0 or negative.
        int _offset = (int)((orcl_fastrand(&seed) % (orcl_ctx->message_buffer_size - _size)) / 64) * 64;

        // One-sided write to peer 1, same offset on both local and remote side.
        orcl_inner_group_rdma_write(orcl_ctx, 1, _offset, _offset, _size);
    }

    clock_gettime(CLOCK_MONOTONIC, &end);

    double seconds = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1000000000;

    // Publish this thread's ops/sec for main() to aggregate.
    thread_mops[orcl_ctx->orcl_ctx_id] = iter_time / seconds;
}



int main(int argc,char** argv){

    // Server (node) index, parsed from --server_num / -n.
    int server_num = -1;

    // getopt_long() requires the long-option array to be terminated by an
    // all-zero element; without it the scan runs off the end of the array.
    static struct option opts[] = {
        {"server_num", required_argument, 0, 'n'},
        {0, 0, 0, 0},
    };

    while (1){
        int c = getopt_long(argc, argv, "n:", opts, NULL);

        if (c == -1)
        {
            break;
        }

        switch (c){
            case 'n':
                server_num = atoi(optarg);
                break;

            default:
                printf("Invalid argument %d\n", c);
                assert(false);
        }
    }

    assert(server_num>=0);

    // Only the node index is configurable; the two processes sit on two
    // nodes and traffic is load-balanced across the two NICs.
    int port_index = server_num % 2;
    // The pinned CPUs and the NIC are on opposite NUMA nodes.
    int numa_id = (server_num + 1) % 2;

    // Establish the primary connection / context.
    struct orcl_context* orcl_ctx = orcl_context_init(server_num,0,2,1,2,0,port_index,numa_id,NULL,M_2,16*server_num+1,NULL,0);

    // Zero the message buffer before any RDMA traffic.
    memset((void *)orcl_ctx->message_buffer, 0, orcl_ctx->message_buffer_size);

    // Batching: one extra context per sender thread, sharing the primary.
    struct orcl_context* ctx_arr[4];

    ctx_arr[0] = orcl_ctx;

    int i;
    for(i = 1; i < 4; i++){
        ctx_arr[i] = orcl_context_init(server_num,0,2,1,2,0,
            port_index,numa_id,NULL,M_2,16*server_num+1,orcl_ctx,i);
    }

    // Stack-allocated thread objects — the original `new std::thread[4]`
    // was never delete[]d (a leak, if a harmless one at process exit).
    std::thread thread_arr[4];

    // Server 0 runs the sender threads; the peer just waits for "finish".
    if(server_num == 0){
        // Launch four threads, one per context.
        for (i = 0; i < 4; i++)
        {
            thread_arr[i] = std::thread(run_thread, ctx_arr[i]);

            /* Pin thread i to hardware thread 2 * i */
            // Stride of 2 through the CPU list keeps each thread on its
            // own physical core (two hardware threads per core).
            cpu_set_t cpuset;
            CPU_ZERO(&cpuset);
            CPU_SET(node_0_cpu[i*2%NODE_CPU_COUNT], &cpuset);
            // Affinity is a preference the scheduler must honor; a nonzero
            // return means the pin failed but the benchmark still runs.
            int rc = pthread_setaffinity_np(thread_arr[i].native_handle(),
                                            sizeof(cpu_set_t), &cpuset);

            if (rc != 0)
            {
                printf("Error %d while calling pthread_setaffinity_np\n", rc);
            }
        }

        // Wait for every sender thread to complete.
        for (i = 0; i < 4; i++)
        {
            printf("main: waiting for thread %d\n", i);
            thread_arr[i].join();
            printf("main: thread %d done\n", i);
        }
        orcl_publish_ready("finish");

        // Aggregate per-thread throughput into slot 0.
        for (i = 1; i < 4; i++)
        {
            thread_mops[0] = thread_mops[0] + thread_mops[i];
        }

        printf("main: all: %.2f ops, average: %.2f ops\n", thread_mops[0], thread_mops[0] / 4);
    }else{
        orcl_wait_till_ready("finish");
    }

    // 0 = success; the original `return 1` reported failure to the shell
    // even on a clean run.
    return 0;
}