#include "libORCL/ORCL.h"
#include "libORCL/ORCL_size.h"

#include <getopt.h>
#include <pthread.h>
#include <sched.h>
#include <unistd.h>

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <ctime>

#include <thread>

// Number of CPU ids listed for NUMA node 0 below.
#define NODE_CPU_COUNT 12
// Upper bound on worker threads (currently unused here; main() spawns 2).
#define MAX_THREAD_NUM 8

// CPUs used for pinning worker threads in main(). Presumably physical
// cores 0-5 of NUMA node 0 plus their hyperthread siblings 12-17 —
// verify against the actual machine topology.
int node_0_cpu[NODE_CPU_COUNT] = {0,1,2,3,4,5,12,13,14,15,16,17};


// 多线程发送
void run_thread(struct orcl_context_sync* sync_ctx){
    printf("开启线程:%d\n", sync_ctx->orcl_ctx_id);

    // 1到3号节点之间轮训发送
    int node_id = 1;

    int i;
    for(i = 0; i < 32768; i++){
        // 摇两个随机数，一个是size，一个是offset
        uint64_t seed = 0xdeadbeef;
        // 这是随机数
        int j;
        for (j = 0; j < sync_ctx->node_id * 1000000; j++)
        {
            // 一个血妈神奇的随机数生成机制，最后生成的随机数要放在seed中
            orcl_fastrand(&seed);
        }

        int _size = 256;
        int _offset = (int)((orcl_fastrand(&seed) % (sync_ctx->message_buffer_size - _size)) / 512) * 512;

        // 换成读
        sync_read_message_from_remote(sync_ctx, _offset, node_id, _offset, _size);

        node_id = (node_id + 1) % 3 + 1;
    }
}

int main(int argc,char** argv){

    // Server index, required on the command line via --server_num / -n.
    int server_num = -1;

    // BUGFIX: getopt_long requires the long-option table to be
    // terminated by an all-zero element; without it, scanning an
    // unrecognized long option runs off the end of the array (UB).
    static struct option opts[] = {
        {"server_num", required_argument, 0, 'n'},
        {0, 0, 0, 0},
    };

    while (1){
        int c = getopt_long(argc, argv, "n:", opts, NULL);

        if (c == -1)
        {
            break;
        }

        switch (c){
            case 'n':
                server_num = atoi(optarg);
                break;

            default:
                printf("Invalid argument %d\n", c);
                assert(false);
        }
    }

    assert(server_num>=0);

    // Map the server onto a NIC port / NUMA node pair: server 0 uses
    // port 0 + NUMA node 1, all other servers use port 1 + NUMA node 0.
    int port_index;
    int numa_id;

    if(server_num == 0){
        port_index = 0;
        numa_id = 1;
    }else{
        port_index = 1;
        numa_id = 0;
    }

    // Initialize the synchronous ORCL context (4 nodes, message buffer
    // of size M_2 — see orcl_context_sync_init for parameter meanings).
    struct orcl_context_sync* sync_ctx = orcl_context_sync_init(server_num,4,2,port_index,numa_id,NULL,M_2,16*server_num+1,0);

    // --- Test 1: single remote-write correctness (server 0 -> server 1) ---
    if(server_num == 0){
        char *buf = (char *)sync_ctx->message_buffer;
        buf[0] = 'a';

        sync_write_message_to_remote(sync_ctx, 0, 1, 0, 256);
        sleep(1);

        orcl_publish_ready("finish1");
    }else{
        orcl_wait_till_ready("finish1");
    }

    // Server 1 checks that the byte written by server 0 arrived.
    if(server_num == 1){
        char *buf = (char *)sync_ctx->message_buffer;

        if(buf[0] == 'a'){
            printf("发送成功\n");
        }else{
            printf("发送失败\n");
        }
    }

    // --- Test 2: bulk-write throughput measured on server 0 ---
    if(server_num == 0){

        // NOTE(review): the original comment claimed round-robin over
        // nodes 1..3, but node_id is never advanced in this loop — all
        // writes target node 1. Preserved as-is; confirm intent.
        int node_id = 1;

        struct timespec start, end;

        clock_gettime(CLOCK_REALTIME, &start);

        // BUGFIX: seed once outside the loop so each iteration draws a
        // fresh random offset. The original re-seeded per iteration
        // (and its warm-up loop was dead for server_num == 0), so every
        // write hit the same offset.
        uint64_t seed = 0xdeadbeef;

        int i;
        for(i = 0; i < 262144; i++){
            // 256-byte write at a random 512-byte-aligned offset that
            // keeps offset + size inside the message buffer.
            int _size = 256;
            int _offset = (int)((orcl_fastrand(&seed) % (sync_ctx->message_buffer_size - _size)) / 512) * 512;

            sync_write_message_to_remote(sync_ctx, _offset, node_id, _offset, _size);

        }

        clock_gettime(CLOCK_REALTIME, &end);

        double seconds = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1000000000;

        double ops = 262144 / seconds;

        printf("main: %.2f ops.\n", ops);

        sleep(1);
        orcl_publish_ready("finish2");

        printf("批量测试完毕\n");
    }else{
        orcl_wait_till_ready("finish2");
    }

    // --- Test 3: single remote-read correctness (server 0 reads from 1) ---
    if(server_num == 1){
        // Server 1 stages a marker byte for server 0 to read back.
        char *buf = (char *)sync_ctx->message_buffer;
        buf[0] = 'z';
        orcl_publish_ready("finishread");
    }else{
        orcl_wait_till_ready("finishread");
    }


    if(server_num == 0){
        sync_read_message_from_remote(sync_ctx, 0, 1, 0, 256);
        sleep(1);

        char *buf = (char *)sync_ctx->message_buffer;

        if(buf[0] == 'z'){
            printf("接收成功\n");
        }else{
            printf("接收失败\n");
        }
    }

    // --- Test 4: multi-threaded reads from server 0, one context each ---
    struct orcl_context_sync* sync_ctx_arr[2];

    sync_ctx_arr[0] = sync_ctx;
    sync_ctx_arr[1] = orcl_context_sync_init(server_num,4,2,port_index,numa_id,NULL,M_2,16*server_num+1,1);

    auto thread_arr = new std::thread[2];

    if(server_num == 0){
        int i;
        for(i = 0; i < 2; i++){
            thread_arr[i] = std::thread(run_thread, sync_ctx_arr[i]);

            // Pin thread i to node_0_cpu[i*2] — stride 2 presumably
            // selects distinct physical cores rather than hyperthread
            // siblings; verify against the machine topology.
            cpu_set_t cpuset;
            // Clear the set, then add exactly one CPU.
            CPU_ZERO(&cpuset);
            CPU_SET(node_0_cpu[i*2%NODE_CPU_COUNT], &cpuset);
            // Bind the just-spawned thread to that CPU.
            int rc = pthread_setaffinity_np(thread_arr[i].native_handle(),
                                            sizeof(cpu_set_t), &cpuset);
            if (rc != 0)
            {
                printf("Error %d while calling pthread_setaffinity_np\n", rc);
            }
        }

        // BUGFIX: wait for the workers to finish. The original returned
        // immediately, so the process exited while the multithreaded
        // test was still running (and the thread array leaked).
        for(i = 0; i < 2; i++){
            thread_arr[i].join();
        }
    }

    delete[] thread_arr;

    // NOTE(review): servers != 0 exit here while server 0's threads may
    // still be reading from them — confirm whether a final ready/wait
    // barrier is needed before remote memory goes away.

    // BUGFIX: exit with 0 on success; the original returned 1, which
    // signals failure to the shell and any wrapping scripts.
    return 0;
}