#include "libORCL/ORCL.h"
#include "libORCL/ORCL_size.h"
#include <getopt.h>

// Number of hardware threads listed in node_0_cpu below.
#define NODE_CPU_COUNT 12
// Capacity of the per-thread results array; the program currently runs 2 threads.
#define MAX_THREAD_NUM 8

// Hardware-thread ids used for pinning sender threads (see pthread_setaffinity_np
// in main). Presumably the CPUs of NUMA node 0, with 12..17 the SMT siblings of
// 0..5 — TODO confirm against the machine topology.
int node_0_cpu[NODE_CPU_COUNT] = {0,1,2,3,4,5,12,13,14,15,16,17};

// Bytes per message, set from the required --payload_size option (asserted 1..499).
int payload_size = -1;


// Per-thread achieved operations/second, indexed by orcl_ctx_id; summed in main.
double thread_mops[MAX_THREAD_NUM] = {};




// Multi-threaded sender: issues iter_time synchronous remote writes from this
// context's slice of the shared message buffer and records the achieved
// operations/second in thread_mops[orcl_ctx_id].
void run_thread(struct orcl_context_sync* sync_ctx){
    printf("开启线程:%d\n", sync_ctx->orcl_ctx_id);

    // Initial target node in [1,3], derived from the context id.
    // NOTE(review): the per-iteration rotation below maps 1->1, 2->2, 3->1,
    // so after the first write each thread is effectively pinned to node 1
    // or 2 — confirm whether true round-robin over nodes 1..3 was intended.
    int node_id = (sync_ctx->orcl_ctx_id % 3) + 1;

    // Warm-up spin on the library PRNG, proportional to the node id.
    // The resulting seed is never consumed afterwards; this only staggers
    // per-node start-up work.
    uint64_t seed = 0xdeadbeef;
    for (int j = 0; j < sync_ctx->node_id * 1000000; j++)
    {
        orcl_fastrand(&seed);
    }

    // Carve the shared message buffer into 4 equal per-thread slices.
    int thread_message_buf_size = sync_ctx->message_buffer_size / 4;

    // Base offset of this thread's slice.
    int thread_message_buf_offset = sync_ctx->orcl_ctx_id * thread_message_buf_size;

    // Current 512-byte-aligned cursor inside the slice.
    int inner_thread_buffer_offset = 0;

    // Give the other threads/peers time to come up before timing starts.
    sleep(2);

    // Wall-clock the whole send loop.
    struct timespec start, end;
    clock_gettime(CLOCK_REALTIME, &start);

    const int iter_time = 2621440;

    for(int i = 0; i < iter_time; i++){
        // Progress report every 10% of the iterations.
        // (was: i % (iter_time/10) >= 0 && i % (iter_time/10) < 1 — the
        // modulo of a non-negative i is never negative, so this is == 0.)
        if (i % (iter_time / 10) == 0)
        {
            printf("thread %d, %d0 %% has been finish \n", sync_ctx->orcl_ctx_id, i / (iter_time / 10));
        }

        // Fixed payload size; offset walks the thread's buffer slice.
        int _size = payload_size;
        int _offset = thread_message_buf_offset + inner_thread_buffer_offset;

        sync_write_message_to_remote(sync_ctx, _offset, node_id, _offset, _size);

        // Advance by one 512-byte slot, wrapping inside the slice.
        inner_thread_buffer_offset = (((inner_thread_buffer_offset + 512) % thread_message_buf_size)/512)*512;

        // was: if (i <= iter_time) { 2-node rotation } else { 3-node rotation }
        // — the condition is always true inside this loop, so the 3-node
        // branch was unreachable dead code; only this rotation ever ran.
        node_id = (node_id + 1) % 2 + 1;
    }

    clock_gettime(CLOCK_REALTIME, &end);

    // Elapsed seconds with nanosecond resolution.
    double seconds = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1000000000;

    // Publish this thread's throughput for main() to aggregate.
    thread_mops[sync_ctx->orcl_ctx_id] = iter_time / seconds;

    printf("main: Thread %d: %.2f ops.\n", sync_ctx->orcl_ctx_id, iter_time / seconds);

    printf("发送端执行完毕:%d\n", sync_ctx->orcl_ctx_id);
}

// Entry point: parses --server_num/--payload_size, sets up two ORCL sync
// contexts, runs two pinned sender threads on server 0, then aggregates and
// logs throughput. Server 0 signals completion; other servers wait for it.
int main(int argc,char** argv){

    // Server index, from the required --server_num option.
    int server_num = -1;

    // getopt_long requires the long-option table to be terminated by an
    // all-zeros entry; without it the parser reads past the end of the array.
    static struct option opts[] = {
        {"server_num", required_argument, 0, 'n'},
        {"payload_size", required_argument, 0, 'p'},
        {0, 0, 0, 0},
    };

    while (1){
        int c = getopt_long(argc, argv, "n:p:", opts, NULL);

        if (c == -1)
        {
            break;
        }

        switch (c){
            case 'n':
                server_num = atoi(optarg);
                break;

            case 'p':
                payload_size = atoi(optarg);
                break;

            default:
                printf("Invalid argument %d\n", c);
                assert(false);
        }
    }

    // Both options are mandatory; the payload must fit a message slot.
    assert(server_num>=0);
    assert(payload_size>=1 && payload_size <= 499);

    // NIC port and NUMA node for this process.
    int port_index;
    int numa_id;

    // Server 0 runs on NUMA node 0 / port 1; all others on node 1 / port 0.
    if(server_num == 0){
        port_index = 1;
        numa_id = 0;
    }else{
        port_index = 0;
        numa_id = 1;
    }

    // Primary context owns the 128M message buffer.
    struct orcl_context_sync* sync_ctx = orcl_context_sync_init(server_num,4,2,port_index,numa_id,NULL,M_128,16*server_num+1,0);

    // Per-thread contexts; slots 2 and 3 are currently unused, so
    // zero-initialize the array to avoid indeterminate pointers.
    struct orcl_context_sync* sync_ctx_arr[4] = {};

    sync_ctx_arr[0] = sync_ctx;
    // Second context shares the first context's message buffer.
    sync_ctx_arr[1] = orcl_context_sync_init(server_num,4,2,port_index,numa_id,sync_ctx->message_buffer,M_128,16*server_num+1,1);
    // sync_ctx_arr[2] = orcl_context_sync_init(server_num,4,1,port_index,numa_id,sync_ctx->message_buffer,M_128,16*server_num+1,2);
    // sync_ctx_arr[3] = orcl_context_sync_init(server_num,4,1,port_index,numa_id,sync_ctx->message_buffer,M_128,16*server_num+1,3);

    auto thread_arr = new std::thread[4];
    int i;
    if(server_num == 0){

        for(i = 0; i < 2; i++){
            thread_arr[i] = std::thread(run_thread, sync_ctx_arr[i]);

            // Pin thread i to hardware thread node_0_cpu[2*i] so each sender
            // gets its own physical core (the CPUs are SMT pairs).
            cpu_set_t cpuset;
            CPU_ZERO(&cpuset);
            CPU_SET(node_0_cpu[i*2%NODE_CPU_COUNT], &cpuset);
            int rc = pthread_setaffinity_np(thread_arr[i].native_handle(),
                                            sizeof(cpu_set_t), &cpuset);
            if (rc != 0)
            {
                printf("Error %d while calling pthread_setaffinity_np\n", rc);
            }
        }

        // Wait for both sender threads to finish.
        for(i = 0; i < 2; i++){
            printf("main: waiting for thread %d\n", i);
            thread_arr[i].join();
            printf("main: thread %d done\n", i);
        }

        // Tell the other servers the benchmark is complete.
        orcl_publish_ready("finish123");
    }else{
        orcl_wait_till_ready("finish123");
    }

    // was: leaked — release the thread array now that all joins are done.
    delete[] thread_arr;

    // Sum per-thread throughput into slot 0 (non-zero only on server 0).
    for (i = 1; i < 2; i++)
    {
        thread_mops[0] = thread_mops[0] + thread_mops[i];
    }

    printf("main: all: %.2f ops, average: %.2f ops\n", thread_mops[0], thread_mops[0] / 2);

    if(server_num == 0){
        // CSV row: payload_size,thread_num,all_mops,avg_mops
        char log_str[100] = {};

        // Bounded formatting; content (int,int,two %.2f) fits well within 100.
        snprintf(log_str, sizeof(log_str), "%d,%d,%.2f,%.2f", payload_size, 2, thread_mops[0], thread_mops[0] / 2);

        APPEND_2_LOG("./test_2thread.csv", &log_str[0]);
    }

    // was: return 1 — non-zero exit conventionally signals failure to the
    // shell/scripts; the run completed successfully here.
    return 0;
}