#include "libORCL/ORCL.h"
#include "libORCL/ORCL_size.h"
#include <getopt.h>

#define NODE_CPU_COUNT 12
#define MAX_THREAD_NUM 8

// CPU ids used for pinning sender threads — presumably the CPUs of NUMA
// node 0 (cores 0-5 plus SMT siblings 12-17); TODO confirm against the
// machine topology.
int node_0_cpu[NODE_CPU_COUNT] = {0,1,2,3,4,5,12,13,14,15,16,17};

// Message payload size in bytes; set from the command line (-p / --payload_size).
int payload_size = -1;

// Per-thread throughput (operations per second), written by run_thread and
// aggregated in main.
double thread_mops[MAX_THREAD_NUM] = {};

// 只有一个发送节点
void run_thread(struct orcl_context* orcl_ctx){
    printf("发送端执行线程:%d\n", orcl_ctx->orcl_ctx_id);

    sleep(2);

    uint64_t seed = 0xdeadbeef;

    int i;
    // 这是随机数
    for (i = 0; i < (orcl_ctx->orcl_ctx_id + 1) * 1000000; i++)
    {
        // 一个血妈神奇的随机数生成机制，最后生成的随机数要放在seed中
        orcl_fastrand(&seed);
    }

    int iter_time = 2621440;

    // 123轮流发送
    int dest_node_id = (orcl_ctx->orcl_ctx_id % 3) + 1;

    // int dest_node_id = 1;

    // 一共4个线程，每个线程分配message buf的一段
    // 首先查看为每一个线程分配的缓冲区大小
    int thread_message_buf_size = orcl_ctx->message_buffer_size / 4;

    // 缓冲区的首地址
    int thread_message_buf_offset = orcl_ctx->orcl_ctx_id * thread_message_buf_size;

    // 缓冲区内地址
    int inner_thread_buffer_offset = 0;

    // 记录执行的时间
    struct timespec start, end;

    clock_gettime(CLOCK_REALTIME, &start);

    // 执行大量发送
    for(i = 0; i < iter_time; i++){
        

        if (i % (iter_time / 10) >= 0 && i % (iter_time / 10) < 1)
        {
            printf("thread %d, %d0 %% has been finish \n", orcl_ctx->orcl_ctx_id, i / (iter_time / 10));
        }

        int _size = payload_size;
        
        // 接收区
        int _offset = thread_message_buf_offset + inner_thread_buffer_offset;

        // 执行发送
        struct orcl_message_header header;
        header.magic = 'a';
        header.node_id = dest_node_id;
        header.offset = _offset;
        header.size = _size + sizeof(header);

        // 拷贝到专属缓冲区
        memcpy((void *)(orcl_ctx->message_buffer + _offset), &header, sizeof(header));

        // if(dest_node_id == 3){
        //     struct timespec req = {0}, rem = {0};
        //     req.tv_sec = 0;
        //     req.tv_nsec = 20;

        //     nanosleep(&req, &rem);
        // }

        // 这里执行发送
        // 目的地址和首地址是同一个
        orcl_write_message_to_remote(orcl_ctx, _offset);

        inner_thread_buffer_offset = (((inner_thread_buffer_offset + 512) % thread_message_buf_size)/512)*512;
        dest_node_id = (dest_node_id+1) % 3 + 1;
        // dest_node_id = 1;
    }

    clock_gettime(CLOCK_REALTIME, &end);

    // 计算执行的时间
    double seconds = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1000000000;

    thread_mops[orcl_ctx->orcl_ctx_id] = iter_time / seconds;

    printf("main: Thread %d: %.2f ops.\n", orcl_ctx->orcl_ctx_id, iter_time / seconds);

    printf("发送端执行完毕:%d\n", orcl_ctx->orcl_ctx_id);
}

// Benchmark driver.
//   --server_num / -n   : this node's id (node 0 is the single sender)
//   --payload_size / -p : message payload bytes (1 .. 511 - header size)
// Node 0 spawns 2 pinned sender threads, waits for them, and publishes
// "finish"; every other node blocks until "finish" is published. Finally the
// aggregate throughput is printed and (on node 0) appended to a CSV log.
int main(int argc,char** argv){

    // This node's id; must be supplied on the command line.
    int server_num = -1;

    static struct option opts[] = {
        {"server_num", required_argument, 0, 'n'},
        {"payload_size", required_argument, 0, 'p'},
        // getopt_long requires the long-option array to end with an all-zero
        // entry; without it the scan can run past the end of the array (UB).
        {0, 0, 0, 0},
    };

    while (1){
        int c = getopt_long(argc, argv, "n:p:", opts, NULL);

        if (c == -1)
        {
            break;
        }

        switch (c){
            case 'n':
                server_num = atoi(optarg);
                break;

            case 'p':
                payload_size = atoi(optarg);
                break;

            default:
                printf("Invalid argument %d\n", c);
                assert(false);
        }
    }

    assert(server_num>=0);
    // Payload plus header must fit in a single 512-byte message slot.
    assert(payload_size >= 1 && payload_size <= (511-(int)sizeof(struct orcl_message_header)));

    // Map node id to NIC port and NUMA node:
    // nodes 0 and 3 use port 1 / NUMA 0, nodes 1 and 2 use port 0 / NUMA 1.
    int port_index;
    int numa_id;

    if(server_num == 0 || server_num == 3){
        port_index = 1; 
        numa_id = 0;
    }else{
        port_index = 0; 
        numa_id = 1;
    }

    // Two nodes per group: nodes 0,1 form group 0; nodes 2,3 form group 1.
    int group_size = 2;
    int group_id = server_num / group_size;

    // First context allocates the 128M message buffer...
    struct orcl_context* orcl_ctx = orcl_context_init(server_num,group_id,4,2,group_size,1,port_index,numa_id,NULL,M_128,16*server_num+1,NULL,0);

    struct orcl_context* ctx_arr[4];

    ctx_arr[0] = orcl_ctx;

    int i;
    // ...and each additional context shares that buffer. Only 2 contexts are
    // created (one per sender thread), although ctx_arr has room for 4.
    for(i = 1; i < 2; i++){
        ctx_arr[i] = orcl_context_init(server_num,group_id,4,2,group_size,1,port_index,numa_id,orcl_ctx->message_buffer,M_128,16*server_num+1,NULL,i);
    }

    auto thread_arr = new std::thread[4];

    // Only node 0 drives the benchmark with sender threads.
    if(server_num == 0){
        // Start pinning from entry 5 of the NUMA-0 CPU list.
        int numa_node_index = 5;

        for(i = 0; i < 2; i++){
            thread_arr[i] = std::thread(run_thread, ctx_arr[i]);

            // Pin each sender thread to its own core on NUMA node 0.
            cpu_set_t cpuset;
            CPU_ZERO(&cpuset);

            CPU_SET(node_0_cpu[numa_node_index%NODE_CPU_COUNT], &cpuset);

            numa_node_index++;

            int rc = pthread_setaffinity_np(thread_arr[i].native_handle(),
                                            sizeof(cpu_set_t), &cpuset);
            if (rc != 0)
            {
                printf("Error %d while calling pthread_setaffinity_np\n", rc);
            }
        }

        // Wait for all sender threads to finish.
        for(i = 0; i < 2; i++){
            printf("main: waiting for thread %d\n", i);
            thread_arr[i].join();
            printf("main: thread %d done\n", i);
        }

        orcl_publish_ready("finish");
    }else{
        orcl_wait_till_ready("finish");
    }

    // All threads have been joined — release the array (it was previously
    // leaked: allocated with new[] and never freed).
    delete[] thread_arr;

    // Aggregate the per-thread throughput into thread_mops[0].
    for (i = 1; i < 2; i++)
    {
        thread_mops[0] = thread_mops[0] + thread_mops[i];
    }

    printf("main: all: %.2f ops, average: %.2f ops\n", thread_mops[0], thread_mops[0] / 2);
    
    if(server_num == 0){
        // Append one CSV row: payload_size,thread_num,all_mops,avg_mops.
        char log_str[100] = {};

        sprintf(&log_str[0], "%d,%d,%.2f,%.2f", payload_size, 2, thread_mops[0], thread_mops[0] / 2);

        APPEND_2_LOG("./test_2thread.csv", &log_str[0]);
    }

    // Was `return 1`: a nonzero exit status signals failure to the shell;
    // the benchmark completing normally is success.
    return 0;
}