#include "ORCL.h"

/*
 * Create and fully initialize a sync-mode ORCL context.
 *
 * Allocates (or adopts) the message buffer, opens one device context / PD /
 * MR per DCI, optionally allocates device memory (DM), creates a single DCT
 * target, publishes it, fetches every peer's DCT info, and finally barriers
 * with all other nodes via the publish/wait-ready handshake.
 *
 * node_id          : this node's id, in [0, node_num)
 * node_num         : total number of participating nodes
 * dci_num          : number of DC initiators to create (1..ORCL_MAX_DCI_NUM)
 * port_index       : IB port to resolve (0..16)
 * numa_node_id     : NUMA node for buffer allocation (-1 = don't care)
 * prealloc_mes_buf : optional caller-supplied message buffer; when NULL a
 *                    shared-memory buffer is allocated here
 * mes_buf_size     : message buffer size in bytes (0..M_1024)
 * shm_key_start    : first shm key this context may consume
 * orcl_ctx_id      : context id, used only for logging here
 *
 * Returns the initialized context (never NULL: allocation failures abort).
 */
struct orcl_context_sync* orcl_context_sync_init(int node_id, int node_num, int dci_num, int port_index, int numa_node_id, volatile void *prealloc_mes_buf, int mes_buf_size, int shm_key_start, int orcl_ctx_id){

    assert(port_index >= 0 && port_index <= 16);
    assert(numa_node_id >= -1 && numa_node_id <= 8);
    assert(dci_num > 0 && dci_num <= ORCL_MAX_DCI_NUM);
    assert(mes_buf_size >= 0 && mes_buf_size <= M_1024);
    assert(node_id >= 0 && node_id < node_num);

    // Allocate the zeroed context structure.
    // FIX: the malloc result was previously dereferenced (memset) without a
    // NULL check, which is undefined behavior on allocation failure.
    struct orcl_context_sync* sync_ctx = (struct orcl_context_sync *)malloc(sizeof(struct orcl_context_sync));
    CPE(!sync_ctx, "ORCL: Couldn't allocate sync context", 0);
    memset(sync_ctx, 0, sizeof(struct orcl_context_sync));

    // Reset the per-DCI signal counters (defensive; the memset above already
    // zeroed the whole struct).
    memset(sync_ctx->dci_signal_period, 0, ORCL_MAX_DCI_NUM * sizeof(int));

    // Basic metadata.
    sync_ctx->node_id = node_id;
    sync_ctx->node_num = node_num;
    sync_ctx->dci_num = dci_num;
    sync_ctx->message_buffer_size = mes_buf_size;
    sync_ctx->shm_key_start = shm_key_start;
    sync_ctx->shm_key_end = shm_key_start;
    sync_ctx->port_index = port_index;
    sync_ctx->numa_node_id = numa_node_id;
    sync_ctx->orcl_ctx_id = orcl_ctx_id;

    // Grant every access right we may need on the MRs.
    int ib_flags = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ |
        IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_ATOMIC;

    // Decide whether the message buffer is shared with the caller.
    if(prealloc_mes_buf == NULL){
        // Round the registration size up to a multiple of M_2 (2 MiB).
        int reg_size = 0;

        while(reg_size < mes_buf_size){
            reg_size += M_2;
        }

        assert(sync_ctx->shm_key_end >= 1 && sync_ctx->shm_key_end <= 128);

        // The send buffer; in the sync interface this is the only one.
        sync_ctx->message_buffer = (volatile uint8_t *) orcl_malloc_socket(sync_ctx->shm_key_end, reg_size, numa_node_id);

        sync_ctx->shm_key_end = sync_ctx->shm_key_end + 1;

        prealloc_mes_buf = sync_ctx->message_buffer;

        // Zero the freshly allocated buffer.
        memset((char *) sync_ctx->message_buffer, 0, reg_size);
    }else{
        sync_ctx->message_buffer = (volatile uint8_t *)prealloc_mes_buf;
    }

    // DCIs are exclusively owned by this context.
    sync_ctx->dci_num = dci_num;
    sync_ctx->next_dci_id = 0;

    int i;
    for(i = 0; i < dci_num; i++){
        struct ibv_device *ib_dev = sync_resolve_port_index(sync_ctx, port_index);
        CPE(!ib_dev, "ORCL: IB device not found", 0);

        sync_ctx->dci_ctx[i] = ibv_open_device(ib_dev);
        CPE(!sync_ctx->dci_ctx[i], "ORCL: Couldn't get context", 0);

        // Allocate a protection domain.
        sync_ctx->dci_pd[i] = ibv_alloc_pd(sync_ctx->dci_ctx[i]);
        CPE(!sync_ctx->dci_pd[i], "ORCL: Couldn't allocate PD", 0);

        // Register the message buffer with this DCI. It is the sole DCI send
        // buffer and has no other use.
        // FIX: the registration result was previously unchecked, unlike the
        // DCT registration below.
        sync_ctx->dci_mes_mr[i] = ibv_reg_mr(sync_ctx->dci_pd[i], (char *) sync_ctx->message_buffer, sync_ctx->message_buffer_size, ib_flags);
        CPE(!sync_ctx->dci_mes_mr[i], "ORCL: Couldn't regist memory", 0);

        sync_create_dc_initiators(sync_ctx, i);
    }

    printf("init: create all dci node_id:%d, ctx_id:%d\n", node_id, orcl_ctx_id);

    // Optionally set up on-device memory (DM) for small staged writes.
#if SYNC_USE_DM == 1
    for(i = 0; i < dci_num; i++){
        // Allocate device memory.
        struct ibv_exp_alloc_dm_attr dm_attr;
        memset(&dm_attr, 0, sizeof(struct ibv_exp_alloc_dm_attr));
        dm_attr.length = DCI_DM_SIZE;
        sync_ctx->dci_dm[i] = ibv_exp_alloc_dm(sync_ctx->dci_ctx[i], &dm_attr);
        assert(sync_ctx->dci_dm[i] != NULL);

        // Register the DM region as an MR.
        struct ibv_exp_reg_mr_in mr_in;
        memset(&mr_in, 0, sizeof(struct ibv_exp_reg_mr_in));
        mr_in.pd = sync_ctx->dci_pd[i];
        mr_in.addr = 0;
        mr_in.length = DCI_DM_SIZE;
        mr_in.exp_access = IBV_EXP_ACCESS_LOCAL_WRITE;
        mr_in.create_flags = 0;
        mr_in.dm = sync_ctx->dci_dm[i];
        mr_in.comp_mask = IBV_EXP_REG_MR_DM;
        sync_ctx->dci_dm_mr[i] = ibv_exp_reg_mr(&mr_in);
        assert(sync_ctx->dci_dm_mr[i] != NULL);
    }

    printf("init: create all dci dm node_id:%d, ctx_id:%d\n", node_id, orcl_ctx_id);
#endif

    // Create one DCT; a context only needs a single target.
    struct ibv_device *ib_dev = sync_resolve_port_index(sync_ctx, port_index);
    CPE(!ib_dev, "ORCL: IB device not found", 0);

    sync_ctx->dct_ctx = ibv_open_device(ib_dev);
    CPE(!sync_ctx->dct_ctx, "ORCL: Couldn't get context", 0);

    sync_ctx->dct_pd = ibv_alloc_pd(sync_ctx->dct_ctx);
    CPE(!sync_ctx->dct_pd, "ORCL: Couldn't allocate PD", 0);

    sync_ctx->dct_mes_mr = ibv_reg_mr(sync_ctx->dct_pd, (char *)sync_ctx->message_buffer, sync_ctx->message_buffer_size, ib_flags);
    CPE(!sync_ctx->dct_mes_mr, "ORCL: Couldn't regist memory", 0);

    // Create the DC target.
    sync_create_dc_target(sync_ctx);

    printf("init: create all dc target node_id:%d, ctx_id:%d\n", node_id, orcl_ctx_id);

    // Publish this DCT's connection info.
    sync_publish_dc_target(sync_ctx);

    printf("init: publish all dc target node_id:%d, ctx_id:%d\n", node_id, orcl_ctx_id);

    // Initialize the DCI<->node routing tables to "unconnected".
    for(i = 0; i < dci_num; i++){
        sync_ctx->dci_connect_node_id[i] = -1;
    }

    for(i = 0; i < node_num; i++){
        sync_ctx->node_connect_dci_id[i] = -1;
    }

    // Fetch every other node's DCT info.
    sync_get_all_dc_target(sync_ctx);

    printf("init: get all dc target node_id:%d, ctx_id:%d\n", node_id, orcl_ctx_id);

    // Announce that this node is ready...
    char ready_str[ORCL_QP_NAME_SIZE] = {};

    sprintf(ready_str, "init_finish_sync_%d", sync_ctx->node_id);

    orcl_publish_ready(ready_str);

    // ...and wait until every other node is ready too (barrier).
    for(i = 0; i < sync_ctx->node_num; i++){
        if(i == sync_ctx->node_id){
            continue;
        }

        char remote_ready_str[ORCL_QP_NAME_SIZE] = {};

        sprintf(remote_ready_str, "init_finish_sync_%d", i);

        orcl_wait_till_ready(remote_ready_str);
    }

    return sync_ctx;
}

/*
 * RDMA-write `size` bytes from local message buffer offset `offset` to the
 * remote node's message buffer at `remote_offset`, over a DCI picked (or
 * reused) from this context's round-robin routing table.
 *
 * Offsets must be 512-aligned and size < 512; both ends must fit in the
 * message buffer. Small payloads (< 170 bytes) are sent inline; larger ones
 * go through device memory when SYNC_USE_DM is enabled. Only every
 * SYNC_SIGNAL_PERIOD-th send is signaled and polled.
 */
void sync_write_message_to_remote(struct orcl_context_sync* sync_ctx, int offset, int remote_node_id,int remote_offset, int size){
    // FIX: the NULL check must come before any dereference of sync_ctx
    // (it was previously asserted after five dereferences, making it useless).
    assert(sync_ctx != NULL);

    assert(sync_ctx->node_id != remote_node_id);

    assert(sync_ctx->node_num > remote_node_id);

    assert(remote_offset % 512 == 0 && offset % 512 == 0 && size < 512);

    assert(remote_offset + size < sync_ctx->message_buffer_size && offset + size < sync_ctx->message_buffer_size);

    // Make sure the remote node's DCT info has been fetched.
    assert(sync_ctx->others_dct[remote_node_id].buf_size != 0);

    int dci_index = -1;

    if(sync_ctx->node_connect_dci_id[remote_node_id] == -1){
        // No DCI routes to this node yet: take the next one round-robin,
        // evicting whichever node (if any) it previously served.
        dci_index = sync_ctx->next_dci_id;

        // FIX: on a freshly initialized DCI, dci_connect_node_id[dci_index]
        // is -1, and the old code wrote node_connect_dci_id[-1] — an
        // out-of-bounds write. Only clear the reverse mapping for a real node.
        int evicted_node = sync_ctx->dci_connect_node_id[dci_index];
        if(evicted_node != -1){
            sync_ctx->node_connect_dci_id[evicted_node] = -1;
        }

        sync_ctx->node_connect_dci_id[remote_node_id] = dci_index;
        sync_ctx->dci_connect_node_id[dci_index] = remote_node_id;

        // Advance the round-robin cursor.
        sync_ctx->next_dci_id = (sync_ctx->next_dci_id + 1) % sync_ctx->dci_num;
    }else{
        dci_index = sync_ctx->node_connect_dci_id[remote_node_id];
    }

    // Build the work request.
    struct ibv_exp_send_wr wr;
    struct ibv_exp_send_wr *bad_send_wr;

    // Scatter/gather entry and completion slot.
    struct ibv_sge sgl;
    struct ibv_wc wc;

    int ret;

    int opcode = IBV_EXP_WR_RDMA_WRITE;

    // Small payloads always go inline.
    if(size < 170){
        // Source is the host message buffer, no DM staging.
        sgl.addr = (uint64_t)sync_ctx->message_buffer + offset;
        sgl.length = size;
        // lkey of this DCI's message-buffer MR.
        sgl.lkey = sync_ctx->dci_mes_mr[dci_index]->lkey;

        wr.exp_opcode = (ibv_exp_wr_opcode)opcode;
        wr.num_sge = 1;
        wr.next = NULL;

        wr.sg_list = &sgl;

        wr.exp_send_flags = IBV_EXP_SEND_INLINE;
    }else{
#if SYNC_USE_DM == 1
        // Stage the payload into the DM ring buffer first.
        sync_cpy_to_device(sync_ctx, (uint8_t *)sync_ctx->message_buffer, offset, dci_index, sync_ctx->dm_write_offset[dci_index], size);

        sgl.addr = sync_ctx->dm_write_offset[dci_index];
        sgl.length = size;

        sgl.lkey = sync_ctx->dci_dm_mr[dci_index]->lkey;

        wr.exp_opcode = (ibv_exp_wr_opcode)opcode;
        wr.num_sge = 1;
        wr.next = NULL;

        wr.sg_list = &sgl;

        wr.exp_send_flags = 0;

        // Advance the DM ring cursor by one 512-byte slot.
        sync_ctx->dm_write_offset[dci_index] = (sync_ctx->dm_write_offset[dci_index] + 512) % DCI_DM_SIZE;
#endif

#if SYNC_USE_DM == 0
        // No DM: send straight from the host message buffer.
        sgl.addr = (uint64_t)sync_ctx->message_buffer + offset;
        sgl.length = size;
        // lkey of this DCI's message-buffer MR.
        sgl.lkey = sync_ctx->dci_mes_mr[dci_index]->lkey;

        wr.exp_opcode = (ibv_exp_wr_opcode)opcode;
        wr.num_sge = 1;
        wr.next = NULL;

        wr.sg_list = &sgl;

        wr.exp_send_flags = 0;
#endif
    }

    // Destination: the remote DCT's registered buffer.
    wr.wr.rdma.remote_addr = (uint64_t)sync_ctx->others_dct[remote_node_id].buf_addr + remote_offset;
    // rkey of the remote buffer.
    wr.wr.rdma.rkey = sync_ctx->others_dct[remote_node_id].rkey;

    // DC-specific fields.
    wr.dc.ah = sync_ctx->other_dct_ah_attr[dci_index][remote_node_id];

    wr.dc.dct_access_key = ORCL_DCT_KEY;

    wr.dc.dct_number = sync_ctx->others_dct[remote_node_id].dct_num;

    sync_ctx->dci_signal_period[dci_index] = sync_ctx->dci_signal_period[dci_index] + 1;

    if(sync_ctx->dci_signal_period[dci_index] >= SYNC_SIGNAL_PERIOD){
        // Request a completion for this send.
        wr.exp_send_flags |= IBV_EXP_SEND_SIGNALED;
    }

    // Post the send.
    ret = ibv_exp_post_send(sync_ctx->dci_qp[dci_index], &wr, &bad_send_wr);

    if (ret != 0)
    {
        if (ret == ENOMEM)
        {
            printf("Send Queue is full or not enough resources to complete this operation, node_id=%d, ctx_id=%d, dci_dest=%d\n", sync_ctx->node_id, sync_ctx->orcl_ctx_id, remote_node_id);
        }
        else
        {
            // FIX: non-ENOMEM errors were previously swallowed silently.
            printf("ibv_exp_post_send failed, ret=%d, node_id=%d, ctx_id=%d, dci_dest=%d\n", ret, sync_ctx->node_id, sync_ctx->orcl_ctx_id, remote_node_id);
        }
    }

    // Drain the one signaled completion per period.
    if(sync_ctx->dci_signal_period[dci_index] >= SYNC_SIGNAL_PERIOD){
        sync_ctx->dci_signal_period[dci_index] = 0;

        orcl_poll_cq(sync_ctx->dci_cq[dci_index], 1, &wc);
    }
}

/*
 * RDMA-read `size` bytes from the remote node's message buffer at
 * `remote_offset` into the local message buffer at `offset`, over a DCI
 * picked (or reused) from the round-robin routing table.
 *
 * Offsets must be 512-aligned and size < 512; both ends must fit in the
 * message buffer. Reads never use DM staging or inline data. Only every
 * SYNC_SIGNAL_PERIOD-th request is signaled and polled.
 */
void sync_read_message_from_remote(struct orcl_context_sync* sync_ctx, int offset, int remote_node_id,int remote_offset, int size){
    // FIX: the NULL check must come before any dereference of sync_ctx
    // (it was previously asserted after five dereferences, making it useless).
    assert(sync_ctx != NULL);

    assert(sync_ctx->node_id != remote_node_id);

    assert(sync_ctx->node_num > remote_node_id);

    assert(remote_offset % 512 == 0 && offset % 512 == 0 && size < 512);

    assert(remote_offset + size < sync_ctx->message_buffer_size && offset + size < sync_ctx->message_buffer_size);

    // Make sure the remote node's DCT info has been fetched.
    assert(sync_ctx->others_dct[remote_node_id].buf_size != 0);

    int dci_index = -1;

    if(sync_ctx->node_connect_dci_id[remote_node_id] == -1){
        // No DCI routes to this node yet: take the next one round-robin,
        // evicting whichever node (if any) it previously served.
        dci_index = sync_ctx->next_dci_id;

        // FIX: on a freshly initialized DCI, dci_connect_node_id[dci_index]
        // is -1, and the old code wrote node_connect_dci_id[-1] — an
        // out-of-bounds write. Only clear the reverse mapping for a real node.
        int evicted_node = sync_ctx->dci_connect_node_id[dci_index];
        if(evicted_node != -1){
            sync_ctx->node_connect_dci_id[evicted_node] = -1;
        }

        sync_ctx->node_connect_dci_id[remote_node_id] = dci_index;
        sync_ctx->dci_connect_node_id[dci_index] = remote_node_id;

        // Advance the round-robin cursor.
        sync_ctx->next_dci_id = (sync_ctx->next_dci_id + 1) % sync_ctx->dci_num;
    }else{
        dci_index = sync_ctx->node_connect_dci_id[remote_node_id];
    }

    // Build the work request; reads use neither DM nor inline.
    struct ibv_exp_send_wr wr;
    struct ibv_exp_send_wr *bad_send_wr;

    // Scatter/gather entry and completion slot.
    struct ibv_sge sgl;
    struct ibv_wc wc;

    int ret;

    int opcode = IBV_EXP_WR_RDMA_READ;

    // Destination of the read is the local message buffer.
    sgl.addr = (uint64_t)sync_ctx->message_buffer + offset;
    sgl.length = size;
    // lkey of this DCI's message-buffer MR.
    sgl.lkey = sync_ctx->dci_mes_mr[dci_index]->lkey;

    wr.exp_opcode = (ibv_exp_wr_opcode)opcode;
    wr.num_sge = 1;
    wr.next = NULL;

    wr.sg_list = &sgl;

    wr.exp_send_flags = 0;

    // Source: the remote DCT's registered buffer.
    wr.wr.rdma.remote_addr = (uint64_t)sync_ctx->others_dct[remote_node_id].buf_addr + remote_offset;
    // rkey of the remote buffer.
    wr.wr.rdma.rkey = sync_ctx->others_dct[remote_node_id].rkey;

    // DC-specific fields.
    wr.dc.ah = sync_ctx->other_dct_ah_attr[dci_index][remote_node_id];

    wr.dc.dct_access_key = ORCL_DCT_KEY;

    wr.dc.dct_number = sync_ctx->others_dct[remote_node_id].dct_num;

    sync_ctx->dci_signal_period[dci_index] = sync_ctx->dci_signal_period[dci_index] + 1;

    if(sync_ctx->dci_signal_period[dci_index] >= SYNC_SIGNAL_PERIOD){
        // Request a completion for this read.
        wr.exp_send_flags |= IBV_EXP_SEND_SIGNALED;
    }

    // Post the read.
    ret = ibv_exp_post_send(sync_ctx->dci_qp[dci_index], &wr, &bad_send_wr);

    if (ret != 0)
    {
        if (ret == ENOMEM)
        {
            printf("Send Queue is full or not enough resources to complete this operation, node_id=%d, ctx_id=%d, dci_dest=%d\n", sync_ctx->node_id, sync_ctx->orcl_ctx_id, remote_node_id);
        }
        else
        {
            // FIX: non-ENOMEM errors were previously swallowed silently.
            printf("ibv_exp_post_send failed, ret=%d, node_id=%d, ctx_id=%d, dci_dest=%d\n", ret, sync_ctx->node_id, sync_ctx->orcl_ctx_id, remote_node_id);
        }
    }

    // Drain the one signaled completion per period.
    if(sync_ctx->dci_signal_period[dci_index] >= SYNC_SIGNAL_PERIOD){
        sync_ctx->dci_signal_period[dci_index] = 0;

        orcl_poll_cq(sync_ctx->dci_cq[dci_index], 1, &wc);
    }

}

