#include "rdma.h"

#include <string>

#include "log.h"
#include "utils.h"


int open_rdma_device(std::string dev_name, int ib_port, 
                    std::string link_type, 
                    int hint_gid_index,
                    struct rdma_device *rdma_dev) {
    assert(link_type == "IB" || link_type == "Ethernet");
    assert(rdma_dev != NULL);

    rdma_dev->link_type = link_type;

    struct ibv_device **dev_list;
    struct ibv_device *ib_dev;

    int num_devices;
    // 获取设备列表
    dev_list = ibv_get_device_list(&num_devices);
    if (!dev_list) {
        ERROR("Failed to get RDMA devices list");
        return -1;
    }

    for (int i = 0; i < num_devices; ++i) {
        char *dev_name_= (char *)ibv_get_device_name(dev_list[i]);
        if (strcmp(dev_name_, dev_name.c_str()) == 0) {
            INFO("found device {}", dev_name_);
            ib_dev = dev_list[i];
            rdma_dev->ib_ctx = ibv_open_device(ib_dev);
            break;
        }
    }

    //! 打开指定设备失败，切换为打开默认设备
    if (!rdma_dev->ib_ctx) {
        INFO(
            "Can't find or failed to open the specified device, "
            "try to open the default device {}",
            (char *)ibv_get_device_name(dev_list[0]));
        rdma_dev->ib_ctx = ibv_open_device(dev_list[0]);
        if (!rdma_dev->ib_ctx) {
            ERROR("Failed to open the default device");
            return -1;
        }
    }


    //! 查询端口属性
    struct ibv_port_attr port_attr;
    rdma_dev->ib_port = ib_port;
    if (ibv_query_port(rdma_dev->ib_ctx, ib_port, &port_attr)) {
        ERROR("Unable to query port {} attributes\n", ib_port);
        return -1;
    }


    //! 端口的属性与手动配置的类型不匹配
    if ((port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND && link_type == "Ethernet") ||
        (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET && link_type == "IB")) {
        ERROR("port link layer and config link type don't match");
        return -1;
    }


    if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
        // IB
        rdma_dev->gid_index = -1;        //! IB场景无效
        rdma_dev->lid = port_attr.lid;
        INFO("IB lid {}", rdma_dev->lid);
    }
    else {
        // RoCE v2
        if (hint_gid_index >= 0) {
            // 使用用户提示的GID索引
            rdma_dev->gid_index = hint_gid_index;
            WARN("RoCE choose user specified gid index {}", hint_gid_index);
        }
        else {
            // 如果用户没有指定或指定的无效, 自定查询满足要求的GID索引
            rdma_dev->gid_index = ibv_find_sgid_type(rdma_dev->ib_ctx, rdma_dev->ib_port,
                                                     IBV_GID_TYPE_ROCE_V2, AF_INET);
            if (rdma_dev->gid_index < 0) {
                ERROR("Failed to find GID index");
                return -1;
            }
        }

        if (ibv_query_gid(rdma_dev->ib_ctx, 1, rdma_dev->gid_index, &rdma_dev->gid) < 0) {
            ERROR("Failed to get GID from index {}", rdma_dev->gid_index);
            return -1;
        }

        // if gid all all zero, return error
        if (rdma_dev->gid.global.subnet_prefix == 0 && rdma_dev->gid.global.interface_id == 0) {
            ERROR("GID is all zero");
            return -1;
        }

        INFO("gid index {}, gid {}", rdma_dev->gid_index, human_readable_gid(rdma_dev->gid));
    }

    rdma_dev->active_mtu = port_attr.active_mtu;

    rdma_dev->pd = ibv_alloc_pd(rdma_dev->ib_ctx);
    if (!rdma_dev->pd) {
        ERROR("Failed to allocate PD");
        return -1;
    }
    return 0;
}




/**
 * @brief  Release the resources held by an opened RDMA device.
 *
 * Safe to call with NULL (logs a warning and returns). Tears down in the
 * reverse order of creation: the PD first, then the device context.
 *
 * @param rdma_dev  Device to close; may be NULL.
 * @return 0 always.
 */
int close_rdma_device(struct rdma_device *rdma_dev) {
    if (!rdma_dev) {
        WARN("rdma_dev is NULL, nothing to close");
        return 0;
    }

    if (rdma_dev->pd != NULL) {
        ibv_dealloc_pd(rdma_dev->pd);
    }

    if (rdma_dev->ib_ctx != NULL) {
        ibv_close_device(rdma_dev->ib_ctx);
    }

    return 0;
}





int init_rdma_context(struct rdma_context *ctx, 
                struct rdma_device *rdma_dev) {
    assert(ctx != NULL);
    assert(rdma_dev != NULL);

    //! 创建IO CQ完成通知隧道
    ctx->comp_channel = ibv_create_comp_channel(rdma_dev->ib_ctx);
    if (!ctx->comp_channel) {
        ERROR("Failed to create completion channel");
        return -1;
    }

    
    //! 创建完成队列
    ctx->cq = ibv_create_cq(rdma_dev->ib_ctx, 
                    MAX_SEND_WR + MAX_RECV_WR, 
                    NULL, ctx->comp_channel, 0);
    if (!ctx->cq) {
        ERROR("Failed to create CQ");
        return -1;
    }


    //! 设置CQ通知的事件类型
    // int ibv_req_notify_cq(struct ibv_cq *cq, 
	//                      int solicited_only);   // 通知出发条件的标志
	//                      0: 通知所有事件
	//                      1: 只有被标记为"solicited"的工作完成才会触发通知
    if (ibv_req_notify_cq(ctx->cq, 0)) {
        ERROR("Failed to request CQ notification");
        return -1;
    }

    // Create Queue Pair
    struct ibv_qp_init_attr qp_init_attr = {};
    qp_init_attr.send_cq = ctx->cq;
    qp_init_attr.recv_cq = ctx->cq;
    qp_init_attr.qp_type = IBV_QPT_RC;          //! RC类型
    
    qp_init_attr.cap.max_send_wr = MAX_SEND_WR;
    qp_init_attr.cap.max_recv_wr = MAX_RECV_WR;
    qp_init_attr.cap.max_send_sge = 1;
    qp_init_attr.cap.max_recv_sge = 1;

    ctx->qp = ibv_create_qp(rdma_dev->pd, &qp_init_attr);
    if (!ctx->qp) {
        ERROR("Failed to create QP, {}", strerror(errno));
        return -1;
    }

    // Modify QP to INIT state
    if (modify_qp_to_init(ctx, rdma_dev)) {
        ERROR("Failed to modify QP to INIT, {}", strerror(errno));
        return -1;
    }

    // save information to local_info for exchange data
    // ctx->local_info.qpn = ctx->qp->qp_num;
    // ctx->local_info.psn = lrand48() & 0xffffff;
    // if (rdma_dev->gid_index != -1) {
    //     ctx->local_info.gid = rdma_dev->gid;
    // }

    // ctx->local_info.lid = rdma_dev->lid;
    // ctx->local_info.mtu = (uint32_t)rdma_dev->active_mtu;
    ctx->psn = lrand48() & 0xffffff;

    return 0;
}





int destroy_rdma_context(struct rdma_context *ctx) {
    if (ctx->qp) {
        struct ibv_qp_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IBV_QPS_RESET;
        ibv_modify_qp(ctx->qp, &attr, IBV_QP_STATE);
        ibv_destroy_qp(ctx->qp);
    }

    if (ctx->cq) {
        ibv_destroy_cq(ctx->cq);
    }

    if (ctx->comp_channel) {
        ibv_destroy_comp_channel(ctx->comp_channel);
    }
    return 0;
}







/**
 * @brief  Collect the local connection parameters to exchange with the peer.
 *
 * Bundles QPN, initial PSN, GID (RoCE), LID (IB) and the active MTU from an
 * initialized context/device pair into an rdma_conn_info_t.
 *
 * @param ctx       Initialized connection context (asserted non-NULL).
 * @param rdma_dev  Opened device (asserted non-NULL — the original read its
 *                  fields without checking).
 * @return Filled connection-info struct, passed by value.
 */
rdma_conn_info_t get_rdma_conn_info(struct rdma_context *ctx, 
                            struct rdma_device *rdma_dev) {
    assert(ctx != NULL);
    assert(rdma_dev != NULL);
    rdma_conn_info_t conn_info = {
        .qpn = ctx->qp->qp_num,
        .psn = ctx->psn,
        .gid = rdma_dev->gid,
        .lid = rdma_dev->lid,
        .mtu = (uint32_t)rdma_dev->active_mtu,
    };
    return conn_info;
}




int modify_qp_to_init(struct rdma_context *ctx, 
                struct rdma_device *rdma_dev) {
    assert(ctx != NULL);
    assert(rdma_dev != NULL);

    struct ibv_qp_attr attr = {};


    /**
    *! @brief   QP的状态机
    *  
    * RESET   QP的初始状态，所有硬件资源已分配但未配置, 不能发送和接收任何数据，
    *         所有队列被清空， 创建QP后自动进入该状态, ERR状态恢复后也进入该状态;
    * 
    * INIT    QP已绑定基本参数，准备连接配置。设置端口绑定和基本访问权限, 准备接收
    *         连接参数（如GID、QPN等）, 必须转为RTR状态后才能接收数据;
    * 
    * RTR     Ready to Receive, QP已配置完整的连接参数，可以接收数据但还不能发送。
    *         建立完整的接收路径（包括对端QP信息）配置流控和重传参数;
    * 
    * RTS     Ready to Send, QP可同时发送和接收数据，是正常工作状态。可处理所有RDMA操作
    *        （Send/Write/Read/Atomic）硬件自动维护序列号（PSN）和重传;
    * 
    * SQD     Send Queue Drain，发送队列排空. 临时状态，用于优雅停止发送队列。停止接受新
    *         的发送请求（Send/WRITE等）继续处理已提交的请求直至完成接收队列仍正常工作.用
    *         途：动态调整QP参数或连接迁移
    *  
    * SQE     Send Queue Error，发送队列错误. 发送队列出现错误但接收队列可能仍有效。触发条件:
    *         (1)发送超时或重试次数耗尽;(2)手动置位错误状态. 必须重置到RESET状态后重新初始化.
    * 
    * ERR     QP发生不可恢复错误。触发原因: (1) 网络链路中断; (2)协议序列号错误; (3)硬件故障
    *         所有未完成的操作会被冲刷并返回错误完成事件, 必须通过重置恢复
    * 
    * UNK     动程序无法确定当前状态（通常表示查询失败）
    */

    //! QP状态机
    // stateDiagram
    // [*] --> IBV_QPS_RESET             QP创建后的初始状态
    // 当前状态	允许转换的目标状态
    // ------------------------
    //  RESET	 INIT
    //  INIT	 RTR, ERR
    //  RTR	     RTS, ERR
    //  RTS	     SQD, ERR
    //  SQD	     SQE, RTS, ERR
    //  SQE	     ERR, RESET
    //  ERR	     RESET
    attr.qp_state = IBV_QPS_INIT;      //! 创建QP后必须首先转到INIT状态
    attr.port_num = rdma_dev->ib_port; //! 绑定网卡端口
    attr.pkey_index = 0;


    //! MR权限与QP权限的区别
    // 1. qp_access_flags定义了QP的访问权限, 这些权限可在QP状态转换时动态修改
    // 2. MR的权限限制对内存区域的访问权限, MR创建后不可更改;

    //! 当QP尝试访问远程内存时, 必须同时满足:
    //   - QP有权发起该操作(QP的qp_access_flags包含对应标志);
    //   - 目标MR的访问权限允许该操作(MR的access_flags包含对应标志);

    //! 为什么需要双重配置:
    //   - MR可以被多个QP共享, 每个QP可配置不同的操作权限;
    //   - 快速权限验证: QP权限验证在发送端，MR权限验证在接收端;
    attr.qp_access_flags =
        IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE;

    //! 属性修改掩码, 指定要修改的属性，其它字段将被忽略
    int flags = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS;

    int ret = ibv_modify_qp(ctx->qp, &attr, flags);
    if (ret) {
        ERROR("Failed to modify QP to INIT");
        return ret;
    }
    return 0;
}




int modify_qp_to_rts(struct rdma_context *ctx) {
    assert(ctx != NULL);

    struct ibv_qp_attr attr = {};
    attr.qp_state  = IBV_QPS_RTS;
    attr.timeout   = 14;     // 等待ACK的超时时间(4.096us * 2^timeout) ms
    attr.retry_cnt = 7;      // 发送端最大重试次数: 0(禁止重试), 7(标准值), 255(无限重试)
    attr.rnr_retry = 7;      // 控制对端RNR（Receiver Not Ready）错误时的重试策略。
    attr.sq_psn = ctx->psn;  // 发送队列的初始数据包序列号
    attr.max_rd_atomic = 16;

    int flags = IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT |
                IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | 
                IBV_QP_MAX_QP_RD_ATOMIC;

    int ret = ibv_modify_qp(ctx->qp, &attr, flags);
    if (ret) {
        ERROR("Failed to modify QP to RTS");
        return ret;
    }
    return 0;
}





/**
 * @brief  Transition the QP from INIT to RTR using the peer's parameters.
 *
 * Negotiates the path MTU as the minimum of both sides, sets the remote
 * QPN/PSN, and fills the address handle: LID-based for IB, GRH/GID-based
 * for RoCE v2 (selected by rdma_dev->gid_index == -1, set in
 * open_rdma_device()).
 *
 * @param ctx          Context owning the QP (asserted non-NULL).
 * @param rdma_dev     Local device info (asserted non-NULL).
 * @param remote_info  Peer connection parameters (asserted non-NULL — the
 *                     original dereferenced it unchecked).
 * @return 0 on success, the non-zero ibv_modify_qp() result on failure.
 */
int modify_qp_to_rtr(struct rdma_context *ctx, 
                    struct rdma_device *rdma_dev,
                    rdma_conn_info_t *remote_info) {
    assert(ctx != NULL);
    assert(rdma_dev != NULL);
    assert(remote_info != NULL);

    struct ibv_qp_attr attr = {};
    attr.qp_state = IBV_QPS_RTR;

    // Log when the two sides disagree on MTU; the enum encodes 256 << (v - 1),
    // i.e. 1 << (v + 7) bytes.
    if (remote_info->mtu != (uint32_t)rdma_dev->active_mtu) {
        INFO("remote MTU: {}, local MTU: {} is not the same, update to minimal MTU",
             1 << ((uint32_t)remote_info->mtu + 7), 1 << ((uint32_t)rdma_dev->active_mtu + 7));
    }

    // Use the smaller of the local and remote MTUs.
    attr.path_mtu =
        (enum ibv_mtu)std::min((uint32_t)rdma_dev->active_mtu, 
                    (uint32_t)remote_info->mtu);

    attr.dest_qp_num = remote_info->qpn;       // peer QP number
    attr.rq_psn = remote_info->psn;            // peer's initial send PSN
    attr.max_dest_rd_atomic = 16;              // inbound RDMA read/atomic depth
    attr.min_rnr_timer = 12;                   // RNR NAK timer
    attr.ah_attr.dlid = 0;
    attr.ah_attr.sl = 0;
    attr.ah_attr.src_path_bits = 0;
    attr.ah_attr.port_num = rdma_dev->ib_port;

    if (rdma_dev->gid_index == -1) {
        // IB: LID-routed, no GRH.
        attr.ah_attr.dlid = remote_info->lid;
        attr.ah_attr.is_global = 0;
    }
    else {
        // RoCE v2: GRH with the peer's GID and our source GID index.
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.dgid = remote_info->gid;
        attr.ah_attr.grh.sgid_index = rdma_dev->gid_index;  // local gid
        attr.ah_attr.grh.hop_limit = 1;
    }

    int flags = IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | 
                IBV_QP_DEST_QPN | IBV_QP_RQ_PSN |
                IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER;

    int ret = ibv_modify_qp(ctx->qp, &attr, flags);
    if (ret) {
        ERROR("Failed to modify QP to RTR");
        return ret;
    }

    return 0;
}
