#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode);

/* Retransmit the leading packet(s) of a WRITE/SEND work request */
/* Replay the already-acknowledged leading packets of a partially-acked
 * WRITE/SEND WQE.  Advances the DMA cursor (and, for writes, the remote
 * iova) past the npsn packets the responder has already received, so
 * transmission resumes at the first unacknowledged packet.
 */
static inline void retry_first_write_send(struct rxe_qp *qp,
        struct rxe_send_wqe *wqe, unsigned int mask, int npsn)
{
    int index;

    for(index = 0; index < npsn; index++) {
        /* each replayed packet carried at most one MTU of payload */
        int to_send = (wqe->dma.resid > qp->mtu) ? qp->mtu : wqe->dma.resid;

        qp->req.opcode = next_opcode(qp, wqe, wqe->wr.opcode);
        if (wqe->wr.send_flags & IB_SEND_INLINE) {
            /* BUG FIX: this branch was empty, so inline sends never
             * stepped past acked data on retry.  Inline payload lives
             * in the sge area itself, so advance the cursor directly
             * (this is what advance_dma_data() does for the non-inline
             * case). */
            wqe->dma.resid -= to_send;
            wqe->dma.sge_offset += to_send;
        } else {
            /* advances dma->sge_offset and shrinks dma->resid */
            advance_dma_data(&wqe->dma, to_send);
        }
        /* an RDMA WRITE walks the remote buffer one MTU per packet;
         * SENDs carry no remote address, so iova is left alone */
        if (mask & WR_WRITE_MASK)
            wqe->iova += qp->mtu;
    }
}

/* Packet retransmission -- triggered when an ACK is not received in time
 * or a NAK arrives */
/* Rewind the requester to the completer's acknowledged state and replay
 * every outstanding WQE on the send queue.
 */
static void req_retry(struct rxe_qp *qp)
{
    struct rxe_send_wqe *wqe;
    unsigned int wqe_index;
    unsigned int mask;
    int npsn;
    int first = 1;
    struct rxe_queue *q = qp->sq.queue;
    unsigned int cons;
    unsigned int prod;

    /* current consumer index of the qp's send queue */
    cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
    /* current producer index of the qp's send queue */
    prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

    /* restart transmission from the oldest un-completed wqe */
    qp->req.wqe_index = cons;
    /* resume numbering from the last PSN the completer acknowledged */
    qp->req.psn = qp->comp.psn;
    qp->req.opcode = -1;

    for(wqe_index = cons; wqe_index != prod; wqe_index = queue_next_index(q, wqe_index)) {
        /* BUG FIX: wqe and mask were read without ever being assigned
         * inside the loop (uninitialized pointer dereference). */
        wqe = queue_addr_from_index(q, wqe_index);
        mask = wr_opcode_mask(wqe->wr.opcode, qp);

        /* not sent yet: this and everything after it will go out via
         * the normal transmit path, so stop here */
        if(wqe->state == wqe_state_posted)
            break;

        /* fully sent and acknowledged: nothing to replay */
        if(wqe->state == wqe_state_done)
            continue;

        /* reset the remote address for atomic / read / write requests */
        wqe->iova = (mask & WR_ATOMIC_MASK) ? wqe->wr.wr.atomic.remote_addr :
            (mask & WR_READ_OR_WRITE_MASK) ? wqe->wr.wr.rdma.remote_addr : 0;

        if(first) {
            first = 0;

            if (mask & WR_WRITE_OR_SEND_MASK) {
                /* npsn = number of this wqe's packets the responder
                 * already acknowledged */
                npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
                retry_first_write_send(qp, wqe, mask, npsn);
            }

            /* TODO: partial-read retry (rewinding iova/resid for a
             * half-completed READ) is not implemented in this version */
            if (mask & WR_READ_MASK) {

            }
        }
        /* hand the wqe back to the normal transmit path */
        wqe->state = wqe_state_posted;
    }
}

/* RNR NAK back-off timer expired: kick the requester task so it
 * retries the postponed send.
 */
void rnr_nak_timer(struct timer_list *t)
{
    struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

    pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));

    rxe_run_task(&qp->req.task, 1);
}

/* Return the WQE at the requester's current index, or NULL when the
 * send queue holds no new work.  Caches the opcode mask in the wqe
 * before returning it.
 *
 * BUG FIX: the original computed queue_head()/cons into locals (and
 * declared `flags`) that were never used -- the first wqe assignment
 * was immediately overwritten below.  The dead code is removed; the
 * drain branch that would have used them is still unimplemented.
 */
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
    struct rxe_send_wqe *wqe;
    struct rxe_queue *q = qp->sq.queue;
    unsigned int index = qp->req.wqe_index; /* current wqe */
    unsigned int prod;

    prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

    if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
        /* TODO: SQ drain handling is not implemented in this version */
    }

    /* requester has consumed everything the client produced */
    if(index == prod)
        return NULL;

    wqe = queue_addr_from_index(q, index);
    wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);

    return wqe;
}

/* Pick the RC wire opcode for the next packet of a work request.
 * qp->req.opcode starts out as -1 for a new message, so neither
 * continuation predicate matches and a FIRST/ONLY opcode is chosen;
 * `fits` means the remaining payload fits in one packet.
 */
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
    const int cont_write =
        (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
         qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE);
    const int cont_send =
        (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
         qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE);

    switch (opcode) {
    case IB_WR_RDMA_WRITE:
        if (cont_write)
            return fits ? IB_OPCODE_RC_RDMA_WRITE_LAST
                    : IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
        return fits ? IB_OPCODE_RC_RDMA_WRITE_ONLY
                : IB_OPCODE_RC_RDMA_WRITE_FIRST;

    case IB_WR_RDMA_WRITE_WITH_IMM:
        if (cont_write)
            return fits ? IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE
                    : IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
        return fits ? IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE
                : IB_OPCODE_RC_RDMA_WRITE_FIRST;

    case IB_WR_SEND:
        if (cont_send)
            return fits ? IB_OPCODE_RC_SEND_LAST
                    : IB_OPCODE_RC_SEND_MIDDLE;
        return fits ? IB_OPCODE_RC_SEND_ONLY
                : IB_OPCODE_RC_SEND_FIRST;

    case IB_WR_SEND_WITH_IMM:
        if (cont_send)
            return fits ? IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE
                    : IB_OPCODE_RC_SEND_MIDDLE;
        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE
                : IB_OPCODE_RC_SEND_FIRST;

    case IB_WR_SEND_WITH_INV:
        if (cont_send)
            return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE
                    : IB_OPCODE_RC_SEND_MIDDLE;
        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE
                : IB_OPCODE_RC_SEND_FIRST;

    case IB_WR_RDMA_READ:
        return IB_OPCODE_RC_RDMA_READ_REQUEST;

    case IB_WR_ATOMIC_CMP_AND_SWP:
        return IB_OPCODE_RC_COMPARE_SWAP;

    case IB_WR_ATOMIC_FETCH_AND_ADD:
        return IB_OPCODE_RC_FETCH_ADD;

    case IB_WR_REG_MR:
    case IB_WR_LOCAL_INV:
        /* local operations carry the WR opcode through unchanged */
        return opcode;
    }

    return -EINVAL;
}

/* Pick the UC wire opcode for the next packet of a work request.
 * Mirrors next_opcode_rc() for the opcodes UC supports (no reads,
 * atomics, or invalidates).
 */
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
    const int cont_write =
        (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
         qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE);
    const int cont_send =
        (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
         qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE);

    switch (opcode) {
    case IB_WR_RDMA_WRITE:
        if (cont_write)
            return fits ? IB_OPCODE_UC_RDMA_WRITE_LAST
                    : IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
        return fits ? IB_OPCODE_UC_RDMA_WRITE_ONLY
                : IB_OPCODE_UC_RDMA_WRITE_FIRST;

    case IB_WR_RDMA_WRITE_WITH_IMM:
        if (cont_write)
            return fits ? IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE
                    : IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
        return fits ? IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE
                : IB_OPCODE_UC_RDMA_WRITE_FIRST;

    case IB_WR_SEND:
        if (cont_send)
            return fits ? IB_OPCODE_UC_SEND_LAST
                    : IB_OPCODE_UC_SEND_MIDDLE;
        return fits ? IB_OPCODE_UC_SEND_ONLY
                : IB_OPCODE_UC_SEND_FIRST;

    case IB_WR_SEND_WITH_IMM:
        if (cont_send)
            return fits ? IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE
                    : IB_OPCODE_UC_SEND_MIDDLE;
        return fits ? IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE
                : IB_OPCODE_UC_SEND_FIRST;
    }

    return -EINVAL;
}

/* Dispatch opcode selection by QP type.  Returns the wire opcode for
 * the next packet of `wqe`, or -EINVAL for an unsupported combination.
 */
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
               u32 opcode)
{
    /* true when the remaining payload fits in a single MTU */
    const int fits = (wqe->dma.resid <= qp->mtu);

    switch (qp_type(qp)) {
    case IB_QPT_RC:
        return next_opcode_rc(qp, opcode, fits);

    case IB_QPT_UC:
        return next_opcode_uc(qp, opcode, fits);

    case IB_QPT_SMI:
    case IB_QPT_UD:
    case IB_QPT_GSI:
        /* datagram QPs only carry single-packet sends */
        if (opcode == IB_WR_SEND)
            return IB_OPCODE_UD_SEND_ONLY;
        if (opcode == IB_WR_SEND_WITH_IMM)
            return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
        break;

    default:
        break;
    }

    return -EINVAL;
}

static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
    int depth;



    return -EAGAIN;
}

static inline int get_mtu(struct rxe_qp *qp)
{
    struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

    if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
        return qp->mtu;

    return rxe->port.mtu_cap;
}

/* Allocate an skb and fill in the packet headers (UDP + IP + BTH) */
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
        struct rxe_send_wqe *wqe, int opcode, int payload,
        struct rxe_pkt_info *pkt)
{
    struct rxe_dev  *rxe = to_rdev(qp->ibqp.device);
    struct rxe_port *port = &rxe->port;
    struct sk_buff  *skb;
    struct rxe_send_wr ibwr = &wqe->wr;
    struct rxe_av *av;
    int pad = (-payload) & 0x3; //icrc校验需要4字节对齐
    int paylen, solicited, ack_req;
    u16 pkey;
    u32 qp_num;

    /*BTH+额外扩展头(如果需要)+数据长度+pad长度(如果需要)+icrc长度*/
    paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
    pkt->opcode = opcode;
    pkt->qp = qp;
    pkt->psn = qp->req.psn;
    pkt->mask = rxe_opcode[opcode].mask;
    pkt->paylen = paylen;
    pkt->offset = 0;
    pkt->wqe = wqe;

    av = rxe_get_av(pkt);
    //申请skb
    skb = rxe_init_packet(rxe, av, paylen, pkt);
    if (unlikely(!skb))
        return NULL;

    solicited = (ibwr->send_flags & IB_SEND_SOLICITED) && (pkt->mask & RXE_END_MASK) &&
        ((pkt->mask & (RXE_SEND_MASK)) || (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
        (RXE_WRITE_MASK | RXE_IMMDT_MASK));

    pkey = IB_DEFAULT_PKEY_FULL; //65535
    qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : qp->attr.dest_qp_num;
    ack_req = ((pkt->mask & RXE_END_MASK) ||
        (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
    if (ack_req)
        qp->req.noack_pkts = 0;

    /*************************BTH*************************/
    bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num, ack_req, pkt->psn);

    /*****E-BTH(额外的扩展头)******/
    /*RETH头赋值*/
    if(pkt->mask & RXE_RETH_MASK) {
        reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
        reth_set_va(pkt, wqe->iova);
        reth_set_len(pkt, wqe->dma.resid);
    }

    if (pkt->mask & RXE_IMMDT_MASK)
        immdt_set_imm(pkt, ibwr->ex.imm_data);

    if (pkt->mask & RXE_IETH_MASK)
        ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

    if (pkt->mask & RXE_ATMETH_MASK) {
        atmeth_set_va(pkt, wqe->iova);
        if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
            opcode == IB_OPCODE_RD_COMPARE_SWAP) {
            atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
            atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
        } else {
            atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
        }
        atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
    }

    if (pkt->mask & RXE_DETH_MASK) {
        if (qp->ibqp.qp_num == 1)
            deth_set_qkey(pkt, GSI_QKEY);
        else
            deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
        deth_set_sqp(pkt, qp->ibqp.qp_num);
    }

    return skb;
}

/* Fill in the payload portion of the packet */
/* Finish the lower headers and copy the payload into the packet.
 * Returns 0 on success or a negative errno from rxe_prepare()/copy_data().
 */
static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
        struct rxe_pkt_info *pkt, struct sk_buff *skb, int paylen)
{
    int rc;

    /* complete the IP and UDP header fields */
    rc = rxe_prepare(pkt, skb);
    if (rc)
        return rc;

    /* only WRITE and SEND requests carry payload copied from local
     * buffers; everything else is header-only */
    if (!(pkt->mask & RXE_WRITE_OR_SEND))
        return 0;

    if (wqe->wr.send_flags & IB_SEND_INLINE) {
        //...
    } else {
        rc = copy_data(qp->pd, 0, &wqe->dma, payload_addr(pkt), paylen,
                RXE_FROM_MR_OBJ);
        if (rc)
            return rc;
    }

    /* zero the pad bytes that keep the payload 4-byte aligned for icrc */
    if (bth_pad(pkt))
        memset(payload_addr(pkt) + paylen, 0, bth_pad(pkt));

    return 0;
}

/* Advance the wqe's state machine after a packet is built.
 * TODO: intentionally left unimplemented in this study version --
 * upstream sets wqe_state_pending/wqe_state_done based on pkt->mask.
 * Must be called before rxe_xmit_packet() (see rxe_requester()).
 */
static void update_wqe_state(struct rxe_qp *qp,
        struct rxe_send_wqe *wqe,
        struct rxe_pkt_info *pkt)
{


}

/* Advance the requester PSN after building a packet, and record the
 * PSN range of the message when this is its first packet.
 */
static void update_wqe_psn(struct rxe_qp *qp,
        struct rxe_send_wqe *wqe,
        struct rxe_pkt_info *pkt,
        int payload)
{
    /* total packets for this wqe: resid no longer counts the payload
     * of the packet just built, so add it back before rounding up */
    int total_pkts = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

    /* zero-length operations still occupy one packet / one PSN */
    if (total_pkts == 0)
        total_pkts = 1;

    if (pkt->mask & RXE_START_MASK) {
        /* first packet of the message: pin down its PSN window */
        wqe->first_psn = qp->req.psn;
        wqe->last_psn = (qp->req.psn + total_pkts - 1) & BTH_PSN_MASK;
    }

    /* a READ request reserves one PSN per expected response packet;
     * all other operations consume exactly one */
    if (pkt->mask & RXE_READ_MASK)
        qp->req.psn = (wqe->first_psn + total_pkts) & BTH_PSN_MASK;
    else
        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

/* Snapshot the wqe's dma state and the requester PSN so they can be
 * restored by rollback_state() if transmission fails.
 * TODO: intentionally left unimplemented in this study version.
 */
static void save_state(struct rxe_send_wqe *wqe,
        struct rxe_qp *qp,
        struct rxe_send_wqe *rollback_wqe,
        u32 *rollback_psn)
{


}

/* Restore the wqe/PSN snapshot taken by save_state() after a failed
 * transmit, so the packet can be rebuilt and resent.
 * TODO: intentionally left unimplemented in this study version.
 */
static void rollback_state(struct rxe_send_wqe *wqe,
        struct rxe_qp *qp,
        struct rxe_send_wqe *rollback_wqe,
        u32 rollback_psn)
{


}

/* Commit requester state after a successful transmit: remember the
 * opcode just sent (drives next_opcode()'s FIRST/MIDDLE/LAST choice)
 * and step to the next wqe when this packet finished the message.
 * NOTE(review): `payload` is currently unused here; upstream also
 * re-arms the retransmit timer in this function -- not implemented.
 */
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
        struct rxe_pkt_info *pkt, int payload)
{
    qp->req.opcode = pkt->opcode;
    if(pkt->mask & RXE_END_MASK)
        /* all data of the current wqe has been sent */
        qp->req.wqe_index = queue_next_index(qp->sq.queue,qp->req.wqe_index);



}

static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
    int ret;
    u32 rkey;
    u8 opcode = wqe->wr.opcode;


}

/* Requester task entry point: loops over the send queue, building and
 * transmitting one packet per iteration until no work remains, an
 * error occurs, or flow-control limits are hit.  Always returns
 * -EAGAIN so the task framework decides whether to reschedule.
 */
int rxe_requester(void *arg)
{
    struct rxe_qp *qp = (struct rxe_qp *)arg;
    struct rxe_pkt_info pkt;
    struct sk_buff *skb;
    struct rxe_send_wqe *wqe;
    enum rxe_hdr_mask mask;
    int payload, mtu, opcode, ret;
    struct rxe_send_wqe rollback_wqe;
    u32 rollback_psn;
    struct rxe_queue *q = qp->sq.queue; // send queue buffer (NOTE(review): currently unused)

    rxe_add_ref(qp);
next_wqe:

    /* qp->valid is cleared when the qp is being destroyed */
    if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) {
        goto exit;
    }

    if (unlikely(qp->req.state == QP_STATE_RESET)) {
        //...
        goto exit;
    }

    // a retransmit was requested (missing ACK or received NAK)
    if (unlikely(qp->req.need_retry)) {
        req_retry(qp);
        qp->req.need_retry = 0;
    }

    /* fetch the current wqe via qp->req.wqe_index */
    wqe = req_next_wqe(qp);
    if(unlikely(!wqe)) {
        // no more work: exit here once a wqe's data has all been sent
        goto exit;
    }

    // NOTE(review): local ops (REG_MR/LOCAL_INV) are not dispatched yet
    if (wqe->mask & WR_LOCAL_OP_MASK) {

    }

    /* RC flow control: stop when too many PSNs are outstanding unacked */
    if (unlikely(qp_type(qp) == IB_QPT_RC &&
        psn_compare(qp->req.psn, (qp->comp.psn +
            RXE_MAX_UNACKED_PSNS)) > 0)) {
        //...
        goto exit;
    }

    /* Limit the number of inflight SKBs per QP */
    if (unlikely(atomic_read(&qp->skb_out) > RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
        qp->need_req_skb = 1;
        goto exit;
    }

    /* pick the wire opcode for this packet from the qp type, the WR
     * opcode, and the requester's position within the message */
    opcode = next_opcode(qp, wqe, wqe->wr.opcode);
    if(unlikely(opcode < 0)) {
        wqe->status = IB_WC_LOC_QP_OP_ERR;
        goto err;
    }

    /* opcode mask tells us first/middle/last packet, header layout, etc. */
    mask = rxe_opcode[opcode].mask;
    if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
        if(check_init_depth(qp, wqe))
            goto exit;
    }

    /* payload budget for this packet */
    mtu = get_mtu(qp);
    payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
    if(payload > mtu) {
        if (qp_type(qp) == IB_QPT_UD) {
            //... (UD messages may not exceed one MTU)
        }
        payload = mtu;
    }

    skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
    if(unlikely(!skb)) {
        pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
        wqe->status = IB_WC_LOC_QP_OP_ERR;
        goto err;
    }

    ret = finish_packet(qp, wqe, &pkt, skb, payload);
    if(unlikely(ret)) {
        pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
        if(ret == -EFAULT) {
            wqe->status = IB_WC_LOC_PROT_ERR;
        } else {
            wqe->status = IB_WC_LOC_QP_OP_ERR;
        }
        kfree_skb(skb);
        goto err;
    }

    /* snapshot state so a failed transmit could be rolled back */
    save_state(wqe, qp, &rollback_wqe, &rollback_psn);

    /* must run before rxe_xmit_packet() */
    update_wqe_state(qp, wqe, &pkt);
    update_wqe_psn(qp, wqe, &pkt, payload);

    /* hand the packet to the network layer */
    ret = rxe_xmit_packet(qp, &pkt, skb);
    if(ret) {
        /* NOTE(review): transmit-failure handling (rollback_state /
         * reschedule) is not implemented in this version */
    }

    update_state(qp, wqe, &pkt, payload);

    goto next_wqe;

err:
    /* flag the wqe and let the completer task generate the error CQE */
    wqe->state = wqe_state_error;
    __rxe_do_task(&qp->comp.task);
exit:
    rxe_drop_ref(qp);
    return -EAGAIN;
}
