in efawin/verbs.c [1463:1542]
int efa_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
		  struct ibv_send_wr **bad)
{
	struct efa_io_tx_meta_desc *meta_desc;
	struct efa_qp *qp = to_efa_qp(ibvqp);
	struct efa_io_tx_wqe tx_wqe;
	struct efa_sq *sq = &qp->sq;
	struct efa_wq *wq = &sq->wq;
	uint32_t sq_desc_offset;
	uint32_t curbatch = 0;
	struct efa_ah *ah;
	int err = 0;

	mmio_wc_spinlock(&wq->wqlock);

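	/* Build and post one TX WQE per work request in the chain */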
	while (wr) {
		err = efa_post_send_validate_wr(qp, wr);
		if (err) {
			*bad = wr;
			goto ring_db;
		}

		memset(&tx_wqe, 0, sizeof(tx_wqe));
		meta_desc = &tx_wqe.meta;
		ah = to_efa_ah(wr->wr.ud.ah);

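		/*
		 * Inline payloads are copied into the WQE itself; otherwise
		 * the WQE references the caller's scatter/gather list.
		 */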
		if (wr->send_flags & IBV_SEND_INLINE) {
			efa_post_send_inline_data(wr, &tx_wqe);
		} else {
			meta_desc->length = wr->num_sge;
			efa_post_send_sgl(tx_wqe.data.sgl, wr->sg_list,
					  wr->num_sge);
		}

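		/* SEND_WITH_IMM carries its immediate data in the meta descriptor */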
		if (wr->opcode == IBV_WR_SEND_WITH_IMM) {
			meta_desc->immediate_data = be32toh(wr->imm_data);
			EFA_SET(&meta_desc->ctrl1, EFA_IO_TX_META_DESC_HAS_IMM,
				1);
		}

		/* Set the rest of the descriptor fields */
		efa_set_common_ctrl_flags(meta_desc, sq, EFA_IO_SEND);
		meta_desc->req_id = efa_wq_get_next_wrid_idx_locked(wq,
								    wr->wr_id);
		meta_desc->dest_qp_num = wr->wr.ud.remote_qpn;
		meta_desc->ah = ah->efa_ah;
		meta_desc->qkey = wr->wr.ud.remote_qkey;

		/* Copy the descriptor into the SQ slot picked by the producer counter */
		sq_desc_offset = (wq->pc & wq->desc_mask) *
				 sizeof(tx_wqe);
		mmio_memcpy_x64(sq->desc + sq_desc_offset, &tx_wqe,
				sizeof(tx_wqe));

		/* advance index and change phase */
		efa_sq_advance_post_idx(sq);
		curbatch++;

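		/*
		 * A single doorbell covers at most max_batch_wr WQEs: once
		 * the batch fills up, flush the write-combining buffers,
		 * ring the doorbell for what has been posted so far, and
		 * reopen the write-combining section.
		 */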
		if (curbatch == sq->max_batch_wr) {
			curbatch = 0;
			mmio_flush_writes();
			efa_sq_ring_doorbell(sq, wq->pc);
			mmio_wc_start();
		}

		wr = wr->next;
	}

ring_db:
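	/* Ring the doorbell for any WQEs left in the final partial batch */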
	if (curbatch) {
		mmio_flush_writes();
		efa_sq_ring_doorbell(sq, wq->pc);
	}

	/*
	 * Not using mmio_wc_spinunlock as the doorbell write should be done
	 * inside the lock.
	 */
	pthread_spin_unlock(&wq->wqlock);

	return err;
}