in hw/irdma/verbs.c [3052:3262]
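/**
 * irdma_post_send - post a list of send work requests to the SQ
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
 * @bad_wr: return of bad wr if err
 */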
static int irdma_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *ib_wr,
const struct ib_send_wr **bad_wr)
{
struct irdma_qp *iwqp;
struct irdma_qp_uk *ukqp;
struct irdma_sc_dev *dev;
struct irdma_post_sq_info info;
enum irdma_status_code ret;
int err = 0;
unsigned long flags;
bool inv_stag;
struct irdma_ah *ah;
	bool reflush = false;

	iwqp = to_iwqp(ibqp);
	ukqp = &iwqp->sc_qp.qp_uk;
	dev = &iwqp->iwdev->rf->sc_dev;

spin_lock_irqsave(&iwqp->lock, flags);
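	/*
	 * If the SQ was already flushed and that flush has completed, any
	 * WRs posted now must be re-flushed afterwards so they too generate
	 * flush completions.
	 */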
if (iwqp->flush_issued && ukqp->sq_flush_complete)
reflush = true;
while (ib_wr) {
memset(&info, 0, sizeof(info));
inv_stag = false;
		info.wr_id = ib_wr->wr_id;
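		/*
		 * Generate a CQE for this WR if requested per-WR or if the
		 * QP signals all WRs; IB_SEND_FENCE maps to a read fence on
		 * the WQE.
		 */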
if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
info.signaled = true;
if (ib_wr->send_flags & IB_SEND_FENCE)
info.read_fence = true;
switch (ib_wr->opcode) {
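		/*
		 * Immediate data is only supported if the QP advertises
		 * IRDMA_SEND_WITH_IMM; convert it from network byte order
		 * and fall through to the common send handling.
		 */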
case IB_WR_SEND_WITH_IMM:
if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
info.imm_data_valid = true;
info.imm_data = ntohl(ib_wr->ex.imm_data);
} else {
err = -EINVAL;
break;
}
fallthrough;
case IB_WR_SEND:
case IB_WR_SEND_WITH_INV:
if (ib_wr->opcode == IB_WR_SEND ||
ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
if (ib_wr->send_flags & IB_SEND_SOLICITED)
info.op_type = IRDMA_OP_TYPE_SEND_SOL;
else
info.op_type = IRDMA_OP_TYPE_SEND;
} else {
if (ib_wr->send_flags & IB_SEND_SOLICITED)
info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
else
info.op_type = IRDMA_OP_TYPE_SEND_INV;
info.stag_to_inv = ib_wr->ex.invalidate_rkey;
}
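
			/*
			 * Inline sends copy the payload from the first SGE
			 * straight into the WQE; otherwise the WQE references
			 * the caller's SGL. UD/GSI QPs additionally need the
			 * AH, qkey and destination QPN from the ud_wr.
			 */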
if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_send.data = (void *)(uintptr_t)
							   ib_wr->sg_list[0].addr;
info.op.inline_send.len = ib_wr->sg_list[0].length;
if (iwqp->ibqp.qp_type == IB_QPT_UD ||
iwqp->ibqp.qp_type == IB_QPT_GSI) {
ah = to_iwah(ud_wr(ib_wr)->ah);
info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
}
ret = irdma_uk_inline_send(ukqp, &info, false);
} else {
info.op.send.num_sges = ib_wr->num_sge;
info.op.send.sg_list = ib_wr->sg_list;
if (iwqp->ibqp.qp_type == IB_QPT_UD ||
iwqp->ibqp.qp_type == IB_QPT_GSI) {
ah = to_iwah(ud_wr(ib_wr)->ah);
info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
}
ret = irdma_uk_send(ukqp, &info, false);
}
if (ret) {
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
err = -ENOMEM;
else
err = -EINVAL;
}
break;
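		/*
		 * Write-with-immediate mirrors send-with-immediate: check the
		 * IRDMA_WRITE_WITH_IMM capability, capture the immediate,
		 * then fall through to the plain RDMA write path.
		 */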
case IB_WR_RDMA_WRITE_WITH_IMM:
if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
info.imm_data_valid = true;
info.imm_data = ntohl(ib_wr->ex.imm_data);
} else {
err = -EINVAL;
break;
}
fallthrough;
case IB_WR_RDMA_WRITE:
if (ib_wr->send_flags & IB_SEND_SOLICITED)
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
else
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
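
			/* Same inline-vs-SGL split as the send path above */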
if (ib_wr->send_flags & IB_SEND_INLINE) {
info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
info.op.inline_rdma_write.len =
ib_wr->sg_list[0].length;
info.op.inline_rdma_write.rem_addr.addr =
rdma_wr(ib_wr)->remote_addr;
info.op.inline_rdma_write.rem_addr.lkey =
rdma_wr(ib_wr)->rkey;
ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
ret = irdma_uk_rdma_write(ukqp, &info, false);
}
if (ret) {
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
err = -ENOMEM;
else
err = -EINVAL;
}
break;
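		/*
		 * Read-with-invalidate shares the read path; inv_stag tells
		 * the uk layer to invalidate the local STag on completion.
		 */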
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
fallthrough;
case IB_WR_RDMA_READ:
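			/* HW caps the SGE count for RDMA reads */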
if (ib_wr->num_sge >
dev->hw_attrs.uk_attrs.max_hw_read_sges) {
err = -EINVAL;
break;
}
info.op_type = IRDMA_OP_TYPE_RDMA_READ;
info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
if (ret) {
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
err = -ENOMEM;
else
err = -EINVAL;
}
break;
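		/*
		 * Local invalidate posts immediately (post_sq = true) rather
		 * than waiting for the doorbell ring at the end of the loop.
		 */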
case IB_WR_LOCAL_INV:
info.op_type = IRDMA_OP_TYPE_INV_STAG;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
ret = irdma_uk_stag_local_invalidate(ukqp, &info, true);
if (ret)
err = -ENOMEM;
break;
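		/*
		 * Fast MR registration: build a stag_info from the ib_reg_wr
		 * (the rkey splits into an 8-bit key and a stag index), use
		 * VA-based addressing backed by the level-1 PBLE, and post
		 * through the sc layer with post_sq = true.
		 */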
case IB_WR_REG_MR: {
struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct irdma_fast_reg_stag_info stag_info = {};

stag_info.signaled = info.signaled;
stag_info.read_fence = info.read_fence;
stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
stag_info.wr_id = ib_wr->wr_id;
stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
stag_info.total_len = iwmr->ibmr.length;
stag_info.reg_addr_pa = *palloc->level1.addr;
stag_info.first_pm_pbl_index = palloc->level1.idx;
stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
stag_info.chunk_size = 1;
ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
true);
if (ret)
err = -ENOMEM;
break;
}
default:
err = -EINVAL;
ibdev_dbg(&iwqp->iwdev->ibdev,
"VERBS: upost_send bad opcode = 0x%x\n",
ib_wr->opcode);
break;
}
if (err)
break;
ib_wr = ib_wr->next;
}
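	/*
	 * Ring the SQ doorbell once for everything posted above, but only
	 * while the QP is still in a working state. If the SQ was already
	 * flushed, re-issue the flush so the new WRs get flush completions.
	 */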
if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
irdma_uk_qp_post_wr(ukqp);
spin_unlock_irqrestore(&iwqp->lock, flags);
} else if (reflush) {
ukqp->sq_flush_complete = false;
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
} else {
spin_unlock_irqrestore(&iwqp->lock, flags);
}
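	/* On error the loop exits early; ib_wr is the WR that failed */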
	if (err)
		*bad_wr = ib_wr;

	return err;
}