in hw/qedr/verbs.c [2455:2713]
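/* Translate an ib_modify_qp() request into the qed_rdma_modify_qp_in_params
 * structure consumed by the qed core, validate the requested transition, and
 * keep the software copy of the QP state in sync with the firmware object.
 */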
int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	enum ib_qp_state old_qp_state, new_qp_state;
	enum qed_roce_qp_state cur_state;
	int rc = 0;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
		 attr->qp_state);

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;
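
	/* Resolve the old/new ib_qp_state pair for this call; when
	 * IB_QP_STATE is not set the transition collapses to
	 * old_qp_state -> old_qp_state. The generic transition check is
	 * applied only for RoCE: the iWARP QP state machine is driven by the
	 * connection manager instead, so the check is skipped there.
	 */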
	old_qp_state = qedr_get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qp_state = attr->qp_state;
	else
		new_qp_state = old_qp_state;

	if (rdma_protocol_roce(&dev->ibdev, 1)) {
		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
					ibqp->qp_type, attr_mask)) {
			DP_ERR(dev,
			       "modify qp: invalid attribute mask=0x%x specified for\n"
			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
			       attr_mask, qp->qp_id, ibqp->qp_type,
			       old_qp_state, new_qp_state);
			rc = -EINVAL;
			goto err;
		}
	}

	/* Translate each set attr_mask bit into qp_params, flagging the
	 * corresponding field as valid in qp_params.modify_flags.
	 */
	if (attr_mask & IB_QP_STATE) {
		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
	}

	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp_params.sqd_async = true;
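
	/* RoCE exposes a single default P_Key: the index is range-checked,
	 * but the value programmed is always QEDR_ROCE_PKEY_DEFAULT.
	 */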
	if (attr_mask & IB_QP_PKEY_INDEX) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
			rc = -EINVAL;
			goto err;
		}

		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;
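
	/* Remote access flags map onto per-operation enable bits; each is
	 * non-zero iff the matching IB_ACCESS_REMOTE_* bit is set.
	 */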
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
						  IB_ACCESS_REMOTE_READ;
		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
						   IB_ACCESS_REMOTE_WRITE;
		qp_params.incoming_atomic_en = attr->qp_access_flags &
					       IB_ACCESS_REMOTE_ATOMIC;
	}
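
	/* Address vector and path MTU are RoCE-only attributes here. The MTU
	 * is clamped to what the underlying netdev can carry, and the GID,
	 * destination MAC and local MAC are resolved into the FW params.
	 */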
	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			return -EINVAL;

		if (attr_mask & IB_QP_PATH_MTU) {
			if (attr->path_mtu < IB_MTU_256 ||
			    attr->path_mtu > IB_MTU_4096) {
				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
				rc = -EINVAL;
				goto err;
			}
			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
				      ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)));
		}

		if (!qp->mtu) {
			qp->mtu =
			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);

		qp_params.traffic_class_tos = grh->traffic_class;
		qp_params.flow_label = grh->flow_label;
		qp_params.hop_limit_ttl = grh->hop_limit;

		qp->sgid_idx = grh->sgid_index;

		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
		if (rc) {
			DP_ERR(dev,
			       "modify qp: problems with GID index %d (rc=%d)\n",
			       grh->sgid_index, rc);
			return rc;
		}

		rc = qedr_get_dmac(dev, &attr->ah_attr,
				   qp_params.remote_mac_addr);
		if (rc)
			return rc;

		qp_params.use_local_mac = true;
		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);

		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
			 qp_params.remote_mac_addr);

		qp_params.mtu = qp->mtu;
		qp_params.lb_indication = false;
	}
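
	/* Nothing set an MTU on this call: keep the QP's previously
	 * negotiated MTU, or fall back to the one derived from the netdev.
	 */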
	if (!qp_params.mtu) {
		/* Stay with current MTU */
		if (qp->mtu)
			qp_params.mtu = qp->mtu;
		else
			qp_params.mtu =
			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);

		/* The received timeout value is an exponent used like this:
		 *    "12.7.34 LOCAL ACK TIMEOUT
		 *    Value representing the transport (ACK) timeout for use by
		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
		 * The FW expects timeout in msec so we need to divide the usec
		 * result by 1000. We'll approximate 1000 ~ 2^10 and 4.096 ~ 2^2,
		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
		 * A timeout of zero means infinite and is passed through as-is;
		 * the max_t() ensures sub-1 msec values are configured as 1 msec.
		 */
		if (attr->timeout)
			qp_params.ack_timeout =
					1 << max_t(int, attr->timeout - 8, 0);
		else
			qp_params.ack_timeout = 0;
	}
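
	/* The remaining attributes translate 1:1 into qp_params. PSNs are
	 * also cached on the qedr_qp, and the two rd_atomic depths are
	 * validated against the device limits reported by the qed core.
	 */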
	if (attr_mask & IB_QP_RETRY_CNT) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
		qp_params.retry_cnt = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
		qp_params.rnr_retry_cnt = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_RQ_PSN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
		qp_params.rq_psn = attr->rq_psn;
		qp->rq_psn = attr->rq_psn;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
			rc = -EINVAL;
			DP_ERR(dev,
			       "unsupported max_rd_atomic=%d, supported=%d\n",
			       attr->max_rd_atomic,
			       dev->attr.max_qp_req_rd_atomic_resc);
			goto err;
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
		qp_params.sq_psn = attr->sq_psn;
		qp->sq_psn = attr->sq_psn;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic >
		    dev->attr.max_qp_resp_rd_atomic_resc) {
			DP_ERR(dev,
			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
			       attr->max_dest_rd_atomic,
			       dev->attr.max_qp_resp_rd_atomic_resc);
			rc = -EINVAL;
			goto err;
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);

		qp_params.dest_qp = attr->dest_qp_num;
		qp->dest_qp_num = attr->dest_qp_num;
	}
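
	/* Sample the pre-modify state now: qedr_update_qp_state() below needs
	 * it, and qp->state may be overwritten early for the error case.
	 */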
	cur_state = qp->state;

	/* Update the QP state before the actual ramrod to prevent a race with
	 * the fast path: moving the QP to error makes the device flush its
	 * CQEs, and polling a flushed CQE while the SW copy of the QP state
	 * is not yet error would be treated as a potential issue.
	 */
	if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
	    !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
		qp->state = QED_ROCE_QP_STATE_ERR;
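
	/* The GSI QP is implemented in software, so there is no FW QP object
	 * to modify; everything else goes through the qed modify ramrod.
	 */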
	if (qp->qp_type != IB_QPT_GSI)
		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
					      qp->qed_qp, &qp_params);
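
	/* For kernel-owned non-GSI QPs, also run the driver's SW
	 * state-transition handling; in all cases the cached qp->state is
	 * brought up to date with the requested state.
	 */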
	if (attr_mask & IB_QP_STATE) {
		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
			rc = qedr_update_qp_state(dev, qp, cur_state,
						  qp_params.new_state);
		qp->state = qp_params.new_state;
	}

err:
	return rc;
}