in hw/irdma/verbs.c [1105:1442]
/**
 * irdma_modify_qp_roce - modify qp request for RoCEv2 QPs
 * @ibqp: qp ptr (userspace or kernel)
 * @attr: attributes to apply
 * @attr_mask: IB_QP_* bits selecting which fields of @attr are valid
 * @udata: user data (NULL for kernel-created QPs)
 *
 * Copies the requested attributes into the QP's host context
 * (roce/udp offload info), validates the IB QP state transition and,
 * when the transition requires it, issues a hardware modify-QP command.
 * Returns 0 on success or a negative errno on failure.
 */
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_qp_host_ctx_info *ctx_info;
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp_info;
	struct irdma_modify_qp_info info = {};
	struct irdma_modify_qp_resp uresp = {};
	struct irdma_modify_qp_req ureq = {};
	unsigned long flags;
	u8 issue_modify_qp = 0;
	int ret = 0;

	ctx_info = &iwqp->ctx_info;
	roce_info = &iwqp->roce_info;
	udp_info = &iwqp->udp_info;

	/* Only the standard attribute bits are supported. */
	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	if (attr_mask & IB_QP_DEST_QPN)
		roce_info->dest_qp = attr->dest_qp_num;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
				       &roce_info->p_key);
		if (ret)
			return ret;
	}

	if (attr_mask & IB_QP_QKEY)
		roce_info->qkey = attr->qkey;

	if (attr_mask & IB_QP_PATH_MTU)
		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);

	if (attr_mask & IB_QP_SQ_PSN) {
		udp_info->psn_nxt = attr->sq_psn;
		udp_info->lsn = 0xffff;
		udp_info->psn_una = attr->sq_psn;
		udp_info->psn_max = attr->sq_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		udp_info->epsn = attr->rq_psn;

	if (attr_mask & IB_QP_RNR_RETRY)
		udp_info->rnr_nak_thresh = attr->rnr_retry;

	if (attr_mask & IB_QP_RETRY_CNT)
		udp_info->rexmit_thresh = attr->retry_cnt;

	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;

	if (attr_mask & IB_QP_AV) {
		struct irdma_av *av = &iwqp->roce_ah.av;
		const struct ib_gid_attr *sgid_attr;
		u16 vlan_id = VLAN_N_VID;
		u32 local_ip[4];

		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			udp_info->ttl = attr->ah_attr.grh.hop_limit;
			udp_info->flow_label = attr->ah_attr.grh.flow_label;
			udp_info->tos = attr->ah_attr.grh.traffic_class;
			udp_info->src_port =
				rdma_get_udp_sport(udp_info->flow_label,
						   ibqp->qp_num,
						   roce_info->dest_qp);
			/*
			 * TOS may change the priority/QoS node, so tear down
			 * the old WS node before adding the new one.
			 */
			irdma_qp_rem_qos(&iwqp->sc_qp);
			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
			ctx_info->user_pri = rt_tos2priority(udp_info->tos);
			iwqp->sc_qp.user_pri = ctx_info->user_pri;
			if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
				return -ENOMEM;
			irdma_qp_add_qos(&iwqp->sc_qp);
		}
		sgid_attr = attr->ah_attr.grh.sgid_attr;
		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
					      ctx_info->roce_info->mac_addr);
		if (ret)
			return ret;

		/* In DCB mode an untagged QP still needs a PCP-bearing tag. */
		if (vlan_id >= VLAN_N_VID && iwdev->dcb)
			vlan_id = 0;
		if (vlan_id < VLAN_N_VID) {
			udp_info->insert_vlan_tag = true;
			udp_info->vlan_tag = vlan_id |
				ctx_info->user_pri << VLAN_PRIO_SHIFT;
		} else {
			udp_info->insert_vlan_tag = false;
		}

		av->attrs = attr->ah_attr;
		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
		if (av->sgid_addr.saddr.sa_family == AF_INET6) {
			__be32 *daddr =
				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
			__be32 *saddr =
				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;

			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);

			udp_info->ipv4 = false;
			irdma_copy_ip_ntohl(local_ip, daddr);

			/* NOTE(review): this arp_idx is overwritten by the
			 * unconditional irdma_add_arp() below; kept for the
			 * side effect of the resolve lookup only.
			 */
			udp_info->arp_idx = irdma_arp_table(iwdev->rf,
							    &local_ip[0],
							    false, NULL,
							    IRDMA_ARP_RESOLVE);
		} else {
			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;

			local_ip[0] = ntohl(daddr);

			udp_info->ipv4 = true;
			udp_info->dest_ip_addr[0] = 0;
			udp_info->dest_ip_addr[1] = 0;
			udp_info->dest_ip_addr[2] = 0;
			udp_info->dest_ip_addr[3] = local_ip[0];

			udp_info->local_ipaddr[0] = 0;
			udp_info->local_ipaddr[1] = 0;
			udp_info->local_ipaddr[2] = 0;
			udp_info->local_ipaddr[3] = ntohl(saddr);
		}
		udp_info->arp_idx =
			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
				      attr->ah_attr.roce.dmac);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ord=%d\n",
				  attr->max_rd_atomic,
				  dev->hw_attrs.max_hw_ord);
			return -EINVAL;
		}
		if (attr->max_rd_atomic)
			roce_info->ord_size = attr->max_rd_atomic;
		info.ord_valid = true;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
			/* Fix: log the dest value that was actually checked,
			 * not attr->max_rd_atomic which may be unset here.
			 */
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ird=%d\n",
				  attr->max_dest_rd_atomic,
				  dev->hw_attrs.max_hw_ird);
			return -EINVAL;
		}
		if (attr->max_dest_rd_atomic)
			roce_info->ird_size = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/* LOCAL_WRITE also enables responder writes in this HW. */
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			roce_info->rd_en = true;
	}

	/* Serialize against any in-flight HW modify on this QP. */
	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));

	ibdev_dbg(&iwdev->ibdev,
		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
		  iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);

	spin_lock_irqsave(&iwqp->lock, flags);
	if (attr_mask & IB_QP_STATE) {
		if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
					iwqp->ibqp.qp_type, attr_mask)) {
			ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
				   iwqp->ibqp.qp_num, iwqp->ibqp_state,
				   attr->qp_state);
			ret = -EINVAL;
			goto exit;
		}
		info.curr_iwarp_state = iwqp->iwarp_state;

		switch (attr->qp_state) {
		case IB_QPS_INIT:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				ret = -EINVAL;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				ret = -EINVAL;
				goto exit;
			}
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
			issue_modify_qp = 1;
			break;
		case IB_QPS_RTS:
			if (iwqp->ibqp_state < IB_QPS_RTR ||
			    iwqp->ibqp_state == IB_QPS_ERR) {
				ret = -EINVAL;
				goto exit;
			}

			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			info.ord_valid = true;
			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
			issue_modify_qp = 1;
			/* Push pages are allocated lazily at RTS; drop the
			 * lock since irdma_alloc_push_page() may sleep.
			 */
			if (iwdev->push_mode && udata &&
			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_alloc_push_page(iwqp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			break;
		case IB_QPS_SQD:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
				goto exit;

			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
				ret = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			/* HW requires RTS -> SQD before moving to ERROR. */
			if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				info.next_iwarp_state = IRDMA_QP_STATE_SQD;
				irdma_hw_modify_qp(iwdev, iwqp, &info, true);
				spin_lock_irqsave(&iwqp->lock, flags);
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				/* Already in ERROR: only honor a userspace
				 * request to re-flush the work queues.
				 */
				if (udata) {
					if (ib_copy_from_udata(&ureq, udata,
					    min(sizeof(ureq), udata->inlen)))
						return -EINVAL;
					irdma_flush_wqes(iwqp,
					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
					    IRDMA_REFLUSH);
				}
				return 0;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
			issue_modify_qp = 1;
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;
	}

	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (issue_modify_qp) {
			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
				return -EINVAL;
			spin_lock_irqsave(&iwqp->lock, flags);
			/* Only commit the state if no concurrent modify
			 * moved the QP while the lock was dropped.
			 */
			if (iwqp->iwarp_state == info.curr_iwarp_state) {
				iwqp->iwarp_state = info.next_iwarp_state;
				iwqp->ibqp_state = attr->qp_state;
			}
			if (iwqp->ibqp_state > IB_QPS_RTS &&
			    !iwqp->flush_issued) {
				iwqp->flush_issued = 1;
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
						       IRDMA_FLUSH_RQ |
						       IRDMA_FLUSH_WAIT);
			} else {
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		} else {
			iwqp->ibqp_state = attr->qp_state;
		}
		/* Report push-WQE mmap info back to user space (GEN_2+). */
		if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
			struct irdma_ucontext *ucontext;

			ucontext = rdma_udata_to_drv_context(udata,
					struct irdma_ucontext, ibucontext);
			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    !iwqp->push_wqe_mmap_entry &&
			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
				&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
				uresp.push_valid = 1;
				uresp.push_offset = iwqp->sc_qp.push_offset;
			}
			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
					       udata->outlen));
			if (ret) {
				irdma_remove_push_mmap_entries(iwqp);
				ibdev_dbg(&iwdev->ibdev,
					  "VERBS: copy_to_udata failed\n");
				return ret;
			}
		}
	}

	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return ret;
}