static int __mlx4_ib_modify_qp()

in drivers/infiniband/hw/mlx4/qp.c [1415:1883]


static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;

	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

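	/*
	 * The QPC flags word packs the target QP state into bits 31:28 and
	 * the mlx4 service type (RC/UC/UD/MLX/...) into bits 23:16; optpar
	 * accumulates the optional context fields that this transition
	 * asks the firmware to apply.
	 */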
	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

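	/*
	 * mtu_msgmax holds the path MTU in bits 7:5 and log2 of the max
	 * message size in bits 4:0.  MLX (QP0/QP1) traffic is capped at
	 * 2^11 = 2048 bytes; UD QPs always run at 4K MTU, with the message
	 * limit raised to the max GSO size when LSO is enabled.
	 */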
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

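	/*
	 * Queue sizes go to the firmware as log2 values: log2 of the WQE
	 * count shifted left by 3, with log2(stride) - 4 in the low bits.
	 */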
	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

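	/*
	 * Doorbells for this QP go through the user context's UAR page for
	 * userspace QPs, or through the driver's private UAR for kernel QPs.
	 */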
	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

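	/*
	 * On the INIT->RTR transition, attach the per-port flow counter if
	 * one was allocated (0xff means no counter), and register network
	 * steering rules for NETIF QPs; steer_qp lets the error path undo
	 * the registration.
	 */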
	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
			context->pri_path.counter_index =
						dev->counters[qp->port - 1];
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else {
			context->pri_path.counter_index = 0xff;
		}

		if (qp->flags & MLX4_IB_QP_NETIF) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
			steer_qp = 1;
		}
	}

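	/*
	 * For SR-IOV proxy/tunnel QP types the hardware P_Key check is
	 * disabled (0x40), since P_Keys there are paravirtualized and
	 * validated by the PF instead.
	 */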
	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

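	/*
	 * Program the primary address path from the AV; on RoCE ports
	 * mlx4_set_path() also resolves the L2 attributes (MAC, VLAN).
	 */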
	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
				  attr_mask & IB_QP_PORT ?
				  attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
				      &context->alt_path,
				      attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

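	/* Wire the QP to its PD and its send/receive CQNs. */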
	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd       = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

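	/*
	 * The firmware takes log2 of the maximum number of outstanding
	 * RDMA reads/atomics: fls(n - 1) rounds n up to the next power of
	 * two, e.g. a requested depth of 5 becomes 2^3 = 8.
	 */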
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		} else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY"
				       " 0x%x (range 0xffff0000..0xffffffff"
				       " is reserved)\n", attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

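	/*
	 * For MLX, UD and raw packet QPs moving to RTR, build sched_queue
	 * by hand: the port number in bit 6 plus the default scheduling
	 * queue for QP0 or ordinary traffic.  Setting fl to 0x80 appears
	 * to force loopback so that SR-IOV proxy/tunnel traffic is caught
	 * by the PF demux logic.
	 */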
	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD ||
	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
		    IB_LINK_LAYER_ETHERNET) {
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
				context->pri_path.feup = 1 << 7; /* don't fsm */
			/* handle smac_index */
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
				err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
				if (err) {
					err = -EINVAL;
					goto out;
				}
				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
					dev->qp1_proxy[qp->port - 1] = qp;
			}
		}
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					MLX4_IB_LINK_TYPE_ETH;
		if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/* set QP to receive both tunneled & non-tunneled packets */
			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
				context->srqn = cpu_to_be32(7 << 28);
		}
	}

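	/*
	 * UD QPs on a RoCE port mark the primary path's link type as
	 * Ethernet when moving to RTR.
	 */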
	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(
				&dev->ib_dev, qp->port) ==
				IB_LINK_LAYER_ETHERNET;
		if (is_eth) {
			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

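	/* Let kernel QPs use the reserved L_Key (rlkey bit 4). */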
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1U << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

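	/*
	 * Issue the actual state-transition command to the firmware;
	 * optpar tells it which optional context fields are valid for
	 * this transition.
	 */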
	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
				       qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET) {
		if (!ibqp->uobject) {
			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
					 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
			if (send_cq != recv_cq)
				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

			qp->rq.head = 0;
			qp->rq.tail = 0;
			qp->sq.head = 0;
			qp->sq.tail = 0;
			qp->sq_next_wqe = 0;
			if (qp->rq.wqe_cnt)
				*qp->db.db  = 0;

			if (qp->flags & MLX4_IB_QP_NETIF)
				mlx4_ib_steer_qp_reg(dev, qp, 0);
		}
		if (qp->pri.smac || qp->pri.smac_port) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}

		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}
out:
	if (err && steer_qp)
		mlx4_ib_steer_qp_reg(dev, qp, 0);
	kfree(context);
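	/*
	 * Commit or roll back the candidate SMAC registered when the
	 * primary/alternate paths were programmed: on error unregister the
	 * candidate, on success promote it and drop the old registration.
	 */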
	if (qp->pri.candidate_smac || qp->pri.candidate_smac_port) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
		} else {
			if (qp->pri.smac || qp->pri.smac_port)
				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = qp->pri.candidate_smac;
			qp->pri.smac_index = qp->pri.candidate_smac_index;
			qp->pri.smac_port = qp->pri.candidate_smac_port;
		}
		qp->pri.candidate_smac = 0;
		qp->pri.candidate_smac_index = 0;
		qp->pri.candidate_smac_port = 0;
	}
	if (qp->alt.candidate_smac) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
		} else {
			if (qp->alt.smac)
				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = qp->alt.candidate_smac;
			qp->alt.smac_index = qp->alt.candidate_smac_index;
			qp->alt.smac_port = qp->alt.candidate_smac_port;
		}
		qp->alt.candidate_smac = 0;
		qp->alt.candidate_smac_index = 0;
		qp->alt.candidate_smac_port = 0;
	}

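	/*
	 * The same commit/roll-back scheme applies to VLAN registrations
	 * on the primary and alternate paths.
	 */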
	if (qp->pri.update_vid) {
		if (err) {
			if (qp->pri.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
						     qp->pri.candidate_vid);
		} else {
			if (qp->pri.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
						     qp->pri.vid);
			qp->pri.vid = qp->pri.candidate_vid;
			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
			qp->pri.vlan_index = qp->pri.candidate_vlan_index;
		}
		qp->pri.candidate_vid = 0xFFFF;
		qp->pri.update_vid = 0;
	}

	if (qp->alt.update_vid) {
		if (err) {
			if (qp->alt.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
						     qp->alt.candidate_vid);
		} else {
			if (qp->alt.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
						     qp->alt.vid);
			qp->alt.vid = qp->alt.candidate_vid;
			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
			qp->alt.vlan_index = qp->alt.candidate_vlan_index;
		}
		qp->alt.candidate_vid = 0xFFFF;
		qp->alt.update_vid = 0;
	}

	return err;
}