static int create_qp_common()

in hw/mlx4/qp.c [952:1247]


static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn,
			    struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	int qpn;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
	struct mlx4_ib_cq *mcq;
	unsigned long flags;

	/* When tunneling special QPs, we use a plain UD QP */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else {
				if (mlx4_is_master(dev->dev) ||
				    qp0_enabled_vf(dev->dev, sqpn))
					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
				else
					qp_type = MLX4_IB_QPT_PROXY_SMI;
			}
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
					     tnl_init->port))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
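		/* Worked example of the QPN layout implied by the arithmetic
		 * below: each slave owns a block of 8 tunnel QPNs, indexed
		 * within the block by proxy_qp_type * 2 + port - 1.  For
		 * slave 1, GSI (== 1), port 1:
		 * base + 8 * 1 + 1 * 2 + 1 - 1 == base + 10.
		 */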
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

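	/* The proxy/tunnel MLX4_IB_QPT_* values are distinct bit flags, so
	 * the bitwise AND below is a set-membership test.  Special QPs and
	 * their proxies keep extra header-build state in qp->sqp.
	 */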
	if (init_attr->qp_type == IB_QPT_SMI ||
	    init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
	    qp_type == MLX4_IB_QPT_GSI ||
	    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
			MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
		qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
		if (!qp->sqp)
			return -ENOMEM;
	}

	qp->mlx4_ib_qp_type = qp_type;

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (udata) {
		struct mlx4_ib_create_qp ucmd;
		size_t copy_len;
		int shift;
		int n;

		copy_len = sizeof(struct mlx4_ib_create_qp);

		if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
			err = -EFAULT;
			goto err;
		}

		qp->inl_recv_sz = ucmd.inl_recv_sz;

		if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
			if (!(dev->dev->caps.flags &
			      MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
				pr_debug("scatter FCS is unsupported\n");
				err = -EOPNOTSUPP;
				goto err;
			}

			qp->flags |= MLX4_IB_QP_SCATTER_FCS;
		}

		err = set_rq_size(dev, &init_attr->cap, udata,
				  qp_has_rq(init_attr), qp, qp->inl_recv_sz);
		if (err)
			goto err;

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

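		/* User-owned QP: pin the user buffer that backs the work
		 * queues, then build its MTT translation below.
		 */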
		qp->umem =
			ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
		qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = set_rq_size(dev, &init_attr->cap, udata,
				  qp_has_rq(init_attr), qp, 0);
		if (err)
			goto err;

		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
			if (dev->steering_support ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED)
				qp->flags |= MLX4_IB_QP_NETIF;
			else {
				err = -EINVAL;
				goto err;
			}
		}

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

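		/* Kernel-owned QP: allocate the WQ buffer in the kernel and
		 * build its MTT translation from the buffer pages.
		 */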
		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
				   &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
					     sizeof(u64), GFP_KERNEL);
		qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
					     sizeof(u64), GFP_KERNEL);
		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
		qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
		 * otherwise, the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion. */
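		/* The reservation flags below mirror the queue caps: a
		 * BlueFlame-capable QPN is requested only if the QP has a
		 * send queue, and an A0 QPN only if it has a receive queue.
		 */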
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
						    (init_attr->cap.max_send_wr ?
						     MLX4_RESERVE_ETH_BF_QP : 0) |
						    (init_attr->cap.max_recv_wr ?
						     MLX4_RESERVE_A0_QP : 0),
						    qp->mqp.usage);
		else if (qp->flags & MLX4_IB_QP_NETIF)
			err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
		else
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0,
						    qp->mqp.usage);
		if (err)
			goto err_proxy;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

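	/* mlx4 QPNs are 24 bits wide; setting the top bit appears to tag
	 * the QPN as an XRC target (inferred from the constant; not stated
	 * in this function).
	 */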
	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * The hardware wants the QPN written in big-endian order (after
	 * shifting) for the send doorbell.  Precompute this value once
	 * here to save a little work on each posted send.
	 */
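	/* Worked example: for qpn 0x001234, qpn << 8 == 0x00123400 and
	 * swab32() yields 0x00341200, which a little-endian CPU stores as
	 * the bytes 00 12 34 00, i.e. 0x00123400 in big-endian order.
	 */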
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Add the QP to the device-wide list so that the reset flow can
	 * reach every QP.
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Add the QP to its CQs' lists so that the reset flow can reach
	 * the QPs attached to each CQ.
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
	return 0;

err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (udata) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(context, &qp->db);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (!qp->umem)
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
	ib_umem_release(qp->umem);

err_db:
	if (!udata && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	kfree(qp->sqp);
	return err;
}
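
The error labels above unwind in exact reverse order of acquisition and rely on fall-through: a failure at any step jumps to the label that releases the most recently acquired resource, then falls through every earlier label (some guarded by conditions such as !udata or !qp->umem). A minimal, self-contained sketch of the same goto-unwind idiom, using hypothetical malloc-based steps rather than the mlx4 API:

#include <errno.h>
#include <stdlib.h>

/* Each err_* label releases one resource and falls through to the
 * labels before it, so teardown runs in reverse order of setup.
 */
static int setup_three(void **a, void **b, void **c)
{
	int err;

	*a = malloc(64);
	if (!*a)
		return -ENOMEM;	/* nothing to unwind yet */

	*b = malloc(64);
	if (!*b) {
		err = -ENOMEM;
		goto err_a;	/* release only what step 1 took */
	}

	*c = malloc(64);
	if (!*c) {
		err = -ENOMEM;
		goto err_b;	/* frees b, then falls through to err_a */
	}
	return 0;

err_b:
	free(*b);
err_a:
	free(*a);
	return err;
}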