int bnxt_qplib_post_send()

in hw/bnxt_re/qplib_fp.c [1701:1944]

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
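	/*
	 * Posting is allowed only in RTS; an ERR QP is let through so
	 * the WQE can still be queued and later flushed to the CQ.
	 */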
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

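	/*
	 * Size the WQE in 16B slots; qdf adds the slack used by the
	 * queue-full check. Fail with -ENOMEM if the SQ cannot hold it.
	 */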
	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

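	/*
	 * Reserve a software WQE and its PSN search buffer, then record
	 * the bookkeeping needed to complete the request later.
	 */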
	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

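	/*
	 * A QP already in ERR skips building a hardware WQE; the entry
	 * is still queued below so poll_cq can flush it.
	 */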
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

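	/*
	 * Each WQE begins with two 16B slots, a base header and an
	 * extended header, zeroed here and filled per opcode below.
	 */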
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

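	/* Stage the payload: copy it inline or attach scatter/gather entries */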
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Fill the opcode-specific fields of the WQE */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
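			/*
			 * Connected QPs advance the PSN by one per
			 * MTU-sized packet of payload.
			 */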
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

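		/*
		 * Mark every PBL entry valid for the PTU before handing
		 * the page list to the hardware.
		 */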
		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
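	/* Record this WQE's PSN window for retransmit handling */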
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
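	/*
	 * Publish the WQE: advance the producer even on the ERR-QP path
	 * so that the flush logic sees the request.
	 */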
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
	qp->wqe_cnt++;
done:
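	/*
	 * For an ERR-state post, schedule the CQ notification worker so
	 * a flush completion is actually generated.
	 */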
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}
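
A minimal caller sketch for context, assuming the qplib interfaces above:
example_post_one_send() and its parameters are hypothetical, while
bnxt_qplib_post_send(), bnxt_qplib_post_send_db(), and the bnxt_qplib_swqe
fields are the driver's own API. The verbs layer builds one swqe per work
request, posts it, and rings the SQ doorbell once the chain is queued.

/* Hypothetical helper, not part of qplib_fp.c */
static int example_post_one_send(struct bnxt_qplib_qp *qp, u64 wr_id,
				 struct bnxt_qplib_sge *sgl, int num_sge)
{
	struct bnxt_qplib_swqe wqe = {};
	int rc;

	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;	/* request a CQE */
	wqe.wr_id = wr_id;
	wqe.sg_list = sgl;
	wqe.num_sge = num_sge;

	rc = bnxt_qplib_post_send(qp, &wqe);
	if (!rc)
		bnxt_qplib_post_send_db(qp);	/* ring the SQ doorbell */
	return rc;
}

Splitting the post from the doorbell lets a caller queue a whole chain of
work requests and ring the doorbell once at the end, which is how the
bnxt_re verbs layer drives this function.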