static void ntb_tx_copy_callback()

in ntb_transport.c [1725:1779]

Completion callback for the transmit side. It is invoked by the dmaengine
when a DMA copy finishes (res != NULL) and directly by the CPU-memcpy path
(res == NULL). Failed or aborted DMA transfers are retried with a CPU copy;
on success the frame is marked done, the peer is notified, the client's
tx_handler is called, and the entry is returned to the free queue.

static void ntb_tx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* res is non-NULL only when the copy was done by a DMA engine;
	 * the CPU-memcpy path invokes this callback with res == NULL
	 */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
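		/* read/write failures are counted as errors, then fall
		 * through to retry on the CPU alongside aborted transfers
		 */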
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			fallthrough;
		case DMA_TRANS_ABORTED:
		{
			void __iomem *offset =
				qp->tx_mw + qp->tx_max_frame *
				entry->tx_index;

			/* resubmit via CPU; ntb_memcpy_tx() re-enters
			 * this callback with res == NULL on completion
			 */
			ntb_memcpy_tx(entry, offset);
			qp->tx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	/* mark the frame done in the payload header that lives in the
	 * peer-visible memory window
	 */
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	/* wake the peer: signal by MSI when negotiated, otherwise ring
	 * this queue's doorbell bit
	 */
	if (qp->use_msi)
		ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
	else
		ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	/* recycle the entry onto this queue's TX free list */
	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}
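
For context, this function is a dmaengine "result" callback: the TX submit
path attaches it to the DMA descriptor before issuing the copy. The sketch
below shows that hookup under stated assumptions; the helper name is
hypothetical and the error handling is simplified (in the driver this logic
lives in ntb_async_tx_submit()):

/* Illustrative only: assumes @dest/@src are already DMA-mapped and
 * @entry is the queue entry that ntb_tx_copy_callback() will complete.
 */
static int ntb_tx_submit_sketch(struct dma_chan *chan,
				struct ntb_queue_entry *entry,
				dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* prep a memcpy descriptor that raises an interrupt on completion */
	txd = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						   DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;	/* caller would fall back to ntb_memcpy_tx() */

	/* the engine invokes ntb_tx_copy_callback(entry, res) when done */
	txd->callback_result = ntb_tx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

Because the CPU fallback (ntb_memcpy_tx) ends by calling
ntb_tx_copy_callback(entry, NULL) itself, the DMA and memcpy paths converge
on the same completion logic above.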