static int hci_dma_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n)

in master/mipi-i3c-hci/dma.c [354:437]

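Enqueues n transfer descriptors onto DMA ring 0. Each loop iteration
writes the command descriptor and the Data Buffer Descriptor Structure
into the next ring slot, DMA-maps the payload when there is one, and
records which ring/slot the transfer landed in. The hardware enqueue
pointer is published only at the end, under the ring lock, once every
entry is in place; catching up with the dequeue pointer first unwinds
the DMA mappings and aborts with -EBUSY.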

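The struct hci_xfer fields touched here, as an abridged sketch inferred
from the usage below (the full definition in the driver's hci.h has
more members, and the exact types are assumptions):

struct hci_xfer {
	u32 cmd_desc[4];	/* command descriptor (2 or 4 DWORDs used) */
	void *data;		/* payload buffer, NULL when none */
	unsigned int data_len;	/* payload length in bytes */
	bool rnw;		/* read (device to memory) when true */
	dma_addr_t data_dma;	/* mapped DMA address of data */
	int ring_number;	/* ring the transfer was queued on */
	int ring_entry;		/* slot index within that ring */
	/* ... */
};
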
static int hci_dma_queue_xfer(struct i3c_hci *hci,
			      struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i, ring, enqueue_ptr;
	u32 op1_val, op2_val;

	/* For now we only use ring 0 */
	ring = 0;
	rh = &rings->headers[ring];

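	/* start filling at the ring's current enqueue position */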
	op1_val = rh_reg_read(RING_OPERATION1);
	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

		/* store cmd descriptor */
		*ring_data++ = xfer->cmd_desc[0];
		*ring_data++ = xfer->cmd_desc[1];
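		/* v2 command descriptors are 4 DWORDs instead of 2 */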
		if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
			*ring_data++ = xfer->cmd_desc[2];
			*ring_data++ = xfer->cmd_desc[3];
		}

		/* first word of Data Buffer Descriptor Structure */
		if (!xfer->data)
			xfer->data_len = 0;
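		/* IOC on the last entry only: one IRQ covers the batch */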
		*ring_data++ =
			FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
			((i == n - 1) ? DATA_BUF_IOC : 0);

		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
		if (xfer->data) {
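			/* reads map from the device, writes map to it */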
			xfer->data_dma =
				dma_map_single(&hci->master.dev,
					       xfer->data,
					       xfer->data_len,
					       xfer->rnw ?
						  DMA_FROM_DEVICE :
						  DMA_TO_DEVICE);
			if (dma_mapping_error(&hci->master.dev,
					      xfer->data_dma)) {
				hci_dma_unmap_xfer(hci, xfer_list, i);
				return -ENOMEM;
			}
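			/* split the (possibly 64-bit) DMA address low/high */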
			*ring_data++ = lo32(xfer->data_dma);
			*ring_data++ = hi32(xfer->data_dma);
		} else {
			*ring_data++ = 0;
			*ring_data++ = 0;
		}

		/* remember corresponding xfer struct */
		rh->src_xfers[enqueue_ptr] = xfer;
		/* remember corresponding ring/entry for this xfer structure */
		xfer->ring_number = ring;
		xfer->ring_entry = enqueue_ptr;

		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

		/*
		 * We may update the hardware view of the enqueue pointer
		 * only as long as we haven't caught up with the dequeue
		 * pointer: equal pointers mean an empty ring, so one slot
		 * must always stay unused to tell "full" apart from "empty".
		 */
		op2_val = rh_reg_read(RING_OPERATION2);
		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
			/* the ring is full */
			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
			return -EBUSY;
		}
	}

	/* take care to update the hardware enqueue pointer atomically */
	spin_lock_irq(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_ENQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock_irq(&rh->lock);

	return 0;
}
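
The lo32()/hi32() accessors split a (possibly 64-bit) dma_addr_t into
the two address DWORDs of the descriptor. A rough sketch of how this
version of the driver defines them (the exact definitions live in the
driver source):

#define lo32(x) ((u32)(x))
#define hi32(x) ((u32)((u64)(x) >> 32))

Because the full-ring test fires as soon as the incremented enqueue
pointer would equal the dequeue pointer, one slot always stays unused:
a ring with xfer_entries slots holds at most xfer_entries - 1 queued
transfers. A minimal illustration of that arithmetic (hypothetical
helper, not part of the driver):

static inline unsigned int ring_free_slots(unsigned int enq,
					   unsigned int deq,
					   unsigned int entries)
{
	/* writable slots left before enq would collide with deq */
	return (deq + entries - enq - 1) % entries;
}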