/*
 * hci_dma_process_ibi()
 * from master/mipi-i3c-hci/dma.c [582:729]
 */

/*
 * Process one complete IBI (In-Band Interrupt) sequence from the IBI
 * status/data rings of ring header @rh and deliver its payload to the
 * I3C core.
 *
 * The controller enqueues one status descriptor per received IBI chunk;
 * the pending descriptors live between the software dequeue pointer
 * (RING_OPERATION1) and the hardware enqueue pointer (RING_OPERATION2).
 * This function walks those descriptors to determine the IBI's target
 * address, total payload size and chunk count, copies the payload out of
 * the (possibly wrapping) data ring into a free IBI slot, and queues the
 * slot upstream.  On any error the IBI is dropped, but its status entries
 * and data chunks are still reclaimed so the rings keep flowing.
 *
 * If no descriptor with IBI_LAST_STATUS has arrived yet, the sequence is
 * incomplete: return without consuming anything and wait to be called
 * again when more status entries are available.
 */
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_dma_dev_ibi_data *dev_ibi;
	struct i3c_ibi_slot *slot;
	u32 op1_val, op2_val, ibi_status_error;
	unsigned int ptr, enq_ptr, deq_ptr;
	unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
	int ibi_addr, last_ptr;
	void *ring_ibi_data;
	dma_addr_t ring_ibi_data_dma;

	/* snapshot the ring window: entries in [deq_ptr, enq_ptr) are pending */
	op1_val = rh_reg_read(RING_OPERATION1);
	deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

	op2_val = rh_reg_read(RING_OPERATION2);
	enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

	/* -1 sentinels: no address seen yet, no LAST_STATUS entry found yet */
	ibi_status_error = 0;
	ibi_addr = -1;
	ibi_chunks = 0;
	ibi_size = 0;
	last_ptr = -1;

	/* let's find all we can about this IBI */
	for (ptr = deq_ptr; ptr != enq_ptr;
	     ptr = (ptr + 1) % rh->ibi_status_entries) {
		u32 ibi_status, *ring_ibi_status;
		unsigned int chunks;

		/*
		 * NOTE(review): the status ring is read directly with no
		 * dma_sync_*() call here — presumably it is a coherent DMA
		 * allocation (ibi_status_sz is in bytes since rh->ibi_status
		 * arithmetic is byte-based); confirm against the setup code.
		 */
		ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
		ibi_status = *ring_ibi_status;
		DBG("status = %#x", ibi_status);

		if (ibi_status_error) {
			/* we no longer care */
		} else if (ibi_status & IBI_ERROR) {
			ibi_status_error = ibi_status;
		} else if (ibi_addr ==  -1) {
			/* first entry establishes the IBI's target address */
			ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
			/* the address changed unexpectedly */
			ibi_status_error = ibi_status;
		}

		/*
		 * Keep counting chunks even after an error so the done path
		 * can still release everything back to the hardware.
		 */
		chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
		ibi_chunks += chunks;
		if (!(ibi_status & IBI_LAST_STATUS)) {
			/* intermediate entries carry full chunks only */
			ibi_size += chunks * rh->ibi_chunk_sz;
		} else {
			/* last entry carries the exact residual byte count */
			ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
			last_ptr = ptr;
			break;
		}
	}

	/* validate what we've got */

	if (last_ptr == -1) {
		/* this IBI sequence is not yet complete */
		DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
		return;
	}
	/* consume everything up to and including the LAST_STATUS entry */
	deq_ptr = last_ptr + 1;
	deq_ptr %= rh->ibi_status_entries;

	if (ibi_status_error) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
		goto done;
	}

	/* determine who this is for */
	dev = i3c_hci_addr_to_dev(hci, ibi_addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi_addr);
		goto done;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	if (ibi_size > dev_ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi_size, dev_ibi->max_len);
		goto done;
	}

	/*
	 * This ring model is not suitable for zero-copy processing of IBIs.
	 * We have the data chunk ring wrap-around to deal with, meaning
	 * that the payload might span multiple chunks beginning at the
	 * end of the ring and wrap to the start of the ring. Furthermore
	 * there is no guarantee that those chunks will be released in order
	 * and in a timely manner by the upper driver. So let's just copy
	 * them to a discrete buffer. In practice they're supposed to be
	 * small anyway.
	 */
	slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
		goto done;
	}

	/* copy first part of the payload */
	ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
	ring_ibi_data = rh->ibi_data + ibi_data_offset;
	ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
	/* bytes available before the data ring wraps back to its start */
	first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
			* rh->ibi_chunk_sz;
	if (first_part > ibi_size)
		first_part = ibi_size;
	dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
				first_part, DMA_FROM_DEVICE);
	memcpy(slot->data, ring_ibi_data, first_part);

	/* copy second part if any */
	if (ibi_size > first_part) {
		/* we wrap back to the start and copy remaining data */
		ring_ibi_data = rh->ibi_data;
		ring_ibi_data_dma = rh->ibi_data_dma;
		dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
					ibi_size - first_part, DMA_FROM_DEVICE);
		memcpy(slot->data + first_part, ring_ibi_data,
		       ibi_size - first_part);
	}

	/* submit it */
	slot->dev = dev;
	slot->len = ibi_size;
	i3c_master_queue_ibi(dev, slot);

done:
	/*
	 * Take care to update the ibi dequeue pointer atomically: the
	 * register holds other fields too, so do a locked read-modify-write.
	 * NOTE(review): plain spin_lock (no irqsave) — presumably this only
	 * runs from IRQ context or with IRQs already disabled; confirm.
	 */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_IBI_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);

	/* update the chunk pointer (wraps around the data ring) */
	rh->ibi_chunk_ptr += ibi_chunks;
	rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

	/* and tell the hardware about freed chunks */
	rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}