/* pdc_send_data() — excerpt from bcm-pdc-mailbox.c, lines 1194-1254 */


/**
 * pdc_send_data() - mailbox framework handler called to submit an SPU
 * request: reserves rx descriptors for the response and writes tx
 * descriptors to initiate the transfer.
 * @chan:  The mailbox channel the request arrived on. con_priv holds the
 *         pdc_state with the tx/rx descriptor rings.
 * @data:  The brcm_message to transmit. Only BRCM_MESSAGE_SPU is handled.
 *
 * The channel spinlock held by the mailbox framework serializes calls, so
 * the ring-space check and the full descriptor sequence below happen
 * without another thread interleaving.
 *
 * Return: PDC_SUCCESS on success,
 *         -ENOTSUPP if the message type is not BRCM_MESSAGE_SPU,
 *         -EIO if DMA mapping of a scatterlist fails,
 *         -ENOSPC if the tx or rx ring lacks room for the descriptors
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor to happen without another
	 * thread getting in. The channel spin lock in the mailbox framework
	 * ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1))) {
		/*
		 * No descriptors were written; undo the DMA mappings created
		 * above so the caller can safely re-map on a later retry.
		 */
		if (dst_nent)
			dma_unmap_sg(dev, mssg->spu.dst, dst_nent,
				     DMA_FROM_DEVICE);
		if (src_nent)
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
		return -ENOSPC;
	}

	/* Create rx descriptors to SPU catch response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}