static int sba_map_sg_attrs()

in hp/common/sba_iommu.c [1416:1516]


/*
** Forward declaration: the error path in sba_map_sg_attrs() below
** unwinds partially-built mappings via sba_unmap_sg_attrs(), which is
** defined later in this file.
*/
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs);
/**
 * sba_map_sg_attrs - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/core-api/dma-api-howto.rst; a hedged caller-side
 * sketch also follows this function.
 */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Bypass the IOVA space entirely when the device can address
	** everything the IOC can, i.e. no bit set in ioc->dma_mask is
	** missing from the device's dma_mask (worked example below).
	*/
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_page(dev, sg_page(sglist),
				sglist->offset, sglist->length, dir, attrs);
		if (dma_mapping_error(dev, sglist->dma_address))
			return -EIO;
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space.
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** Without this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces the two-pass
	** algorithm: coalesce first, then fill the pdir.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		/* Coalescing failed to allocate I/O pdir space; unwind
		 * any partially-built mappings before failing. */
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return -ENOMEM;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
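
Since the kernel-doc above defers to dma-api-howto.rst, a minimal
caller-side sketch may help. It assumes the usual wiring on ia64 HP
zx1/sx1000 systems, where dma_map_sg() dispatches to sba_map_sg_attrs()
through the platform's dma_map_ops (that wiring is not shown in this
excerpt); example_map() and its logging are hypothetical, for
illustration only.

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* On this platform the call should land in sba_map_sg_attrs()
	 * (assumed dma_map_ops wiring, not shown in this excerpt). */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -EIO;

	/* Coalescing may shrink the list: iterate over 'mapped' entries
	 * and program the device from the DMA fields only. */
	for_each_sg(sgl, sg, mapped, i)
		pr_info("seg %d: addr %pad len %u\n",
			i, &sg_dma_address(sg), sg_dma_len(sg));

	/* Unmap with the ORIGINAL nents, as the DMA API requires. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}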
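
The ALLOW_IOV_BYPASS_SG test near the top of the function is easy to
misread, so here is a small standalone illustration of the mask
arithmetic (ordinary userspace C; can_bypass() and the mask values are
invented for the example):

#include <stdint.h>
#include <stdio.h>

/* The bypass test from sba_map_sg_attrs(): bypass is legal only when
 * no bit of the IOC's mask falls outside the device's mask, i.e. the
 * device can address everything the IOC can. */
static int can_bypass(uint64_t ioc_mask, uint64_t dev_mask)
{
	return (ioc_mask & ~dev_mask) == 0;
}

int main(void)
{
	/* 64-bit device against a 64-bit IOC mask: bypass allowed. */
	printf("%d\n", can_bypass(~0ULL, ~0ULL));         /* prints 1 */

	/* 32-bit device (mask 0xffffffff) cannot cover a 64-bit IOC
	 * mask, so the scatterlist must go through the IOVA space. */
	printf("%d\n", can_bypass(~0ULL, 0xffffffffULL)); /* prints 0 */
	return 0;
}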