static int dma_4v_map_sg()

in kernel/pci_sun4v.c [468:621]
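
Maps a scatterlist for DMA on a sun4v PCI bus: each segment gets translation
entries allocated in the IOMMU (or ATU) table, the entries are programmed
through the hypervisor in batches, and segments whose DMA addresses come out
contiguous are merged where the device's segment-size and boundary limits
allow. Returns the number of DMA segments produced, or -EINVAL on failure.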


static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return -EINVAL;
	atu = iommu->atu;

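	/* Hypervisor mapping attributes: device-readable always, writable
	 * unless the transfer is DMA_TO_DEVICE, relaxed-ordered on request.
	 */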
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

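	/* The mapping batch state is per-CPU, so interrupts stay off from
	 * here until the batch has been flushed.
	 */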
	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

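	/* Limits consulted below when deciding whether adjacent segments
	 * can be merged.
	 */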
	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

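	/* Pick the translation table, legacy IOMMU or ATU, based on the
	 * device's DMA mask.
	 */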
	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Skip zero-length segments */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with the previous segment,
			 * - the combined length would exceed the device's max segment size, or
			 * - the merged segment would cross the device's segment boundary
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Remember the next expected DMA address for the contiguity check */
		dma_next = dma_addr + slen;
	}

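	/* Flush whatever is still queued in the mapping batch. */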
	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

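	/* If segments were merged, terminate the mapped list with a
	 * zero-length entry.
	 */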
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
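	/* Back out: release the IOMMU ranges of every segment mapped so far. */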
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return -EINVAL;
}
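
For context, drivers do not call this op directly; it is reached through the
generic DMA API via the bus's dma_map_ops, so on sun4v PCI a dma_map_sg() call
should end up here. Below is a minimal, hypothetical driver-side sketch, not
taken from pci_sun4v.c: example_map_two_buffers(), dev, buf0/buf1 and their
lengths are assumptions, and on recent kernels dma_map_sg() reports the op's
negative return value as 0.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map two kernel buffers as a two-entry scatterlist. */
static int example_map_two_buffers(struct device *dev,
				   void *buf0, unsigned int len0,
				   void *buf1, unsigned int len1)
{
	struct scatterlist sgl[2];
	int nents;

	sg_init_table(sgl, ARRAY_SIZE(sgl));
	sg_set_buf(&sgl[0], buf0, len0);
	sg_set_buf(&sgl[1], buf1, len1);

	/* On sun4v PCI this dispatches to dma_4v_map_sg(). */
	nents = dma_map_sg(dev, sgl, ARRAY_SIZE(sgl), DMA_TO_DEVICE);
	if (nents == 0)
		return -ENOMEM;

	/* ... program the device with sg_dma_address()/sg_dma_len() over
	 * the first 'nents' entries ...
	 */

	/* Unmap with the original entry count, not 'nents'. */
	dma_unmap_sg(dev, sgl, ARRAY_SIZE(sgl), DMA_TO_DEVICE);
	return 0;
}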