static int dma_4u_map_sg()

Defined in kernel/iommu.c [432:582]

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return -EINVAL;

	spin_lock_irqsave(&iommu->lock, flags);

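	/*
	 * IOMMUs that support context-based flushing get a streaming-cache
	 * flush context here; the context number is encoded into every
	 * IOPTE written below.
	 */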
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

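	/*
	 * Choose the IOPTE protection bits: streaming mappings when the
	 * device sits behind an enabled streaming cache, consistent
	 * (coherent) mappings otherwise.  Grant write access unless the
	 * device will only read memory (DMA_TO_DEVICE).
	 */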
	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

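	/*
	 * Coalescing state: 'outs' is the output segment currently being
	 * built, 'segstart' the first input entry of that segment, and
	 * 'handle' an allocation hint threaded through successive
	 * iommu_tbl_range_alloc() calls.
	 */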
	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

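	/*
	 * Merging is limited by the device's maximum segment size and
	 * segment boundary; base_shift expresses the table base in IO-page
	 * units for the is_span_boundary() checks below.
	 */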
	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

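		/* First IOPTE of the run we just allocated */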
		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with the
			 *   previous allocation,
			 * - merging would exceed the device's max segment size, or
			 * - the merged segment would cross a segment boundary.
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Remember the expected next DMA address for the contiguity check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

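	/*
	 * If coalescing produced fewer segments than input entries, mark
	 * the entry after the last valid segment with a zero dma_length
	 * so the unmap path knows where the mapped list ends.
	 */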
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
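	/*
	 * Back out every segment mapped so far: restore dummy IOPTEs and
	 * release the allocated IOMMU ranges, stopping once the current
	 * output segment has been cleaned up.
	 */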
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return -EINVAL;
}
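
For context, the sketch below shows how a driver typically reaches this routine: it never calls dma_4u_map_sg() directly, but goes through the generic DMA API, whose dma_map_sg() dispatches to the bus dma_map_ops (this function on sun4u). The helper example_map_for_device() and its pre-built sg_table are hypothetical and illustrative only; the point is that the returned segment count can be smaller than the number of input entries because of the coalescing performed above.

/* Hypothetical caller-side example; not part of kernel/iommu.c. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_for_device(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int nents, i;

	/* Dispatches to the bus dma_map_ops, e.g. dma_4u_map_sg() on sun4u. */
	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
	if (nents <= 0)
		return -EIO;

	/* nents may be smaller than orig_nents if segments were merged. */
	for_each_sg(sgt->sgl, sg, nents, i)
		pr_debug("seg %d: dma %pad len %u\n",
			 i, &sg_dma_address(sg), sg_dma_len(sg));

	/* ... program the device with the nents coalesced segments ... */

	/* Unmap with the original entry count, as the DMA API requires. */
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
	return 0;
}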