in kernel/pci_iommu.c [631:707]
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

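	/* dac_allowed records whether this device may use 64-bit DAC
	   (dual address cycle) addressing, which lets mappings bypass
	   the scatter-gather arena entirely.  */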
	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
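	/* A single entry needs no coalescing: pci_map_single_1() tries
	   the direct-map window, then DAC, then an arena allocation.  */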
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return -EIO;
		return 1;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
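	/* sg_classify() coalesces contiguous entries into runs: one
	   leader per run keeps the combined length plus a flag saying
	   whether arena mapping is required, while the followers are
	   tagged with a negative dma_address.  Page-aligned runs that
	   are not physically contiguous are merged only when the
	   machine has a scatter-gather TLB (mv_pci_tbi).  */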
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
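	/* mv_pci_tbi is the machine vector's SG TLB invalidate hook; its
	   presence means the controller provides scatter-gather arenas.
	   Prefer the PCI arena, falling back to the ISA arena when the
	   PCI window lies beyond the device's DMA mask.  Without the
	   hook, only direct (or DAC) mappings are possible.  */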
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
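	/* Followers were tagged with a negative dma_address above and
	   are skipped here; sg_fill() maps each leader (directly, via
	   DAC, or through arena page tables) and fixes up its followers,
	   compacting the results into 'out'.  */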
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
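	/* Coalescing may leave fewer mapped segments than were passed
	   in; a zero dma_length terminator tells the unmap path where
	   the mapped portion ends.  */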
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0) {
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
		return -ENOMEM;
	}
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return -ENOMEM;
}
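
For reference, a minimal, hypothetical driver-side sketch (not part of this file) of how a mapping request reaches this operation through the generic DMA API; the function and buffer names are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical example: map an already-built scatterlist for a DMA write
   to the device.  On Alpha, dma_map_sg() dispatches through the platform
   dma_map_ops to alpha_pci_map_sg() above and returns the (possibly
   smaller) number of coalesced segments, or 0 on failure.  */
static int example_dma_to_device(struct device *dev, struct scatterlist *sgl,
				 int nents)
{
	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (count == 0)
		return -EIO;

	/* ... program the device using sg_dma_address()/sg_dma_len()
	   over the first 'count' entries ... */

	/* Unmap with the original nents, not the coalesced count.  */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}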