in mm/pmb.c [335:408]
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long orig_addr, orig_size;
        unsigned long flags, pmb_flags;
        int i, mapped;

        /* 16MB is the smallest PMB entry size; reject anything smaller. */
        if (size < SZ_16M)
                return -EINVAL;
        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;
        if (pmb_mapping_exists(vaddr, phys, size))
                return 0;

        orig_addr = vaddr;
        orig_size = size;

        flush_tlb_kernel_range(vaddr, vaddr + size);

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        /*
         * Greedily cover the region with the largest entry sizes
         * that still fit in what remains.
         */
        do {
                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i].size)
                                continue;

                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
                        if (IS_ERR(pmbe)) {
                                /* Roll back whatever has been mapped so far. */
                                pmb_unmap_entry(pmbp, mapped);
                                return PTR_ERR(pmbe);
                        }

                        raw_spin_lock_irqsave(&pmbe->lock, flags);

                        pmbe->size = pmb_sizes[i].size;

                        __set_pmb_entry(pmbe);

                        phys  += pmbe->size;
                        vaddr += pmbe->size;
                        size  -= pmbe->size;

                        /*
                         * Link adjacent entries that span multiple PMB
                         * entries for easier tear-down.
                         */
                        if (likely(pmbp)) {
                                raw_spin_lock_nested(&pmbp->lock,
                                                     SINGLE_DEPTH_NESTING);
                                pmbp->link = pmbe;
                                raw_spin_unlock(&pmbp->lock);
                        }

                        pmbp = pmbe;

                        /*
                         * Instead of trying smaller sizes on every
                         * iteration (even if we succeed in allocating
                         * space), try using pmb_sizes[i].size again.
                         */
                        i--;
                        mapped++;

                        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
                }
        } while (size >= SZ_16M);

        flush_cache_vmap(orig_addr, orig_addr + orig_size);

        return 0;
}
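
For context, a minimal sketch of how a boot-time caller might use this function. Everything here beyond pmb_bolt_mapping(), SZ_64M, and PAGE_KERNEL is made up for illustration: the virtual and physical addresses, the function name, and the initcall level are hypothetical, and on real hardware the chosen vaddr would still have to pass pmb_addr_valid().

#include <linux/err.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/mmu.h>            /* pmb_bolt_mapping() on sh */
#include <asm/pgtable.h>        /* PAGE_KERNEL */

static int __init example_pmb_bolt_init(void)
{
        /*
         * Hypothetical 64MB window; both addresses are illustrative,
         * 64MB-aligned values, not taken from the kernel sources.
         */
        int ret = pmb_bolt_mapping(0xa8000000UL,        /* vaddr */
                                   0x48000000ULL,       /* phys  */
                                   SZ_64M, PAGE_KERNEL);
        if (ret)
                pr_err("pmb: bolting 64MB window failed: %d\n", ret);

        return ret;
}
arch_initcall(example_pmb_bolt_init);

Given the largest-first pmb_sizes table (512MB, 128MB, 64MB, 16MB), the i-- retry above means a 96MB request is covered as one 64MB entry followed by two 16MB entries, each linked to its predecessor so the whole chain can be torn down as a unit.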