in kernel/dma.c [98:125]
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	/* Cache geometry is per-CPU; dcache_block_size is the line size. */
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/*
		 * Flush the dcache for the requested range, one cache line
		 * at a time, by writing each line address to the data cache
		 * block flush register (SPR_DCBFR).
		 */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/*
		 * Invalidate the dcache for the requested range via the
		 * data cache block invalidate register (SPR_DCBIR).
		 */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush or invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
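
For context, a minimal sketch of how this hook is typically exercised: on a non-coherent platform, a driver's streaming-DMA calls go through the generic mapping core, which invokes arch_sync_dma_for_device() before handing the buffer to hardware. The function, device, and buffer names below (example_tx, mydev, buf, len) are hypothetical placeholders, not part of the file above.

#include <linux/dma-mapping.h>

/* Hypothetical driver path: map a CPU-filled buffer for the device to read. */
static int example_tx(struct device *mydev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * For DMA_TO_DEVICE the mapping core ends up in
	 * arch_sync_dma_for_device(), i.e. the SPR_DCBFR flush loop above,
	 * so the device observes the data the CPU just wrote.
	 */
	handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mydev, handle))
		return -ENOMEM;

	/* ... program the device with "handle" and start the transfer ... */

	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
	return 0;
}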