in kernel/io.c [467:512]
/*
 * Copy data from I/O memory space to "real" memory space.
 *
 * Fast paths: when source and destination are co-aligned with
 * respect to an access size, transfer naturally aligned quads,
 * then longwords, then words.  Any remainder - or a completely
 * misaligned request - falls through to the bytewise loop.
 * A full memory barrier at the end orders the MMIO reads with
 * respect to subsequent accesses.
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		while (count >= 8) {
			*(u64 *)to = __raw_readq(from);
			to += 8;
			from += 8;
			count -= 8;
		}
	}
	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		while (count >= 4) {
			*(u32 *)to = __raw_readl(from);
			to += 4;
			from += 4;
			count -= 4;
		}
	}
	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		while (count >= 2) {
			*(u16 *)to = __raw_readw(from);
			to += 2;
			from += 2;
			count -= 2;
		}
	}
	/* Leftover (or never-aligned) tail, one byte at a time. */
	while (count > 0) {
		*(u8 *)to = __raw_readb(from);
		to++;
		from++;
		count--;
	}
	mb();
}