in mm/cacheflush.c [201:232]
/*
 * Keep the caches coherent with a PTE that has just been installed or
 * updated for @address in @vma: refresh the TLB entry, write back any
 * dirty D-cache lines for the backing page, and resolve alias/I-cache
 * state for file-backed mappings.
 */
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
	pte_t entry = *ptep;
	unsigned long frame = pte_pfn(entry);
	struct page *pg;
	struct address_space *map;

	/* Make the hardware see the new translation first. */
	reload_tlb_page(vma, address, entry);

	if (!pfn_valid(frame))
		return;

	pg = pfn_to_page(frame);

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (pg == ZERO_PAGE(0))
		return;

	map = page_mapping_file(pg);

	/*
	 * Write back the page's D-cache lines only on the transition of
	 * PG_dcache_clean from clear to set; test_and_set_bit makes the
	 * check and the mark a single atomic step.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &pg->flags))
		__flush_dcache_page(map, pg);

	/* File-backed pages may be mapped at aliasing virtual addresses. */
	if (map) {
		flush_aliases(map, pg);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, pg);
	}
}