in kernel/vdso.c [114:195]
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        struct vm_area_struct *vma;
        unsigned long addr = 0;
        pgprot_t prot;
        int ret, vvar_page_num = 2;
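        /*
         * The vvar area in front of the vdso text holds the vdso data
         * page and, when the platform provides one, a mapping of the
         * timer registers; hence vvar_page_num is 2 or 1.
         */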
        vdso_text_len = vdso_pages << PAGE_SHIFT;

        if (timer_mapping_base == EMPTY_VALUE)
                vvar_page_num = 1;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + vvar_page_num * PAGE_SIZE;
#ifdef CONFIG_CPU_CACHE_ALIASING
        vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
#endif
        if (mmap_write_lock_killable(mm))
                return -EINTR;

        addr = vdso_random_addr(vdso_mapping_len);
        vdso_base = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                goto up_fail;
        }
#ifdef CONFIG_CPU_CACHE_ALIASING
        {
                unsigned int aliasing_mask =
                    L1_cache_info[DCACHE].aliasing_mask;
                unsigned int page_colour_ofs;

                page_colour_ofs = ((unsigned int)vdso_data & aliasing_mask) -
                    (vdso_base & aliasing_mask);
                vdso_base += page_colour_ofs & aliasing_mask;
        }
#endif
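        /* Install the read-only vvar VMA covering the data (and timer) pages. */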
        vma = _install_special_mapping(mm, vdso_base, vvar_page_num * PAGE_SIZE,
                                       VM_READ | VM_MAYREAD, &vdso_spec[0]);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }
        /* Map the vdso data page to user space */
        ret = io_remap_pfn_range(vma, vdso_base,
                                 virt_to_phys(vdso_data) >> PAGE_SHIFT,
                                 PAGE_SIZE, vma->vm_page_prot);
        if (ret)
                goto up_fail;
        /*
         * Map the timer registers to user space, but only when they
         * exist: with vvar_page_num == 1 no page was reserved for them.
         */
        if (timer_mapping_base != EMPTY_VALUE) {
                vdso_base += PAGE_SIZE;
                prot = __pgprot(_PAGE_V | _PAGE_M_UR_KR | _PAGE_D | _PAGE_C_DEV);
                ret = io_remap_pfn_range(vma, vdso_base,
                                         timer_mapping_base >> PAGE_SHIFT,
                                         PAGE_SIZE, prot);
                if (ret)
                        goto up_fail;
        }
        /* Map the vdso text to user space */
        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                       &vdso_spec[1]);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }
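        /* Success: the vvar and vdso text mappings are in place. */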
        mmap_write_unlock(mm);
        return 0;

up_fail:
        mm->context.vdso = NULL;
        mmap_write_unlock(mm);
        return ret;
}
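
For reference, a user program can locate the text mapping this function installs through the auxiliary vector. AT_SYSINFO_EHDR is the standard Linux mechanism for publishing the vdso base and is not specific to this file; a minimal sketch:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* The kernel advertises the vdso ELF header address here. */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (vdso)
                printf("vdso mapped at %#lx\n", vdso);
        return 0;
}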