/* in bsd/vm/vm_unix.c [2046:2446] */
/*
 * Forward declaration: teardown routine invoked by
 * shared_region_map_and_slide_setup() on its error path.  Releases
 * whatever state setup had acquired so far (per-file state in
 * sr_file_mappings, the shared region reference, and the shared-cache
 * directory vnode).  Definition appears later in this file.
 */
static void shared_region_map_and_slide_cleanup(
	struct proc *p,
	uint32_t files_count,
	struct _sr_file_mappings *sr_file_mappings,
	struct vm_shared_region *shared_region,
	struct vnode *scdir_vp);
/*
 * Setup part of _shared_region_map_and_slide().
 * It had to be broken out of _shared_region_map_and_slide() to
 * prevent compiler inlining from blowing out the stack.
 *
 * Validates the caller-supplied file/mapping descriptors, allocates and
 * populates the per-file state array, resolves the process's shared
 * region and (when scdir_enforce is set) the shared-cache directory
 * vnode, then runs per-file security checks: file readable, vnode is a
 * regular file, MACF mmap check, trust-cache membership or
 * root-ownership, SIP restriction (or root-volume residency when CSR is
 * not configured), parent-directory enforcement, and code-signing
 * coverage of every non-zero-fill mapping.
 *
 * Out parameters (references the caller must eventually release via
 * shared_region_map_and_slide_cleanup()):
 *   *sr_file_mappings  - kheap-allocated array of per-file state
 *                        (fp, vp, file_size, file_control, mappings slice)
 *   *shared_region_ptr - the process's shared region (referenced)
 *   *scdir_vp          - shared-cache directory vnode (only if
 *                        scdir_enforce)
 *
 * Returns 0 on success, or a BSD errno on failure.  On failure this
 * routine calls shared_region_map_and_slide_cleanup() itself and resets
 * all three out parameters to NULL, so the caller must NOT clean up
 * again.
 */
__attribute__((noinline))
static int
shared_region_map_and_slide_setup(
	struct proc *p,
	uint32_t files_count,
	struct shared_file_np *files,
	uint32_t mappings_count,
	struct shared_file_mapping_slide_np *mappings,
	struct _sr_file_mappings **sr_file_mappings,
	struct vm_shared_region **shared_region_ptr,
	struct vnode **scdir_vp,
	struct vnode *rdir_vp)
{
	int error = 0;
	struct _sr_file_mappings *srfmp;
	uint32_t mappings_next;
	struct vnode_attr va;
	off_t fs;
#if CONFIG_MACF
	vm_prot_t maxprot = VM_PROT_ALL;
#endif
	uint32_t i;
	struct vm_shared_region *shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: %p [%d(%s)] -> map\n",
		(void *)VM_KERNEL_ADDRPERM(current_thread()),
		p->p_pid, p->p_comm));

	/*
	 * Bound files_count before it is used to size the allocation
	 * below (the _SR_FILE_MAPPINGS_MAX_FILES cap also prevents the
	 * files_count * sizeof() multiplication from overflowing).
	 */
	if (files_count > _SR_FILE_MAPPINGS_MAX_FILES) {
		error = E2BIG;
		goto done;
	}
	if (files_count == 0) {
		error = EINVAL;
		goto done;
	}
	*sr_file_mappings = kheap_alloc(KHEAP_TEMP, files_count * sizeof(struct _sr_file_mappings), Z_WAITOK);
	if (*sr_file_mappings == NULL) {
		error = ENOMEM;
		goto done;
	}
	/* zero-fill so cleanup can safely inspect partially-initialized entries */
	bzero(*sr_file_mappings, files_count * sizeof(struct _sr_file_mappings));

	/*
	 * Carve the caller's flat mappings[] array into consecutive
	 * per-file slices and record each file's fd/slide.
	 *
	 * NOTE(review): mappings_next is a uint32_t accumulated from
	 * caller-supplied sf_mappings_count values; a wrap here would let
	 * the "> mappings_count" check pass with an out-of-range slice.
	 * Presumably the counts are bounded by validation in the caller —
	 * confirm.
	 */
	mappings_next = 0;
	for (i = 0; i < files_count; i++) {
		srfmp = &(*sr_file_mappings)[i];
		srfmp->fd = files[i].sf_fd;
		srfmp->mappings_count = files[i].sf_mappings_count;
		srfmp->mappings = &mappings[mappings_next];
		mappings_next += srfmp->mappings_count;
		if (mappings_next > mappings_count) {
			error = EINVAL;
			goto done;
		}
		srfmp->slide = files[i].sf_slide;
	}

	if (scdir_enforce) {
		/* get vnode for scdir_path */
		error = vnode_lookup(scdir_path, 0, scdir_vp, vfs_context_current());
		if (error) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)]: "
				"vnode_lookup(%s) failed (error=%d)\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				scdir_path, error));
			goto done;
		}
	}

	/* get the process's shared region (setup in vm_map_exec()) */
	shared_region = vm_shared_region_trim_and_get(current_task());
	*shared_region_ptr = shared_region;
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(): "
			"no shared region\n",
			(void *)VM_KERNEL_ADDRPERM(current_thread()),
			p->p_pid, p->p_comm));
		error = EINVAL;
		goto done;
	}

	/*
	 * Check the shared region matches the current root
	 * directory of this process.  Deny the mapping to
	 * avoid tainting the shared region with something that
	 * doesn't quite belong into it.
	 * (A shared region with no root dir must be paired with the
	 * system root; a chrooted one must match the process's rdir.)
	 */
	struct vnode *sr_vnode = vm_shared_region_root_dir(shared_region);
	if (sr_vnode != NULL ? rdir_vp != sr_vnode : rdir_vp != rootvnode) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: map(%p) root_dir mismatch\n",
			(void *)VM_KERNEL_ADDRPERM(current_thread())));
		error = EPERM;
		goto done;
	}

	/*
	 * Per-file validation and state gathering.  Each iteration takes
	 * a file reference (fp) and a vnode iocount (vp) which are kept
	 * in srfmp for later use; on any failure we jump to done, where
	 * the cleanup routine releases everything acquired so far.
	 */
	for (srfmp = &(*sr_file_mappings)[0];
	    srfmp < &(*sr_file_mappings)[files_count];
	    srfmp++) {
		if (srfmp->mappings_count == 0) {
			/* no mappings here... */
			continue;
		}

		/* get file structure from file descriptor */
		error = fp_get_ftype(p, srfmp->fd, DTYPE_VNODE, EINVAL, &srfmp->fp);
		if (error) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map: "
				"fd=%d lookup failed (error=%d)\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm, srfmp->fd, error));
			goto done;
		}

		/* we need at least read permission on the file */
		if (!(srfmp->fp->fp_glob->fg_flag & FREAD)) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map: "
				"fd=%d not readable\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm, srfmp->fd));
			error = EPERM;
			goto done;
		}

		/* get vnode from file structure */
		error = vnode_getwithref((vnode_t) srfmp->fp->fp_glob->fg_data);
		if (error) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map: "
				"fd=%d getwithref failed (error=%d)\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm, srfmp->fd, error));
			goto done;
		}
		srfmp->vp = (struct vnode *) srfmp->fp->fp_glob->fg_data;

		/* make sure the vnode is a regular file */
		if (srfmp->vp->v_type != VREG) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"not a file (type=%d)\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name, srfmp->vp->v_type));
			error = EINVAL;
			goto done;
		}

#if CONFIG_MACF
		/* pass in 0 for the offset argument because AMFI does not need the offset
		 * of the shared cache */
		error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()),
		    srfmp->fp->fp_glob, VM_PROT_ALL, MAP_FILE, 0, &maxprot);
		if (error) {
			goto done;
		}
#endif /* MAC */

#if XNU_TARGET_OS_OSX && defined(__arm64__)
		/*
		 * Check if the shared cache is in the trust cache;
		 * if so, we can skip the root ownership check.
		 */
#if DEVELOPMENT || DEBUG
		/*
		 * Skip both root ownership and trust cache check if
		 * enforcement is disabled.
		 */
		if (!cs_system_enforcement()) {
			goto after_root_check;
		}
#endif /* DEVELOPMENT || DEBUG */
		struct cs_blob *blob = csvnode_get_blob(srfmp->vp, 0);
		if (blob == NULL) {
			/* no code-signing blob: fall back to the root-ownership check */
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"missing CS blob\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name));
			goto root_check;
		}
		const uint8_t *cdhash = csblob_get_cdhash(blob);
		if (cdhash == NULL) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"missing cdhash\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name));
			goto root_check;
		}
		uint32_t result = pmap_lookup_in_static_trust_cache(cdhash);
		boolean_t in_trust_cache = result & (TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
		if (!in_trust_cache) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"not in trust cache\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name));
			goto root_check;
		}
		/* trust-cache hit: root ownership not required */
		goto after_root_check;
root_check:
#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */

		/* The shared cache file must be owned by root */
		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_uid);
		error = vnode_getattr(srfmp->vp, &va, vfs_context_current());
		if (error) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"vnode_getattr(%p) failed (error=%d)\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				error));
			goto done;
		}
		if (va.va_uid != 0) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"owned by uid=%d instead of 0\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name, va.va_uid));
			error = EPERM;
			goto done;
		}

#if XNU_TARGET_OS_OSX && defined(__arm64__)
after_root_check:
#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */

#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0) {
			/*
			 * SIP is enabled (unrestricted-FS not allowed): the
			 * shared cache file itself must be SIP-protected.
			 */
			VATTR_INIT(&va);
			VATTR_WANTED(&va, va_flags);
			error = vnode_getattr(srfmp->vp, &va, vfs_context_current());
			if (error) {
				SHARED_REGION_TRACE_ERROR(
					("shared_region: %p [%d(%s)] map(%p:'%s'): "
					"vnode_getattr(%p) failed (error=%d)\n",
					(void *)VM_KERNEL_ADDRPERM(current_thread()),
					p->p_pid, p->p_comm,
					(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
					srfmp->vp->v_name,
					(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
					error));
				goto done;
			}
			if (!(va.va_flags & SF_RESTRICTED)) {
				/*
				 * CSR is not configured in CSR_ALLOW_UNRESTRICTED_FS mode, and
				 * the shared cache file is NOT SIP-protected, so reject the
				 * mapping request
				 */
				SHARED_REGION_TRACE_ERROR(
					("shared_region: %p [%d(%s)] map(%p:'%s'), "
					"vnode is not SIP-protected. \n",
					(void *)VM_KERNEL_ADDRPERM(current_thread()),
					p->p_pid, p->p_comm,
					(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
					srfmp->vp->v_name));
				error = EPERM;
				goto done;
			}
		}
#else /* CONFIG_CSR */
		/* Devices without SIP/ROSP need to make sure that the shared cache is on the root volume. */
		assert(rdir_vp != NULL);
		if (srfmp->vp->v_mount != rdir_vp->v_mount) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"not on process's root volume\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name));
			error = EPERM;
			goto done;
		}
#endif /* CONFIG_CSR */

		if (scdir_enforce) {
			/*
			 * ensure parent is scdir_vp
			 * NOTE(review): vnode_parent() presumably returns the
			 * cached parent vnode — confirm it cannot be NULL/stale
			 * here, which would (safely) fail this comparison.
			 */
			assert(*scdir_vp != NULL);
			if (vnode_parent(srfmp->vp) != *scdir_vp) {
				SHARED_REGION_TRACE_ERROR(
					("shared_region: %p [%d(%s)] map(%p:'%s'): "
					"shared cache file not in %s\n",
					(void *)VM_KERNEL_ADDRPERM(current_thread()),
					p->p_pid, p->p_comm,
					(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
					srfmp->vp->v_name, scdir_path));
				error = EPERM;
				goto done;
			}
		}

		/* get vnode size */
		error = vnode_size(srfmp->vp, &fs, vfs_context_current());
		if (error) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"vnode_size(%p) failed (error=%d)\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp), error));
			goto done;
		}
		srfmp->file_size = fs;

		/* get the file's memory object handle */
		srfmp->file_control = ubc_getobject(srfmp->vp, UBC_HOLDOBJECT);
		if (srfmp->file_control == MEMORY_OBJECT_CONTROL_NULL) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: %p [%d(%s)] map(%p:'%s'): "
				"no memory object\n",
				(void *)VM_KERNEL_ADDRPERM(current_thread()),
				p->p_pid, p->p_comm,
				(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
				srfmp->vp->v_name));
			error = EINVAL;
			goto done;
		}

		/* check that the mappings are properly covered by code signatures */
		if (!cs_system_enforcement()) {
			/* code signing is not enforced: no need to check */
		} else {
			for (i = 0; i < srfmp->mappings_count; i++) {
				if (srfmp->mappings[i].sms_init_prot & VM_PROT_ZF) {
					/* zero-filled mapping: not backed by the file */
					continue;
				}
				if (ubc_cs_is_range_codesigned(srfmp->vp,
				    srfmp->mappings[i].sms_file_offset,
				    srfmp->mappings[i].sms_size)) {
					/* this mapping is fully covered by code signatures */
					continue;
				}
				SHARED_REGION_TRACE_ERROR(
					("shared_region: %p [%d(%s)] map(%p:'%s'): "
					"mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] "
					"is not code-signed\n",
					(void *)VM_KERNEL_ADDRPERM(current_thread()),
					p->p_pid, p->p_comm,
					(void *)VM_KERNEL_ADDRPERM(srfmp->vp),
					srfmp->vp->v_name,
					i, srfmp->mappings_count,
					srfmp->mappings[i].sms_address,
					srfmp->mappings[i].sms_size,
					srfmp->mappings[i].sms_file_offset,
					srfmp->mappings[i].sms_max_prot,
					srfmp->mappings[i].sms_init_prot));
				error = EINVAL;
				goto done;
			}
		}
	}
done:
	/*
	 * On failure, release everything acquired above and NULL the out
	 * parameters so the caller sees a clean slate and cannot
	 * double-release.
	 */
	if (error != 0) {
		shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, shared_region, *scdir_vp);
		*sr_file_mappings = NULL;
		*shared_region_ptr = NULL;
		*scdir_vp = NULL;
	}
	return error;
}