in Source/PLCrashAsyncMObject.c [104:239]
static plcrash_error_t plcrash_async_mobject_remap_pages_workaround (mach_port_t task,
                                                                     pl_vm_address_t task_addr,
                                                                     pl_vm_size_t length,
                                                                     bool require_full,
                                                                     pl_vm_address_t *result,
                                                                     pl_vm_size_t *result_length)
{
    kern_return_t kt;

    /* Compute the total required page size. */
    pl_vm_address_t base_addr = mach_vm_trunc_page(task_addr);
    pl_vm_size_t total_size = mach_vm_round_page(length + (task_addr - base_addr));
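    /* task_addr need not be page-aligned: the requested length is extended by the leading in-page
     * offset (task_addr - base_addr) before rounding, so the page-aligned range still covers the
     * caller's full request. */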

    /*
     * If short mappings are permitted, determine the actual mappable size of the target range. Due
     * to rdar://13707406 (update_dyld_shared_cache appears to write invalid LINKEDIT vmsize), an
     * LC_SEGMENT-reported VM size may be far larger than the actual mapped pages. This would result
     * in us making large (e.g., 36MB) allocations in cases where the mappable range is actually much
     * smaller, which can trigger out-of-memory conditions on smaller devices.
     */
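    /* The probe below relies on mach_make_memory_entry_64(): a successful call may return an entry
     * covering less than the requested size (e.g., when the range spans multiple VM regions), so the
     * loop keeps probing until the whole range is verified or a call fails at the first unmappable page. */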
    if (!require_full) {
        pl_vm_size_t verified_size = 0;

        while (verified_size < total_size) {
            memory_object_size_t entry_length = total_size - verified_size;
            mach_port_t mem_handle;

            /* Fetch an entry reference */
            kt = mach_make_memory_entry_64(task, &entry_length, base_addr + verified_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
            if (kt != KERN_SUCCESS) {
                /* Once we hit an unmappable page, break */
                break;
            }

            /* Drop the reference */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }

            /* Note the size */
            verified_size += entry_length;
        }

        /* No valid page found at the task_addr */
        if (verified_size == 0) {
            PLCF_DEBUG("No mappable pages found at 0x%" PRIx64, (uint64_t) task_addr);
            return PLCRASH_ENOMEM;
        }

        /* Reduce the total size to the verified size */
        if (verified_size < total_size)
            total_size = verified_size;
    }

    /*
     * Set aside a memory range large enough for the total requested number of pages. Ideally the kernel
     * will lazy-allocate the backing physical pages so that we don't waste actual memory on this
     * pre-emptive page range reservation.
     */
    pl_vm_address_t mapping_addr = 0x0;
    pl_vm_size_t mapped_size = 0;
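    /* Prefer the 64-bit-safe mach_vm_* interface where it is available (PL_HAVE_MACH_VM); otherwise
     * fall back to the legacy vm_* calls, which use the task's native address width. */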
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#else
    kt = vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#endif

    if (kt != KERN_SUCCESS) {
        PLCF_DEBUG("Failed to allocate a target page range for the page remapping: %d", kt);
        return PLCRASH_EINTERNAL;
    }

    /* Map the source pages into the allocated region, overwriting the existing page mappings */
    while (mapped_size < total_size) {
        /* Create a reference to the target pages. The returned entry may be smaller than the total length. */
        memory_object_size_t entry_length = total_size - mapped_size;
        mach_port_t mem_handle;
        kt = mach_make_memory_entry_64(task, &entry_length, base_addr + mapped_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
        if (kt != KERN_SUCCESS) {
            /* No pages were found at the target. When validating the total length above, we already verified the
             * availability of the requested pages; if they've now disappeared, we can treat it as an error,
             * even if !require_full was specified. */
            PLCF_DEBUG("mach_make_memory_entry_64() failed: %d", kt);

            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }

            /* Return error */
            return PLCRASH_ENOMEM;
        }

        /* Map the pages into our local task, overwriting the allocation used to reserve the target space above. */
        pl_vm_address_t target_address = mapping_addr + mapped_size;
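        /* VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE maps at exactly target_address, replacing the placeholder
         * reservation made above; passing copy=TRUE requests copy (rather than shared) semantics, so
         * later modifications in the target task are not reflected in our local mapping. */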
#ifdef PL_HAVE_MACH_VM
        kt = mach_vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#else
        kt = vm_map(mach_task_self(), &target_address, (vm_size_t) entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#endif /* !PL_HAVE_MACH_VM */

        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("vm_map() failure: %d", kt);

            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }

            /* Drop the memory handle */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }

            return PLCRASH_ENOMEM;
        }

        /* Drop the memory handle */
        kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
        }

        /* Adjust the total mapping size */
        mapped_size += entry_length;
    }

    *result = mapping_addr;
    *result_length = mapped_size;

    return PLCRASH_ESUCCESS;
}
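
/*
 * Illustrative sketch (not from the original source): how a caller might consume the remapped
 * range produced above. The call site and cleanup shown here are assumptions for illustration
 * only; since the mapping is created in the current task, it is released with vm_deallocate().
 *
 *   pl_vm_address_t mapped;
 *   pl_vm_size_t mapped_len;
 *   plcrash_error_t err = plcrash_async_mobject_remap_pages_workaround(task, task_addr, length,
 *                                                                      false, &mapped, &mapped_len);
 *   if (err == PLCRASH_ESUCCESS) {
 *       // ... read the remapped copy at `mapped` ...
 *       vm_deallocate(mach_task_self(), mapped, mapped_len);
 *   }
 */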