in osfmk/arm/pmap.c [1827:2344]
static pv_alloc_return_t pv_alloc(
pmap_t pmap,
unsigned int pai,
pv_entry_t **pvepp);
static void ptd_bootstrap(
pt_desc_t *ptdp, unsigned int ptd_cnt);
static inline pt_desc_t *ptd_alloc_unlinked(void);
static pt_desc_t *ptd_alloc(pmap_t pmap);
static void ptd_deallocate(pt_desc_t *ptdp);
static void ptd_init(
pt_desc_t *ptdp, pmap_t pmap, vm_map_address_t va, unsigned int ttlevel, pt_entry_t * pte_p);
static void pmap_set_reference(
ppnum_t pn);
pmap_paddr_t pmap_vtophys(
pmap_t pmap, addr64_t va);
void pmap_switch_user_ttb(
pmap_t pmap);
static kern_return_t pmap_expand(
pmap_t, vm_map_address_t, unsigned int options, unsigned int level);
static int pmap_remove_range(
pmap_t, vm_map_address_t, pt_entry_t *, pt_entry_t *);
static int pmap_remove_range_options(
pmap_t, vm_map_address_t, pt_entry_t *, pt_entry_t *, vm_map_address_t *, bool *, int);
static tt_entry_t *pmap_tt1_allocate(
pmap_t, vm_size_t, unsigned int);
#define PMAP_TT_ALLOCATE_NOWAIT 0x1
static void pmap_tt1_deallocate(
pmap_t, tt_entry_t *, vm_size_t, unsigned int);
#define PMAP_TT_DEALLOCATE_NOBLOCK 0x1
static kern_return_t pmap_tt_allocate(
pmap_t, tt_entry_t **, unsigned int, unsigned int);
#define PMAP_TT_ALLOCATE_NOWAIT 0x1
static void pmap_tte_deallocate(
pmap_t, vm_offset_t, vm_offset_t, bool, tt_entry_t *, unsigned int);
const unsigned int arm_hardware_page_size = ARM_PGBYTES;
const unsigned int arm_pt_desc_size = sizeof(pt_desc_t);
const unsigned int arm_pt_root_size = PMAP_ROOT_ALLOC_SIZE;
#define PMAP_TT_DEALLOCATE_NOBLOCK 0x1
#if (__ARM_VMSA__ > 7)
static inline tt_entry_t *pmap_tt1e(
pmap_t, vm_map_address_t);
static inline tt_entry_t *pmap_tt2e(
pmap_t, vm_map_address_t);
static inline pt_entry_t *pmap_tt3e(
pmap_t, vm_map_address_t);
static inline pt_entry_t *pmap_ttne(
pmap_t, unsigned int, vm_map_address_t);
static void pmap_unmap_sharedpage(
pmap_t pmap);
static boolean_t
pmap_is_64bit(pmap_t);
#endif /* (__ARM_VMSA__ > 7) */
static inline tt_entry_t *pmap_tte(
pmap_t, vm_map_address_t);
static inline pt_entry_t *pmap_pte(
pmap_t, vm_map_address_t);
static void pmap_update_cache_attributes_locked(
ppnum_t, unsigned);
static boolean_t arm_clear_fast_fault(
ppnum_t ppnum,
vm_prot_t fault_type);
static pmap_paddr_t pmap_pages_reclaim(
void);
static kern_return_t pmap_pages_alloc_zeroed(
pmap_paddr_t *pa,
unsigned size,
unsigned option);
#define PMAP_PAGES_ALLOCATE_NOWAIT 0x1
#define PMAP_PAGES_RECLAIM_NOWAIT 0x2
static void pmap_pages_free(
pmap_paddr_t pa,
unsigned size);
static void pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes);
static void pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes);
static void pmap_trim_self(pmap_t pmap);
static void pmap_trim_subord(pmap_t subord);
/*
* Temporary prototypes, while we wait for pmap_enter to move to taking an
* address instead of a page number.
*/
static kern_return_t
pmap_enter_addr(
pmap_t pmap,
vm_map_address_t v,
pmap_paddr_t pa,
vm_prot_t prot,
vm_prot_t fault_type,
unsigned int flags,
boolean_t wired);
kern_return_t
pmap_enter_options_addr(
pmap_t pmap,
vm_map_address_t v,
pmap_paddr_t pa,
vm_prot_t prot,
vm_prot_t fault_type,
unsigned int flags,
boolean_t wired,
unsigned int options,
__unused void *arg);
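/*
 * Illustrative sketch (assumption, not a declaration from this file): the
 * page-number-based pmap_enter() path would be expected to forward to the
 * address-based variant above by converting the ppnum_t to a physical
 * address, e.g.:
 *
 *   kern_return_t
 *   pmap_enter(pmap_t pmap, vm_map_address_t v, ppnum_t pn, vm_prot_t prot,
 *       vm_prot_t fault_type, unsigned int flags, boolean_t wired)
 *   {
 *       return pmap_enter_addr(pmap, v, (pmap_paddr_t)pn << PAGE_SHIFT,
 *           prot, fault_type, flags, wired);
 *   }
 */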
#ifdef CONFIG_XNUPOST
kern_return_t pmap_test(void);
#endif /* CONFIG_XNUPOST */
#if XNU_MONITOR
static pmap_paddr_t pmap_alloc_page_for_kern(unsigned int options);
static void pmap_alloc_page_for_ppl(unsigned int options);
/*
* This macro generates prototypes for the *_internal functions, which
* represent the PPL interface. When the PPL is enabled, this will also
* generate prototypes for the PPL entrypoints (*_ppl), as well as generating
* the entrypoints.
*/
#define GEN_ASM_NAME(__function_name) _##__function_name##_ppl
#define PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
    static __return_type __function_name##_internal __function_args; \
    extern __return_type __function_name##_ppl __function_args; \
    __asm__ (".text \n" \
             ".align 2 \n" \
             ".globl " #__assembly_function_name "\n" \
             #__assembly_function_name ":\n" \
             "mov x15, " #__function_index "\n" \
             "b _aprr_ppl_enter\n")

#define PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, __assembly_function_name) \
    PMAP_SUPPORT_PROTOTYPES_WITH_ASM_INTERNAL(__return_type, __function_name, __function_args, __function_index, __assembly_function_name)

#define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
    PMAP_SUPPORT_PROTOTYPES_WITH_ASM(__return_type, __function_name, __function_args, __function_index, GEN_ASM_NAME(__function_name))

#else /* XNU_MONITOR */

#define PMAP_SUPPORT_PROTOTYPES(__return_type, __function_name, __function_args, __function_index) \
    static __return_type __function_name##_internal __function_args
#endif /* XNU_MONITOR */
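/*
 * For illustration (derived from the macros above; not compiled here): under
 * XNU_MONITOR, an invocation such as
 *
 *   PMAP_SUPPORT_PROTOTYPES(void, pmap_destroy, (pmap_t pmap), PMAP_DESTROY_INDEX);
 *
 * expands roughly to
 *
 *   static void pmap_destroy_internal (pmap_t pmap);
 *   extern void pmap_destroy_ppl (pmap_t pmap);
 *   __asm__ (".text \n"
 *            ".align 2 \n"
 *            ".globl _pmap_destroy_ppl\n"
 *            "_pmap_destroy_ppl:\n"
 *            "mov x15, <numeric value of PMAP_DESTROY_INDEX>\n"
 *            "b _aprr_ppl_enter\n");
 *
 * i.e. the generated _ppl entrypoint loads the dispatch index into x15 and
 * branches into the PPL. The intermediate PMAP_SUPPORT_PROTOTYPES_WITH_ASM
 * level exists so that __function_index is macro-expanded to its numeric
 * value before being stringified into the asm text. Without XNU_MONITOR,
 * only the *_internal prototype is emitted.
 */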
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
arm_fast_fault, (pmap_t pmap,
vm_map_address_t va,
vm_prot_t fault_type,
bool was_af_fault,
bool from_user), ARM_FAST_FAULT_INDEX);
PMAP_SUPPORT_PROTOTYPES(
boolean_t,
arm_force_fast_fault, (ppnum_t ppnum,
vm_prot_t allow_mode,
int options), ARM_FORCE_FAST_FAULT_INDEX);
MARK_AS_PMAP_TEXT static boolean_t
arm_force_fast_fault_with_flush_range(
ppnum_t ppnum,
vm_prot_t allow_mode,
int options,
pmap_tlb_flush_range_t *flush_range);
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX);
PMAP_SUPPORT_PROTOTYPES(
boolean_t,
pmap_batch_set_cache_attributes, (ppnum_t pn,
unsigned int cacheattr,
unsigned int page_cnt,
unsigned int page_index,
boolean_t doit,
unsigned int *res), PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_change_wiring, (pmap_t pmap,
vm_map_address_t v,
boolean_t wired), PMAP_CHANGE_WIRING_INDEX);
PMAP_SUPPORT_PROTOTYPES(
pmap_t,
pmap_create_options, (ledger_t ledger,
vm_map_size_t size,
unsigned int flags,
kern_return_t * kr), PMAP_CREATE_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_destroy, (pmap_t pmap), PMAP_DESTROY_INDEX);
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
pmap_enter_options, (pmap_t pmap,
vm_map_address_t v,
pmap_paddr_t pa,
vm_prot_t prot,
vm_prot_t fault_type,
unsigned int flags,
boolean_t wired,
unsigned int options), PMAP_ENTER_OPTIONS_INDEX);
PMAP_SUPPORT_PROTOTYPES(
pmap_paddr_t,
pmap_find_pa, (pmap_t pmap,
addr64_t va), PMAP_FIND_PA_INDEX);
#if (__ARM_VMSA__ > 7)
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
pmap_insert_sharedpage, (pmap_t pmap), PMAP_INSERT_SHAREDPAGE_INDEX);
#endif
PMAP_SUPPORT_PROTOTYPES(
boolean_t,
pmap_is_empty, (pmap_t pmap,
vm_map_offset_t va_start,
vm_map_offset_t va_end), PMAP_IS_EMPTY_INDEX);
PMAP_SUPPORT_PROTOTYPES(
unsigned int,
pmap_map_cpu_windows_copy, (ppnum_t pn,
vm_prot_t prot,
unsigned int wimg_bits), PMAP_MAP_CPU_WINDOWS_COPY_INDEX);
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
pmap_nest, (pmap_t grand,
pmap_t subord,
addr64_t vstart,
uint64_t size), PMAP_NEST_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_page_protect_options, (ppnum_t ppnum,
vm_prot_t prot,
unsigned int options,
void *arg), PMAP_PAGE_PROTECT_OPTIONS_INDEX);
PMAP_SUPPORT_PROTOTYPES(
vm_map_address_t,
pmap_protect_options, (pmap_t pmap,
vm_map_address_t start,
vm_map_address_t end,
vm_prot_t prot,
unsigned int options,
void *args), PMAP_PROTECT_OPTIONS_INDEX);
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
pmap_query_page_info, (pmap_t pmap,
vm_map_offset_t va,
int *disp_p), PMAP_QUERY_PAGE_INFO_INDEX);
PMAP_SUPPORT_PROTOTYPES(
mach_vm_size_t,
pmap_query_resident, (pmap_t pmap,
vm_map_address_t start,
vm_map_address_t end,
mach_vm_size_t * compressed_bytes_p), PMAP_QUERY_RESIDENT_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_reference, (pmap_t pmap), PMAP_REFERENCE_INDEX);
PMAP_SUPPORT_PROTOTYPES(
vm_map_address_t,
pmap_remove_options, (pmap_t pmap,
vm_map_address_t start,
vm_map_address_t end,
int options), PMAP_REMOVE_OPTIONS_INDEX);
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
pmap_return, (boolean_t do_panic,
boolean_t do_recurse), PMAP_RETURN_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_set_cache_attributes, (ppnum_t pn,
unsigned int cacheattr), PMAP_SET_CACHE_ATTRIBUTES_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_update_compressor_page, (ppnum_t pn,
unsigned int prev_cacheattr, unsigned int new_cacheattr), PMAP_UPDATE_COMPRESSOR_PAGE_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_set_nested, (pmap_t pmap), PMAP_SET_NESTED_INDEX);
#if MACH_ASSERT || XNU_MONITOR
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_set_process, (pmap_t pmap,
int pid,
char *procname), PMAP_SET_PROCESS_INDEX);
#endif
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_unmap_cpu_windows_copy, (unsigned int index), PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX);
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
pmap_unnest_options, (pmap_t grand,
addr64_t vaddr,
uint64_t size,
unsigned int option), PMAP_UNNEST_OPTIONS_INDEX);
#if XNU_MONITOR
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_cpu_data_init, (unsigned int cpu_number), PMAP_CPU_DATA_INIT_INDEX);
#endif
PMAP_SUPPORT_PROTOTYPES(
void,
phys_attribute_set, (ppnum_t pn,
unsigned int bits), PHYS_ATTRIBUTE_SET_INDEX);
#if XNU_MONITOR
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_mark_page_as_ppl_page, (pmap_paddr_t pa, bool initially_free), PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX);
#endif
PMAP_SUPPORT_PROTOTYPES(
void,
phys_attribute_clear, (ppnum_t pn,
unsigned int bits,
int options,
void *arg), PHYS_ATTRIBUTE_CLEAR_INDEX);
#if __ARM_RANGE_TLBI__
PMAP_SUPPORT_PROTOTYPES(
vm_map_address_t,
phys_attribute_clear_range, (pmap_t pmap,
vm_map_address_t start,
vm_map_address_t end,
unsigned int bits,
unsigned int options), PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX);
#endif /* __ARM_RANGE_TLBI__ */
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_switch, (pmap_t pmap), PMAP_SWITCH_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_switch_user_ttb, (pmap_t pmap), PMAP_SWITCH_USER_TTB_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_clear_user_ttb, (void), PMAP_CLEAR_USER_TTB_INDEX);
#if XNU_MONITOR
PMAP_SUPPORT_PROTOTYPES(
uint64_t,
pmap_release_ppl_pages_to_kernel, (void), PMAP_RELEASE_PAGES_TO_KERNEL_INDEX);
#endif
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_set_vm_map_cs_enforced, (pmap_t pmap, bool new_value), PMAP_SET_VM_MAP_CS_ENFORCED_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_set_jit_entitled, (pmap_t pmap), PMAP_SET_JIT_ENTITLED_INDEX);
#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_disable_user_jop, (pmap_t pmap), PMAP_DISABLE_USER_JOP_INDEX);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_trim, (pmap_t grand,
pmap_t subord,
addr64_t vstart,
uint64_t size), PMAP_TRIM_INDEX);
#if HAS_APPLE_PAC
PMAP_SUPPORT_PROTOTYPES(
void *,
pmap_sign_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_SIGN_USER_PTR);
PMAP_SUPPORT_PROTOTYPES(
void *,
pmap_auth_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_AUTH_USER_PTR);
#endif /* HAS_APPLE_PAC */
PMAP_SUPPORT_PROTOTYPES(
bool,
pmap_is_trust_cache_loaded, (const uuid_t uuid), PMAP_IS_TRUST_CACHE_LOADED_INDEX);
PMAP_SUPPORT_PROTOTYPES(
uint32_t,
pmap_lookup_in_static_trust_cache, (const uint8_t cdhash[CS_CDHASH_LEN]), PMAP_LOOKUP_IN_STATIC_TRUST_CACHE_INDEX);
PMAP_SUPPORT_PROTOTYPES(
bool,
pmap_lookup_in_loaded_trust_caches, (const uint8_t cdhash[CS_CDHASH_LEN]), PMAP_LOOKUP_IN_LOADED_TRUST_CACHES_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_set_compilation_service_cdhash, (const uint8_t cdhash[CS_CDHASH_LEN]),
PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX);
PMAP_SUPPORT_PROTOTYPES(
bool,
pmap_match_compilation_service_cdhash, (const uint8_t cdhash[CS_CDHASH_LEN]),
PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX);
#if XNU_MONITOR
static void pmap_mark_page_as_ppl_page(pmap_paddr_t pa);
#endif
void pmap_footprint_suspend(vm_map_t map,
boolean_t suspend);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_footprint_suspend, (vm_map_t map,
boolean_t suspend),
PMAP_FOOTPRINT_SUSPEND_INDEX);
#if XNU_MONITOR
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_ledger_alloc_init, (size_t),
PMAP_LEDGER_ALLOC_INIT_INDEX);
PMAP_SUPPORT_PROTOTYPES(
ledger_t,
pmap_ledger_alloc, (void),
PMAP_LEDGER_ALLOC_INDEX);
PMAP_SUPPORT_PROTOTYPES(
void,
pmap_ledger_free, (ledger_t),
PMAP_LEDGER_FREE_INDEX);
#endif
#if CONFIG_PGTRACE
boolean_t pgtrace_enabled = 0;
typedef struct {
    queue_chain_t   chain;

    /*
     * pmap     - pmap for below addresses
     * ova      - original va page address
     * cva      - clone va addresses for pre, target and post pages
     * cva_spte - clone saved ptes
     * range    - trace range in this map
     * cloned   - has been cloned or not
     */
    pmap_t          pmap;
    vm_map_offset_t ova;
    vm_map_offset_t cva[3];
    pt_entry_t      cva_spte[3];
    struct {
        pmap_paddr_t start;
        pmap_paddr_t end;
    } range;
    bool            cloned;
} pmap_pgtrace_map_t;
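/*
 * A minimal usage sketch (hypothetical; the queue head name below is not
 * from this file): entries of this type are chained through the embedded
 * queue_chain_t and can be walked with the standard osfmk queue_iterate()
 * macro, e.g.:
 *
 *   pmap_pgtrace_map_t *map;
 *   queue_iterate(&pgtrace_map_queue, map, pmap_pgtrace_map_t *, chain) {
 *       if (map->pmap == pmap && map->cloned) {
 *           // cva[0]/cva[1]/cva[2] are the pre, target and post clone pages
 *       }
 *   }
 */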