void __ref arc_cache_init(void)

in mm/cache.c [1218:1248]


/*
 * Per-CPU cache bring-up, called on every CPU during boot.
 *
 * Prints the cache geometry banner, runs the one-time master init on the
 * boot CPU only, and pre-zeroes the high (bits 32+) halves of the cache
 * maintenance address registers when PAE40-capable hardware runs a !PAE
 * kernel (see comment below).
 *
 * __ref: may reference __init code/data (arc_cache_init_master) — safe
 * because only the boot-time path (cpu == 0) reaches it.
 */
void __ref arc_cache_init(void)
{
	/* __maybe_unused: only consulted when SMP boot-CPU gating matters */
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	/* Describe I$/D$/SLC geometry in the boot log */
	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	/* One-time global setup runs on the boot CPU only */
	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses
	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
	 * to be zeroed to keep the ops sane.
	 * As an optimization for more common !PAE enabled case, zero them out
	 * once at init, rather than checking/setting to 0 for every runtime op
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		/* High halves of the per-cache physical-tag registers */
		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		/* l2_line_sz != 0 means an SLC (L2) is present */
		if (l2_line_sz) {
			/*
			 * NOTE(review): END1 is written before START1 —
			 * presumably because writing START triggers the SLC
			 * region op on this hardware; preserve this order.
			 */
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}