void __init arc_cache_init_master(void)

in mm/cache.c [1134:1216]

void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
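				/*
				 * e.g. a 64K, 4-way VIPT D$ with 8K pages has
				 * 64/4/8 = 2 colours: each way spans more than
				 * one page, so virtual aliases of the same
				 * physical page can land in different sets.
				 */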
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * than or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable is not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
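		/* SLC present and enabled: DMA ops maintain both L1 D$ and the SLC */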
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
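		/* No usable SLC: L1 D$ maintenance alone suffices */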
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
	/*
	 * In case of IOC (say IOC+SLC case), the pointers above could still be
	 * set but end up not being relevant, as the first function in the
	 * chain is not called at all for devices using coherent DMA.
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}
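
For reference, a minimal sketch of how the function pointers installed above are consumed, assuming a thin wrapper of the shape described in the closing comment (the wrapper body below is illustrative, not part of this listing):

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	/* dispatches to whichever variant arc_cache_init_master() selected */
	__dma_cache_wback_inv(start, sz);
}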