noinline void slc_op_rgn()

in mm/cache.c [584:645]


noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores, so concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * Otherwise a concurrent operation can be silently ignored and/or the
	 * old/new operation can remain incomplete forever (lockup in the
	 * SLC_CTRL_BUSY loop below).
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;
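
	/*
	 * Net effect on CTRL (assuming OP_FLUSH_N_INV == OP_FLUSH | OP_INV,
	 * as the two checks above imply):
	 *   OP_INV         -> IM=0, RGN_OP=b'001  (invalidate only)
	 *   OP_FLUSH       -> IM=1, RGN_OP=b'000  (flush only)
	 *   OP_FLUSH_N_INV -> IM=1, RGN_OP=b'001  (flush-n-invalidate)
	 */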

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the operation).
	 * END can't be the same as START, so add (l2_line_sz - 1) to sz.
	 */
	end = paddr + sz + l2_line_sz - 1;
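	/*
	 * Example with illustrative numbers: paddr = 0x8000_0040, sz = 0x100,
	 * l2_line_sz = 64 -> end = 0x8000_0040 + 0x100 + 0x40 - 1 = 0x8000_017f
	 * (the low, sub-line bits of END are ignored by the hardware).
	 */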
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	/* Poll until the hardware clears BUSY, i.e. the region op has completed */
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
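
A minimal caller sketch (illustrative, not taken from mm/cache.c): write back and invalidate a physically contiguous buffer in the SLC around a device DMA transfer. The helper name is made up; OP_FLUSH_N_INV is assumed to be the usual OP_FLUSH | OP_INV combination used with the ARC cache ops.

/* Hypothetical helper, for illustration only */
static void slc_wb_inv_for_dma(phys_addr_t buf_paddr, unsigned long len)
{
	/* Flush dirty SLC lines and drop them so the device reads fresh
	 * data and the CPU re-fetches after the transfer completes. */
	slc_op_rgn(buf_paddr, len, OP_FLUSH_N_INV);
}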