static u64 new_context()

in mm/asid.c [79:128]
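
new_context() leans on a handful of helpers defined elsewhere in mm/asid.c (ASID_MASK(), NUM_CTXT_ASIDS(), ASID_FIRST_VERSION(), asid2idx(), idx2asid() and struct asid_info itself). Their real definitions are not reproduced here; the sketch below is an assumption about their shape, modelled on the arm64 ASID allocator this code closely mirrors, and is only meant to make the listing readable on its own.

/* Assumed shapes, for orientation only -- see mm/asid.c for the real definitions */
struct asid_info {
	atomic64_t	generation;	/* current generation, held in the bits above 'bits' */
	unsigned long	*map;		/* bitmap of in-use ASID slots */
	u32		bits;		/* width of a hardware ASID */
	unsigned int	ctxt_shift;	/* non-zero when ASIDs are handed out in even/odd pairs */
	raw_spinlock_t	lock;		/* serialises the slow path around new_context() */
	/* ... per-CPU active/reserved tracking and a flush callback ... */
};

#define NUM_ASIDS(info)			(1UL << (info)->bits)
#define NUM_CTXT_ASIDS(info)		(NUM_ASIDS(info) >> (info)->ctxt_shift)
#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	NUM_ASIDS(info)
#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))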


static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	/* Resume the bitmap search near where the previous allocation landed */
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	/* A non-zero value means this mm has held an ASID before */
	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						 &info->generation);
	flush_context(info);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	/* Start the next search from here instead of rescanning from index 1 */
	cur_idx = asid;
	/* No CPU has TLB entries tagged with the new ASID yet */
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}
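
new_context() is the slow path: it is only reached once a fast-path generation check has failed, and it must be called with the allocator lock held, since it manipulates info->map and the static cur_idx without further locking. The fragment below is a hypothetical caller sketch showing where it fits; the function name asid_check_context() and the exact fast-path test are assumptions for illustration (loosely following the arm64 check_and_switch_context() pattern), not the file's actual API.

/*
 * Hypothetical caller sketch -- names and fast-path details are assumptions.
 * Real callers also maintain a per-CPU "active" ASID and flush the local TLB
 * after a rollover; that bookkeeping is omitted here.
 */
static void asid_check_context(struct asid_info *info, atomic64_t *pasid,
			       struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(pasid);

	/* Fast path: the ASID still belongs to the current generation */
	if (!((asid ^ atomic64_read(&info->generation)) >> info->bits))
		return;

	raw_spin_lock_irqsave(&info->lock, flags);
	/* Re-check under the lock: another CPU may have refreshed it already */
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}
	raw_spin_unlock_irqrestore(&info->lock, flags);
}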