unsigned long slice_get_unmapped_area()

Defined in mm/slice.c [429:639]

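Finds an unmapped area for a mapping of len bytes at page size psize,
honouring the slice layout of the book3s64 hash MMU (256MB low slices
below 4GB, 1TB high slices above), where each slice can hold mappings
of only one page size at a time. Free slices encountered along the way
are converted to psize as needed. Returns the chosen address on
success, or -ENOMEM, -EINVAL or -EBUSY on failure.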

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

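	/*
	 * Addresses above DEFAULT_MAP_WINDOW are opt-in: widen the
	 * search limit to the full TASK_SIZE only when the caller
	 * explicitly asks for (or fixes) an address beyond the
	 * default window.
	 */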
	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require the
		 * slice mask cache to be recalculated because it should
		 * already be initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

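		/*
		 * Flush the SLB on every CPU running this mm so that
		 * entries installed under the old, smaller limit are
		 * rebuilt against the new one.
		 */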
		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */
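	/*
	 * Example: a non-fixed 64k request whose hint lands in a slice
	 * already marked 64k is returned as-is; a hint landing in a
	 * free slice causes that slice to be converted to 64k instead.
	 */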

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to the good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just
		 * return; there is nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask; no slice conversion
			 * is needed, so return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

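	/*
	 * Strip slices that are already the right size (and, when not
	 * MAP_FIXED, the compatible ones): only the newly claimed free
	 * slices need converting. SLB entries encode the segment page
	 * size, so converting above the base size also requires a
	 * segment flush.
	 */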
	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
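
How callers use it: the powerpc get_unmapped_area hooks pass the mm
context's current user page size and a topdown flag. The sketch below
is modelled on the hash-MMU path; the exact wiring (the radix fallback,
the file the hook lives in, helper names such as
generic_get_unmapped_area) varies by kernel version, so treat it as
illustrative rather than definitive.

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	/* Illustrative wiring; varies by kernel version. */
	if (radix_enabled())
		/* Radix MMUs do not use slices; take the generic path. */
		return generic_get_unmapped_area(filp, addr, len, pgoff, flags);

	/* Bottom-up search (topdown = 0) at the mm's current page size. */
	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}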