static int virtio_mem_bbm_offline_remove_and_unplug_bb()

in virtio_mem.c [2102:2169]


static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
						       unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	struct page *page;
	int rc;

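	/* Only big blocks whose memory was added to Linux (ADDED) qualify. */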
	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_ADDED))
		return -EINVAL;

	if (bbm_safe_unplug) {
		/*
		 * Start by fake-offlining all memory. Once we have marked the
		 * device block as fake-offline, all newly onlined memory will
		 * automatically be kept fake-offline. Protect from concurrent
		 * onlining/offlining until we have a consistent state.
		 */
		mutex_lock(&vm->hotplug_mutex);
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);

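		/*
		 * Fake-offline any online memory of the big block, one memory
		 * section at a time; sections without an online memmap are
		 * skipped. On failure, end_pfn is trimmed so that only the
		 * sections processed so far get rolled back.
		 */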
		for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
			page = pfn_to_online_page(pfn);
			if (!page)
				continue;

			rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
			if (rc) {
				end_pfn = pfn;
				goto rollback_safe_unplug;
			}
		}
		mutex_unlock(&vm->hotplug_mutex);
	}

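	/* Try to offline and remove the Linux memory backing the big block. */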
	rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
	if (rc) {
		if (bbm_safe_unplug) {
			mutex_lock(&vm->hotplug_mutex);
			goto rollback_safe_unplug;
		}
		return rc;
	}

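	/*
	 * The memory is offline and removed from Linux; ask the device to
	 * unplug the big block and track the result in the block state.
	 */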
	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
	if (rc)
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_PLUGGED);
	else
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_UNUSED);
	return rc;

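/*
 * Fake-online everything that was fake-offlined before the failure and restore
 * the ADDED state before dropping the hotplug mutex.
 */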
rollback_safe_unplug:
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
	}
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
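
For context, the sketch below shows one way a caller could drive this function: walk the big blocks that are in the ADDED state and offline, remove and unplug candidates until the requested amount has been unplugged. It is an illustrative assumption, not the driver's actual unplug-request path (which applies more policy around which blocks to try first); the helper name virtio_mem_bbm_try_unplug_some() and the vm->bbm.first_bb_id / vm->bbm.next_bb_id fields are likewise assumptions, not taken from the listing above.

static int virtio_mem_bbm_try_unplug_some(struct virtio_mem *vm,
					  uint64_t requested_size)
{
	uint64_t unplugged = 0;
	unsigned long bb_id;
	int rc;

	if (!requested_size)
		return 0;

	/* Walk all known big blocks; the bbm id fields are assumptions. */
	for (bb_id = vm->bbm.first_bb_id; bb_id < vm->bbm.next_bb_id; bb_id++) {
		if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
		    VIRTIO_MEM_BBM_BB_ADDED)
			continue;
		cond_resched();

		rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
		if (rc == -EBUSY)
			continue;	/* could not offline right now, try another */
		if (rc)
			return rc;

		unplugged += vm->bbm.bb_size;
		if (unplugged >= requested_size)
			return 0;
	}
	/* Not enough memory could be unplugged right now. */
	return -EBUSY;
}

Returning -EBUSY when too little could be unplugged mirrors how offlining failures are commonly reported, but that convention is a design choice of this sketch, not something the listing above prescribes.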