static int vfio_pci_dev_set_hot_reset() — excerpt from drivers/vfio/pci/vfio_pci_core.c, lines 2007–2074


/*
 * Perform a hot reset of every device in @dev_set, provided the caller's
 * @groups cover all affected devices.
 *
 * Locking strategy: under dev_set->lock, try-acquire each device's vma_lock
 * (zapping mappings), then try-acquire each device's memory_lock for write
 * while releasing its vma_lock.  Only with every memory_lock write-held is
 * pci_reset_bus() issued.  All acquisitions are trylocks; any contention
 * aborts with -EBUSY and unwinds whatever was taken so far.
 *
 * Unwind bookkeeping: cur_vma and cur_mem record how far each loop got.
 * A loop that completes sets its cursor to NULL, meaning "all devices passed
 * this stage".  The err_undo walk then releases, per device:
 *   - memory_lock for devices before cur_mem (they reached the second loop),
 *   - vma_lock for devices from cur_mem up to (not including) cur_vma,
 *   - nothing from cur_vma onward.
 *
 * Returns 0 on successful reset, -EINVAL if the set is not resettable or a
 * device is outside @groups, -EBUSY on lock contention, or the error from
 * pci_reset_bus().
 */
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups)
{
	struct vfio_pci_core_device *cur_mem;
	struct vfio_pci_core_device *cur_vma;
	struct vfio_pci_core_device *cur;
	struct pci_dev *pdev;
	bool is_mem = true;
	int ret;

	mutex_lock(&dev_set->lock);
	/*
	 * Pre-point cur_mem at the first device: if we bail out during the
	 * vma_lock loop below (before any memory_lock is taken), the unwind
	 * walk matches cur_mem on its first iteration and flips is_mem to
	 * false, so it only releases vma_locks — never a memory_lock we
	 * don't hold.
	 */
	cur_mem = list_first_entry(&dev_set->device_list,
				   struct vfio_pci_core_device,
				   vdev.dev_set_list);

	pdev = vfio_pci_dev_set_resettable(dev_set);
	if (!pdev) {
		ret = -EINVAL;
		goto err_unlock;
	}

	list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
		/*
		 * Test whether all the affected devices are contained by the
		 * set of groups provided by the user.
		 */
		if (!vfio_dev_in_groups(cur_vma, groups)) {
			ret = -EINVAL;
			goto err_undo;
		}

		/*
		 * Locking multiple devices is prone to deadlock, runaway and
		 * unwind if we hit contention.
		 */
		if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
			ret = -EBUSY;
			goto err_undo;
		}
	}
	/* All vma_locks acquired; NULL marks this stage complete for unwind. */
	cur_vma = NULL;

	list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
		if (!down_write_trylock(&cur_mem->memory_lock)) {
			ret = -EBUSY;
			goto err_undo;
		}
		/*
		 * Hand off: once memory_lock is write-held for this device,
		 * its vma_lock (taken in the loop above) is no longer needed.
		 */
		mutex_unlock(&cur_mem->vma_lock);
	}
	/* All memory_locks write-held; NULL marks this stage complete too. */
	cur_mem = NULL;

	/* Reset with user memory access blocked on every device in the set. */
	ret = pci_reset_bus(pdev);

	/*
	 * Fallthrough on success as well: with both cursors NULL the walk
	 * below simply releases every device's memory_lock.
	 */
err_undo:
	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		if (cur == cur_mem)
			is_mem = false;	/* devices from here hold vma_lock */
		if (cur == cur_vma)
			break;		/* devices from here hold nothing */
		if (is_mem)
			up_write(&cur->memory_lock);
		else
			mutex_unlock(&cur->vma_lock);
	}
err_unlock:
	mutex_unlock(&dev_set->lock);
	return ret;
}