static int vmci_guest_probe_device()

in vmw_vmci/vmci_guest.c [428:678]


static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned int caps_in_use;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

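	/*
	 * Bit 0 of the mask below selects BAR 0 only. pcim_* resources
	 * are device-managed: the PCI core releases them automatically if
	 * probing fails or the device is removed, which is why the error
	 * paths at the bottom never unmap them by hand.
	 */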
	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	/*
	 * Enable bus mastering: queue_pair functionality needs the device
	 * to DMA into guest memory.
	 */
	pci_set_master(pdev);

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}
	caps_in_use = VMCI_CAPS_DATAGRAM;
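	/*
	 * caps_in_use now holds the subset of capabilities this driver
	 * will actually use; it is written back to VMCI_CAPS_ADDR below
	 * so the host knows which features to expect.
	 */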

	/*
	 * Use 64-bit PPNs if the device supports them; otherwise fall back
	 * to 32-bit PPNs, which is where the 44-bit DMA mask comes from
	 * (a 32-bit PPN plus a 12-bit page offset).
	 *
	 * The return value of dma_set_mask_and_coherent is deliberately
	 * ignored: this driver can operate with the default mask values
	 * if setting a wider mask fails.
	 */
	if (capabilities & VMCI_CAPS_PPN64) {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		use_ppn64 = true;
		caps_in_use |= VMCI_CAPS_PPN64;
	} else {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		use_ppn64 = false;
	}

	/*
	 * If the hardware supports notifications, use them as well.
	 * Allocation failure here is non-fatal: the driver simply runs
	 * without the notification capability.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
			caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);

	/* Let the host know which capabilities we intend to use. */
	iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
				 bitmap_ppn);
			error = -ENXIO;
			/*
			 * Take the label that also frees the bitmap
			 * allocated above; jumping past it would leak
			 * the notification page.
			 */
			goto err_remove_bitmap;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
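	/*
	 * A failed subscription is deliberately non-fatal: the driver
	 * only loses automatic context-id updates, not core operation.
	 */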

	/*
	 * Enable interrupts. Try MSI-X first (one exclusive vector per
	 * interrupt source), then fall back to a single shared vector
	 * using MSI-X, MSI, or legacy interrupts, in that order.
	 */
	error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
			PCI_IRQ_MSIX);
	if (error < 0) {
		error = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (error < 0)
			goto err_remove_bitmap;
	} else {
		vmci_dev->exclusive_vectors = true;
	}
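
	/*
	 * With exclusive vectors, vector 0 serves datagram interrupts and
	 * vector 1 the notification bitmap; with a single shared vector,
	 * the one handler must dispatch both interrupt sources itself.
	 */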

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
			    IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine.  This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(pci_irq_vector(pdev, 1),
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/* Enable interrupts. */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);

	vmci_call_vsock_callback(false);
	return 0;

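	/*
	 * Error unwind: each label below undoes the steps that succeeded
	 * before the failure and falls through to the labels after it,
	 * so cleanup runs in reverse order of setup.
	 */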
err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	pci_free_irq_vectors(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}
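
For context, a probe routine like this only runs once the driver is
registered with the PCI core. Below is a minimal sketch of that wiring,
assuming the VMware IDs from <linux/pci_ids.h> and a
vmci_guest_remove_device() teardown counterpart; neither definition is part
of this excerpt, so treat those names as assumptions rather than the file's
actual contents.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,	/* the function above */
	.remove		= vmci_guest_remove_device,	/* assumed counterpart */
};

/* Registered from the driver's init path, for example: */
static int __init vmci_guest_sketch_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}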