in host/pci.c [2338:2449]
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned int nr_io_queues;
unsigned long size;
int result;
/*
* Sample the module parameters once at reset time so that we have
* stable values to work with.
*/
dev->nr_write_queues = write_queues;
dev->nr_poll_queues = poll_queues;
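/*
 * Request one I/O queue per allocated queue slot (slot 0 is the admin
 * queue). nvme_set_queue_count() negotiates with the controller via
 * Set Features, which may grant fewer queues than requested.
 */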
nr_io_queues = dev->nr_allocated_queues - 1;
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
if (nr_io_queues == 0)
return 0;
/*
 * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions
 * from set to unset. If there were a window between clearing the bit
 * and the IRQ actually being freed, pci_free_irq_vectors() jumping
 * into that window would crash. Also take the lock to avoid racing
 * with pci_free_irq_vectors() on the nvme_dev_disable() path.
 */
result = nvme_setup_io_queues_trylock(dev);
if (result)
return result;
if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
pci_free_irq(pdev, 0, adminq);
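/*
 * If submission queues are to live in the controller memory buffer,
 * check how deep a queue the CMB can hold; fall back to host memory
 * if the CMB is too small for a useful queue depth.
 */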
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
dev->q_depth = result;
else
dev->cmb_use_sqes = false;
}
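/*
 * Remap the doorbell BAR so it covers every queue's doorbell pair.
 * If the remap fails at that size, shrink the queue count until the
 * mapping fits (or give up once no I/O queues remain).
 */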
do {
size = db_bar_size(dev, nr_io_queues);
result = nvme_remap_bar(dev, size);
if (!result)
break;
if (!--nr_io_queues) {
result = -ENOMEM;
goto out_unlock;
}
} while (1);
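/* The remap may have moved dev->dbs; refresh the admin doorbell pointer. */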
adminq->q_db = dev->dbs;
retry:
/* Deregister the admin queue's interrupt */
if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
pci_free_irq(pdev, 0, adminq);
/*
 * If MSI-X was enabled early because the device has no INTx support,
 * disable it again before setting up the full range of vectors we
 * need.
 */
pci_free_irq_vectors(pdev);
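/*
 * nvme_setup_irqs() allocates MSI-X (or MSI) vectors with affinity
 * spreading across the default and read queue sets; poll queues are
 * polled and consume no interrupt vectors.
 */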
result = nvme_setup_irqs(dev, nr_io_queues);
if (result <= 0) {
result = -EIO;
goto out_unlock;
}
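/*
 * Vector 0 is shared with the admin queue, so only result - 1 vectors
 * drive I/O queue interrupts; with a single vector, the admin queue
 * and one I/O queue share it. Poll queues sit on top of the
 * IRQ-driven queues when computing the highest queue id.
 */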
dev->num_vecs = result;
result = max(result - 1, 1);
dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
/*
* Should investigate if there's a performance win from allocating
* more queues than interrupt vectors; it might allow the submission
* path to scale better, even if the receive path is limited by the
* number of interrupts.
*/
result = queue_request_irq(adminq);
if (result)
goto out_unlock;
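/*
 * The admin queue's interrupt is live again. Drop shutdown_lock
 * before the (potentially slow) queue creation, which issues admin
 * commands, so that nvme_dev_disable() is not blocked for the
 * duration.
 */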
set_bit(NVMEQ_ENABLED, &adminq->flags);
mutex_unlock(&dev->shutdown_lock);
result = nvme_create_io_queues(dev);
if (result || dev->online_queues < 2)
return result;
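/*
 * Fewer I/O queues came online than we sized the interrupt vectors
 * for. Delete the queues on the controller, free their vectors, and
 * retry with the smaller count so the vector spreading matches the
 * number of queues that actually work.
 */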
if (dev->online_queues - 1 < dev->max_qid) {
nr_io_queues = dev->online_queues - 1;
nvme_disable_io_queues(dev);
result = nvme_setup_io_queues_trylock(dev);
if (result)
return result;
nvme_suspend_io_queues(dev);
goto retry;
}
dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
dev->io_queues[HCTX_TYPE_DEFAULT],
dev->io_queues[HCTX_TYPE_READ],
dev->io_queues[HCTX_TYPE_POLL]);
return 0;
out_unlock:
mutex_unlock(&dev->shutdown_lock);
return result;
}
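
For reference, the two helpers this function leans on are defined earlier in
the same file. A minimal sketch of each, reconstructed from memory, so details
(in particular the controller-state check) may vary between kernel versions:

static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
{
	/* Give up if the lock is already held by nvme_dev_disable(). */
	if (!mutex_trylock(&dev->shutdown_lock))
		return -ENODEV;

	/* Controller is in the wrong state; fail early. */
	if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
		mutex_unlock(&dev->shutdown_lock);
		return -ENODEV;
	}

	return 0;
}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	/*
	 * Each queue pair needs a 4-byte submission tail doorbell and a
	 * 4-byte completion head doorbell, scaled by the doorbell stride;
	 * the +1 accounts for the admin queue.
	 */
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

The trylock pattern is what keeps teardown safe: if nvme_dev_disable() already
holds shutdown_lock, queue setup backs off with -ENODEV instead of deadlocking
or racing the IRQ teardown.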