in ipr.c [10171:10443]
static int ipr_probe_ioa(struct pci_dev *pdev,
const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
struct Scsi_Host *host;
unsigned long ipr_regs_pci;
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
unsigned long lock_flags, driver_lock_flags;
unsigned int irq_flag;
ENTER;
dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
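/* Allocate the Scsi_Host; the per-adapter ipr_ioa_cfg is carved out of its hostdata */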
host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
if (!host) {
dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
rc = -ENOMEM;
goto out;
}
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
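/* Look up the chip-specific configuration (register offsets, cache line size, command limits) for this PCI ID */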
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
if (!ioa_cfg->ipr_chip) {
dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
dev_id->vendor, dev_id->device);
rc = -ENODEV;
goto out_scsi_host_put;
}
/* set SIS 32 or SIS 64 */
ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
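/* Choose the transition-to-operational timeout: the module parameter wins, otherwise use the long timeout on adapters flagged for it */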
if (ipr_transop_timeout)
ioa_cfg->transop_timeout = ipr_transop_timeout;
else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
else
ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
ioa_cfg->revid = pdev->revision;
ipr_init_ioa_cfg(ioa_cfg, host, pdev);
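/* Reserve the PCI regions, enable the device and map BAR 0, which holds the adapter's MMIO register space */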
ipr_regs_pci = pci_resource_start(pdev, 0);
rc = pci_request_regions(pdev, IPR_NAME);
if (rc < 0) {
dev_err(&pdev->dev,
"Couldn't register memory range of registers\n");
goto out_scsi_host_put;
}
rc = pci_enable_device(pdev);
if (rc || pci_channel_offline(pdev)) {
if (pci_channel_offline(pdev)) {
ipr_wait_for_pci_err_recovery(ioa_cfg);
rc = pci_enable_device(pdev);
}
if (rc) {
dev_err(&pdev->dev, "Cannot enable adapter\n");
ipr_wait_for_pci_err_recovery(ioa_cfg);
goto out_release_regions;
}
}
ipr_regs = pci_ioremap_bar(pdev, 0);
if (!ipr_regs) {
dev_err(&pdev->dev,
"Couldn't map memory range of registers\n");
rc = -ENOMEM;
goto out_disable;
}
ioa_cfg->hdw_dma_regs = ipr_regs;
ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
ipr_init_regs(ioa_cfg);
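/* SIS-64 adapters can DMA above 4GB: try a 64-bit mask first and fall back to 32-bit; SIS-32 adapters always use a 32-bit mask */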
if (ioa_cfg->sis64) {
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc < 0) {
dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
rc = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(32));
}
} else {
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
if (rc < 0) {
dev_err(&pdev->dev, "Failed to set DMA mask\n");
goto cleanup_nomem;
}
rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
ioa_cfg->chip_cfg->cache_line_size);
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Write of cache line size failed\n");
ipr_wait_for_pci_err_recovery(ioa_cfg);
rc = -EIO;
goto cleanup_nomem;
}
/* Issue MMIO read to ensure card is not in EEH */
interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
ipr_wait_for_pci_err_recovery(ioa_cfg);
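/* Clamp the requested MSI-X vector count to what the driver supports */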
if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
dev_err(&pdev->dev, "The max number of MSIX is %d\n",
IPR_MAX_MSIX_VECTORS);
ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
}
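/* Allocate interrupt vectors: try MSI/MSI-X on chips that support it, falling back to a single legacy INTx vector (which requires explicit ISR clearing) */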
irq_flag = PCI_IRQ_LEGACY;
if (ioa_cfg->ipr_chip->has_msi)
irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
if (rc < 0) {
ipr_wait_for_pci_err_recovery(ioa_cfg);
goto cleanup_nomem;
}
ioa_cfg->nvectors = rc;
if (!pdev->msi_enabled && !pdev->msix_enabled)
ioa_cfg->clear_isr = 1;
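/* Enable bus mastering; if the adapter dropped into PCI error recovery (EEH), wait for recovery and retry once before giving up */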
pci_set_master(pdev);
if (pci_channel_offline(pdev)) {
ipr_wait_for_pci_err_recovery(ioa_cfg);
pci_set_master(pdev);
if (pci_channel_offline(pdev)) {
rc = -EIO;
goto out_msi_disable;
}
}
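/* Verify that MSI/MSI-X delivery actually works; on -EOPNOTSUPP drop back to a single legacy interrupt */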
if (pdev->msi_enabled || pdev->msix_enabled) {
rc = ipr_test_msi(ioa_cfg, pdev);
switch (rc) {
case 0:
dev_info(&pdev->dev,
"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
pdev->msix_enabled ? "-X" : "");
break;
case -EOPNOTSUPP:
ipr_wait_for_pci_err_recovery(ioa_cfg);
pci_free_irq_vectors(pdev);
ioa_cfg->nvectors = 1;
ioa_cfg->clear_isr = 1;
break;
default:
goto out_msi_disable;
}
}
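/* Spread host RRQs across the available vectors, capped by the online CPU count and the driver maximum */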
ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
(unsigned int)num_online_cpus(),
(unsigned int)IPR_MAX_HRRQ_NUM);
rc = ipr_save_pcix_cmd_reg(ioa_cfg);
if (rc)
goto out_msi_disable;
rc = ipr_set_pcix_cmd_reg(ioa_cfg);
if (rc)
goto out_msi_disable;
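/* Allocate per-adapter control blocks and DMA memory */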
rc = ipr_alloc_mem(ioa_cfg);
if (rc < 0) {
dev_err(&pdev->dev,
"Couldn't allocate enough memory for device driver!\n");
goto out_msi_disable;
}
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Failed to save PCI config space\n");
rc = -EIO;
goto cleanup_nolog;
}
/*
* If HRRQ updated interrupt is not masked, or reset alert is set,
* the card is in an unknown state and needs a hard reset
*/
mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
ioa_cfg->needs_hard_reset = 1;
if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
ioa_cfg->needs_hard_reset = 1;
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
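/* Register the interrupt handlers: one per MSI/MSI-X vector, or a single shared handler on legacy INTx */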
if (pdev->msi_enabled || pdev->msix_enabled) {
name_msi_vectors(ioa_cfg);
rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
ioa_cfg->vectors_info[0].desc,
&ioa_cfg->hrrq[0]);
if (!rc)
rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
} else {
rc = request_irq(pdev->irq, ipr_isr,
IRQF_SHARED,
IPR_NAME, &ioa_cfg->hrrq[0]);
}
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
pdev->irq, rc);
goto cleanup_nolog;
}
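/* Adapters that require a PCI warm reset use slot reset and get a dedicated ordered workqueue for the reset work */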
if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
(dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg->needs_warm_reset = 1;
ioa_cfg->reset = ipr_reset_slot_reset;
ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
WQ_MEM_RECLAIM, host->host_no);
if (!ioa_cfg->reset_work_q) {
dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
rc = -ENOMEM;
goto out_free_irq;
}
} else {
ioa_cfg->reset = ipr_reset_start_bist;
}
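/* Make the new adapter visible on the driver's global list of IOAs */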
spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
LEAVE;
out:
return rc;
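/* Error unwind: each label releases what was set up before the corresponding failure, in reverse order of acquisition */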
out_free_irq:
ipr_free_irqs(ioa_cfg);
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
ipr_wait_for_pci_err_recovery(ioa_cfg);
pci_free_irq_vectors(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_disable:
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
goto out;
}