static int init_one()

in ethernet/chelsio/cxgb4/cxgb4_main.c [6604:7105]


static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
	struct port_info *pi;
	enum chip_type chip;
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

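	/* BAR0 holds the adapter's memory-mapped register space, which we
	 * need before anything else in order to talk to the chip.
	 */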
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
	if ((int)chip < 0) {
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		err = chip;
		goto out_free_adapter;
	}
	chip_ver = CHELSIO_CHIP_VERSION(chip);
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	pci_set_drvdata(pdev, adapter);

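	/* Only the PF encoded in the PCI ID table's driver_data performs
	 * full adapter initialization.  Any other PF is left disabled, with
	 * its config space saved so it can be restored if SR-IOV is enabled
	 * on it later.
	 */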
	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		return 0;
	}

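	/* The adapter does 64-bit DMA; there is no 32-bit fallback here, so
	 * the probe fails if the platform can't provide a 64-bit mask.
	 */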
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_free_adapter;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= CXGB4_DEV_ENABLED;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free Lists Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing
	 * that all preceding PCIe Transaction Layer Packets will be
	 * processed first.)  But some Root Complexes have various issues
	 * with Upstream Transaction Layer Packets with the Relaxed Ordering
	 * Attribute set.  PCIe devices sitting under such Root Complexes
	 * will have the Relaxed Ordering bit cleared in their configuration
	 * space, so we check our PCIe configuration space to see if it's
	 * flagged with advice against using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

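	/* Basic, firmware-independent adapter preparation (chip and VPD
	 * parameters) before the first firmware handshake.
	 */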
	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
				 err);
			err = 0;
		}
	}

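	/* T5 and later expose user doorbells through BAR2; map it
	 * write-combined so doorbell writes can be coalesced.  That is only
	 * safe when the egress queues-per-page setting fits within the 128B
	 * write-combining segments of a page, which is checked below.
	 */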
	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is enabled only
		 * when the number of egress queues per page (from the
		 * SGE_EGRESS_QUEUES_PER_PAGE_PF register) does not exceed
		 * the number of 128B segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

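	/* Set up the PCIe memory windows, which give the host indirect
	 * access to adapter memory, then attach to the firmware and pull
	 * down the adapter configuration.
	 */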
	setup_memwin(adapter);
	err = adap_init0(adapter, 0);
	if (err)
		goto out_unmap_bar;

	setup_memwin_rdma(adapter);

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);

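	/* Allocate a net device per port and set up its features here;
	 * actual registration is deferred until after the queues and
	 * offload tables are configured below.
	 */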
	for_each_port(adapter, i) {
		/* To support MQPRIO offload, we need some extra queues for
		 * the ETHOFLD TIDs.  Keep this equal to MAX_ATIDS for now;
		 * once we connect to the firmware and query the EOTID
		 * parameters, we'll learn the actual number of EOTIDs
		 * supported.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS + MAX_ATIDS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
						   NETIF_F_GSO_UDP_TUNNEL_CSUM |
						   NETIF_F_TSO | NETIF_F_TSO6;

			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
					       NETIF_F_HW_TLS_RECORD;

			if (adapter->rawf_cnt)
				netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
		}

		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
			netdev->hw_features |= NETIF_F_HW_TLS_TX;
			netdev->tlsdev_ops = &cxgb4_ktls_ops;
			/* initialize the refcount */
			refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
		}
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
			netdev->hw_enc_features |= NETIF_F_HW_ESP;
			netdev->features |= NETIF_F_HW_ESP;
			netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
		}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;              /* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
		cxgb4_dcb_version_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & CXGB4_FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

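	/* Without a working firmware connection we can't configure queues
	 * or offload tables, so skip straight to net device registration.
	 */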
	if (!(adapter->flags & CXGB4_FW_OK))
		goto fw_attach_fail;

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

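	/* If the Lookup Engine has hashing enabled, record the hash TID
	 * region geometry (size and base) for the offload and hash-filter
	 * code; the register layout differs between T5 and T6.
	 */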
	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 v;

			v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
			if (chip_ver <= CHELSIO_T5) {
				adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
				v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
				adapter->tids.hash_base = v / 4;
			} else {
				adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
				v = t4_read_reg(adapter,
						T6_LE_DB_HASH_TID_BASE_A);
				adapter->tids.hash_base = v;
			}
		}
	}

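	/* The TID table tracks offloaded connections.  If it can't be
	 * allocated, disable offload entirely; otherwise bring up the tc
	 * and ethtool filter offload subsystems that depend on it.
	 */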
	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");

		if (cxgb4_init_tc_mqprio(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc mqprio, continuing\n");

		if (cxgb4_init_tc_matchall(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc matchall, continuing\n");
		if (cxgb4_init_ethtool_filters(adapter))
			dev_warn(&pdev->dev,
				 "could not initialize ethtool filters, continuing\n");
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= CXGB4_USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= CXGB4_USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	cxgb4_init_mps_ref_entries(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_non_data_intr(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Non Data interrupt allocation failed, err: %d\n", err);
		goto out_free_dev;
	}

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err: %d\n", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter))
		cxgb4_uld_enable(adapter);

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
		cxgb4_thermal_init(adapter);

	print_adapter_info(adapter);
	return 0;

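	/* Error unwind: each label below releases what was acquired after
	 * the previous one, in reverse order of acquisition.
	 */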
 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & CXGB4_USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}