static int tg3_reset_hw()

in ethernet/broadcom/tg3.c [9854:10765]


static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

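	/* Quiesce the device before reprogramming it: mask interrupts,
	 * pause the management firmware, and post the pre-reset
	 * signature.
	 */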
	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

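	/* 5784 AX: take the CPMU out of its link-aware and link-idle
	 * power modes and drop the MAC clocks to 6.25MHz.
	 */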
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

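	/* 57780: stretch the ASPM L1 entry threshold to 4ms, program a
	 * 13 clock electrical idle exit delay, clear stale correctable
	 * error status, and keep the PLL powered in L1.
	 */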
	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

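	/* Allow the PCIe PHY PLL to power down during L1 on chips that
	 * support it.
	 */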
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

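	/* 5704 A0 PCI-X workaround: force the chip to retry the same
	 * DMA request.
	 */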
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

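	/* Program the host DMA read/write control.  57765+ parts keep
	 * cache alignment enabled and layer per-chip workaround bits on
	 * top of the probed dma_rwctrl value.
	 */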
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
	 * a south bridge limitation.  As a workaround, the driver sets MRRS
	 * to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Set up the timer prescaler register.  The clock is always 66 MHz,
	 * so the value 65 yields a 1 MHz timer tick.
	 */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else {
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
		}
	} else {
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
	}

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATS_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

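	/* Let the read DMA engine assist hardware TSO: IPv4 LSO on all
	 * HW TSO chips, IPv6 LSO additionally on 5785, 57780 and
	 * 57765+.
	 */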
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
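	/* Wait up to 20ms (2000 * 10us) for the engine to go idle. */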
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

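	/* MSI-X: enable the message interrupt block, multivector
	 * delivery when more than one vector is in use, and one-shot
	 * disable unless one-shot MSI is active.
	 */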
	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

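	/* In PCI-X mode, cap the maximum memory read byte count at 2K
	 * on 5703 and 5704; the 5704 also has its split transaction
	 * limit cleared.
	 */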
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

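	/* 5719/5720: if any read DMA channel reports a length beyond
	 * the largest valid MTU, enable the LSO read DMA workaround
	 * bit and note the erratum.
	 */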
	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors from the firmware download.  If the
		 * download fails, the device will operate with EEE disabled.
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

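	/* RSS: program the indirection table and load a random
	 * 40-byte (ten word) hash key.
	 */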
	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

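	/* Unless phylib manages the PHY, bring the link up now and
	 * reset the PHY CRC error counter by enabling it and reading
	 * it back once.
	 */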
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

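	/* Zero the unused receive rule slots: 5705+ non-5780-class
	 * chips expose 8 rules, everything else 16, and the last four
	 * are reserved when ASF is enabled.
	 */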
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}