in arch/mips/mach-octeon/octeon_qlm.c [4634:5256]
static int octeon_configure_qlm_cn73xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
int ref_clk_sel, int ref_clk_input)
{
cvmx_gserx_phy_ctl_t phy_ctl;
cvmx_gserx_lane_mode_t lmode;
cvmx_gserx_cfg_t cfg;
cvmx_gserx_refclk_sel_t refclk_sel;
int is_pcie = 0;
int is_bgx = 0;
int lane_mode = 0;
short lmac_type[4] = { 0 };
short sds_lane[4] = { 0 };
bool alt_pll = false;
int enable_training = 0;
int additional_lmacs = 0;
debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d\n",
__func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
/* Don't configure QLM4 if it is not in SATA mode */
if (qlm == 4) {
if (mode == CVMX_QLM_MODE_SATA_2X1)
return __setup_sata(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
printf("Invalid mode for QLM4\n");
return 0;
}
cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
/* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
* during speed change. Change SLI_WINDOW_CTL[time] to 525us
*/
__set_sli_window_ctl_errata_31375(0);
/* If PEM is in EP, no need to do anything */
if (cfg.s.pcie && rc == 0 &&
(mode == CVMX_QLM_MODE_PCIE || mode == CVMX_QLM_MODE_PCIE_1X8 ||
mode == CVMX_QLM_MODE_PCIE_1X2)) {
debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
return 0;
}
/* Set the reference clock to use */
refclk_sel.u64 = 0;
if (ref_clk_input == 0) { /* External ref clock */
refclk_sel.s.com_clk_sel = 0;
refclk_sel.s.use_com1 = 0;
} else if (ref_clk_input == 1) {
refclk_sel.s.com_clk_sel = 1;
refclk_sel.s.use_com1 = 0;
} else {
refclk_sel.s.com_clk_sel = 1;
refclk_sel.s.use_com1 = 1;
}
csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
/* Reset the QLM after changing the reference clock */
phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_reset = 1;
phy_ctl.s.phy_pd = 1;
csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
udelay(1000);
/* Check if QLM is a valid BGX interface */
if (mode != CVMX_QLM_MODE_PCIE && mode != CVMX_QLM_MODE_PCIE_1X2 &&
mode != CVMX_QLM_MODE_PCIE_1X8) {
if (__is_qlm_valid_bgx_cn73xx(qlm))
return -1;
}
switch (mode) {
case CVMX_QLM_MODE_PCIE:
case CVMX_QLM_MODE_PCIE_1X2:
case CVMX_QLM_MODE_PCIE_1X8: {
cvmx_pemx_cfg_t pemx_cfg;
cvmx_pemx_on_t pemx_on;
cvmx_pemx_qlm_t pemx_qlm;
cvmx_rst_soft_prstx_t rst_prst;
int port = 0;
is_pcie = 1;
if (qlm < 5 && mode == CVMX_QLM_MODE_PCIE_1X2) {
printf("Invalid PCIe mode(%d) for QLM%d\n", mode, qlm);
return -1;
}
if (ref_clk_sel == 0) {
refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
refclk_sel.s.pcie_refclk125 = 0;
csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
if (gen3 == 0) /* Gen1 mode */
lane_mode = R_2_5G_REFCLK100;
else if (gen3 == 1) /* Gen2 mode */
lane_mode = R_5G_REFCLK100;
else
lane_mode = R_8G_REFCLK100;
} else if (ref_clk_sel == 1) {
refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
refclk_sel.s.pcie_refclk125 = 1;
csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
if (gen3 == 0) /* Gen1 mode */
lane_mode = R_2_5G_REFCLK125;
else if (gen3 == 1) /* Gen2 mode */
lane_mode = R_5G_REFCLK125;
else
lane_mode = R_8G_REFCLK125;
} else {
printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
return -1;
}
switch (qlm) {
case 0: /* Either x4 or x8 based on PEM0 */
rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
rst_prst.s.soft_prst = rc;
csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
__setup_pem_reset(0, 0, !rc);
pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
/* x8 mode waits for QLM1 setup before turning on the PEM */
if (mode == CVMX_QLM_MODE_PCIE) {
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
}
break;
case 1: /* Either PEM0 x8 or PEM1 x4 */
if (mode == CVMX_QLM_MODE_PCIE) {
rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
rst_prst.s.soft_prst = rc;
csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
__setup_pem_reset(0, 1, !rc);
pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
pemx_cfg.cn78xx.lanes8 = 0;
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
} else { /* x8 mode */
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
}
break;
case 2: /* Either PEM2 x4 or PEM2 x8 or BGX0 */
{
pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(2));
pemx_qlm.cn73xx.pemdlmsel = 0;
csr_wr(CVMX_PEMX_QLM(2), pemx_qlm.u64);
rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(2));
rst_prst.s.soft_prst = rc;
csr_wr(CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
__setup_pem_reset(0, 2, !rc);
pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);
/* x8 mode waits for QLM3 setup before turning on the PEM */
if (mode == CVMX_QLM_MODE_PCIE) {
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
}
break;
}
case 3: /* Either PEM2 x8 or PEM3 x4 or BGX1 */
/* PEM2/PEM3 are configured to use QLM2/3 */
pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
if (pemx_cfg.cn78xx.lanes8) {
/* Last 4 lanes of PEM2 */
/* PEMX_CFG already setup */
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
}
/* Check if PEM3 uses QLM3 and in x4 lane mode */
if (mode == CVMX_QLM_MODE_PCIE) {
pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(3));
pemx_qlm.cn73xx.pemdlmsel = 0;
csr_wr(CVMX_PEMX_QLM(3), pemx_qlm.u64);
rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(3));
rst_prst.s.soft_prst = rc;
csr_wr(CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
__setup_pem_reset(0, 3, !rc);
pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(3));
pemx_cfg.cn78xx.lanes8 = 0;
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr(CVMX_PEMX_CFG(3), pemx_cfg.u64);
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(3));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(3), pemx_on.u64);
}
break;
case 5: /* PEM2/PEM3 x2 or BGX2 */
case 6:
port = (qlm == 5) ? 2 : 3;
if (mode == CVMX_QLM_MODE_PCIE_1X2) {
/* PEM2/PEM3 are configured to use DLM5/6 */
pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(port));
pemx_qlm.cn73xx.pemdlmsel = 1;
csr_wr(CVMX_PEMX_QLM(port), pemx_qlm.u64);
/* 2 lanes of PEM3 */
rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(port));
rst_prst.s.soft_prst = rc;
csr_wr(CVMX_RST_SOFT_PRSTX(port), rst_prst.u64);
__setup_pem_reset(0, port, !rc);
pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(port));
pemx_cfg.cn78xx.lanes8 = 0;
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr(CVMX_PEMX_CFG(port), pemx_cfg.u64);
pemx_on.u64 = csr_rd(CVMX_PEMX_ON(port));
pemx_on.s.pemon = 1;
csr_wr(CVMX_PEMX_ON(port), pemx_on.u64);
}
break;
default:
break;
}
break;
}
case CVMX_QLM_MODE_SGMII:
is_bgx = 1;
lmac_type[0] = 0;
lmac_type[1] = 0;
lmac_type[2] = 0;
lmac_type[3] = 0;
sds_lane[0] = 0;
sds_lane[1] = 1;
sds_lane[2] = 2;
sds_lane[3] = 3;
break;
case CVMX_QLM_MODE_SGMII_2X1:
if (qlm == 5) {
is_bgx = 1;
lmac_type[0] = 0;
lmac_type[1] = 0;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0;
sds_lane[1] = 1;
} else if (qlm == 6) {
is_bgx = 1;
lmac_type[0] = -1;
lmac_type[1] = -1;
lmac_type[2] = 0;
lmac_type[3] = 0;
sds_lane[2] = 2;
sds_lane[3] = 3;
additional_lmacs = 2;
}
break;
case CVMX_QLM_MODE_XAUI:
is_bgx = 5;
lmac_type[0] = 1;
lmac_type[1] = -1;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0xe4;
break;
case CVMX_QLM_MODE_RXAUI:
is_bgx = 3;
lmac_type[0] = 2;
lmac_type[1] = 2;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0x4;
sds_lane[1] = 0xe;
break;
case CVMX_QLM_MODE_RXAUI_1X2:
if (qlm == 5) {
is_bgx = 3;
lmac_type[0] = 2;
lmac_type[1] = -1;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0x4;
}
if (qlm == 6) {
is_bgx = 3;
lmac_type[0] = -1;
lmac_type[1] = -1;
lmac_type[2] = 2;
lmac_type[3] = -1;
sds_lane[2] = 0xe;
additional_lmacs = 2;
}
break;
case CVMX_QLM_MODE_10G_KR:
enable_training = 1;
case CVMX_QLM_MODE_XFI: /* 10GR_4X1 */
is_bgx = 1;
lmac_type[0] = 3;
lmac_type[1] = 3;
lmac_type[2] = 3;
lmac_type[3] = 3;
sds_lane[0] = 0;
sds_lane[1] = 1;
sds_lane[2] = 2;
sds_lane[3] = 3;
break;
case CVMX_QLM_MODE_10G_KR_1X2:
enable_training = 1;
case CVMX_QLM_MODE_XFI_1X2:
if (qlm == 5) {
is_bgx = 1;
lmac_type[0] = 3;
lmac_type[1] = 3;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0;
sds_lane[1] = 1;
} else if (qlm == 6) {
is_bgx = 1;
lmac_type[0] = -1;
lmac_type[1] = -1;
lmac_type[2] = 3;
lmac_type[3] = 3;
sds_lane[2] = 2;
sds_lane[3] = 3;
additional_lmacs = 2;
}
break;
case CVMX_QLM_MODE_40G_KR4:
enable_training = 1;
case CVMX_QLM_MODE_XLAUI: /* 40GR4_1X4 */
is_bgx = 5;
lmac_type[0] = 4;
lmac_type[1] = -1;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0xe4;
break;
case CVMX_QLM_MODE_RGMII_SGMII:
is_bgx = 1;
lmac_type[0] = 5;
lmac_type[1] = 0;
lmac_type[2] = 0;
lmac_type[3] = 0;
sds_lane[0] = 0;
sds_lane[1] = 1;
sds_lane[2] = 2;
sds_lane[3] = 3;
break;
case CVMX_QLM_MODE_RGMII_SGMII_1X1:
if (qlm == 5) {
is_bgx = 1;
lmac_type[0] = 5;
lmac_type[1] = 0;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0;
sds_lane[1] = 1;
}
break;
case CVMX_QLM_MODE_RGMII_SGMII_2X1:
if (qlm == 6) {
is_bgx = 1;
lmac_type[0] = 5;
lmac_type[1] = -1;
lmac_type[2] = 0;
lmac_type[3] = 0;
sds_lane[0] = 0;
sds_lane[2] = 0;
sds_lane[3] = 1;
}
break;
case CVMX_QLM_MODE_RGMII_10G_KR:
enable_training = 1;
case CVMX_QLM_MODE_RGMII_XFI:
is_bgx = 1;
lmac_type[0] = 5;
lmac_type[1] = 3;
lmac_type[2] = 3;
lmac_type[3] = 3;
sds_lane[0] = 0;
sds_lane[1] = 1;
sds_lane[2] = 2;
sds_lane[3] = 3;
break;
case CVMX_QLM_MODE_RGMII_10G_KR_1X1:
enable_training = 1;
case CVMX_QLM_MODE_RGMII_XFI_1X1:
if (qlm == 5) {
is_bgx = 3;
lmac_type[0] = 5;
lmac_type[1] = 3;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0;
sds_lane[1] = 1;
}
break;
case CVMX_QLM_MODE_RGMII_40G_KR4:
enable_training = 1;
case CVMX_QLM_MODE_RGMII_XLAUI:
is_bgx = 5;
lmac_type[0] = 5;
lmac_type[1] = 4;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0x0;
sds_lane[1] = 0xe4;
break;
case CVMX_QLM_MODE_RGMII_RXAUI:
is_bgx = 3;
lmac_type[0] = 5;
lmac_type[1] = 2;
lmac_type[2] = 2;
lmac_type[3] = -1;
sds_lane[0] = 0x0;
sds_lane[1] = 0x4;
sds_lane[2] = 0xe;
break;
case CVMX_QLM_MODE_RGMII_XAUI:
is_bgx = 5;
lmac_type[0] = 5;
lmac_type[1] = 1;
lmac_type[2] = -1;
lmac_type[3] = -1;
sds_lane[0] = 0;
sds_lane[1] = 0xe4;
break;
default:
break;
}
if (is_pcie == 0)
lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
debug("%s: %d lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
alt_pll ? "true" : "false");
if (lane_mode == -1)
return -1;
if (alt_pll) {
debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
__func__, qlm, lane_mode, ref_clk_sel);
if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
printf("%s: Error: reference clock %d is not supported for qlm %d, lane mode: 0x%x\n",
__func__, ref_clk_sel, qlm, lane_mode);
return -1;
}
}
/* Power up PHY, but keep it in reset */
phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_pd = 0;
phy_ctl.s.phy_reset = 1;
csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
/* Set GSER for the interface mode */
cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
cfg.s.bgx = is_bgx & 1;
cfg.s.bgx_quad = (is_bgx >> 2) & 1;
cfg.s.bgx_dual = (is_bgx >> 1) & 1;
cfg.s.pcie = is_pcie;
csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);
/* Lane mode */
lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
lmode.s.lmode = lane_mode;
csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);
/* Program lmac_type to figure out the type of BGX interface configured */
if (is_bgx) {
int bgx = (qlm < 4) ? qlm - 2 : 2;
cvmx_bgxx_cmrx_config_t cmr_config;
cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
int index, total_lmacs = 0;
for (index = 0; index < 4; index++) {
cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
cmr_config.s.enable = 0;
cmr_config.s.data_pkt_rx_en = 0;
cmr_config.s.data_pkt_tx_en = 0;
if (lmac_type[index] != -1) {
cmr_config.s.lmac_type = lmac_type[index];
cmr_config.s.lane_to_sds = sds_lane[index];
total_lmacs++;
/* RXAUI takes up 2 lmacs */
if (lmac_type[index] == 2)
total_lmacs += 1;
}
csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
/* Errata (TBD) RGMII doesn't turn on clock if its by
* itself. Force them on
*/
if (lmac_type[index] == 5) {
cvmx_bgxx_cmr_global_config_t global_config;
global_config.u64 = csr_rd(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
global_config.s.bgx_clk_enable = 1;
csr_wr(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_config.u64);
}
/* Enable training for 10G_KR/40G_KR4 modes */
if (enable_training == 1 &&
(lmac_type[index] == 3 || lmac_type[index] == 4)) {
spu_pmd_control.u64 =
csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
spu_pmd_control.s.train_en = 1;
csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
spu_pmd_control.u64);
}
}
/* Update the total number of lmacs */
rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
}
/* Bring phy out of reset */
phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_reset = 0;
csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
/*
* Wait 1us until the management interface is ready to accept
* read/write commands.
*/
udelay(1);
/* Wait for reset to complete and the PLL to lock */
/* PCIe mode doesn't become ready until the PEM block attempts to bring
* the interface up. Skip this check for PCIe
*/
if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm),
cvmx_gserx_qlm_stat_t,
rst_rdy, ==, 1, 10000)) {
printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
return -1;
}
/* Configure the gser pll */
if (!is_pcie)
__qlm_setup_pll_cn78xx(0, qlm);
/* Wait for reset to complete and the PLL to lock */
if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
pll_lock, ==, 1, 10000)) {
printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
return -1;
}
/* Errata GSER-26150: 10G PHY PLL Temperature Failure */
/* This workaround must be completed after the final deassertion of
* GSERx_PHY_CTL[PHY_RESET].
* Apply the workaround to 10.3125Gbps and 8Gbps only.
*/
if (OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_0) &&
(baud_mhz == 103125 || (is_pcie && gen3 == 2)))
__qlm_errata_gser_26150(0, qlm, is_pcie);
/* Errata GSER-26636: 10G-KR/40G-KR - Inverted Tx Coefficient Direction
* Change. Applied to all 10G standards (required for KR) but also
* applied to other standards in case software training is used
*/
if (baud_mhz == 103125)
__qlm_kr_inc_dec_gser26636(0, qlm);
/* Errata GSER-25992: RX EQ Default Settings Update (CTLE Bias) */
/* This workaround will only be applied to Pass 1.x */
/* It will also only be applied if the SERDES data-rate is 10G */
/* or if PCIe Gen3 (gen3=2 is PCIe Gen3) */
if (baud_mhz == 103125 || (is_pcie && gen3 == 2))
cvmx_qlm_gser_errata_25992(0, qlm);
/* Errata GSER-27140: Updating the RX EQ settings due to temperature
* drift sensitivities
*/
/* This workaround will also only be applied if the SERDES data-rate is 10G */
if (baud_mhz == 103125)
__qlm_rx_eq_temp_gser27140(0, qlm);
/* Reduce the voltage amplitude coming from Marvell PHY and also change
* DFE threshold settings for RXAUI interface
*/
if (is_bgx) {
int l;
for (l = 0; l < 4; l++) {
cvmx_gserx_lanex_rx_cfg_4_t cfg4;
cvmx_gserx_lanex_tx_cfg_0_t cfg0;
if (lmac_type[l] == 2) {
/* Change the Q/QB error sampler 0 threshold from 0xD to 0xF */
cfg4.u64 = csr_rd(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
csr_wr(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
/* Reduce the voltage swing to roughly 460mV */
cfg0.u64 = csr_rd(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
cfg0.s.cfg_tx_swing = 0x12;
csr_wr(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
}
}
}
return 0;
}