in arch/mips/mach-octeon/octeon_qlm.c [4108:4600]
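/**
 * Configure a QLM/DLM on a CN78XX-family SoC. (Summary inferred from the
 * function body below, not an authoritative contract.)
 *
 * @node:          node the QLM is on
 * @qlm:           QLM to configure
 * @baud_mhz:      speed in Mhz (e.g. 103125 for 10.3125 Gbaud)
 * @mode:          mode to configure (CVMX_QLM_MODE_*)
 * @rc:            1 for root complex, 0 for endpoint (PCIe only)
 * @gen3:          PCIe generation: 0 = Gen1, 1 = Gen2, 2 = Gen3 (PCIe only)
 * @ref_clk_sel:   for PCIe, 0 = 100 MHz and 1 = 125 MHz reference; other
 *                 modes pass it to __get_lane_mode_for_speed_and_ref_clk()
 * @ref_clk_input: 0 = external pads, 1 = common clock 0, else common clock 1
 *
 * Return: 0 on success, -1 on error.
 */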
int octeon_configure_qlm_cn78xx(int node, int qlm, int baud_mhz, int mode, int rc, int gen3,
int ref_clk_sel, int ref_clk_input)
{
cvmx_gserx_phy_ctl_t phy_ctl;
cvmx_gserx_lane_mode_t lmode;
cvmx_gserx_cfg_t cfg;
cvmx_gserx_refclk_sel_t refclk_sel;
int is_pcie = 0;
int is_ilk = 0;
int is_bgx = 0;
int lane_mode = 0;
int lmac_type = 0;
bool alt_pll = false;
int num_ports = 0;
int lane_to_sds = 0;
debug("%s(node: %d, qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d\n",
__func__, node, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
if (OCTEON_IS_MODEL(OCTEON_CN76XX) && qlm > 4) {
debug("%s: qlm %d not present on CN76XX\n", __func__, qlm);
return -1;
}
	/* Errata PEM-31375: PEM RSL accesses to PCLK registers can time out
	 * during speed change. Change SLI_WINDOW_CTL[TIME] to 525 us.
	 */
__set_sli_window_ctl_errata_31375(node);
cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
/* If PEM is in EP, no need to do anything */
if (cfg.s.pcie && rc == 0) {
debug("%s: node %d, qlm %d is in PCIe endpoint mode, returning\n",
__func__, node, qlm);
return 0;
}
	/* Set the reference clock to use: the external pads or one of the
	 * two on-chip common clocks.
	 */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External reference clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) { /* Common clock 0 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else { /* Common clock 1 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}
csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
/* Reset the QLM after changing the reference clock */
phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_reset = 1;
phy_ctl.s.phy_pd = 1;
csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
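	/* Hold the QLM in reset for 1 ms while the new reference clock
	 * selection takes effect.
	 */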
udelay(1000);
	/* Always restore the default reference-clock entries for this QLM
	 * in the cached table.
	 */
	memcpy(ref_clk_cn78xx[node][qlm], def_ref_clk_cn78xx, sizeof(def_ref_clk_cn78xx));
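	/* Per-mode setup: pick the GSER lane mode and, for BGX modes, the
	 * LMAC type, lane-to-SDS mapping, and number of LMAC ports.
	 */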
switch (mode) {
case CVMX_QLM_MODE_PCIE:
case CVMX_QLM_MODE_PCIE_1X8: {
cvmx_pemx_cfg_t pemx_cfg;
cvmx_pemx_on_t pemx_on;
is_pcie = 1;
if (ref_clk_sel == 0) {
refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
refclk_sel.s.pcie_refclk125 = 0;
csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
if (gen3 == 0) /* Gen1 mode */
lane_mode = R_2_5G_REFCLK100;
else if (gen3 == 1) /* Gen2 mode */
lane_mode = R_5G_REFCLK100;
else
lane_mode = R_8G_REFCLK100;
} else if (ref_clk_sel == 1) {
refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
refclk_sel.s.pcie_refclk125 = 1;
csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
if (gen3 == 0) /* Gen1 mode */
lane_mode = R_2_5G_REFCLK125;
else if (gen3 == 1) /* Gen2 mode */
lane_mode = R_5G_REFCLK125;
else
lane_mode = R_8G_REFCLK125;
} else {
printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
return -1;
}
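		/* Per-QLM PEM setup: configure the PERST soft reset per the
		 * rc flag, then program the PEM lane width (x4 vs. x8),
		 * host/endpoint mode, and PCIe generation.
		 */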
switch (qlm) {
case 0: /* Either x4 or x8 based on PEM0 */
{
cvmx_rst_soft_prstx_t rst_prst;
rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(0));
rst_prst.s.soft_prst = rc;
csr_wr_node(node, CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
__setup_pem_reset(node, 0, !rc);
pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr_node(node, CVMX_PEMX_CFG(0), pemx_cfg.u64);
/* x8 mode waits for QLM1 setup before turning on the PEM */
if (mode == CVMX_QLM_MODE_PCIE) {
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
}
break;
}
case 1: /* Either PEM0 x8 or PEM1 x4 */
{
if (mode == CVMX_QLM_MODE_PCIE) {
cvmx_rst_soft_prstx_t rst_prst;
cvmx_pemx_cfg_t pemx_cfg;
rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(1));
rst_prst.s.soft_prst = rc;
csr_wr_node(node, CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
__setup_pem_reset(node, 1, !rc);
pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(1));
pemx_cfg.cn78xx.lanes8 = 0;
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr_node(node, CVMX_PEMX_CFG(1), pemx_cfg.u64);
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(1));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				/* Last 4 lanes of PEM0 in x8 mode;
				 * PEMX_CFG was already set up via QLM0
				 */
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
			}
break;
}
case 2: /* Either PEM2 x4 or PEM2 x8 */
{
cvmx_rst_soft_prstx_t rst_prst;
rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(2));
rst_prst.s.soft_prst = rc;
csr_wr_node(node, CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
__setup_pem_reset(node, 2, !rc);
pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr_node(node, CVMX_PEMX_CFG(2), pemx_cfg.u64);
/* x8 mode waits for QLM3 setup before turning on the PEM */
if (mode == CVMX_QLM_MODE_PCIE) {
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
}
break;
}
case 3: /* Either PEM2 x8 or PEM3 x4 */
{
pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
if (pemx_cfg.cn78xx.lanes8) {
/* Last 4 lanes of PEM2 */
/* PEMX_CFG already setup */
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
}
			/* Check if PEM3 uses QLM3 in x4 lane mode */
if (mode == CVMX_QLM_MODE_PCIE) {
cvmx_rst_soft_prstx_t rst_prst;
rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
rst_prst.s.soft_prst = rc;
csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
__setup_pem_reset(node, 3, !rc);
pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
pemx_cfg.cn78xx.lanes8 = 0;
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
}
break;
}
case 4: /* Either PEM3 x4 or PEM3 x8 */
{
if (mode == CVMX_QLM_MODE_PCIE_1X8) {
/* Last 4 lanes of PEM3 */
/* PEMX_CFG already setup */
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
} else {
/* 4 lanes of PEM3 */
cvmx_pemx_qlm_t pemx_qlm;
cvmx_rst_soft_prstx_t rst_prst;
rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
rst_prst.s.soft_prst = rc;
csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
__setup_pem_reset(node, 3, !rc);
pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
pemx_cfg.cn78xx.lanes8 = 0;
pemx_cfg.cn78xx.hostmd = rc;
pemx_cfg.cn78xx.md = gen3;
csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);
/* PEM3 is on QLM4 */
pemx_qlm.u64 = csr_rd_node(node, CVMX_PEMX_QLM(3));
pemx_qlm.cn78xx.pem3qlm = 1;
csr_wr_node(node, CVMX_PEMX_QLM(3), pemx_qlm.u64);
pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
pemx_on.s.pemon = 1;
csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
}
break;
}
default:
break;
}
break;
}
case CVMX_QLM_MODE_ILK:
is_ilk = 1;
lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
if (lane_mode == -1)
return -1;
/* FIXME: Set lane_mode for other speeds */
break;
case CVMX_QLM_MODE_SGMII:
is_bgx = 1;
lmac_type = 0;
lane_to_sds = 1;
num_ports = 4;
lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
debug("%s: SGMII lane mode: %d, alternate PLL: %s\n", __func__, lane_mode,
alt_pll ? "true" : "false");
if (lane_mode == -1)
return -1;
break;
case CVMX_QLM_MODE_XAUI:
is_bgx = 5;
lmac_type = 1;
lane_to_sds = 0xe4;
num_ports = 1;
lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
debug("%s: XAUI lane mode: %d\n", __func__, lane_mode);
if (lane_mode == -1)
return -1;
break;
	case CVMX_QLM_MODE_RXAUI:
		is_bgx = 3;
		lmac_type = 2;
		lane_to_sds = 0;
		num_ports = 2;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: RXAUI lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
case CVMX_QLM_MODE_XFI: /* 10GR_4X1 */
case CVMX_QLM_MODE_10G_KR:
is_bgx = 1;
lmac_type = 3;
lane_to_sds = 1;
num_ports = 4;
lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
debug("%s: XFI/10G_KR lane mode: %d\n", __func__, lane_mode);
if (lane_mode == -1)
return -1;
break;
case CVMX_QLM_MODE_XLAUI: /* 40GR4_1X4 */
case CVMX_QLM_MODE_40G_KR4:
is_bgx = 5;
lmac_type = 4;
lane_to_sds = 0xe4;
num_ports = 1;
lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
debug("%s: XLAUI/40G_KR4 lane mode: %d\n", __func__, lane_mode);
if (lane_mode == -1)
return -1;
break;
case CVMX_QLM_MODE_DISABLED:
/* Power down the QLM */
phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_pd = 1;
phy_ctl.s.phy_reset = 1;
csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
		/* Disable all modes */
		csr_wr_node(node, CVMX_GSERX_CFG(qlm), 0);
		/* Nothing else to do for a disabled QLM */
		return 0;
default:
break;
}
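	/* Some speed/reference-clock combinations require the QLM's
	 * alternate PLL settings, flagged by
	 * __get_lane_mode_for_speed_and_ref_clk() above.
	 */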
if (alt_pll) {
debug("%s: alternate PLL settings used for node %d, qlm %d, lane mode %d, reference clock %d\n",
__func__, node, qlm, lane_mode, ref_clk_sel);
if (__set_qlm_ref_clk_cn78xx(node, qlm, lane_mode, ref_clk_sel)) {
printf("%s: Error: reference clock %d is not supported for node %d, qlm %d\n",
__func__, ref_clk_sel, node, qlm);
return -1;
}
}
/* Power up PHY, but keep it in reset */
phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_pd = 0;
phy_ctl.s.phy_reset = 1;
csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
/* Errata GSER-20788: GSER(0..13)_CFG[BGX_QUAD]=1 is broken. Force the
* BGX_QUAD bit to be clear for CN78XX pass 1.x
*/
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
is_bgx &= 3;
/* Set GSER for the interface mode */
cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
cfg.s.ila = is_ilk;
cfg.s.bgx = is_bgx & 1;
cfg.s.bgx_quad = (is_bgx >> 2) & 1;
cfg.s.bgx_dual = (is_bgx >> 1) & 1;
cfg.s.pcie = is_pcie;
csr_wr_node(node, CVMX_GSERX_CFG(qlm), cfg.u64);
/* Lane mode */
lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
lmode.s.lmode = lane_mode;
csr_wr_node(node, CVMX_GSERX_LANE_MODE(qlm), lmode.u64);
	/* BGX0 can connect to QLM0 or QLM2, and BGX1 to QLM1 or QLM3.
	 * Program the mux select bit if we're one of these QLMs and we're
	 * using BGX.
	 */
if (qlm < 4 && is_bgx) {
int bgx = qlm & 1;
int use_upper = (qlm >> 1) & 1;
cvmx_bgxx_cmr_global_config_t global_cfg;
global_cfg.u64 = csr_rd_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
global_cfg.s.pmux_sds_sel = use_upper;
csr_wr_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_cfg.u64);
}
/* Bring phy out of reset */
phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
phy_ctl.s.phy_reset = 0;
csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
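	/* Read back PHY_CTL so the reset deassertion posts before the delay */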
csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	/*
	 * Wait at least 250 ns until the management interface is ready to
	 * accept read/write commands; udelay(1) is the smallest delay
	 * available.
	 */
	udelay(1);
if (is_bgx) {
int bgx = (qlm < 2) ? qlm : qlm - 2;
cvmx_bgxx_cmrx_config_t cmr_config;
int index;
for (index = 0; index < num_ports; index++) {
cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx));
cmr_config.s.enable = 0;
cmr_config.s.data_pkt_tx_en = 0;
cmr_config.s.data_pkt_rx_en = 0;
cmr_config.s.lmac_type = lmac_type;
			/* lane_to_sds == 1: one lane per LMAC (lane = index);
			 * lane_to_sds == 0: RXAUI two-lane pairs (0x4, 0xe);
			 * otherwise use the mapping as given (e.g. 0xe4).
			 */
			if (lane_to_sds == 1)
				cmr_config.s.lane_to_sds = index;
			else if (lane_to_sds == 0)
				cmr_config.s.lane_to_sds = index ? 0xe : 0x4;
			else
				cmr_config.s.lane_to_sds = lane_to_sds;
			csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
}
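		/* Tell BGX how many logical MACs (LMACs) share this QLM */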
csr_wr_node(node, CVMX_BGXX_CMR_TX_LMACS(bgx), num_ports);
csr_wr_node(node, CVMX_BGXX_CMR_RX_LMACS(bgx), num_ports);
/* Enable/disable training for 10G_KR/40G_KR4/XFI/XLAUI modes */
for (index = 0; index < num_ports; index++) {
cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
spu_pmd_control.u64 =
csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
if (mode == CVMX_QLM_MODE_10G_KR || mode == CVMX_QLM_MODE_40G_KR4)
spu_pmd_control.s.train_en = 1;
else if (mode == CVMX_QLM_MODE_XFI || mode == CVMX_QLM_MODE_XLAUI)
spu_pmd_control.s.train_en = 0;
csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
spu_pmd_control.u64);
}
}
/* Configure the gser pll */
if (!is_pcie)
__qlm_setup_pll_cn78xx(node, qlm);
/* Wait for reset to complete and the PLL to lock */
if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm),
cvmx_gserx_pll_stat_t,
pll_lock, ==, 1, 10000)) {
printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n",
node, qlm);
return -1;
}
	/* Apply the PCIe errata workarounds, or GSER-20844 for other modes */
if (is_pcie)
__cvmx_qlm_pcie_errata_cn78xx(node, qlm);
else
__qlm_init_errata_20844(node, qlm);
	/* Wait for the QLM to come out of reset. PCIe mode doesn't become
	 * ready until the PEM block attempts to bring the interface up, so
	 * skip this check for PCIe.
	 */
if (!is_pcie && CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm),
cvmx_gserx_qlm_stat_t, rst_rdy,
==, 1, 10000)) {
printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n",
node, qlm);
return -1;
}
	/* Errata GSER-26150: 10G PHY PLL Temperature Failure.
	 * This workaround must be completed after the final deassertion of
	 * GSERx_PHY_CTL[PHY_RESET]. Apply it to 10.3125 Gbps and 8 Gbps
	 * (PCIe Gen3) only.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		__qlm_errata_gser_26150(node, qlm, is_pcie);
/* Errata GSER-26636: 10G-KR/40G-KR - Inverted Tx Coefficient Direction
* Change. Applied to all 10G standards (required for KR) but also
* applied to other standards in case software training is used
*/
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && baud_mhz == 103125)
__qlm_kr_inc_dec_gser26636(node, qlm);
	/* Errata GSER-25992: RX EQ Default Settings Update (CTLE Bias).
	 * Only applied on pass 1.x, and only when the SERDES data rate is
	 * 10G or the QLM is PCIe Gen3 (gen3 == 2).
	 */
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
(baud_mhz == 103125 || (is_pcie && gen3 == 2)))
cvmx_qlm_gser_errata_25992(node, qlm);
	/* Errata GSER-27140: Update the RX EQ settings to compensate for
	 * temperature-drift sensitivities. Only applied when the SERDES
	 * data rate is 10G.
	 */
if (baud_mhz == 103125)
__qlm_rx_eq_temp_gser27140(node, qlm);
	/* Reduce the voltage amplitude coming from the Marvell PHY and also
	 * change the DFE threshold settings for the RXAUI interface
	 */
if (is_bgx && mode == CVMX_QLM_MODE_RXAUI) {
int l;
for (l = 0; l < 4; l++) {
cvmx_gserx_lanex_rx_cfg_4_t cfg4;
cvmx_gserx_lanex_tx_cfg_0_t cfg0;
/* Change the Q/QB error sampler 0 threshold from 0xD to 0xF */
cfg4.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
/* Reduce the voltage swing to roughly 460mV */
cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
cfg0.s.cfg_tx_swing = 0x12;
csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
}
}
return 0;
}
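
/* Example (hypothetical board code, not from this file): bring up QLM2 on
 * node 0 as a x4 PCIe Gen3 root complex clocked from common clock 0 at
 * 100 MHz. baud_mhz is nominal here since the PCIe path derives the rate
 * from gen3.
 *
 *	octeon_configure_qlm_cn78xx(0, 2, 8000, CVMX_QLM_MODE_PCIE,
 *				    1, 2, 0, 1);
 */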