in arch/mips/mach-octeon/octeon_qlm.c [5344:5765]
/**
 * octeon_configure_qlm_cnf75xx() - Configure a CNF75xx QLM (SerDes quad) for
 * the requested interface mode.
 *
 * Programs the GSER reference clock, resets the PHY, selects the lane mode and
 * interface type (PCIe / SRIO / BGX / RMAC), then brings the PHY out of reset
 * and waits for the PLL to lock.
 *
 * @qlm:           QLM/DLM index to configure (0-8; values > 8 are rejected)
 * @baud_mhz:      Desired lane baud rate. NOTE(review): mostly in Mbaud
 *                 (1250/2500/3125/5000); 10.3125 Gbaud appears to be encoded
 *                 as 103125 (see the GSER-27140 check at the bottom) — confirm
 *                 against callers.
 * @mode:          CVMX_QLM_MODE_* interface selection
 * @rc:            For PCIe/SRIO: nonzero = root complex / host, 0 = endpoint
 *                 (endpoint mode is assumed to be pre-configured and is left
 *                 untouched)
 * @gen3:          PCIe speed select: 0 = Gen1, 1 = Gen2, otherwise Gen3
 * @ref_clk_sel:   Reference clock frequency: 0 = 100 MHz, 1 = 125 MHz,
 *                 2 = 156.25 MHz (only 0/1 valid for PCIe)
 * @ref_clk_input: Reference clock source: 0 = external, 1 = common clock 0,
 *                 otherwise common clock 1
 *
 * Return: 0 on success (or when the QLM is already in endpoint mode and is
 *         deliberately left alone), -1 on invalid arguments or timeout.
 */
static int octeon_configure_qlm_cnf75xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
					int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	int is_pcie = 0;
	int is_bgx = 0;
	int is_srio = 0;
	int is_rmac = 0;
	int is_rmac_pipe = 0;
	int lane_mode = 0;
	/* Per-LMAC BGX configuration; -1 marks an unused LMAC slot */
	short lmac_type[4] = { 0 };
	short sds_lane[4] = { 0 };
	bool alt_pll = false;
	int enable_training = 0;
	int additional_lmacs = 0;
	/* SRIO port index: QLM3 drives SRIO port 1, otherwise port 0
	 * (NOTE(review): mapping taken from this expression — confirm against
	 * the CNF75xx HRM)
	 */
	int port = (qlm == 3) ? 1 : 0;
	cvmx_sriox_status_reg_t status_reg;

	debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d\n",
	      __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);

	if (qlm > 8) {
		printf("Invalid qlm%d passed\n", qlm);
		return -1;
	}

	/* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
	 * during speed change. Change SLI_WINDOW_CTL[time] to 525us
	 */
	__set_sli_window_ctl_errata_31375(0);

	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));

	/* If PEM is in EP, no need to do anything */
	if (cfg.s.pcie && rc == 0) {
		debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Likewise leave an SRIO endpoint configuration untouched */
	if (cfg.s.srio && rc == 0) {
		debug("%s: qlm %d is in SRIO endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Set the reference clock to use */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) { /* Common clock 0 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else { /* Common clock 1 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE_2X1: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_rst_soft_prstx_t rst_prst;

		is_pcie = 1;

		/* Only QLM0/QLM1 can carry PCIe on this part */
		if (qlm > 1) {
			printf("Invalid PCIe mode for QLM%d\n", qlm);
			return -1;
		}

		/* Pick the lane mode from the reference clock frequency and
		 * the requested PCIe generation
		 */
		if (ref_clk_sel == 0) { /* 100 MHz reference */
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) { /* 125 MHz reference */
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		switch (qlm) {
		case 0: /* Either x4 or x2 based on PEM0 */
			rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(0, 0, !rc);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cnf75xx.hostmd = rc;
			/* lanes8 actually selects x4 (both QLM0+QLM1) here */
			pemx_cfg.cnf75xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE);
			pemx_cfg.cnf75xx.md = gen3;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
			/* x4 mode waits for QLM1 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		case 1: /* Either PEM0 x4 or PEM1 x2 */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				/* QLM1 carries its own PEM1 link */
				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(0, 1, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
				pemx_cfg.cnf75xx.hostmd = rc;
				pemx_cfg.cnf75xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				/* QLM1 is the second half of a PEM0 x4 link;
				 * PEM0 was configured when QLM0 was set up, so
				 * only turn it on now
				 */
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_SRIO_1X4:
	case CVMX_QLM_MODE_SRIO_2X2:
	case CVMX_QLM_MODE_SRIO_4X1: {
		int spd = 0xf;

		/* NOTE(review): fuse 1601 presumably indicates an SRIO-disabled
		 * part variant — confirm against the fuse map
		 */
		if (cvmx_fuse_read(1601)) {
			debug("SRIO is not supported on cnf73xx model\n");
			return -1;
		}

		/* Map (baud rate, reference clock) to the SRIO spd encoding;
		 * 0xf means "disabled" and flags an unsupported combination
		 */
		switch (baud_mhz) {
		case 1250:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x3;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0xa;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0x4;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 2500:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x2;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0x9;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0x7;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 3125:
			/* 3.125 Gbaud is not supported from a 100 MHz clock */
			switch (ref_clk_sel) {
			case 1: /* 125 MHz ref clock */
				spd = 0x8;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0xe;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 5000:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x0;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0x6;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0xb;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		default:
			spd = 0xf;
			break;
		}

		if (spd == 0xf) {
			printf("ERROR: Invalid SRIO speed (%d) configured for QLM%d\n", baud_mhz,
			       qlm);
			return -1;
		}

		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.spd = spd;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		is_srio = 1;
		break;
	}

	case CVMX_QLM_MODE_SGMII_2X1:
		/* BGX0 SGMII: QLM4 provides LMACs 0-1, QLM5 provides LMACs 2-3 */
		if (qlm == 4) {
			is_bgx = 1;
			lmac_type[0] = 0; /* lmac_type 0 = SGMII */
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			/* Account for the two LMACs already owned by QLM4 */
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_10G_KR_1X2:
		/* 10G-KR is XFI plus link training */
		enable_training = 1;
		/* fallthrough */
	case CVMX_QLM_MODE_XFI_1X2:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 3; /* lmac_type 3 = 10G_R (XFI) */
			lmac_type[3] = 3;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_CPRI: /* CPRI / JESD204B */
		is_rmac = 1;
		break;
	case CVMX_QLM_MODE_SDL: /* Serdes Lite (SDL) */
		is_rmac = 1;
		is_rmac_pipe = 1;
		lane_mode = 1;
		break;
	default:
		break;
	}

	/* For everything except SDL (fixed lane_mode above) and PCIe (lane
	 * mode chosen in the PCIe case), derive the lane mode from baud rate
	 * and reference clock
	 */
	if (is_rmac_pipe == 0 && is_pcie == 0) {
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz,
								  &alt_pll);
	}

	debug("%s: %d lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
	      alt_pll ? "true" : "false");

	if (lane_mode == -1)
		return -1;

	if (alt_pll) {
		/* The chosen speed needs non-default PLL dividers for this
		 * reference clock
		 */
		debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
		      __func__, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for qlm %d\n",
			       __func__, ref_clk_sel, qlm);
			return -1;
		}
	}

	/* Power up PHY, but keep it in reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Set GSER for the interface mode */
	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	cfg.s.srio = is_srio;
	cfg.s.rmac = is_rmac;
	cfg.s.rmac_pipe = is_rmac_pipe;
	csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Lane mode */
	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/* Because of the Errata where quad mode does not work, program
	 * lmac_type to figure out the type of BGX interface configured
	 */
	if (is_bgx) {
		int bgx = 0;
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
		cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
		int index, total_lmacs = 0;

		for (index = 0; index < 4; index++) {
			/* Disable the LMAC while (re)programming its type and
			 * lane mapping
			 */
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			if (lmac_type[index] != -1) {
				cmr_config.s.lmac_type = lmac_type[index];
				cmr_config.s.lane_to_sds = sds_lane[index];
				total_lmacs++;
			}
			csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);

			/* Enable training for 10G_KR/40G_KR4 modes */
			if (enable_training == 1 &&
			    (lmac_type[index] == 3 || lmac_type[index] == 4)) {
				spu_pmd_control.u64 =
					csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
				spu_pmd_control.s.train_en = 1;
				csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				       spu_pmd_control.u64);
			}
		}

		/* Update the total number of lmacs */
		rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
		rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
		csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
		/* RX and TX LMAC counts share the same layout/value */
		csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
	}

	/* Bring phy out of reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/*
	 * Wait 1us until the management interface is ready to accept
	 * read/write commands.
	 */
	udelay(1);

	/* SRIO: just flag the port as SRIO and finish — the remaining PLL
	 * bring-up is not needed here
	 */
	if (is_srio) {
		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.srio = 1;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		return 0;
	}

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
					      rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Configure the gser pll */
	if (is_rmac)
		__rmac_pll_config(baud_mhz, qlm, mode);
	else if (!(is_pcie || is_srio))
		__qlm_setup_pll_cn78xx(0, qlm);

	/* Wait for reset to complete and the PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				  pll_lock, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
		return -1;
	}

	/* Errata GSER-27140: Updating the RX EQ settings due to temperature
	 * drift sensitivities
	 */
	/* This workaround will also only be applied if the SERDES data-rate is 10G */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(0, qlm);

	return 0;
}