/*
 * initialize_ddr_clock() - excerpt from drivers/ram/octeon/octeon_ddr.c
 * (lines 358..1452 of the original file)
 */

/**
 * initialize_ddr_clock() - Bring up the DDR PLL and LMC clocks for one
 * DDR interface (steps 1..5 of the LMC initialization sequence).
 *
 * @priv:          driver private data (register access handle)
 * @ddr_conf:      board DDR configuration (DIMM SPD table, custom LMC config)
 * @cpu_hertz:     CPU core clock in Hz (not referenced in this function)
 * @ddr_hertz:     desired DDR clock speed in Hz
 * @ddr_ref_hertz: DDR PLL reference clock in Hz
 * @if_num:        LMC interface number being initialized
 * @if_mask:       bitmask of all enabled LMC interfaces
 *
 * The global (non-per-LMC) PLL/CK bring-up is performed only when called
 * for interface 0; calls for the other interfaces skip straight to the
 * per-interface tail (label not_if0). Several environment variables
 * (ddr_pll_clkr/clkf/en_idx/bwadj, ddr*_set_dclk_invert, ddr_*_ctl, ...)
 * allow manual override of the computed settings.
 *
 * Return: 0 on success (or if this interface's clock was already
 * initialized), -1 if no legal DDR PLL configuration could be computed.
 */
int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
			 u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
			 int if_num, u32 if_mask)
{
	char *s;

	if (ddr_clock_initialized(priv, if_num))
		return 0;

	if (!ddr_clock_initialized(priv, 0)) {	/* Do this once */
		union cvmx_lmcx_reset_ctl reset_ctl;
		int i;

		/*
		 * Check to see if memory is to be preserved and set global
		 * flag
		 */
		for (i = 3; i >= 0; --i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
			if (reset_ctl.s.ddr3psv == 1) {
				debug("LMC%d Preserving memory\n", i);
				set_ddr_memory_preserved(priv);

				/* Re-initialize flags */
				reset_ctl.s.ddr3pwarm = 0;
				reset_ctl.s.ddr3psoft = 0;
				reset_ctl.s.ddr3psv = 0;
				lmc_wr(priv, CVMX_LMCX_RESET_CTL(i),
				       reset_ctl.u64);
			}
		}
	}

	/*
	 * ToDo: Add support for these SoCs:
	 *
	 * if (octeon_is_cpuid(OCTEON_CN63XX) ||
	 * octeon_is_cpuid(OCTEON_CN66XX) ||
	 * octeon_is_cpuid(OCTEON_CN61XX) || octeon_is_cpuid(OCTEON_CNF71XX))
	 *
	 * and
	 *
	 * if (octeon_is_cpuid(OCTEON_CN68XX))
	 *
	 * and
	 *
	 * if (octeon_is_cpuid(OCTEON_CN70XX))
	 *
	 */

	if (octeon_is_cpuid(OCTEON_CN78XX) || octeon_is_cpuid(OCTEON_CN73XX) ||
	    octeon_is_cpuid(OCTEON_CNF75XX)) {
		union cvmx_lmcx_dll_ctl2 dll_ctl2;
		union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
		union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
		struct dimm_config *dimm_config_table =
			ddr_conf->dimm_config_table;
		int en_idx, save_en_idx, best_en_idx = 0;
		u64 clkf, clkr, max_clkf = 127;
		u64 best_clkf = 0, best_clkr = 0;
		u64 best_pll_MHz = 0;
		u64 pll_MHz;
		u64 min_pll_MHz = 800;
		u64 max_pll_MHz = 5000;
		u64 error;
		u64 best_error;
		u64 best_calculated_ddr_hertz = 0;
		u64 calculated_ddr_hertz = 0;
		u64 orig_ddr_hertz = ddr_hertz;
		/* Legal post-scalar divider settings (DDR_PS_EN encodings) */
		const int _en[] = { 1, 2, 3, 4, 5, 6, 7, 8, 10, 12 };
		int override_pll_settings;
		int new_bwadj;
		int ddr_type;
		int i;

		/* ddr_type only indicates DDR4 or DDR3 */
		ddr_type = (read_spd(&dimm_config_table[0], 0,
				     DDR4_SPD_KEY_BYTE_DEVICE_TYPE) ==
			    0x0C) ? DDR4_DRAM : DDR3_DRAM;

		/*
		 * 5.9 LMC Initialization Sequence
		 *
		 * There are 13 parts to the LMC initialization procedure:
		 *
		 * 1. DDR PLL initialization
		 *
		 * 2. LMC CK initialization
		 *
		 * 3. LMC interface enable initialization
		 *
		 * 4. LMC DRESET initialization
		 *
		 * 5. LMC CK local initialization
		 *
		 * 6. LMC RESET initialization
		 *
		 * 7. Early LMC initialization
		 *
		 * 8. LMC offset training
		 *
		 * 9. LMC internal Vref training
		 *
		 * 10. LMC deskew training
		 *
		 * 11. LMC write leveling
		 *
		 * 12. LMC read leveling
		 *
		 * 13. Final LMC initialization
		 *
		 * CN78XX supports two modes:
		 *
		 * - two-LMC mode: both LMCs 2/3 must not be enabled
		 * (LMC2/3_DLL_CTL2[DRESET] must be set to 1 and
		 * LMC2/3_DLL_CTL2[INTF_EN]
		 * must be set to 0) and both LMCs 0/1 must be enabled).
		 *
		 * - four-LMC mode: all four LMCs 0..3 must be enabled.
		 *
		 * Steps 4 and 6..13 should each be performed for each
		 * enabled LMC (either twice or four times). Steps 1..3 and
		 * 5 are more global in nature and each must be executed
		 * exactly once (not once per LMC) each time the DDR PLL
		 * changes or is first brought up. Steps 1..3 and 5 need
		 * not be performed if the DDR PLL is stable.
		 *
		 * Generally, the steps are performed in order. The exception
		 * is that the CK local initialization (step 5) must be
		 * performed after some DRESET initializations (step 4) and
		 * before other DRESET initializations when the DDR PLL is
		 * brought up or changed. (The CK local initialization uses
		 * information from some LMCs to bring up the other local
		 * CKs.) The following text describes these ordering
		 * requirements in more detail.
		 *
		 * Following any chip reset, the DDR PLL must be brought up,
		 * and all 13 steps should be executed. Subsequently, it is
		 * possible to execute only steps 4 and 6..13, or to execute
		 * only steps 8..13.
		 *
		 * The remainder of this section covers these initialization
		 * steps in sequence.
		 */

		/* Do the following init only once */
		if (if_num != 0)
			goto not_if0;

		/* Only for interface #0 ... */

		/*
		 * 5.9.3 LMC Interface-Enable Initialization
		 *
		 * LMC interface-enable initialization (Step 3) must be
		 * performed after Step 2 for each chip reset and whenever
		 * the DDR clock speed changes. This step needs to be
		 * performed only once, not once per LMC. Perform the
		 * following three substeps for the LMC interface-enable
		 * initialization:
		 *
		 * 1. Without changing any other LMC2_DLL_CTL2 fields
		 * (LMC(0..3)_DLL_CTL2 should be at their reset values after
		 * Step 1), write LMC2_DLL_CTL2[INTF_EN] = 1 if four-LMC
		 * mode is desired.
		 *
		 * 2. Without changing any other LMC3_DLL_CTL2 fields, write
		 * LMC3_DLL_CTL2[INTF_EN] = 1 if four-LMC mode is desired.
		 *
		 * 3. Read LMC2_DLL_CTL2 and wait for the result.
		 *
		 * The LMC2_DLL_CTL2[INTF_EN] and LMC3_DLL_CTL2[INTF_EN]
		 * values should not be changed by software from this point.
		 */

		/* Force all enabled LMCs' DLL_CTL2 back to reset values */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));

			dll_ctl2.cn78xx.byp_setting = 0;
			dll_ctl2.cn78xx.byp_sel = 0;
			dll_ctl2.cn78xx.quad_dll_ena = 0;
			dll_ctl2.cn78xx.dreset = 1;
			dll_ctl2.cn78xx.dll_bringup = 0;
			dll_ctl2.cn78xx.intf_en = 0;

			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
		}

		/*
		 * ###### Interface enable (intf_en) deferred until after
		 * DDR_DIV_RESET=0 #######
		 */

		/*
		 * 5.9.1 DDR PLL Initialization
		 *
		 * DDR PLL initialization (Step 1) must be performed for each
		 * chip reset and whenever the DDR clock speed changes. This
		 * step needs to be performed only once, not once per LMC.
		 *
		 * Perform the following eight substeps to initialize the
		 * DDR PLL:
		 *
		 * 1. If not done already, write all fields in
		 * LMC(0..3)_DDR_PLL_CTL and
		 * LMC(0..1)_DLL_CTL2 to their reset values, including:
		 *
		 * .. LMC0_DDR_PLL_CTL[DDR_DIV_RESET] = 1
		 * .. LMC0_DLL_CTL2[DRESET] = 1
		 *
		 * This substep is not necessary after a chip reset.
		 *
		 */

		ddr_pll_ctl.u64 = lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));

		ddr_pll_ctl.cn78xx.reset_n = 0;
		ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
		ddr_pll_ctl.cn78xx.phy_dcok = 0;

		/*
		 * 73XX pass 1.3 has LMC0 DCLK_INVERT tied to 1; earlier
		 * 73xx passes are tied to 0
		 *
		 * 75XX needs LMC0 DCLK_INVERT set to 1 to minimize duty
		 * cycle falling points
		 *
		 * and we default all other chips LMC0 to DCLK_INVERT=0
		 */
		ddr_pll_ctl.cn78xx.dclk_invert =
		    !!(octeon_is_cpuid(OCTEON_CN73XX_PASS1_3) ||
		       octeon_is_cpuid(OCTEON_CNF75XX));

		/*
		 * allow override of LMC0 desired setting for DCLK_INVERT,
		 * but not on 73XX;
		 * we cannot change LMC0 DCLK_INVERT on 73XX any pass
		 */
		if (!(octeon_is_cpuid(OCTEON_CN73XX))) {
			s = lookup_env(priv, "ddr0_set_dclk_invert");
			if (s) {
				ddr_pll_ctl.cn78xx.dclk_invert =
				    !!simple_strtoul(s, NULL, 0);
				debug("LMC0: override DDR_PLL_CTL[dclk_invert] to %d\n",
				      ddr_pll_ctl.cn78xx.dclk_invert);
			}
		}

		lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u64);
		debug("%-45s : 0x%016llx\n", "LMC0: DDR_PLL_CTL",
		      ddr_pll_ctl.u64);

		// only when LMC1 is active
		if (if_mask & 0x2) {
			/*
			 * For CNF75XX, both LMC0 and LMC1 use the same PLL,
			 * so we use the LMC0 setting of DCLK_INVERT for LMC1.
			 */
			if (!octeon_is_cpuid(OCTEON_CNF75XX)) {
				int override = 0;

				/*
				 * by default, for non-CNF75XX, we want
				 * LMC1 toggled LMC0
				 */
				int lmc0_dclk_invert =
				    ddr_pll_ctl.cn78xx.dclk_invert;

				/*
				 * FIXME: work-around for DDR3 UDIMM problems
				 * is to use LMC0 setting on LMC1 and if
				 * 73xx pass 1.3, we want to default LMC1
				 * DCLK_INVERT to LMC0, not the invert of LMC0
				 */
				int lmc1_dclk_invert;

				lmc1_dclk_invert =
					((ddr_type == DDR4_DRAM) &&
					 !octeon_is_cpuid(OCTEON_CN73XX_PASS1_3))
					? lmc0_dclk_invert ^ 1 :
					lmc0_dclk_invert;

				/*
				 * allow override of LMC1 desired setting for
				 * DCLK_INVERT
				 */
				s = lookup_env(priv, "ddr1_set_dclk_invert");
				if (s) {
					lmc1_dclk_invert =
						!!simple_strtoul(s, NULL, 0);
					override = 1;
				}
				debug("LMC1: %s DDR_PLL_CTL[dclk_invert] to %d (LMC0 %d)\n",
				      (override) ? "override" :
				      "default", lmc1_dclk_invert,
				      lmc0_dclk_invert);

				ddr_pll_ctl.cn78xx.dclk_invert =
					lmc1_dclk_invert;
			}

			// but always write LMC1 CSR if it is active
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u64);
			debug("%-45s : 0x%016llx\n",
			      "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u64);
		}

		/*
		 * 2. If the current DRAM contents are not preserved (see
		 * LMC(0..3)_RESET_CTL[DDR3PSV]), this is also an appropriate
		 * time to assert the RESET# pin of the DDR3/DDR4 DRAM parts.
		 * If desired, write
		 * LMC0_RESET_CTL[DDR3RST] = 0 without modifying any other
		 * LMC0_RESET_CTL fields to assert the DDR_RESET_L pin.
		 * No action is required here to assert DDR_RESET_L
		 * following a chip reset. Refer to Section 5.9.6. Do this
		 * for all enabled LMCs.
		 */

		for (i = 0; (!ddr_memory_preserved(priv)) && i < 4; ++i) {
			union cvmx_lmcx_reset_ctl reset_ctl;

			if ((if_mask & (1 << i)) == 0)
				continue;

			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
			reset_ctl.cn78xx.ddr3rst = 0;	/* Reset asserted */
			debug("LMC%d Asserting DDR_RESET_L\n", i);
			lmc_wr(priv, CVMX_LMCX_RESET_CTL(i), reset_ctl.u64);
			/* Read back to flush the write before continuing */
			lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
		}

		/*
		 * 3. Without changing any other LMC0_DDR_PLL_CTL values,
		 * write LMC0_DDR_PLL_CTL[CLKF] with a value that gives a
		 * desired DDR PLL speed. The LMC0_DDR_PLL_CTL[CLKF] value
		 * should be selected in conjunction with the post-scalar
		 * divider values for LMC (LMC0_DDR_PLL_CTL[DDR_PS_EN]) so
		 * that the desired LMC CK speeds are produced (all
		 * enabled LMCs must run the same speed). Section 5.14
		 * describes LMC0_DDR_PLL_CTL[CLKF] and
		 * LMC0_DDR_PLL_CTL[DDR_PS_EN] programmings that produce
		 * the desired LMC CK speed. Section 5.9.2 describes LMC CK
		 * initialization, which can be done separately from the DDR
		 * PLL initialization described in this section.
		 *
		 * The LMC0_DDR_PLL_CTL[CLKF] value must not change after
		 * this point without restarting this SDRAM PLL
		 * initialization sequence.
		 */

		/* Init to max error */
		error = ddr_hertz;
		best_error = ddr_hertz;

		debug("DDR Reference Hertz = %d\n", ddr_ref_hertz);

		/*
		 * Exhaustive search over CLKR and the post-scalar divider
		 * table (_en[]) for the CLKF setting that minimizes the
		 * error between the requested and achievable DDR clock,
		 * subject to the PLL frequency window [min_pll_MHz,
		 * max_pll_MHz] and the CLKF ceiling. If no legal setting
		 * is found, the target ddr_hertz is stepped down 1 MHz at
		 * a time (up to 10 MHz below the original request) and the
		 * search is repeated.
		 */
		while (best_error == ddr_hertz) {
			for (clkr = 0; clkr < 4; ++clkr) {
				for (en_idx =
				     sizeof(_en) / sizeof(int) -
				     1; en_idx >= 0; --en_idx) {
					save_en_idx = en_idx;
					clkf =
					    ((ddr_hertz) *
					     (clkr + 1) * (_en[save_en_idx]));
					clkf = divide_nint(clkf, ddr_ref_hertz)
					    - 1;
					pll_MHz =
					    ddr_ref_hertz *
					    (clkf + 1) / (clkr + 1) / 1000000;
					calculated_ddr_hertz =
					    ddr_ref_hertz *
					    (clkf +
					     1) / ((clkr +
						    1) * (_en[save_en_idx]));
					error =
					    ddr_hertz - calculated_ddr_hertz;

					if (pll_MHz < min_pll_MHz ||
					    pll_MHz > max_pll_MHz)
						continue;
					if (clkf > max_clkf) {
						/*
						 * PLL requires clkf to be
						 * limited
						 */
						continue;
					}
					if (abs(error) > abs(best_error))
						continue;

					debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld\n",
					      clkr, save_en_idx,
					      _en[save_en_idx], clkf, pll_MHz,
					     calculated_ddr_hertz, error);

					/* Favor the highest PLL frequency. */
					if (abs(error) < abs(best_error) ||
					    pll_MHz > best_pll_MHz) {
						best_pll_MHz = pll_MHz;
						best_calculated_ddr_hertz =
							calculated_ddr_hertz;
						best_error = error;
						best_clkr = clkr;
						best_clkf = clkf;
						best_en_idx = save_en_idx;
					}
				}
			}

			override_pll_settings = 0;

			/* Environment overrides for the computed PLL values */
			s = lookup_env(priv, "ddr_pll_clkr");
			if (s) {
				best_clkr = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			s = lookup_env(priv, "ddr_pll_clkf");
			if (s) {
				best_clkf = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			s = lookup_env(priv, "ddr_pll_en_idx");
			if (s) {
				best_en_idx = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			/* Recompute derived values if any override was set */
			if (override_pll_settings) {
				best_pll_MHz =
				    ddr_ref_hertz * (best_clkf +
						     1) /
				    (best_clkr + 1) / 1000000;
				best_calculated_ddr_hertz =
				    ddr_ref_hertz * (best_clkf +
						     1) /
				    ((best_clkr + 1) * (_en[best_en_idx]));
				best_error =
				    ddr_hertz - best_calculated_ddr_hertz;
			}

			debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld <==\n",
			      best_clkr, best_en_idx, _en[best_en_idx],
			      best_clkf, best_pll_MHz,
			      best_calculated_ddr_hertz, best_error);

			/*
			 * Try lowering the frequency if we can't get a
			 * working configuration
			 */
			if (best_error == ddr_hertz) {
				if (ddr_hertz < orig_ddr_hertz - 10000000)
					break;
				ddr_hertz -= 1000000;
				best_error = ddr_hertz;
			}
		}

		/* best_error still at its init value means no legal config */
		if (best_error == ddr_hertz) {
			printf("ERROR: Can not compute a legal DDR clock speed configuration.\n");
			return -1;
		}

		/* Default bandwidth-adjust value derived from CLKF */
		new_bwadj = (best_clkf + 1) / 10;
		debug("bwadj: %2d\n", new_bwadj);

		s = lookup_env(priv, "ddr_pll_bwadj");
		if (s) {
			new_bwadj = strtoul(s, NULL, 0);
			debug("bwadj: %2d\n", new_bwadj);
		}

		/*
		 * Program the selected PLL settings into LMC0 (and LMC1
		 * when active), leaving the PLL held in reset (RESET_N=0).
		 */
		for (i = 0; i < 2; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			ddr_pll_ctl.u64 =
			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			debug("LMC%d: DDR_PLL_CTL                             : 0x%016llx\n",
			      i, ddr_pll_ctl.u64);

			ddr_pll_ctl.cn78xx.ddr_ps_en = best_en_idx;
			ddr_pll_ctl.cn78xx.clkf = best_clkf;
			ddr_pll_ctl.cn78xx.clkr = best_clkr;
			ddr_pll_ctl.cn78xx.reset_n = 0;
			ddr_pll_ctl.cn78xx.bwadj = new_bwadj;

			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
			debug("LMC%d: DDR_PLL_CTL                             : 0x%016llx\n",
			      i, ddr_pll_ctl.u64);

			/*
			 * For cnf75xx LMC0 and LMC1 use the same PLL so
			 * only program LMC0 PLL.
			 */
			if (octeon_is_cpuid(OCTEON_CNF75XX))
				break;
		}

		/* Release the PLL from reset on each enabled LMC */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/*
			 * 4. Read LMC0_DDR_PLL_CTL and wait for the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 5. Wait a minimum of 3 us.
			 */

			udelay(3);	/* Wait 3 us */

			/*
			 * 6. Write LMC0_DDR_PLL_CTL[RESET_N] = 1 without
			 * changing any other LMC0_DDR_PLL_CTL values.
			 */

			ddr_pll_ctl.u64 =
			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.reset_n = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 7. Read LMC0_DDR_PLL_CTL and wait for the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 8. Wait a minimum of 25 us.
			 */

			udelay(25);	/* Wait 25 us */

			/*
			 * For cnf75xx LMC0 and LMC1 use the same PLL so
			 * only program LMC0 PLL.
			 */
			if (octeon_is_cpuid(OCTEON_CNF75XX))
				break;
		}

		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/*
			 * 5.9.2 LMC CK Initialization
			 *
			 * DDR PLL initialization must be completed prior to
			 * starting LMC CK initialization.
			 *
			 * Perform the following substeps to initialize the
			 * LMC CK:
			 *
			 * 1. Without changing any other LMC(0..3)_DDR_PLL_CTL
			 * values, write
			 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 1 and
			 * LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN] with the
			 * appropriate value to get the desired LMC CK speed.
			 * Section 5.14 discusses CLKF and DDR_PS_EN
			 * programmings.  The LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN]
			 * must not change after this point without restarting
			 * this LMC CK initialization sequence.
			 */

			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 2. Without changing any other fields in
			 * LMC(0..3)_DDR_PLL_CTL, write
			 * LMC(0..3)_DDR_PLL_CTL[DDR4_MODE] = 0.
			 *
			 * (Set DDR4_MODE according to the detected DRAM
			 * type rather than unconditionally clearing it.)
			 */

			ddr_pll_ctl.u64 =
			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr4_mode =
			    (ddr_type == DDR4_DRAM) ? 1 : 0;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 3. Read LMC(0..3)_DDR_PLL_CTL and wait for the
			 * result.
			 */

			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 4. Wait a minimum of 1 us.
			 */

			udelay(1);	/* Wait 1 us */

			/*
			 * ###### Steps 5 through 7 deferred until after
			 * DDR_DIV_RESET=0 #######
			 */

			/*
			 * 8. Without changing any other LMC(0..3)_COMP_CTL2
			 * values, write
			 * LMC(0..3)_COMP_CTL2[CK_CTL,CONTROL_CTL,CMD_CTL]
			 * to the desired DDR*_CK_*_P control and command
			 * signals drive strength.
			 */

			union cvmx_lmcx_comp_ctl2 comp_ctl2;
			const struct ddr3_custom_config *custom_lmc_config =
			    &ddr_conf->custom_lmc_config;

			comp_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_COMP_CTL2(i));

			/* Default 4=34.3 ohm */
			comp_ctl2.cn78xx.dqx_ctl =
			    (custom_lmc_config->dqx_ctl ==
			     0) ? 4 : custom_lmc_config->dqx_ctl;
			/* Default 4=34.3 ohm */
			comp_ctl2.cn78xx.ck_ctl =
			    (custom_lmc_config->ck_ctl ==
			     0) ? 4 : custom_lmc_config->ck_ctl;
			/* Default 4=34.3 ohm */
			comp_ctl2.cn78xx.cmd_ctl =
			    (custom_lmc_config->cmd_ctl ==
			     0) ? 4 : custom_lmc_config->cmd_ctl;

			comp_ctl2.cn78xx.rodt_ctl = 0x4;	/* 60 ohm */

			/*
			 * ptune/ntune offsets are sign-magnitude encoded:
			 * low 3 bits magnitude, bit 3 sign.
			 */
			comp_ctl2.cn70xx.ptune_offset =
			    (abs(custom_lmc_config->ptune_offset) & 0x7)
			    | (_sign(custom_lmc_config->ptune_offset) << 3);
			comp_ctl2.cn70xx.ntune_offset =
			    (abs(custom_lmc_config->ntune_offset) & 0x7)
			    | (_sign(custom_lmc_config->ntune_offset) << 3);

			/*
			 * NOTE(review): "ddr_clk_ctl" and "ddr_ck_ctl"
			 * both override ck_ctl; the later lookup wins.
			 * Presumably "ddr_clk_ctl" is a legacy alias —
			 * confirm before removing.
			 */
			s = lookup_env(priv, "ddr_clk_ctl");
			if (s) {
				comp_ctl2.cn78xx.ck_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ck_ctl");
			if (s) {
				comp_ctl2.cn78xx.ck_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_cmd_ctl");
			if (s) {
				comp_ctl2.cn78xx.cmd_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_dqx_ctl");
			if (s) {
				comp_ctl2.cn78xx.dqx_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ptune_offset");
			if (s) {
				comp_ctl2.cn78xx.ptune_offset =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ntune_offset");
			if (s) {
				comp_ctl2.cn78xx.ntune_offset =
				    simple_strtoul(s, NULL, 0);
			}

			lmc_wr(priv, CVMX_LMCX_COMP_CTL2(i), comp_ctl2.u64);

			/*
			 * 9. Read LMC(0..3)_DDR_PLL_CTL and wait for the
			 * result.
			 */

			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 10. Wait a minimum of 200 ns.
			 */

			udelay(1);	/* Wait 1 us */

			/*
			 * 11. Without changing any other
			 * LMC(0..3)_DDR_PLL_CTL values, write
			 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 0.
			 */

			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr_div_reset = 0;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/*
			 * 12. Read LMC(0..3)_DDR_PLL_CTL and wait for the
			 * result.
			 */

			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 13. Wait a minimum of 200 ns.
			 */

			udelay(1);	/* Wait 1 us */
		}

		/*
		 * Relocated Interface Enable (intf_en) Step
		 */
		for (i = (octeon_is_cpuid(OCTEON_CN73XX) ||
			  octeon_is_cpuid(OCTEON_CNF75XX)) ? 1 : 2;
		     i < 4; ++i) {
			/*
			 * This step is only necessary for LMC 2 and 3 in
			 * 4-LMC mode. The mask will cause the unpopulated
			 * interfaces to be skipped.
			 */
			if ((if_mask & (1 << i)) == 0)
				continue;

			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
			dll_ctl2.cn78xx.intf_en = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
			/* Read back to flush the write before continuing */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
		}

		/*
		 * Relocated PHY_DCOK Step
		 */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;
			/*
			 * 5. Without changing any other fields in
			 * LMC(0..3)_DDR_PLL_CTL, write
			 * LMC(0..3)_DDR_PLL_CTL[PHY_DCOK] = 1.
			 */

			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.phy_dcok = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
			/*
			 * 6. Read LMC(0..3)_DDR_PLL_CTL and wait for
			 * the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/*
			 * 7. Wait a minimum of 20 us.
			 */

			udelay(20);	/* Wait 20 us */
		}

		/*
		 * 5.9.4 LMC DRESET Initialization
		 *
		 * All of the DDR PLL, LMC global CK, and LMC interface
		 * enable initializations must be completed prior to starting
		 * this LMC DRESET initialization (Step 4).
		 *
		 * This LMC DRESET step is done for all enabled LMCs.
		 *
		 * There are special constraints on the ordering of DRESET
		 * initialization (Steps 4) and CK local initialization
		 * (Step 5) whenever CK local initialization must be executed.
		 * CK local initialization must be executed whenever the DDR
		 * PLL is being brought up (for each chip reset* and whenever
		 * the DDR clock speed changes).
		 *
		 * When Step 5 must be executed in the two-LMC mode case:
		 * - LMC0 DRESET initialization must occur before Step 5.
		 * - LMC1 DRESET initialization must occur after Step 5.
		 *
		 * When Step 5 must be executed in the four-LMC mode case:
		 * - LMC2 and LMC3 DRESET initialization must occur before
		 *   Step 5.
		 * - LMC0 and LMC1 DRESET initialization must occur after
		 *   Step 5.
		 */

		if (octeon_is_cpuid(OCTEON_CN73XX)) {
			/* ONE-LMC or TWO-LMC MODE BEFORE STEP 5 for cn73xx */
			cn78xx_lmc_dreset_init(priv, 0);
		} else if (octeon_is_cpuid(OCTEON_CNF75XX)) {
			if (if_mask == 0x3) {
				/*
				 * 2-LMC Mode: LMC1 DRESET must occur
				 * before Step 5
				 */
				cn78xx_lmc_dreset_init(priv, 1);
			}
		} else {
			/* TWO-LMC MODE DRESET BEFORE STEP 5 */
			if (if_mask == 0x3)
				cn78xx_lmc_dreset_init(priv, 0);

			/* FOUR-LMC MODE BEFORE STEP 5 */
			if (if_mask == 0xf) {
				cn78xx_lmc_dreset_init(priv, 2);
				cn78xx_lmc_dreset_init(priv, 3);
			}
		}

		/*
		 * 5.9.5 LMC CK Local Initialization
		 *
		 * All of DDR PLL, LMC global CK, and LMC interface-enable
		 * initializations must be completed prior to starting this
		 * LMC CK local initialization (Step 5).
		 *
		 * LMC CK Local initialization must be performed for each
		 * chip reset and whenever the DDR clock speed changes. This
		 * step needs to be performed only once, not once per LMC.
		 *
		 * There are special constraints on the ordering of DRESET
		 * initialization (Steps 4) and CK local initialization
		 * (Step 5) whenever CK local initialization must be executed.
		 * CK local initialization must be executed whenever the
		 * DDR PLL is being brought up (for each chip reset and
		 * whenever the DDR clock speed changes).
		 *
		 * When Step 5 must be executed in the two-LMC mode case:
		 * - LMC0 DRESET initialization must occur before Step 5.
		 * - LMC1 DRESET initialization must occur after Step 5.
		 *
		 * When Step 5 must be executed in the four-LMC mode case:
		 * - LMC2 and LMC3 DRESET initialization must occur before
		 *   Step 5.
		 * - LMC0 and LMC1 DRESET initialization must occur after
		 *   Step 5.
		 *
		 * LMC CK local initialization is different depending on
		 * whether two-LMC or four-LMC modes are desired.
		 */

		if (if_mask == 0x3) {
			/*
			 * On cnf75xx the clock-delay source interface is
			 * LMC1; on other chips it is LMC0.
			 */
			int temp_lmc_if_num = octeon_is_cpuid(OCTEON_CNF75XX) ?
				1 : 0;

			/*
			 * 5.9.5.1 LMC CK Local Initialization for Two-LMC
			 * Mode
			 *
			 * 1. Write LMC0_DLL_CTL3 to its reset value. (Note
			 * that LMC0_DLL_CTL3[DLL_90_BYTE_SEL] = 0x2 .. 0x8
			 * should also work.)
			 */

			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;

			if (octeon_is_cpuid(OCTEON_CNF75XX))
				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
			else
				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 1;

			lmc_wr(priv,
			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
			       ddr_dll_ctl3.u64);

			/*
			 * 2. Read LMC0_DLL_CTL3 and wait for the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));

			/*
			 * 3. Without changing any other fields in
			 * LMC0_DLL_CTL3, write
			 * LMC0_DLL_CTL3[DCLK90_FWD] = 1.  Writing
			 * LMC0_DLL_CTL3[DCLK90_FWD] = 1
			 * causes clock-delay information to be forwarded
			 * from LMC0 to LMC1.
			 */

			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			lmc_wr(priv,
			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
			       ddr_dll_ctl3.u64);

			/*
			 * 4. Read LMC0_DLL_CTL3 and wait for the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
		}

		if (if_mask == 0xf) {
			/*
			 * 5.9.5.2 LMC CK Local Initialization for Four-LMC
			 * Mode
			 *
			 * 1. Write LMC2_DLL_CTL3 to its reset value except
			 * LMC2_DLL_CTL3[DLL90_BYTE_SEL] = 0x7.
			 */

			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);

			/*
			 * 2. Write LMC3_DLL_CTL3 to its reset value except
			 * LMC3_DLL_CTL3[DLL90_BYTE_SEL] = 0x2.
			 */

			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 2;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);

			/*
			 * 3. Read LMC3_DLL_CTL3 and wait for the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));

			/*
			 * 4. Without changing any other fields in
			 * LMC2_DLL_CTL3, write LMC2_DLL_CTL3[DCLK90_FWD] = 1
			 * and LMC2_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
			 * Writing LMC2_DLL_CTL3[DCLK90_FWD] = 1 causes LMC 2
			 * to forward clockdelay information to LMC0. Setting
			 * LMC2_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC2
			 * from periodically recalibrating this delay
			 * information.
			 */

			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(2));
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);

			/*
			 * 5. Without changing any other fields in
			 * LMC3_DLL_CTL3, write LMC3_DLL_CTL3[DCLK90_FWD] = 1
			 * and LMC3_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
			 * Writing LMC3_DLL_CTL3[DCLK90_FWD] = 1 causes LMC3
			 * to forward clockdelay information to LMC1. Setting
			 * LMC3_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC3
			 * from periodically recalibrating this delay
			 * information.
			 */

			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);

			/*
			 * 6. Read LMC3_DLL_CTL3 and wait for the result.
			 */

			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
		}

		if (octeon_is_cpuid(OCTEON_CNF75XX)) {
			/*
			 * cnf75xx 2-LMC Mode: LMC0 DRESET must occur after
			 * Step 5, Do LMC0 for 1-LMC Mode here too
			 */
			cn78xx_lmc_dreset_init(priv, 0);
		}

		/* TWO-LMC MODE AFTER STEP 5 */
		if (if_mask == 0x3) {
			if (octeon_is_cpuid(OCTEON_CNF75XX)) {
				/*
				 * cnf75xx 2-LMC Mode: LMC0 DRESET must
				 * occur after Step 5
				 *
				 * NOTE(review): for cnf75xx with
				 * if_mask == 0x3 this is the second call
				 * to cn78xx_lmc_dreset_init(priv, 0) (the
				 * unconditional one above already ran) —
				 * confirm whether the repeat is intended.
				 */
				cn78xx_lmc_dreset_init(priv, 0);
			} else {
				cn78xx_lmc_dreset_init(priv, 1);
			}
		}

		/* FOUR-LMC MODE AFTER STEP 5 */
		if (if_mask == 0xf) {
			cn78xx_lmc_dreset_init(priv, 0);
			cn78xx_lmc_dreset_init(priv, 1);

			/*
			 * Enable periodic recalibration of the DDR90 delay
			 * line.
			 */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(0));
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u64);
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(1));
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(1), ddr_dll_ctl3.u64);
		}

		/* Enable fine tune mode for all LMCs */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(i));
			ddr_dll_ctl3.cn78xx.fine_tune_mode = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(i), ddr_dll_ctl3.u64);
		}

		/*
		 * Enable the trim circuit on the appropriate channels to
		 * adjust the DDR clock duty cycle for chips that support
		 * it
		 */
		if (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X) ||
		    octeon_is_cpuid(OCTEON_CN73XX) ||
		    octeon_is_cpuid(OCTEON_CNF75XX)) {
			union cvmx_lmcx_phy_ctl lmc_phy_ctl;
			int i;

			for (i = 0; i < 4; ++i) {
				if ((if_mask & (1 << i)) == 0)
					continue;

				lmc_phy_ctl.u64 =
				    lmc_rd(priv, CVMX_LMCX_PHY_CTL(i));

				if (octeon_is_cpuid(OCTEON_CNF75XX) ||
				    octeon_is_cpuid(OCTEON_CN73XX_PASS1_3)) {
					/* Both LMCs */
					lmc_phy_ctl.s.lv_mode = 0;
				} else {
					/* Odd LMCs = 0, Even LMCs = 1 */
					lmc_phy_ctl.s.lv_mode = (~i) & 1;
				}

				debug("LMC%d: PHY_CTL                                 : 0x%016llx\n",
				      i, lmc_phy_ctl.u64);
				lmc_wr(priv, CVMX_LMCX_PHY_CTL(i),
				       lmc_phy_ctl.u64);
			}
		}
	}

	/*
	 * 5.9.6 LMC RESET Initialization
	 *
	 * NOTE: this is now done as the first step in
	 * init_octeon3_ddr3_interface, rather than the last step in clock
	 * init. This reorg allows restarting per-LMC initialization should
	 * problems be encountered, rather than being forced to resort to
	 * resetting the chip and starting all over.
	 *
	 * Look for the code in octeon3_lmc.c: perform_lmc_reset().
	 */

	/* Fallthrough for all interfaces... */
not_if0:

	/*
	 * Start the DDR clock so that its frequency can be measured.
	 * For some chips we must activate the memory controller with
	 * init_start to make the DDR clock start to run.
	 */
	if ((!octeon_is_cpuid(OCTEON_CN6XXX)) &&
	    (!octeon_is_cpuid(OCTEON_CNF7XXX)) &&
	    (!octeon_is_cpuid(OCTEON_CN7XXX))) {
		union cvmx_lmcx_mem_cfg0 mem_cfg0;

		mem_cfg0.u64 = 0;
		mem_cfg0.s.init_start = 1;
		lmc_wr(priv, CVMX_LMCX_MEM_CFG0(if_num), mem_cfg0.u64);
		/* Read back to flush the write before continuing */
		lmc_rd(priv, CVMX_LMCX_MEM_CFG0(if_num));
	}

	set_ddr_clock_initialized(priv, if_num, 1);

	return 0;
}