in hw/qib/qib_iba7322.c [6343:6680]
static int qib_init_7322_variables(struct qib_devdata *dd)
{
struct qib_pportdata *ppd;
unsigned features, pidx, sbufcnt;
int ret, mtu;
u32 sbufs, updthresh;
resource_size_t vl15off;
/* pport structs are contiguous, allocated after devdata */
ppd = (struct qib_pportdata *)(dd + 1);
dd->pport = ppd;
ppd[0].dd = dd;
ppd[1].dd = dd;
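/* cspec and both per-port cpspec structs follow in the same allocation */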
dd->cspec = (struct qib_chip_specific *)(ppd + 2);
ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
ppd[1].cpspec = &ppd[0].cpspec[1];
ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
spin_lock_init(&dd->cspec->rcvmod_lock);
spin_lock_init(&dd->cspec->gpio_lock);
/* register routines check QIB_PRESENT, which isn't set yet, so use readq() directly */
dd->revision = readq(&dd->kregbase[kr_revision]);
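/* all ones in the low 32 bits means the read failed (chip absent or unresponsive) */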
if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
qib_dev_err(dd,
"Revision register read failure, giving up initialization\n");
ret = -ENODEV;
goto bail;
}
dd->flags |= QIB_PRESENT; /* now register routines work */
dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
dd->cspec->r1 = dd->minrev == 1;
get_7322_chip_params(dd);
features = qib_7322_boardname(dd);
/* now that piobcnt2k and piobcnt4k are set, we can allocate these */
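/* one bit per 2k, 4k, and VL15 send buffer, rounded up to whole longs */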
sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
NUM_VL15_BUFS + BITS_PER_LONG - 1;
sbufcnt /= BITS_PER_LONG;
dd->cspec->sendchkenable =
kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
GFP_KERNEL);
dd->cspec->sendgrhchk =
kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
GFP_KERNEL);
dd->cspec->sendibchk =
kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
ret = -ENOMEM;
goto bail;
}
ppd = dd->pport;
/*
* GPIO bits for TWSI data and clock,
* used for serial EEPROM.
*/
dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
QIB_HAS_THRESH_UPDATE |
(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
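/* special-trigger sends and send DMA are mutually exclusive */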
dd->flags |= qib_special_trigger ?
QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
/*
 * Set up initial values.  These may change when PAT is enabled,
 * but we need them to do the initial chip register accesses.
 */
qib_7322_set_baseaddrs(dd);
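/* ib_mtu_enum_to_int() returns -1 for an unrecognized enum value */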
mtu = ib_mtu_enum_to_int(qib_ibmtu);
if (mtu == -1)
mtu = QIB_DEFAULT_MTU;
dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
/* all hwerrors become interrupts, unless special purposed */
dd->cspec->hwerrmask = ~0ULL;
/*
 * The link_recovery setup causes these errors, so ignore them
 * other than clearing them when they occur.
 */
dd->cspec->hwerrmask &=
~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
HWE_MASK(LATriggered));
for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
struct qib_chippport_specific *cp = ppd->cpspec;
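/*
 * features packs one PORT_SPD_CAP field per port; shift
 * down after each port to reach the next port's bits.
 */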
ppd->link_speed_supported = features & PORT_SPD_CAP;
features >>= PORT_SPD_CAP_SHIFT;
if (!ppd->link_speed_supported) {
/* single port mode (7340, or configured) */
dd->skip_kctxt_mask |= 1 << pidx;
if (pidx == 0) {
/* Make sure port is disabled. */
qib_write_kreg_port(ppd, krp_rcvctrl, 0);
qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
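/*
 * Struct copy: the "continue" below skips the ppd++ at the
 * bottom of the loop, so the remaining hardware port (port 1)
 * is then set up in dd->pport[0].
 */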
ppd[0] = ppd[1];
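/* mask off the disabled port's hwerror and interrupt sources */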
dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
IBSerdesPClkNotDetectMask_0)
| SYM_MASK(HwErrMask,
SDmaMemReadErrMask_0));
dd->cspec->int_enable_mask &= ~(
SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
SYM_MASK(IntMask, SDmaIdleIntMask_0) |
SYM_MASK(IntMask, SDmaProgressIntMask_0) |
SYM_MASK(IntMask, SDmaIntMask_0) |
SYM_MASK(IntMask, ErrIntMask_0) |
SYM_MASK(IntMask, SendDoneIntMask_0));
} else {
/* Make sure port is disabled. */
qib_write_kreg_port(ppd, krp_rcvctrl, 0);
qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
IBSerdesPClkNotDetectMask_1)
| SYM_MASK(HwErrMask,
SDmaMemReadErrMask_1));
dd->cspec->int_enable_mask &= ~(
SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
SYM_MASK(IntMask, SDmaIdleIntMask_1) |
SYM_MASK(IntMask, SDmaProgressIntMask_1) |
SYM_MASK(IntMask, SDmaIntMask_1) |
SYM_MASK(IntMask, ErrIntMask_1) |
SYM_MASK(IntMask, SendDoneIntMask_1));
}
continue;
}
dd->num_pports++;
ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
if (ret) {
dd->num_pports--;
goto bail;
}
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_width_enabled = IB_WIDTH_4X;
ppd->link_speed_enabled = ppd->link_speed_supported;
/*
 * Set the initial values to reasonable defaults; they are set
 * for real when the link comes up.
 */
ppd->link_width_active = IB_WIDTH_4X;
ppd->link_speed_active = QIB_IB_SDR;
ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
switch (qib_num_cfg_vls) {
case 1:
ppd->vls_supported = IB_VL_VL0;
break;
case 2:
ppd->vls_supported = IB_VL_VL0_1;
break;
default:
qib_devinfo(dd->pcidev,
"Invalid num_vls %u, using 4 VLs\n",
qib_num_cfg_vls);
qib_num_cfg_vls = 4;
fallthrough;
case 4:
ppd->vls_supported = IB_VL_VL0_3;
break;
case 8:
if (mtu <= 2048)
ppd->vls_supported = IB_VL_VL0_7;
else {
qib_devinfo(dd->pcidev,
"Invalid num_vls %u for MTU %d , using 4 VLs\n",
qib_num_cfg_vls, mtu);
ppd->vls_supported = IB_VL_VL0_3;
qib_num_cfg_vls = 4;
}
break;
}
ppd->vls_operational = ppd->vls_supported;
init_waitqueue_head(&cp->autoneg_wait);
INIT_DELAYED_WORK(&cp->autoneg_work,
autoneg_7322_work);
if (ppd->dd->cspec->r1)
INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
/*
 * Mezzanine and similar cards have no QSFP info, so do the
 * "cable info" setup here.  This can be overridden in
 * adapter-specific routines.
 */
if (!(dd->flags & QIB_HAS_QSFP)) {
if (!IS_QMH(dd) && !IS_QME(dd))
qib_devinfo(dd->pcidev,
"IB%u:%u: Unknown mezzanine card type\n",
dd->unit, ppd->port);
cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
/*
 * Choose the center value as the default TX SerDes setting
 * until changed through the module parameter.
 */
cp->no_eep = IS_QMH(dd) ?
TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
} else
cp->h1_val = H1_FORCE_VAL;
/* Avoid writes to chip for mini_init */
if (!qib_mini_init)
write_7322_init_portregs(ppd);
timer_setup(&cp->chase_timer, reenable_chase, 0);
ppd++;
}
dd->rcvhdrentsize = qib_rcvhdrentsize ?
qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
dd->rcvhdrsize = qib_rcvhdrsize ?
qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
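/* rcvhdrentsize is in dwords; the RHF occupies the last qword of each entry */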
dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
/* we always allocate at least 2048 bytes for eager buffers */
dd->rcvegrbufsize = max(mtu, 2048);
dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
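/* IB MTUs are powers of two, so the eager buffer size is an exact shift */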
qib_7322_tidtemplate(dd);
/*
* We can request a receive interrupt for 1 or
* more packets from current offset.
*/
dd->rhdrhead_intr_off =
(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
/* set up the stats timer; the add_timer call is done at the end of init */
timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
dd->ureg_align = 0x10000; /* 64KB alignment */
dd->piosize2kmax_dwords = dd->piosize2k >> 2;
qib_7322_config_ctxts(dd);
qib_set_ctxtcnt(dd);
/*
* We do not set WC on the VL15 buffers to avoid
* a rare problem with unaligned writes from
* interrupt-flushed store buffers, so we need
* to map those separately here. We can't solve
* this for the rarely used mtrr case.
*/
ret = init_chip_wc_pat(dd, 0);
if (ret)
goto bail;
/* vl15 buffers start just after the 4k buffers */
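/* (the high 32 bits of piobufbase hold the 4k buffers' offset from physaddr) */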
vl15off = dd->physaddr + (dd->piobufbase >> 32) +
dd->piobcnt4k * dd->align4k;
dd->piovl15base = ioremap(vl15off,
NUM_VL15_BUFS * dd->align4k);
if (!dd->piovl15base) {
ret = -ENOMEM;
goto bail;
}
qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
if (qib_mini_init)
goto bail;
if (!dd->num_pports) {
qib_dev_err(dd, "No ports enabled, giving up initialization\n");
goto bail; /* ret is still 0 (not an error), so state stays around for diagnosis */
}
write_7322_initregs(dd);
ret = qib_create_ctxts(dd);
init_7322_cntrnames(dd);
updthresh = 8U; /* initial send-buffer-available update threshold */
/* If SDMA is enabled, all of the 4 KB buffers go to the kernel
 * SDMA engine, and we reserve max(updthresh, 3) more send buffers
 * for other kernel use, such as sending SMI, MAD, and ACK packets.
 * If SDMA is disabled, the kernel keeps all of the 4 KB buffers
 * instead.
 * Reserving fewer buffers than the update threshold could mean a
 * long wait for a buffer-available update.  It is coded this way
 * because the update threshold is sometimes changed for various
 * reasons, and it must remain robust.
 */
if (dd->flags & QIB_HAS_SEND_DMA) {
dd->cspec->sdmabufcnt = dd->piobcnt4k;
sbufs = updthresh > 3 ? updthresh : 3;
} else {
dd->cspec->sdmabufcnt = 0;
sbufs = dd->piobcnt4k;
}
dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
dd->cspec->sdmabufcnt;
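/* SDMA owns the top of the 4k buffer range; reserve sbufs more below it for kernel sends */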
dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
dd->last_pio = dd->cspec->lastbuf_for_pio;
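/* divide the remaining buffers evenly among user contexts, if any */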
dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
/*
 * With 16 user contexts, we would have only 7 sbufs per context,
 * so reduce the update threshold to match.  We want the update to
 * arrive before we actually run out; at low pbufs/ctxt, give
 * ourselves some margin.
 */
if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
updthresh = dd->pbufsctxt - 2;
dd->cspec->updthresh_dflt = updthresh;
dd->cspec->updthresh = updthresh;
/* not fully enabled yet: no interrupts, so no locking is needed */
dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
<< SYM_LSB(SendCtrl, AvailUpdThld)) |
SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
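/* this chip supports the port transmit-wait (congestion) counters */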
dd->psxmitwait_supported = 1;
dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
if (!dd->ctxtcnt)
dd->ctxtcnt = 1; /* for other initialization code */
return ret;
}