in mhi/core/init.c [259:425]
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
struct mhi_ctxt *mhi_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_event_ctxt *er_ctxt;
struct mhi_cmd_ctxt *cmd_ctxt;
struct mhi_chan *mhi_chan;
struct mhi_event *mhi_event;
struct mhi_cmd *mhi_cmd;
u32 tmp;
int ret = -ENOMEM, i;
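
	/* No device wake votes or in-flight packets at init */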
	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->chan_ctxt) *
mhi_cntrl->max_chan,
&mhi_ctxt->chan_ctxt_addr,
GFP_KERNEL);
if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
/* Skip if it is an offload channel */
if (mhi_chan->offload_ch)
continue;
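
		/*
		 * Read-modify-write chcfg: start the channel DISABLED and
		 * program the doorbell burst mode and poll configuration
		 * without disturbing the other bitfields.
		 */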
tmp = chan_ctxt->chcfg;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
tmp &= ~CHAN_CTX_BRSTMODE_MASK;
tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
tmp &= ~CHAN_CTX_POLLCFG_MASK;
tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->er_ctxt) *
mhi_cntrl->total_ev_rings,
&mhi_ctxt->er_ctxt_addr,
GFP_KERNEL);
if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

/* Skip if it is an offload event */
if (mhi_event->offload_ev)
continue;
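
		/*
		 * Clear the interrupt moderation count and program the
		 * moderation timer from the event ring configuration.
		 */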
tmp = er_ctxt->intmod;
tmp &= ~EV_CTX_INTMODC_MASK;
tmp &= ~EV_CTX_INTMODT_MASK;
tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

ring->el_size = sizeof(struct mhi_tre);
ring->len = ring->el_size * ring->elements;
ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
er_ctxt->rbase = ring->iommu_base;
er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
er_ctxt->rlen = ring->len;
ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;	/* reset: ret may hold 0 from the ring allocations above */
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->cmd_ctxt) *
NR_OF_CMD_RINGS,
&mhi_ctxt->cmd_ctxt_addr,
GFP_KERNEL);
if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

ring->el_size = sizeof(struct mhi_tre);
ring->elements = CMD_EL_PER_RING;
ring->len = ring->el_size * ring->elements;
ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
cmd_ctxt->rbase = ring->iommu_base;
cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
cmd_ctxt->rlen = ring->len;
ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
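	/* Unwind the command rings allocated before the failure */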
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	/*
	 * All event rings were allocated; point i and mhi_event past the
	 * last ring so the unwind loop below frees every one of them.
	 */
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
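	/* Free each event ring allocated so far; offload rings have none */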
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
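
/*
 * Minimal sketches for illustration; neither helper below exists in the
 * driver. First, the rp/wp convention set up above: every ring starts out
 * empty, encoded as the read pointer being equal to the write pointer.
 */
static inline bool mhi_ring_is_empty(const struct mhi_ring *ring)
{
	/* rp catching up with wp means there are no elements to consume */
	return ring->rp == ring->wp;
}

/*
 * Second, the clear-then-set pattern used for the chcfg and intmod packed
 * fields, with mask/shift standing in for the CHAN_CTX_ and EV_CTX_ macros.
 */
static inline u32 mhi_set_field(u32 reg, u32 mask, u32 shift, u32 val)
{
	reg &= ~mask;		/* clear the target field */
	reg |= val << shift;	/* install the new value */
	return reg;
}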