in mhi/core/main.c [1432:1515]
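
/*
 * Bring a channel from disabled to running: set up its ring context
 * (unless the client offloads that), ask the device to start it, and
 * for inbound channels optionally pre-fill the transfer ring.
 */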
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
                        struct mhi_chan *mhi_chan, unsigned int flags)
{
        int ret = 0;
        struct device *dev = &mhi_chan->mhi_dev->dev;
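
        /*
         * The channel is only usable in the execution environments set
         * in its ee_mask; refuse to prepare it from any other EE.
         */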
        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
                dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
                        TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
                return -ENOTCONN;
        }

        mutex_lock(&mhi_chan->mutex);

        /* Check if client manages channel context for offload channels */
        if (!mhi_chan->offload_ch) {
                ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
                if (ret)
                        goto error_init_chan;
        }
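
        /* Ask the device to move the channel to the running state */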
        ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
                                       MHI_CH_STATE_TYPE_START);
        if (ret)
                goto error_pm_state;
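
        /*
         * Only inbound (device-to-host) channels may ask the core to
         * allocate and queue their receive buffers on their behalf.
         */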
        if (mhi_chan->dir == DMA_FROM_DEVICE)
                mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);

        /* Pre-allocate buffers for the xfer ring */
        if (mhi_chan->pre_alloc) {
                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
                                                       &mhi_chan->tre_ring);
                size_t len = mhi_cntrl->buffer_len;
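
                /* Queue one buffer on every free element of the ring */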
                while (nr_el--) {
                        void *buf;
                        struct mhi_buf_info info = { };

                        buf = kmalloc(len, GFP_KERNEL);
                        if (!buf) {
                                ret = -ENOMEM;
                                goto error_pre_alloc;
                        }

                        /* Prepare transfer descriptors */
                        info.v_addr = buf;
                        info.cb_buf = buf;
                        info.len = len;
                        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
                        if (ret) {
                                kfree(buf);
                                goto error_pre_alloc;
                        }
                }
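
                /*
                 * Ring the channel doorbell so the device sees the
                 * queued buffers, but only if register access is
                 * allowed in the current power management state.
                 */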
                read_lock_bh(&mhi_cntrl->pm_lock);
                if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
                        read_lock_irq(&mhi_chan->lock);
                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
                        read_unlock_irq(&mhi_chan->lock);
                }
                read_unlock_bh(&mhi_cntrl->pm_lock);
        }

        mutex_unlock(&mhi_chan->mutex);

        return 0;
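
/* Error paths unwind in reverse order of the setup above */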
error_pm_state:
        if (!mhi_chan->offload_ch)
                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
        mutex_unlock(&mhi_chan->mutex);

        return ret;
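
/*
 * Buffers already queued above are freed when mhi_unprepare_channel()
 * resets the channel.
 */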
error_pre_alloc:
        mutex_unlock(&mhi_chan->mutex);
        mhi_unprepare_channel(mhi_cntrl, mhi_chan);

        return ret;
}
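
/*
 * A minimal caller sketch, not part of this file: client drivers do not
 * call mhi_prepare_channel() directly but reach it through
 * mhi_prepare_for_transfer() (flags = 0) or
 * mhi_prepare_for_transfer_autoqueue() (flags = MHI_CH_INBOUND_ALLOC_BUFS),
 * which prepare both the UL and DL channel of an mhi_device. The probe
 * below is hypothetical.
 */
static int example_probe(struct mhi_device *mhi_dev,
                         const struct mhi_device_id *id)
{
        /*
         * With autoqueue, the core pre-fills the inbound transfer ring
         * with mhi_cntrl->buffer_len sized buffers, as in the
         * pre_alloc branch above.
         */
        return mhi_prepare_for_transfer_autoqueue(mhi_dev);
}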