in mhi/core/init.c [877:1019]
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
                            const struct mhi_controller_config *config)
{
        struct mhi_event *mhi_event;
        struct mhi_chan *mhi_chan;
        struct mhi_cmd *mhi_cmd;
        struct mhi_device *mhi_dev;
        u32 soc_info;
        int ret, i;
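
        /*
         * The register accessors, runtime PM hooks, status callback and
         * IRQ resources are all mandatory, so validate them up front.
         */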
        if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
            !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
            !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
            !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
            !mhi_cntrl->irq || !mhi_cntrl->reg_len)
                return -EINVAL;
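
        /*
         * parse_config() validates the configuration and allocates the event
         * ring and channel tables that the error path below releases.
         */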
        ret = parse_config(mhi_cntrl, config);
        if (ret)
                return -EINVAL;
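
        /* Allocate one context per command ring */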
        mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
                                     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
        if (!mhi_cntrl->mhi_cmd) {
                ret = -ENOMEM;
                goto err_free_event;
        }
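
        /*
         * Set up the PM state machine: queued state transitions are drained
         * by st_worker, and state_event lets callers wait for state changes.
         */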
        INIT_LIST_HEAD(&mhi_cntrl->transition_list);
        mutex_init(&mhi_cntrl->pm_mutex);
        rwlock_init(&mhi_cntrl->pm_lock);
        spin_lock_init(&mhi_cntrl->transition_lock);
        spin_lock_init(&mhi_cntrl->wlock);
        INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
        init_waitqueue_head(&mhi_cntrl->state_event);
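
        /*
         * State transitions must be processed one at a time, hence an
         * ordered, high-priority workqueue.
         */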
        mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
        if (!mhi_cntrl->hiprio_wq) {
                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
                ret = -ENOMEM;
                goto err_free_cmd;
        }
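
        /* Initialize the per-command-ring locks */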
        mhi_cmd = mhi_cntrl->mhi_cmd;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
                spin_lock_init(&mhi_cmd->lock);
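
        /*
         * Set up the event rings: control rings get the control event
         * tasklet, all other rings the data event tasklet.
         */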
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                /* Skip for offload events */
                if (mhi_event->offload_ev)
                        continue;

                mhi_event->mhi_cntrl = mhi_cntrl;
                spin_lock_init(&mhi_event->lock);
                if (mhi_event->data_type == MHI_ER_CTRL)
                        tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
                                     (ulong)mhi_event);
                else
                        tasklet_init(&mhi_event->task, mhi_ev_task,
                                     (ulong)mhi_event);
        }
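
        /* Initialize the per-channel locks and completions */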
        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                mutex_init(&mhi_chan->mutex);
                init_completion(&mhi_chan->completion);
                rwlock_init(&mhi_chan->lock);

                /* used in setting bei field of TRE */
                mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
                mhi_chan->intmod = mhi_event->intmod;
        }
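
        /* Select bounce-buffer or direct DMA mapping helpers */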
        if (mhi_cntrl->bounce_buf) {
                mhi_cntrl->map_single = mhi_map_single_use_bb;
                mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
        } else {
                mhi_cntrl->map_single = mhi_map_single_no_bb;
                mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
        }

        /* Read the MHI device info */
        ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
                           SOC_HW_VERSION_OFFS, &soc_info);
        if (ret)
                goto err_destroy_wq;

        mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
                                        SOC_HW_VERSION_FAM_NUM_SHFT;
        mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
                                        SOC_HW_VERSION_DEV_NUM_SHFT;
        mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
                                        SOC_HW_VERSION_MAJOR_VER_SHFT;
        mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
                                        SOC_HW_VERSION_MINOR_VER_SHFT;
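
        /* Allocate a unique index; it names the controller device "mhi%d" below */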
        mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
        if (mhi_cntrl->index < 0) {
                ret = mhi_cntrl->index;
                goto err_destroy_wq;
        }

        /* Register controller with MHI bus */
        mhi_dev = mhi_alloc_device(mhi_cntrl);
        if (IS_ERR(mhi_dev)) {
                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
                ret = PTR_ERR(mhi_dev);
                goto err_ida_free;
        }

        mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
        mhi_dev->mhi_cntrl = mhi_cntrl;
        dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
        mhi_dev->name = dev_name(&mhi_dev->dev);

        /* Init wakeup source */
        device_init_wakeup(&mhi_dev->dev, true);

        ret = device_add(&mhi_dev->dev);
        if (ret)
                goto err_release_dev;

        mhi_cntrl->mhi_dev = mhi_dev;

        mhi_create_debugfs(mhi_cntrl);

        return 0;
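
/*
 * Error unwind: reverse order of the setup above. err_free_event also
 * releases the channel table allocated by parse_config().
 */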
err_release_dev:
        put_device(&mhi_dev->dev);
err_ida_free:
        ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
        destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
        kfree(mhi_cntrl->mhi_cmd);
err_free_event:
        kfree(mhi_cntrl->mhi_event);
        vfree(mhi_cntrl->mhi_chan);

        return ret;
}
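
/*
 * Usage sketch (not part of init.c): a minimal controller driver fills in
 * the mandatory fields checked above before registering. The my_* callbacks,
 * mmio_base, mmio_len, irq_array and my_mhi_config are hypothetical; only
 * mhi_alloc_controller(), mhi_register_controller() and mhi_free_controller()
 * are real MHI core APIs.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mhi_controller *mhi_cntrl;
        int ret;

        mhi_cntrl = mhi_alloc_controller();
        if (!mhi_cntrl)
                return -ENOMEM;

        mhi_cntrl->cntrl_dev = &pdev->dev;       /* parent device */
        mhi_cntrl->regs = mmio_base;             /* mapped MMIO window */
        mhi_cntrl->reg_len = mmio_len;           /* size of that window */
        mhi_cntrl->nr_irqs = 1;
        mhi_cntrl->irq = irq_array;              /* at least one IRQ */
        mhi_cntrl->runtime_get = my_runtime_get; /* hypothetical callbacks */
        mhi_cntrl->runtime_put = my_runtime_put;
        mhi_cntrl->status_cb = my_status_cb;
        mhi_cntrl->read_reg = my_read_reg;
        mhi_cntrl->write_reg = my_write_reg;

        ret = mhi_register_controller(mhi_cntrl, &my_mhi_config);
        if (ret)
                mhi_free_controller(mhi_cntrl);

        return ret;
}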