int mhi_process_data_event_ring()

in mhi/core/main.c [962:1030]


/*
 * Process completion events from a data transfer event ring.
 *
 * Walks the event ring from the local (host) read pointer up to the
 * device-advertised read pointer in the event ring context, dispatching
 * each element to the transfer-event parser for its channel. At most
 * @event_quota transfer/RSC events are consumed per call (events for
 * unconfigured or out-of-range channels are skipped but still recycled
 * and do not count against the quota).
 *
 * @mhi_cntrl:   MHI controller the event ring belongs to
 * @mhi_event:   event ring to process
 * @event_quota: maximum number of TX/RSC completion events to handle
 *
 * Return: number of ring elements processed (including skipped ones),
 * or -EIO if power-management state forbids event access or the
 * context read pointer lies outside the ring.
 */
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;
	/* Read pointer from the shared event ring context; NOTE(review):
	 * presumably updated by the device — validated below before use.
	 */
	dma_addr_t ptr = er_ctxt->rp;

	/* Bail out early if the PM state does not permit event access */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	/* Defend against a corrupt/malicious device-provided pointer */
	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	/* Translate the DMA address to a host virtual ring element */
	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	/* Consume elements until we catch up with the device or the
	 * transfer-event quota is exhausted.
	 */
	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);

		/* Out-of-range channel ID indicates a misbehaving device;
		 * warn loudly but keep going (the element is skipped below).
		 */
		WARN_ON(chan >= mhi_cntrl->max_chan);

		/*
		 * Only process the event ring elements whose channel
		 * ID is within the maximum supported range.
		 */
		if (chan < mhi_cntrl->max_chan &&
		    mhi_cntrl->mhi_chan[chan].configured) {
			mhi_chan = &mhi_cntrl->mhi_chan[chan];

			/* Only TX and RSC TX completions consume quota;
			 * any other event type is silently skipped.
			 */
			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
		}

		/* Return the element to the ring and advance local rp */
		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		/* Re-read the device rp each iteration so events posted
		 * while we were processing are picked up in this pass;
		 * re-validate since the device may have written garbage.
		 */
		ptr = er_ctxt->rp;
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}
	/* Ring the event ring doorbell to acknowledge consumption, but
	 * only while doorbell access is valid; pm_lock guards against a
	 * concurrent PM state transition.
	 */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}