in intel/svm.c [909:1014]
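/*
 * Threaded handler for the VT-d page request interrupt: drain the Page
 * Request Queue (PRQ), forward each well-formed request to the I/O page
 * fault layer, and answer malformed ones with an invalid response.
 */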
static irqreturn_t prq_event_thread(int irq, void *d)
{
        struct intel_svm_dev *sdev = NULL;
        struct intel_iommu *iommu = d;
        struct intel_svm *svm = NULL;
        struct page_req_dsc *req;
        int head, tail, handled;
        u64 address;

        /*
         * Clear PPR bit before reading head/tail registers, to ensure that
         * we get a new interrupt if needed.
         */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

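        /*
         * PQH/PQT hold byte offsets of the ring head (software) and tail
         * (hardware); masking with PRQ_RING_MASK keeps them within the ring.
         */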
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        handled = (head != tail);
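        /* Consume one page_req_dsc per iteration until head catches up with tail. */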
        while (head != tail) {
                req = &iommu->prq[head / sizeof(*req)];
                address = (u64)req->addr << VTD_PAGE_SHIFT;

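                /*
                 * Sanity-check the descriptor before doing any lookups;
                 * anything malformed is answered with QI_RESP_INVALID and
                 * then skipped.
                 */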
                if (unlikely(!req->pasid_present)) {
                        pr_err("IOMMU: %s: Page request without PASID\n",
                               iommu->name);
bad_req:
                        svm = NULL;
                        sdev = NULL;
                        handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                        goto prq_advance;
                }

                if (unlikely(!is_canonical_address(address))) {
                        pr_err("IOMMU: %s: Address is not canonical\n",
                               iommu->name);
                        goto bad_req;
                }

                if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
                        pr_err("IOMMU: %s: Page request in Privilege Mode\n",
                               iommu->name);
                        goto bad_req;
                }

                if (unlikely(req->exe_req && req->rd_req)) {
                        pr_err("IOMMU: %s: Execution request not supported\n",
                               iommu->name);
                        goto bad_req;
                }

                if (!svm || svm->pasid != req->pasid) {
                        /*
                         * It can't go away, because the driver is not
                         * permitted to unbind the mm while any page faults
                         * are outstanding.
                         */
                        svm = pasid_private_find(req->pasid);
                        if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
                                goto bad_req;
                }

                if (!sdev || sdev->sid != req->rid) {
                        sdev = svm_lookup_device_by_sid(svm, req->rid);
                        if (!sdev)
                                goto bad_req;
                }

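                /*
                 * Per-device sequence number; emitted with the trace event
                 * below so individual requests can be correlated in traces.
                 */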
                sdev->prq_seq_number++;

                /*
                 * If prq is to be handled outside iommu driver via receiver
                 * of the fault notifiers, we skip the page response here.
                 */
                if (intel_svm_prq_report(iommu, sdev->dev, req))
                        handle_bad_prq_event(iommu, req, QI_RESP_INVALID);

                trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
                                 req->priv_data[0], req->priv_data[1],
                                 sdev->prq_seq_number);
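                /* Step to the next descriptor, wrapping at the end of the ring. */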
prq_advance:
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

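        /*
         * Tell hardware that all fetched descriptors have been processed by
         * moving the head pointer up to the tail snapshotted earlier.
         */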
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /*
         * Clear the page request overflow bit and wake up all threads that
         * are waiting for the completion of this handling.
         */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
                pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
                                    iommu->name);
                head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
                tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
                if (head == tail) {
                        iopf_queue_discard_partial(iommu->iopf_queue);
                        writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
                        pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared\n",
                                            iommu->name);
                }
        }

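        /*
         * Wake any thread (e.g. one draining the queue before tearing down
         * a PASID) that is waiting for this pass over the PRQ to finish.
         */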
        if (!completion_done(&iommu->prq_complete))
                complete(&iommu->prq_complete);

        return IRQ_RETVAL(handled);
}