static irqreturn_t blkif_interrupt(int irq, void *dev_id)

in xen-blkfront.c [1504:1664]
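
Interrupt handler for one blkfront ring (one ring per queue when multi-queue is in use). It drains completed responses from the shared ring, validating each one against the frontend's own shadow state before trusting it, and completes the corresponding blk-mq requests. Any inconsistency is treated as a broken or hostile backend: the device is marked BLKIF_STATE_ERROR and the event is never EOIed, so no further interrupts are delivered.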


static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
	struct blkfront_info *info = rinfo->dev_info;
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

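	/*
	 * Not (or no longer) connected: nothing to process, but ack the
	 * event as spurious so the lateeoi logic can throttle a storm.
	 */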
	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&rinfo->ring_lock, flags);
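	/* Re-entered if more responses arrive while we drain the ring. */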
 again:
	rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
	virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
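	/*
	 * More unconsumed responses than the ring can hold means the
	 * backend is lying about rsp_prod; treat that as fatal.
	 */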
	if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
		pr_alert("%s: illegal number of responses %u\n",
			 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
		goto err;
	}

	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		unsigned int op;

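		/* At least one response is being consumed: not a spurious event. */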
		eoiflag = 0;

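		/*
		 * Copy the response into private memory so the backend cannot
		 * change it under us while we validate it.
		 */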
		RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
		id = bret.id;

		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE;
		 * see get_id_from_freelist()).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			pr_alert("%s: response has incorrect id (%ld)\n",
				 info->gd->disk_name, id);
			goto err;
		}
		if (rinfo->shadow[id].status != REQ_WAITING) {
			pr_alert("%s: response references no pending request\n",
				 info->gd->disk_name);
			goto err;
		}

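		/*
		 * Mark the shadow entry as being processed so that a duplicate
		 * response for the same id trips the REQ_WAITING check above.
		 */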
		rinfo->shadow[id].status = REQ_PROCESSING;
		req  = rinfo->shadow[id].request;

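		/* For indirect requests the real operation sits one level down. */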
		op = rinfo->shadow[id].req.operation;
		if (op == BLKIF_OP_INDIRECT)
			op = rinfo->shadow[id].req.u.indirect.indirect_op;
		if (bret.operation != op) {
			pr_alert("%s: response has wrong operation (%u instead of %u)\n",
				 info->gd->disk_name, bret.operation, op);
			goto err;
		}

		if (bret.operation != BLKIF_OP_DISCARD) {
			/*
			 * We may need to wait for an extra response if the
			 * I/O request is split in two.
			 */
			if (!blkif_completion(&id, rinfo, &bret))
				continue;
		}

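		/* The request is fully accounted for: recycle its ring id. */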
		if (add_id_to_freelist(rinfo, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret.operation), id);
			continue;
		}

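		/* Translate the Xen response status into a block layer status. */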
		if (bret.status == BLKIF_RSP_OKAY)
			blkif_req(req)->error = BLK_STS_OK;
		else
			blkif_req(req)->error = BLK_STS_IOERR;

		switch (bret.operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;

				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
					   info->gd->disk_name, op_name(bret.operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
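				/* Backend cannot discard: stop issuing such requests. */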
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
			}
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret.operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
			}
			if (unlikely(bret.status == BLKIF_RSP_ERROR &&
				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
				pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret.operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
			}
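			/*
			 * Any failed flush/barrier disables the feature for the
			 * device; a NOTSUPP result is completed as success, since
			 * there was nothing the device could have flushed anyway.
			 */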
			if (unlikely(blkif_req(req)->error)) {
				if (blkif_req(req)->error == BLK_STS_NOTSUPP)
					blkif_req(req)->error = BLK_STS_OK;
				info->feature_fua = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			fallthrough;
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret.status != BLKIF_RSP_OKAY))
				dev_dbg_ratelimited(&info->xbdev->dev,
					"Bad return from blkdev data request: %#x\n",
					bret.status);

			break;
		default:
			BUG();
		}

		if (likely(!blk_should_fake_timeout(req->q)))
			blk_mq_complete_request(req);
	}

	rinfo->ring.rsp_cons = i;

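	/*
	 * With requests still in flight, re-arm rsp_event and do a final
	 * check for responses that raced with this pass; otherwise just
	 * ask for an event on the next response.
	 */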
	if (i != rinfo->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		rinfo->ring.sring->rsp_event = i + 1;

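	/* Completions freed ring slots: restart any stopped queues. */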
	kick_pending_request_queues_locked(rinfo);

	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

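	/*
	 * Late EOI: still flagged spurious if nothing was consumed, letting
	 * the event channel code defend against interrupt storms.
	 */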
	xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;

 err:
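	/* The backend fed us inconsistent data: stop using the device. */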
	info->connected = BLKIF_STATE_ERROR;

	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

	/* No EOI in order to avoid further interrupts. */

	pr_alert("%s disabled for further use\n", info->gd->disk_name);
	return IRQ_HANDLED;
}