static void amd_handle_event()

in hw/amd/ntb_hw_amd.c [579:648]


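/*
 * Handle a peer event interrupt: read the event status, record the
 * event in ndev->peer_sta, acknowledge it towards the SMU where
 * required, notify the NTB core of link changes, and finally clear
 * the handled status bits.
 */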
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	struct device *dev = &ndev->ntb.pdev->dev;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
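	/* the peer reports that its flush has completed */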
	case AMD_PEER_FLUSH_EVENT:
		ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
		dev_info(dev, "Flush is done.\n");
		break;
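	/*
	 * Peer reset or link down: record the event, ack the SMU,
	 * report the link change and start polling for the peer to
	 * come back.
	 */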
	case AMD_PEER_RESET_EVENT:
	case AMD_LINK_DOWN_EVENT:
		ndev->peer_sta |= status;
		if (status == AMD_LINK_DOWN_EVENT)
			ndev->peer_sta &= ~AMD_LINK_UP_EVENT;

		amd_ack_smu(ndev, status);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* start the heartbeat timer to poll the peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
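	/*
	 * Peer entering D3, peer PME turn-off, or link up: record the
	 * event, ack the SMU and let the NTB core re-evaluate the link.
	 */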
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
		ndev->peer_sta |= status;
		if (status == AMD_LINK_UP_EVENT)
			ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
		else if (status == AMD_PEER_D3_EVENT)
			ndev->peer_sta &= ~AMD_PEER_D0_EVENT;

		amd_ack_smu(ndev, status);

		/* notify the NTB core of the link state change */
		ntb_link_event(&ndev->ntb);

		break;
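	/* the peer has returned to D0 (powered back up) */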
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is WAKEUP event */
		if (status & 0x1)
			dev_info(dev, "Wakeup is done.\n");

		ndev->peer_sta |= AMD_PEER_D0_EVENT;
		ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(dev, "event status = 0x%x.\n", status);
		break;
	}

	/* Clear the interrupt status */
	writel(status, mmio + AMD_INTSTAT_OFFSET);
}