static int xeon_setup_b2b_mw()

in hw/intel/ntb_hw_gen1.c [1291:1535]


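/* Summary of the flow below (added for orientation; derived from the code):
 * set up the b2b (back-to-back) memory window used to reach the peer NTB
 * registers. Choose a BAR for the b2b window (none, half of a shared memory
 * window BAR, or a whole dedicated BAR), mirror the primary BAR sizes into
 * the secondary BAR size registers, program the secondary BAR bases and
 * limits, zero the incoming translations, and point the outgoing
 * translations at the peer addresses.
 */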
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

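	/* Choose the BAR that will back the b2b window: none at all, the
	 * first half of a shared memory window BAR, or a whole BAR (which is
	 * then no longer available as a memory window).
	 */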
	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(&pdev->dev, "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(pdev, b2b_bar);

		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= (bar_size >> 1)) {
			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(&pdev->dev, "b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
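	/* Each secondary size, base and limit register written below is read
	 * back after the write so the debug output reflects the value the
	 * hardware actually accepted.
	 */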
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

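	/* With limit == base the window has zero size, so no incoming
	 * accesses are translated until the memory window is configured.
	 */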
	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}