static int pktdma_setup_resources(struct udma_dev *ud)

in ti/k3-udma.c [4818:4959]


static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

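	/*
	 * Allocate the PKTDMA bookkeeping: bitmaps for tchan/rchan/tflow
	 * availability and rflow usage, plus the per-channel and per-flow
	 * state arrays.
	 */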
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
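		/* PKTDMA has no block-copy channels, so skip the BCHAN range */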
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/*
	 * tchan ranges. For this and the ranges below: when TISCI provides
	 * no range, the whole bitmap is left usable (cleared); otherwise it
	 * is filled as all-reserved and only the host-assigned ranges are
	 * cleared by udma_mark_resource_ranges().
	 */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

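	/*
	 * Collect the flow ranges (tflow first, then rflow), offset by the
	 * SoC-specific OES values, into irq_res so the matching MSI/INTA
	 * interrupts can be allocated in one go.
	 */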
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
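
For context, a minimal sketch of the range-marking helper used above, under the assumption that it simply clears the TISCI-provided range from a map that was pre-filled as all-reserved; this is an illustration of the assumed behavior, not the upstream implementation verbatim:

/* Hedged sketch: assumed behavior of the range-marking helper used above */
static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
				      struct ti_sci_resource_desc *rm_desc,
				      char *name)
{
	/* Mark the range handed to this host as allocatable (clear the bits) */
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d\n",
		name, rm_desc->start, rm_desc->num);
}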