static int udma_setup_resources(struct udma_dev *ud)

in ti/k3-udma.c [4452:4612]


static int udma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
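	/*
	 * The AM654 compatibles use fixed tier boundaries; otherwise the
	 * ultra-high and high throughput channel counts reported in CAP3
	 * determine where each tier starts.
	 */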
	if (of_device_is_compatible(dev->of_node,
				    "ti,am654-navss-main-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 8;
	} else if (of_device_is_compatible(dev->of_node,
					   "ti,am654-navss-mcu-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 2;
	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

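	/* RX channels share the throughput tier layout of the TX channels */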
	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

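	/*
	 * Allocate the channel/flow descriptor arrays and the allocation
	 * bitmaps; a set bit in a map marks a resource that is not
	 * available for this host to use.
	 */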
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
	    !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
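		/* bchans and tflows only exist on BCDMA / PKTDMA, skip them */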
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
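	/*
	 * No range from TISCI means every tchan is usable; otherwise start
	 * fully reserved and only the ranges granted to this host are
	 * released by udma_mark_resource_ranges().
	 */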
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
		irq_res.sets = rm_res->sets;
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
		irq_res.sets += rm_res->sets;
	}

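	/*
	 * Build an interrupt resource list covering the tchan and rchan
	 * ranges and hand it to the TI SCI INTA MSI domain; rchan entries
	 * are shifted by the SoC specific udma_rchan output event offset.
	 */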
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = 0;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			if (rm_res->desc[j].num) {
				irq_res.desc[i].start = rm_res->desc[j].start +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num = rm_res->desc[j].num;
			}
			if (rm_res->desc[j].num_sec) {
				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
			}
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
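	/*
	 * Without a TISCI range, every GP flow above the default per-rchan
	 * flows is handed to Linux; otherwise only the granted ranges are
	 * made allocatable.
	 */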
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
						  &rm_res->desc[i], "gp-rflow");
	}

	return 0;
}
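
The TISCI ranges are folded into the allocation bitmaps by udma_mark_resource_ranges(), which is outside this excerpt. A minimal sketch of that helper, inferred from how it is used above (clearing the primary and secondary window of each granted descriptor so those entries become allocatable), is shown below; treat it as an illustration rather than the exact kernel implementation.

/* Sketch only: release the windows described by a TISCI resource descriptor */
static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
				      struct ti_sci_resource_desc *rm_desc,
				      char *name)
{
	/* primary range granted to this host */
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	/* secondary range granted to this host */
	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);

	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n",
		name, rm_desc->start, rm_desc->num,
		rm_desc->start_sec, rm_desc->num_sec);
}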