/* static int he_init_group() — from drivers/atm/he.c, lines 770–908 */
/*
 * he_init_group() - set up the receive/transmit buffer queues for one
 * buffer group on the HE adapter.
 *
 * Allocates and programs, in order:
 *   - the large receive buffer pool (RBPL): a bookkeeping bitmap and
 *     virt-pointer table, a DMA pool of buffers, and a DMA-coherent ring
 *     of he_rbp descriptors pre-filled with one buffer per slot;
 *   - the receive buffer return queue (RBRQ);
 *   - the transmit buffer return queue (TBRQ).
 *
 * The small buffer pool (RBPS) registers are written first to disable it.
 * RBP registers are spaced 32 bytes per group; RBRQ/TBRQ registers are
 * spaced 16 bytes per group.
 *
 * Returns 0 on success, or -ENOMEM with everything acquired so far
 * unwound via the goto chain at the bottom.
 */
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	/* Disable the small buffer pool for this group: zero its start,
	 * tail and queue-interrupt registers, and give it a minimal
	 * threshold with a queue size of zero. */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	/* One bit per rbpl_virt slot; a set bit marks the slot as holding
	 * an outstanding buffer. */
	he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
					   sizeof(*he_dev->rbpl_table),
					   GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	/* Index -> struct he_buff * lookup table, parallel to the bitmap. */
	he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
					  sizeof(*he_dev->rbpl_virt),
					  GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	/* DMA pool of CONFIG_RBPL_BUFSIZE-byte buffers, 64-byte aligned. */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	/* Coherent descriptor ring the adapter reads buffer entries from. */
	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
					       &he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* Pre-populate every ring slot with a buffer from the pool. */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		/* Hint for where the next free-slot search should start. */
		he_dev->rbpl_hint = i + 1;
		/* The descriptor carries the slot index (shifted into the
		 * idx field) and the bus address of the data area, which
		 * starts after the struct he_buff header. */
		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	/* Program the RBPL registers: ring start, tail, per-buffer data
	 * size in words, and threshold/size/interrupt-enable. */
	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					       &he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	/* With coalescing, the adapter batches RBRQ interrupts by time
	 * and count; otherwise it interrupts on every completion. */
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					       &he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

	/* Error unwind: each label releases exactly the resources acquired
	 * before the jump, in reverse order of allocation. */
out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	/* Return every buffer handed out by the loop above to the pool. */
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}