/* tx_init() — excerpt from drivers/atm/iphase.c, original file lines 1902-1223+. */

/*
 * tx_init - initialize the transmit side of an Interphase ATM adapter.
 * @dev: the atm_dev whose private IADEV state is set up.
 *
 * Allocates the TX DLE (DMA list entry) ring, programs the segmentation
 * (SEG) unit registers, builds the buffer-descriptor table in adapter RAM,
 * preloads the Transmit Complete Queue (TCQ) and Packet Ready Queue (PRQ),
 * configures the CBR/UBR/ABR scheduling tables and VC tables, and allocates
 * the per-descriptor CPCS trailer buffers and per-VC test table.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (all resources
 * acquired up to the failure point are released via the goto-unwind chain).
 */
static int tx_init(struct atm_dev *dev)  
{  
	IADEV *iadev;  
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;  
	void *dle_addr;  
	int i;  
	u_short tcq_st_adr;  
	u_short *tcq_start;  
	u_short prq_st_adr;  
	u_short *prq_start;  
	struct main_vc *vc;  
	struct ext_vc *evc;   
        u_short tmp16;
        u32 vcsize_sel;
 
	iadev = INPH_IA_DEV(dev);  
        spin_lock_init(&iadev->tx_lock);
 
	IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
                                readw(iadev->seg_reg+SEG_MASK_REG));)  

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	/* DLE ring starts empty: read == write == start. */
	iadev->tx_dle_q.start = (struct dle*)dle_addr;  
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */  
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);  
	/* Mask all segmentation interrupts, idle the SEG unit, then reset it
	   before programming the control-memory layout below. */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
	writew(0, iadev->seg_reg+MODE_REG_0);  
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
	/* Cache the host-visible addresses of the tables inside segment RAM;
	   offsets scale with the board's memory size. */
        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
  
	/*  
	   Transmit side control memory map  
	   --------------------------------    
	 Buffer descr 	0x0000 (128 - 4K)  
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)   
					(512 - 1K) each  
					TCQ - 4K, PRQ - 5K  
	 CBR Table 	0x1800 (as needed) - 6K  
	 UBR Table	0x3000 (1K - 4K) - 12K  
	 UBR Wait queue	0x4000 (1K - 4K) - 16K  
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each  
				ABR Tbl - 20K, ABR Wq - 22K   
	 extended VC	0x6000 (1K - 8K) - 24K  
	 VC Table	0x8000 (1K - 32K) - 32K  
	  
	Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
	and Wait q, which can be allotted later.  
	*/  
     
	/* Buffer Descriptor Table Base address */  
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
  
	/* initialize each entry in the buffer descriptor table */  
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
	/* Entry 0 is zeroed and skipped: descriptor numbering is 1-based. */
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
	buf_desc_ptr++;  
	tx_pkt_start = TX_PACKET_RAM;  
	for(i=1; i<=iadev->num_tx_desc; i++)  
	{  
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
		buf_desc_ptr->desc_mode = AAL5;  
		/* Each descriptor points at its own tx_buf_sz-sized slice of
		   packet RAM, split across two 16-bit address registers. */
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
		buf_desc_ptr++;		  
		tx_pkt_start += iadev->tx_buf_sz;  
	}  
	/* Host-side bookkeeping: one cpcs_trailer_desc per tx descriptor. */
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
        if (!iadev->tx_buf) {
            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_dle;
        }
       	for (i= 0; i< iadev->num_tx_desc; i++)
       	{
	    struct cpcs_trailer *cpcs;
 
	    /* GFP_DMA: trailer is handed to the device via dma_map_single. */
       	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
            if(!cpcs) {                
		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
		goto err_free_tx_bufs;
            }
	    iadev->tx_buf[i].cpcs = cpcs;
	    /* NOTE(review): mapping result is not checked with
	       dma_mapping_error() — confirm whether that is acceptable here. */
	    iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
						       cpcs,
						       sizeof(*cpcs),
						       DMA_TO_DEVICE);
        }
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}
  
	/* Communication Queues base address */  
        i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
  
	/* Transmit Complete Queue */  
	writew(i, iadev->seg_reg+TCQ_ST_ADR);  
	writew(i, iadev->seg_reg+TCQ_RD_PTR);  
	/* Write pointer starts one full descriptor-set past the read pointer
	   because the loop below preloads num_tx_desc free descriptors. */
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
                                              iadev->seg_reg+TCQ_ED_ADR); 
	/* Fill the TCQ with all the free descriptors. */  
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
	for(i=1; i<=iadev->num_tx_desc; i++)  
	{  
		*tcq_start = (u_short)i;  
		tcq_start++;  
	}  
  
	/* Packet Ready Queue */  
        i = PKT_RDY_Q * iadev->memSize; 
	writew(i, iadev->seg_reg+PRQ_ST_ADR);  
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
                                              iadev->seg_reg+PRQ_ED_ADR);
	/* PRQ starts empty: read == write == start. */
	writew(i, iadev->seg_reg+PRQ_RD_PTR);  
	writew(i, iadev->seg_reg+PRQ_WR_PTR);  
	 
        /* Load local copy of PRQ and TCQ ptrs */
        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
 	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */  
	/* Fill the PRQ with all the free descriptors. */  
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
	for(i=1; i<=iadev->num_tx_desc; i++)  
	{  
		/* NOTE(review): the comment below says "desc 1" but the code
		   writes 0 — confirm which value the hardware expects. */
		*prq_start = (u_short)0;	/* desc 1 in all entries */  
		prq_start++;  
	}  
	/* CBR Table */  
        IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
        writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

        IF_INIT(printk("value in register = 0x%x\n",
                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
	/* CBR table begin/end are programmed as word (16-bit) offsets, hence
	   the >> 1 on the byte offsets. */
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
          readw(iadev->seg_reg+CBR_TAB_END+1));)

        /* Initialize the CBR Scheduling Table */
        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
                                                          0, iadev->num_vc*6); 
        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
        iadev->CbrEntryPt = 0;
        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
        iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */  
	/* initialize all bytes of UBR scheduler table and wait queue to 0   
		- SCHEDSZ is 1K (# of entries).  
		- UBR Table size is 4K  
		- UBR wait queue is 4K  
	   since the table and wait queues are contiguous, all the bytes   
	   can be initialized by one memset.
	*/  
        
	/* Derive the VC-table size selector: vcsize_sel = log2(8K / num_vc).
	   NOTE(review): this loop never terminates unless num_vc is a power
	   of two <= 8192 — presumably guaranteed by probe; confirm. */
        vcsize_sel = 0;
        i = 8*1024;
        while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
        }
 
        i = MAIN_VC_TABLE * iadev->memSize;
        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
        i =  EXT_VC_TABLE * iadev->memSize;
        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
        i = UBR_SCHED_TABLE * iadev->memSize;
        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
        i = UBR_WAIT_Q * iadev->memSize; 
        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
 	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
                                                       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
	/* initialize all bytes of ABR scheduler table and wait queue to 0   
		- SCHEDSZ is 1K (# of entries).  
		- ABR Table size is 2K  
		- ABR wait queue is 2K  
	   since the table and wait queues are contiguous, all the bytes   
	   can be initialized by one memset.
	*/  
        i = ABR_SCHED_TABLE * iadev->memSize;
        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
        i = ABR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
 
        i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
	/* Per-VC host-side rate-test bookkeeping (one entry per VC). */
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
        if (!iadev->testTable) {
           printk("Get freepage  failed\n");
	   goto err_free_desc_tbl;
        }
	for(i=0; i<iadev->num_vc; i++)  
	{  
		/* Clear the on-board main and extended VC table entries while
		   allocating the matching host test-table entry. */
		memset((caddr_t)vc, 0, sizeof(*vc));  
		memset((caddr_t)evc, 0, sizeof(*evc));  
                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
              	iadev->testTable[i]->lastTime = 0;
 		iadev->testTable[i]->fract = 0;
                iadev->testTable[i]->vc_status = VC_UBR;
		vc++;  
		evc++;  
	}  
  
	/* Other Initialization */  
	  
	/* Max Rate Register */  
        if (iadev->phy_type & FE_25MBIT_PHY) {
	   writew(RATE25, iadev->seg_reg+MAXRATE);  
	   /* 25 Mbit PHY: UBR only (no ABR). */
	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
        }
        else {
	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
        }
	/* Set Idle Header Registers to be sure */  
	writew(0, iadev->seg_reg+IDLEHEADHI);  
	writew(0, iadev->seg_reg+IDLEHEADLO);  
  
	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 

        iadev->close_pending = 0;
        init_waitqueue_head(&iadev->close_wait);
        init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);  
	ia_init_rtn_q(&iadev->tx_return_q);  

	/* RM Cell Protocol ID and Message Type */  
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
        skb_queue_head_init (&iadev->tx_backlog);
  
	/* Mode Register 1 */  
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
  
	/* Mode Register 0 */  
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
  
	/* Interrupt Status Register - read to clear */  
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
  
	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
        iadev->tx_pkt_cnt = 0;
        iadev->rate_limit = iadev->LineRate / 3;
  
	return 0;

	/* Error unwind: each label releases everything acquired after the
	   previous label's resources, in reverse order of acquisition. */
err_free_test_tables:
	/* i is the index of the failed testTable entry; free 0..i-1. */
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	/* All cpcs buffers were allocated; fall through to free them all. */
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	/* i is the index of the failed cpcs allocation; unmap+free 0..i-1. */
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}