static int ctcmpc_transmit_skb()

in drivers/s390/net/ctcm_main.c [648:823]

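Transmits a single sk_buff on an MPC channel. If the channel is not yet
idle for TX (or a sweep exchange is in progress), the frame is parked on
the channel's collect queue for later, grouped transmission; otherwise it
is framed with a PDU header plus a transport header (TH) and handed to
ccw_device_start(). Returns 0 on success, or -ENOMEM on allocation
failure (which also marks the MPC group inoperative).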

static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	struct pdu *p_header;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct th_header *header;
	struct sk_buff *nskb;
	int rc = 0;
	int ccw_idx;
	unsigned long hi;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
			__func__, dev->name, smp_processor_id(), ch,
			ch->id, fsm_getstate_str(ch->fsm));

	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
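		/*
		 * Channel is still busy with a previous TX (or a sweep is
		 * in progress): park the frame on the collect queue, to be
		 * sent later as part of a grouped transfer.
		 */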
		spin_lock_irqsave(&ch->collect_lock, saveflags);
		refcount_inc(&skb->users);

		p_header = skb_push(skb, PDU_HEADER_LENGTH);
		p_header->pdu_offset = skb->len - PDU_HEADER_LENGTH;
		p_header->pdu_proto = 0x01;
		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
			p_header->pdu_flag = PDU_FIRST | PDU_CNTL;
		} else {
			p_header->pdu_flag = PDU_FIRST;
		}
		p_header->pdu_seq = 0;

		CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
				"pdu header and data for up to 32 bytes:\n",
				__func__, dev->name, skb->len);
		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

		skb_queue_tail(&ch->collect_queue, skb);
		ch->collect_len += skb->len;

		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}

	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	refcount_inc(&skb->users);

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
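	 * A nonzero 'hi' below means the data (plus room for the TH
	 * header) would end above the 2 GB line, so the skb must first
	 * be copied into a 31-bit addressable GFP_DMA buffer.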
	 */
	hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			goto nomem_exit;

		skb_put_data(nskb, skb->data, skb->len);
		refcount_inc(&nskb->users);
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		skb = nskb;
	}

	p_header = skb_push(skb, PDU_HEADER_LENGTH);
	p_header->pdu_offset = skb->len - PDU_HEADER_LENGTH;
	p_header->pdu_proto = 0x01;
	p_header->pdu_seq = 0;
	if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
		p_header->pdu_flag = PDU_FIRST | PDU_CNTL;
	} else {
		p_header->pdu_flag = PDU_FIRST;
	}

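	/*
	 * Preserve FIFO order: if older frames are already collected,
	 * queue this one behind them and transmit the oldest instead.
	 */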
	if (ch->collect_len > 0) {
		spin_lock_irqsave(&ch->collect_lock, saveflags);
		skb_queue_tail(&ch->collect_queue, skb);
		ch->collect_len += skb->len;
		skb = skb_dequeue(&ch->collect_queue);
		ch->collect_len -= skb->len;
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	}

	p_header = (struct pdu *)skb->data;
	p_header->pdu_flag |= PDU_LAST;

	ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;

	/* put the TH on the packet */
	header = skb_push(skb, TH_HEADER_LENGTH);
	memset(header, 0, TH_HEADER_LENGTH);

	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
		       __func__, dev->name, ch->th_seq_num);

	CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
			"up to 32 bytes sent to vtam:\n",
				__func__, dev->name, skb->len);
	CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

	ch->ccw[4].count = skb->len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to trans_skb.
		 * trans_skb usually has a pre-allocated idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header.
			 * It gets added again on retransmit.
			 */
			goto nomem_exit;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_put_data(ch->trans_skb, skb->data, skb->len);
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
		CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
				"up to 32 bytes sent to vtam:\n",
				__func__, dev->name, ch->trans_skb->len);
		CTCM_D3_DUMP((char *)ch->trans_skb->data,
				min_t(int, 32, ch->trans_skb->len));
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
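	/*
	 * ccw_idx 0 starts the channel program that sends trans_skb
	 * (pre-built idal); ccw_idx 3 starts the one that sends the
	 * skb's own buffer via the cda set above.
	 */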
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
					sizeof(struct ccw1) * 3);

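	/* Fire the selected channel program under the ccw device lock. */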
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = jiffies;
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
	} else if (ccw_idx == 0) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
	}
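	/*
	 * A high TH sequence number triggers a sweep exchange so both
	 * sides can resynchronize before the counter wraps.
	 */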
	if (ch->th_seq_num > 0xf0000000)	/* Chosen at random. */
		ctcmpc_send_sweep_req(ch);

	goto done;
nomem_exit:
	CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
			"%s(%s): MEMORY allocation ERROR\n",
			CTCM_FUNTAIL, ch->id);
	rc = -ENOMEM;
	refcount_dec(&skb->users);
	dev_kfree_skb_any(skb);
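	/* Declare the MPC group inoperative so recovery can restart it. */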
	fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
	CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
	return rc;
}
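
As an illustration of the framing step above, here is a minimal,
self-contained user-space sketch of the same idea: prepend a PDU header
that records the payload length, and flag the lone PDU as both FIRST and
LAST. The struct layout, header size, and flag values (sketch_pdu,
SKETCH_PDU_*) are assumptions made for this sketch, not the definitions
from the kernel's ctcm_mpc.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PDU_FIRST 0x80	/* assumed flag encodings */
#define SKETCH_PDU_LAST  0x40

struct sketch_pdu {		/* hypothetical 8-byte header */
	uint32_t pdu_offset;	/* number of payload bytes that follow */
	uint8_t  pdu_flag;
	uint8_t  pdu_proto;	/* 0x01, as in the driver above */
	uint16_t pdu_seq;
};

/* Build "header + payload" into buf; returns the total frame length. */
static size_t sketch_frame_pdu(uint8_t *buf, const void *data, size_t len)
{
	struct sketch_pdu hdr = {
		.pdu_offset = (uint32_t)len,	/* like skb->len - PDU_HEADER_LENGTH */
		.pdu_flag   = SKETCH_PDU_FIRST | SKETCH_PDU_LAST,
		.pdu_proto  = 0x01,
		.pdu_seq    = 0,
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), data, len);
	return sizeof(hdr) + len;
}

int main(void)
{
	uint8_t frame[64];
	size_t n = sketch_frame_pdu(frame, "payload", 7);

	printf("framed %zu bytes (%zu-byte header + 7-byte payload)\n",
	       n, sizeof(struct sketch_pdu));
	return 0;
}

Note that the driver sets PDU_FIRST when building the header but PDU_LAST
only after a possible swap with the collect queue, because the frame that
actually goes out may be an older one dequeued at that point; the sketch
collapses the two assignments since it frames a single PDU.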