static void tx_only_all(void)

in bpf/xdpsock_user.c [1574:1665]


static void tx_only_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	u32 frame_nb[MAX_SOCKS] = {};
	unsigned long next_tx_ns = 0;
	int pkt_cnt = 0;
	int i, ret;

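	/* Wakeup-driven poll mode and fixed-period cycle mode are two
	 * mutually exclusive ways of pacing transmission */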
	if (opt_poll && opt_tx_cycle_ns) {
		fprintf(stderr,
			"Error: --poll and --tx-cycles are both set\n");
		return;
	}

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT;
	}

	if (opt_tx_cycle_ns) {
		/* Align Tx time to micro-second boundary */
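		/* Dividing by NSEC_PER_USEC, adding 1 and multiplying back
		 * rounds the current time up to the next whole microsecond */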
		next_tx_ns = (get_nsecs() / NSEC_PER_USEC + 1) *
			     NSEC_PER_USEC;
		next_tx_ns += opt_tx_cycle_ns;

		/* Initialize periodic Tx scheduling variance */
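		/* min starts at 1 s (in ns), effectively "infinite" for any
		 * sane cycle time, so the first sample always updates it */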
		tx_cycle_diff_min = 1000000000;
		tx_cycle_diff_max = 0;
		tx_cycle_diff_ave = 0.0;
	}

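	/* Main Tx loop: run until opt_pkt_count packets have been sent, or
	 * forever when opt_pkt_count is 0 */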
	while (!opt_pkt_count || pkt_cnt < opt_pkt_count) {
		int batch_size = get_batch_size(pkt_cnt);
		unsigned long tx_ns = 0;
		struct timespec next;
		int tx_cnt = 0;
		long diff;
		int err;

		if (opt_poll) {
			for (i = 0; i < num_socks; i++)
				xsks[i]->app_stats.opt_polls++;
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;

			/* Proceed only if at least one socket is writable */
			for (i = 0; i < num_socks; i++)
				if (fds[i].revents & POLLOUT)
					break;
			if (i == num_socks)
				continue;
		}

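		/* Fixed-period mode: sleep until the absolute deadline of
		 * the current cycle */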
		if (opt_tx_cycle_ns) {
			next.tv_sec = next_tx_ns / NSEC_PER_SEC;
			next.tv_nsec = next_tx_ns % NSEC_PER_SEC;
			err = clock_nanosleep(opt_clock, TIMER_ABSTIME, &next, NULL);
			if (err) {
				/* clock_nanosleep() returns the error number
				 * directly and does not set errno */
				if (err != EINTR)
					fprintf(stderr,
						"clock_nanosleep failed. Err:%d\n",
						err);
				break;
			}

			/* Measure periodic Tx scheduling variance */
			tx_ns = get_nsecs();
			diff = tx_ns - next_tx_ns;
			if (diff < tx_cycle_diff_min)
				tx_cycle_diff_min = diff;

			if (diff > tx_cycle_diff_max)
				tx_cycle_diff_max = diff;

			tx_cycle_diff_ave += (double)diff;
			tx_cycle_cnt++;
		} else if (opt_tstamp) {
			tx_ns = get_nsecs();
		}

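		/* Queue one batch on every socket; tx_only() returns how
		 * many packets it managed to queue */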
		for (i = 0; i < num_socks; i++)
			tx_cnt += tx_only(xsks[i], &frame_nb[i], batch_size, tx_ns);

		pkt_cnt += tx_cnt;

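		/* benchmark_done is set asynchronously, e.g. by the SIGINT
		 * handler */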
		if (benchmark_done)
			break;

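		/* Advance by a full period from the previous deadline, not
		 * from the actual wakeup time, so scheduling jitter does not
		 * accumulate as drift */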
		if (opt_tx_cycle_ns)
			next_tx_ns += opt_tx_cycle_ns;
	}

	if (opt_pkt_count)
		complete_tx_only_all();
}
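
The deadline-advance pattern above (sleep with TIMER_ABSTIME, then add the period to the previous deadline) is what keeps the Tx cycle drift-free. Below is a minimal, self-contained sketch of that pattern in isolation, assuming CLOCK_MONOTONIC; PERIOD_NS, NUM_CYCLES and do_work() are illustrative placeholders, not part of xdpsock.

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000ULL
#define PERIOD_NS	1000000ULL	/* example: 1 ms cycle */
#define NUM_CYCLES	1000		/* example: run 1000 cycles */

static void do_work(void)
{
	/* stand-in for the per-cycle work, e.g. tx_only() on each socket */
}

int main(void)
{
	unsigned long long next_ns;
	struct timespec ts;
	int i, err;

	/* First deadline: one period from now */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	next_ns = ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec + PERIOD_NS;

	for (i = 0; i < NUM_CYCLES; i++) {
		ts.tv_sec = next_ns / NSEC_PER_SEC;
		ts.tv_nsec = next_ns % NSEC_PER_SEC;

		/* TIMER_ABSTIME sleeps until an absolute point in time, so
		 * retrying after EINTR needs no leftover-time bookkeeping */
		do
			err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
					      &ts, NULL);
		while (err == EINTR);

		if (err) {
			fprintf(stderr, "clock_nanosleep: err %d\n", err);
			return 1;
		}

		do_work();

		/* Step from the previous deadline, not from the wakeup time,
		 * so a late wakeup in one cycle is not inherited by the next */
		next_ns += PERIOD_NS;
	}

	return 0;
}

Retrying on EINTR is safe here precisely because the deadline is absolute; with a relative sleep the remaining time would have to be recomputed on every interruption.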