in nimble/host/mesh/src/transport.c [459:604]
static int send_seg(struct bt_mesh_net_tx *net_tx, struct os_mbuf *sdu,
                    const struct bt_mesh_send_cb *cb, void *cb_data,
                    uint8_t *ctl_op)
{
        bool blocked = false;
        struct seg_tx *tx;
        uint8_t seg_o;
        int i;

        BT_DBG("src 0x%04x dst 0x%04x app_idx 0x%04x aszmic %u sdu_len %u",
               net_tx->src, net_tx->ctx->addr, net_tx->ctx->app_idx,
               net_tx->aszmic, sdu->om_len);
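
        /* Find a free transmission context. Another in-flight segmented
         * transmission between the same source and destination blocks this
         * one until it completes.
         */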
        for (tx = NULL, i = 0; i < ARRAY_SIZE(seg_tx); i++) {
                if (seg_tx[i].nack_count) {
                        blocked |= seg_tx_blocks(&seg_tx[i], net_tx->src,
                                                 net_tx->ctx->addr);
                } else if (!tx) {
                        tx = &seg_tx[i];
                }
        }

        if (!tx) {
                BT_ERR("No multi-segment message contexts available");
                return -EBUSY;
        }
        if (ctl_op) {
                tx->hdr = TRANS_CTL_HDR(*ctl_op, 1);
        } else if (BT_MESH_IS_DEV_KEY(net_tx->ctx->app_idx)) {
                tx->hdr = SEG_HDR(0, 0);
        } else {
                tx->hdr = SEG_HDR(1, net_tx->aid);
        }
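
        /* Fill in the transmission state: seg_n is the index of the last
         * segment (SegN) and nack_count the number of segments that still
         * await acknowledgment.
         */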
        tx->src = net_tx->src;
        tx->dst = net_tx->ctx->addr;
        tx->seg_n = (sdu->om_len - 1) / seg_len(!!ctl_op);
        tx->seg_o = 0;
        tx->len = sdu->om_len;
        tx->nack_count = tx->seg_n + 1;
        tx->seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_TX, bt_mesh.seq);
        tx->sub = net_tx->sub;
        tx->cb = cb;
        tx->cb_data = cb_data;
        tx->attempts = SEG_RETRANSMIT_ATTEMPTS;
        tx->seg_pending = 0;
        tx->xmit = net_tx->xmit;
        tx->aszmic = net_tx->aszmic;
        tx->friend_cred = net_tx->friend_cred;
        tx->blocked = blocked;
        tx->started = 0;
        tx->ctl = !!ctl_op;
        tx->ttl = net_tx->ctx->send_ttl;

        BT_DBG("SeqZero 0x%04x (segs: %u)",
               (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->nack_count);
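
        /* With the Friend feature enabled and a unicast destination, give up
         * early if the Friend Queue cannot hold the whole segmented message.
         */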
        if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
            !bt_mesh_friend_queue_has_space(tx->sub->net_idx, net_tx->src,
                                            tx->dst, &tx->seq_auth,
                                            tx->seg_n + 1) &&
            BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
                BT_ERR("Not enough space in Friend Queue for %u segments",
                       tx->seg_n + 1);
                seg_tx_reset(tx);
                return -ENOBUFS;
        }
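
        /* Split the SDU into segments. Each segment gets its own slab
         * buffer, and with the Friend feature enabled a copy of each segment
         * is also offered to the Friend Queue.
         */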
        for (seg_o = 0; sdu->om_len; seg_o++) {
                void *buf;
                uint16_t len;
                int err;

                err = k_mem_slab_alloc(&segs, &buf);
                if (err) {
                        BT_ERR("Out of segment buffers");
                        seg_tx_reset(tx);
                        return -ENOBUFS;
                }

                len = MIN(sdu->om_len, seg_len(!!ctl_op));
                memcpy(buf, net_buf_simple_pull_mem(sdu, len), len);

                BT_DBG("seg %u: %s", seg_o, bt_hex(buf, len));

                tx->seg[seg_o] = buf;

                if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
                        enum bt_mesh_friend_pdu_type type;
                        struct os_mbuf *seg = NET_BUF_SIMPLE(16);

                        seg_tx_buf_build(tx, seg_o, seg);

                        if (seg_o == tx->seg_n) {
                                type = BT_MESH_FRIEND_PDU_COMPLETE;
                        } else {
                                type = BT_MESH_FRIEND_PDU_PARTIAL;
                        }

                        if (bt_mesh_friend_enqueue_tx(
                                    net_tx, type, ctl_op ? NULL : &tx->seq_auth,
                                    tx->seg_n + 1, seg) &&
                            BT_MESH_ADDR_IS_UNICAST(net_tx->ctx->addr)) {
                                /* PDUs for a specific Friend should only go
                                 * out through the Friend Queue.
                                 */
                                k_mem_slab_free(&segs, &buf);
                                tx->seg[seg_o] = NULL;
                        }

                        os_mbuf_free_chain(seg);
                }
        }

        /* This can happen if segments only went into the Friend Queue */
        if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !tx->seg[0]) {
                seg_tx_reset(tx);

                /* If there was a callback, notify sending immediately since
                 * there's no other way to track this (at least currently)
                 * with the Friend Queue.
                 */
                send_cb_finalize(cb, cb_data);
                return 0;
        }

        if (blocked) {
                /* Move the sequence number, so we don't end up creating
                 * another segmented transmission with the same SeqZero while
                 * this one is blocked.
                 */
                bt_mesh_next_seq();

                BT_DBG("Blocked.");
                return 0;
        }
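
        /* Start transmitting: send every segment that has not been
         * acknowledged yet.
         */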
        seg_tx_send_unacked(tx);
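
        /* As an established Low Power Node, poll the Friend right away so a
         * buffered Segment Acknowledgment does not have to wait for the next
         * scheduled poll.
         */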
        if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
            bt_mesh_lpn_established()) {
                bt_mesh_lpn_poll();
        }

        return 0;
}