in binder.c [2291:2356]
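/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Walks @sgc_head, copying the scatter-gather data from the sender's
 * user buffer into the target buffer, and applies the pointer fixups
 * queued on @pf_head as they are reached (or skips over regions that
 * will be fixed up later, e.g. BINDER_TYPE_FDA fd arrays).
 *
 * Return: 0 on success, else -errno
 */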
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	/* every queued fixup and sg copy entry must have been consumed */
	BUG_ON(!list_empty(pf_head));
	BUG_ON(!list_empty(sgc_head));

	/* a positive ret is bytes left uncopied by the user copy; report as -EINVAL */
	return ret > 0 ? -EINVAL : ret;
}