in operation.c [720:785]
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	/*
	 * Traffic on an offloaded connection is handled by the host
	 * device rather than by the operation core, so refuse to send.
	 */
	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

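	/*
	 * Note: cycle % U16_MAX + 1 maps the free-running counter onto
	 * 1..U16_MAX; e.g. cycle 65534 yields id 65535 and cycle 65535
	 * wraps back to id 1, so a bidirectional id never collides with
	 * the reserved unidirectional id 0.
	 */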
	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	/* Mark the operation in flight; only the first final result is kept. */
	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		/*
		 * Arm the response timeout (in milliseconds); on expiry
		 * the timeout handler completes the operation with
		 * -ETIMEDOUT.
		 */
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
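For context, a minimal asynchronous caller might look like the sketch below. It relies on the standard greybus helpers gb_operation_create(), gb_operation_result() and gb_operation_put(); the operation type GB_EXAMPLE_TYPE_PING and the example_* functions are hypothetical.

/* Completion callback; runs in non-atomic workqueue context. */
static void example_ping_callback(struct gb_operation *operation)
{
	int ret = gb_operation_result(operation);

	if (ret)
		dev_err(&operation->connection->hd->dev,
			"ping failed: %d\n", ret);

	/* Drop the reference taken when the operation was created. */
	gb_operation_put(operation);
}

static int example_send_ping(struct gb_connection *connection)
{
	struct gb_operation *operation;
	int ret;

	/* No request or response payload for this hypothetical type. */
	operation = gb_operation_create(connection, GB_EXAMPLE_TYPE_PING,
					0, 0, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	/* 1000 ms response timeout; a zero timeout disables the timer. */
	ret = gb_operation_request_send(operation, example_ping_callback,
					1000, GFP_KERNEL);
	if (ret)
		gb_operation_put(operation);

	return ret;
}

Synchronous callers normally go through gb_operation_request_send_sync() instead, which invokes this function with an internal callback and blocks until the operation completes.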