void io_uring_service::notify_fork()

in demo_example/asio/asio/detail/impl/io_uring_service.ipp [105:183]


// Transition the io_uring service across a fork().
// - fork_prepare: cancel every outstanding operation and drain the ring so
//   no in-flight kernel work straddles the fork; completed/cancelled I/O
//   queues are posted to the scheduler for later restart.
// - fork_parent: restart the timeout and eventfd bookkeeping operations.
// - fork_child: tear down the inherited ring and build a fresh one.
void io_uring_service::notify_fork(
    asio::execution_context::fork_event fork_ev)
{
  switch (fork_ev)
  {
  case asio::execution_context::fork_prepare:
    {
      // Cancel all outstanding operations. They will be restarted
      // after the fork completes.
      mutex::scoped_lock registration_lock(registration_mutex_);
      for (io_object* io_obj = registered_io_objects_.first();
          io_obj != 0; io_obj = io_obj->next_)
      {
        mutex::scoped_lock io_object_lock(io_obj->mutex_);
        for (int i = 0; i < max_ops; ++i)
        {
          // Only submit a cancellation for queues that actually have
          // pending work and haven't already had a cancel requested.
          if (!io_obj->queues_[i].op_queue_.empty()
              && !io_obj->queues_[i].cancel_requested_)
          {
            // The service-wide mutex_ (guarding the ring / SQE slots) is
            // taken after the per-object lock; this nesting order is
            // deliberate — do not reorder.
            mutex::scoped_lock lock(mutex_);
            if (::io_uring_sqe* sqe = get_sqe())
              ::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
          }
        }
      }

      // Cancel the timeout operation. IOSQE_IO_DRAIN makes this SQE wait
      // until all previously submitted SQEs (including the per-queue
      // cancels above) have completed, acting as a barrier before we
      // start draining completions below.
      {
        mutex::scoped_lock lock(mutex_);
        if (::io_uring_sqe* sqe = get_sqe())
          ::io_uring_prep_cancel(sqe, &timeout_, IOSQE_IO_DRAIN);
        submit_sqes();
      }

      // Wait for all completions to come back, and post all completed I/O
      // queues to the scheduler. Note that some operations may have already
      // completed, or were explicitly cancelled. All others will be
      // automatically restarted.
      op_queue<operation> ops;
      for (; outstanding_work_ > 0; --outstanding_work_)
      {
        ::io_uring_cqe* cqe = 0;
        if (::io_uring_wait_cqe(&ring_, &cqe) != 0)
          break;
        if (void* ptr = ::io_uring_cqe_get_data(cqe))
        {
          // Completions tagged with the service itself, the timer queues,
          // or the timeout op are internal bookkeeping, not user I/O.
          if (ptr != this && ptr != &timer_queues_ && ptr != &timeout_)
          {
            io_queue* io_q = static_cast<io_queue*>(ptr);
            io_q->set_result(cqe->res);
            ops.push(io_q);
          }
        }
        // NOTE(review): the CQE is never marked consumed with
        // ::io_uring_cqe_seen in this loop — confirm the CQ head is
        // advanced elsewhere before the ring is reused, otherwise the
        // completion queue may appear full after the fork.
      }
      scheduler_.post_deferred_completions(ops);

      // Restart the eventfd operation.
      register_with_reactor();
    }
    break;

  case asio::execution_context::fork_parent:
    // Restart the timeout and eventfd operations.
    update_timeout();
    register_with_reactor();
    break;

  case asio::execution_context::fork_child:
    {
      // The child process gets a new io_uring instance.
      ::io_uring_queue_exit(&ring_);
      init_ring();
      register_with_reactor();
    }
    break;
  default:
    break;
  }
}