SchedulingResult FrameScheduler::scheduleFramesForPacket()

in quic/api/QuicPacketScheduler.cpp [228:313]
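
FrameScheduler::scheduleFramesForPacket assembles one QUIC packet: it encodes the packet header, computes the remaining congestion-controlled budget, gives each frame scheduler a turn in priority order (crypto, RST, ACK, window update, blocked, simple, ping, stream, and datagram frames), and finally pads the packet where required.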


SchedulingResult FrameScheduler::scheduleFramesForPacket(
    PacketBuilderInterface&& builder,
    uint32_t writableBytes) {
  builder.encodePacketHeader();
  // We need to keep track of writable bytes after writing the header.
  writableBytes = writableBytes > builder.getHeaderBytes()
      ? writableBytes - builder.getHeaderBytes()
      : 0;
  // We cannot return early if writableBytes drops to 0 here, since pure
  // acks can skip the writableBytes limit entirely.
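  // Note: PacketBuilderWrapper caps every write made through it at
  // writableBytes; writes made directly on the underlying builder (see the
  // pure-ack branch below) bypass that cap.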
  PacketBuilderWrapper wrapper(builder, writableBytes);
  bool cryptoDataWritten = false;
  bool rstWritten = false;
  if (cryptoStreamScheduler_ && cryptoStreamScheduler_->hasData()) {
    cryptoDataWritten = cryptoStreamScheduler_->writeCryptoData(wrapper);
  }
  if (rstScheduler_ && rstScheduler_->hasPendingRsts()) {
    rstWritten = rstScheduler_->writeRsts(wrapper);
  }
  // Long ago we decided that RSTs have higher priority than acks.
  if (ackScheduler_ && ackScheduler_->hasPendingAcks()) {
    if (cryptoDataWritten || rstWritten) {
      // If the packet has non-ack data, it is subject to congestion control,
      // so we need to write through the wrapper.
      ackScheduler_->writeNextAcks(wrapper);
    } else {
      // If we start by writing acks, we let the ack scheduler write up to
      // the full packet space. If the ack bytes exceed the writable bytes,
      // this will be a pure ack packet and it will skip the congestion
      // controller. Otherwise, we give other schedulers an opportunity to
      // write up to writableBytes.
      ackScheduler_->writeNextAcks(builder);
    }
  }
  if (windowUpdateScheduler_ &&
      windowUpdateScheduler_->hasPendingWindowUpdates()) {
    windowUpdateScheduler_->writeWindowUpdates(wrapper);
  }
  if (blockedScheduler_ && blockedScheduler_->hasPendingBlockedFrames()) {
    blockedScheduler_->writeBlockedFrames(wrapper);
  }
  // Simple frames should be scheduled before stream and retransmission
  // frames, because those frames might fill up all available bytes for
  // writing. If we are trying to send a PathChallenge frame, it could be
  // starved by them, making the connection proceed slowly because of path
  // validation rate limiting.
  if (simpleFrameScheduler_ &&
      simpleFrameScheduler_->hasPendingSimpleFrames()) {
    simpleFrameScheduler_->writeSimpleFrames(wrapper);
  }
  if (pingFrameScheduler_ && pingFrameScheduler_->hasPingFrame()) {
    pingFrameScheduler_->writePing(wrapper);
  }
  if (streamFrameScheduler_ && streamFrameScheduler_->hasPendingData()) {
    streamFrameScheduler_->writeStreams(wrapper);
  }
  if (datagramFrameScheduler_ &&
      datagramFrameScheduler_->hasPendingDatagramFrames()) {
    datagramFrameScheduler_->writeDatagramFrames(wrapper);
  }

  if (builder.hasFramesPending()) {
    const LongHeader* longHeader = builder.getPacketHeader().asLong();
    bool initialPacket =
        longHeader && longHeader->getHeaderType() == LongHeader::Types::Initial;
    if (initialPacket) {
      // This is an Initial packet; pad it out to fill the remaining space.
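      // (Datagrams carrying client Initial packets must be at least 1200
      // bytes per RFC 9000, so full-size Initials are the safe default.)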
      while (wrapper.remainingSpaceInPkt() > 0) {
        writeFrame(PaddingFrame(), builder);
      }
    }
    const ShortHeader* shortHeader = builder.getPacketHeader().asShort();
    if (shortHeader) {
      size_t paddingModulo = conn_.transportSettings.paddingModulo;
      if (paddingModulo > 0) {
        size_t paddingIncrement = wrapper.remainingSpaceInPkt() % paddingModulo;
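        // e.g. with paddingModulo = 16 and 37 bytes remaining, write
        // 37 % 16 = 5 padding frames, leaving 32 bytes (a multiple of 16)
        // and bucketing on-the-wire packet sizes into modulo-sized steps.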
        for (size_t i = 0; i < paddingIncrement; i++) {
          writeFrame(PaddingFrame(), builder);
        }
        QUIC_STATS(conn_.statsCallback, onShortHeaderPadding, paddingIncrement);
      }
    }
  }

  return SchedulingResult(folly::none, std::move(builder).buildPacket());
}
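
The wrapper-versus-builder split in the ack branch above is what lets a pure ack packet bypass the congestion controller: acks may be written directly on the builder, while every other scheduler writes through the budget-limited wrapper. A minimal standalone sketch of that mechanism (ToyBuilder and ToyWrapper are hypothetical stand-ins for illustration, not the mvfst API):

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for PacketBuilderInterface: just counts bytes.
struct ToyBuilder {
  uint32_t bytesWritten = 0;
  void append(uint32_t n) { bytesWritten += n; }
};

// Hypothetical stand-in for PacketBuilderWrapper: enforces a byte budget.
struct ToyWrapper {
  ToyBuilder& builder;
  uint32_t budget; // plays the role of congestion-controlled writableBytes
  bool append(uint32_t n) {
    if (n > budget) {
      return false; // write rejected: would exceed the budget
    }
    budget -= n;
    builder.append(n);
    return true;
  }
};

int main() {
  ToyBuilder builder;
  ToyWrapper wrapper{builder, /*budget=*/50}; // cwnd leaves only 50 bytes

  // Pure-ack path: write directly on the builder, ignoring the budget,
  // so acks can still go out when the congestion window is exhausted.
  builder.append(80); // 80 bytes of ACK frames

  // All other schedulers write through the wrapper and respect the budget.
  bool streamWritten = wrapper.append(100); // rejected: exceeds 50 bytes

  std::cout << "stream frame written: " << streamWritten
            << ", packet bytes: " << builder.bytesWritten << "\n";
  return 0;
}

In the real function the same PacketBuilderWrapper instance is shared by every scheduler except the pure-ack branch, which writes on the builder directly; that is why a packet already carrying crypto or RST data must route its acks through the wrapper as well.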