in cdk/extra/protobuf/protobuf-3.19.6/src/google/protobuf/compiler/cpp/cpp_message.cc [4249:4506]
void MessageGenerator::GenerateByteSize(io::Printer* printer) {
  if (HasSimpleBaseClass(descriptor_, options_)) return;

  Formatter format(printer, variables_);

  if (descriptor_->options().message_set_wire_format()) {
    // Special-case MessageSet.
    std::map<std::string, std::string> vars;
    SetUnknownFieldsVariable(descriptor_, options_, &vars);
    format.AddMap(vars);
    format(
        "size_t $classname$::ByteSizeLong() const {\n"
        "$annotate_bytesize$"
        "// @@protoc_insertion_point(message_set_byte_size_start:$full_name$)\n"
        "  size_t total_size = _extensions_.MessageSetByteSize();\n"
        "  if ($have_unknown_fields$) {\n"
        "    total_size += ::$proto_ns$::internal::\n"
        "        ComputeUnknownMessageSetItemsSize($unknown_fields$);\n"
        "  }\n"
        "  int cached_size = "
        "::$proto_ns$::internal::ToCachedSize(total_size);\n"
        "  SetCachedSize(cached_size);\n"
        "  return total_size;\n"
        "}\n");
    return;
  }
  if (num_required_fields_ > 1) {
    // Emit a function (rarely used, we hope) that handles the required fields
    // by checking for each one individually.
    format(
        "size_t $classname$::RequiredFieldsByteSizeFallback() const {\n"
        "// @@protoc_insertion_point(required_fields_byte_size_fallback_start:"
        "$full_name$)\n");
    format.Indent();
    format("size_t total_size = 0;\n");
    for (auto field : optimized_order_) {
      if (field->is_required()) {
        format(
            "\n"
            "if (_internal_has_$1$()) {\n",
            FieldName(field));
        format.Indent();
        PrintFieldComment(format, field);
        field_generators_.get(field).GenerateByteSize(printer);
        format.Outdent();
        format("}\n");
      }
    }
    format(
        "\n"
        "return total_size;\n");
    format.Outdent();
    format("}\n");
  }
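
  // For reference, for a message with two required int32 fields "a" and "b",
  // the fallback emitted above looks roughly like this (names illustrative;
  // the per-field size lines actually come from the field generators):
  //
  //   size_t Msg::RequiredFieldsByteSizeFallback() const {
  //     size_t total_size = 0;
  //     if (_internal_has_a()) {
  //       total_size += 1 +
  //           ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
  //               this->_internal_a());
  //     }
  //     if (_internal_has_b()) {
  //       ...
  //     }
  //     return total_size;
  //   }
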
  format(
      "size_t $classname$::ByteSizeLong() const {\n"
      "$annotate_bytesize$"
      "// @@protoc_insertion_point(message_byte_size_start:$full_name$)\n");
  format.Indent();
  format(
      "size_t total_size = 0;\n"
      "\n");

  if (descriptor_->extension_range_count() > 0) {
    format(
        "total_size += _extensions_.ByteSize();\n"
        "\n");
  }

  std::map<std::string, std::string> vars;
  SetUnknownFieldsVariable(descriptor_, options_, &vars);
  format.AddMap(vars);

  // Handle required fields (if any). We expect all of them to be present, so
  // emit one conditional that checks for that. If they are all present then
  // the fast path executes; otherwise the slow path executes.
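  // For example, with two required fields whose hasbits both land in word 0,
  // the emitted fast-path check looks roughly like (masks illustrative):
  //
  //   if ((_has_bits_[0] & 0x00000003) == 0x00000003) {  // fast path
  //     ... unconditional per-field size computations ...
  //   } else {
  //     total_size += RequiredFieldsByteSizeFallback();
  //   }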
  if (num_required_fields_ > 1) {
    // The fast path works if all required fields are present.
    const std::vector<uint32_t> masks_for_has_bits = RequiredFieldsBitMask();
    format("if ($1$) {  // All required fields are present.\n",
           ConditionalToCheckBitmasks(masks_for_has_bits));
    format.Indent();
    // Oneof fields cannot be required, so optimized_order_ contains all of
    // the fields that we need to potentially emit.
    for (auto field : optimized_order_) {
      if (!field->is_required()) continue;
      PrintFieldComment(format, field);
      field_generators_.get(field).GenerateByteSize(printer);
      format("\n");
    }
    format.Outdent();
    format(
        "} else {\n"  // the slow path
        "  total_size += RequiredFieldsByteSizeFallback();\n"
        "}\n");
  } else {
    // num_required_fields_ <= 1: no need to be tricky
    for (auto field : optimized_order_) {
      if (!field->is_required()) continue;
      PrintFieldComment(format, field);
      format("if (_internal_has_$1$()) {\n", FieldName(field));
      format.Indent();
      field_generators_.get(field).GenerateByteSize(printer);
      format.Outdent();
      format("}\n");
    }
  }
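
  // Group the remaining (non-required) fields into chunks: runs of fields
  // that share the same label (e.g. optional vs. repeated) and the same
  // hasbit byte, so each chunk can be guarded by one combined hasbits test.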
  std::vector<std::vector<const FieldDescriptor*>> chunks = CollectFields(
      optimized_order_,
      [&](const FieldDescriptor* a, const FieldDescriptor* b) -> bool {
        return a->label() == b->label() && HasByteIndex(a) == HasByteIndex(b);
      });

  // Remove chunks with required fields.
  chunks.erase(std::remove_if(chunks.begin(), chunks.end(), IsRequired),
               chunks.end());
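
  // ColdChunkSkipper (defined earlier in this file) may additionally wrap
  // runs of rarely-present chunks in a single combined hasbits guard, letting
  // the common path skip them wholesale; kColdRatio is its threshold.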
  ColdChunkSkipper cold_skipper(options_, chunks, has_bit_indices_, kColdRatio);
  int cached_has_word_index = -1;

  format(
      "$uint32$ cached_has_bits = 0;\n"
      "// Prevent compiler warnings about cached_has_bits being unused\n"
      "(void) cached_has_bits;\n\n");

  for (int chunk_index = 0; chunk_index < chunks.size(); chunk_index++) {
    const std::vector<const FieldDescriptor*>& chunk = chunks[chunk_index];
    const bool have_outer_if =
        chunk.size() > 1 && HasWordIndex(chunk[0]) != kNoHasbit;
    cold_skipper.OnStartChunk(chunk_index, cached_has_word_index, "", printer);

    if (have_outer_if) {
      // Emit an if() that will let us skip the whole chunk if none are set.
      uint32_t chunk_mask = GenChunkMask(chunk, has_bit_indices_);
      std::string chunk_mask_str =
          StrCat(strings::Hex(chunk_mask, strings::ZERO_PAD_8));

      // Check (up to) 8 has_bits at a time if we have more than one field in
      // this chunk. Due to field layout ordering, we may check
      // _has_bits_[last_chunk * 8 / 32] multiple times.
      GOOGLE_DCHECK_LE(2, popcnt(chunk_mask));
      GOOGLE_DCHECK_GE(8, popcnt(chunk_mask));

      if (cached_has_word_index != HasWordIndex(chunk.front())) {
        cached_has_word_index = HasWordIndex(chunk.front());
        format("cached_has_bits = _has_bits_[$1$];\n", cached_has_word_index);
      }
      format("if (cached_has_bits & 0x$1$u) {\n", chunk_mask_str);
      format.Indent();
    }
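
    // With the outer if in place, the emitted code for a chunk looks roughly
    // like (word index and mask illustrative):
    //
    //   cached_has_bits = _has_bits_[0];
    //   if (cached_has_bits & 0x000000ffu) {
    //     ... per-field presence checks and size computations ...
    //   }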
    // Go back and emit checks for each of the fields we processed.
    for (int j = 0; j < chunk.size(); j++) {
      const FieldDescriptor* field = chunk[j];
      const FieldGenerator& generator = field_generators_.get(field);
      bool have_enclosing_if = false;
      bool need_extra_newline = false;

      PrintFieldComment(format, field);

      if (field->is_repeated()) {
        // No presence check is required.
        need_extra_newline = true;
      } else if (HasHasbit(field)) {
        PrintPresenceCheck(format, field, has_bit_indices_, printer,
                           &cached_has_word_index);
        have_enclosing_if = true;
      } else {
        // Without field presence: field is serialized only if it has a
        // non-default value.
        have_enclosing_if =
            EmitFieldNonDefaultCondition(printer, "this->", field);
      }
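
      // For a singular string field with no hasbit, for example, the
      // enclosing condition emitted just above is roughly
      //   if (!this->_internal_name().empty()) {
      // (field name illustrative).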

      generator.GenerateByteSize(printer);

      if (have_enclosing_if) {
        format.Outdent();
        format(
            "}\n"
            "\n");
      }

      if (need_extra_newline) {
        format("\n");
      }
    }

    if (have_outer_if) {
      format.Outdent();
      format("}\n");
    }

    if (cold_skipper.OnEndChunk(chunk_index, printer)) {
      // Reset here, as it may have been updated in the just-closed if
      // statement.
      cached_has_word_index = -1;
    }
  }

  // Fields inside a oneof don't use _has_bits_ so we count them in a separate
  // pass.
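  // For a oneof named "payload", the emitted code looks roughly like (field
  // and case names illustrative):
  //
  //   switch (payload_case()) {
  //     case kName: {
  //       total_size += ...;  // size of the active field
  //       break;
  //     }
  //     case PAYLOAD_NOT_SET: {
  //       break;
  //     }
  //   }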
  for (auto oneof : OneOfRange(descriptor_)) {
    format("switch ($1$_case()) {\n", oneof->name());
    format.Indent();
    for (auto field : FieldRange(oneof)) {
      PrintFieldComment(format, field);
      format("case k$1$: {\n", UnderscoresToCamelCase(field->name(), true));
      format.Indent();
      if (!IsFieldStripped(field, options_)) {
        field_generators_.get(field).GenerateByteSize(printer);
      }
      format("break;\n");
      format.Outdent();
      format("}\n");
    }
    format(
        "case $1$_NOT_SET: {\n"
        "  break;\n"
        "}\n",
        ToUpper(oneof->name()));
    format.Outdent();
    format("}\n");
  }
  if (num_weak_fields_) {
    // TagSize + MessageSize
    format("total_size += _weak_field_map_.ByteSizeLong();\n");
  }

  if (UseUnknownFieldSet(descriptor_->file(), options_)) {
    // We go out of our way to put the computation of the uncommon path of
    // unknown fields in tail position. This allows for better code generation
    // of this function for simple protos.
    format(
        "return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_);\n");
  } else {
    format("if (PROTOBUF_PREDICT_FALSE($have_unknown_fields$)) {\n");
    format("  total_size += $unknown_fields$.size();\n");
    format("}\n");

    // We update _cached_size_ even though this is a const method. Because
    // const methods might be called concurrently, the update must use atomic
    // operations or the program has undefined behavior. In practice, since
    // any concurrent writes will be writing the exact same value, normal
    // writes would work on all common processors. We use a dedicated wrapper
    // class to abstract away the underlying atomic; this makes it easier, on
    // platforms where even relaxed memory order might have a perf impact, to
    // replace it with ordinary loads and stores.
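    // A minimal sketch of the idea behind that wrapper (the real class is
    // internal::CachedSize, declared in message_lite.h):
    //
    //   class CachedSize {
    //    public:
    //     int Get() const { return size_.load(std::memory_order_relaxed); }
    //     void Set(int size) { size_.store(size, std::memory_order_relaxed); }
    //    private:
    //     std::atomic<int> size_{0};
    //   };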
    format(
        "int cached_size = ::$proto_ns$::internal::ToCachedSize(total_size);\n"
        "SetCachedSize(cached_size);\n"
        "return total_size;\n");
  }

  format.Outdent();
  format("}\n");
}