in source/Transfer.cpp [323:490]
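// Applies the callee's propagation models at a call site: taint is read
// from each propagation's input argument, optionally collapsed, extended
// with inferred features and a local position, then written to the output
// port (the invoke result or an argument's register). Also adds
// `via-obscure` and `add-features-to-arguments` breadcrumbs to arguments
// when the callee model requires them.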
void apply_propagations(
MethodContext* context,
const AnalysisEnvironment* previous_environment,
AnalysisEnvironment* new_environment,
const IRInstruction* instruction,
const Callee& callee,
TaintTree& result_taint) {
const auto& instruction_sources = instruction->srcs_vec();
LOG_OR_DUMP(
context,
4,
"Processing propagations for call to `{}`",
show(callee.method_reference));
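// Each propagation maps a (possibly nested) input path on an argument to an
// output port, which is either the return value or another argument.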
for (const auto& [output, propagations] :
callee.model.propagations().elements()) {
auto output_features = FeatureMayAlwaysSet::make_always(
callee.model.add_features_to_arguments(output.root()));
for (const auto& propagation : propagations) {
LOG_OR_DUMP(
context, 4, "Processing propagation {} to {}", propagation, output);
const auto& input = propagation.input().root();
if (!input.is_argument()) {
WARNING_OR_DUMP(
context, 2, "Ignoring propagation with a return input: {}", input);
continue;
}
auto input_parameter_position = input.parameter_position();
if (input_parameter_position >= instruction_sources.size()) {
WARNING(
2,
"Model for method `{}` contains a port on parameter {} but the method only has {} parameters. Skipping...",
show(callee.method_reference),
input_parameter_position,
instruction_sources.size());
continue;
}
auto input_register_id = instruction_sources.at(input_parameter_position);
auto taint_tree = previous_environment->read(
input_register_id, propagation.input().path());
// Collapsing the tree here is required for correctness and performance.
// Propagations can be collapsed, which results in taking the common
// prefix of the input paths. Because of this, if we don't collapse here,
// we might build invalid trees. See the end-to-end test
// `propagation_collapse` for an example.
// However, collapsing can lead to false positives with the builder pattern,
// e.g.:
//   class A {
//     private String s1;
//
//     public A setS1(String s) {
//       this.s1 = s;
//       return this;
//     }
//   }
// In this case, collapsing the propagation results in the entire `this`
// object being tainted, which can lead to false positives for chained
// calls. The `no-collapse-on-propagation` mode is used to prevent such
// cases. See the end-to-end test `no_collapse_on_propagation` for an
// example.
if (!callee.model.no_collapse_on_propagation()) {
LOG_OR_DUMP(context, 4, "Collapsing taint tree {}", taint_tree);
taint_tree.collapse_inplace();
}
if (taint_tree.is_bottom()) {
continue;
}
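// Attach the propagation's features (plus any features-to-add declared on
// the input and output ports) and a local position to every taint element.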
FeatureMayAlwaysSet features = output_features;
features.add(propagation.features());
features.add_always(callee.model.add_features_to_arguments(input));
auto position =
context->positions.get(callee.position, input, instruction);
taint_tree.map([&features, position](Taint& taints) {
taints.add_inferred_features_and_local_position(features, position);
});
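// Write the propagated taint to the output port: into the invoke result
// taint for a Return port, or into the output argument's register for an
// Argument port. Updates are weak (joined with existing taint).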
switch (output.root().kind()) {
case Root::Kind::Return: {
LOG_OR_DUMP(
context,
4,
"Tainting invoke result path {} with {}",
output.path(),
taint_tree);
result_taint.write(
output.path(), std::move(taint_tree), UpdateKind::Weak);
break;
}
case Root::Kind::Argument: {
auto output_parameter_position = output.root().parameter_position();
auto output_register_id =
instruction_sources.at(output_parameter_position);
LOG_OR_DUMP(
context,
4,
"Tainting register {} path {} with {}",
output_register_id,
output.path(),
taint_tree);
new_environment->write(
output_register_id,
output.path(),
std::move(taint_tree),
UpdateKind::Weak);
break;
}
default:
mt_unreachable();
}
}
}
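// Second pass over the arguments: attach `via-obscure` and
// `add-features-to-arguments` breadcrumbs to the taint stored at each
// argument's memory locations.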
if (callee.model.add_via_obscure_feature() ||
callee.model.has_add_features_to_arguments()) {
for (std::size_t parameter_position = 0;
parameter_position < instruction_sources.size();
parameter_position++) {
auto parameter = Root(Root::Kind::Argument, parameter_position);
auto features = FeatureMayAlwaysSet::make_always(
callee.model.add_features_to_arguments(parameter));
auto register_id = instruction_sources[parameter_position];
auto memory_locations =
previous_environment->memory_locations(register_id);
// Check whether an argument of the caller is passed into a callee port
// with add_features_to_arguments on it. If so, infer an
// add_features_to_arguments on the caller's argument port.
if (!features.empty() && memory_locations.is_value() &&
memory_locations.size() == 1) {
auto* memory_location = *memory_locations.elements().begin();
auto access_path = memory_location->access_path();
if (access_path) {
context->model.add_add_features_to_arguments(
access_path->root(), features.always());
}
}
const auto* position = !features.empty()
? context->positions.get(callee.position, parameter, instruction)
: nullptr;
if (callee.model.add_via_obscure_feature()) {
features.add_always(context->features.get("via-obscure"));
}
if (features.empty()) {
continue;
}
for (auto* memory_location : memory_locations.elements()) {
auto taint = new_environment->read(memory_location);
taint.map([&features, position](Taint& sources) {
sources.add_inferred_features_and_local_position(features, position);
});
new_environment->write(
memory_location, std::move(taint), UpdateKind::Strong);
}
}
}
}