in critter/stack.py [0:0]
def wait_for_config_evaluation(self):
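        """Wait for the Config rule to be evaluated and for every tracked resource to have a recorded
        evaluation result.

        Resources expected to be NOT_APPLICABLE are dropped up front, since Config does not store
        NOT_APPLICABLE evaluations.
        """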
for r_id in list(self.resources.keys()):
if self.resources[r_id]["expected_compliance_type"] == "NOT_APPLICABLE":
# TODO: Determine some way to test for this. NOT_APPLICABLE evaluations are not stored by Config.
# Search thru recent PutEvaluations events in CloudTrail and look for the resource id?
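                # A possible approach (untested sketch, not part of the current implementation): look up recent
                # PutEvaluations calls in CloudTrail and check whether this resource id was reported as
                # NOT_APPLICABLE. This assumes the PutEvaluations request parameters (each evaluation's
                # complianceResourceId and complianceType) appear in the CloudTrail event record, and that
                # boto3, json, datetime, timedelta and timezone are imported:
                #
                #   trail = boto3.client("cloudtrail")
                #   events = trail.lookup_events(
                #       LookupAttributes=[{"AttributeKey": "EventName", "AttributeValue": "PutEvaluations"}],
                #       StartTime=datetime.now(timezone.utc) - timedelta(minutes=15),
                #   )["Events"]
                #   for event in events:
                #       detail = json.loads(event["CloudTrailEvent"])
                #       for ev in detail.get("requestParameters", {}).get("evaluations", []):
                #           if ev.get("complianceResourceId") == r_id and ev.get("complianceType") == "NOT_APPLICABLE":
                #               ...  # a NOT_APPLICABLE evaluation was posted for this resource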
logger.warning(
f"Warning - Verifying resource '{r_id}' compliance 'NOT_APPLICABLE' is not yet implemented"
)
                # Iterating over a copy of the keys makes it safe to drop the resource and keep scanning
                # for other NOT_APPLICABLE resources.
                self.resources.pop(r_id)
        # Stack events are returned newest first, so this is the timestamp of the most recent stack event.
        last_stack_event_timestamp = list(self.stack.events.limit(count=1))[0].timestamp
        # TODO: This loop may be unnecessary. It waits for the Config rule evaluation to succeed, while the loop
        # below waits for each of the test resources to be evaluated.
logger.info(f"Waiting for Config rule '{self.config_rule_name}' successful evaluation")
loop = 0
while True:
if loop or self.trigger_rule_evaluation:
time.sleep(self.AWS_CONFIG_API_DELAY_SEC)
loop += 1
status = self.config.describe_config_rule_evaluation_status(ConfigRuleNames=[self.config_rule_name])[
"ConfigRulesEvaluationStatus"
][0]
            if "LastSuccessfulInvocationTime" not in status:
                logger.warning(
                    f"Warning - Waiting for first successful invocation of Config rule '{self.config_rule_name}'"
                )
                continue
if "LastFailedInvocationTime" in status:
if status["LastFailedInvocationTime"] > status["LastSuccessfulInvocationTime"]:
logger.warning(f"Warning - Config rule '{self.config_rule_name}' most recent invocation failed")
last_invocation_time = max(status["LastFailedInvocationTime"], status["LastSuccessfulInvocationTime"])
else:
last_invocation_time = status["LastSuccessfulInvocationTime"]
if loop >= 3:
logger.info(
f"Still waiting for Config rule '{self.config_rule_name}' successful evaluation. "
f"Evaluation status: {json.dumps(status, default=str)}"
)
            # The evaluation timestamp may not be present yet shortly after the first successful invocation.
            last_evaluation_time = status.get("LastSuccessfulEvaluationTime")
            if last_evaluation_time and last_evaluation_time > last_invocation_time:
                break
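        # Now wait until each remaining test resource has an evaluation result recorded for this rule.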
loop = 0
unevaluated_resource_ids = list(self.resources.keys())
        while unevaluated_resource_ids:
logger.info(
f"Waiting for Config rule '{self.config_rule_name}' evaluation of "
f"resource ids {unevaluated_resource_ids}"
)
if loop:
time.sleep(self.AWS_CONFIG_API_DELAY_SEC)
loop += 1
for pg in self.config.get_paginator("get_compliance_details_by_config_rule").paginate(
ConfigRuleName=self.config_rule_name
):
for result in pg["EvaluationResults"]:
qualifier = result["EvaluationResultIdentifier"]["EvaluationResultQualifier"]
r_id = qualifier["ResourceId"]
                    if r_id not in self.resources:
continue
self.resources[r_id]["resource_type"] = qualifier["ResourceType"]
# Warn the user if evaluation result was posted before stack deploy finished
if result["ResultRecordedTime"] < last_stack_event_timestamp:
logger.warning(
f"Warning - Resource '{r_id}' Config evaluation was recorded before the most recent event "
f"on CloudFormation stack '{self.stack_name}'. This may be an indicator of unreliable test "
f"results. Consider specifying '{self.TRIGGER_RULE_EVALUATION_ARG}'."
)
self.resources[r_id]["evaluation_result"] = result
            unevaluated_resource_ids = [
                r_id for r_id, resource in self.resources.items() if not resource.get("evaluation_result")
            ]