in source/lib/blueprints/byom/model_monitor.py [0:0]
def _add_model_quality_resources(self):
    """
    Create the ModelQuality-specific CDK parameters and conditions, merging the
    results into self.baseline_attributes and self.monitor_attributes.

    Several of the attributes produced here are shared with the ModelBias and
    ModelExplainability monitors, so this method also runs for those types.
    """

    def conditional_value(condition, parameter):
        # Resolve to the parameter's value when the condition holds, otherwise
        # to AWS::NoValue so the property is omitted from the template.
        return core.Fn.condition_if(
            condition.logical_id,
            parameter.value_as_string,
            core.Aws.NO_VALUE,
        ).to_string()

    # Baseline-job attributes (distinct from the Monitor attributes below)
    if self.monitoring_type == "ModelQuality":
        self.baseline_inference_attribute = pf.create_inference_attribute_parameter(self, "Baseline")
        self.baseline_probability_attribute = pf.create_probability_attribute_parameter(self, "Baseline")
        self.ground_truth_attribute = pf.create_ground_truth_attribute_parameter(self)
        # merge the ModelQuality-only baseline attributes
        self.baseline_attributes.update(
            {
                "ground_truth_attribute": self.ground_truth_attribute.value_as_string,
                "inference_attribute": self.baseline_inference_attribute.value_as_string,
                "probability_attribute": self.baseline_probability_attribute.value_as_string,
            }
        )

    # Monitor attributes
    self.monitor_inference_attribute = pf.create_inference_attribute_parameter(self, "Monitor")
    self.monitor_probability_attribute = pf.create_probability_attribute_parameter(self, "Monitor")
    # ground_truth_s3_uri applies to ModelQuality/ModelBias only
    if self.monitoring_type in ["ModelQuality", "ModelBias"]:
        self.ground_truth_s3_bucket = pf.create_ground_truth_bucket_name_parameter(self)
        self.ground_truth_s3_uri = pf.create_ground_truth_s3_uri_parameter(self)
        self.monitor_attributes.update(
            {"ground_truth_s3_uri": f"s3://{self.ground_truth_s3_uri.value_as_string}"}
        )

    # problem_type and probability_threshold_attribute are shared by the
    # baseline job and the monitor
    self.problem_type = pf.create_problem_type_parameter(self)
    self.probability_threshold_attribute = pf.create_probability_threshold_attribute_parameter(self)

    # Conditions consumed by the monitor attributes below
    # NOTE(review): "propability" is a long-standing misspelling; kept as-is in
    # case other methods in this file reference these attribute names.
    self.inference_attribute_provided = cf.create_attribute_provided_condition(
        self, "InferenceAttributeProvided", self.monitor_inference_attribute
    )
    self.binary_classification_propability_attribute_provided = (
        cf.create_problem_type_binary_classification_attribute_provided_condition(
            self, self.problem_type, self.monitor_probability_attribute, "ProbabilityAttribute"
        )
    )
    self.binary_classification_propability_threshold_provided = (
        cf.create_problem_type_binary_classification_attribute_provided_condition(
            self, self.problem_type, self.probability_threshold_attribute, "ProbabilityThreshold"
        )
    )

    # Shared baseline attributes
    self.baseline_attributes.update(
        {
            "problem_type": self.problem_type.value_as_string,
            "probability_threshold_attribute": self.probability_threshold_attribute.value_as_string,
        }
    )

    # ModelQuality monitor attributes; each optional value is only passed when
    # its condition is satisfied (probability_* additionally require
    # ProblemType == BinaryClassification via the conditions above)
    self.monitor_attributes.update(
        {
            "problem_type": self.problem_type.value_as_string,
            "inference_attribute": conditional_value(
                self.inference_attribute_provided, self.monitor_inference_attribute
            ),
            "probability_attribute": conditional_value(
                self.binary_classification_propability_attribute_provided,
                self.monitor_probability_attribute,
            ),
            "probability_threshold_attribute": conditional_value(
                self.binary_classification_propability_threshold_provided,
                self.probability_threshold_attribute,
            ),
        }
    )