in mysqloperator/controller/innodbcluster/operator_cluster.py [0:0]
def on_innodbcluster_field_backup_schedules(old: str, new: str, body: Body,
                                            logger: Logger, **kwargs):
    if old == new:
        return
    logger.info("on_innodbcluster_field_backup_schedules")
    cluster = InnoDBCluster(body)
    # Ignore spec changes while the cluster is still being initialized.
    # This handler is also called during cluster initialization: `old` will be None
    # and `new` will be the schedules from the cluster's spec. That lets us create
    # the schedules here rather than in on_innodbcluster_create(), which should only
    # create the objects critical for bringing up the server.
    # The schedules are added once the cluster is ready. This also ensures that the
    # schedules (especially `enabled` ones) are created only after the cluster
    # exists, avoiding cron jobs that never fire, or cron jobs that have to be
    # created suspended and re-enabled once the cluster is running, which would
    # be a 2-step process.
    # The cluster is marked as created after the first instance is up and running,
    # so no action is needed in post_create_actions() in the cluster controller;
    # raising a TemporaryError below lets Kopf call this handler again later.
    if not cluster.get_create_time():
        raise kopf.TemporaryError("The cluster is not ready. Will create the schedules once the first instance is up and running", delay=10)

    cluster.parsed_spec.validate(logger)

    with ClusterMutex(cluster):
        backup_objects.update_schedules(cluster.parsed_spec, old, new, logger)
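The handler above relies on Kopf's field-handler mechanism: Kopf invokes it with the `old` and `new` values of the watched field, and raising `kopf.TemporaryError` makes Kopf retry the same handler after the given delay. Below is a minimal, self-contained sketch of that pattern. The group/version/plural, the `spec.backupSchedules` field path, the handler name, and the `status.createTime` readiness check are illustrative assumptions, not values taken from this file.

```python
# Minimal sketch of a Kopf field handler with retry-until-ready behavior.
# Assumptions: the CRD coordinates, field path, and readiness check are hypothetical.
import kopf


@kopf.on.field("mysql.oracle.com", "v2", "innodbclusters",
               field="spec.backupSchedules")
def on_backup_schedules_change(old, new, body, logger, **kwargs):
    # Kopf passes the previous and current value of the watched field.
    if old == new:
        return

    # Raising TemporaryError tells Kopf to re-invoke this handler after `delay`
    # seconds, which is how the operator waits for the cluster to become ready.
    if body.get("status", {}).get("createTime") is None:
        raise kopf.TemporaryError("Cluster not ready yet", delay=10)

    logger.info("backup schedules changed: %s -> %s", old, new)
```

Because the retry is driven entirely by `TemporaryError`, the create handler does not need to schedule any follow-up work itself; Kopf keeps re-delivering the field change until the handler completes without raising.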