in mysqloperator/controller/innodbcluster/cluster_objects.py [0:0]
def update_objects_for_subsystem(subsystem: InnoDBClusterSpecProperties,
cluster: InnoDBCluster,
patcher: 'InnoDBClusterObjectModifier',
logger: Logger) -> None:
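    """Bring the Kubernetes objects affected by `subsystem` in sync with the spec.

    Walks the per-subsystem callback registries of the parsed spec (ConfigMaps,
    StatefulSet, Service, ServiceMonitors) and creates, replaces or deletes the
    corresponding objects accordingly.
    """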
logger.info(f"update_objects_for_subsystem: {subsystem}")
sts = cluster.get_stateful_set()
svc = cluster.get_service()
spec = cluster.parsed_spec
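    # ConfigMaps: each callback returns (name, body) pairs. An existing CM paired with an
    # empty body is deleted, one whose data differs is replaced, and a missing CM is created.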
if subsystem in spec.get_configmaps_cbs:
print(f"\t\tWalking over get_configmaps_cbs len={len(spec.get_configmaps_cbs[subsystem])}")
        #TODO: This won't delete old CMs; it only replaces the ones still in use with new
        # content or creates new ones. The solution is to return tuples, as get_svc_monitor_cbs
        # does, where the CM name is the first tuple element and the second is just None. That
        # signals that this CM should be removed, as it is not in use anymore.
for get_configmap_cb in spec.get_configmaps_cbs[subsystem]:
prefix = ''
new_configmaps = get_configmap_cb(prefix, logger)
if not new_configmaps:
continue
for (cm_name, new_cm) in new_configmaps:
current_cm = cluster.get_configmap(cm_name)
if current_cm:
if not new_cm:
print(f"\t\t\tDeleting CM {cluster.namespace}/{cm_name}")
#patcher.delete_configmap(cluster.namespace, cm_name, on_apiexception_404_handler)
cluster.delete_configmap(cm_name)
continue
data_differs = current_cm.data != new_cm["data"]
if data_differs:
print(f"\t\t\tReplacing CM {cluster.namespace}/{cm_name}")
current_cm.data = new_cm["data"]
#patcher.replace_configmap(cluster.namespace, cm_name, current_cm, on_apiexception_404_handler)
api_core.replace_namespaced_config_map(cm_name, cluster.namespace, body=current_cm)
else:
print(f"\t\t\tNo such cm exists. Creating {cluster.namespace}/{new_cm}")
kopf.adopt(new_cm)
#patcher.create_configmap(cluster.namespace, new_cm['metadata']['name'], new_cm, on_apiexception_generic_handler)
api_core.create_namespaced_config_map(cluster.namespace, new_cm)
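    # StatefulSet: each callback amends the STS via the patcher; afterwards a "restartedAt"
    # annotation is patched in so the pods do a rolling restart and pick up mounted ConfigMap changes.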
if subsystem in spec.add_to_sts_cbs:
print(f"\t\tCurrent container count: {len(sts.spec.template.spec.containers)}")
print(f"\t\tWalking over add_to_sts_cbs len={len(spec.add_to_sts_cbs[subsystem])}")
changed = False
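        # spec_to_dict turns the typed STS spec into a plain dict before the callbacks touch it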
sts.spec = spec_to_dict(sts.spec)
for add_to_sts_cb in spec.add_to_sts_cbs[subsystem]:
changed = True
print("\t\t\tPatching STS")
add_to_sts_cb(sts, patcher, logger)
if changed:
new_container_names = [c["name"] for c in patcher.get_sts_path('/spec/template/spec/containers') if c["name"] not in ["mysql", "sidecar"]]
print(f"\t\t\tNew containers: {new_container_names}")
new_volumes_names = [c["name"] for c in patcher.get_sts_path('/spec/template/spec/volumes')]
print(f"\t\t\tNew volumes: {new_volumes_names}")
new_volume_mounts = [(c["name"], c["volumeMounts"]) for c in patcher.get_sts_path('/spec/template/spec/containers') if c["name"] not in ["mysql", "sidecar"]]
print(f"\t\t\tNew volume mounts: {new_volume_mounts}")
            # There might be ConfigMap changes which, once mounted, change the server, so we roll over.
            # For a fine-grained approach, get_configmap should return whether there are changes that require
            # a restart. With a restart, for example, the Cluster1LFSGeneralLogEnableDisableEnable test will hang.
restart_patch = {"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":utils.isotime()}}}}}
patcher.patch_sts(restart_patch)
#patcher.submit_patches(restart_sts=True)
print(f"\t\t\tSTS {'patched' if changed else 'unchanged. No rollover upgrade!'}")
if subsystem in spec.get_add_to_svc_cbs:
print(f"\t\tWalking over get_add_to_svc_cbs len={len(spec.get_add_to_svc_cbs[subsystem])}")
changed = False
for add_to_svc_cb in spec.get_add_to_svc_cbs[subsystem]:
changed = True
print("\t\t\tPatching SVC")
add_to_svc_cb(svc, logger)
if changed:
api_core.replace_namespaced_service(svc.metadata.name, svc.metadata.namespace, svc)
print(f"\t\t\tSVC {'patched' if changed else 'unchanged'}")
    if subsystem in spec.get_svc_monitor_cbs:
        for monitored_subsystem in spec.get_svc_monitor_cbs:
            for cb in spec.get_svc_monitor_cbs[monitored_subsystem]:
(monitor_name, monitor) = cb(logger)
                # monitor may be empty, which means the old monitor with monitor_name should be deleted
print(f"\t\t\tChecking for old ServiceMonitor {monitor_name}")
if cluster.get_service_monitor(monitor_name):
print(f"\t\t\tRemoving old ServiceMonitor {monitor_name}")
try:
api_customobj.delete_namespaced_custom_object("monitoring.coreos.com", "v1", cluster.namespace,
"servicemonitors", monitor_name)
except Exception as exc:
print(f"\t\t\tPrevious ServiceMonitor {monitor_name} was not removed. Reason: {exc}")
if monitor:
kopf.adopt(monitor)
print(f"\t\t\tCreating ServiceMonitor {monitor} ...")
try:
api_customobj.create_namespaced_custom_object("monitoring.coreos.com", "v1", cluster.namespace,
"servicemonitors", monitor)
except Exception as exc:
                        # This might be caused by the Prometheus Operator not being installed;
                        # we won't fail because of that
print(f"\t\t\tServiceMonitor {monitor_name} NOT created!")
print(exc)
cluster.warn(action="CreateCluster", reason="CreateResourceFailed", message=f"{exc}")
else:
print(f"\t\t\tNew ServiceMonitor {monitor_name} will not be created. Monitoring disabled.")