in mysqloperator/controller/innodbcluster/cluster_controller.py [0:0]
def __remove_instance_aux(self, pod: MySQLPod, logger: Logger, force: bool = False) -> None:
    """Remove *pod* from the InnoDB Cluster it belongs to.

    Connects to the cluster through any reachable peer and calls
    ``remove_instance`` for the pod's endpoint. If the normal removal
    fails (and ``force`` was not already requested), a second attempt is
    made with the ``force`` option set.

    Args:
        pod: The pod whose MySQL instance should be removed.
        logger: Operator logger for progress/diagnostics.
        force: When True, skip the graceful removal attempt and go
            straight to a forced removal.

    Raises:
        kopf.TemporaryError: If the peer is a PRIMARY that still has
            super_read_only ON (transient; retried after a delay).
        mysqlsh.Error: If removal fails and the cluster is not being
            deleted.
        RuntimeError: Re-raised from the shell unless the error is the
            benign "cluster object is disconnected" case.
    """
    # NOTE: diagnostics routed through the supplied logger (was print())
    # so they reach the operator's structured log output.
    logger.debug(f"Removing {pod.endpoint} from cluster FORCE={force}")

    # TODO improve this check
    other_pods = self.cluster.get_pods()
    if len(other_pods) == 1 and pod.instance_type == 'group-member':
        # Removing the last group member would dissolve the cluster, so
        # it is only removed as part of cluster deletion.
        logger.debug("There is only one pod left in the cluster. Won't remove it, as this will dissolve the cluster. It will be removed only if the cluster is being deleted.")
    if len(other_pods) > 1 or (len(other_pods) > 0 and pod.instance_type == 'read-replica'):
        try:
            logger.debug("connect_to_cluster")
            peer_pod = self.connect_to_cluster(logger)
            logger.debug(f"peer_pod={peer_pod}")
        except mysqlsh.Error as e:
            peer_pod = None
            if self.cluster.deleting:
                # Best effort during cluster deletion: the pod will go
                # away anyway, so a failed connection is not fatal.
                logger.warning(
                    f"Could not connect to cluster, but ignoring because we're deleting: error={e}")
            else:
                logger.error(f"Could not connect to cluster: error={e}")
                raise
        if peer_pod:
            removed = False
            remove_options = {}
            if not force:
                # First attempt: graceful removal without force.
                logger.info(
                    f"remove_instance: {pod.name} peer={peer_pod.name} options={remove_options}")
                try:
                    self.dba_cluster.remove_instance(pod.endpoint, remove_options)
                    removed = True
                    logger.debug("remove_instance OK")
                except mysqlsh.Error as e:
                    logger.warning(f"remove_instance failed: error={e}")
                    if e.code == mysqlsh.mysql.ErrorCode.ER_OPTION_PREVENTS_STATEMENT:
                        # super_read_only can still be true on a PRIMARY for a
                        # short time
                        raise kopf.TemporaryError(
                            f"{peer_pod.name} is a PRIMARY but super_read_only is ON", delay=5)
                    elif e.code == errors.SHERR_DBA_MEMBER_METADATA_MISSING:
                        # already removed and we're probably just retrying
                        removed = True
            logger.debug(f"removed={removed}")
            if not removed:
                # Second attempt (or first, when force=True): forced removal.
                remove_options["force"] = True
                logger.info(
                    f"remove_instance: {pod.name} peer={peer_pod.name} options={remove_options}")
                try:
                    self.dba_cluster.remove_instance(pod.endpoint, remove_options)
                    logger.info("FORCED remove_instance OK")
                except mysqlsh.Error as e:
                    logger.warning(f"remove_instance failed: error={e}")
                    if e.code == errors.SHERR_DBA_MEMBER_METADATA_MISSING:
                        # Metadata already gone — removal effectively done.
                        pass
                    else:
                        deleting = not self.cluster or self.cluster.deleting
                        if deleting:
                            logger.info(
                                f"force remove_instance failed. Ignoring because cluster is deleted: error={e} peer={peer_pod.name}")
                        else:
                            logger.error(
                                f"force remove_instance failed. error={e} deleting_cluster={deleting} peer={peer_pod.name}")
                            raise
                except RuntimeError as e:
                    logger.info(f"force remove_instance failed. RuntimeError {e}")
                    # A disconnected cluster object is tolerated; anything
                    # else is unexpected and must propagate.
                    if "The cluster object is disconnected" not in str(e):
                        logger.info(f"Can't do anything to remove {pod.name} cleanly")
                        raise
        else:
            # No reachable peer: nothing to remove the instance from.
            logger.error(
                f"Cluster is not available, skipping clean removal of {pod.name}")