def reconcile_pod()

in mysqloperator/controller/innodbcluster/cluster_controller.py [0:0]


    def reconcile_pod(self, primary_pod: MySQLPod, pod: MySQLPod, logger: Logger) -> None:
        """Reconcile *pod*'s membership with the InnoDB cluster.

        Opens an admin (Dba) session to the pod, diagnoses it as a cluster
        candidate against the cluster reached via *primary_pod*, and then
        joins, rejoins, or probes the instance depending on the diagnosed
        state. Does nothing if the pod or the cluster is being deleted.
        """
        with DbaWrap(shellutils.connect_dba(pod.endpoint_co, logger)) as pod_dba_session:
            cluster = self.connect_to_primary(primary_pod, logger)

            diag = diagnose.diagnose_cluster_candidate(
                self.dba.session, cluster, pod, pod_dba_session, logger)

            logger.info(
                f"Reconciling {pod}: state={diag.status}  deleting={pod.deleting} cluster_deleting={self.cluster.deleting}")

            # Nothing to reconcile while either side is being torn down.
            if pod.deleting or self.cluster.deleting:
                return

            # TODO check case where a member pod was deleted and then rejoins with the same address but different uuid

            candidate_state = diag.status

            if candidate_state == diagnose.CandidateDiagStatus.JOINABLE:
                self.cluster.info(action="ReconcilePod", reason="Join",
                                  message=f"Joining {pod.name} to cluster")
                self.join_instance(pod, pod_dba_session, logger)
                return

            if candidate_state == diagnose.CandidateDiagStatus.REJOINABLE:
                self.cluster.info(action="ReconcilePod", reason="Rejoin",
                                  message=f"Rejoining {pod.name} to cluster")
                self.rejoin_instance(pod, pod_dba_session.session, logger)
                return

            # All remaining states end with a membership probe; only the
            # logging differs per state.
            if candidate_state == diagnose.CandidateDiagStatus.MEMBER:
                logger.info(f"{pod.endpoint} already a member")
            elif candidate_state == diagnose.CandidateDiagStatus.UNREACHABLE:
                # TODO check if we should throw a tmp error or do nothing
                logger.error(f"{pod.endpoint} is unreachable")
            else:
                # TODO check if we can repair broken instances
                # It would be possible to auto-repair an instance with errant
                # transactions by cloning over it, but that would mean these
                # errants are lost.
                logger.error(f"{pod.endpoint} is in state {candidate_state}")

            self.probe_member_status(pod, pod_dba_session.session, False, logger)