def join_instance()

in mysqloperator/controller/innodbcluster/cluster_controller.py
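
Joins a newly provisioned pod to the InnoDB Cluster through a reachable peer. Incremental recovery is preferred, with a fallback to clone when the incremental join fails; read replicas are added via add_replica_instance, group members via add_instance.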


    def join_instance(self, pod: MySQLPod, pod_dba_session: 'Dba', logger: Logger) -> None:
        logger.info(f"Adding {pod.endpoint} to cluster")

        # Connect to the cluster via a reachable member; that member acts as
        # the peer the new instance joins through
        peer_pod = self.connect_to_cluster(logger)

        self.log_mysql_info(pod, pod_dba_session.session, logger)

        # TODO: always use clone when the dataset is big
        # With Shell Bug #33900165 fixed we should use "auto" by default
        # and remove the retry logic below
        recovery_method = "incremental"

        if self.cluster.parsed_spec.initDB and self.cluster.parsed_spec.initDB.meb:
            # After a restore from a MEB backup the server might not find the
            # right binlogs for incremental recovery and would provision an
            # empty replica; clone does the right thing
            recovery_method = "clone"

        add_options = {
            "recoveryMethod": recovery_method,
        }

        # TODO: add_replica_instance doesn't support cert-based auth, thus certSubject works only for group members - WL15056
        # If a cluster was created with cert-based auth between the group members, no replica can join the cluster
        for option in self.dba_cluster.options()["defaultReplicaSet"]["globalOptions"]:
            if option["option"] == "memberAuthType" and option["value"] in ["CERT_SUBJECT", "CERT_SUBJECT_PASSWORD"]:
                rdns = pod.get_cluster().get_tls_issuer_and_subject_rdns()
                # add_instance() needs only certSubject, but not memberAuthType and certIssuer
                add_options["certSubject"] = rdns["subject"]

        if pod.instance_type == "group-member":
            add_options.update(common_gr_options)

        logger.info(
            f"ADD INSTANCE: target={pod.endpoint}  instance_type={pod.instance_type} cluster_peer={peer_pod.endpoint}  options={add_options}...")

        # Add the membership finalizer before the join, so the pod cannot be
        # deleted before it has been removed from the cluster metadata
        pod.add_member_finalizer()

        report_host = pod_dba_session.session.run_sql('SELECT @@report_host').fetch_one()[0]
        logger.debug(f"DBA session goes to {report_host}")

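        # First attempt with the recovery method selected above; on failure
        # we retry once below with clone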
        try:
            if pod.instance_type == "read-replica":
                self.dba_cluster.add_replica_instance(pod.endpoint, add_options)
            else:
                self.dba_cluster.add_instance(pod.endpoint_co, add_options)

            logger.debug("add_instance OK")
        except (mysqlsh.Error, RuntimeError) as e:
            logger.warning(f"add_instance failed: error={e}")

            # Incremental recovery may fail if transactions are missing from
            # the binlog; retry using clone
            add_options["recoveryMethod"] = "clone"
            logger.warning("retrying add_instance with recoveryMethod=clone")
            try:
                if pod.instance_type == "read-replica":
                    self.dba_cluster.add_replica_instance(pod.endpoint, add_options)
                else:
                    self.dba_cluster.add_instance(pod.endpoint_co, add_options)
            except (mysqlsh.Error, RuntimeError) as e:
                logger.warning(f"add_instance failed second time: error={e}")
                raise

        if pod.instance_type == "read-replica":
            # This is not perfect, as we don't track this any further, but
            # async replication gives us only limited information
            pod.update_member_readiness_gate("ready", True)
        else:
            with DbaWrap(shellutils.connect_dba(pod.endpoint_co, logger)) as dba_session:
                # TODO: pod_dba_session may be invalid on the caller's side if
                #       the pod was provisioned via clone, which may lead to
                #       future bugs; also, always opening a new connection here
                #       is slightly inefficient.
                #       In case clone is used and we need a reconnect, we have
                #       to communicate that to the caller, else we will see
                #       bugs in the future
                minfo = self.probe_member_status(pod, dba_session.session,
                                                True, logger)
                member_id, role, status, view_id, version, member_count, reachable_member_count = minfo
            logger.info(f"JOINED {pod.name}: {minfo}")

            # if the cluster size is complete, ensure routers are deployed
            if not router_objects.get_size(self.cluster) and member_count == self.cluster.parsed_spec.instances:
                self.post_create_actions(self.dba.session, self.dba_cluster, logger)
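
The incremental-then-clone fallback is the core of this method. Below is a minimal standalone sketch of the same pattern, assuming a connected mysqlsh Cluster handle; the helper name add_with_clone_fallback and its parameters are illustrative, not part of the operator:

    import mysqlsh

    def add_with_clone_fallback(cluster, endpoint: str, options: dict, logger) -> None:
        # Prefer incremental recovery: it avoids a full data copy when the
        # donor still has all required transactions in its binlog
        opts = dict(options, recoveryMethod="incremental")
        try:
            cluster.add_instance(endpoint, opts)
        except (mysqlsh.Error, RuntimeError) as e:
            # Incremental recovery fails when needed transactions were purged
            # from the donor's binlog; clone provisions a full copy instead
            logger.warning(f"incremental add_instance failed ({e}), retrying with clone")
            opts["recoveryMethod"] = "clone"
            cluster.add_instance(endpoint, opts)

Once Shell Bug #33900165 is fixed, recoveryMethod "auto" should make this manual fallback unnecessary, as the TODO at the top of the method notes.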