def do_upgrade()

in upgrade_tests/upgrade_base.py [0:0]


    def do_upgrade(self, session, use_thrift=False, return_nodes=False, **kwargs):
        """
        Upgrades the first node in the cluster and returns a list of
        (is_upgraded, Session) tuples.  If `is_upgraded` is true, the
        Session is connected to the upgraded node. If `return_nodes`
        is True, a tuple of (is_upgraded, Session, Node) will be
        returned instead.
        """
        session.cluster.shutdown()
        self.install_nodetool_legacy_parsing()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        # drain and stop the node; on macOS this can fail for tests that run
        # against 2.0 due to https://issues.apache.org/jira/browse/CASSANDRA-8220,
        # so those tests need to be run on Linux.
        node1.drain()
        node1.stop(gently=True)

        # Ignore errors before upgrade on Windows.
        # We ignore errors from 2.1 because Windows support in 2.1 is only
        # beta: there are frequent log errors, related to filesystem
        # interactions, that are a direct result of the missing functionality
        # on 2.1 Windows, and we don't want them to pollute our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        logger.debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))

        node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)
        self.install_legacy_parsing(node1)

        # this is a bandaid; after refactoring, upgrades should account for protocol version
        new_version_from_build = get_version_from_build(node1.get_install_dir())

        # Check whether a since annotation with a max_version was set on this test.
        # The since decorator can only check the starting version of the upgrade,
        # so here we check the new version of the upgrade as well.
        if hasattr(self, 'max_version') and self.max_version is not None and new_version_from_build >= self.max_version:
            pytest.skip("Skipping test, new version {} is equal to or higher than "
                        "max version {}".format(new_version_from_build, self.max_version))

        if new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3:
            pytest.skip('Protocol version {} incompatible '
                        'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
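        # carry the test's current log level over to the upgraded node, and
        # run without internode compression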
        node1.set_log_level(logging.getLevelName(logging.root.level))
        node1.set_configuration_options(values={'internode_compression': 'none'})

        if use_thrift and node1.get_cassandra_version() < '4':
            node1.set_configuration_options(values={'start_rpc': 'true'})

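        # restart node1 on the new version and wait until the native protocol
        # (CQL) is accepting connections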
        node1.start(wait_for_binary_proto=True)

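        # connect to the upgraded node first; the flag in each tuple records
        # whether the session targets the upgraded node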
        sessions_and_meta = []
        if self.CL:
            session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version, consistency_level=self.CL, **kwargs)
        else:
            session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version, **kwargs)
        session.set_keyspace('ks')

        if return_nodes:
            sessions_and_meta.append((True, session, node1))
        else:
            sessions_and_meta.append((True, session))

        # open a second session with the node on the old version
        if self.CL:
            session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version, consistency_level=self.CL, **kwargs)
        else:
            session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version, **kwargs)
        session.set_keyspace('ks')

        if return_nodes:
            sessions_and_meta.append((False, session, node2))
        else:
            sessions_and_meta.append((False, session))

        # Let the nodes settle briefly before yielding connections in turn,
        # upgraded and non-upgraded alike. CASSANDRA-11396 was the impetus for
        # this change: apparent perf noise was preventing CL.ALL from being
        # reached. The newly upgraded node needs to settle because it has only
        # just started, and the non-upgraded node needs a chance to settle as
        # well, because the entire cluster (or isolated nodes) may have been
        # doing resource-intensive work immediately beforehand.
        for s in sessions_and_meta:
            time.sleep(5)
            yield s
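
For orientation, here is a minimal sketch of how a test might consume this generator. The test name, the `prepare()` helper, and the schema are hypothetical, shown only to illustrate the shape of the yielded tuples:

    def test_example_upgrade(self):  # hypothetical consumer, for illustration only
        session = self.prepare()  # assumed setup helper that creates keyspace 'ks'
        session.execute("CREATE TABLE ks.t (k int PRIMARY KEY, v int)")

        # with return_nodes=False (the default), each item is (is_upgraded, session)
        for is_upgraded, session in self.do_upgrade(session):
            logger.debug('querying the {} node'.format('upgraded' if is_upgraded else 'old'))
            session.execute("INSERT INTO ks.t (k, v) VALUES (0, 0)")
            rows = list(session.execute("SELECT v FROM ks.t WHERE k = 0"))
            assert rows[0].v == 0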