def start()

in ccmlib/cluster.py [0:0]


    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
              wait_other_notice=True, jvm_args=None, profile_options=None,
              quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
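        """
        Start every node in this cluster that is not already running.

        Returns a list of (node, process, log mark) tuples for the nodes that
        were started, or None if a node failed to log its startup message
        within the timeout.
        """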
        if jvm_args is None:
            jvm_args = []

        # extension hook invoked before any node is started
        extension.pre_cluster_start(self)

        # check whether all loopback aliases are available before starting any nodes
        for node in list(self.nodes.values()):
            if not node.is_running():
                for itf in node.network_interfaces.values():
                    if itf is not None:
                        common.assert_socket_available(itf)

        started = []
        for node in list(self.nodes.values()):
            if not node.is_running():
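                # remember where the log currently ends so later waits only scan new output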
                mark = 0
                if os.path.exists(node.logfilename()):
                    mark = node.mark_log()

                # if the node is going to allocate tokens via the allocation strategy
                # during start, force wait_for_binary_proto=True for that node
                node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
                p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
                               profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
                               allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

                # Prior to JDK8, starting every node at once could lead to a
                # nanotime collision where the RNG that generates a node's tokens
                # gives identical tokens to several nodes. Thus, we stagger
                # the node starts.
                if common.get_jdk_version() < '1.8':
                    time.sleep(1)

                started.append((node, p, mark))

        if no_wait:
            time.sleep(2)  # wait 2 seconds so early startup errors surface and the pids get set
        else:
            for node, p, mark in started:
                if not node._wait_for_running(p, timeout_s=7):
                    raise NodeError("Node {} should be running before waiting for <started listening> log message, "
                                    "but C* process is terminated.".format(node.name))
                try:
                    timeout = kwargs.get('timeout', DEFAULT_CLUSTER_WAIT_TIMEOUT_IN_SECS)
                    timeout = int(os.environ.get('CCM_CLUSTER_START_TIMEOUT_OVERRIDE', timeout))
                    start_message = "Listening for thrift clients..." if self.cassandra_version() < "2.2" else "Starting listening for CQL clients"
                    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
                                       error_on_pid_terminated=True)
                except RuntimeError:
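                    # treat a failed log watch as a failed cluster start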
                    return None

        # record the pid of each newly started process on its node
        self.__update_pids(started)

        # every node we started must still have a live process
        for node, p, _ in started:
            if not node.is_running():
                raise NodeError("Error starting {0}.".format(node.name), p)

        if not no_wait:
            if wait_other_notice:
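                # every started node must see every other started node as alive in its log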
                for (node, _, mark), (other_node, _, _) in itertools.permutations(started, 2):
                    node.watch_log_for_alive(other_node, from_mark=mark)

            if wait_for_binary_proto:
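                # wait until each node reports its binary (CQL) interface is up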
                for node, p, mark in started:
                    node.wait_for_binary_interface(process=p, verbose=verbose, from_mark=mark)

        # extension hook invoked after the start sequence completes
        extension.post_cluster_start(self)

        return started
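
Usage sketch (illustrative, not from the source): assuming `cluster` is an
already populated ccmlib Cluster, a caller starts all stopped nodes and then
inspects the returned (node, process, log mark) tuples; the jvm_args value
here is an arbitrary example.

    started = cluster.start(jvm_args=['-Xmx512M'],
                            wait_for_binary_proto=True,
                            wait_other_notice=True)
    if started is None:
        # a node never logged its startup message within the timeout
        raise RuntimeError('cluster did not start cleanly; check the node logs')
    for node, process, log_mark in started:
        print('{}: running={}'.format(node.name, node.is_running()))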