elasticsearch/_async/client/__init__.py [186:396]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ) -> None:
        if hosts is None and cloud_id is None and _transport is None:
            raise ValueError("Either 'hosts' or 'cloud_id' must be specified")

        if serializer is not None:
            if serializers is not DEFAULT:
                raise ValueError(
                    "Can't specify both 'serializer' and 'serializers' parameters "
                    "together. Instead only specify one of the other."
                )
            serializers = {default_mimetype: serializer}

        # Setting 'min_delay_between_sniffing' implies sniff_before_requests=True
        if min_delay_between_sniffing is not DEFAULT:
            sniff_before_requests = True

        sniffing_options = (
            sniff_timeout,
            sniff_on_start,
            sniff_before_requests,
            sniff_on_node_failure,
            sniffed_node_callback,
            min_delay_between_sniffing,
        )
        if cloud_id is not None and any(
            x is not DEFAULT and x is not None for x in sniffing_options
        ):
            raise ValueError(
                "Sniffing should not be enabled when connecting to Elastic Cloud"
            )

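        # Choose the sniffing callback: a user-supplied 'sniffed_node_callback'
        # is wrapped via create_sniff_callback(); otherwise fall back to the
        # default callback when any sniffing trigger is explicitly enabled.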
        sniff_callback = None
        if sniffed_node_callback is not None:
            sniff_callback = create_sniff_callback(
                sniffed_node_callback=sniffed_node_callback
            )
        elif (
            sniff_on_start is True
            or sniff_before_requests is True
            or sniff_on_node_failure is True
        ):
            sniff_callback = default_sniff_callback

        if _transport is None:
            requests_session_auth = None
            if http_auth is not None and http_auth is not DEFAULT:
                if is_requests_http_auth(http_auth):
                    # If we're using custom requests authentication
                    # then we need to alert the user that they also
                    # need to use 'node_class=requests'.
                    if not is_requests_node_class(node_class):
                        raise ValueError(
                            "Using a custom 'requests.auth.AuthBase' class for "
                            "'http_auth' must be used with node_class='requests'"
                        )

                    # Reset 'http_auth' to DEFAULT so it's not consumed below.
                    requests_session_auth = http_auth
                    http_auth = DEFAULT

            node_configs = client_node_configs(
                hosts,
                cloud_id=cloud_id,
                requests_session_auth=requests_session_auth,
                connections_per_node=connections_per_node,
                http_compress=http_compress,
                verify_certs=verify_certs,
                ca_certs=ca_certs,
                client_cert=client_cert,
                client_key=client_key,
                ssl_assert_hostname=ssl_assert_hostname,
                ssl_assert_fingerprint=ssl_assert_fingerprint,
                ssl_version=ssl_version,
                ssl_context=ssl_context,
                ssl_show_warn=ssl_show_warn,
            )
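            # Only forward options the caller explicitly set; DEFAULT
            # sentinels keep the transport's own defaults.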
            transport_kwargs: t.Dict[str, t.Any] = {}
            if node_class is not DEFAULT:
                transport_kwargs["node_class"] = node_class
            if node_pool_class is not DEFAULT:
                transport_kwargs["node_pool_class"] = node_pool_class
            if randomize_nodes_in_pool is not DEFAULT:
                transport_kwargs["randomize_nodes_in_pool"] = randomize_nodes_in_pool
            if node_selector_class is not DEFAULT:
                transport_kwargs["node_selector_class"] = node_selector_class
            if dead_node_backoff_factor is not DEFAULT:
                transport_kwargs["dead_node_backoff_factor"] = dead_node_backoff_factor
            if max_dead_node_backoff is not DEFAULT:
                transport_kwargs["max_dead_node_backoff"] = max_dead_node_backoff
            if meta_header is not DEFAULT:
                transport_kwargs["meta_header"] = meta_header

            transport_serializers = DEFAULT_SERIALIZERS.copy()
            if serializers is not DEFAULT:
                transport_serializers.update(serializers)

                # Mirror serializers registered for the non-compat mimetypes
                # onto their compatibility mimetypes, so the same serializer
                # handles both requests and responses.
                for mime_subtype in ("json", "x-ndjson"):
                    if f"application/{mime_subtype}" in serializers:
                        compat_mimetype = (
                            f"application/vnd.elasticsearch+{mime_subtype}"
                        )
                        if compat_mimetype not in serializers:
                            transport_serializers[compat_mimetype] = serializers[
                                f"application/{mime_subtype}"
                            ]

            transport_kwargs["serializers"] = transport_serializers

            transport_kwargs["default_mimetype"] = default_mimetype
            if sniff_on_start is not DEFAULT:
                transport_kwargs["sniff_on_start"] = sniff_on_start
            if sniff_before_requests is not DEFAULT:
                transport_kwargs["sniff_before_requests"] = sniff_before_requests
            if sniff_on_node_failure is not DEFAULT:
                transport_kwargs["sniff_on_node_failure"] = sniff_on_node_failure
            if sniff_timeout is not DEFAULT:
                transport_kwargs["sniff_timeout"] = sniff_timeout
            if min_delay_between_sniffing is not DEFAULT:
                transport_kwargs["min_delay_between_sniffing"] = (
                    min_delay_between_sniffing
                )

            _transport = transport_class(
                node_configs,
                client_meta_service=CLIENT_META_SERVICE,
                sniff_callback=sniff_callback,
                **transport_kwargs,
            )

            super().__init__(_transport)

            # These options are applied per-request, so they are stored separately.
            self._request_timeout = request_timeout
            self._max_retries = max_retries
            self._retry_on_timeout = retry_on_timeout
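            # Accept a bare int for convenience and normalize it to a tuple.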
            if isinstance(retry_on_status, int):
                retry_on_status = (retry_on_status,)
            self._retry_on_status = retry_on_status

        else:
            super().__init__(_transport)

        if headers is not DEFAULT and headers is not None:
            self._headers.update(headers)
        if opaque_id is not DEFAULT and opaque_id is not None:  # type: ignore[comparison-overlap]
            self._headers["x-opaque-id"] = opaque_id
        self._headers = resolve_auth_headers(
            self._headers,
            http_auth=http_auth,
            api_key=api_key,
            basic_auth=basic_auth,
            bearer_auth=bearer_auth,
        )

        # namespaced clients for compatibility with API names
        self.async_search = AsyncSearchClient(self)
        self.autoscaling = AutoscalingClient(self)
        self.cat = CatClient(self)
        self.cluster = ClusterClient(self)
        self.connector = ConnectorClient(self)
        self.fleet = FleetClient(self)
        self.features = FeaturesClient(self)
        self.indices = IndicesClient(self)
        self.inference = InferenceClient(self)
        self.ingest = IngestClient(self)
        self.nodes = NodesClient(self)
        self.snapshot = SnapshotClient(self)
        self.tasks = TasksClient(self)

        self.xpack = XPackClient(self)
        self.ccr = CcrClient(self)
        self.dangling_indices = DanglingIndicesClient(self)
        self.enrich = EnrichClient(self)
        self.eql = EqlClient(self)
        self.esql = EsqlClient(self)
        self.graph = GraphClient(self)
        self.ilm = IlmClient(self)
        self.license = LicenseClient(self)
        self.logstash = LogstashClient(self)
        self.migration = MigrationClient(self)
        self.ml = MlClient(self)
        self.monitoring = MonitoringClient(self)
        self.query_rules = QueryRulesClient(self)
        self.rollup = RollupClient(self)
        self.search_application = SearchApplicationClient(self)
        self.searchable_snapshots = SearchableSnapshotsClient(self)
        self.security = SecurityClient(self)
        self.slm = SlmClient(self)
        self.simulate = SimulateClient(self)
        self.shutdown = ShutdownClient(self)
        self.sql = SqlClient(self)
        self.ssl = SslClient(self)
        self.synonyms = SynonymsClient(self)
        self.text_structure = TextStructureClient(self)
        self.transform = TransformClient(self)
        self.watcher = WatcherClient(self)

    def __repr__(self) -> str:
        try:
            # get a list of all node base URLs
            nodes = [node.base_url for node in self.transport.node_pool.all()]
            # truncate to 5 if there are too many
            if len(nodes) > 5:
                nodes = nodes[:5] + ["..."]
            return f"<{self.__class__.__name__}({nodes})>"
        except Exception:
            # probably operating on a custom transport and node pool; ignore
            return super().__repr__()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
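
A minimal usage sketch of the async constructor above (not part of the source
file; the host URL and option values are illustrative assumptions, and it
assumes the public 'AsyncElasticsearch' entry point of the elasticsearch
package):

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Per-request options are stored on the client, not on the transport.
    client = AsyncElasticsearch(
        hosts=["https://localhost:9200"],  # illustrative endpoint
        request_timeout=10,
        max_retries=3,
        retry_on_timeout=True,
        retry_on_status=429,  # a bare int is normalized to (429,)
    )
    print(repr(client))  # __repr__ truncates the node list to 5 entries
    await client.close()


asyncio.run(main())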



elasticsearch/_sync/client/__init__.py [186:396]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ) -> None:
        if hosts is None and cloud_id is None and _transport is None:
            raise ValueError("Either 'hosts' or 'cloud_id' must be specified")

        if serializer is not None:
            if serializers is not DEFAULT:
                raise ValueError(
                    "Can't specify both 'serializer' and 'serializers' parameters "
                    "together. Instead only specify one of the other."
                )
            serializers = {default_mimetype: serializer}

        # Setting 'min_delay_between_sniffing' implies sniff_before_requests=True
        if min_delay_between_sniffing is not DEFAULT:
            sniff_before_requests = True

        sniffing_options = (
            sniff_timeout,
            sniff_on_start,
            sniff_before_requests,
            sniff_on_node_failure,
            sniffed_node_callback,
            min_delay_between_sniffing,
        )
        if cloud_id is not None and any(
            x is not DEFAULT and x is not None for x in sniffing_options
        ):
            raise ValueError(
                "Sniffing should not be enabled when connecting to Elastic Cloud"
            )

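        # Choose the sniffing callback: a user-supplied 'sniffed_node_callback'
        # is wrapped via create_sniff_callback(); otherwise fall back to the
        # default callback when any sniffing trigger is explicitly enabled.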
        sniff_callback = None
        if sniffed_node_callback is not None:
            sniff_callback = create_sniff_callback(
                sniffed_node_callback=sniffed_node_callback
            )
        elif (
            sniff_on_start is True
            or sniff_before_requests is True
            or sniff_on_node_failure is True
        ):
            sniff_callback = default_sniff_callback

        if _transport is None:
            requests_session_auth = None
            if http_auth is not None and http_auth is not DEFAULT:
                if is_requests_http_auth(http_auth):
                    # If we're using custom requests authentication
                    # then we need to alert the user that they also
                    # need to use 'node_class=requests'.
                    if not is_requests_node_class(node_class):
                        raise ValueError(
                            "Using a custom 'requests.auth.AuthBase' class for "
                            "'http_auth' must be used with node_class='requests'"
                        )

                    # Reset 'http_auth' to DEFAULT so it's not consumed below.
                    requests_session_auth = http_auth
                    http_auth = DEFAULT

            node_configs = client_node_configs(
                hosts,
                cloud_id=cloud_id,
                requests_session_auth=requests_session_auth,
                connections_per_node=connections_per_node,
                http_compress=http_compress,
                verify_certs=verify_certs,
                ca_certs=ca_certs,
                client_cert=client_cert,
                client_key=client_key,
                ssl_assert_hostname=ssl_assert_hostname,
                ssl_assert_fingerprint=ssl_assert_fingerprint,
                ssl_version=ssl_version,
                ssl_context=ssl_context,
                ssl_show_warn=ssl_show_warn,
            )
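            # Only forward options the caller explicitly set; DEFAULT
            # sentinels keep the transport's own defaults.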
            transport_kwargs: t.Dict[str, t.Any] = {}
            if node_class is not DEFAULT:
                transport_kwargs["node_class"] = node_class
            if node_pool_class is not DEFAULT:
                transport_kwargs["node_pool_class"] = node_pool_class
            if randomize_nodes_in_pool is not DEFAULT:
                transport_kwargs["randomize_nodes_in_pool"] = randomize_nodes_in_pool
            if node_selector_class is not DEFAULT:
                transport_kwargs["node_selector_class"] = node_selector_class
            if dead_node_backoff_factor is not DEFAULT:
                transport_kwargs["dead_node_backoff_factor"] = dead_node_backoff_factor
            if max_dead_node_backoff is not DEFAULT:
                transport_kwargs["max_dead_node_backoff"] = max_dead_node_backoff
            if meta_header is not DEFAULT:
                transport_kwargs["meta_header"] = meta_header

            transport_serializers = DEFAULT_SERIALIZERS.copy()
            if serializers is not DEFAULT:
                transport_serializers.update(serializers)

                # Mirror serializers registered for the non-compat mimetypes
                # onto their compatibility mimetypes, so the same serializer
                # handles both requests and responses.
                for mime_subtype in ("json", "x-ndjson"):
                    if f"application/{mime_subtype}" in serializers:
                        compat_mimetype = (
                            f"application/vnd.elasticsearch+{mime_subtype}"
                        )
                        if compat_mimetype not in serializers:
                            transport_serializers[compat_mimetype] = serializers[
                                f"application/{mime_subtype}"
                            ]

            transport_kwargs["serializers"] = transport_serializers

            transport_kwargs["default_mimetype"] = default_mimetype
            if sniff_on_start is not DEFAULT:
                transport_kwargs["sniff_on_start"] = sniff_on_start
            if sniff_before_requests is not DEFAULT:
                transport_kwargs["sniff_before_requests"] = sniff_before_requests
            if sniff_on_node_failure is not DEFAULT:
                transport_kwargs["sniff_on_node_failure"] = sniff_on_node_failure
            if sniff_timeout is not DEFAULT:
                transport_kwargs["sniff_timeout"] = sniff_timeout
            if min_delay_between_sniffing is not DEFAULT:
                transport_kwargs["min_delay_between_sniffing"] = (
                    min_delay_between_sniffing
                )

            _transport = transport_class(
                node_configs,
                client_meta_service=CLIENT_META_SERVICE,
                sniff_callback=sniff_callback,
                **transport_kwargs,
            )

            super().__init__(_transport)

            # These options are applied per-request, so they are stored separately.
            self._request_timeout = request_timeout
            self._max_retries = max_retries
            self._retry_on_timeout = retry_on_timeout
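            # Accept a bare int for convenience and normalize it to a tuple.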
            if isinstance(retry_on_status, int):
                retry_on_status = (retry_on_status,)
            self._retry_on_status = retry_on_status

        else:
            super().__init__(_transport)

        if headers is not DEFAULT and headers is not None:
            self._headers.update(headers)
        if opaque_id is not DEFAULT and opaque_id is not None:  # type: ignore[comparison-overlap]
            self._headers["x-opaque-id"] = opaque_id
        self._headers = resolve_auth_headers(
            self._headers,
            http_auth=http_auth,
            api_key=api_key,
            basic_auth=basic_auth,
            bearer_auth=bearer_auth,
        )

        # namespaced clients for compatibility with API names
        self.async_search = AsyncSearchClient(self)
        self.autoscaling = AutoscalingClient(self)
        self.cat = CatClient(self)
        self.cluster = ClusterClient(self)
        self.connector = ConnectorClient(self)
        self.fleet = FleetClient(self)
        self.features = FeaturesClient(self)
        self.indices = IndicesClient(self)
        self.inference = InferenceClient(self)
        self.ingest = IngestClient(self)
        self.nodes = NodesClient(self)
        self.snapshot = SnapshotClient(self)
        self.tasks = TasksClient(self)

        self.xpack = XPackClient(self)
        self.ccr = CcrClient(self)
        self.dangling_indices = DanglingIndicesClient(self)
        self.enrich = EnrichClient(self)
        self.eql = EqlClient(self)
        self.esql = EsqlClient(self)
        self.graph = GraphClient(self)
        self.ilm = IlmClient(self)
        self.license = LicenseClient(self)
        self.logstash = LogstashClient(self)
        self.migration = MigrationClient(self)
        self.ml = MlClient(self)
        self.monitoring = MonitoringClient(self)
        self.query_rules = QueryRulesClient(self)
        self.rollup = RollupClient(self)
        self.search_application = SearchApplicationClient(self)
        self.searchable_snapshots = SearchableSnapshotsClient(self)
        self.security = SecurityClient(self)
        self.slm = SlmClient(self)
        self.simulate = SimulateClient(self)
        self.shutdown = ShutdownClient(self)
        self.sql = SqlClient(self)
        self.ssl = SslClient(self)
        self.synonyms = SynonymsClient(self)
        self.text_structure = TextStructureClient(self)
        self.transform = TransformClient(self)
        self.watcher = WatcherClient(self)

    def __repr__(self) -> str:
        try:
            # get a list of all node base URLs
            nodes = [node.base_url for node in self.transport.node_pool.all()]
            # truncate to 5 if there are too many
            if len(nodes) > 5:
                nodes = nodes[:5] + ["..."]
            return f"<{self.__class__.__name__}({nodes})>"
        except Exception:
            # probably operating on a custom transport and node pool; ignore
            return super().__repr__()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
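
A matching sketch for the synchronous client, exercising the sniffing options
handled above (again illustrative values; it assumes a reachable cluster,
since sniffing may run eagerly, and combining any sniffing option with
'cloud_id' would raise ValueError):

from elasticsearch import Elasticsearch

client = Elasticsearch(
    hosts=["https://localhost:9200"],  # illustrative endpoint
    sniff_on_start=True,
    min_delay_between_sniffing=60,  # also implies sniff_before_requests=True
)
print(repr(client))
client.close()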



