elasticsearch_serverless/_async/client/__init__.py [3518:3772]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self,
        *,
        dest: t.Optional[t.Mapping[str, t.Any]] = None,
        source: t.Optional[t.Mapping[str, t.Any]] = None,
        conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        max_docs: t.Optional[int] = None,
        pretty: t.Optional[bool] = None,
        refresh: t.Optional[bool] = None,
        requests_per_second: t.Optional[float] = None,
        require_alias: t.Optional[bool] = None,
        script: t.Optional[t.Mapping[str, t.Any]] = None,
        scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        size: t.Optional[int] = None,
        slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        wait_for_active_shards: t.Optional[
            t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
        ] = None,
        wait_for_completion: t.Optional[bool] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

          <p>Reindex documents.</p>
          <p>Copy documents from a source to a destination.
          You can copy all documents to the destination index or reindex a subset of the documents.
          The source can be any existing index, alias, or data stream.
          The destination must differ from the source.
          For example, you cannot reindex a data stream into itself.</p>
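          <p>For example, a minimal copy with this client might look like the following sketch (the <code>client</code> instance and index names are illustrative):</p>
          <pre><code>resp = await client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
          )
          </code></pre>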
          <p>IMPORTANT: Reindex requires <code>_source</code> to be enabled for all documents in the source.
          The destination should be configured as wanted before calling the reindex API.
          Reindex does not copy the settings from the source or its associated template.
          Mappings, shard counts, and replicas, for example, must be configured ahead of time.</p>
          <p>If the Elasticsearch security features are enabled, you must have the following security privileges:</p>
          <ul>
          <li>The <code>read</code> index privilege for the source data stream, index, or alias.</li>
          <li>The <code>write</code> index privilege for the destination data stream, index, or index alias.</li>
          <li>To automatically create a data stream or index with a reindex API request, you must have the <code>auto_configure</code>, <code>create_index</code>, or <code>manage</code> index privilege for the destination data stream, index, or alias.</li>
          <li>If reindexing from a remote cluster, the <code>source.remote.user</code> must have the <code>monitor</code> cluster privilege and the <code>read</code> index privilege for the source data stream, index, or alias.</li>
          </ul>
          <p>If reindexing from a remote cluster, you must explicitly allow the remote host in the <code>reindex.remote.whitelist</code> setting.
          Automatic data stream creation requires a matching index template with data stream enabled.</p>
          <p>The <code>dest</code> element can be configured like the index API to control optimistic concurrency control.
          Omitting <code>version_type</code> or setting it to <code>internal</code> causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.</p>
          <p>Setting <code>version_type</code> to <code>external</code> causes Elasticsearch to preserve the <code>version</code> from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.</p>
          <p>Setting <code>op_type</code> to <code>create</code> causes the reindex API to create only missing documents in the destination.
          All existing documents will cause a version conflict.</p>
          <p>IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an <code>op_type</code> of <code>create</code>.
          A reindex can only add new documents to a destination data stream.
          It cannot update existing documents in a destination data stream.</p>
          <p>By default, version conflicts abort the reindex process.
          To continue reindexing if there are conflicts, set the <code>conflicts</code> request body property to <code>proceed</code>.
          In this case, the response includes a count of the version conflicts that were encountered.
          Note that the handling of other error types is unaffected by the <code>conflicts</code> property.
          Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than <code>max_docs</code> until it has successfully indexed <code>max_docs</code> documents into the target or it has gone through every document in the source query.</p>
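          <p>For example, a sketch that creates only missing documents while tolerating version conflicts, capped at 1000 documents (index names are illustrative):</p>
          <pre><code>resp = await client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;, &quot;op_type&quot;: &quot;create&quot;},
              conflicts=&quot;proceed&quot;,
              max_docs=1000,
          )
          </code></pre>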
          <p>NOTE: The reindex API makes no effort to handle ID collisions.
          The last document written will &quot;win&quot; but the order isn't usually predictable so it is not a good idea to rely on this behavior.
          Instead, make sure that IDs are unique by using a script.</p>
          <p><strong>Running reindex asynchronously</strong></p>
          <p>If the request contains <code>wait_for_completion=false</code>, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.
          Elasticsearch creates a record of this task as a document at <code>_tasks/&lt;task_id&gt;</code>.</p>
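          <p>For example, a sketch that launches the reindex as a task and reads the task ID from the response:</p>
          <pre><code>resp = await client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
              wait_for_completion=False,
          )
          task_id = resp[&quot;task&quot;]  # use with the tasks API to monitor or cancel
          </code></pre>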
          <p><strong>Reindex from multiple sources</strong></p>
          <p>If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.
          That way you can resume the process if there are any errors by removing the partially completed source and starting over.
          It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.</p>
          <p>For example, you can use a bash script like this:</p>
          <pre><code>for index in i1 i2 i3 i4 i5; do
            curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
              &quot;source&quot;: {
                &quot;index&quot;: &quot;'$index'&quot;
              },
              &quot;dest&quot;: {
                &quot;index&quot;: &quot;'$index'-reindexed&quot;
              }
            }'
          done
          </code></pre>
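          <p>The equivalent loop with this client could be sketched as:</p>
          <pre><code>for index in [&quot;i1&quot;, &quot;i2&quot;, &quot;i3&quot;, &quot;i4&quot;, &quot;i5&quot;]:
              await client.reindex(
                  source={&quot;index&quot;: index},
                  dest={&quot;index&quot;: f&quot;{index}-reindexed&quot;},
              )
          </code></pre>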
          <p><strong>Throttling</strong></p>
          <p>Set <code>requests_per_second</code> to any positive decimal number (<code>1.4</code>, <code>6</code>, <code>1000</code>, for example) to throttle the rate at which reindex issues batches of index operations.
          Requests are throttled by padding each batch with a wait time.
          To turn off throttling, set <code>requests_per_second</code> to <code>-1</code>.</p>
          <p>The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.
          The padding time is the difference between the batch size divided by the <code>requests_per_second</code> and the time spent writing.
          By default the batch size is <code>1000</code>, so if <code>requests_per_second</code> is set to <code>500</code>:</p>
          <pre><code>target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
          </code></pre>
          <p>Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.
          This is &quot;bursty&quot; instead of &quot;smooth&quot;.</p>
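          <p>For example, a sketch that throttles the request and shrinks the scroll batch size by setting <code>size</code> inside <code>source</code> (the numbers are illustrative):</p>
          <pre><code>resp = await client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;, &quot;size&quot;: 100},  # batches of 100 instead of 1000
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
              requests_per_second=50,
          )
          </code></pre>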
          <p><strong>Slicing</strong></p>
          <p>Reindex supports sliced scroll to parallelize the reindexing process.
          This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.</p>
          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
          <p>You can slice a reindex request manually by providing a slice ID and total number of slices to each request.
          You can also let reindex automatically parallelize by using sliced scroll to slice on <code>_id</code>.
          The <code>slices</code> parameter specifies the number of slices to use.</p>
          <p>Adding <code>slices</code> to the reindex request just automates the manual process, creating sub-requests, which means it has some quirks:</p>
          <ul>
          <li>You can see these requests in the tasks API. These sub-requests are &quot;child&quot; tasks of the task for the request with slices.</li>
          <li>Fetching the status of the task for the request with <code>slices</code> only contains the status of completed slices.</li>
          <li>These sub-requests are individually addressable for things like cancellation and rethrottling.</li>
          <li>Rethrottling the request with <code>slices</code> will rethrottle the unfinished sub-request proportionally.</li>
          <li>Canceling the request with <code>slices</code> will cancel each sub-request.</li>
          <li>Due to the nature of <code>slices</code>, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.</li>
          <li>Parameters like <code>requests_per_second</code> and <code>max_docs</code> on a request with <code>slices</code> are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using <code>max_docs</code> with <code>slices</code> might not result in exactly <code>max_docs</code> documents being reindexed.</li>
          <li>Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.</li>
          </ul>
          <p>If slicing automatically, setting <code>slices</code> to <code>auto</code> will choose a reasonable number for most indices.
          If slicing manually or otherwise tuning automatic slicing, use the following guidelines.</p>
          <p>Query performance is most efficient when the number of slices is equal to the number of shards in the index.
          If that number is large (for example, <code>500</code>), choose a lower number as too many slices will hurt performance.
          Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</p>
          <p>Indexing performance scales linearly across available resources with the number of slices.</p>
          <p>Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
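          <p>For example, a sketch of automatic slicing; manual slicing would instead pass a <code>slice</code> object with <code>id</code> and <code>max</code> inside <code>source</code>:</p>
          <pre><code>resp = await client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
              slices=&quot;auto&quot;,
          )
          </code></pre>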
          <p><strong>Modify documents during reindexing</strong></p>
          <p>Like <code>_update_by_query</code>, reindex operations support a script that modifies the document.
          Unlike <code>_update_by_query</code>, the script is allowed to modify the document's metadata.</p>
          <p>Just as in <code>_update_by_query</code>, you can set <code>ctx.op</code> to change the operation that is run on the destination.
          For example, set <code>ctx.op</code> to <code>noop</code> if your script decides that the document doesn’t have to be indexed in the destination. This &quot;no operation&quot; will be reported in the <code>noop</code> counter in the response body.
          Set <code>ctx.op</code> to <code>delete</code> if your script decides that the document must be deleted from the destination.
          The deletion will be reported in the <code>deleted</code> counter in the response body.
          Setting <code>ctx.op</code> to anything else will return an error, as will setting any other field in <code>ctx</code>.</p>
          <p>Think of the possibilities! Just be careful; you are able to change:</p>
          <ul>
          <li><code>_id</code></li>
          <li><code>_index</code></li>
          <li><code>_version</code></li>
          <li><code>_routing</code></li>
          </ul>
          <p>Setting <code>_version</code> to <code>null</code> or clearing it from the <code>ctx</code> map is just like not sending the version in an indexing request.
          It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.</p>
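          <p>For example, a sketch that increments the version and removes a field while reindexing (the field and index names are illustrative):</p>
          <pre><code>resp = await client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;, &quot;version_type&quot;: &quot;external&quot;},
              script={
                  &quot;source&quot;: &quot;if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}&quot;,
                  &quot;lang&quot;: &quot;painless&quot;,
              },
          )
          </code></pre>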
          <p><strong>Reindex from remote</strong></p>
          <p>Reindex supports reindexing from a remote Elasticsearch cluster.
          The <code>host</code> parameter must contain a scheme, host, port, and optional path.
          The <code>username</code> and <code>password</code> parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.
          Be sure to use HTTPS when using basic authentication or the password will be sent in plain text.
          There are a range of settings available to configure the behavior of the HTTPS connection.</p>
          <p>When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
          Remote hosts must be explicitly allowed with the <code>reindex.remote.whitelist</code> setting.
          It can be set to a comma delimited list of allowed remote host and port combinations.
          Scheme is ignored; only the host and port are used.
          For example:</p>
          <pre><code>reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
          </code></pre>
          <p>The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
          This feature should work with remote clusters of any version of Elasticsearch.
          This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.</p>
          <p>WARNING: Elasticsearch does not support forward compatibility across major versions.
          For example, you cannot reindex from a 7.x cluster into a 6.x cluster.</p>
          <p>To enable queries sent to older versions of Elasticsearch, the <code>query</code> parameter is sent directly to the remote host without validation or modification.</p>
          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
          <p>Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
          If the remote index includes very large documents you'll need to use a smaller batch size.
          It is also possible to set the socket read timeout on the remote connection with the <code>socket_timeout</code> field and the connection timeout with the <code>connect_timeout</code> field.
          Both default to 30 seconds.</p>
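          <p>For example, a sketch of reindexing from a remote cluster (the host, credentials, and index names are illustrative):</p>
          <pre><code>resp = await client.reindex(
              source={
                  &quot;remote&quot;: {
                      &quot;host&quot;: &quot;https://otherhost:9200&quot;,
                      &quot;username&quot;: &quot;user&quot;,
                      &quot;password&quot;: &quot;pass&quot;,
                  },
                  &quot;index&quot;: &quot;my-index-000001&quot;,
              },
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
          )
          </code></pre>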
          <p><strong>Configuring SSL parameters</strong></p>
          <p>Reindex from remote supports configurable SSL settings.
          These must be specified in the <code>elasticsearch.yml</code> file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
          It is not possible to configure SSL in the body of the reindex request.</p>


        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex>`_

        :param dest: The destination you are copying to.
        :param source: The source you are copying from.
        :param conflicts: Indicates whether to continue reindexing even when there are
            conflicts.
        :param max_docs: The maximum number of documents to reindex. By default, all
            documents are reindexed. If it is a value less than or equal to `scroll_size`,
            a scroll will not be used to retrieve the results for the operation. If `conflicts`
            is set to `proceed`, the reindex operation could attempt to reindex more
            documents from the source than `max_docs` until it has successfully indexed
            `max_docs` documents into the target or it has gone through every document
            in the source query.
        :param refresh: If `true`, the request refreshes affected shards to make this
            operation visible to search.
        :param requests_per_second: The throttle for this request in sub-requests per
            second. By default, there is no throttle.
        :param require_alias: If `true`, the destination must be an index alias.
        :param script: The script to run to update the document source or metadata when
            reindexing.
        :param scroll: The period of time that a consistent view of the index should
            be maintained for scrolled search.
        :param size: The maximum number of documents to reindex. It is a deprecated
            alias for `max_docs`.
        :param slices: The number of slices this task should be divided into. It defaults
            to one slice, which means the task isn't sliced into subtasks. Reindex supports
            sliced scroll to parallelize the reindexing process. This parallelization
            can improve efficiency and provide a convenient way to break the request
            down into smaller parts. NOTE: Reindexing from remote clusters does not support
            manual or automatic slicing. If set to `auto`, Elasticsearch chooses the
            number of slices to use. This setting will use one slice per shard, up to
            a certain limit. If there are multiple sources, it will choose the number
            of slices based on the index or backing index with the smallest number of
            shards.
        :param timeout: The period each indexing operation waits for automatic index
            creation, dynamic mapping updates, and active shards. By default, Elasticsearch
            waits for at least one minute before failing. The actual wait time could
            be longer, particularly when multiple waits occur.
        :param wait_for_active_shards: The number of shard copies that must be active
            before proceeding with the operation. Set it to `all` or any positive integer
            up to the total number of copies for each shard in the index
            (`number_of_replicas+1`). The default value is one, which means it waits
            for each primary shard to be active.
        :param wait_for_completion: If `true`, the request blocks until the operation
            is complete.
        """
        if dest is None and body is None:
            raise ValueError("Empty value passed for parameter 'dest'")
        if source is None and body is None:
            raise ValueError("Empty value passed for parameter 'source'")
        __path_parts: t.Dict[str, str] = {}
        __path = "/_reindex"
        __query: t.Dict[str, t.Any] = {}
        __body: t.Dict[str, t.Any] = body if body is not None else {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if pretty is not None:
            __query["pretty"] = pretty
        if refresh is not None:
            __query["refresh"] = refresh
        if requests_per_second is not None:
            __query["requests_per_second"] = requests_per_second
        if require_alias is not None:
            __query["require_alias"] = require_alias
        if scroll is not None:
            __query["scroll"] = scroll
        if slices is not None:
            __query["slices"] = slices
        if timeout is not None:
            __query["timeout"] = timeout
        if wait_for_active_shards is not None:
            __query["wait_for_active_shards"] = wait_for_active_shards
        if wait_for_completion is not None:
            __query["wait_for_completion"] = wait_for_completion
        if not __body:
            if dest is not None:
                __body["dest"] = dest
            if source is not None:
                __body["source"] = source
            if conflicts is not None:
                __body["conflicts"] = conflicts
            if max_docs is not None:
                __body["max_docs"] = max_docs
            if script is not None:
                __body["script"] = script
            if size is not None:
                __body["size"] = size
        __headers = {"accept": "application/json", "content-type": "application/json"}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



elasticsearch_serverless/_sync/client/__init__.py [3516:3770]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self,
        *,
        dest: t.Optional[t.Mapping[str, t.Any]] = None,
        source: t.Optional[t.Mapping[str, t.Any]] = None,
        conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        max_docs: t.Optional[int] = None,
        pretty: t.Optional[bool] = None,
        refresh: t.Optional[bool] = None,
        requests_per_second: t.Optional[float] = None,
        require_alias: t.Optional[bool] = None,
        script: t.Optional[t.Mapping[str, t.Any]] = None,
        scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        size: t.Optional[int] = None,
        slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        wait_for_active_shards: t.Optional[
            t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
        ] = None,
        wait_for_completion: t.Optional[bool] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

          <p>Reindex documents.</p>
          <p>Copy documents from a source to a destination.
          You can copy all documents to the destination index or reindex a subset of the documents.
          The source can be any existing index, alias, or data stream.
          The destination must differ from the source.
          For example, you cannot reindex a data stream into itself.</p>
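          <p>For example, a minimal copy with this client might look like the following sketch (the <code>client</code> instance and index names are illustrative):</p>
          <pre><code>resp = client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
          )
          </code></pre>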
          <p>IMPORTANT: Reindex requires <code>_source</code> to be enabled for all documents in the source.
          The destination should be configured as wanted before calling the reindex API.
          Reindex does not copy the settings from the source or its associated template.
          Mappings, shard counts, and replicas, for example, must be configured ahead of time.</p>
          <p>If the Elasticsearch security features are enabled, you must have the following security privileges:</p>
          <ul>
          <li>The <code>read</code> index privilege for the source data stream, index, or alias.</li>
          <li>The <code>write</code> index privilege for the destination data stream, index, or index alias.</li>
          <li>To automatically create a data stream or index with a reindex API request, you must have the <code>auto_configure</code>, <code>create_index</code>, or <code>manage</code> index privilege for the destination data stream, index, or alias.</li>
          <li>If reindexing from a remote cluster, the <code>source.remote.user</code> must have the <code>monitor</code> cluster privilege and the <code>read</code> index privilege for the source data stream, index, or alias.</li>
          </ul>
          <p>If reindexing from a remote cluster, you must explicitly allow the remote host in the <code>reindex.remote.whitelist</code> setting.
          Automatic data stream creation requires a matching index template with data stream enabled.</p>
          <p>The <code>dest</code> element can be configured like the index API to control optimistic concurrency control.
          Omitting <code>version_type</code> or setting it to <code>internal</code> causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.</p>
          <p>Setting <code>version_type</code> to <code>external</code> causes Elasticsearch to preserve the <code>version</code> from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.</p>
          <p>Setting <code>op_type</code> to <code>create</code> causes the reindex API to create only missing documents in the destination.
          All existing documents will cause a version conflict.</p>
          <p>IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an <code>op_type</code> of <code>create</code>.
          A reindex can only add new documents to a destination data stream.
          It cannot update existing documents in a destination data stream.</p>
          <p>By default, version conflicts abort the reindex process.
          To continue reindexing if there are conflicts, set the <code>conflicts</code> request body property to <code>proceed</code>.
          In this case, the response includes a count of the version conflicts that were encountered.
          Note that the handling of other error types is unaffected by the <code>conflicts</code> property.
          Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than <code>max_docs</code> until it has successfully indexed <code>max_docs</code> documents into the target or it has gone through every document in the source query.</p>
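          <p>For example, a sketch that creates only missing documents while tolerating version conflicts, capped at 1000 documents (index names are illustrative):</p>
          <pre><code>resp = client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;, &quot;op_type&quot;: &quot;create&quot;},
              conflicts=&quot;proceed&quot;,
              max_docs=1000,
          )
          </code></pre>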
          <p>NOTE: The reindex API makes no effort to handle ID collisions.
          The last document written will &quot;win&quot; but the order isn't usually predictable so it is not a good idea to rely on this behavior.
          Instead, make sure that IDs are unique by using a script.</p>
          <p><strong>Running reindex asynchronously</strong></p>
          <p>If the request contains <code>wait_for_completion=false</code>, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.
          Elasticsearch creates a record of this task as a document at <code>_tasks/&lt;task_id&gt;</code>.</p>
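          <p>For example, a sketch that launches the reindex as a task and reads the task ID from the response:</p>
          <pre><code>resp = client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
              wait_for_completion=False,
          )
          task_id = resp[&quot;task&quot;]  # use with the tasks API to monitor or cancel
          </code></pre>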
          <p><strong>Reindex from multiple sources</strong></p>
          <p>If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.
          That way you can resume the process if there are any errors by removing the partially completed source and starting over.
          It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.</p>
          <p>For example, you can use a bash script like this:</p>
          <pre><code>for index in i1 i2 i3 i4 i5; do
            curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
              &quot;source&quot;: {
                &quot;index&quot;: &quot;'$index'&quot;
              },
              &quot;dest&quot;: {
                &quot;index&quot;: &quot;'$index'-reindexed&quot;
              }
            }'
          done
          </code></pre>
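          <p>The equivalent loop with this client could be sketched as:</p>
          <pre><code>for index in [&quot;i1&quot;, &quot;i2&quot;, &quot;i3&quot;, &quot;i4&quot;, &quot;i5&quot;]:
              client.reindex(
                  source={&quot;index&quot;: index},
                  dest={&quot;index&quot;: f&quot;{index}-reindexed&quot;},
              )
          </code></pre>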
          <p><strong>Throttling</strong></p>
          <p>Set <code>requests_per_second</code> to any positive decimal number (<code>1.4</code>, <code>6</code>, <code>1000</code>, for example) to throttle the rate at which reindex issues batches of index operations.
          Requests are throttled by padding each batch with a wait time.
          To turn off throttling, set <code>requests_per_second</code> to <code>-1</code>.</p>
          <p>The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.
          The padding time is the difference between the batch size divided by the <code>requests_per_second</code> and the time spent writing.
          By default the batch size is <code>1000</code>, so if <code>requests_per_second</code> is set to <code>500</code>:</p>
          <pre><code>target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
          </code></pre>
          <p>Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.
          This is &quot;bursty&quot; instead of &quot;smooth&quot;.</p>
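          <p>For example, a sketch that throttles the request and shrinks the scroll batch size by setting <code>size</code> inside <code>source</code> (the numbers are illustrative):</p>
          <pre><code>resp = client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;, &quot;size&quot;: 100},  # batches of 100 instead of 1000
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
              requests_per_second=50,
          )
          </code></pre>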
          <p><strong>Slicing</strong></p>
          <p>Reindex supports sliced scroll to parallelize the reindexing process.
          This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.</p>
          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
          <p>You can slice a reindex request manually by providing a slice ID and total number of slices to each request.
          You can also let reindex automatically parallelize by using sliced scroll to slice on <code>_id</code>.
          The <code>slices</code> parameter specifies the number of slices to use.</p>
          <p>Adding <code>slices</code> to the reindex request just automates the manual process, creating sub-requests, which means it has some quirks:</p>
          <ul>
          <li>You can see these requests in the tasks API. These sub-requests are &quot;child&quot; tasks of the task for the request with slices.</li>
          <li>Fetching the status of the task for the request with <code>slices</code> only contains the status of completed slices.</li>
          <li>These sub-requests are individually addressable for things like cancellation and rethrottling.</li>
          <li>Rethrottling the request with <code>slices</code> will rethrottle the unfinished sub-request proportionally.</li>
          <li>Canceling the request with <code>slices</code> will cancel each sub-request.</li>
          <li>Due to the nature of <code>slices</code>, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.</li>
          <li>Parameters like <code>requests_per_second</code> and <code>max_docs</code> on a request with <code>slices</code> are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using <code>max_docs</code> with <code>slices</code> might not result in exactly <code>max_docs</code> documents being reindexed.</li>
          <li>Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.</li>
          </ul>
          <p>If slicing automatically, setting <code>slices</code> to <code>auto</code> will choose a reasonable number for most indices.
          If slicing manually or otherwise tuning automatic slicing, use the following guidelines.</p>
          <p>Query performance is most efficient when the number of slices is equal to the number of shards in the index.
          If that number is large (for example, <code>500</code>), choose a lower number as too many slices will hurt performance.
          Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</p>
          <p>Indexing performance scales linearly across available resources with the number of slices.</p>
          <p>Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
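          <p>For example, a sketch of automatic slicing; manual slicing would instead pass a <code>slice</code> object with <code>id</code> and <code>max</code> inside <code>source</code>:</p>
          <pre><code>resp = client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
              slices=&quot;auto&quot;,
          )
          </code></pre>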
          <p><strong>Modify documents during reindexing</strong></p>
          <p>Like <code>_update_by_query</code>, reindex operations support a script that modifies the document.
          Unlike <code>_update_by_query</code>, the script is allowed to modify the document's metadata.</p>
          <p>Just as in <code>_update_by_query</code>, you can set <code>ctx.op</code> to change the operation that is run on the destination.
          For example, set <code>ctx.op</code> to <code>noop</code> if your script decides that the document doesn’t have to be indexed in the destination. This &quot;no operation&quot; will be reported in the <code>noop</code> counter in the response body.
          Set <code>ctx.op</code> to <code>delete</code> if your script decides that the document must be deleted from the destination.
          The deletion will be reported in the <code>deleted</code> counter in the response body.
          Setting <code>ctx.op</code> to anything else will return an error, as will setting any other field in <code>ctx</code>.</p>
          <p>Think of the possibilities! Just be careful; you are able to change:</p>
          <ul>
          <li><code>_id</code></li>
          <li><code>_index</code></li>
          <li><code>_version</code></li>
          <li><code>_routing</code></li>
          </ul>
          <p>Setting <code>_version</code> to <code>null</code> or clearing it from the <code>ctx</code> map is just like not sending the version in an indexing request.
          It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.</p>
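          <p>For example, a sketch that increments the version and removes a field while reindexing (the field and index names are illustrative):</p>
          <pre><code>resp = client.reindex(
              source={&quot;index&quot;: &quot;my-index-000001&quot;},
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;, &quot;version_type&quot;: &quot;external&quot;},
              script={
                  &quot;source&quot;: &quot;if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}&quot;,
                  &quot;lang&quot;: &quot;painless&quot;,
              },
          )
          </code></pre>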
          <p><strong>Reindex from remote</strong></p>
          <p>Reindex supports reindexing from a remote Elasticsearch cluster.
          The <code>host</code> parameter must contain a scheme, host, port, and optional path.
          The <code>username</code> and <code>password</code> parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.
          Be sure to use HTTPS when using basic authentication or the password will be sent in plain text.
          There are a range of settings available to configure the behavior of the HTTPS connection.</p>
          <p>When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
          Remote hosts must be explicitly allowed with the <code>reindex.remote.whitelist</code> setting.
          It can be set to a comma delimited list of allowed remote host and port combinations.
          Scheme is ignored; only the host and port are used.
          For example:</p>
          <pre><code>reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
          </code></pre>
          <p>The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
          This feature should work with remote clusters of any version of Elasticsearch.
          This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.</p>
          <p>WARNING: Elasticsearch does not support forward compatibility across major versions.
          For example, you cannot reindex from a 7.x cluster into a 6.x cluster.</p>
          <p>To enable queries sent to older versions of Elasticsearch, the <code>query</code> parameter is sent directly to the remote host without validation or modification.</p>
          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
          <p>Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
          If the remote index includes very large documents you'll need to use a smaller batch size.
          It is also possible to set the socket read timeout on the remote connection with the <code>socket_timeout</code> field and the connection timeout with the <code>connect_timeout</code> field.
          Both default to 30 seconds.</p>
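          <p>For example, a sketch of reindexing from a remote cluster (the host, credentials, and index names are illustrative):</p>
          <pre><code>resp = client.reindex(
              source={
                  &quot;remote&quot;: {
                      &quot;host&quot;: &quot;https://otherhost:9200&quot;,
                      &quot;username&quot;: &quot;user&quot;,
                      &quot;password&quot;: &quot;pass&quot;,
                  },
                  &quot;index&quot;: &quot;my-index-000001&quot;,
              },
              dest={&quot;index&quot;: &quot;my-new-index-000001&quot;},
          )
          </code></pre>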
          <p><strong>Configuring SSL parameters</strong></p>
          <p>Reindex from remote supports configurable SSL settings.
          These must be specified in the <code>elasticsearch.yml</code> file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
          It is not possible to configure SSL in the body of the reindex request.</p>


        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex>`_

        :param dest: The destination you are copying to.
        :param source: The source you are copying from.
        :param conflicts: Indicates whether to continue reindexing even when there are
            conflicts.
        :param max_docs: The maximum number of documents to reindex. By default, all
            documents are reindexed. If it is a value less than or equal to `scroll_size`,
            a scroll will not be used to retrieve the results for the operation. If `conflicts`
            is set to `proceed`, the reindex operation could attempt to reindex more
            documents from the source than `max_docs` until it has successfully indexed
            `max_docs` documents into the target or it has gone through every document
            in the source query.
        :param refresh: If `true`, the request refreshes affected shards to make this
            operation visible to search.
        :param requests_per_second: The throttle for this request in sub-requests per
            second. By default, there is no throttle.
        :param require_alias: If `true`, the destination must be an index alias.
        :param script: The script to run to update the document source or metadata when
            reindexing.
        :param scroll: The period of time that a consistent view of the index should
            be maintained for scrolled search.
        :param size: The maximum number of documents to reindex. It is a deprecated
            alias for `max_docs`.
        :param slices: The number of slices this task should be divided into. It defaults
            to one slice, which means the task isn't sliced into subtasks. Reindex supports
            sliced scroll to parallelize the reindexing process. This parallelization
            can improve efficiency and provide a convenient way to break the request
            down into smaller parts. NOTE: Reindexing from remote clusters does not support
            manual or automatic slicing. If set to `auto`, Elasticsearch chooses the
            number of slices to use. This setting will use one slice per shard, up to
            a certain limit. If there are multiple sources, it will choose the number
            of slices based on the index or backing index with the smallest number of
            shards.
        :param timeout: The period each indexing operation waits for automatic index
            creation, dynamic mapping updates, and active shards. By default, Elasticsearch
            waits for at least one minute before failing. The actual wait time could
            be longer, particularly when multiple waits occur.
        :param wait_for_active_shards: The number of shard copies that must be active
            before proceeding with the operation. Set it to `all` or any positive integer
            up to the total number of copies for each shard in the index
            (`number_of_replicas+1`). The default value is one, which means it waits
            for each primary shard to be active.
        :param wait_for_completion: If `true`, the request blocks until the operation
            is complete.
        """
        if dest is None and body is None:
            raise ValueError("Empty value passed for parameter 'dest'")
        if source is None and body is None:
            raise ValueError("Empty value passed for parameter 'source'")
        __path_parts: t.Dict[str, str] = {}
        __path = "/_reindex"
        __query: t.Dict[str, t.Any] = {}
        __body: t.Dict[str, t.Any] = body if body is not None else {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if pretty is not None:
            __query["pretty"] = pretty
        if refresh is not None:
            __query["refresh"] = refresh
        if requests_per_second is not None:
            __query["requests_per_second"] = requests_per_second
        if require_alias is not None:
            __query["require_alias"] = require_alias
        if scroll is not None:
            __query["scroll"] = scroll
        if slices is not None:
            __query["slices"] = slices
        if timeout is not None:
            __query["timeout"] = timeout
        if wait_for_active_shards is not None:
            __query["wait_for_active_shards"] = wait_for_active_shards
        if wait_for_completion is not None:
            __query["wait_for_completion"] = wait_for_completion
        if not __body:
            if dest is not None:
                __body["dest"] = dest
            if source is not None:
                __body["source"] = source
            if conflicts is not None:
                __body["conflicts"] = conflicts
            if max_docs is not None:
                __body["max_docs"] = max_docs
            if script is not None:
                __body["script"] = script
            if size is not None:
                __body["size"] = size
        __headers = {"accept": "application/json", "content-type": "application/json"}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



