def _process_bulk_chunk_success()

in opensearchpy/helpers/actions.py
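
Generator that walks a chunk's request/response pairs, yielding an
(ok, result) tuple per action; when raise_on_error is set it collects
failures and raises a single BulkIndexError for the whole chunk.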


# Module-level imports this function relies on (in the actual file these sit
# at the top of actions.py; BulkIndexError is assumed to come from the
# helpers' errors module, as in opensearch-py).
from operator import methodcaller

from .errors import BulkIndexError

def _process_bulk_chunk_success(resp, bulk_data, ignore_status, raise_on_error=True):
    # when raise_on_error is set, collect failures for the whole chunk so
    # they can be raised together once the chunk has been processed
    errors = []

    # go through request-response pairs and detect failures; popitem()
    # unpacks each single-key response item into its (op_type, result) pair
    for data, (op_type, item) in zip(
        bulk_data, map(methodcaller("popitem"), resp["items"])
    ):
        # treat a missing status as a server-side failure
        status_code = item.get("status", 500)

        ok = 200 <= status_code < 300
        if not ok and raise_on_error and status_code not in ignore_status:
            # include original document source
            if len(data) > 1:
                item["data"] = data[1]
            errors.append({op_type: item})

        if ok or not errors:
            # unless we are collecting errors to raise them all at once at
            # the end of the chunk, yield each item individually
            # (successes are always yielded)
            yield ok, {op_type: item}

    if errors:
        raise BulkIndexError("%i document(s) failed to index." % len(errors), errors)
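
For reference, a minimal sketch of how the generator behaves when consumed
directly. The resp and bulk_data values below are illustrative assumptions:
resp mimics a parsed _bulk API reply and bulk_data the (action, source)
tuples sent for the chunk; index names and field values are made up.

resp = {
    "items": [
        {"index": {"_index": "logs", "_id": "1", "status": 201}},
        {"index": {"_index": "logs", "_id": "2", "status": 409}},
    ]
}
bulk_data = [
    ({"index": {"_index": "logs", "_id": "1"}}, {"msg": "ok"}),
    ({"index": {"_index": "logs", "_id": "2"}}, {"msg": "conflict"}),
]

# with raise_on_error=False, failures are yielded instead of collected
for ok, result in _process_bulk_chunk_success(
    resp, bulk_data, ignore_status=(), raise_on_error=False
):
    print(ok, result)
# True {'index': {'_index': 'logs', '_id': '1', 'status': 201}}
# False {'index': {'_index': 'logs', '_id': '2', 'status': 409}}

# With raise_on_error=True (the default) the same input would yield only the
# success, then raise BulkIndexError("1 document(s) failed to index.") once
# the loop finishes, with the failed item (and its source under "data")
# attached as the exception's second argument.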