services/search/src/search/routes/filter.py [108:149]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                with StepProfiler(method="filter_endpoint", step="build index if missing"):
                    # get the parquet files metadata and the dataset revision from the cache
                    parquet_metadata_response = get_cache_entry_from_parquet_metadata_job(
                        dataset=dataset,
                        config=config,
                        hf_endpoint=hf_endpoint,
                        hf_token=hf_token,
                        hf_timeout_seconds=hf_timeout_seconds,
                        blocked_datasets=blocked_datasets,
                        storage_clients=storage_clients,
                    )
                    revision = parquet_metadata_response["dataset_git_revision"]
                    if parquet_metadata_response["http_status"] != HTTPStatus.OK:
                        return get_json_error_response(
                            content=parquet_metadata_response["content"],
                            status_code=parquet_metadata_response["http_status"],
                            max_age=max_age_short,
                            error_code=parquet_metadata_response["error_code"],
                            revision=revision,
                        )
                    content_parquet_metadata = parquet_metadata_response["content"]
                    split_parquet_files = [
                        parquet_file
                        for parquet_file in content_parquet_metadata["parquet_files_metadata"]
                        if parquet_file["config"] == config and parquet_file["split"] == split
                    ]
                    index_file_location, partial = await get_index_file_location_and_build_if_missing(
                        duckdb_index_file_directory=duckdb_index_file_directory,
                        dataset=dataset,
                        config=config,
                        split=split,
                        revision=revision,
                        hf_token=hf_token,
                        max_split_size_bytes=max_split_size_bytes,
                        extensions_directory=extensions_directory,
                        parquet_metadata_directory=parquet_metadata_directory,
                        split_parquet_files=split_parquet_files,
                        features=content_parquet_metadata["features"],
                    )
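                    # note: `partial` is True when the index covers only part of the
                    # split, e.g. because the split exceeded max_split_size_bytes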
                    # features must contain the row idx column for full_text_search
                    features = Features.from_dict(content_parquet_metadata["features"])
                    features[ROW_IDX_COLUMN] = Value("int64")
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
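
The two blocks above and below are verbatim duplicates, so they are a natural candidate for extraction into a shared helper. Here is a minimal sketch of one possible factoring: the helper name get_split_index_or_error and the SplitIndex container are hypothetical, and the functions it calls (get_cache_entry_from_parquet_metadata_job, get_json_error_response, get_index_file_location_and_build_if_missing) plus ROW_IDX_COLUMN are assumed to be importable from the same modules both routes already use.

from dataclasses import dataclass
from http import HTTPStatus
from typing import Any, Optional, Union

from datasets import Features, Value
from starlette.responses import Response

# NOTE: get_cache_entry_from_parquet_metadata_job, get_json_error_response,
# get_index_file_location_and_build_if_missing and ROW_IDX_COLUMN are assumed
# to come from the modules the two routes import them from today.


@dataclass
class SplitIndex:
    index_file_location: str
    partial: bool
    features: Features  # includes the row idx column needed for full_text_search
    revision: str


async def get_split_index_or_error(
    *,
    dataset: str,
    config: str,
    split: str,
    hf_endpoint: str,
    hf_token: Optional[str],
    hf_timeout_seconds: Optional[float],
    blocked_datasets: list[str],
    storage_clients: Any,
    duckdb_index_file_directory: Any,
    max_split_size_bytes: int,
    extensions_directory: Optional[str],
    parquet_metadata_directory: Any,
    max_age_short: int,
) -> Union[SplitIndex, Response]:
    # get the parquet files metadata and the dataset revision from the cache
    parquet_metadata_response = get_cache_entry_from_parquet_metadata_job(
        dataset=dataset,
        config=config,
        hf_endpoint=hf_endpoint,
        hf_token=hf_token,
        hf_timeout_seconds=hf_timeout_seconds,
        blocked_datasets=blocked_datasets,
        storage_clients=storage_clients,
    )
    revision = parquet_metadata_response["dataset_git_revision"]
    if parquet_metadata_response["http_status"] != HTTPStatus.OK:
        # propagate the cached error as a JSON response, exactly as both routes do
        return get_json_error_response(
            content=parquet_metadata_response["content"],
            status_code=parquet_metadata_response["http_status"],
            max_age=max_age_short,
            error_code=parquet_metadata_response["error_code"],
            revision=revision,
        )
    content_parquet_metadata = parquet_metadata_response["content"]
    # keep only the parquet files that belong to the requested config and split
    split_parquet_files = [
        parquet_file
        for parquet_file in content_parquet_metadata["parquet_files_metadata"]
        if parquet_file["config"] == config and parquet_file["split"] == split
    ]
    index_file_location, partial = await get_index_file_location_and_build_if_missing(
        duckdb_index_file_directory=duckdb_index_file_directory,
        dataset=dataset,
        config=config,
        split=split,
        revision=revision,
        hf_token=hf_token,
        max_split_size_bytes=max_split_size_bytes,
        extensions_directory=extensions_directory,
        parquet_metadata_directory=parquet_metadata_directory,
        split_parquet_files=split_parquet_files,
        features=content_parquet_metadata["features"],
    )
    # features must contain the row idx column for full_text_search
    features = Features.from_dict(content_parquet_metadata["features"])
    features[ROW_IDX_COLUMN] = Value("int64")
    return SplitIndex(index_file_location, partial, features, revision)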



services/search/src/search/routes/search.py [163:204]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                with StepProfiler(method="search_endpoint", step="build index if missing"):
                    # get the parquet files metadata and the dataset revision from the cache
                    parquet_metadata_response = get_cache_entry_from_parquet_metadata_job(
                        dataset=dataset,
                        config=config,
                        hf_endpoint=hf_endpoint,
                        hf_token=hf_token,
                        hf_timeout_seconds=hf_timeout_seconds,
                        blocked_datasets=blocked_datasets,
                        storage_clients=storage_clients,
                    )
                    revision = parquet_metadata_response["dataset_git_revision"]
                    if parquet_metadata_response["http_status"] != HTTPStatus.OK:
                        return get_json_error_response(
                            content=parquet_metadata_response["content"],
                            status_code=parquet_metadata_response["http_status"],
                            max_age=max_age_short,
                            error_code=parquet_metadata_response["error_code"],
                            revision=revision,
                        )
                    content_parquet_metadata = parquet_metadata_response["content"]
                    split_parquet_files = [
                        parquet_file
                        for parquet_file in content_parquet_metadata["parquet_files_metadata"]
                        if parquet_file["config"] == config and parquet_file["split"] == split
                    ]
                    index_file_location, partial = await get_index_file_location_and_build_if_missing(
                        duckdb_index_file_directory=duckdb_index_file_directory,
                        dataset=dataset,
                        config=config,
                        split=split,
                        revision=revision,
                        hf_token=hf_token,
                        max_split_size_bytes=max_split_size_bytes,
                        extensions_directory=extensions_directory,
                        parquet_metadata_directory=parquet_metadata_directory,
                        split_parquet_files=split_parquet_files,
                        features=content_parquet_metadata["features"],
                    )
                    # features must contain the row idx column for full_text_search
                    features = Features.from_dict(content_parquet_metadata["features"])
                    features[ROW_IDX_COLUMN] = Value("int64")
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
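
With such a helper in place, each route body shrinks to a single call. A usage sketch for search.py follows (filter.py would be identical except for the profiler label); the names are the hypothetical ones introduced above.

with StepProfiler(method="search_endpoint", step="build index if missing"):
    result = await get_split_index_or_error(
        dataset=dataset,
        config=config,
        split=split,
        hf_endpoint=hf_endpoint,
        hf_token=hf_token,
        hf_timeout_seconds=hf_timeout_seconds,
        blocked_datasets=blocked_datasets,
        storage_clients=storage_clients,
        duckdb_index_file_directory=duckdb_index_file_directory,
        max_split_size_bytes=max_split_size_bytes,
        extensions_directory=extensions_directory,
        parquet_metadata_directory=parquet_metadata_directory,
        max_age_short=max_age_short,
    )
    if isinstance(result, Response):
        return result  # a cached error, already serialized by get_json_error_response
    features, revision = result.features, result.revision
    index_file_location, partial = result.index_file_location, result.partial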



