in src/datasets/load.py
def get_module(self) -> DatasetModule:
    # Reuse the Parquet export and the dataset infos already computed by the dataset viewer.
    exported_parquet_files = _dataset_viewer.get_exported_parquet_files(
        dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
    )
    exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
        dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
    )
    # Rebuild a DatasetInfo object for each config from the exported infos.
    dataset_infos = DatasetInfosDict(
        {
            config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
            for config_name in exported_dataset_infos
        }
    )
    # Pin the current commit of the refs/convert/parquet revision,
    # in case new commits were pushed to it in the meantime.
    parquet_commit_hash = (
        HfApi(
            endpoint=config.HF_ENDPOINT,
            token=self.download_config.token,
            library_name="datasets",
            library_version=__version__,
            user_agent=get_datasets_user_agent(self.download_config.user_agent),
        )
        .dataset_info(
            self.name,
            revision="refs/convert/parquet",
            token=self.download_config.token,
            timeout=100.0,
        )
        .sha
    )
    # Map each config to its exported Parquet files at the pinned parquet commit.
    metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
        parquet_commit_hash=parquet_commit_hash,
        exported_parquet_files=exported_parquet_files,
        dataset_infos=dataset_infos,
    )
    # The data is loaded through the packaged "parquet" builder module.
    module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
    builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
        module_path,
        metadata_configs,
        download_config=self.download_config,
    )
    builder_kwargs = {
        "repo_id": self.name,
        "dataset_name": camelcase_to_snakecase(Path(self.name).name),
    }
    return DatasetModule(
        module_path,
        self.commit_hash,
        builder_kwargs,
        dataset_infos=dataset_infos,
        builder_configs_parameters=BuilderConfigsParameters(
            metadata_configs=metadata_configs,
            builder_configs=builder_configs,
            default_config_name=default_config_name,
        ),
    )
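
For context, a minimal sketch of how this method might be exercised. The enclosing factory class name and its constructor arguments below are assumptions inferred from this excerpt and may differ across datasets versions; in normal use the public load_dataset_builder() / load_dataset() entry points resolve the module factory internally.

# Hypothetical usage sketch -- the factory class name and constructor arguments
# are assumptions; load_dataset_builder()/load_dataset() normally drive this path.
from datasets import DownloadConfig, load_dataset_builder
from datasets.load import HubDatasetModuleFactoryWithParquetExport  # assumed class name

factory = HubDatasetModuleFactoryWithParquetExport(
    name="user/dataset",                    # hypothetical Hub repo id
    commit_hash="<dataset-commit-sha>",     # resolved dataset revision to load
    download_config=DownloadConfig(),
)
dataset_module = factory.get_module()
print(dataset_module.module_path)           # packaged "parquet" builder module
print(dataset_module.builder_configs_parameters.default_config_name)

# Equivalent high-level call through the public API:
builder = load_dataset_builder("user/dataset")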