in lib/ramble/spack/stage.py [0:0]
def fetch(self, mirror_only=False, err_msg=None):
    """Retrieves the code or archive

    Args:
        mirror_only (bool): only fetch from a mirror
        err_msg (str or None): the error message to display if all fetchers
            fail or ``None`` for the default fetch failure message
    """
    fetchers = []
    if not mirror_only:
        fetchers.append(self.default_fetcher)
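    # Note: mirror and cache fetchers are inserted at the front of this
    # list below, so the package's own fetcher is tried last.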

    # TODO: move mirror logic out of here and clean it up!
    # TODO: Or @alalazo may have some ideas about how to use a
    # TODO: CompositeFetchStrategy here.
    self.skip_checksum_for_mirror = True
    if self.mirror_paths:
        # Join URLs of mirror roots with mirror paths. Because urljoin()
        # strips everything past the final '/' in the root, we add a '/'
        # if it is not present.
        mirror_urls = {}
        for mirror in spack.mirror.MirrorCollection().values():
            for rel_path in self.mirror_paths:
                mirror_url = url_util.join(mirror.fetch_url, rel_path)
                mirror_urls[mirror_url] = {}
                if mirror.get_access_pair("fetch") or \
                        mirror.get_access_token("fetch") or \
                        mirror.get_profile("fetch"):
                    mirror_urls[mirror_url] = {
                        "access_token": mirror.get_access_token("fetch"),
                        "access_pair": mirror.get_access_pair("fetch"),
                        "access_profile": mirror.get_profile("fetch"),
                        "endpoint_url": mirror.get_endpoint_url("fetch")
                    }
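
        # mirror_urls now maps each candidate mirror URL to its connection
        # settings (an empty dict for mirrors that need no credentials).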

        # If this archive is normally fetched from a tarball URL,
        # then use the same digest. `spack mirror` ensures that
        # the checksum will be the same.
        digest = None
        expand = True
        extension = None
        if isinstance(self.default_fetcher, fs.URLFetchStrategy):
            digest = self.default_fetcher.digest
            expand = self.default_fetcher.expand_archive
            extension = self.default_fetcher.extension

        # Have to skip the checksum for things archived from
        # repositories. How can this be made safer?
        self.skip_checksum_for_mirror = not bool(digest)
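        # With no digest (e.g. a VCS checkout rather than a tarball), there
        # is nothing to verify a mirror download against, so checksum
        # validation is skipped for mirror fetches.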

        # Add URL strategies for all the mirrors with the digest.
        # Insert fetchers in the order that the URLs are provided.
        for url in reversed(list(mirror_urls.keys())):
            fetchers.insert(
                0, fs.from_url_scheme(
                    url, digest, expand=expand, extension=extension,
                    connection=mirror_urls[url]))

        if self.default_fetcher.cachable:
            for rel_path in reversed(list(self.mirror_paths)):
                cache_fetcher = spack.caches.fetch_cache.fetcher(
                    rel_path, digest, expand=expand,
                    extension=extension)
                fetchers.insert(0, cache_fetcher)
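
    # When mirrors are configured, fetchers are now ordered: the local
    # fetch cache first (for cachable fetchers), then each mirror, then
    # the default fetcher (omitted when mirror_only is set).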

    def generate_fetchers():
        for fetcher in fetchers:
            yield fetcher
        # The search function may be expensive, so wait until now to
        # call it so the user can stop if a prior fetcher succeeded.
        if self.search_fn and not mirror_only:
            dynamic_fetchers = self.search_fn()
            for fetcher in dynamic_fetchers:
                yield fetcher

    def print_errors(errors):
        for msg in errors:
            tty.debug(msg)

    errors = []
    for fetcher in generate_fetchers():
        try:
            fetcher.stage = self
            self.fetcher = fetcher
            self.fetcher.fetch()
            break
        except spack.fetch_strategy.NoCacheError:
            # Don't bother reporting when something is not cached.
            continue
        except spack.error.SpackError as e:
            errors.append('Fetching from {0} failed.'.format(fetcher))
            tty.debug(e)
            continue
    else:
        # This else belongs to the for loop: it runs only when no fetcher
        # succeeded (the loop finished without hitting break).
        print_errors(errors)

        self.fetcher = self.default_fetcher
        default_msg = 'All fetchers failed for {0}'.format(self.name)
        raise fs.FetchError(err_msg or default_msg, None)

    print_errors(errors)
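
For context, a minimal sketch of how a caller might drive this method. The import path, the example URL, and the surrounding `Stage` API (context manager, `check()`, `expand_archive()`) are assumptions based on the upstream Spack module this file is vendored from:

    from spack.stage import Stage

    url = 'https://example.com/pkg-1.0.tar.gz'  # hypothetical archive URL
    with Stage(url) as stage:      # creates, and later destroys, the stage dir
        stage.fetch()              # the method above: cache/mirrors, then URL
        stage.check()              # verify the checksum when a digest is known
        stage.expand_archive()     # unpack into the stage's source path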