def fetch()

in lib/ramble/ramble/stage.py [0:0]


    def fetch(self, mirror_only=False, err_msg=None):
        """Retrieves the code or archive

        Args:
            mirror_only (bool): only fetch from a mirror
            err_msg (str | None): the error message to display if all fetchers
                fail or ``None`` for the default fetch failure message
        """
        fetchers = []
        if not mirror_only:
            fetchers.append(self.default_fetcher)

        # TODO: move mirror logic out of here and clean it up!
        # TODO: Or @alalazo may have some ideas about how to use a
        # TODO: CompositeFetchStrategy here.
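        # Assume checksums must be skipped for mirror fetches; this is refined
        # below once we know whether the default fetcher carries a digest.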
        self.skip_checksum_for_mirror = True
        if self.mirror_paths:
            # Join URLs of mirror roots with mirror paths. urljoin()
            # strips everything past the final '/' in the root, so we
            # add a '/' if it is not present.
            mirror_urls = []
            for mirror in ramble.mirror.MirrorCollection().values():
                for rel_path in self.mirror_paths:
                    mirror_urls.append(url_util.join(mirror.fetch_url, rel_path))

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest.  `ramble mirror` ensures that
            # the checksum will be the same.
            digest = None
            expand = True
            extension = None
            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                digest = self.default_fetcher.digest
                expand = self.default_fetcher.expand_archive
                extension = self.default_fetcher.extension

            # Have to skip the checksum for things archived from
            # repositories.  How can this be made safer?
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest.
            # Insert fetchers in the order that the URLs are provided.
            for url in reversed(mirror_urls):
                fetchers.insert(
                    0, fs.from_url_scheme(url, digest, expand=expand, extension=extension)
                )

            if self.default_fetcher.cachable:
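                # Prefer the local fetch cache over any mirror: cache fetchers
                # go at the front of the list, so they are tried first.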
                for rel_path in reversed(list(self.mirror_paths)):
                    cache_fetcher = ramble.caches.fetch_cache.fetcher(
                        rel_path, digest, expand=expand, extension=extension
                    )
                    fetchers.insert(0, cache_fetcher)

        def generate_fetchers():
            yield from fetchers
            # The search function may be expensive, so defer calling it
            # until now; if a prior fetcher succeeded, the loop breaks
            # before it is ever invoked.
            if self.search_fn and not mirror_only:
                dynamic_fetchers = self.search_fn()
                yield from dynamic_fetchers

        def print_errors(errors):
            for msg in errors:
                logger.debug(msg)

        errors = []
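        # The for/else below runs the else branch only if no fetcher
        # succeeded, i.e. the loop finished without hitting the break.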
        for fetcher in generate_fetchers():
            try:
                fetcher.stage = self
                self.fetcher = fetcher
                self.fetcher.fetch()
                break
            except spack.fetch_strategy.NoCacheError:
                # Don't bother reporting when something is not cached.
                continue
            except (ramble.error.RambleError, spack.util.web.SpackWebError) as e:
                errors.append(f"Fetching from {fetcher} failed.")
                logger.debug(e)
                continue
        else:
            print_errors(errors)

            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg or "All fetchers failed", None)

        print_errors(errors)
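
A minimal usage sketch, assuming a stage object constructed elsewhere in
Ramble that behaves as a context manager like its Spack counterpart; the
stage and fs names and the error message are illustrative assumptions, while
fetch() and fs.FetchError come from the code above:

    # Hypothetical caller: stage setup/teardown is assumed to be handled by
    # the context manager; only fetch() and fs.FetchError are shown above.
    with stage:
        try:
            stage.fetch(mirror_only=False,
                        err_msg="Unable to fetch the input archive")
        except fs.FetchError as e:
            # Every fetcher (cache, mirrors, default, search results) failed;
            # per-fetcher errors were already logged at debug level.
            print(f"Fetch failed: {e}")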