def generate_gitlab_ci_yaml()

in lib/ramble/spack/ci.py [0:0]


def generate_gitlab_ci_yaml(env, print_summary, output_file,
                            prune_dag=False, check_index_only=False,
                            run_optimizer=False, use_dependencies=False,
                            artifacts_root=None, remote_mirror_override=None):
    """ Generate a gitlab yaml file to run a dynamic child pipeline from
        the spec matrix in the active environment.

    Arguments:
        env (spack.environment.Environment): Activated environment object
            which must contain a gitlab-ci section describing how to map
            specs to runners
        print_summary (bool): If True, print a summary of all the jobs,
            grouped by the stages in which they were placed.
        output_file (str): File path where generated file should be written
        prune_dag (bool): If True, do not generate jobs for specs that are
            already built on the mirror.
        check_index_only (bool): If True, attempt to fetch the mirror index
            and only use that to determine whether built specs on the mirror
            are up to date (this mode results in faster yaml generation).
            Otherwise, also check each spec directly by url (useful if there
            is no index or it might be out of date).
        run_optimizer (bool): If True, post-process the generated yaml to try
            to reduce its size (attempts to collect repeated configuration
            and replace it with definitions).
        use_dependencies (bool): If True, use "dependencies" rather than
            "needs" ("needs" allows DAG scheduling).  Useful if the gitlab
            instance cannot be configured to handle more than a few "needs"
            per job.
        artifacts_root (str): Path where artifacts like logs, environment
            files (spack.yaml, spack.lock), etc should be written.  GitLab
            requires this to be within the project directory.
        remote_mirror_override (str): Typically only needed when one spack.yaml
            is used to populate several mirrors with binaries, based on some
            criteria.  Spack protected pipelines populate different mirrors based
            on branch name, facilitated by this option.
    """
    with spack.concretize.disable_compiler_existence_check():
        with env.write_transaction():
            env.concretize()
            env.write()

    yaml_root = ev.config_dict(env.yaml)

    if 'gitlab-ci' not in yaml_root:
        tty.die('Environment yaml does not have "gitlab-ci" section')

    gitlab_ci = yaml_root['gitlab-ci']

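    # Optional CDash reporting: if the environment manifest has a "cdash"
    # section, record the build group and connection info (plus an auth token
    # from the environment, if present) so results can be reported.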
    build_group = None
    enable_cdash_reporting = False
    cdash_auth_token = None

    if 'cdash' in yaml_root:
        enable_cdash_reporting = True
        ci_cdash = yaml_root['cdash']
        build_group = ci_cdash['build-group']
        cdash_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        cdash_site = ci_cdash['site']

        if 'SPACK_CDASH_AUTH_TOKEN' in os.environ:
            tty.verbose("Using CDash auth token from environment")
            cdash_auth_token = os.environ.get('SPACK_CDASH_AUTH_TOKEN')

    prune_untouched_packages = os.environ.get('SPACK_PRUNE_UNTOUCHED', None)
    if prune_untouched_packages:
        # Requested to prune untouched packages, but assume we won't do that
        # unless we're actually in a git repo.
        prune_untouched_packages = False
        rev1, rev2 = get_change_revisions()
        tty.debug('Got following revisions: rev1={0}, rev2={1}'.format(rev1, rev2))
        if rev1 and rev2:
            # If the stack file itself did not change, proceed with pruning
            if not get_stack_changed(env.manifest_path, rev1, rev2):
                prune_untouched_packages = True
                affected_pkgs = compute_affected_packages(rev1, rev2)
                tty.debug('affected pkgs:')
                for p in affected_pkgs:
                    tty.debug('  {0}'.format(p))
                affected_specs = get_spec_filter_list(env, affected_pkgs)
                tty.debug('all affected specs:')
                for s in affected_specs:
                    tty.debug('  {0}'.format(s.name))

    # Downstream jobs will "need" (depend on, for both scheduling and
    # artifacts, which include the spack.lock file) this pipeline generation
    # job by both name and pipeline id.  If those environment variables
    # do not exist, then maybe this is just running in a shell, in which
    # case, there is no expectation gitlab will ever run the generated
    # pipeline and those environment variables do not matter.
    generate_job_name = os.environ.get('CI_JOB_NAME', 'job-does-not-exist')
    parent_pipeline_id = os.environ.get('CI_PIPELINE_ID', 'pipeline-does-not-exist')

    # Values: "spack_pull_request", "spack_protected_branch", or not set
    spack_pipeline_type = os.environ.get('SPACK_PIPELINE_TYPE', None)

    spack_buildcache_copy = os.environ.get('SPACK_COPY_BUILDCACHE', None)

    if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:
        tty.die('spack ci generate requires an env containing a mirror')

    ci_mirrors = yaml_root['mirrors']
    mirror_urls = [url for url in ci_mirrors.values()]
    remote_mirror_url = mirror_urls[0]

    # Check for a list of "known broken" specs that we should not bother
    # trying to build.
    broken_specs_url = ''
    known_broken_specs_encountered = []
    if 'broken-specs-url' in gitlab_ci:
        broken_specs_url = gitlab_ci['broken-specs-url']

    enable_artifacts_buildcache = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_buildcache = gitlab_ci['enable-artifacts-buildcache']

    rebuild_index_enabled = True
    if 'rebuild-index' in gitlab_ci and gitlab_ci['rebuild-index'] is False:
        rebuild_index_enabled = False

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']

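    # Collect any bootstrap phases (e.g. compilers to build first) declared in
    # the gitlab-ci section; the main "specs" phase is always appended last.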
    bootstrap_specs = []
    phases = []
    if 'bootstrap' in gitlab_ci:
        for phase in gitlab_ci['bootstrap']:
            try:
                phase_name = phase.get('name')
                strip_compilers = phase.get('compiler-agnostic')
            except AttributeError:
                phase_name = phase
                strip_compilers = False
            phases.append({
                'name': phase_name,
                'strip-compilers': strip_compilers,
            })

            for bs in env.spec_lists[phase_name]:
                bootstrap_specs.append({
                    'spec': bs,
                    'phase-name': phase_name,
                    'strip-compilers': strip_compilers,
                })

    phases.append({
        'name': 'specs',
        'strip-compilers': False,
    })

    # If a remote mirror override (alternate buildcache destination) was
    # specified, add it here in case it has already built hashes we might
    # generate.
    mirrors_to_check = None
    if remote_mirror_override:
        if spack_pipeline_type == 'spack_protected_branch':
            # Overriding the main mirror in this case might result
            # in skipping jobs on a release pipeline because specs are
            # up to date in develop.  Eventually we want to notice and take
            # advantage of this by scheduling a job to copy the spec from
            # develop to the release, but until we have that, this makes
            # sure we schedule a rebuild job if the spec isn't already in
            # override mirror.
            mirrors_to_check = {
                'override': remote_mirror_override
            }
        else:
            spack.mirror.add(
                'ci_pr_mirror', remote_mirror_override, cfg.default_modify_scope())

    pipeline_artifacts_dir = artifacts_root
    if not pipeline_artifacts_dir:
        proj_dir = os.environ.get('CI_PROJECT_DIR', os.getcwd())
        pipeline_artifacts_dir = os.path.join(proj_dir, 'jobs_scratch_dir')

    pipeline_artifacts_dir = os.path.abspath(pipeline_artifacts_dir)
    concrete_env_dir = os.path.join(
        pipeline_artifacts_dir, 'concrete_environment')

    # Now that we've added the mirrors we know about, they should be properly
    # reflected in the environment manifest file, so copy that into the
    # concrete environment directory, along with the spack.lock file.
    if not os.path.exists(concrete_env_dir):
        os.makedirs(concrete_env_dir)
    shutil.copyfile(env.manifest_path,
                    os.path.join(concrete_env_dir, 'spack.yaml'))
    shutil.copyfile(env.lock_path,
                    os.path.join(concrete_env_dir, 'spack.lock'))

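    # Subdirectories of the pipeline artifacts dir used by the generated jobs
    # for logs, reproduction bundles, a local buildcache mirror, and user data.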
    job_log_dir = os.path.join(pipeline_artifacts_dir, 'logs')
    job_repro_dir = os.path.join(pipeline_artifacts_dir, 'reproduction')
    local_mirror_dir = os.path.join(pipeline_artifacts_dir, 'mirror')
    user_artifacts_dir = os.path.join(pipeline_artifacts_dir, 'user_data')

    # We communicate relative paths to the downstream jobs to avoid issues in
    # situations where the CI_PROJECT_DIR varies between the pipeline
    # generation job and the rebuild jobs.  This can happen when gitlab
    # checks out the project into a runner-specific directory, for example,
    # and different runners are picked for generate and rebuild jobs.
    ci_project_dir = os.environ.get('CI_PROJECT_DIR')
    rel_artifacts_root = os.path.relpath(
        pipeline_artifacts_dir, ci_project_dir)
    rel_concrete_env_dir = os.path.relpath(
        concrete_env_dir, ci_project_dir)
    rel_job_log_dir = os.path.relpath(
        job_log_dir, ci_project_dir)
    rel_job_repro_dir = os.path.relpath(
        job_repro_dir, ci_project_dir)
    rel_local_mirror_dir = os.path.relpath(
        local_mirror_dir, ci_project_dir)
    rel_user_artifacts_dir = os.path.relpath(
        user_artifacts_dir, ci_project_dir)

    # Speed up staging by first fetching binary indices from all mirrors
    # (including the per-PR mirror we may have just added above).
    try:
        bindist.binary_index.update()
    except bindist.FetchCacheError as e:
        tty.error(e)

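    # Stage the specs of each phase: group them into ordered stages so that a
    # spec's dependencies always land in earlier stages, and record whether
    # each spec needs to be rebuilt.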
    staged_phases = {}
    try:
        for phase in phases:
            phase_name = phase['name']
            if phase_name == 'specs':
                # Anything in the "specs" of the environment are already
                # concretized by the block at the top of this method, so we
                # only need to find the concrete versions, and then avoid
                # re-concretizing them needlessly later on.
                concrete_phase_specs = [
                    concrete for abstract, concrete in env.concretized_specs()
                    if abstract in env.spec_lists[phase_name]
                ]
            else:
                # Any spec lists in other definitions of the environment (but
                # not in "specs") are not yet concretized, so we have to
                # concretize them explicitly here.
                concrete_phase_specs = env.spec_lists[phase_name]
                with spack.concretize.disable_compiler_existence_check():
                    for phase_spec in concrete_phase_specs:
                        phase_spec.concretize()
            staged_phases[phase_name] = stage_spec_jobs(
                concrete_phase_specs,
                check_index_only=check_index_only,
                mirrors_to_check=mirrors_to_check)
    finally:
        # Clean up remote mirror override if enabled
        if remote_mirror_override:
            if spack_pipeline_type != 'spack_protected_branch':
                spack.mirror.remove('ci_pr_mirror', cfg.default_modify_scope())

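    # Accumulators for the generated job objects and for summary reporting.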
    all_job_names = []
    output_object = {}
    job_id = 0
    stage_id = 0

    stage_names = []

    max_length_needs = 0
    max_needs_job = ''

    # If this is configured, spack will fail "spack ci generate" if it
    # generates any hash which exists under the broken specs url.
    broken_spec_urls = None
    if broken_specs_url:
        if broken_specs_url.startswith('http'):
            # To make checking each spec against the list faster, we require
            # a url protocol that allows us to iterate the url in advance.
            tty.msg('Cannot use an http(s) url for broken specs, ignoring')
        else:
            broken_spec_urls = web_util.list_url(broken_specs_url)

    before_script, after_script = None, None
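    # Main generation loop: emit one rebuild job per concrete spec, walking
    # phases, then stages, then the specs within each stage.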
    for phase in phases:
        phase_name = phase['name']
        strip_compilers = phase['strip-compilers']

        main_phase = _is_main_phase(phase_name)
        spec_labels, dependencies, stages = staged_phases[phase_name]

        for stage_jobs in stages:
            stage_name = 'stage-{0}'.format(stage_id)
            stage_names.append(stage_name)
            stage_id += 1

            for spec_label in stage_jobs:
                spec_record = spec_labels[spec_label]
                root_spec = spec_record['rootSpec']
                pkg_name = _pkg_name_from_spec_label(spec_label)
                release_spec = root_spec[pkg_name]
                release_spec_dag_hash = release_spec.dag_hash()

                if prune_untouched_packages:
                    if release_spec not in affected_specs:
                        tty.debug('Pruning {0}, untouched by change.'.format(
                            release_spec.name))
                        spec_record['needs_rebuild'] = False
                        continue

                runner_attribs = _find_matching_config(
                    release_spec, gitlab_ci)

                if not runner_attribs:
                    tty.warn('No match found for {0}, skipping it'.format(
                        release_spec))
                    continue

                tags = [tag for tag in runner_attribs['tags']]

                if spack_pipeline_type is not None:
                    # For spack pipelines "public" and "protected" are reserved tags
                    tags = _remove_reserved_tags(tags)
                    if spack_pipeline_type == 'spack_protected_branch':
                        tags.extend(['aws', 'protected'])
                    elif spack_pipeline_type == 'spack_pull_request':
                        tags.extend(['public'])

                variables = {}
                if 'variables' in runner_attribs:
                    variables.update(runner_attribs['variables'])

                image_name = None
                image_entry = None
                if 'image' in runner_attribs:
                    build_image = runner_attribs['image']
                    try:
                        image_name = build_image.get('name')
                        entrypoint = build_image.get('entrypoint')
                        image_entry = [p for p in entrypoint]
                    except AttributeError:
                        image_name = build_image

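                # Default job script: activate the concrete environment and
                # run "spack ci rebuild"; a "script" entry in the matched
                # runner config replaces this entirely.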
                job_script = ['spack env activate --without-view .']

                if artifacts_root:
                    job_script.insert(0, 'cd {0}'.format(concrete_env_dir))

                job_script.extend([
                    'spack ci rebuild'
                ])

                if 'script' in runner_attribs:
                    job_script = [s for s in runner_attribs['script']]

                before_script = None
                if 'before_script' in runner_attribs:
                    before_script = [
                        s for s in runner_attribs['before_script']
                    ]

                after_script = None
                if 'after_script' in runner_attribs:
                    after_script = [s for s in runner_attribs['after_script']]

                osname = str(release_spec.architecture)
                job_name = get_job_name(phase_name, strip_compilers,
                                        release_spec, osname, build_group)

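                # Tell the rebuild job how to handle its compiler: bootstrap
                # phases locate any available compiler, the main phase installs
                # the required compiler if it is missing, and a single-phase
                # pipeline needs no compiler action.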
                compiler_action = 'NONE'
                if len(phases) > 1:
                    compiler_action = 'FIND_ANY'
                    if _is_main_phase(phase_name):
                        compiler_action = 'INSTALL_MISSING'

                job_vars = {
                    'SPACK_ROOT_SPEC': _format_root_spec(
                        root_spec, main_phase, strip_compilers),
                    'SPACK_JOB_SPEC_DAG_HASH': release_spec_dag_hash,
                    'SPACK_JOB_SPEC_PKG_NAME': release_spec.name,
                    'SPACK_COMPILER_ACTION': compiler_action
                }

                job_dependencies = []
                if spec_label in dependencies:
                    if enable_artifacts_buildcache:
                        # Get dependencies transitively, so they're all
                        # available in the artifacts buildcache.
                        dep_jobs = [
                            d for d in release_spec.traverse(deptype=all,
                                                             root=False)
                        ]
                    else:
                        # In this case, "needs" is only used for scheduling
                        # purposes, so we only get the direct dependencies.
                        dep_jobs = []
                        for dep_label in dependencies[spec_label]:
                            dep_pkg = _pkg_name_from_spec_label(dep_label)
                            dep_root = spec_labels[dep_label]['rootSpec']
                            dep_jobs.append(dep_root[dep_pkg])

                    job_dependencies.extend(
                        _format_job_needs(phase_name, strip_compilers,
                                          dep_jobs, osname, build_group,
                                          prune_dag, spec_labels,
                                          enable_artifacts_buildcache))

                rebuild_spec = spec_record['needs_rebuild']

                # This next section helps gitlab make sure the right
                # bootstrapped compiler exists in the artifacts buildcache by
                # creating an artificial dependency between this spec and its
                # compiler.  So, if we are in the main phase, and if the
                # compiler we are supposed to use is listed in any of the
                # bootstrap spec lists, then we will add more dependencies to
                # the job (that compiler and maybe its dependencies as well).
                if _is_main_phase(phase_name):
                    spec_arch_family = (release_spec.architecture
                                                    .target
                                                    .microarchitecture
                                                    .family)
                    compiler_pkg_spec = compilers.pkg_spec_for_compiler(
                        release_spec.compiler)
                    for bs in bootstrap_specs:
                        c_spec = bs['spec']
                        bs_arch = c_spec.architecture
                        bs_arch_family = (bs_arch.target
                                                 .microarchitecture
                                                 .family)
                        if (c_spec.satisfies(compiler_pkg_spec) and
                            bs_arch_family == spec_arch_family):
                            # We found the bootstrap compiler this release spec
                            # should be built with, so for DAG scheduling
                            # purposes, we will at least add the compiler spec
                            # to the jobs "needs".  But if artifact buildcache
                            # is enabled, we'll have to add all transtive deps
                            # of the compiler as well.

                            # Here we check whether the bootstrapped compiler
                            # needs to be rebuilt.  Until compilers are proper
                            # dependencies, we artificially force the spec to
                            # be rebuilt if the compiler targeted to build it
                            # needs to be rebuilt.
                            bs_specs, _, _ = staged_phases[bs['phase-name']]
                            c_spec_key = _spec_deps_key(c_spec)
                            rbld_comp = bs_specs[c_spec_key]['needs_rebuild']
                            rebuild_spec = rebuild_spec or rbld_comp
                            # Also update the record so dependents do not fail to
                            # add this spec to their "needs"
                            spec_record['needs_rebuild'] = rebuild_spec

                            dep_jobs = [c_spec]
                            if enable_artifacts_buildcache:
                                dep_jobs = [
                                    d for d in c_spec.traverse(deptype=all)
                                ]

                            job_dependencies.extend(
                                _format_job_needs(bs['phase-name'],
                                                  bs['strip-compilers'],
                                                  dep_jobs,
                                                  str(bs_arch),
                                                  build_group,
                                                  prune_dag,
                                                  bs_specs,
                                                  enable_artifacts_buildcache))
                        else:
                            debug_msg = ''.join([
                                'Considered compiler {0} for spec ',
                                '{1}, but rejected it either because it was ',
                                'not the compiler required by the spec, or ',
                                'because the target arch families of the ',
                                'spec and the compiler did not match'
                            ]).format(c_spec, release_spec)
                            tty.debug(debug_msg)

                if prune_dag and not rebuild_spec:
                    tty.debug('Pruning {0}, does not need rebuild.'.format(
                        release_spec.name))
                    continue

                if (broken_spec_urls is not None and
                        release_spec_dag_hash in broken_spec_urls):
                    known_broken_specs_encountered.append('{0} ({1})'.format(
                        release_spec, release_spec_dag_hash))

                if artifacts_root:
                    job_dependencies.append({
                        'job': generate_job_name,
                        'pipeline': '{0}'.format(parent_pipeline_id)
                    })

                job_vars['SPACK_SPEC_NEEDS_REBUILD'] = str(rebuild_spec)

                if enable_cdash_reporting:
                    cdash_build_name = _get_cdash_build_name(
                        release_spec, build_group)
                    all_job_names.append(cdash_build_name)
                    job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name

                variables.update(job_vars)

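                # Every job publishes its logs, reproduction data, and user
                # artifacts; with the artifacts buildcache enabled, the built
                # binaries are published as artifacts too.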
                artifact_paths = [
                    rel_job_log_dir,
                    rel_job_repro_dir,
                    rel_user_artifacts_dir
                ]

                if enable_artifacts_buildcache:
                    bc_root = os.path.join(
                        local_mirror_dir, 'build_cache')
                    artifact_paths.extend([os.path.join(bc_root, p) for p in [
                        bindist.tarball_name(release_spec, '.spec.json'),
                        bindist.tarball_directory_name(release_spec),
                    ]])

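                # Assemble the full gitlab job definition for this spec.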
                job_object = {
                    'stage': stage_name,
                    'variables': variables,
                    'script': job_script,
                    'tags': tags,
                    'artifacts': {
                        'paths': artifact_paths,
                        'when': 'always',
                    },
                    'needs': sorted(job_dependencies, key=lambda d: d['job']),
                    'retry': {
                        'max': 2,
                        'when': JOB_RETRY_CONDITIONS,
                    },
                    'interruptible': True
                }

                length_needs = len(job_dependencies)
                if length_needs > max_length_needs:
                    max_length_needs = length_needs
                    max_needs_job = job_name

                if before_script:
                    job_object['before_script'] = before_script

                if after_script:
                    job_object['after_script'] = after_script

                if image_name:
                    job_object['image'] = image_name
                    if image_entry is not None:
                        job_object['image'] = {
                            'name': image_name,
                            'entrypoint': image_entry,
                        }

                output_object[job_name] = job_object
                job_id += 1

    if print_summary:
        for phase in phases:
            phase_name = phase['name']
            tty.msg('Stages for phase "{0}"'.format(phase_name))
            phase_stages = staged_phases[phase_name]
            _print_staging_summary(*phase_stages)

    tty.debug('{0} build jobs generated in {1} stages'.format(
        job_id, stage_id))

    if job_id > 0:
        tty.debug('The max_needs_job is {0}, with {1} needs'.format(
            max_needs_job, max_length_needs))

    # Use "all_job_names" to populate the build group for this set
    if enable_cdash_reporting and cdash_auth_token:
        try:
            _populate_buildgroup(all_job_names, build_group, cdash_project,
                                 cdash_site, cdash_auth_token, cdash_url)
        except (SpackError, HTTPError, URLError) as err:
            tty.warn('Problem populating buildgroup: {0}'.format(err))
    else:
        tty.warn('Unable to populate buildgroup without CDash credentials')

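    # Configuration shared by the service jobs generated below: optional
    # attributes copied from "service-job-attributes" and a common retry
    # policy.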
    service_job_config = None
    if 'service-job-attributes' in gitlab_ci:
        service_job_config = gitlab_ci['service-job-attributes']

    default_attrs = [
        'image',
        'tags',
        'variables',
        'before_script',
        # 'script',
        'after_script',
    ]

    service_job_retries = {
        'max': 2,
        'when': [
            'runner_system_failure',
            'stuck_or_timeout_failure'
        ]
    }

    if job_id > 0:
        if temp_storage_url_prefix:
            # There were some rebuild jobs scheduled, so we will need to
            # schedule a job to clean up the temporary storage location
            # associated with this pipeline.
            stage_names.append('cleanup-temp-storage')
            cleanup_job = {}

            if service_job_config:
                _copy_attributes(default_attrs,
                                 service_job_config,
                                 cleanup_job)

            if 'tags' in cleanup_job:
                service_tags = _remove_reserved_tags(cleanup_job['tags'])
                cleanup_job['tags'] = service_tags

            cleanup_job['stage'] = 'cleanup-temp-storage'
            cleanup_job['script'] = [
                'spack -d mirror destroy --mirror-url {0}/$CI_PIPELINE_ID'.format(
                    temp_storage_url_prefix)
            ]
            cleanup_job['when'] = 'always'
            cleanup_job['retry'] = service_job_retries
            cleanup_job['interruptible'] = True

            output_object['cleanup'] = cleanup_job

        if ('signing-job-attributes' in gitlab_ci and
                spack_pipeline_type == 'spack_protected_branch'):
            # External signing: generate a job to check and sign binary pkgs
            stage_names.append('stage-sign-pkgs')
            signing_job_config = gitlab_ci['signing-job-attributes']
            signing_job = {}

            signing_job_attrs_to_copy = [
                'image',
                'tags',
                'variables',
                'before_script',
                'script',
                'after_script',
            ]

            _copy_attributes(signing_job_attrs_to_copy,
                             signing_job_config,
                             signing_job)

            signing_job_tags = []
            if 'tags' in signing_job:
                signing_job_tags = _remove_reserved_tags(signing_job['tags'])

            for tag in ['aws', 'protected', 'notary']:
                if tag not in signing_job_tags:
                    signing_job_tags.append(tag)
            signing_job['tags'] = signing_job_tags

            signing_job['stage'] = 'stage-sign-pkgs'
            signing_job['when'] = 'always'
            signing_job['retry'] = {
                'max': 2,
                'when': ['always']
            }
            signing_job['interruptible'] = True

            output_object['sign-pkgs'] = signing_job

        if spack_buildcache_copy:
            # Generate a job to copy the contents from wherever the builds are
            # getting pushed to the url specified in the "SPACK_COPY_BUILDCACHE"
            # environment variable.
            src_url = remote_mirror_override or remote_mirror_url
            dest_url = spack_buildcache_copy

            stage_names.append('stage-copy-buildcache')
            copy_job = {
                'stage': 'stage-copy-buildcache',
                'tags': ['spack', 'public', 'medium', 'aws', 'x86_64'],
                'image': 'ghcr.io/spack/python-aws-bash:0.0.1',
                'when': 'on_success',
                'interruptible': True,
                'retry': service_job_retries,
                'script': [
                    '. ./share/spack/setup-env.sh',
                    'spack --version',
                    'aws s3 sync --exclude *index.json* --exclude *pgp* {0} {1}'.format(
                        src_url, dest_url)
                ]
            }

            output_object['copy-mirror'] = copy_job

        if rebuild_index_enabled:
            # Add a final job to regenerate the index
            stage_names.append('stage-rebuild-index')
            final_job = {}

            if service_job_config:
                _copy_attributes(default_attrs,
                                 service_job_config,
                                 final_job)

            if 'tags' in final_job:
                service_tags = _remove_reserved_tags(final_job['tags'])
                final_job['tags'] = service_tags

            index_target_mirror = mirror_urls[0]
            if remote_mirror_override:
                index_target_mirror = remote_mirror_override

            final_job['stage'] = 'stage-rebuild-index'
            final_job['script'] = [
                'spack buildcache update-index --keys -d {0}'.format(
                    index_target_mirror)
            ]
            final_job['when'] = 'always'
            final_job['retry'] = service_job_retries
            final_job['interruptible'] = True

            output_object['rebuild-index'] = final_job

        output_object['stages'] = stage_names

        # Capture the version of spack used to generate the pipeline, transform it
        # into a value that can be passed to "git checkout", and save it in a
        # global yaml variable
        spack_version = spack.main.get_version()
        version_to_clone = None
        v_match = re.match(r"^\d+\.\d+\.\d+$", spack_version)
        if v_match:
            version_to_clone = 'v{0}'.format(v_match.group(0))
        else:
            v_match = re.match(r"^[^-]+-[^-]+-([a-f\d]+)$", spack_version)
            if v_match:
                version_to_clone = v_match.group(1)
            else:
                version_to_clone = spack_version

        output_object['variables'] = {
            'SPACK_ARTIFACTS_ROOT': rel_artifacts_root,
            'SPACK_CONCRETE_ENV_DIR': rel_concrete_env_dir,
            'SPACK_VERSION': spack_version,
            'SPACK_CHECKOUT_VERSION': version_to_clone,
            'SPACK_REMOTE_MIRROR_URL': remote_mirror_url,
            'SPACK_JOB_LOG_DIR': rel_job_log_dir,
            'SPACK_JOB_REPRO_DIR': rel_job_repro_dir,
            'SPACK_LOCAL_MIRROR_DIR': rel_local_mirror_dir,
            'SPACK_PIPELINE_TYPE': str(spack_pipeline_type)
        }

        if remote_mirror_override:
            (output_object['variables']
                          ['SPACK_REMOTE_MIRROR_OVERRIDE']) = remote_mirror_override

        spack_stack_name = os.environ.get('SPACK_CI_STACK_NAME', None)
        if spack_stack_name:
            output_object['variables']['SPACK_CI_STACK_NAME'] = spack_stack_name

        sorted_output = {}
        for output_key, output_value in sorted(output_object.items()):
            sorted_output[output_key] = output_value

        # TODO(opadron): remove this or refactor
        if run_optimizer:
            import spack.ci_optimization as ci_opt
            sorted_output = ci_opt.optimizer(sorted_output)

        # TODO(opadron): remove this or refactor
        if use_dependencies:
            import spack.ci_needs_workaround as cinw
            sorted_output = cinw.needs_to_dependencies(sorted_output)
    else:
        # No jobs were generated
        tty.debug('No specs to rebuild, generating no-op job')
        noop_job = {}

        if service_job_config:
            _copy_attributes(default_attrs,
                             service_job_config,
                             noop_job)

        if 'script' not in noop_job:
            noop_job['script'] = [
                'echo "All specs already up to date, nothing to rebuild."',
            ]

        noop_job['retry'] = service_job_retries

        sorted_output = {'no-specs-to-rebuild': noop_job}

    if known_broken_specs_encountered:
        error_msg = (
            'Pipeline generation failed due to the presence of the '
            'following specs that are known to be broken in develop:\n')
        for broken_spec in known_broken_specs_encountered:
            error_msg += '* {0}\n'.format(broken_spec)
        tty.die(error_msg)

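    # Write the generated child pipeline to the requested output file.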
    with open(output_file, 'w') as outf:
        outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
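

Usage sketch (not part of ci.py): assuming an activated environment whose
manifest contains a gitlab-ci section, the generator can be driven roughly the
way the "spack ci generate" command does.  The accessor name
ev.active_environment() and the exact import paths are assumptions here.

    import spack.ci as spack_ci
    import spack.environment as ev

    env = ev.active_environment()   # assumed accessor; must not be None
    spack_ci.generate_gitlab_ci_yaml(
        env,
        print_summary=True,
        output_file='.gitlab-ci.yml',
        prune_dag=True,           # skip jobs for specs already on the mirror
        check_index_only=True,    # trust the buildcache index for speed
    )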