in lib/validation.py [0:0]
def _run_schema() -> dict:
    """Return the voluptuous schema for a complete Litani run.

    The run is the top-level object of the run file: it contains a list of
    pipelines, each of which contains ci_stages, each of which contains
    jobs. The inline comment under each key documents that field of the
    run format; keep comments adjacent to their keys when editing.
    """
    # Imported lazily so that merely importing this module does not
    # require voluptuous to be installed.
    import voluptuous
    return {
        "run_id": str,
        # A globally-unique ID for the run.
        "project": str,
        # A name for the project that this run is part of. This name is used by
        # the HTML report generator and can be used to group related sets of
        # runs, but is otherwise not used by litani.
        "stages": [str],
        # The CI stages that each job can be a member of. Stage names can
        # be provided through the --stages flag of *litani-init(1)*. Default
        # stages "build", "test" and "report" are used if the flag is not used.
        "pools": {voluptuous.Optional(str): int},
        # A mapping from pool names to the depth of the pool. Jobs can be a
        # member of zero or one pool. The depth of a pool that a set of jobs
        # belong to limits the number of those jobs that litani will run in
        # parallel.
        "start_time": _time_str,
        # The time at which the run started.
        "version": str,
        # The version string of the Litani binary that ran this run.
        "version_major": int,
        # Litani's major version number.
        "version_minor": int,
        # Litani's minor version number.
        "version_patch": int,
        # Litani's patch version number.
        "release_candidate": bool,
        # false if this version of Litani is a tagged release.
        voluptuous.Optional("end_time"): _time_str,
        # The time at which the run ended. This key will only exist if *status*
        # is not equal to "in_progress".
        "status": _status(),
        # The state of this run, see the status schema below.
        "aux": dict,
        # A free-form dict that users can add custom information into. There are
        # no constraints on the format of this dict, but it is recommended that
        # users add their information to a sub-dict with a key that indicates
        # its function. For example, to add information pertaining to a CI run,
        # users might add a key called "continuous_integration_data" whose value
        # is a sub-dict containing all required fields.
        "parallelism": voluptuous.Any({
            # This dict contains information about the parallelism level of the jobs
            # that litani runs. This is to measure whether the run is using as many
            # processor cores as possible over the duration of the run.
            voluptuous.Optional("trace"): [{
                # A list of samples of the run's concurrency level.
                "time": _ms_time_str,
                # The time at which the sample was taken.
                "finished": int,
                # How many jobs have finished
                "running": int,
                # How many jobs are running
                "total": int,
                # The total number of jobs
            }],
            voluptuous.Optional("max_parallelism"): int,
            # The maximum parallelism attained over the run
            voluptuous.Optional("n_proc"): voluptuous.Any(None, int),
            # The number of processors detected on this machine
        }),
        "pipelines": [{
            # Each pipeline contains ci_stages which contain jobs.
            "url": str,
            # NOTE(review): presumably a link to this pipeline's section of the
            # HTML report -- confirm against the report generator.
            "name": str,
            # The pipeline name. The set of pipeline names are all the names
            # passed to the --pipeline-name flag of *litani-add-job(1)*.
            "status": _status(),
            # The pipeline's state, see the status schema below.
            "ci_stages": [{
                # Each ci_stage contains a list of jobs.
                "url": str,
                # NOTE(review): presumably a link to this stage's section of the
                # HTML report -- confirm against the report generator.
                "complete": bool,
                # Whether all the jobs in this stage are complete.
                "name": str,
                # The stage's name. This is any of the *stages* of
                # the project.
                "status": _outcome(),
                # The stage's state, see the outcome schema below.
                "progress": voluptuous.All(int, voluptuous.Range(min=0, max=100)),
                # How complete this stage is, as a percentage from 0 to 100.
                "jobs": [voluptuous.Any({
                    # The list of all the jobs in this ci_stage in this pipeline.
                    # There are three different forms the value of this key can
                    # take.
                    "complete": False,
                    # If *complete* is false and no *start_time* key exists,
                    # then this job has not yet started.
                    "duration_str": None,
                    # Must be null: a job that has not started has no duration.
                    "wrapper_arguments": _single_job_schema(),
                    # The arguments passed to this job, see the
                    # single_job_schema schema below.
                }, {
                    "complete": False,
                    # If *complete* is false but the *start_time* key exists,
                    # then the job has started running but has not yet finished.
                    "start_time": _time_str,
                    # The time at which the job started running.
                    "duration_str": None,
                    # Must be null: a still-running job has no final duration.
                    "wrapper_arguments": _single_job_schema(),
                    # The arguments passed to this job, see the
                    # single_job_schema schema below.
                }, {
                    "duration": int,
                    # How long the job ran for.
                    "complete": True,
                    # If *complete* is true, then the job has terminated.
                    "outcome": _outcome(),
                    # The job's outcome, see the outcome schema below.
                    "end_time": _time_str,
                    # The time at which the job completed.
                    "start_time": _time_str,
                    # The time at which the job started running.
                    "timeout_reached": bool,
                    # Whether the job reached its timeout limit.
                    "command_return_code": int,
                    # The command's return code.
                    "wrapper_return_code": int,
                    # The wrapper's return code.
                    "stderr": voluptuous.Any([str], None),
                    # A list of strings that the command printed to its stderr.
                    "stdout": voluptuous.Any([str], None),
                    # A list of strings that the command printed to its stdout.
                    "duration_str": voluptuous.Any(str, None),
                    # A human-readable duration of this job (HH:MM:SS).
                    "wrapper_arguments": _single_job_schema(),
                    # The arguments passed to this job, see the
                    # single_job_schema schema below.
                    "loaded_outcome_dict": voluptuous.Any(dict, None),
                    # If *wrapper_arguments["outcome_table"]* is not null, the
                    # value of this key will be the deserialized data loaded
                    # from the outcome table file.
                    "memory_trace": {
                        # If *profile_memory* was set to true in the wrapper
                        # arguments for this job, this dict will contain samples of
                        # the command's memory usage.
                        voluptuous.Optional("peak"): {
                            # The command's peak memory usage.
                            "rss": int,
                            # Peak resident set
                            "vsz": int,
                            # Peak virtual memory size
                            "human_readable_rss": str,
                            # Peak resident set
                            "human_readable_vsz": str,
                            # Peak virtual memory size
                        },
                        voluptuous.Optional("trace"): [{
                            # A list of samples of memory usage.
                            "rss": int,
                            # Resident set
                            "vsz": int,
                            # Virtual memory
                            "time": _time_str,
                            # The time at which the sample was taken
                        }],
                    },
                })],
            }],
        }],
        "latest_symlink": voluptuous.Any(str, None),
        # The symbolic link to the report advertised to users
    }