artifact_downloader.py [23:107]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import argparse
import time

# Python 2/3 compatibility shims for the urllib helpers used below.
try:
    from urllib.parse import urlencode
    from urllib.request import urlopen, urlretrieve
except ImportError:
    from urllib import urlencode, urlretrieve
    from urllib2 import urlopen

# Use this program to download, extract, and distribute artifact
# files that are to be used for the analyses.

# Only the task group ID needs to be given. With that, get the task details
# for the entire group, and find all the tests matching the suite, chunk, and mode
# given through the parser arguments. For each of those tests, take the taskId
# and download the artifact data chunk. Suffix the downloaded files with an
# incrementing number, and store a JSON mapping from those numbers to taskIds
# for future reference. A minimal sketch of this flow follows below.

# The suite should include the flavor. It makes no sense to aggregate the data from
# multiple flavors together because they don't run the same tests. This is also
# why you cannot specify more than one suite and chunk.
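
# A minimal sketch of the flow described above, NOT part of the original
# script: list the tasks in a group via the Taskcluster queue API, download
# one artifact per task with a numeric suffix, and write the number -> taskId
# mapping to a JSON file. The root URL, artifact path, and function name are
# illustrative assumptions, and pagination (continuationToken) is ignored.
import json
import os

QUEUE_API = "https://firefox-ci-tc.services.mozilla.com/api/queue/v1"


def sketch_download_group(
    group_id, output_dir, artifact_path="public/test_info/code-coverage-grcov.zip"
):
    # List every task in the group (single page only, for brevity).
    listing = json.loads(
        urlopen("%s/task-group/%s/list" % (QUEUE_API, group_id)).read()
    )
    mapping = {}
    for number, entry in enumerate(listing["tasks"]):
        task_id = entry["status"]["taskId"]
        # Fetch the artifact from the latest run of the task.
        artifact_url = "%s/task/%s/artifacts/%s" % (QUEUE_API, task_id, artifact_path)
        target = os.path.join(
            output_dir, "%d_%s" % (number, os.path.basename(artifact_path))
        )
        urlretrieve(artifact_url, target)
        mapping[number] = task_id
    # Keep a record of which suffix came from which task.
    with open(os.path.join(output_dir, "task_id_mapping.json"), "w") as f:
        json.dump(mapping, f, indent=2)
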
def artifact_downloader_parser():
    parser = argparse.ArgumentParser(
        description="This tool can download artifact data from a group of "
        + "taskcluster tasks. It then extracts the data, suffixes it with "
        + "a number, and then stores it in an output directory."
    )
    parser.add_argument(
        "--task-group-id",
        type=str,
        nargs=1,
        help="The group of tasks that should be parsed to find all the necessary "
        + "data to be used in this analysis. ",
    )
    parser.add_argument(
        "--test-suites-list",
        type=str,
        nargs="+",
        help="The listt of tests to look at. e.g. mochitest-browser-chrome-e10s-2."
        + " If it`s empty we assume that it means nothing, if `all` is given all suites"
        + " will be processed.",
    )
    parser.add_argument(
        "--artifact-to-get",
        type=str,
        nargs="+",
        default=["grcov"],
        help="Pattern matcher for the artifact you want to download. By default, it"
        + " is set to `grcov` to get ccov artifacts. Use `per_test_coverage` to get data"
        + " from test-coverage tasks.",
    )
    parser.add_argument(
        "--unzip-artifact",
        action="store_true",
        default=False,
        help="Set to False if you don`t want the artifact to be extracted.",
    )
    parser.add_argument(
        "--platform",
        type=str,
        default="test-linux64-ccov",
        help="Platform to obtain data from.",
    )
    parser.add_argument(
        "--download-failures",
        action="store_true",
        default=False,
        help="Set this flag to download data from failed tasks.",
    )
    parser.add_argument(
        "--ingest-continue",
        action="store_true",
        default=False,
        help="Continues from the same run it was doing before.",
    )
    parser.add_argument(
        "--output",
        type=str,
        nargs=1,
        help="This is the directory where all the download, extracted, and suffixed "
        + "data will reside.",
    )
    return parser
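
# Example usage (illustrative; the task group ID below is a placeholder):
#
#     args = artifact_downloader_parser().parse_args(
#         ["--task-group-id", "abc123", "--unzip-artifact", "--output", "/tmp/artifacts"]
#     )
#     task_group_id = args.task_group_id[0]  # nargs=1 stores a one-element list
#     output_dir = args.output[0]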


# Start time of the script; useful for reporting elapsed time
START_TIME = time.time()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


