artifact_downloader.py [182:243]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    log("Downloading " + artifact["name"] + " to: " + fname)
    if os.path.exists(fname):
        log("File already exists.")
        return fname

    tries = 0
    if not SECONDARYMETHOD:
        url_data = TC_PREFIX + "v1/task/" + task_id + "/artifacts/" + artifact["name"]
    else:
        url_data = TC_PREFIX2 + task_id + "/0/" + artifact["name"]

    while tries < RETRY:
        try:
            # Make the actual request
            request = requests.get(url_data, timeout=60, stream=True)

            # Open the output file and make sure we write in binary mode
            with open(fname, "wb") as fh:
                # Walk through the request response in chunks of 1024 * 1024 bytes, so 1MiB
                for chunk in request.iter_content(1024 * 1024):
                    # Write the chunk to the file
                    fh.write(chunk)
            break
        except Exception as e:
            log(
                "Failed to get data from %s: %s - %s"
                % (url_data, e.__class__.__name__, e)
            )
            # Count this attempt; keep retrying until RETRY attempts have failed
            tries += 1
            if tries < RETRY:
                log("Retrying %s more times..." % str(RETRY - tries))
            else:
                warning("No more retries. Failed to download %s" % url_data)
                FAILED.append(task_id)
                raise

    # urlretrieve(
    #     'https://queue.taskcluster.net/v1/task/' + task_id + '/artifacts/' + artifact['name'],
    #     fname
    # )
    return fname


def suite_name_from_task_name(name):
    psn = name.split("/")[-1]
    psn = "-".join(psn.split("-")[1:])
    return psn


def make_count_dir(a_path):
    os.makedirs(a_path, exist_ok=True)
    return a_path


def extract_tgz(tar_url, extract_path="."):
    import tarfile

    with tarfile.open(tar_url, "r") as tar:
        for item in tar:
            tar.extract(item, extract_path)
            # Recursively unpack any nested .tgz/.tar archives where they landed
            if item.name.endswith(".tgz") or item.name.endswith(".tar"):
                nested_path = os.path.join(extract_path, item.name)
                extract_tgz(nested_path, os.path.dirname(nested_path))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
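
Note: the download loop above never checks the HTTP status code, so an error
response (e.g. a 404 page) would be written to disk as if it were the artifact.
A minimal standalone sketch of a status-checked chunked download follows; it is
not part of either file, and the function name and parameters are placeholders.

import requests

def download_checked(url, fname, chunk_size=1024 * 1024):
    # Stream the response to disk in chunk_size pieces, but fail fast on
    # HTTP errors instead of silently saving an error page.
    response = requests.get(url, timeout=60, stream=True)
    response.raise_for_status()
    with open(fname, "wb") as fh:
        for chunk in response.iter_content(chunk_size):
            fh.write(chunk)
    return fname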



mozperftest_tools/mozperftest_tools/utils/artifact_downloader.py [163:224]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    log("Downloading " + artifact["name"] + " to: " + fname)
    if os.path.exists(fname):
        log("File already exists.")
        return fname

    tries = 0
    if not SECONDARYMETHOD:
        url_data = TC_PREFIX + "v1/task/" + task_id + "/artifacts/" + artifact["name"]
    else:
        url_data = TC_PREFIX2 + task_id + "/0/" + artifact["name"]

    while tries < RETRY:
        try:
            # Make the actual request
            request = requests.get(url_data, timeout=60, stream=True)

            # Open the output file and make sure we write in binary mode
            with open(fname, "wb") as fh:
                # Walk through the request response in chunks of 1024 * 1024 bytes, so 1MiB
                for chunk in request.iter_content(1024 * 1024):
                    # Write the chunk to the file
                    fh.write(chunk)
            break
        except Exception as e:
            log(
                "Failed to get data from %s: %s - %s"
                % (url_data, e.__class__.__name__, e)
            )
            # Count this attempt; keep retrying until RETRY attempts have failed
            tries += 1
            if tries < RETRY:
                log("Retrying %s more times..." % str(RETRY - tries))
            else:
                warning("No more retries. Failed to download %s" % url_data)
                FAILED.append(task_id)
                raise

    # urlretrieve(
    #     'https://queue.taskcluster.net/v1/task/' + task_id + '/artifacts/' + artifact['name'],
    #     fname
    # )
    return fname


def suite_name_from_task_name(name):
    psn = name.split("/")[-1]
    psn = "-".join(psn.split("-")[1:])
    return psn


def make_count_dir(a_path):
    os.makedirs(a_path, exist_ok=True)
    return a_path


def extract_tgz(tar_url, extract_path="."):
    import tarfile

    with tarfile.open(tar_url, "r") as tar:
        for item in tar:
            tar.extract(item, extract_path)
            # Recursively unpack any nested .tgz/.tar archives where they landed
            if item.name.endswith(".tgz") or item.name.endswith(".tar"):
                nested_path = os.path.join(extract_path, item.name)
                extract_tgz(nested_path, os.path.dirname(nested_path))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
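
For reference, suite_name_from_task_name simply takes the part of the task
label after the last "/" and drops its first "-"-separated token. A quick
illustration with a made-up task label (the label is only an example, not
taken from either file):

def suite_name_from_task_name(name):
    psn = name.split("/")[-1]
    return "-".join(psn.split("-")[1:])

# Hypothetical task label -> suite name
print(suite_name_from_task_name(
    "test-linux64-shippable/opt-browsertime-tp6-firefox-amazon"
))  # prints: browsertime-tp6-firefox-amazon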



