in src/patch_helper.py [0:0]
def conduct_autopatch_build_setup(pre_push_image_object: DockerImage, download_path: str):
    """
    Conducts the setup for the AutoPatch builds. It pulls the already released image and then
    triggers the autopatching procedures on the image to get the packages that need to be
    modified. Thereafter, it modifies pre_push_image_object to make changes to the original
    build process such that it starts to utilize miscellaneous_dockerfiles/Dockerfile.autopatch
    Dockerfile for building the image.

    :param pre_push_image_object: Object of type DockerImage, the original DockerImage object
        that gets modified in place by this method (dockerfile, target, context, info,
        additional_tags).
    :param download_path: str, Path of the folder where the relevant scripts have already been
        downloaded.
    :return: str, Returns constants.SUCCESS to allow the multi-threaded caller to know that the
        method has succeeded.
    """
    # Local import to avoid a module-level dependency on the test package.
    from test.test_utils import get_sha_of_an_image_from_ecr

    info = pre_push_image_object.info
    image_name = info.get("name")
    latest_released_image_uri = info.get("release_image_uri")
    run(f"docker pull {latest_released_image_uri}", hide=True)

    # If the latest released image is itself an autopatched image, its patching-info contents
    # record the SHA of the first (original, non-patched) image it was built from; otherwise
    # this returns a falsy value and the released image itself is used as the build base.
    first_image_sha = extract_first_image_sha_using_patching_info_contents_of_given_image(
        image_uri=latest_released_image_uri
    )
    base_image_uri_for_patch_builds = latest_released_image_uri
    if first_image_sha:
        # In case the latest released image is an autopatched image first_image_sha will not be None
        # In those cases, pull the first image using the SHA and use that as base
        base_image_uri_for_patch_builds = pull_base_image_uri_for_patch_builds_and_get_the_tag(
            latest_released_image_uri=latest_released_image_uri, first_image_sha=first_image_sha
        )
    # Sanity check: the released image must actually be layered on top of the chosen base.
    assert verify_if_child_image_is_built_on_top_of_base_image(
        base_image_uri=base_image_uri_for_patch_builds, child_image_uri=latest_released_image_uri
    ), f"Child image {latest_released_image_uri} is not built on {base_image_uri_for_patch_builds}"

    ecr_client = boto3.client("ecr", region_name=os.getenv("REGION"))
    latest_released_image_sha = get_sha_of_an_image_from_ecr(
        ecr_client=ecr_client, image_uri=latest_released_image_uri
    )

    # Folder (named after the sanitized base image URI) that receives the patch details
    # produced by the current run.
    current_patch_details_path = os.path.join(
        os.sep, download_path, base_image_uri_for_patch_builds.replace("/", "_").replace(":", "_")
    )
    # os.makedirs(..., exist_ok=True) replaces the previous exists-check + shelled-out `mkdir`:
    # it is race-free and also creates any missing parent directories.
    os.makedirs(current_patch_details_path, exist_ok=True)
    complete_patching_info_dump_location = os.path.join(
        os.sep,
        get_cloned_folder_path(),
        f"""{base_image_uri_for_patch_builds.replace("/", "_").replace(":", "_")}_patch-dump""",
    )
    os.makedirs(complete_patching_info_dump_location, exist_ok=True)
    # Pull the historical patching data out of the released image into the dump folder.
    extract_patching_relevant_data_from_latest_released_image(
        image_uri=latest_released_image_uri,
        extraction_location=complete_patching_info_dump_location,
    )

    # Run language patching and enhanced-scan patching concurrently against the base image.
    THREADS = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        # NOTE(review): presumably warms up a boto client on the main thread before the worker
        # threads start (boto client creation is not thread-safe) — confirm against the helper.
        get_dummy_boto_client()
        THREADS[f"trigger_language_patching-{base_image_uri_for_patch_builds}"] = executor.submit(
            trigger_language_patching,
            image_uri=base_image_uri_for_patch_builds,
            s3_downloaded_path=download_path,
            python_version=info.get("python_version"),
        )
        THREADS[
            f"trigger_enhanced_scan_patching-{base_image_uri_for_patch_builds}"
        ] = executor.submit(
            trigger_enhanced_scan_patching,
            image_uri=base_image_uri_for_patch_builds,
            patch_details_path=current_patch_details_path,
            python_version=info.get("python_version"),
        )
        # NOTE(review): looks like this blocks on / reports the submitted futures — verify.
        FORMATTER.progress(THREADS)

    # Merge the freshly generated patch details into the dump as "patch-details-current".
    run(
        f"cp -r {current_patch_details_path}/. {complete_patching_info_dump_location}/patch-details-current"
    )

    # Redirect the build to the autopatch Dockerfile instead of the framework's own Dockerfile.
    pre_push_image_object.dockerfile = os.path.join(
        os.sep, get_cloned_folder_path(), "miscellaneous_dockerfiles", "Dockerfile.autopatch"
    )

    # Paths of the scripts/artifacts that the autopatch Dockerfile copies into the image.
    miscellaneous_scripts_path = os.path.join(
        os.sep, get_cloned_folder_path(), "miscellaneous_scripts"
    )
    pytorch_inference_artifacts_path = os.path.join(
        os.sep,
        get_cloned_folder_path(),
        "pytorch",
        "inference",
        "docker",
        "build_artifacts",
    )
    pytorch_training_artifacts_path = os.path.join(
        os.sep,
        get_cloned_folder_path(),
        "pytorch",
        "training",
        "docker",
        "build_artifacts",
    )
    torchserve_entrypoint_path = os.path.join(
        pytorch_inference_artifacts_path,
        "torchserve-entrypoint.py",
    )
    start_with_right_hostname_path = os.path.join(
        pytorch_training_artifacts_path,
        "start_with_right_hostname.sh",
    )
    pytorch_inference_start_cuda_compat_path = os.path.join(
        pytorch_inference_artifacts_path,
        "start_cuda_compat.sh",
    )
    pytorch_training_start_cuda_compat_path = os.path.join(
        pytorch_training_artifacts_path,
        "start_cuda_compat.sh",
    )
    # Fail fast if the extracted patching info and the local scripts disagree.
    verify_artifact_contents_for_patch_builds(
        patching_info_folder_path=complete_patching_info_dump_location,
        miscellaneous_scripts_path=miscellaneous_scripts_path,
    )

    # Autopatch builds are single-stage: drop any multi-stage build target.
    pre_push_image_object.target = None
    info["extra_build_args"].update({"BASE_IMAGE_FOR_PATCH_BUILD": base_image_uri_for_patch_builds})
    info["extra_build_args"].update({"LATEST_RELEASED_IMAGE_SHA": latest_released_image_sha})
    info["extra_build_args"].update({"LATEST_RELEASED_IMAGE_URI": latest_released_image_uri})

    # Build-context artifacts: source path on disk -> target name inside the build context.
    autopatch_artifacts = {
        "miscellaneous_scripts": {
            "source": miscellaneous_scripts_path,
            "target": "miscellaneous_scripts",
        },
        "dockerfile": {
            "source": pre_push_image_object.dockerfile,
            "target": "Dockerfile",
        },
        "patching-info": {
            "source": complete_patching_info_dump_location,
            "target": "patching-info",
        },
        "new-torchserve-entrypoint": {
            "source": torchserve_entrypoint_path,
            "target": "new-torchserve-entrypoint",
        },
        "new_start_with_right_hostname": {
            "source": start_with_right_hostname_path,
            "target": "new_start_with_right_hostname",
        },
        "new_pytorch_inference_start_cuda_compat": {
            "source": pytorch_inference_start_cuda_compat_path,
            "target": "new_pytorch_inference_start_cuda_compat",
        },
        "new_pytorch_training_start_cuda_compat": {
            "source": pytorch_training_start_cuda_compat_path,
            "target": "new_pytorch_training_start_cuda_compat",
        },
    }
    context = Context(
        autopatch_artifacts,
        f"build/{image_name}.tar.gz",
        os.path.join(os.sep, get_cloned_folder_path(), "src"),
    )
    pre_push_image_object.info = info
    pre_push_image_object.context = context

    # Add latest released image SHA (plus a timestamp) as an additional, highest-priority tag.
    # NOTE(review): datetime.now() is naive local time; if builders run in different timezones
    # the tag ordering is not globally consistent — consider UTC, but confirm consumers first.
    datetime_str = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    sha_after_colon = latest_released_image_sha.split(":")[1]
    pre_push_image_object.additional_tags.insert(0, f"lastsha-{datetime_str}-{sha_after_colon}")
    return constants.SUCCESS