src/cloud/pipelines/image_classification/pipeline.py [59:87]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ):
    """Gets a SageMaker ML Pipeline instance working with on DefectDetection data.

    Args:
        region: AWS region to create and run the pipeline.
        role: IAM role to create and run steps and pipeline.
        default_bucket: The bucket to use for storing the artifacts.

    Returns:
        An instance of a pipeline.
    """
    sagemaker_session = get_session(region, default_bucket)
    if role is None:
        role = sagemaker.session.get_execution_role(sagemaker_session)

    # With caching enabled, re-running this pipeline without changing the input
    # parameters skips the training step and reuses the previously trained model.
    cache_config = CacheConfig(enable_caching=True, expire_after="30d")
    ts = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Data prep
    processing_instance_type = ParameterString( # instance type for data preparation
        name="ProcessingInstanceType",
        default_value="ml.m5.xlarge"
    )
    processing_instance_count = ParameterInteger( # number of instances used for data preparation
        name="ProcessingInstanceCount",
        default_value=1
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
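A note on the cache_config above: caching only takes effect once the config is
attached to individual steps. A minimal sketch of how it might be wired into the
data-preparation step, assuming an SKLearnProcessor; the script name and step
name here are hypothetical, not taken from this repo:

    from sagemaker.sklearn.processing import SKLearnProcessor
    from sagemaker.workflow.steps import ProcessingStep

    sklearn_processor = SKLearnProcessor(
        framework_version="0.23-1",
        role=role,
        instance_type=processing_instance_type,    # pipeline parameter from above
        instance_count=processing_instance_count,  # pipeline parameter from above
        sagemaker_session=sagemaker_session,
    )
    step_process = ProcessingStep(
        name="PrepareDefectDetectionData",  # hypothetical step name
        processor=sklearn_processor,
        code="preprocess.py",               # hypothetical script path
        cache_config=cache_config,          # reuse results when inputs are unchanged
    )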



src/cloud/pipelines/semantic_segmentation/pipeline.py [49:77]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ):
    """Gets a SageMaker ML Pipeline instance working with on DefectDetection data.

    Args:
        region: AWS region to create and run the pipeline.
        role: IAM role to create and run steps and pipeline.
        default_bucket: The bucket to use for storing the artifacts.

    Returns:
        An instance of a pipeline.
    """
    sagemaker_session = get_session(region, default_bucket)
    if role is None:
        role = sagemaker.session.get_execution_role(sagemaker_session)

    # With caching enabled, re-running this pipeline without changing the input
    # parameters skips the training step and reuses the previously trained model.
    cache_config = CacheConfig(enable_caching=True, expire_after="30d")
    ts = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Data prep
    processing_instance_type = ParameterString( # instance type for data preparation
        name="ProcessingInstanceType",
        default_value="ml.m5.xlarge"
    )
    processing_instance_count = ParameterInteger( # number of instances used for data preparation
        name="ProcessingInstanceCount",
        default_value=1
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
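The ParameterString/ParameterInteger objects become runtime-overridable knobs
only once they are registered with the Pipeline definition. A minimal sketch of
that assembly, assuming the steps list is built later in this function; the
pipeline name and step variable are hypothetical:

    from sagemaker.workflow.pipeline import Pipeline

    pipeline = Pipeline(
        name=f"DefectDetectionSemanticSegmentation-{ts}",  # hypothetical name
        parameters=[processing_instance_type, processing_instance_count],
        steps=[step_process],  # plus the training/evaluation steps defined later
        sagemaker_session=sagemaker_session,
    )

    # Defaults can then be overridden per execution without editing code:
    pipeline.upsert(role_arn=role)
    execution = pipeline.start(parameters={"ProcessingInstanceCount": 2})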



