# cli_argparser() — defined in sagemaker_run_notebook/cli.py

def _add_param_option(parser):
    """Attach the repeatable ``-p name=value`` notebook parameter option."""
    parser.add_argument(
        "-p",
        action="append",
        nargs=1,
        help="Specify a parameter like -p x=7. Can be repeated.",
    )


def _add_execution_options(parser, default_execution_role):
    """Attach the execution options shared by 'run' and 'schedule':
    S3 output location, IAM role, instance type, and Docker image."""
    parser.add_argument(
        "--output-prefix",
        help="Where in S3 to put the output (default: SageMaker Python SDK bucket)",
    )
    parser.add_argument(
        "--role",
        help=f"The IAM role to use when running the notebook (default: {default_execution_role})",
        default=default_execution_role,
    )
    parser.add_argument(
        "--instance",
        help="The EC2 instance type to use to run the notebook (default: ml.m5.large)",
        default="ml.m5.large",
    )
    parser.add_argument(
        "--image",
        help="The Docker image in ECR to use to run the notebook (default: notebook-runner)",
        default="notebook-runner",
    )


def _add_run_parser(subparsers, default_execution_role):
    """Define the 'run' subcommand: execute a notebook immediately."""
    run_parser = subparsers.add_parser("run", help="Run a notebook now")
    run_parser.add_argument(
        "notebook", help="The name of the notebook to run (local file or an s3 URL)"
    )
    _add_param_option(run_parser)
    _add_execution_options(run_parser, default_execution_role)
    run_parser.add_argument(
        "--extra",
        help="Extra arguments to pass to SageMaker processing formatted as JSON (use @filename to read JSON from a file) (default: None)",
    )
    run_parser.add_argument(
        "--emr",
        help="The name of an EMR cluster to connect to for SparkMagic (default: None)",
    )
    run_parser.add_argument(
        "--output-dir",
        help="The directory to download the notebook to (default: .)",
        default=".",
    )
    run_parser.add_argument(
        "--no-wait",
        help="Launch the notebook run but don't wait for it to complete",
        action="store_true",
    )
    run_parser.set_defaults(func=run_notebook)


def _add_download_parser(subparsers):
    """Define the 'download' subcommand: fetch the output of a run."""
    download_parser = subparsers.add_parser(
        "download", help="Download the output of a notebook execution"
    )
    download_parser.add_argument(
        "run_name", metavar="run-name", help="The name of the notebook execution run"
    )
    download_parser.add_argument(
        "--output-dir",
        help="The directory to download the notebook to (default: .)",
        default=".",
    )
    download_parser.add_argument(
        "--wait",
        help="Wait for the job to complete before downloading",
        action="store_true",
    )
    download_parser.set_defaults(func=download_notebook)


def _add_stop_run_parser(subparsers):
    """Define the 'stop-run' subcommand: abort a running execution."""
    stoprun_parser = subparsers.add_parser(
        "stop-run",
        help="Stop the specified notebook execution run without waiting for it to complete",
    )
    stoprun_parser.add_argument(
        "run_name", metavar="run-name", help="The name of the notebook execution run"
    )
    stoprun_parser.set_defaults(func=stop_run)


def _add_list_runs_parser(subparsers):
    """Define the 'list-runs' subcommand: enumerate notebook runs."""
    listrun_parser = subparsers.add_parser("list-runs", help="List notebook runs")
    listrun_parser.add_argument(
        "--rule", help="List only runs started by the specified schedule rule"
    )
    listrun_parser.add_argument(
        "--notebook", help="List only runs of the specified notebook"
    )
    listrun_parser.add_argument(
        "--max", help="Maximum number of runs to show", type=int, default=9999999
    )
    listrun_parser.set_defaults(func=list_runs)


def _add_schedule_parser(subparsers, default_execution_role):
    """Define the 'schedule' subcommand: create an EventBridge rule that
    runs a notebook on a schedule or in response to an event."""
    schedule_parser = subparsers.add_parser(
        "schedule",
        help="Create a rule to run a notebook on a schedule or in response to an event",
    )
    schedule_parser.add_argument(
        "notebook", help="The name of the notebook to run (local file or an s3 URL)"
    )
    schedule_parser.add_argument(
        "--name", help="The name of the rule to create.", required=True
    )
    _add_param_option(schedule_parser)
    _add_execution_options(schedule_parser, default_execution_role)
    schedule_parser.add_argument(
        "--emr",
        help="The name of an EMR cluster to connect to for SparkMagic (default: None)",
    )
    schedule_parser.add_argument(
        "--extra",
        help="Extra arguments to pass to SageMaker processing formatted as JSON (use @filename to read JSON from a file) (default: None)",
    )
    schedule_parser.add_argument(
        "--at",
        help="When to run the notebook (see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html for syntax)",
    )
    schedule_parser.add_argument(
        "--event", help="Event that will trigger the notebook run"
    )
    schedule_parser.set_defaults(func=schedule)


def _add_unschedule_parser(subparsers):
    """Define the 'unschedule' subcommand: delete a schedule rule."""
    unschedule_parser = subparsers.add_parser(
        "unschedule", help="Delete the specified schedule rule"
    )
    unschedule_parser.add_argument(
        "rule_name", metavar="rule-name", help="The name of the rule to delete"
    )
    unschedule_parser.set_defaults(func=unschedule)


def _add_list_rules_parser(subparsers):
    """Define the 'list-rules' subcommand: enumerate schedule rules."""
    listrules_parser = subparsers.add_parser("list-rules", help="List schedule rules")
    listrules_parser.add_argument(
        "--prefix", help="List only rules where the rule name has the specified prefix"
    )
    listrules_parser.add_argument(
        "--notebook", help="List only rules with the specified notebook"
    )
    listrules_parser.add_argument(
        "--max", help="Maximum number of rules to show", type=int, default=9999999
    )
    listrules_parser.set_defaults(func=list_rules)


def _add_local_parser(subparsers):
    """Define the 'local' subcommand: run a notebook in a local Docker container."""
    local_parser = subparsers.add_parser(
        "local", help="Run a notebook locally using Docker"
    )
    local_parser.add_argument("notebook", help="The name of the notebook file to run")
    _add_param_option(local_parser)
    local_parser.add_argument(
        "--image",
        help="The Docker image in ECR to use to run the notebook (default: notebook-runner)",
        default="notebook-runner",
    )
    local_parser.add_argument(
        "--output-dir",
        help="The directory to output the notebook to (default: .)",
        default=".",
    )
    local_parser.add_argument(
        "--no-wait",
        help="Launch the notebook run but don't wait for it to complete",
        action="store_true",
    )
    local_parser.set_defaults(func=local_notebook)


def _add_create_infrastructure_parser(subparsers):
    """Define the 'create-infrastructure' subcommand: set up Lambda/IAM via CloudFormation."""
    createinfra_parser = subparsers.add_parser(
        "create-infrastructure",
        help="Use CloudFormation to set up the required Lambda function and IAM roles and policies",
    )
    createinfra_parser.add_argument(
        "--update",
        help="Add this flag to update an existing stack",
        action="store_true",
    )
    createinfra_parser.set_defaults(func=create_infrastructure)


def _add_create_container_parser(subparsers, container_build_role):
    """Define the 'create-container' subcommand: build an execution image with CodeBuild."""
    container_parser = subparsers.add_parser(
        "create-container",
        help="Use CodeBuild to build a Docker image for notebook execution",
    )
    container_parser.add_argument(
        "repository",
        help="The ECR repository for the image (default: notebook-runner)",
        nargs="?",
        default="notebook-runner",
    )
    container_parser.add_argument(
        "--base",
        help=f"The Docker image to base the new image on (default: {container_build.default_base})",
        default=container_build.default_base,
    )
    container_parser.add_argument(
        "--requirements",
        help="A requirements.txt file to define custom dependencies for the container",
    )
    container_parser.add_argument(
        "--script",
        help="A shell script to run while building the container (after any requirements are installed)",
    )
    container_parser.add_argument(
        "-k",
        "--kernel",
        help="The name of the kernel to use to run the notebook (default: first Python kernel)",
    )
    container_parser.add_argument(
        "--role",
        help=f"The IAM role for CodeBuild to use (default: {container_build_role}).",
        default=container_build_role,
    )
    container_parser.add_argument(
        "--bucket",
        help="The S3 bucket to use for sending data to CodeBuild (if None, use the SageMaker SDK default bucket).",
    )
    container_parser.add_argument(
        "--no-logs",
        action="store_true",
        help="Don't show the logs of the running CodeBuild build",
    )
    container_parser.set_defaults(func=create_container)


def cli_argparser():
    """Build the argument parser for the sagemaker-run-notebook CLI.

    Creates one subcommand per operation (run, download, stop-run, list-runs,
    schedule, unschedule, list-rules, local, create-infrastructure,
    create-container); each subcommand stores its handler function in the
    ``func`` default so the caller can dispatch with ``args.func(args)``.

    Returns:
        argparse.ArgumentParser: the fully configured top-level parser.

    Note: calls ``boto3.session.Session()`` to discover the current region,
    which is interpolated into the default IAM role names.
    """
    region = boto3.session.Session().region_name
    default_execution_role = f"BasicExecuteNotebookRole-{region}"
    container_build_role = f"ExecuteNotebookCodeBuildRole-{region}"

    parser = argparse.ArgumentParser(
        description="A command line interface for running and scheduling notebooks in SageMaker"
    )
    parser.add_argument(
        "-v", "--version", action="store_true", help="Display current version and exit"
    )
    subparsers = parser.add_subparsers(dest="subcommand")

    # One helper per subcommand keeps each parser definition self-contained.
    _add_run_parser(subparsers, default_execution_role)
    _add_download_parser(subparsers)
    _add_stop_run_parser(subparsers)
    _add_list_runs_parser(subparsers)
    _add_schedule_parser(subparsers, default_execution_role)
    _add_unschedule_parser(subparsers)
    _add_list_rules_parser(subparsers)
    _add_local_parser(subparsers)
    _add_create_infrastructure_parser(subparsers)
    _add_create_container_parser(subparsers, container_build_role)

    return parser