in ml_service/pipelines/run_parallel_batchscore_pipeline.py [0:0]
def run_batchscore_pipeline():
    """Fetch, submit, and monitor the published batch-scoring pipeline.

    Reads workspace/experiment configuration from the environment (``Env``)
    and the published pipeline id from the CLI (``parse_args``), submits the
    pipeline with model-selection parameters, waits for completion, and on
    success copies the output of the run's first step.

    Raises:
        Exception: re-raised after printing so the process exits non-zero;
            silently swallowing the error would report success to CI/CD.
    """
    try:
        env = Env()
        args = parse_args()

        aml_workspace = Workspace.get(
            name=env.workspace_name,
            subscription_id=env.subscription_id,
            resource_group=env.resource_group,
        )

        scoringpipeline = get_pipeline(args.pipeline_id, aml_workspace, env)

        experiment = Experiment(
            workspace=aml_workspace,
            name=env.experiment_name,
        )

        run = experiment.submit(
            scoringpipeline,
            pipeline_parameters={
                "model_name": env.model_name,
                "model_version": env.model_version,
                # Single-space tag values appear to mean "no tag filter".
                # NOTE(review): presumably interpreted by the scoring step's
                # model lookup — confirm against get_pipeline / scoring script.
                "model_tag_name": " ",
                "model_tag_value": " ",
            },
        )

        run.wait_for_completion(show_output=True)

        if run.get_status() == "Finished":
            # Copy outputs only on success; the output of interest lives on
            # the run's first step.
            copy_output(list(run.get_steps())[0].id, env)
    except Exception as ex:
        print("Error: {}".format(ex))
        # Re-raise so the caller / CI agent sees the failure instead of a
        # zero exit code after a failed scoring run.
        raise