# assets/large_language_models/rag/components/create_promptflow/spec.yaml
$schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json
type: command
tags:
  Preview: ""
version: 0.0.89
name: llm_rag_create_promptflow
display_name: LLM - Create Prompt Flow
is_deterministic: true
description: |
  This component creates a RAG prompt flow from your MLIndex data and best prompts. The generated flow looks up answers in your indexed data so responses are grounded in your own data context, and it supports bulk testing with any built-in or custom evaluation flow.
inputs:
  best_prompts:
    type: uri_file
    optional: true
    description: "JSON file containing prompt options to create variants from. It must either have a single key 'best_prompt' whose value is a list of prompt strings, or map specific metric names to their best prompts."
  mlindex_asset_id:
    type: uri_file
    description: "Asset ID of the MLIndex file that describes the index to use for document lookup in the generated prompt flow."
  mlindex_name:
    type: string
    optional: true
    description: "Name of the MLIndex asset."
  mlindex_asset_uri:
    type: uri_folder
    mode: ro_mount
    description: "Folder containing the MLIndex to use in the generated flow."
  llm_connection_name:
    type: string
    optional: true
    description: "Full name of the workspace connection to use for completion or chat."
  llm_config:
    type: string
    optional: true
    description: "JSON describing the LLM provider and model details to use for completion generation."
  embedding_connection:
    type: string
    optional: true
    description: "Full name of the workspace connection to use for embeddings."
  embeddings_model:
    type: string
    optional: true
    description: "The model to use to embed data, e.g. 'hugging_face://model/sentence-transformers/all-mpnet-base-v2' or 'azure_open_ai://deployment/{deployment_name}/model/{model_name}'."
environment: azureml:llm-rag-embeddings@latest
code: '../src'
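# In the command below, ${{inputs.*}} placeholders are resolved by Azure ML at runtime, and each
# $[[ ... ]] wrapper marks an argument that is emitted only when the corresponding optional input is provided.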
command: >-
  python flow_creation.py
  --mlindex_asset_id '${{inputs.mlindex_asset_id}}'
  --mlindex_asset_uri '${{inputs.mlindex_asset_uri}}'
  $[[--best_prompts '${{inputs.best_prompts}}']]
  $[[--mlindex_name '${{inputs.mlindex_name}}']]
  $[[--llm_connection_name '${{inputs.llm_connection_name}}']]
  $[[--llm_config '${{inputs.llm_config}}']]
  $[[--embedding_connection '${{inputs.embedding_connection}}']]
  $[[--embeddings_model '${{inputs.embeddings_model}}']]