in Tools/WinMLRunner/src/CommandLineArgs.cpp [12:89]
void CommandLineArgs::PrintUsage()
{
    // Branding differs when built against the WinML NuGet package.
#ifdef USE_WINML_NUGET
    std::cout << "MicrosoftML Runner" << std::endl;
#else
    std::cout << "WinML Runner" << std::endl;
#endif
    std::cout << " ---------------------------------------------------------------" << std::endl;
#ifdef USE_WINML_NUGET
    std::cout << "MicrosoftMLRunner.exe <-model | -folder> <fully qualified path> [options]" << std::endl;
#else
    std::cout << "WinMLRunner.exe <-model | -folder> <fully qualified path> [options]" << std::endl;
#endif
    std::cout << std::endl;
    std::cout << "options: " << std::endl;
#ifdef USE_WINML_NUGET
    std::cout << " -version : prints the version information for this build of MicrosoftMLRunner.exe" << std::endl;
#else
    std::cout << " -version : prints the version information for this build of WinMLRunner.exe" << std::endl;
#endif
    std::cout << " -CPU : run model on the default CPU" << std::endl;
    std::cout << " -GPU : run model on the default GPU" << std::endl;
    std::cout << " -GPUHighPerformance : run model on the GPU with the highest performance" << std::endl;
    std::cout << " -GPUMinPower : run model on the GPU with the least power" << std::endl;
#ifdef DXCORE_SUPPORTED_BUILD
    // Adapter selection by name is only compiled in on DXCore-supported builds.
    std::cout << " -GPUAdapterName <adapter name substring> : run model on the GPU specified by name. NOTE: only use "
                 "this flag on DXCore-supported machines."
              << std::endl;
#endif
    std::cout << " -CreateDeviceOnClient : create the D3D device on the client and pass it to WinML to create the "
                 "session"
              << std::endl;
    std::cout << " -CreateDeviceInWinML : create the device inside WinML" << std::endl;
    std::cout << " -CPUBoundInput : bind the input to the CPU" << std::endl;
    std::cout << " -GPUBoundInput : bind the input to the GPU" << std::endl;
    std::cout << " -RGB : load the input as an RGB image" << std::endl;
    std::cout << " -BGR : load the input as a BGR image" << std::endl;
    std::cout << " -Tensor [function] : load the input as a tensor, with an optional function for input preprocessing"
              << std::endl;
    std::cout << "    Optional function arguments:" << std::endl;
    std::cout << "        Identity (default) : no input transformations will be performed." << std::endl;
    std::cout << "        Normalize <scale> <means> <stddevs> : float scale factor and comma-separated per-channel "
                 "means and stddevs for normalization."
              << std::endl;
    std::cout << " -Perf [all] : capture performance measurements such as timing and memory usage. Specifying \"all\" "
                 "will output all measurements."
              << std::endl;
    std::cout << " -Iterations <number> : number of times perf measurements will be run and averaged (maximum: 1024)"
              << std::endl;
    std::cout << " -Input <path to input file> : bind an image or CSV file to the model" << std::endl;
    std::cout << " -InputImageFolder <path to directory of images> : specify a folder of images to bind to the model"
              << std::endl;
    std::cout << " -TopK <number> : print the top <number> values in the result. Defaults to 1." << std::endl;
    std::cout << " -GarbageDataMaxValue <number> : limit the garbage input data to a maximum random value"
              << std::endl;
    std::cout << " -BaseOutputPath [<fully qualified path>] : base output directory path for results. Defaults to the "
                 "current working directory."
              << std::endl;
    std::cout << " -PerfOutput [<path>] : fully qualified or relative path, including the CSV filename, for perf "
                 "results"
              << std::endl;
    std::cout << " -SavePerIterationPerf : save per-iteration performance results to a CSV file" << std::endl;
    std::cout << " -PerIterationPath <directory_path> : relative or fully qualified path for per-iteration and saved "
                 "tensor output results. If not specified, a default (timestamped) folder will be created."
              << std::endl;
    std::cout << " -SaveTensorData <saveMode> : save the first iteration's or every iteration's output tensor results "
                 "to a CSV file [First, All]"
              << std::endl;
    std::cout << " -DebugEvaluate : print evaluation debug output to the debug console if a debugger is present."
              << std::endl;
    std::cout << " -Terse : terse mode (suppresses repetitive console output)" << std::endl;
    std::cout << " -AutoScale <interpolationMode> : enable image autoscaling and set the interpolation mode [Nearest, "
                 "Linear, Cubic, Fant]"
              << std::endl;
    std::cout << std::endl;
    std::cout << "Concurrency Options:" << std::endl;
    std::cout << " -ConcurrentLoad : load models concurrently" << std::endl;
    std::cout << " -NumThreads <number> : number of threads used to load models. Defaults to the number of model "
                 "files to be executed."
              << std::endl;
    std::cout << " -ThreadInterval <milliseconds> : interval between two thread creations, in milliseconds"
              << std::endl;
}
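
The three USE_WINML_NUGET branches above repeat the same compile-time branding decision. As a minimal sketch only, assuming a hypothetical TOOL_NAME constant and PrintHeader helper that are not part of the WinMLRunner sources, the choice of executable name could be centralized once and reused:

// Sketch: centralize the USE_WINML_NUGET branding choice in one constant.
// TOOL_NAME and PrintHeader are illustrative assumptions, not identifiers
// from WinMLRunner.
#include <iostream>

#ifdef USE_WINML_NUGET
static constexpr const char* TOOL_NAME = "MicrosoftMLRunner.exe";
#else
static constexpr const char* TOOL_NAME = "WinMLRunner.exe";
#endif

static void PrintHeader()
{
    std::cout << TOOL_NAME << " <-model | -folder> <fully qualified path> [options]" << std::endl;
}

Each branded message can then reference TOOL_NAME instead of carrying its own #ifdef, which keeps the strings from drifting apart between the NuGet and non-NuGet builds.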
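
The Concurrency Options describe loading models on several threads with a fixed delay between thread creations. Below is a minimal sketch of that documented pattern, not WinMLRunner's actual implementation; LoadModel, ConcurrentLoad, and the parameter names are hypothetical stand-ins:

// Sketch of the documented -NumThreads / -ThreadInterval semantics:
// spawn numThreads loader threads, pausing threadInterval between
// creations. LoadModel is a hypothetical stand-in for the real work;
// modelPaths is assumed non-empty.
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

static void LoadModel(const std::wstring& path)
{
    std::wcout << L"loading " << path << std::endl; // placeholder work
}

static void ConcurrentLoad(const std::vector<std::wstring>& modelPaths, unsigned numThreads,
                           std::chrono::milliseconds threadInterval)
{
    std::vector<std::thread> threads;
    for (unsigned i = 0; i < numThreads; ++i)
    {
        threads.emplace_back(LoadModel, modelPaths[i % modelPaths.size()]);
        if (i + 1 < numThreads)
        {
            std::this_thread::sleep_for(threadInterval); // -ThreadInterval delay
        }
    }
    for (auto& t : threads)
    {
        t.join(); // wait for all loads to finish
    }
}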