def main() — extracted from tools/ci_build/build.py


def main():
    """Entry point of the ONNX Runtime build driver.

    Parses the command line, normalizes interdependent options (e.g.
    --use_tensorrt implies --use_cuda), rejects unsupported combinations,
    then runs the requested phases in order: update (submodule sync,
    protoc bootstrap, CMake generation), clean, build, test, and packaging
    (Python wheel / NuGet), followed by optional documentation generation.

    Raises:
        BuildError: for unsupported or inconsistent option combinations,
            or when a build/test subprocess fails.
    """
    log.debug("Command line arguments:\n  {}".format(" ".join(shlex.quote(arg) for arg in sys.argv[1:])))

    args = parse_arguments()
    cmake_extra_defines = (args.cmake_extra_defines
                           if args.cmake_extra_defines else [])
    cross_compiling = args.arm or args.arm64 or args.arm64ec or args.android

    # If there was no explicit argument saying what to do, default
    # to update, build and test (for native builds).
    if not (args.update or args.clean or args.build or args.test):
        log.debug("Defaulting to running update, build [and test for native builds].")
        args.update = True
        args.build = True
        if cross_compiling:
            # Cross-compiled tests can only run when the target ABI is one the
            # host (emulator) can execute.
            args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'
        else:
            args.test = True

    if args.skip_tests:
        args.test = False

    # Option implications: some execution providers require others.
    if args.use_tensorrt:
        args.use_cuda = True

    if args.use_migraphx:
        args.use_rocm = True

    if args.build_wheel or args.gen_doc:
        args.enable_pybind = True

    if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
        args.build_shared_lib = True

    if args.build_nuget and cross_compiling:
        raise BuildError('Currently nuget package creation is not supported while cross-compiling')

    if args.enable_pybind and args.disable_rtti:
        raise BuildError("Python bindings use typeid so you can't disable RTTI")

    if args.enable_pybind and args.disable_exceptions:
        raise BuildError('Python bindings require exceptions to be enabled.')

    if args.nnapi_min_api:
        if not args.use_nnapi:
            raise BuildError("Using --nnapi_min_api requires --use_nnapi")
        if args.nnapi_min_api < 27:
            raise BuildError("--nnapi_min_api should be 27+")

    if args.build_wasm_static_lib:
        args.build_wasm = True

    if args.build_wasm:
        if not args.disable_wasm_exception_catching and args.disable_exceptions:
            # When '--disable_exceptions' is set, we set '--disable_wasm_exception_catching' as well
            args.disable_wasm_exception_catching = True
        if args.test and args.disable_wasm_exception_catching and not args.minimal_build:
            raise BuildError("WebAssembly tests need exception catching enabled to run if it's not minimal build")
        if args.test and args.enable_wasm_debug_info:
            # With flag --enable_wasm_debug_info, onnxruntime_test_all.wasm will be very huge (>1GB). This will fail
            # Node.js when trying to load the .wasm file.
            # To debug ONNX Runtime WebAssembly, use ONNX Runtime Web to debug ort-wasm.wasm in browsers.
            raise BuildError("WebAssembly tests cannot be enabled with flag --enable_wasm_debug_info")

    if args.code_coverage and not args.android:
        raise BuildError("Using --code_coverage requires --android")

    if args.gen_api_doc and len(args.config) != 1:
        # BUGFIX: message previously referred to a non-existent '--get-api-doc'
        # flag; the option checked here is --gen_api_doc.
        raise BuildError('Using --gen_api_doc requires a single build config')

    # Disabling unit tests for VAD-F as FPGA only supports
    # models with NCHW layout
    if args.use_openvino == "VAD-F_FP32":
        args.test = False

    # Disabling unit tests for GPU and MYRIAD on nuget creation
    if args.use_openvino != "CPU_FP32" and args.build_nuget:
        args.test = False

    configs = set(args.config)

    # setup paths and directories
    # cmake_path and ctest_path can be None. For example, if a person only wants to run the tests, he/she doesn't need
    # to have cmake/ctest.
    cmake_path = resolve_executable_path(args.cmake_path)
    ctest_path = None if args.use_vstest else resolve_executable_path(
        args.ctest_path)
    build_dir = args.build_dir
    script_dir = os.path.realpath(os.path.dirname(__file__))
    source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))

    # if using cuda, setup cuda paths and env vars
    cuda_home, cudnn_home = setup_cuda_vars(args)

    mpi_home = args.mpi_home
    nccl_home = args.nccl_home

    acl_home = args.acl_home
    acl_libs = args.acl_libs

    armnn_home = args.armnn_home
    armnn_libs = args.armnn_libs

    # if using tensorrt, setup tensorrt paths
    tensorrt_home = setup_tensorrt_vars(args)

    # if using migraphx, setup migraphx paths
    migraphx_home = setup_migraphx_vars(args)

    # if using rocm, setup rocm paths
    rocm_home = setup_rocm_build(args, configs)

    if args.update or args.build:
        for config in configs:
            os.makedirs(get_config_build_dir(build_dir, config), exist_ok=True)

    log.info("Build started")

    if args.update:
        if is_reduced_ops_build(args):
            from reduce_op_kernels import reduce_ops
            for config in configs:
                reduce_ops(
                    config_path=args.include_ops_by_config,
                    build_dir=get_config_build_dir(build_dir, config),
                    enable_type_reduction=args.enable_reduced_operator_type_support,
                    use_cuda=args.use_cuda)

        cmake_extra_args = []
        path_to_protoc_exe = args.path_to_protoc_exe
        if not args.skip_submodule_sync:
            update_submodules(source_dir)
        if is_windows():
            # Select the CMake generator/platform flags for the Windows target.
            cpu_arch = platform.architecture()[0]
            if args.build_wasm:
                cmake_extra_args = ['-G', 'Ninja']
            elif args.cmake_generator == 'Ninja':
                if cpu_arch == '32bit' or args.arm or args.arm64 or args.arm64ec:
                    raise BuildError(
                        "To cross-compile with Ninja, load the toolset "
                        "environment for the target processor (e.g. Cross "
                        "Tools Command Prompt for VS)")
                cmake_extra_args = ['-G', args.cmake_generator]
            elif args.arm or args.arm64 or args.arm64ec:
                # Cross-compiling for ARM(64) architecture
                # First build protoc for host to use during cross-compilation
                if path_to_protoc_exe is None:
                    path_to_protoc_exe = build_protoc_for_host(
                        cmake_path, source_dir, build_dir, args)
                if args.arm:
                    cmake_extra_args = ['-A', 'ARM']
                elif args.arm64:
                    cmake_extra_args = ['-A', 'ARM64']
                elif args.arm64ec:
                    cmake_extra_args = ['-A', 'ARM64EC']
                cmake_extra_args += ['-G', args.cmake_generator]
                # Cannot test on host build machine for cross-compiled
                # builds (Override any user-defined behaviour for test if any)
                if args.test:
                    log.warning(
                        "Cannot test on host build machine for cross-compiled "
                        "ARM(64) builds. Will skip test running after build.")
                    args.test = False
            elif cpu_arch == '32bit' or args.x86:
                cmake_extra_args = [
                    '-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator
                ]
            else:
                if args.msvc_toolset:
                    toolset = 'host=x64,version=' + args.msvc_toolset
                else:
                    toolset = 'host=x64'
                if args.cuda_version:
                    toolset += ',cuda=' + args.cuda_version
                cmake_extra_args = [
                    '-A', 'x64', '-T', toolset, '-G', args.cmake_generator
                ]
            if args.enable_windows_store:
                cmake_extra_defines.append(
                    'CMAKE_TOOLCHAIN_FILE=' + os.path.join(
                        source_dir, 'cmake', 'store_toolchain.cmake'))
            if args.enable_wcos:
                cmake_extra_defines.append('CMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')
        elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
            cmake_extra_args += ['-G', args.cmake_generator]
        elif is_macOS():
            if args.use_xcode:
                cmake_extra_args += ['-G', 'Xcode']
            if not args.ios and not args.android and \
                    args.osx_arch == 'arm64' and platform.machine() == 'x86_64':
                if args.test:
                    log.warning(
                        "Cannot test ARM64 build on X86_64. Will skip test running after build.")
                    args.test = False

        if args.build_wasm:
            # Install and activate the pinned Emscripten SDK before generating.
            emsdk_version = args.emsdk_version
            emsdk_dir = os.path.join(source_dir, "cmake", "external", "emsdk")
            emsdk_file = os.path.join(emsdk_dir, "emsdk.bat") if is_windows() else os.path.join(emsdk_dir, "emsdk")

            log.info("Installing emsdk...")
            run_subprocess([emsdk_file, "install", emsdk_version], cwd=emsdk_dir)
            log.info("Activating emsdk...")
            run_subprocess([emsdk_file, "activate", emsdk_version], cwd=emsdk_dir)

        if (args.android or args.ios or args.enable_windows_store or args.build_wasm
                or is_cross_compiling_on_apple(args)) and args.path_to_protoc_exe is None:
            # Cross-compiling for Android, iOS, and WebAssembly
            path_to_protoc_exe = build_protoc_for_host(
                cmake_path, source_dir, build_dir, args)

        if is_ubuntu_1604():
            if (args.arm or args.arm64):
                raise BuildError(
                    "Only Windows ARM(64) cross-compiled builds supported "
                    "currently through this script")
            if not is_docker() and not args.use_acl and not args.use_armnn:
                install_python_deps()

        if args.enable_pybind and is_windows():
            install_python_deps(args.numpy_version)

        if args.enable_onnx_tests:
            setup_test_data(build_dir, configs)

        if args.use_cuda and args.cuda_version is None:
            if is_windows():
                # cuda_version is used while generating version_info.py on Windows.
                raise BuildError("cuda_version must be specified on Windows.")
            else:
                args.cuda_version = ""
        if args.use_rocm and args.rocm_version is None:
            args.rocm_version = ""

        if args.build_eager_mode:
            eager_root_dir = os.path.join(source_dir, "orttraining", "orttraining", "eager")
            if args.eager_customop_module and not args.eager_customop_header:
                raise Exception('eager_customop_header must be provided when eager_customop_module is')
            elif args.eager_customop_header and not args.eager_customop_module:
                raise Exception('eager_customop_module must be provided when eager_customop_header is')

            def gen_ops(gen_cpp_name: str, header_file: str, ops_module: str, custom_ops: bool):
                """Run opgen.py and replace the output file only when it changed,
                so unchanged generated sources don't trigger rebuilds."""
                gen_cpp_scratch_name = gen_cpp_name + '.working'
                # BUGFIX: the closing paren and separator were misplaced in the
                # original message ('...header_file: X,ops_module: Y), custom_ops: Z').
                print(f'Generating ORT ATen overrides (output_file: {gen_cpp_name}, header_file: {header_file}, '
                      f'ops_module: {ops_module}, custom_ops: {custom_ops})')

                cmd = [sys.executable, os.path.join(eager_root_dir, 'opgen', 'opgen.py'),
                       '--output_file', gen_cpp_scratch_name,
                       '--ops_module', ops_module,
                       '--header_file', header_file]

                if custom_ops:
                    cmd += ["--custom_ops"]

                subprocess.check_call(cmd)

                import filecmp
                # Keep the old file (and its mtime) when the regenerated content
                # is byte-identical.
                if (not os.path.isfile(gen_cpp_name) or
                   not filecmp.cmp(gen_cpp_name, gen_cpp_scratch_name, shallow=False)):
                    os.rename(gen_cpp_scratch_name, gen_cpp_name)
                else:
                    os.remove(gen_cpp_scratch_name)

            def gen_ort_ops():
                # generate native aten ops
                import torch
                regdecs_path = os.path.join(os.path.dirname(torch.__file__), 'include/ATen/RegistrationDeclarations.h')

                ops_module = os.path.join(eager_root_dir, 'opgen/opgen/atenops.py')
                gen_ops(os.path.join(eager_root_dir, 'ort_aten.g.cpp'), regdecs_path, ops_module, False)

                # generate custom ops
                if not args.eager_customop_header:
                    args.eager_customop_header = os.path.realpath(os.path.join(
                        eager_root_dir,
                        "opgen",
                        "CustomOpDeclarations.h"))

                if not args.eager_customop_module:
                    args.eager_customop_module = os.path.join(eager_root_dir, 'opgen/opgen/custom_ops.py')

                gen_ops(os.path.join(eager_root_dir, 'ort_customops.g.cpp'),
                        args.eager_customop_header, args.eager_customop_module, True)

            gen_ort_ops()
        if args.enable_external_custom_op_schemas and not is_linux():
            raise BuildError("Registering external custom op schemas is only supported on Linux.")

        generate_build_tree(
            cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home, mpi_home, nccl_home,
            tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
            path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args)

    if args.clean:
        clean_targets(cmake_path, build_dir, configs)

    # if using DML, perform initial nuget package restore
    setup_dml_build(args, cmake_path, build_dir, configs)

    if args.build:
        if args.parallel < 0:
            raise BuildError("Invalid parallel job count: {}".format(args.parallel))
        # 0 means "use all available cores".
        num_parallel_jobs = os.cpu_count() if args.parallel == 0 else args.parallel
        build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, args.target)

    if args.test:
        run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)

        if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
            nuphar_run_python_tests(build_dir, configs)

        if args.enable_pybind and not args.skip_onnx_tests and args.use_stvm:
            stvm_run_python_tests(build_dir, configs)

        # run node.js binding tests
        if args.build_nodejs and not args.skip_nodejs_tests:
            nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "js", "node"))
            run_nodejs_tests(nodejs_binding_dir)

    # Build packages after running the tests.
    # NOTE: if you have a test that rely on a file which only get copied/generated during packaging step, it could
    # fail unexpectedly. Similar, if your packaging step forgot to copy a file into the package, we don't know it
    # either.
    if args.build:
        if args.build_wheel:
            nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
            default_training_package_device = bool(os.getenv('DEFAULT_TRAINING_PACKAGE_DEVICE') == '1')
            build_python_wheel(
                source_dir,
                build_dir,
                configs,
                args.use_cuda,
                args.cuda_version,
                args.use_rocm,
                args.rocm_version,
                args.use_dnnl,
                args.use_tensorrt,
                args.use_openvino,
                args.use_nuphar,
                args.use_stvm,
                args.use_vitisai,
                args.use_acl,
                args.use_armnn,
                args.use_dml,
                args.wheel_name_suffix,
                args.enable_training,
                nightly_build=nightly_build,
                default_training_package_device=default_training_package_device,
                use_ninja=(args.cmake_generator == 'Ninja'),
                build_eager_mode=args.build_eager_mode
            )
        if args.build_nuget:
            build_nuget_package(
                source_dir,
                build_dir,
                configs,
                args.use_cuda,
                args.use_openvino,
                args.use_tensorrt,
                args.use_dnnl,
                args.use_nuphar,
                args.use_stvm,
                args.use_winml,
            )

    if args.test and args.build_nuget:
        run_csharp_tests(
            source_dir,
            build_dir,
            args.use_cuda,
            args.use_openvino,
            args.use_tensorrt,
            args.use_dnnl)

    if args.gen_doc and (args.build or args.test):
        generate_documentation(source_dir, build_dir, configs, args.gen_doc == 'validate')

    if args.gen_api_doc and (args.build or args.test):
        print('Generating Python doc for ORTModule...')
        docbuild_dir = os.path.join(source_dir, 'tools', 'doc')
        run_subprocess(['bash', 'builddoc.sh', os.path.dirname(sys.executable),
                        source_dir, build_dir, args.config[0]], cwd=docbuild_dir)

    log.info("Build complete")