public String dumpConfigurations()

in frontend/server/src/main/java/org/pytorch/serve/util/ConfigManager.java [575:637]


    /**
     * Builds a human-readable, multi-line summary of the server's effective
     * configuration: version, key directories, GPU/CPU counts, heap limit,
     * listener addresses, size limits, and feature flags.
     *
     * <p>Values that may legitimately be absent (python executable, model
     * store, initial models, workflow store) are rendered as {@code "N/A"}.
     *
     * @return a newline-separated report of all key configuration values
     */
    public String dumpConfigurations() {
        Runtime runtime = Runtime.getRuntime();
        // Call the getter once instead of twice (once for the null check,
        // once for the value) — avoids redundant work and a check/use race.
        String pythonExecutable = getPythonExecutable();
        return "\nTorchserve version: "
                + prop.getProperty(VERSION)
                + "\nTS Home: "
                + getModelServerHome()
                + "\nCurrent directory: "
                + getCanonicalPath(".")
                + "\nTemp directory: "
                + System.getProperty("java.io.tmpdir")
                + "\nNumber of GPUs: "
                + getNumberOfGpu()
                + "\nNumber of CPUs: "
                + runtime.availableProcessors()
                + "\nMax heap size: "
                + (runtime.maxMemory() / 1024 / 1024)
                + " M\nPython executable: "
                + orNA(pythonExecutable)
                + "\nConfig file: "
                + prop.getProperty("tsConfigFile", "N/A")
                + "\nInference address: "
                + getListener(ConnectorType.INFERENCE_CONNECTOR)
                + "\nManagement address: "
                + getListener(ConnectorType.MANAGEMENT_CONNECTOR)
                + "\nMetrics address: "
                + getListener(ConnectorType.METRICS_CONNECTOR)
                + "\nModel Store: "
                + orNA(getModelStore())
                + "\nInitial Models: "
                + orNA(getLoadModels())
                + "\nLog dir: "
                + getCanonicalPath(System.getProperty("LOG_LOCATION"))
                + "\nMetrics dir: "
                + getCanonicalPath(System.getProperty("METRICS_LOCATION"))
                + "\nNetty threads: "
                + getNettyThreads()
                + "\nNetty client threads: "
                + getNettyClientThreads()
                + "\nDefault workers per model: "
                + getDefaultWorkers()
                + "\nBlacklist Regex: "
                + prop.getProperty(TS_BLACKLIST_ENV_VARS, "N/A")
                + "\nMaximum Response Size: "
                + prop.getProperty(TS_MAX_RESPONSE_SIZE, "6553500")
                + "\nMaximum Request Size: "
                + prop.getProperty(TS_MAX_REQUEST_SIZE, "6553500")
                + "\nLimit Maximum Image Pixels: "
                + prop.getProperty(TS_LIMIT_MAX_IMAGE_PIXELS, "true")
                + "\nPrefer direct buffer: "
                + prop.getProperty(TS_PREFER_DIRECT_BUFFER, "false")
                + "\nAllowed Urls: "
                + getAllowedUrls()
                + "\nCustom python dependency for model allowed: "
                + prop.getProperty(TS_INSTALL_PY_DEP_PER_MODEL, "false")
                + "\nMetrics report format: "
                + prop.getProperty(TS_METRICS_FORMAT, METRIC_FORMAT_PROMETHEUS)
                + "\nEnable metrics API: "
                + prop.getProperty(TS_ENABLE_METRICS_API, "true")
                + "\nWorkflow Store: "
                + orNA(getWorkflowStore())
                + "\nModel config: "
                + prop.getProperty(MODEL_CONFIG, "N/A");
    }

    /**
     * Renders an optional configuration value for the report.
     *
     * @param value the value to render, possibly {@code null}
     * @return {@code "N/A"} when {@code value} is null, otherwise its string form
     */
    private static String orNA(Object value) {
        return value == null ? "N/A" : value.toString();
    }