init()

in Sources/TensorFlow/Core/Runtime.swift [207:308]


  init() {
    configureRuntimeFromEnvironment()

    // Suppress TensorFlow logging, unless the user specified a log level.
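    // TF_CPP_MIN_LOG_LEVEL=3 filters out INFO, WARNING, and ERROR messages from the C++
    // runtime; the trailing 0 tells setenv not to overwrite a value the user already exported.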
    setenv("TF_CPP_MIN_LOG_LEVEL", "3", /*override*/ 0)

    debugLog("Initializing global context.")

    // Initialize the TF runtime exactly once. Only affects local execution
    // (i.e. when `_RuntimeConfig.session` is not configured with a remote server).
    if !_RuntimeConfig.tensorFlowRuntimeInitialized {
      // Install a signal handler to ensure we exit when interrupted.
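      // Note: the handler must not capture any state, since `signal` expects a C
      // function pointer and only non-capturing Swift closures convert to one.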
      signal(SIGINT) { _ in
        print("Caught interrupt signal, exiting...")
        exit(1)
      }

      var args = ["dummyProgramName"]
      if _RuntimeConfig.printsDebugLog {
        args.append("--alsologtostderr")
      }
      if _RuntimeConfig.tensorflowVerboseLogLevel > 0 {
        args.append("--v=\(_RuntimeConfig.tensorflowVerboseLogLevel)")
      }
      // Collect all the strings' utf8 bytes into a single array so that we can
      // address all the strings with a single `flattenedStringBytes.withUnsafeBufferPointer`.
      var flattenedStringBytes: [Int8] = []
      var lengths: [Int] = []
      for arg in args {
        let bytes = arg.utf8CString
        flattenedStringBytes.append(contentsOf: bytes)
        lengths.append(bytes.count)
      }

      // Calculate the addresses of all the strings within our single buffer, and then call
      // TF_InitMain.
      flattenedStringBytes.withUnsafeBufferPointer { buffer in
        var stringAddrs: [UnsafeMutablePointer<Int8>?] = []
        var currentStringAddr = buffer.baseAddress
          .map(UnsafeMutablePointer.init)
        for length in lengths {
          stringAddrs.append(currentStringAddr)
          currentStringAddr = currentStringAddr?.advanced(by: length)
        }

        // Pass the reconstructed argc/argv to TF_InitMain.
        stringAddrs.withUnsafeMutableBufferPointer { stringAddrsBuffer in
          var cArgsCount = Int32(args.count)
          var cArgs = stringAddrsBuffer.baseAddress
          TF_InitMain(nil, &cArgsCount, &cArgs)
        }
      }
      _RuntimeConfig.tensorFlowRuntimeInitialized = true
    }

    guard let opts = TFE_NewContextOptions() else {
      fatalError("ContextOptions object can never be nil.")
    }

    // Create TF config object.
    if _RuntimeConfig.gpuMemoryAllowGrowth {
      debugLog("Allowing growth for GPU memory allocator.")
    }
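    // TF_CreateConfig (from TensorFlow's experimental C API) returns a TF_Buffer holding a
    // serialized ConfigProto; its bytes are handed to the context options below.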
    self.tensorFlowConfig = TF_CreateConfig(
      /* enable_xla_compilation */0,
      _RuntimeConfig.gpuMemoryAllowGrowth ? 1 : 0,
      _RuntimeConfig.cpuDeviceCount)
    TFE_ContextOptionsSetConfig(
      opts,
      tensorFlowConfig.pointee.data,
      tensorFlowConfig.pointee.length,
      status)
    checkOk(status)

    let ctx = TFE_NewContext(opts, status)
    checkOk(status)
    self.eagerContext = ctx!
    TFE_DeleteContextOptions(opts)
    checkOk(status)

    #if !os(Windows)
    if case .remote(let serverDef) = _RuntimeConfig.session {
      debugLog("Setting up the server def to \(serverDef)...")	
      serverDef.utf8CString.withUnsafeBufferPointer { ptr in
        TFE_ContextSetServerDef(
          eagerContext, /*keep_alive_secs*/ 0, ptr.baseAddress,
          serverDef.utf8CString.count, status)
        checkOk(status)
      }
    }
    #endif

    let devices = TFE_ContextListDevices(eagerContext, status)
    checkOk(status)
    defer { TF_DeleteDeviceList(devices!) }

    let deviceCount = TF_DeviceListCount(devices!)
    debugLog("There are \(deviceCount) devices.")
    for deviceId in 0..<deviceCount {
      let cDeviceName = TF_DeviceListName(devices, deviceId, status)
      checkOk(status)
      let deviceName = String(cString: cDeviceName!)
      let cDeviceType = TF_DeviceListType(devices, deviceId, status)
      checkOk(status)
      let deviceType = String(cString: cDeviceType!)
      debugLog("Device \(deviceId) has type \(deviceType) and name \(deviceName).")
      deviceNames.append(deviceName)
    }
  }
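
For reference, the argv-flattening technique used above packs every argument's NUL-terminated UTF-8 bytes into one contiguous buffer and then derives `char *`-style addresses by offsetting into it. The standalone sketch below is illustrative only: the `demoArgv` helper and the sample arguments are not part of the runtime, and it prints the reconstructed arguments instead of calling `TF_InitMain`.

/// Packs `args` into one contiguous buffer and rebuilds per-argument pointers,
/// mirroring the initializer above. Illustrative only.
func demoArgv(_ args: [String]) {
  // `CChar` is `Int8` on most platforms; the buffer holds all arguments back to back.
  var flattenedStringBytes: [CChar] = []
  var lengths: [Int] = []
  for arg in args {
    let bytes = arg.utf8CString          // includes the trailing NUL
    flattenedStringBytes.append(contentsOf: bytes)
    lengths.append(bytes.count)
  }
  flattenedStringBytes.withUnsafeBufferPointer { buffer in
    var stringAddrs: [UnsafePointer<CChar>?] = []
    var currentStringAddr = buffer.baseAddress
    for length in lengths {
      stringAddrs.append(currentStringAddr)
      currentStringAddr = currentStringAddr?.advanced(by: length)
    }
    // Each address now points at one NUL-terminated argument.
    for addr in stringAddrs {
      print(String(cString: addr!))
    }
  }
}

demoArgv(["dummyProgramName", "--alsologtostderr", "--v=2"])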