void PerfLogThread::loop()

in watchman/PerfSample.cpp [178:299]


// Background thread body: waits for buffered perf samples and forwards them
// to the externally-configured "perf_logger_command", either as argv
// arguments (batched) or via the child's stdin, depending on which callback
// processSamples() invokes.
// noexcept: this is a thread entry point — an escaping exception would call
// std::terminate, so the contract is that nothing propagates out.
void PerfLogThread::loop() noexcept {
  json_ref samples;
  json_ref perf_cmd;
  int64_t sample_batch;

  w_set_thread_name("perflog");

  // Directory containing the state file; exported to the child process as
  // WATCHMAN_STATE_DIR below.
  auto stateDir =
      w_string_piece(flags.watchman_state_file).dirName().asWString();

  // "perf_logger_command" may be configured as either a single string or an
  // array of strings; normalize to an array so it can be treated uniformly
  // as an argv prefix.
  perf_cmd = cfg_get_json("perf_logger_command");
  if (perf_cmd.isString()) {
    perf_cmd = json_array({perf_cmd});
  }
  if (!perf_cmd.isArray()) {
    // NOTE(review): logf(FATAL, ...) presumably does not return (terminates
    // the process) — confirm; otherwise perf_cmd is used below unchecked.
    logf(
        FATAL,
        "perf_logger_command must be either a string or an array of strings\n");
  }

  // Upper bound on how many samples are passed to a single logger invocation.
  sample_batch = cfg_get_int("perf_logger_command_max_samples_per_call", 4);

  while (true) {
    {
      // Scoped lock: hold state_ only long enough to claim the pending
      // samples; it is released before any child processes are spawned.
      auto state = state_.lock();
      while (true) {
        if (state->samples) {
          // We found samples to process
          break;
        }
        if (!state->running) {
          // No samples remaining, and we have been asked to quit.
          return;
        }
        // Sleep until addSample()/shutdown signals; loop re-checks the
        // predicates to tolerate spurious wakeups.
        cond_.wait(state.as_lock());
      }

      // Claim the accumulated samples and leave an empty slot behind so the
      // producer can keep appending while we process this batch.
      samples = nullptr;
      std::swap(samples, state->samples);
    }

    if (samples) {
      // Hack: Divide by two because this limit includes environment variables
      // and perf_cmd.
      // It's possible to compute this correctly on every platform given the
      // current environment and any specified environment variables, but it's
      // fine to be conservative here.
      const size_t argv_limit = ChildProcess::getArgMax() / 2;

      processSamples(
          argv_limit,
          sample_batch,
          samples,
          // Callback #1: pass serialized samples as command-line arguments
          // appended after the configured perf_cmd argv prefix.
          [&](std::vector<std::string> sample_args) {
            std::vector<std::string_view> cmd;
            cmd.reserve(perf_cmd.array().size() + sample_args.size());

            for (auto& c : perf_cmd.array()) {
              cmd.push_back(json_to_w_string(c).view());
            }
            for (auto& sample : sample_args) {
              // string_views point into sample_args, which outlives the
              // ChildProcess spawn below.
              cmd.push_back(sample);
            }

            ChildProcess::Options opts;
            opts.environment().set(
                {{"WATCHMAN_STATE_DIR", stateDir},
                 {"WATCHMAN_SOCK", get_sock_name_legacy()}});
            // Detach the child from our stdio: no input, and discard output.
            opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666);
            opts.open(STDOUT_FILENO, "/dev/null", O_WRONLY, 0666);
            opts.open(STDERR_FILENO, "/dev/null", O_WRONLY, 0666);

            try {
              ChildProcess proc(cmd, std::move(opts));
              proc.wait();
            } catch (const std::exception& exc) {
              // Best-effort logging: a failed logger must not kill this
              // thread (which is noexcept).
              log(ERR, "failed to spawn perf logger: ", exc.what(), "\n");
            }
          },
          // Callback #2: stream the serialized samples to the logger via its
          // stdin instead of argv (used when the data exceeds argv limits).
          [&](std::string sample_stdin) {
            ChildProcess::Options opts;
            opts.environment().set(
                {{"WATCHMAN_STATE_DIR", stateDir},
                 {"WATCHMAN_SOCK", get_sock_name_legacy()}});
            opts.pipeStdin();
            opts.open(STDOUT_FILENO, "/dev/null", O_WRONLY, 0666);
            opts.open(STDERR_FILENO, "/dev/null", O_WRONLY, 0666);

            try {
              ChildProcess proc({perf_cmd}, std::move(opts));
              auto stdinPipe = proc.takeStdin();

              try {
                const char* data = sample_stdin.data();
                size_t size = sample_stdin.size();

                // Handle partial writes: keep writing the remaining span
                // until the whole payload has been delivered.
                size_t total_written = 0;
                while (total_written < sample_stdin.size()) {
                  auto result = stdinPipe->write.write(data, size);
                  result.throwIfError();
                  auto written = result.value();
                  data += written;
                  size -= written;
                  total_written += written;
                }
              } catch (const std::exception& exc) {
                // A broken pipe (e.g. the logger exited early) is logged but
                // not fatal; we still wait() on the child below.
                log(ERR,
                    "failed to send data to perf logger: ",
                    exc.what(),
                    "\n");
              }

              // close stdin to allow the process to terminate
              stdinPipe.reset();
              proc.wait();
            } catch (const std::exception& exc) {
              log(ERR, "failed to spawn perf logger: ", exc.what(), "\n");
            }
          });
    }
  }
}