bool executeTasks(const KeyType& buildKey)

in lib/Core/BuildEngine.cpp [710:1054]


  bool executeTasks(const KeyType& buildKey) {
    // Clear any previous build state
    finishedInputRequests.clear();

    // Push a dummy input request for the rule to build.
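    // (The aggregate fields here appear to be: taskInfo = nullptr, inputID = 0,
    // inputRuleInfo, orderOnly = false, forcePriorValue = false; the null
    // taskInfo is what marks this as a dummy request below.)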
    inputRequests.push_back({ nullptr, 0, &getRuleInfoForKey(buildKey), false, false });

    // Process requests as long as we have work to do.
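    // Each pass drains, in order: rule scan requests, input requests,
    // finished input requests, ready tasks, and finished tasks; the loop exits
    // once a full pass does no work and no tasks remain outstanding.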
    while (true) {
      bool didWork = false;

      // Cancel the build, if requested.
      if (buildCancelled) {
        // Force completion of all outstanding tasks.
        cancelRemainingTasks();
        return false;
      }

      // Process all of the pending rule scan requests.
      //
      // FIXME: We don't want to process all of these requests, this amounts to
      // doing all of the dependency scanning up-front.
      while (!ruleInfosToScan.empty()) {
        TracingEngineQueueItemEvent i(EngineQueueItemKind::RuleToScan, buildKey.c_str());

        didWork = true;

        auto request = ruleInfosToScan.back();
        ruleInfosToScan.pop_back();

        processRuleScanRequest(request);
      }

      // Process all of the pending input requests.
      while (true) {
        TracingEngineQueueItemEvent i(EngineQueueItemKind::InputRequest, buildKey.c_str());

        TaskInputRequest request;
        bool found = false;
        {
          std::lock_guard<std::mutex> guard(inputRequestsMutex);
          if (!inputRequests.empty()) {
            // IMPORTANT: Dependency recording below relies on the assumption
            // that we will process input requests in FIFO order. DO NOT CHANGE
            // this without adjusting for this expectation.
            request = inputRequests.front();
            inputRequests.pop_front();
            found = true;
          }
        }
        if (!found)
          break;

        didWork = true;

        if (trace) {
          if (request.taskInfo) {
            trace->handlingTaskInputRequest(request.taskInfo->task.get(),
                                            request.inputRuleInfo->rule.get());
          } else {
            trace->handlingBuildInputRequest(request.inputRuleInfo->rule.get());
          }
        }

        // Request the input rule be scanned.
        bool isScanned = scanRule(*request.inputRuleInfo);

        // If the rule is not yet scanned, suspend this input request.
        if (!isScanned) {
          assert(request.inputRuleInfo->isScanning());
          if (trace)
            trace->pausedInputRequestForRuleScan(
              request.inputRuleInfo->rule.get());
          request.inputRuleInfo->getPendingScanRecord()
            ->pausedInputRequests.push_back(request);
          continue;
        }

        // Request the input rule be computed.
        bool isAvailable = demandRule(*request.inputRuleInfo);

        // If this is a dummy input request, we are done.
        if (!request.taskInfo)
          continue;

        // Update the recorded dependencies of this task.
        //
        // FIXME: This is very performance critical and should be highly
        // optimized. By itself, this addition added about 25% user time to the
        // "ack 3 16" experiment.
        //
        // There are multiple options for when to record these dependencies:
        // 1. Record at the time it is requested.
        // 2. Record at the time it is popped off the input request queue.
        // 3. Record at the time the input is supplied.
        //
        // Here we have chosen option 2 for two reasons. First, we want to avoid
        // the need to synchronize concurrent access to the rule info data
        // structure, so we want to perform this work on the engine thread
        // (which rules out option 1).
        //
        // Second, we explicitly wish to record dependencies in the order in
        // which they have been requested by the task/client. This ensures that
        // dependencies will be recorded in a deterministic order that is under
        // client control. This can be important for performance in subsequent
        // incremental builds, where scanning order is important for discovering
        // work.
        //
        // NOTE: In order to achieve the latter, we rely on processing input
        // requests in FIFO order.
        //
        request.taskInfo->forRuleInfo->result.dependencies.push_back(
            request.inputRuleInfo->keyID, request.orderOnly);


        // If the rule is already available, enqueue the finalize request.
        if (isAvailable) {
          if (trace)
            trace->readyingTaskInputRequest(request.taskInfo->task.get(),
                                            request.inputRuleInfo->rule.get());
          finishedInputRequests.push_back(request);
        } else {
          // Otherwise, record the pending input request.
          assert(request.inputRuleInfo->getPendingTaskInfo());
          if (trace)
            trace->addedRulePendingTask(request.inputRuleInfo->rule.get(),
                                        request.taskInfo->task.get());
          request.inputRuleInfo->getPendingTaskInfo()->requestedBy.push_back(
            request);
        }
      }

      // Process all of the finished inputs.
      while (!finishedInputRequests.empty()) {
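        // Note: this queue is drained LIFO (back/pop_back), unlike the
        // inputRequests queue above, which must be processed FIFO.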
        TracingEngineQueueItemEvent i(EngineQueueItemKind::FinishedInputRequest, buildKey.c_str());

        didWork = true;

        auto request = finishedInputRequests.back();
        finishedInputRequests.pop_back();

        if (trace)
          trace->completedTaskInputRequest(request.taskInfo->task.get(),
                                           request.inputRuleInfo->rule.get());

        // We are processing a regular input dependency.

        // Provide the requesting task with the input.
        //
        // FIXME: Should we provide the input key here? We have it available
        // cheaply.
        assert(request.inputRuleInfo->isComplete(this) || request.forcePriorValue || request.orderOnly);
        if (request.orderOnly) {
          assert(request.inputID == kMustFollowInputID);
        } else {
          TracingEngineTaskCallback i(EngineTaskCallbackKind::ProvideValue, request.inputRuleInfo->keyID);
          TaskInterface iface{this, request.taskInfo->task.get()};
          request.taskInfo->task->provideValue(
              iface, request.inputID, request.inputRuleInfo->result.value);
        }

        // Decrement the wait count, and move to finish queue if necessary.
        decrementTaskWaitCount(request.taskInfo);
      }

      // Process all of the ready to run tasks.
      while (!readyTaskInfos.empty()) {
        TracingEngineQueueItemEvent i(EngineQueueItemKind::ReadyTask, buildKey.c_str());

        didWork = true;

        // Process ready tasks FIFO to preserve the relative order in which
        // they were requested by the tasks above.
        TaskInfo* taskInfo = readyTaskInfos.front();
        readyTaskInfos.pop_front();

        RuleInfo* ruleInfo = taskInfo->forRuleInfo;
        assert(taskInfo == ruleInfo->getPendingTaskInfo());

        if (trace)
            trace->readiedTask(taskInfo->task.get(), ruleInfo->rule.get());

        // Transition the rule state.
        ruleInfo->setComputing(this);

        // Inform the task its inputs are ready and it should finish.
        //
        // FIXME: We need to track this state, and generate an error if this
        // task ever requests additional inputs.
        {
          TracingEngineTaskCallback i(EngineTaskCallbackKind::InputsAvailable, ruleInfo->keyID);
          TaskInterface iface{this, taskInfo->task.get()};
          taskInfo->task->inputsAvailable(iface);
        }

        // Increment our count of outstanding tasks.
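        // (This count is balanced by the decrement in the finished-task
        // processing below, or by the early decrement on a database error.)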
        ++numOutstandingUnfinishedTasks;
      }

      // Process all of the finished tasks.
      while (true) {
        TracingEngineQueueItemEvent i(EngineQueueItemKind::FinishedTask, buildKey.c_str());

        // Try to take a task from the finished queue.
        TaskInfo* taskInfo = nullptr;
        {
          std::lock_guard<std::mutex> guard(finishedTaskInfosMutex);
          if (!finishedTaskInfos.empty()) {
            taskInfo = finishedTaskInfos.back();
            finishedTaskInfos.pop_back();
          }
        }
        if (!taskInfo)
          break;

        didWork = true;

        RuleInfo* ruleInfo = taskInfo->forRuleInfo;
        assert(taskInfo == ruleInfo->getPendingTaskInfo());

        // The task was changed if it was computed in the current iteration.
        if (trace) {
          bool wasChanged = ruleInfo->result.computedAt == currentEpoch;
          trace->finishedTask(taskInfo->task.get(), ruleInfo->rule.get(),
                              wasChanged);
        }

        // Transition the rule state by completing the rule (the value itself is
        // stored in the taskIsFinished call).
        assert(ruleInfo->state == RuleInfo::StateKind::InProgressComputing);
        ruleInfo->setPendingTaskInfo(nullptr);
        ruleInfo->setComplete(this);

        // Report the status change.
        ruleInfo->rule->updateStatus(buildEngine, Rule::StatusKind::IsComplete);

        // Add all of the task's discovered dependencies.
        //
        // FIXME: We could audit these dependencies at this point to verify that
        // they are not keys for rules which have not been run, which would
        // indicate an underspecified build (e.g., a generated header).
        ruleInfo->result.dependencies.append(taskInfo->discoveredDependencies);

        // Push back dummy input requests for any discovered dependencies,
        // which must at least be built in order to be brought up-to-date.
        //
        // FIXME: The need to do this makes it questionable whether we should
        // use this approach for discovered dependencies, instead of just
        // supporting request() even after the task has started computing and
        // from parallel contexts.
        {
          std::lock_guard<std::mutex> guard(inputRequestsMutex);
          for (auto keyIDAndFlag: taskInfo->discoveredDependencies) {
            inputRequests.push_back({ nullptr, 0, &getRuleInfoForKey(keyIDAndFlag.keyID), keyIDAndFlag.flag, false });
          }
        }

        // Update the database record, if attached.
        if (db) {
          std::string error;
          bool result = db->setRuleResult(
              ruleInfo->keyID, *ruleInfo->rule, ruleInfo->result, &error);
          if (!result) {
            delegate.error(error);

            // Decrement our count of outstanding tasks. This must be done prior
            // to the subsequent synchronous cancellation, since it will otherwise
            // deadlock waiting for the task we have already popped off the
            // queue. rdar://problem/50203529
            --numOutstandingUnfinishedTasks;

            cancelRemainingTasks();
            return false;
          }
        }

        // Wake up all of the pending scan requests.
        for (const auto& request: taskInfo->deferredScanRequests) {
          ruleInfosToScan.push_back(request);
        }

        // Push all pending input requests onto the work queue.
        if (trace) {
          for (auto& request: taskInfo->requestedBy) {
            trace->readyingTaskInputRequest(request.taskInfo->task.get(),
                                            request.inputRuleInfo->rule.get());
          }
        }
        finishedInputRequests.insert(finishedInputRequests.end(),
                                     taskInfo->requestedBy.begin(),
                                     taskInfo->requestedBy.end());

        // Decrement our count of outstanding tasks.
        --numOutstandingUnfinishedTasks;

        // Delete the pending task.
        {
          std::lock_guard<std::mutex> guard(taskInfosMutex);
          auto it = taskInfos.find(taskInfo->task.get());
          assert(it != taskInfos.end());
          taskInfos.erase(it);
        }
      }

      // If we haven't done any other work at this point but we have pending
      // tasks, we need to wait for a task to complete.
      //
      // NOTE: Cancellation also implements this process, if you modify this
      // code please also validate that \see cancelRemainingTasks() is still
      // correct.
      if (!didWork && numOutstandingUnfinishedTasks != 0) {
        TracingEngineQueueItemEvent i(EngineQueueItemKind::Waiting, buildKey.c_str());

        // Wait for our condition variable.
        std::unique_lock<std::mutex> lock(finishedTaskInfosMutex);

        // Ensure, under the protection of the mutex, that we still don't have
        // enqueued operations; if one has been added, then we may have already
        // missed the condition notification and cannot safely wait.
        if (finishedTaskInfos.empty()) {
          finishedTaskInfosCondition.wait(lock);
        }

        didWork = true;
      }

      if (!didWork) {
        // If there was no work to do, but we still have running tasks, then
        // we have found a cycle. Try to resolve it and continue.
        if (!taskInfos.empty()) {
          if (resolveCycle(buildKey)) {
            continue;
          } else {
            cancelRemainingTasks();
            return false;
          }
        }

        // We didn't do any work, and we have nothing more we can/need to do.
        break;
      }
    }

    return true;
  }
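
The wait near the bottom of the loop re-checks finishedTaskInfos while holding finishedTaskInfosMutex before blocking, so a notification sent between the last unlocked check and the wait cannot be silently missed. Below is a minimal, standalone sketch of that check-then-wait pattern using only the C++ standard library; the queue, worker, and function names are illustrative and not part of BuildEngine, and the sketch uses the conventional while-form of the wait, whereas the engine can use a single if because its outer loop re-checks for work anyway.

  #include <condition_variable>
  #include <mutex>
  #include <thread>
  #include <vector>

  static std::mutex finishedMutex;
  static std::condition_variable finishedCondition;
  static std::vector<int> finishedItems;

  // Producer side: publish an item under the lock, then notify.
  static void markFinished(int item) {
    {
      std::lock_guard<std::mutex> guard(finishedMutex);
      finishedItems.push_back(item);
    }
    finishedCondition.notify_one();
  }

  // Consumer side: take the lock, re-check the queue, and only wait while it
  // is still empty; if an item was published before we locked, the check sees
  // it and we never block on a notification that has already fired.
  static int waitForFinished() {
    std::unique_lock<std::mutex> lock(finishedMutex);
    while (finishedItems.empty())
      finishedCondition.wait(lock);
    int item = finishedItems.back();
    finishedItems.pop_back();
    return item;
  }

  int main() {
    std::thread producer([] { markFinished(42); });
    int item = waitForFinished();  // yields 42 once the producer has run
    producer.join();
    return (item == 42) ? 0 : 1;
  }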