in src/com/facebook/buck/cli/MainRunner.java [625:1552]
public ExitCode runMainWithExitCode(
WatchmanWatcher.FreshInstanceAction watchmanFreshInstanceAction,
long initTimestamp,
ImmutableList<String> unexpandedCommandLineArgs)
throws Exception {
// Set the initial exitCode value to FATAL; it will be reassigned later unless an exception
// is thrown first.
ExitCode exitCode = ExitCode.FATAL_GENERIC;
// Setup filesystem and buck config.
AbsPath canonicalRootPath = AbsPath.of(projectRoot.toRealPath()).normalize();
ImmutableMap<CellName, AbsPath> rootCellMapping = getCellMapping(canonicalRootPath);
ImmutableList<String> args =
BuckArgsMethods.expandAtFiles(unexpandedCommandLineArgs, rootCellMapping);
// Filter out things like --command-args-file from the argument lists that we log
ImmutableList<String> filteredUnexpandedArgsForLogging =
filterArgsForLogging(unexpandedCommandLineArgs);
ImmutableList<String> filteredArgsForLogging = filterArgsForLogging(args);
// Create the command object that the command line arguments will be parsed into
BuckCommand command = new BuckCommand();
command.setPluginManager(pluginManager);
// Parse the command line args.
AdditionalOptionsCmdLineParser cmdLineParser =
new AdditionalOptionsCmdLineParser(pluginManager, command);
try {
cmdLineParser.parseArgument(args);
} catch (CmdLineException e) {
throw new CommandLineException(e, e.getLocalizedMessage() + "\nFor help see 'buck --help'.");
}
// Return help strings fast if the command is a help request.
Optional<ExitCode> result = command.runHelp(printConsole.getStdOut());
if (result.isPresent()) {
return result.get();
}
// If this command is not read-only, acquire the command semaphore so that it becomes the only
// executing read/write command. Returning early here also avoids rotating the log on each BUSY
// status, which happens in setupLogging().
ImmutableList.Builder<String> previousCommandArgsBuilder = new ImmutableList.Builder<>();
try (CloseableWrapper<Semaphore> semaphore =
getSemaphoreWrapper(command, unexpandedCommandLineArgs, previousCommandArgsBuilder)) {
if (!command.isReadOnly() && semaphore == null) {
// buck_tool will set BUCK_BUSY_DISPLAYED if it already displayed the busy error
if (!clientEnvironment.containsKey("BUCK_BUSY_DISPLAYED")) {
String activeCommandLine = "buck " + String.join(" ", previousCommandArgsBuilder.build());
if (activeCommandLine.length() > COMMAND_PATH_MAX_LENGTH) {
String ending = "...";
activeCommandLine =
activeCommandLine.substring(0, COMMAND_PATH_MAX_LENGTH - ending.length()) + ending;
}
System.err.println(
String.format("Buck Daemon is busy executing '%s'.", activeCommandLine));
LOG.info(
"Buck server was busy executing '%s'. Maybe retrying later will help.",
activeCommandLine);
}
return ExitCode.BUSY;
}
// Statically configure the Buck logging environment based on the Buck config (usually
// buck-x.log files)
setupLogging(command, filteredArgsForLogging);
ProjectFilesystemFactory projectFilesystemFactory = new DefaultProjectFilesystemFactory();
UnconfiguredBuildTargetViewFactory buildTargetFactory =
new ParsingUnconfiguredBuildTargetViewFactory();
Config currentConfig = setupDefaultConfig(rootCellMapping, command);
Config config;
ProjectFilesystem filesystem;
DefaultCellPathResolver cellPathResolver;
BuckConfig buckConfig;
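// Reuse the daemon's stored config only when the reuse-current-config property is set and a
// global state is already present; otherwise build a fresh config and filesystem from scratch.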
boolean reusePreviousConfig =
isReuseCurrentConfigPropertySet(command)
&& buckGlobalStateLifecycleManager.hasStoredBuckGlobalState();
if (reusePreviousConfig) {
printWarnMessage(UIMessagesFormatter.reuseConfigPropertyProvidedMessage());
buckConfig =
buckGlobalStateLifecycleManager
.getBuckConfig()
.orElseThrow(
() -> new IllegalStateException("Daemon is present but config is missing."));
config = buckConfig.getConfig();
filesystem = buckConfig.getFilesystem();
cellPathResolver = DefaultCellPathResolver.create(filesystem.getRootPath(), config);
Map<String, ConfigChange> configDiff = ConfigDifference.compare(config, currentConfig);
UIMessagesFormatter.reusedConfigWarning(configDiff).ifPresent(this::printWarnMessage);
} else {
config = currentConfig;
filesystem =
projectFilesystemFactory.createProjectFilesystem(
CanonicalCellName.rootCell(),
canonicalRootPath,
config,
BuckPaths.getBuckOutIncludeTargetConfigHashFromRootCellConfig(config));
cellPathResolver = DefaultCellPathResolver.create(filesystem.getRootPath(), config);
buckConfig =
new BuckConfig(
config,
filesystem,
architecture,
platform,
clientEnvironment,
buildTargetName ->
buildTargetFactory.create(
buildTargetName, cellPathResolver.getCellNameResolver()));
}
// Stored so that we can consult config settings when printing messages to users
parsedRootConfig = Optional.of(buckConfig);
CliConfig cliConfig = buckConfig.getView(CliConfig.class);
// If we are reusing the previous configuration, there is no need to warn about config overrides
if (!reusePreviousConfig) {
warnAboutConfigFileOverrides(filesystem.getRootPath(), cliConfig);
}
ImmutableSet<AbsPath> projectWatchList =
getProjectWatchList(canonicalRootPath, buckConfig, cellPathResolver);
Verbosity verbosity = VerbosityParser.parse(args);
// Setup the printConsole.
printConsole = makeCustomConsole(context, verbosity, buckConfig);
ExecutionEnvironment executionEnvironment =
new DefaultExecutionEnvironment(clientEnvironment, System.getProperties());
// Automatically use distributed build for supported repositories and users, unless
// Remote Execution is in use; RE builds must never use distributed build.
final boolean isRemoteExecutionBuild =
isRemoteExecutionBuild(command, buckConfig, executionEnvironment.getUsername());
Optional<String> projectPrefix = Optional.empty();
if (command.subcommand instanceof BuildCommand) {
BuildCommand subcommand = (BuildCommand) command.subcommand;
projectPrefix =
RemoteExecutionUtil.getCommonProjectPrefix(subcommand.getArguments(), buckConfig);
}
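// Derive the rule key configuration (including the core key) from the Buck config and the
// loaded modules.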
RuleKeyConfiguration ruleKeyConfiguration =
ConfigRuleKeyConfigurationFactory.create(buckConfig, moduleManager);
String previousBuckCoreKey;
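// For read/write commands, detect a change of the Buck core key (or a buck-out compat-link
// mismatch) and, if found, move version-dependent state to the trash and record the new key.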
if (!command.isReadOnly()) {
Optional<String> currentBuckCoreKey =
filesystem.readFileIfItExists(filesystem.getBuckPaths().getCurrentVersionFile());
BuckPaths unconfiguredPaths =
filesystem.getBuckPaths().withConfiguredBuckOut(filesystem.getBuckPaths().getBuckOut());
previousBuckCoreKey = currentBuckCoreKey.orElse("<NOT_FOUND>");
if (!currentBuckCoreKey.isPresent()
|| !currentBuckCoreKey.get().equals(ruleKeyConfiguration.getCoreKey())
|| (filesystem.exists(unconfiguredPaths.getGenDir(), LinkOption.NOFOLLOW_LINKS)
&& (filesystem.isSymLink(unconfiguredPaths.getGenDir())
^ buckConfig.getView(BuildBuckConfig.class).getBuckOutCompatLink()))) {
// Migrate any version-dependent directories (which might be huge) to a trash directory
// so we can delete them asynchronously after the command is done.
moveToTrash(
filesystem,
printConsole,
buildId,
filesystem.getBuckPaths().getAnnotationDir(),
filesystem.getBuckPaths().getGenDir(),
filesystem.getBuckPaths().getScratchDir(),
filesystem.getBuckPaths().getResDir());
filesystem.mkdirs(filesystem.getBuckPaths().getCurrentVersionFile().getParent());
filesystem.writeContentsToPath(
ruleKeyConfiguration.getCoreKey(), filesystem.getBuckPaths().getCurrentVersionFile());
}
} else {
previousBuckCoreKey = "";
}
LOG.verbose("Buck core key from the previous Buck instance: %s", previousBuckCoreKey);
ProcessExecutor processExecutor = new DefaultProcessExecutor(printConsole);
SandboxExecutionStrategyFactory sandboxExecutionStrategyFactory =
new PlatformSandboxExecutionStrategyFactory();
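// Choose a clock: if the daemon launch time in nanos is available, use a clock adjusted to
// that epoch; otherwise fall back to the default clock.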
Clock clock;
boolean enableThreadCpuTime =
buckConfig.getBooleanValue("build", "enable_thread_cpu_time", true);
if (BUCKD_LAUNCH_TIME_NANOS.isPresent()) {
long nanosEpoch = Long.parseLong(BUCKD_LAUNCH_TIME_NANOS.get(), 10);
LOG.verbose("Using nanos epoch: %d", nanosEpoch);
clock = new NanosAdjustedClock(nanosEpoch, enableThreadCpuTime);
} else {
clock = new DefaultClock(enableThreadCpuTime);
}
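// Read the parser config and build the Watchman client that watches the project roots.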
ParserConfig parserConfig = buckConfig.getView(ParserConfig.class);
Watchman watchman =
buildWatchman(
context, parserConfig, projectWatchList, clientEnvironment, printConsole, clock);
ImmutableList<ConfigurationRuleDescription<?, ?>> knownConfigurationDescriptions =
PluginBasedKnownConfigurationDescriptionsFactory.createFromPlugins(pluginManager);
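// Resolve the root cell path mapping and the requested target/host configurations from the
// command line and config.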
DefaultCellPathResolver rootCellCellPathResolver =
DefaultCellPathResolver.create(filesystem.getRootPath(), buckConfig.getConfig());
TargetConfigurationFactory targetConfigurationFactory =
new TargetConfigurationFactory(buildTargetFactory, cellPathResolver);
Optional<TargetConfiguration> targetConfiguration =
createTargetConfiguration(command, buckConfig, targetConfigurationFactory);
Optional<TargetConfiguration> hostConfiguration =
createHostConfiguration(command, buckConfig, targetConfigurationFactory);
// NOTE: This new KnownUserDefinedRuleTypes is only used if BuckGlobals needs to be invalidated.
// Otherwise, everything should use the KnownUserDefinedRuleTypes object from BuckGlobals.
KnownRuleTypesProvider knownRuleTypesProvider =
new KnownRuleTypesProvider(
knownRuleTypesFactoryFactory.create(
processExecutor,
pluginManager,
sandboxExecutionStrategyFactory,
knownConfigurationDescriptions));
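// Discover executables and toolchains, then construct the cell structure rooted at the
// project filesystem.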
ExecutableFinder executableFinder = new ExecutableFinder();
ToolchainProviderFactory toolchainProviderFactory =
new DefaultToolchainProviderFactory(
pluginManager, clientEnvironment, processExecutor, executableFinder);
Cells cells =
new Cells(
LocalCellProviderFactory.create(
filesystem,
buckConfig,
command.getConfigOverrides(rootCellMapping),
rootCellCellPathResolver.getPathMapping(),
rootCellCellPathResolver,
moduleManager,
toolchainProviderFactory,
projectFilesystemFactory,
buildTargetFactory)
.getCellByPath(filesystem.getRootPath()));
TargetConfigurationSerializer targetConfigurationSerializer =
new JsonTargetConfigurationSerializer(
targetName ->
buildTargetFactory.create(targetName, cells.getRootCell().getCellNameResolver()));
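// Obtain (or create) the daemon-wide global state, and record whether it was reused or
// invalidated for this command.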
Pair<BuckGlobalState, LifecycleStatus> buckGlobalStateRequest =
buckGlobalStateLifecycleManager.getBuckGlobalState(
cells,
knownRuleTypesProvider,
watchman,
printConsole,
clock,
buildTargetFactory,
targetConfigurationSerializer);
BuckGlobalState buckGlobalState = buckGlobalStateRequest.getFirst();
LifecycleStatus stateLifecycleStatus = buckGlobalStateRequest.getSecond();
if (!context.isPresent()) {
// Clean up the trash on a background thread if this was a
// non-buckd read-write command. (We don't bother waiting
// for it to complete; the thread is a daemon thread which
// will just be terminated at shutdown time.)
TRASH_CLEANER.startCleaningDirectory(
filesystem.resolve(filesystem.getBuckPaths().getTrashDir()));
}
ImmutableList<BuckEventListener> eventListeners = ImmutableList.of();
ImmutableList.Builder<ProjectFileHashCache> allCaches = ImmutableList.builder();
// Build up the hash cache, which is a collection of the stateful cell cache and some
// per-run caches.
//
// TODO(coneko, ruibm, agallagher): Determine whether we can use the existing filesystem
// object that is in scope instead of creating a new rootCellProjectFilesystem. The primary
// difference appears to be that filesystem is created with a Config that is used to produce
// ImmutableSet<PathMatcher> and BuckPaths for the ProjectFilesystem, whereas this one
// uses the defaults.
ProjectFilesystem rootCellProjectFilesystem =
projectFilesystemFactory.createOrThrow(
CanonicalCellName.rootCell(),
cells.getRootCell().getFilesystem().getRootPath(),
BuckPaths.getBuckOutIncludeTargetConfigHashFromRootCellConfig(config));
BuildBuckConfig buildBuckConfig =
cells.getRootCell().getBuckConfig().getView(BuildBuckConfig.class);
allCaches.addAll(buckGlobalState.getFileHashCaches());
cells
.getAllCells()
.forEach(
cell -> {
if (cell.getCanonicalName() != CanonicalCellName.rootCell()) {
allCaches.add(
DefaultFileHashCache.createBuckOutFileHashCache(
cell.getFilesystem(), buildBuckConfig.getFileHashCacheMode()));
}
});
// A cache of hashes for cell-relative paths that may have been ignored by the main cell
// cache; it only serves to prevent rehashing the same file multiple times in a single run.
allCaches.add(
DefaultFileHashCache.createDefaultFileHashCache(
rootCellProjectFilesystem, buildBuckConfig.getFileHashCacheMode()));
allCaches.addAll(
DefaultFileHashCache.createOsRootDirectoriesCaches(
projectFilesystemFactory, buildBuckConfig.getFileHashCacheMode()));
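// Stack the per-cell and per-run caches into a single file hash cache for this command.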
StackedFileHashCache fileHashCache = new StackedFileHashCache(allCaches.build());
stackedFileHashCache = Optional.of(fileHashCache);
Optional<WebServer> webServer = buckGlobalState.getWebServer();
ConcurrentMap<String, WorkerProcessPool> persistentWorkerPools =
buckGlobalState.getPersistentWorkerPools();
TestBuckConfig testConfig = buckConfig.getView(TestBuckConfig.class);
ArtifactCacheBuckConfig cacheBuckConfig = new ArtifactCacheBuckConfig(buckConfig);
SuperConsoleConfig superConsoleConfig = new SuperConsoleConfig(buckConfig);
// Eventually, we'll want to allow websocket and/or nailgun clients to specify a locale
// when connecting. For now, we'll use the default from the server environment.
Locale locale = Locale.getDefault();
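// Capture invocation metadata (build id, args, log directory, etc.) used for logging and
// diagnostics throughout the command.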
InvocationInfo invocationInfo =
InvocationInfo.of(
buildId,
superConsoleConfig.isEnabled(printConsole.getAnsi(), printConsole.getVerbosity()),
context.isPresent(),
command.getSubCommandNameForLogging(),
filteredArgsForLogging,
filteredUnexpandedArgsForLogging,
filesystem.getBuckPaths().getLogDir(),
isRemoteExecutionBuild,
cacheBuckConfig.getRepository(),
watchman.getVersion());
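// For remote execution builds, validate certificates up front and optionally attach the RE
// console event listener.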
RemoteExecutionConfig remoteExecutionConfig = buckConfig.getView(RemoteExecutionConfig.class);
if (isRemoteExecutionBuild) {
remoteExecutionConfig.validateCertificatesOrThrow();
}
Optional<RemoteExecutionEventListener> remoteExecutionListener =
remoteExecutionConfig.isConsoleEnabled()
? Optional.of(new RemoteExecutionEventListener())
: Optional.empty();
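// Build the metadata provider describing this build (user, repository, RE session labels,
// project prefix, etc.).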
MetadataProvider metadataProvider =
MetadataProviderFactory.minimalMetadataProviderForBuild(
buildId,
executionEnvironment.getUsername(),
cacheBuckConfig.getRepository(),
cacheBuckConfig.getScheduleType(),
remoteExecutionConfig.getReSessionLabel(),
remoteExecutionConfig.getTenantId(),
remoteExecutionConfig.getAuxiliaryBuildTag(),
projectPrefix.orElse(""),
executionEnvironment);
LogBuckConfig logBuckConfig = buckConfig.getView(LogBuckConfig.class);
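// Open the background task manager scope, map loggers to this command's threads, and create
// the build event bus; all of these are closed when the command finishes.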
try (TaskManagerCommandScope managerScope =
bgTaskManager.getNewScope(
buildId,
!context.isPresent()
|| cells
.getRootCell()
.getBuckConfig()
.getView(CliConfig.class)
.getFlushEventsBeforeExit());
GlobalStateManager.LoggerIsMappedToThreadScope loggerThreadMappingScope =
GlobalStateManager.singleton()
.setupLoggers(
invocationInfo,
printConsole.getStdErr(),
printConsole.getStdErr().getRawStream(),
verbosity);
DefaultBuckEventBus buildEventBus = new DefaultBuckEventBus(clock, buildId);
) {
BuckConfigWriter.writeConfig(
filesystem.getRootPath().getPath(), invocationInfo, buckConfig);
CommonThreadFactoryState commonThreadFactoryState =
GlobalStateManager.singleton().getThreadToCommandRegister();
Optional<Exception> exceptionForFix = Optional.empty();
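// Mirror console output into Buck's simple console log file for this invocation.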
Path simpleConsoleLog =
invocationInfo
.getLogDirectoryPath()
.resolve(BuckConstant.BUCK_SIMPLE_CONSOLE_LOG_FILE_NAME);
PrintStream simpleConsolePrintStream = new PrintStream(simpleConsoleLog.toFile());
Console simpleLogConsole =
new Console(
Verbosity.STANDARD_INFORMATION,
simpleConsolePrintStream,
simpleConsolePrintStream,
printConsole.getAnsi());
printConsole.setDuplicatingConsole(Optional.of(simpleLogConsole));
Path testLogPath = filesystem.getBuckPaths().getLogDir().resolve("test.log");
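// Create the executor services used by this command; each wrapper shuts its executor down
// when closed.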
try (ThrowingCloseableWrapper<ExecutorService, InterruptedException> diskIoExecutorService =
getExecutorWrapper(
MostExecutors.newSingleThreadExecutor("Disk I/O"),
"Disk IO",
DISK_IO_STATS_TIMEOUT_SECONDS);
ThrowingCloseableWrapper<ListeningExecutorService, InterruptedException>
httpWriteExecutorService =
getExecutorWrapper(
getHttpWriteExecutorService(cacheBuckConfig),
"HTTP Write",
cacheBuckConfig.getHttpWriterShutdownTimeout());
ThrowingCloseableWrapper<ListeningExecutorService, InterruptedException>
httpFetchExecutorService =
getExecutorWrapper(
getHttpFetchExecutorService(
"standard", cacheBuckConfig.getHttpFetchConcurrency()),
"HTTP Read",
cacheBuckConfig.getHttpWriterShutdownTimeout());
ThrowingCloseableWrapper<ScheduledExecutorService, InterruptedException>
counterAggregatorExecutor =
getExecutorWrapper(
Executors.newSingleThreadScheduledExecutor(
new CommandThreadFactory(
"CounterAggregatorThread", commonThreadFactoryState)),
"CounterAggregatorExecutor",
COUNTER_AGGREGATOR_SERVICE_TIMEOUT_SECONDS);
ThrowingCloseableWrapper<ScheduledExecutorService, InterruptedException>
scheduledExecutorPool =
getExecutorWrapper(
Executors.newScheduledThreadPool(
buckConfig
.getView(BuildBuckConfig.class)
.getNumThreadsForSchedulerPool(),
new CommandThreadFactory(
getClass().getName() + "SchedulerThreadPool",
commonThreadFactoryState)),
"ScheduledExecutorService",
EXECUTOR_SERVICES_TIMEOUT_SECONDS);
// Create a cached thread pool for cpu intensive tasks
ThrowingCloseableWrapper<ListeningExecutorService, InterruptedException>
cpuExecutorService =
getExecutorWrapper(
listeningDecorator(Executors.newCachedThreadPool()),
ExecutorPool.CPU.toString(),
EXECUTOR_SERVICES_TIMEOUT_SECONDS);
// Create a fixed-size thread pool for graph-construction CPU-intensive tasks
ThrowingCloseableWrapper<ListeningExecutorService, InterruptedException>
graphCpuExecutorService =
getExecutorWrapper(
listeningDecorator(
MostExecutors.newMultiThreadExecutor(
"graph-cpu",
buckConfig.getView(BuildBuckConfig.class).getNumThreads())),
ExecutorPool.GRAPH_CPU.toString(),
EXECUTOR_SERVICES_TIMEOUT_SECONDS);
// Create an executor for network I/O tasks (currently a direct executor that runs tasks
// on the calling thread)
ThrowingCloseableWrapper<ListeningExecutorService, InterruptedException>
networkExecutorService =
getExecutorWrapper(
newDirectExecutorService(),
ExecutorPool.NETWORK.toString(),
EXECUTOR_SERVICES_TIMEOUT_SECONDS);
ThrowingCloseableWrapper<ListeningExecutorService, InterruptedException>
projectExecutorService =
getExecutorWrapper(
listeningDecorator(
MostExecutors.newMultiThreadExecutor(
"Project",
buckConfig.getView(BuildBuckConfig.class).getNumThreads())),
ExecutorPool.PROJECT.toString(),
EXECUTOR_SERVICES_TIMEOUT_SECONDS);
BuildInfoStoreManager storeManager = new BuildInfoStoreManager();
AbstractConsoleEventBusListener fileLoggerConsoleListener =
new SimpleConsoleEventBusListener(
new RenderingConsole(clock, simpleLogConsole),
clock,
testConfig.getResultSummaryVerbosity(),
superConsoleConfig.getHideSucceededRulesInLogMode(),
superConsoleConfig.getNumberOfSlowRulesToShow(),
true, // Always show slow rules in File Logger Console
locale,
testLogPath,
executionEnvironment,
buildId,
logBuckConfig.isLogBuildIdToConsoleEnabled(),
logBuckConfig.getBuildDetailsTemplate(),
logBuckConfig.getBuildDetailsCommands(),
isRemoteExecutionBuild
? Optional.of(
remoteExecutionConfig.getDebugURLString(
metadataProvider.get().getReSessionId()))
: Optional.empty(),
createAdditionalConsoleLinesProviders(
remoteExecutionListener, remoteExecutionConfig, metadataProvider));
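// Create the interactive console listener (super console or simple console) for the user's
// terminal.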
AbstractConsoleEventBusListener consoleListener =
createConsoleEventListener(
clock,
superConsoleConfig,
printConsole,
testConfig.getResultSummaryVerbosity(),
executionEnvironment,
locale,
testLogPath,
logBuckConfig.isLogBuildIdToConsoleEnabled(),
logBuckConfig.getBuildDetailsTemplate(),
logBuckConfig.getBuildDetailsCommands(),
createAdditionalConsoleLinesProviders(
remoteExecutionListener, remoteExecutionConfig, metadataProvider),
isRemoteExecutionBuild ? Optional.of(remoteExecutionConfig) : Optional.empty(),
metadataProvider);
// This makes calls to LOG.error(...) post to the EventBus, instead of writing to
// stderr.
Closeable logErrorToEventBus =
loggerThreadMappingScope.setWriter(createWriterForConsole(consoleListener));
Scope ddmLibLogRedirector = DdmLibLogRedirector.redirectDdmLogger(buildEventBus);
// NOTE: This will only run during the lifetime of the process and will flush on close.
CounterRegistry counterRegistry =
new CounterRegistryImpl(
counterAggregatorExecutor.get(),
buildEventBus,
buckConfig
.getView(CounterBuckConfig.class)
.getCountersFirstFlushIntervalMillis(),
buckConfig.getView(CounterBuckConfig.class).getCountersFlushIntervalMillis());
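// Track JVM performance stats and, on non-Windows platforms, external process usage for this
// invocation.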
PerfStatsTracking perfStatsTracking =
new PerfStatsTracking(buildEventBus, invocationInfo);
ProcessTracker processTracker =
logBuckConfig.isProcessTrackerEnabled() && platform != Platform.WINDOWS
? new ProcessTracker(
buildEventBus,
invocationInfo,
context.isPresent(),
logBuckConfig.isProcessTrackerDeepEnabled())
: null;
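// Construct the artifact cache factory (dir and HTTP caches) backed by the executors created
// above.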
ArtifactCaches artifactCacheFactory =
new ArtifactCaches(
cacheBuckConfig,
buildEventBus,
target ->
buildTargetFactory.create(target, cellPathResolver.getCellNameResolver()),
targetConfigurationSerializer,
filesystem,
executionEnvironment.getWifiSsid(),
httpWriteExecutorService.get(),
httpFetchExecutorService.get(),
getDirCacheStoreExecutor(cacheBuckConfig, diskIoExecutorService),
managerScope,
getArtifactProducerId(executionEnvironment),
executionEnvironment.getHostname(),
ClientCertificateHandler.fromConfiguration(cacheBuckConfig));
// Once the command completes, it should be safe to release the semaphore right away rather
// than waiting for executors and other stateful objects to terminate. This helps retry the
// command faster if the user terminated it with Ctrl+C.
// Ideally, we should come up with a better lifecycle management strategy for the
// semaphore object.
CloseableWrapper<Optional<CloseableWrapper<Semaphore>>> semaphoreCloser =
CloseableWrapper.of(
Optional.ofNullable(semaphore),
s -> {
s.ifPresent(AbstractCloseableWrapper::close);
});
CloseableMemoizedSupplier<DepsAwareExecutor<? super ComputeResult, ?>>
depsAwareExecutorSupplier =
getDepsAwareExecutorSupplier(buckConfig, buildEventBus);
// This will be executed first when leaving the try block; it simply waits for the
// event bus to dispatch all pending events before we proceed with termination
// procedures
CloseableWrapper<BuckEventBus> waitEvents = getWaitEventsWrapper(buildEventBus)) {
LOG.debug(invocationInfo.toLogLine());
buildEventBus.register(HANG_MONITOR.getHangMonitor());
if (logBuckConfig.isJavaGCEventLoggingEnabled()) {
// Register for GC events to be published to the event bus.
GCNotificationEventEmitter.register(buildEventBus);
}
ImmutableMap<ExecutorPool, ListeningExecutorService> executors =
ImmutableMap.of(
ExecutorPool.CPU,
cpuExecutorService.get(),
ExecutorPool.GRAPH_CPU,
graphCpuExecutorService.get(),
ExecutorPool.NETWORK,
networkExecutorService.get(),
ExecutorPool.PROJECT,
projectExecutorService.get());
// No need to kick off a ProgressEstimator for commands that
// don't build anything -- it has overhead and doesn't seem
// to work for (e.g.) query anyway. ProgressEstimator has
// special support for the project command, so we have to
// include it there too.
if (consoleListener.displaysEstimatedProgress()
&& (command.performsBuild()
|| command.subcommand instanceof ProjectCommand
|| command.subcommand instanceof AbstractQueryCommand)) {
boolean persistent = !(command.subcommand instanceof AbstractQueryCommand);
ProgressEstimator progressEstimator =
new ProgressEstimator(
persistent
? Optional.of(
filesystem
.resolve(filesystem.getBuckPaths().getBuckOut())
.resolve(ProgressEstimator.PROGRESS_ESTIMATIONS_JSON))
: Optional.empty(),
buildEventBus);
consoleListener.setProgressEstimator(progressEstimator);
}
BuildEnvironmentDescription buildEnvironmentDescription =
getBuildEnvironmentDescription(
executionEnvironment,
buckConfig);
Iterable<BuckEventListener> commandEventListeners =
command.getSubcommand().isPresent()
? command
.getSubcommand()
.get()
.getEventListeners(executors, scheduledExecutorPool.get())
: ImmutableList.of();
if (isRemoteExecutionBuild) {
List<BuckEventListener> remoteExecutionsListeners = Lists.newArrayList();
remoteExecutionListener.ifPresent(remoteExecutionsListeners::add);
commandEventListeners =
new ImmutableList.Builder<BuckEventListener>()
.addAll(commandEventListeners)
.addAll(remoteExecutionsListeners)
.build();
}
eventListeners =
addEventListeners(
buildEventBus,
cells.getRootCell().getFilesystem(),
invocationInfo,
cells.getRootCell().getBuckConfig(),
webServer,
clock,
executionEnvironment,
counterRegistry,
commandEventListeners,
remoteExecutionListener.isPresent()
? Optional.of(remoteExecutionListener.get())
: Optional.empty(),
managerScope);
consoleListener.register(buildEventBus);
fileLoggerConsoleListener.register(buildEventBus);
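// Warn the user about any local buckconfig override files that are in effect.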
if (logBuckConfig.isBuckConfigLocalWarningEnabled()
&& !printConsole.getVerbosity().isSilent()) {
ImmutableList<AbsPath> localConfigFiles =
cells.getAllCells().stream()
.map(
cell ->
cell.getRoot().resolve(Configs.DEFAULT_BUCK_CONFIG_OVERRIDE_FILE_NAME))
.filter(path -> Files.isRegularFile(path.getPath()))
.collect(ImmutableList.toImmutableList());
if (localConfigFiles.size() > 0) {
String message =
localConfigFiles.size() == 1
? "Using local configuration:"
: "Using local configurations:";
buildEventBus.post(ConsoleEvent.warning(message));
for (AbsPath localConfigFile : localConfigFiles) {
buildEventBus.post(ConsoleEvent.warning(String.format("- %s", localConfigFile)));
}
}
}
if (commandMode == CommandMode.RELEASE && logBuckConfig.isPublicAnnouncementsEnabled()) {
PublicAnnouncementManager announcementManager =
new PublicAnnouncementManager(
clock,
buildEventBus,
consoleListener,
new RemoteLogBuckConfig(buckConfig),
Objects.requireNonNull(executors.get(ExecutorPool.CPU)));
announcementManager.getAndPostAnnouncements();
}
// This needs to happen after the registration of the event listeners so they can pick it up.
stateLifecycleStatus
.getLifecycleStatusString()
.ifPresent(event -> buildEventBus.post(DaemonEvent.newDaemonInstance(event)));
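// Kick off asynchronous version control stats collection when enabled (skipped in test mode).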
ListenableFuture<Optional<FullVersionControlStats>> vcStatsFuture =
Futures.immediateFuture(Optional.empty());
boolean shouldUploadBuildReport = BuildReportUtils.shouldUploadBuildReport(buckConfig);
VersionControlBuckConfig vcBuckConfig = new VersionControlBuckConfig(buckConfig);
VersionControlStatsGenerator vcStatsGenerator =
new VersionControlStatsGenerator(
new DelegatingVersionControlCmdLineInterface(
cells.getRootCell().getFilesystem().getRootPath().getPath(),
new PrintStreamProcessExecutorFactory(),
vcBuckConfig.getHgCmd(),
buckConfig.getEnvironment()),
vcBuckConfig.getPregeneratedVersionControlStats());
if ((vcBuckConfig.shouldGenerateStatistics() || shouldUploadBuildReport)
&& command.subcommand instanceof AbstractCommand) {
AbstractCommand subcommand = (AbstractCommand) command.subcommand;
if (!commandMode.equals(CommandMode.TEST)) {
boolean shouldPreGenerate = !subcommand.isSourceControlStatsGatheringEnabled();
vcStatsFuture =
vcStatsGenerator.generateStatsAsync(
shouldUploadBuildReport,
shouldPreGenerate,
buildEventBus,
listeningDecorator(diskIoExecutorService.get()));
}
}
if (command.getSubcommand().isPresent()
&& command.getSubcommand().get() instanceof BuildCommand
&& shouldUploadBuildReport) {
BuildReportUpload.runBuildReportUpload(
managerScope, vcStatsFuture, buckConfig, buildId);
}
NetworkInfo.generateActiveNetworkAsync(diskIoExecutorService.get(), buildEventBus);
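// Announce the command start, recording the remaining args and the client's working directory
// relative to the root cell.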
ImmutableList<String> remainingArgs =
filteredArgsForLogging.isEmpty()
? ImmutableList.of()
: filteredUnexpandedArgsForLogging.subList(
1, filteredUnexpandedArgsForLogging.size());
Path absoluteClientPwd = getClientPwd(cells.getRootCell(), clientEnvironment);
CommandEvent.Started startedEvent =
CommandEvent.started(
command.getDeclaredSubCommandName(),
remainingArgs,
cells.getRootCell().getRoot().getPath().relativize(absoluteClientPwd).normalize(),
context.isPresent()
? OptionalLong.of(buckGlobalState.getUptime())
: OptionalLong.empty(),
getBuckPID());
buildEventBus.post(startedEvent);
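// Create the resolver that turns command-line target specs (build target patterns) into
// build targets.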
TargetSpecResolver targetSpecResolver =
getTargetSpecResolver(
parserConfig,
watchman,
cells.getRootCell(),
buckGlobalState,
buildEventBus,
depsAwareExecutorSupplier);
// This also queries Watchman, posts events to the global and local event buses, and
// invalidates all related caches
// TODO (buck_team): extract invalidation from getParserAndCaches()
ParserAndCaches parserAndCaches =
getParserAndCaches(
context,
watchmanFreshInstanceAction,
filesystem,
buckConfig,
watchman,
knownRuleTypesProvider,
cells.getRootCell(),
buckGlobalState,
buildEventBus,
executors,
ruleKeyConfiguration,
depsAwareExecutorSupplier,
executableFinder,
buildTargetFactory,
hostConfiguration.orElse(UnconfiguredTargetConfiguration.INSTANCE),
targetSpecResolver);
// Because the Parser is potentially constructed before the CounterRegistry,
// we need to manually register its counters after it's created.
//
// The counters will be unregistered once the counter registry is closed.
counterRegistry.registerCounters(
parserAndCaches.getParser().getPermState().getCounters());
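// pkill-based process management is not available on Windows.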
Optional<ProcessManager> processManager;
if (platform == Platform.WINDOWS) {
processManager = Optional.empty();
} else {
processManager = Optional.of(new PkillProcessManager(processExecutor));
}
// At this point, we have parsed options but haven't started
// running the command yet. This is a good opportunity to
// augment the event bus with our serialize-to-file
// event-listener.
if (command.subcommand instanceof AbstractCommand) {
AbstractCommand subcommand = (AbstractCommand) command.subcommand;
Optional<Path> eventsOutputPath = subcommand.getEventsOutputPath();
if (eventsOutputPath.isPresent()) {
BuckEventListener listener =
new FileSerializationEventBusListener(eventsOutputPath.get());
buildEventBus.register(listener);
}
}
buildEventBus.post(
new BuckInitializationDurationEvent(
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - initTimestamp)));
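// Run the actual subcommand with the fully assembled CommandRunnerParams.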
try {
exitCode =
command.run(
ImmutableCommandRunnerParams.of(
printConsole,
stdIn,
cells,
watchman,
parserAndCaches.getVersionedTargetGraphCache(),
artifactCacheFactory,
parserAndCaches.getTypeCoercerFactory(),
buildTargetFactory,
targetConfiguration,
hostConfiguration,
targetConfigurationSerializer,
parserAndCaches.getParser(),
buildEventBus,
platform,
clientEnvironment,
cells
.getRootCell()
.getBuckConfig()
.getView(JavaBuckConfig.class)
.createDefaultJavaPackageFinder(),
clock,
vcStatsGenerator,
processManager,
webServer,
persistentWorkerPools,
buckConfig,
fileHashCache,
executors,
scheduledExecutorPool.get(),
buildEnvironmentDescription,
parserAndCaches.getActionGraphProvider(),
knownRuleTypesProvider,
storeManager,
Optional.of(invocationInfo),
parserAndCaches.getDefaultRuleKeyFactoryCacheRecycler(),
projectFilesystemFactory,
ruleKeyConfiguration,
processExecutor,
executableFinder,
pluginManager,
moduleManager,
depsAwareExecutorSupplier,
metadataProvider,
buckGlobalState,
absoluteClientPwd));
} catch (InterruptedException | ClosedByInterruptException e) {
buildEventBus.post(CommandEvent.interrupted(startedEvent, ExitCode.SIGNAL_INTERRUPT));
throw e;
} finally {
buildEventBus.post(CommandEvent.finished(startedEvent, exitCode));
buildEventBus.post(
new CacheStatsEvent(
"versioned_target_graph_cache",
parserAndCaches.getVersionedTargetGraphCache().getCacheStats()));
}
} catch (Exception e) {
exceptionForFix = Optional.of(e);
throw e;
} finally {
if (exitCode != ExitCode.SUCCESS) {
handleAutoFix(
filesystem,
printConsole,
clientEnvironment,
command,
buckConfig,
buildId,
exitCode,
exceptionForFix,
invocationInfo);
}
// Signal Nailgun that we are no longer interested in client disconnect events
context.ifPresent(c -> c.removeAllClientListeners());
if (context.isPresent()) {
// Clean up the trash in the background if this was a buckd
// read-write command. (We don't bother waiting for it to
// complete; the cleaner will ensure subsequent cleans are
// serialized with this one.)
TRASH_CLEANER.startCleaningDirectory(filesystem.getBuckPaths().getTrashDir());
}
// Exit Nailgun early if the command succeeded, so we do not block the client while
// performing the telemetry upload in the background.
// For failures, always exit synchronously because the exitCode may in fact be overridden
// further up the stack.
// TODO(buck_team): refactor this, since in case of an exception the exitCode is reported
// incorrectly to the CommandEvent listener
if (exitCode == ExitCode.SUCCESS
&& context.isPresent()
&& !cliConfig.getFlushEventsBeforeExit()) {
context.get().in.close(); // Avoid client exit triggering client disconnection handling.
context.get().exit(exitCode.getCode());
}
// TODO(buck_team): refactor eventListeners for RAII
flushAndCloseEventListeners(printConsole, eventListeners);
}
}
}
return exitCode;
}