protected void setup()

in src/java/org/apache/cassandra/service/CassandraDaemon.java [230:432]


    protected void setup()
    {
        DiskErrorsHandlerService.configure();

        // Since CASSANDRA-14793, data for the local system keyspaces is no longer spread across the data directories,
        // to reduce the risk posed by disk failures. Consequently, on upgrade the system needs to ensure that the old
        // data files have been migrated to the new directories before we start deleting snapshots and upgrading
        // system tables.
        try
        {
            migrateSystemDataIfNeeded();
        }
        catch (IOException e)
        {
            exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e);
        }

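        // Expose management interfaces: start a JMX connector server if one was not already started via JVM options,
        // load the optional MX4J HTTP adaptor if it is present on the classpath, and install the security manager
        // that restricts what user-supplied code such as UDFs is allowed to do.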
        maybeInitJmx();

        Mx4jTool.maybeLoad();

        ThreadAwareSecurityManager.install();

        logSystemInfo(logger);

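        // Try to lock the process address space into RAM so the OS cannot swap it out; failure is
        // non-fatal and only logged.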
        NativeLibrary.tryMlockall();

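        // Create any missing data, commit log and other configured directories, mark keyspaces as safe to open,
        // and start the commit log service.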
        DatabaseDescriptor.createAllDirectories();
        Keyspace.setInitialized();
        CommitLog.instance.start();

        SnapshotManager.instance.start(false);
        SnapshotManager.instance.clearExpiredSnapshots();
        SnapshotManager.instance.clearEphemeralSnapshots();
        SnapshotManager.instance.resumeSnapshotCleanup();
        SnapshotManager.instance.registerMBean();

        // The snapshot clearing above also removes all ephemeral snapshots, which were previously cleared
        // as part of the startup checks before CASSANDRA-18111
        runStartupChecks();

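        // Initialize the cluster metadata service, using the configured seeds for discovery, and expose its JMX
        // operations. Auto-compaction is disabled while startup proceeds and re-enabled after commit log replay below.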
        try
        {
            disableAutoCompaction(Schema.instance.localKeyspaces().names());
            Startup.initialize(DatabaseDescriptor.getSeeds());
            disableAutoCompaction(Schema.instance.distributedKeyspaces().names());
            CMSOperations.initJmx();
            AccordOperations.initJmx();
            if (ClusterMetadata.current().myNodeId() != null)
                RegistrationStatus.instance.onRegistration();
        }
        catch (InterruptedException | ExecutionException | IOException e)
        {
            throw new AssertionError("Can't initialize cluster metadata service", e);
        }
        catch (StartupException e)
        {
            exitOrFail(e.returnCode, e.getMessage(), e.getCause());
        }

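        // Register a schema change listener that invalidates prepared statements referencing altered or dropped tables.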
        QueryProcessor.registerStatementInvalidatingListener();

        try
        {
            SystemKeyspace.snapshotOnVersionChange();
        }
        catch (Throwable e)
        {
            exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e.getCause());
        }

        // We need to persist this as soon as possible after startup checks.
        // This should be the first write to SystemKeyspace (CASSANDRA-11742)
        SystemKeyspace.persistLocalMetadata();

        Thread.setDefaultUncaughtExceptionHandler(JVMStabilityInspector::uncaughtException);

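        // Migrate legacy system table data to its current layout and register the virtual keyspaces
        // (e.g. system_views and system_virtual_schema).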
        SystemKeyspaceMigrator41.migrate();
        setupVirtualKeyspaces();

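        // Block until the saved key and row caches, if any, have been read back from disk; failures are logged
        // but do not abort startup.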
        try
        {
            loadRowAndKeyCacheAsync().get();
        }
        catch (Throwable t)
        {
            JVMStabilityInspector.inspectThrowable(t);
            logger.warn("Error loading key or row cache", t);
        }

        if (!SKIP_GC_INSPECTOR)
        {
            try
            {
                GCInspector.register();
            }
            catch (Throwable t)
            {
                JVMStabilityInspector.inspectThrowable(t);
                logger.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
            }
        }

        // Initialize Paxos state trackers before replaying the commit log
        PaxosState.initializeTrackers();

        // Replay any CommitLogSegments found on disk
        // TODO samt - when restarting a previously running instance, this needs to happen after reconstructing schema
        //  from the cluster metadata log or all mutations will throw IncompatibleSchemaException on deserialisation
        try
        {
            CommitLog.instance.recoverSegmentsOnDisk();
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }

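        // Rebuild tracked Paxos uncommitted state from data on disk if required.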
        try
        {
            PaxosState.maybeRebuildUncommittedState();
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }

        // Clean up system.size_estimates entries left lying around from missed keyspace drops (CASSANDRA-14905)
        SystemKeyspace.clearAllEstimates();

        // schedule periodic dumps of table size estimates into SystemKeyspace.SIZE_ESTIMATES_CF
        // set cassandra.size_recorder_interval to 0 to disable
        int sizeRecorderInterval = SIZE_RECORDER_INTERVAL.getInt();
        if (sizeRecorderInterval > 0)
            ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SizeEstimatesRecorder.instance, 30, sizeRecorderInterval, TimeUnit.SECONDS);

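        // Start the services that coordinate repair sessions and streaming.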
        ActiveRepairService.instance().start();
        StreamManager.instance.start();

        // Preload prepared statements persisted in the local system table so they remain available after restart
        QueryProcessor.instance.preloadPreparedStatements();

        // start server internals
        StorageService.instance.registerDaemon(this);
        try
        {
            StorageService.instance.initServer();
        }
        catch (ConfigurationException e)
        {
            System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server.  See log for stacktrace.");
            exitOrFail(1, "Fatal configuration error", e);
        }

        // The local rack may have been changed at some point, which will now be reflected in cluster metadata. Update
        // the system.local table just in case the actual value doesn't match what the configured location provider
        // reported when the earlier call to SystemKeyspace::persistLocalMetadata was made, prior to initialising
        // cluster metadata.
        SystemKeyspace.updateRack(ClusterMetadata.current().locator.local().rack);
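
        // Asynchronously catch up on any cluster metadata log entries committed while this node was starting.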
        ScheduledExecutors.optionalTasks.execute(() -> ClusterMetadataService.instance().processor().fetchLogAndWait());

        // Because we are writing to the system_distributed keyspace, this should happen after that is created, which
        // happens in StorageService.instance.initServer()
        Runnable viewRebuild = () -> {
            for (Keyspace keyspace : Keyspace.all())
            {
                keyspace.viewManager.buildAllViews();
            }
            logger.debug("Completed submission of build tasks for any materialized views defined at startup");
        };

        ScheduledExecutors.optionalTasks.schedule(viewRebuild, StorageService.RING_DELAY_MILLIS, TimeUnit.MILLISECONDS);
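
        // Set up authentication/authorization, e.g. creating default roles if needed.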
        StorageService.instance.doAuthSetup();

        // re-enable auto-compaction after replay, so correct disk boundaries are used
        enableAutoCompaction(Schema.instance.getKeyspaces());

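        // Initialize audit logging if it is enabled in the configuration.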
        AuditLogManager.instance.initialize();

        StorageService.instance.doAutoRepairSetup();

        // Schedule periodic background compaction task submission. This is simply a backstop against compactions
        // stalling due to scheduling errors or race conditions.
        ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(ColumnFamilyStore.getBackgroundCompactionTaskSubmitter(), 5, 1, TimeUnit.MINUTES);

        // schedule periodic recomputation of speculative retry thresholds
        ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SPECULATION_THRESHOLD_UPDATER, 
                                                                DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS),
                                                                DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS),
                                                                NANOSECONDS);

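        // Set up the client transports (the CQL native protocol server) for handling client connections.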
        initializeClientTransports();

        // Ensure every cache you want pre-warmed has been registered during startup before this call; be wary of
        // adding initialisation below this point, before completeSetup().
        if (DatabaseDescriptor.getAuthCacheWarmingEnabled())
            AuthCacheService.instance.warmCaches();
        else
            logger.info("Prewarming of auth caches is disabled");

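        // Start background repair of uncommitted Paxos operations, then mark setup as complete.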
        PaxosState.startAutoRepairs();
        completeSetup();
    }