func onDownCommand()

in Sources/DistributedActors/Cluster/ClusterShell.swift [1219:1270]
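
Handles a down command for memberToDown: applies the .down membership change (returning the state unchanged if the change is ineffective), publishes the change as a cluster event, confirms the death to SWIM, and then either runs the configured onDownAction (when the downed member is this node itself) or terminates the association with the remote node.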


    func onDownCommand(_ context: _ActorContext<Message>, state: ClusterShellState, member memberToDown: Cluster.Member) -> ClusterShellState {
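        // operate on a local mutable copy; the updated state is returned to the caller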
        var state = state

        guard let change = state.membership.applyMembershipChange(.init(member: memberToDown, toStatus: .down)) else {
            // the change was ineffective, e.g. the node was already replaced by another node and down events were already sent
            return state
        }

        // the change was applied: update the membership snapshot, publish the event, and confirm the death to SWIM
        context.system.cluster.updateMembershipSnapshot(state.membership)
        self.clusterEvents.publish(.membershipChange(change))
        self.tryConfirmDeadToSWIM(context, state, change: change)

        if let logChangeLevel = state.settings.logMembershipChanges {
            context.log.log(level: logChangeLevel, "Cluster membership change: \(reflecting: change)", metadata: [
                "cluster/membership/change": "\(change)",
                "cluster/membership": Logger.MetadataValue.array(state.membership.members(atMost: .down).map { "\($0)" }),
            ])
        }

        // whenever we down a node we must confirm it to SWIM, so it won't needlessly keep monitoring that node forever
        self._swimRef?.tell(.local(SWIM.LocalMessage.confirmDead(memberToDown.uniqueNode)))

        if memberToDown.uniqueNode == state.selfNode {
            // ==== ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            // Down(self node); SWIM was informed above, and we should likely initiate a graceful shutdown
            context.log.warning(
                "Self node was marked [.down]!",
                metadata: [ // TODO: carry reason why -- was it gossip, manual or other?
                    "cluster/membership": "\(state.membership)",
                ]
            )

            do {
                let onDownAction = context.system.settings.cluster.onDownAction.make()
                try onDownAction(context.system) // TODO: return a future and run with a timeout
            } catch {
                context.system.log.error("Failed to executed onDownAction! Shutting down system forcefully! Error: \(error)")
                context.system.shutdown()
            }

            // collect and interpret any pending leader actions before returning the updated state
            state = self.interpretLeaderActions(context.system, state, state.collectLeaderActions())
            return state
        } else {
            // ==== ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            // Terminate association and Down the (other) node

            state = self.interpretLeaderActions(context.system, state, state.collectLeaderActions())
            self.terminateAssociation(context.system, state: &state, memberToDown.uniqueNode)
            return state
        }
    }
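
The onDownAction executed in the self-down branch comes from the cluster settings. Below is a minimal, hedged sketch of how it might be configured and how a down command is typically issued; the .gracefulShutdown(delay:) case and system.cluster.down(node:) are assumptions based on this version of the library, so verify them against the sources in use:

import DistributedActors

let system = ActorSystem("ExampleNode") { settings in
    settings.cluster.enabled = true
    // When this node itself is marked [.down], shut the system down gracefully
    // after a short delay (assumed API: OnDownActionStrategy.gracefulShutdown(delay:)).
    settings.cluster.onDownAction = .gracefulShutdown(delay: .seconds(3))
}

// Manually mark a remote node as down; this eventually reaches onDownCommand
// on the cluster shell (assumed API: ClusterControl.down(node:)).
system.cluster.down(node: Node(systemName: "OtherNode", host: "127.0.0.1", port: 8228))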