Sources/DistributedActors/Cluster/Transport/TransportPipelines.swift (29 lines):
- line 22: import struct Foundation.Data // FIXME: would want to not have to use Data in our infra as it forces us to copy
- line 62: // TODO: https://github.com/apple/swift-distributed-actors/issues/605 use the serialization infra from the system rather
- line 64: // FIXME: make the promise dance here
- line 67: // TODO: change since serialization which can throw should be shipped off to a future
- line 161: // _ = context.close() // TODO: maybe?
- line 266: let buffer = serialized.buffer.asByteBuffer(allocator: self.serialization.allocator) // FIXME: yes we double allocate, no good ways around it today
- line 341: // TODO: drop message when it fails to be serialized?
- line 429: // TODO: optimize by carrying ID in envelope, or if we need to special handle this as system message
- line 457: recipientPath: wireEnvelope.recipient.path, // TODO: use addresses
- line 483: // TODO: potential for coalescing some ACKs here; schedule "let's write back in 300ms"
- line 496: // TODO: potential for coalescing some ACKs here; schedule "let's write back in 300ms"
- line 513: """) // TODO: metadata: self.outboundSystemMessages.metadata
- line 529: """) // TODO: metadata: self.outboundSystemMessages.metadata
- line 545: // FIXME: implement this once we have the Kill or Down command on cluster shell
- line 690: let group: EventLoopGroup = settings.eventLoopGroup ?? settings.makeDefaultEventLoopGroup() // TODO: share the loop with client side?
- line 717: log[metadataKey: "actor/path"] = "/system/transport.server" // TODO: this is a fake path, we could use log source: here if it gets merged
- line 719: // FIXME: PASS IN FROM ASSOCIATION SINCE MUST SURVIVE CONNECTIONS! // TODO: tests about killing connections the hard way
- line 723: // TODO: Ensure we don't read faster than we can write by adding the BackPressureHandler into the pipeline.
- line 730: // ("bytes dumper", DumpRawBytesDebugHandler(role: .server, log: log)), // FIXME: only include for debug -DSACT_TRACE_NIO things?
- line 749: return bootstrap.bind(host: bindAddress.node.host, port: Int(bindAddress.node.port)) // TODO: separate setup from using it
- line 755: // TODO: Implement "setup" inside settings, so that parts of bootstrap can be done there, e.g. by end users without digging into remoting internals
- line 781: log[metadataKey: "actor/path"] = "/system/transport.client" // TODO: this is a fake path, we could use log source: here if it gets merged
- line 783: // FIXME: PASS IN FROM ASSOCIATION SINCE MUST SURVIVE CONNECTIONS !!!
- line 786: // FIXME: PASS IN FROM ASSOCIATION SINCE MUST SURVIVE CONNECTIONS !!!
- line 794: // ("bytes dumper", DumpRawBytesDebugHandler(role: .client, log: log)), // FIXME: make available via compilation flag
- line 807: return bootstrap.connect(host: targetNode.host, port: Int(targetNode.port)) // TODO: separate setup from using it
- line 867: // TODO: we could merge ACK and NACK if NACKs were to carry "the gap"
- line 875: // TODO: carry same data as Envelope -- baggage etc
- line 889: // TODO: carry metadata from Envelope
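The ACK-coalescing TODOs at lines 483/496 suggest batching acknowledgements instead of writing one per received message. A minimal sketch of that idea as a NIO handler, assuming a hypothetical cumulative `ACK` message; this is not the library's actual pipeline code:

```swift
import NIO

/// Hypothetical cumulative ACK: acknowledges everything up to `sequenceNr`.
struct ACK {
    var sequenceNr: UInt64
}

final class CoalescingACKHandler: ChannelInboundHandler {
    typealias InboundIn = UInt64 // sequence numbers of received system messages (simplified)

    private var pendingACK: ACK?
    private var scheduledFlush: Scheduled<Void>?

    func channelRead(context: ChannelHandlerContext, data: NIOAny) {
        let seqNr = self.unwrapInboundIn(data)
        // A cumulative ACK only needs the highest sequence number seen so far.
        self.pendingACK = ACK(sequenceNr: max(seqNr, self.pendingACK?.sequenceNr ?? 0))

        // Schedule a single "let's write back in 300ms" instead of one ACK write per message.
        if self.scheduledFlush == nil {
            self.scheduledFlush = context.eventLoop.scheduleTask(in: .milliseconds(300)) {
                if let ack = self.pendingACK {
                    context.writeAndFlush(NIOAny(ack), promise: nil)
                    self.pendingACK = nil
                }
                self.scheduledFlush = nil
            }
        }
        context.fireChannelRead(data)
    }

    func handlerRemoved(context: ChannelHandlerContext) {
        self.scheduledFlush?.cancel()
    }
}
```

Coalescing trades up to 300ms of acknowledgement latency for far fewer small writes on the channel.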
Sources/DistributedActors/Cluster/ClusterShell.swift (27 lines):
- line 44: // TODO: consider ReadWriteLock lock, these accesses are very strongly read only biased
- line 57: // TODO: a bit terrible; perhaps key should be Node and then confirm by UniqueNode?
- line 187: let shootTheNodeWriteTimeout: NIO.TimeAmount = .seconds(10) // FIXME: hardcoded last write timeout...
- line 189: shootTheOtherNodePromise.fail(TimeoutError(message: "Timed out writing final STONITH to \(remoteNode), should close forcefully.", timeout: .seconds(10))) // FIXME: same timeout but diff type
- line 257: // FIXME: see if we can restructure this to avoid this nil/then-set dance
- line 267: // TODO: concurrency... lock the ref as others may read it?
- line 305: case requestMembershipChange(Cluster.Event) // TODO: make a command
- line 325: case downCommand(Node) // TODO: add reason
- line 328: case shutdown(BlockingReceptacle) // TODO: could be NIO future
- line 332: case associatedNodes(_ActorRef>) // TODO: better type here
- line 340: case handshakeFailed(Node, Error) // TODO: remove?
- line 349: // TODO: reformulate as Wire.accept / reject?
- line 526: return self.onHandshakeFailed(context, state, with: fromNode, error: error) // FIXME: implement this, basically disassociate() right away?
- line 646: // TODO: make it cleaner? though we decided to go with manual peer management as the ClusterShell owns it, hm
- line 648: // TODO: consider receptionist instead of this; we're "early" but receptionist could already be spreading its info to this node, since we associated.
- line 652: // FIXME: make sure that if the peer terminated, we don't add it again in here; receptionist would be better then to power this...
- line 685: // TODO: soundness check: if this isn't about handshaking with a replacement, then we should continue;
- line 712: // // FIXME: this needs more work...
- line 786: // TODO: guard that the target node is actually "us"? i.e. if we're exposed over various protocols and/or ports etc?
- line 849: state.events.publish(.membershipChange(change)) // TODO: need a test where a leader observes a replacement, and we ensure that it does not end up signalling up or removal twice?
- line 877: state.log.warning("Aborting incoming handshake: \(error)") // TODO: remove
- line 1005: state.events.publish(.membershipChange(change)) // TODO: need a test where a leader observes a replacement, and we ensure that it does not end up signalling up or removal twice?
- line 1080: // FIXME: don't retry on rejections; those are final; just failures are not, clarify this
- line 1105: // TODO: tweak logging some more; this is actually not scary in racy handshakes, so it may happen often
- line 1176: // FIXME: also close all associations (!!!)
- line 1247: metadata: [ // TODO: carry reason why -- was it gossip, manual or other?
- line 1254: try onDownAction(context.system) // TODO: return a future and run with a timeout
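The FIXMEs at lines 187/189 spell the same 10-second timeout twice, with two different types. A sketch of deriving both from one constant; `TimeoutError` is stubbed here to mirror the call site quoted above, and the conversion is illustrative:

```swift
import NIO

/// Stubbed to mirror the quoted call site; the real type lives in the library.
struct TimeoutError: Error {
    let message: String
    let timeout: NIO.TimeAmount
}

// One source of truth, instead of `.seconds(10)` spelled twice with two types:
let shootTheNodeWriteTimeout: NIO.TimeAmount = .seconds(10)

func failSTONITH(promise: EventLoopPromise<Void>, remoteNode: String) {
    promise.fail(TimeoutError(
        message: "Timed out writing final STONITH to \(remoteNode), should close forcefully.",
        timeout: shootTheNodeWriteTimeout // derived from the same constant as the write timeout
    ))
}
```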
Sources/DistributedActors/Serialization/Serialization.swift (27 lines):
- line 48: internal let metrics: ActorSystemMetrics // TODO: rather, do this via instrumentation
- line 75: // ==== Declare mangled names of some known popular types // TODO: hardcoded mangled name until we have _mangledTypeName
- line 130: // TODO: document how to deal with `protocol` message accepting actors, those should be very rare.
- line 131: // TODO: do we HAVE to do this in the Receptionist?
- line 134: settings.register(_OperationLogClusterReceptionist.AckOps.self) // TODO: can be removed once https://github.com/apple/swift/pull/30318 lands
- line 136: // FIXME: This will go away once https://github.com/apple/swift/pull/30318 is merged and we can rely on summoning types
- line 137: settings.register(_OperationLogClusterReceptionist.PushOps.self) // TODO: can be removed once https://github.com/apple/swift/pull/30318 lands
- line 138: settings.register(DistributedActors.OpLogDistributedReceptionist.PushOps.self) // TODO: can be removed once https://github.com/apple/swift/pull/30318 lands
- line 143: // FIXME: This will go away once https://github.com/apple/swift/pull/30318 is merged and we can rely on summoning types
- line 155: // TODO: Allow plugins to register types...?
- line 157: settings.register(ActorAddress.self, serializerID: .foundationJSON) // TODO: this was protobuf
- line 164: settings.register(ErrorEnvelope.self) // TODO: can be removed once https://github.com/apple/swift/pull/30318 lands
- line 165: settings.register(BestEffortStringError.self) // TODO: can be removed once https://github.com/apple/swift/pull/30318 lands
- line 168: settings.register(WallTimeClock.self) // TODO: can be removed once https://github.com/apple/swift/pull/30318 lands
- line 176: // TODO: DRY up setting this metadata
- line 178: log[metadataKey: "actor/path"] = "/system/serialization" // TODO: this is a fake path, we could use log source: here if it gets merged
- line 271: // TODO: decide if we log or crash when new things get registered during runtime
- line 272: // FIXME: https://github.com/apple/swift-distributed-actors/issues/552
- line 324: // TODO: determine what custom one to use, proto or what else
- line 336: // TODO: shall we make those return something async-capable, or is our assumption that we invoke these in the serialization pools enough, at least until proven wrong?
- line 369: // TODO: metrics how often we really have to copy
- line 375: // FIXME: Avoid the copying, needs SwiftProtobuf changes
- line 379: // TODO: metrics how often we really have to copy
- line 430: // TODO: we need to be able to abstract over Coders to collapse this into "giveMeACoder().encode()"
- line 554: // TODO: we need to be able to abstract over Coders to collapse this into "giveMeACoder().decode()"
- line 593: // TODO: Do we really need to store them at all?
- line 667: // TODO: remove and always just use the Any.Type
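Several of the `settings.register` lines above exist only because types cannot yet be summoned from mangled names (https://github.com/apple/swift/pull/30318). From user code the same registration mechanism looks roughly like this; `GreetingMessage` is a hypothetical type and the `settings.serialization` access path is an assumption:

```swift
struct GreetingMessage: Codable {
    let greeting: String
}

let system = ActorSystem("Example") { settings in
    // Make the type summonable from an incoming manifest (assumed access path).
    settings.serialization.register(GreetingMessage.self)
    // A specific serializer can be pinned, as done for ActorAddress above:
    settings.serialization.register(GreetingMessage.self, serializerID: .foundationJSON)
}
```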
Sources/DistributedActors/_ActorShell.swift (21 lines):
- line 30: // TODO: remove this and replace by the infrastructure which is now Swift's `actor`
- line 112: // TODO: we can likely optimize not having to call "through" supervisor if we are .stop anyway
- line 168: // TODO: replace with TestMetrics which we could use to inspect the start/stop counts
- line 201: self._myCell = ref // TODO: atomic?
- line 291: #endif // TODO: make the \next printout nice; TODO: don't log messages (could leak passwords etc)
- line 382: try self.becomeNext(behavior: .ignore) // TODO: make .drop once implemented
- line 390: next = .unhandled // TODO: could be .drop
- line 449: self.log.error("\(message)") // TODO: configurable logging? in props?
- line 452: self.log.warning("Actor threw error, reason: [\(error)]:\(type(of: error)). Terminating.") // TODO: configurable logging? in props?
- line 481: self.timers.cancelAll() // TODO: cancel all except the restart timer
- line 507: // TODO: handling "unhandled" would be good here... though I think type-wise this won't fly, since we care about signals too
- line 562: // TODO: stop all children? depends which style we'll end up with...
- line 563: // TODO: the thing is, I think we can express the entire "wait for children to stop" as a behavior, and no need to make it a special implementation in the cell
- line 581: // TODO: should probably .escalate instead;
- line 585: // TODO: validate all the nil-ing out; can we nil out the cell itself?
- line 699: return .same // TODO: make .drop once implemented
- line 717: let adaptedAddress = try self.address.makeChildAddress(name: name, incarnation: .random()) // TODO: actor name to BE the identity
- line 720: self._children.insert(ref) // TODO: separate adapters collection?
- line 770: self._children.insert(adapter) // TODO: separate adapters collection?
- line 843: try self.becomeNext(behavior: next) // FIXME: make sure we don't drop the behavior...?
- line 906: // TODO: we always want to call "through" the supervisor; make it more obvious API-wise that that should be the case internally?

Sources/DistributedActors/Cluster/Reception/OperationLogDistributedReceptionist.swift (17 lines):
- line 88: // TODO: This is done "automatically" once we do log compaction
- line 114: /// and the pull from thereon happens directly between those peers, which the recipient MAY flow control if it so wanted to; TODO: more detailed flow control rather than just the maxChunk?
- line 142: // TODO: compact the log whenever we know all members of the cluster have seen
- line 143: // TODO: Optimization: gap collapsing: [+a,+b,+c,-c] -> [+a,+b,gap(until:4)]
- line 144: // TODO: Optimization: head collapsing: [+a,+b,+c,-b,-a] -> [gap(until:2),+c,-b]
- line 146: // TODO: slow/fast ticks: When we know there's nothing new to share with others, we use the slow tick (which should be increased to 5 seconds or less)
- line 151: // TODO: remove this
- line 318: return // TODO: This restriction could be lifted; perhaps we can direct the register to the right node?
- line 349: // TODO: reply "registered"?
- line 363: // self.instrumentation.actorSubscribed(key: anyKey, address: self.id._unwrapActorAddress) // FIXME: remove the address parameter, it does not make sense anymore
- line 415: // TODO: also flush when a key has seen e.g. 100 changes?
- line 532: self.nextPeriodicAckPermittedDeadline[peer.id] = Deadline.fromNow(nextPeriodicAckAllowedIn) // TODO: system.timeSource
- line 662: // TODO: This should pick a few at random rather than ping everyone
- line 673: /// - this may cause the other peer to pull (ack) from any other peer receptionist, if it notices it is "behind" with regards to any of them. // FIXME: what if a peer notices "twice", so we also need to prevent a timer from resending that ack?
- line 734: ) // TODO: metadata pattern
- line 803: let equalityHackRef = try! system._resolveStub(identity: identity) // FIXME: cleanup the try!
- line 994: // TODO: annoyance; init MUST be defined here rather than in extension since it is required
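The "gap collapsing" optimization at line 143 can be illustrated with a toy model: a registration cancelled by a later removal in the same window is dropped, and dropped positions merge into a gap marker so sequence numbers stay stable. `Op` and `collapse` are illustrative names, not the planned implementation (the head-collapsing variant at line 144 follows different rules):

```swift
enum Op: Equatable {
    case register(key: String)
    case remove(key: String)
    case gap(until: UInt64)
}

/// Toy gap collapsing: [+a, +b, +c, -c] -> [+a, +b, gap(until: 4)]
func collapse(_ ops: [(seqNr: UInt64, op: Op)]) -> [(seqNr: UInt64, op: Op)] {
    // A key removed later in this window cancels its earlier registration.
    var cancelled: Set<String> = []
    for (_, op) in ops {
        if case .remove(let key) = op { cancelled.insert(key) }
    }

    var result: [(seqNr: UInt64, op: Op)] = []
    for (seqNr, op) in ops {
        let drop: Bool
        switch op {
        case .register(let key), .remove(let key): drop = cancelled.contains(key)
        case .gap: drop = false
        }
        if drop {
            // Extend a trailing gap rather than emitting one marker per dropped op.
            if let last = result.last, case .gap = last.op { result.removeLast() }
            result.append((seqNr: seqNr, op: .gap(until: seqNr)))
        } else {
            result.append((seqNr: seqNr, op: op))
        }
    }
    return result
}
```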
Sources/DistributedActors/ActorSystem.swift (16 lines):
- line 37: // TODO: avoid the lock...
- line 56: // TODO: converge into one tree
- line 201: // TODO: we should not rely on NIO for futures
- line 205: // TODO: should we share this, or have a separate ELG for IO?
- line 250: // TODO: want to reconcile those into one, and allow /dead as well
- line 453: // self._serialization = nil // FIXME: need to release serialization
- line 542: // TODO: lock inside provider, not here
- line 543: // FIXME: protect the naming context access and name reservation; add a test
- line 548: // FIXME: reserve the name, atomically
- line 563: fatalError("selected dispatcher [\(props.dispatcher)] not implemented yet; ") // FIXME: remove any not-implemented ones simply from the API
- line 594: fatalError("selected dispatcher [\(props.dispatcher)] not implemented yet; ") // FIXME: remove any not-implemented ones simply from the API
- line 714: // TODO: The looping through transports could be ineffective... but realistically we don't have many
- line 715: // TODO: realistically we ARE becoming a transport and thus should be able to remove 'transports' entirely
- line 756: // TODO: The looping through transports could be ineffective... but realistically we don't have many
- line 757: // TODO: realistically we ARE becoming a transport and thus should be able to remove 'transports' entirely
- line 843: // TODO: can we skip the Any... and use the underlying existential somehow?

Sources/DistributedActors/_Mailbox.swift (15 lines):
- line 84: // TODO: not entirely happy about the added weight, but I suppose avoiding going all the way "into" the settings on each send is even worse?
- line 107: // TODO: not entirely happy about the added weight, but I suppose avoiding going all the way "into" the settings on each send is even worse?
- line 145: // TODO: soundness check; we can't immediately send it to dead letters just yet since first all user messages
- line 156: sendAndDropAsDeadLetter() // TODO: "Drop" rather than DeadLetter
- line 224: // TODO: should deadLetters be special, since watching it is nonsense?
- line 275: // TODO: If we used some bits for system message queue count, we could avoid this issue... Consider this at some point perhaps
- line 277: // TODO: Alternatively, locking on system message things could be a solution... Though a heavy one.
- line 279: // TODO: This is not a full solution, however it lessens the number of instances in which we may enqueue to a terminating actor
- line 342: // TODO: not in love that we have to do logic like this here... with a plain bool to continue running or not it is easier
- line 385: // TODO: rename ActorRunResult -- the mailbox run is "the run", this is more like the actor's per-reduction directive... need to not overload the name "run"
- line 386: var runResult = ActorRunResult.continueRunning // TODO: hijack the run_length, and reformulate it as "fuel", and set it to zero when we need to stop
- line 502: // TODO: Figure out why this can ever happen
- line 705: // TODO: explain this more
- line 715: // TODO: let trace: TraceMetadata
- line 831: let backtrace: [String] // TODO: Could be worth it to carry it as a struct rather than the raw string?
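Line 386's idea of reformulating run length as "fuel" makes stopping a run uniform: set the fuel to zero. A simplified, self-contained sketch of that idea (names hypothetical):

```swift
enum RunResult {
    case done               // mailbox drained; the actor can go idle
    case rescheduleNeeded   // fuel ran out with messages left; re-run for fairness
}

/// Process at most `fuel` messages in a single mailbox run.
func run<Message>(
    mailbox: inout [Message],
    interpret: (Message) -> Void,
    fuel: Int
) -> RunResult {
    var remainingFuel = fuel
    while remainingFuel > 0, !mailbox.isEmpty {
        interpret(mailbox.removeFirst())
        remainingFuel -= 1
        // A "stop immediately" decision would be modeled by burning all
        // remaining fuel here, rather than via a separate boolean flag.
    }
    return mailbox.isEmpty ? .done : .rescheduleNeeded
}
```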
Sources/DistributedActors/Behaviors.swift (14 lines):
- line 59: // TODO: pretty sub-optimal, but we'll flatten this all out eventually
- line 102: // TODO: pretty sub-optimal, but we'll flatten this all out eventually
- line 172: // TODO: and their logging rate should be configurable
- line 260: /// If the alternative behavior contains a `.setup` or other deferred behavior, it will be canonicalized on its first execution // TODO: make a test for it
- line 357: // TODO: better type printout so we know we only handle SpecificSignal with this one
- line 480: indirect case intercept(behavior: _Behavior, with: _Interceptor) // TODO: for printing it would be nicer to have "supervised" here; though modeling-wise it is exactly an intercept
- line 525: // TODO: more docs
- line 607: case .signalHandling(let recvMsg, _): return try recvMsg.interpretMessage(context: context, message: message) // TODO: should we keep the signal handler even if not .same? // TODO: more signal handling tests
- line 608: case .signalHandlingAsync(let recvMsg, _): return try recvMsg.interpretMessage(context: context, message: message) // TODO: should we keep the signal handler even if not .same? // TODO: more signal handling tests
- line 673: return try interceptor.interceptSignal(target: behavior, context: context, signal: signal) // TODO: do we need to try?
- line 843: // TODO: what if already stopped or failed
- line 906: // TODO: make not recursive perhaps, since it could blow up on a large chain?
- line 919: // FIXME: this should technically offload onto storage and then apply them again...
- line 936: default: // TODO: remove the use of default: it is the devil { // TODO: need the Codable?
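Line 906 worries that recursive canonicalization "could blow up on a large chain". An iterative unwrap with a depth guard avoids that; `Behavior` below is a simplified stand-in for `_Behavior`, not the library's type:

```swift
indirect enum Behavior<Message> {
    case receiveMessage((Message) -> Behavior<Message>)
    case setup(() -> Behavior<Message>)
    case same
}

struct CanonicalizationError: Error {
    let message: String
}

/// Unwrap deferred behaviors iteratively instead of recursively,
/// so a long `.setup` chain cannot overflow the stack.
func canonicalize<Message>(
    _ behavior: Behavior<Message>,
    depthLimit: Int = 128
) throws -> Behavior<Message> {
    var current = behavior
    var depth = 0
    while case .setup(let make) = current {
        depth += 1
        guard depth <= depthLimit else {
            throw CanonicalizationError(message: "Exceeded canonicalization depth limit \(depthLimit)")
        }
        current = make()
    }
    return current
}
```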
Sources/DistributedActors/ActorAddress.swift (13 lines):
- line 74: // TODO: public var identity: ActorIdentity = Path + Name
- line 278: // TODO: instead back with a String and keep a pos to index quickly into the name for Substring?
- line 401: /// Such relationships must be confirmed by using the `_ActorContext.children.hasChild(:UniqueActorPath)` method. TODO: this does not exist yet
- line 406: Array(self.segments.dropLast()) == maybeParentPath.segments // TODO: more efficient impl, without the copying
- line 413: /// Such relationships must be confirmed by using the `_ActorContext.children.hasChild(:UniqueActorPath)` method. TODO: this does not exist yet
- line 439: // FIXME: optimize so we don't alloc into the String() here
- line 449: // TODO: benchmark
- line 457: // TODO: accept hex and url encoded things as well
- line 464: // TODO: used to be, but too much hassle: throw ActorPathError.illegalActorPathElement(name: name, illegal: "\(c)", index: pos)
- line 577: // TODO: Would want to rename; this is really protocol + host + port, and a "cute name for humans"; we on purpose do not take the name as part of identity
- line 590: /// actor system logs, to other external systems. TODO: Note also node roles, which we do not have yet... those are dynamic key/value pairs paired to a unique node.
- line 594: // TODO: collapse into one String and index into it?
- line 596: public var systemName: String // TODO: some other name, to signify "this is just for humans"?

Sources/DistributedActorsGenerator/Decls+GenActorRendering.swift (11 lines):
- line 101: // FIXME: if the actor has lifecycle hooks, call them
- line 120: // FIXME: if we'd expose lifecycle hooks, call them
- line 124: if let watcher = instance as? (DistributedActor & DistributedActors.LifecycleWatch) { // TODO: cleanup once LifecycleWatch implies DistributedActor
- line 133: // FIXME: if we had signal handlers, invoke it
- line 167: "messageAccess": "public", // TODO: allow non-public actor messages
- line 301: "case \(decl.nameFirstLowercased)(/*TODO: MODULE.*/GeneratedActor.Messages.\(decl.name))"
- line 351: // FIXME: super naive... replace with something more proper
- line 458: // FIXME: super naive... replace with something more proper
- line 504: // TODO: Finally change the triple tuple into a specific type with more helpers
- line 556: // TODO: make this nicer... the ID could serve as the ref
- line 725: // TODO: dedup with the boxed one

Sources/DistributedActors/Cluster/SystemMessages+Redelivery.swift (10 lines):
- line 52: // TODO: association id for logging?
- line 208: // TODO: we COULD aggressively re-deliver right now here, though this is only an optimization
- line 238: // TODO: is this a chance to coalesce some system messages? what if someone did some silly watching of the same exact thing many times?
- line 255: // FIXME: implement giving up reconnecting
- line 264: // TODO: redeliver everything
- line 269: // FIXME: implement once cluster.down() is available, issue #848
- line 290: // TODO: we do not keep any "future" messages and rely on them being re-sent, this is most likely fine (and is in reality in other impls),
- line 298: // TODO: accept association id?
- line 377: // TODO: not used
- line 378: // var maxRedeliveryTicksWithoutACK = 10_000 // TODO: settings

Sources/DistributedActors/Props.swift (10 lines):
- line 80: // TODO: likely better as a class hierarchy, but we'll see...
- line 100: // TODO: Eventually: probably also best as not an enum but a bunch of factories?
- line 106: // case dispatch(qosClass: Dispatch.DispatchQoS.QoSClass) // TODO: we want diff actors to be able to run on diff priorities, thus this setting
- line 108: // TODO: definitely good, though likely not as the first thing. We can base it on Akka's recent "Affinity" one,
- line 119: case pinnedThread // TODO: implement pinned thread dispatcher
- line 120: // TODO: CPU Affinity when pinning
- line 127: // TODO: not extensively tested, but should just-work™ since we treat NIO as a plain thread pool basically here.
- line 128: // TODO: not sure if we'd need this or not in reality, we'll see... executing futures safely would be more interesting perhaps
- line 131: // TODO: or hide it completely somehow; too dangerous
- line 182: // TODO: those only apply when bounded mailboxes

Sources/DistributedActors/LifecycleMonitoring/LifecycleWatch.swift (10 lines):
- line 26: nonisolated var actorTransport: ActorTransport { get } // FIXME: replace with DistributedActor conformance
- line 27: nonisolated var id: AnyActorIdentity { get } // FIXME: replace with DistributedActor conformance
- line 42: fatalError("TODO: handle more gracefully") // TODO: handle more gracefully, i.e. say that we can't watch that actor
- line 70: fatalError("TODO: handle more gracefully") // TODO: handle more gracefully, i.e. say that we can't watch that actor
- line 97: fatalError("Can't \(#function) \(watchee) @ (\(watchee.id)), does not seem to be managed by ActorSystem") // TODO: handle more gracefully, i.e. say that we can't watch that actor
- line 115: return // TODO: error instead
- line 127: // MARK: System extensions to support watching // TODO: move those into context, and make the ActorIdentity the context
- line 159: private weak var myself: DistributedActor? // TODO: make this just store the address instead?
- line 334: // TODO: remove actors as we notify about them
- line 343: let existenceConfirmed = true // TODO: implement support for existence confirmed or drop it?

Sources/DistributedActors/Cluster/NodeDeathWatcher.swift (9 lines):
- line 40: // TODO: clear after a few days, or some max count of nodes; use a sorted set for this
- line 84: existingWatchers.insert(watcher) // FIXME: we have to remove it once it terminates...
- line 110: // TODO: this can be optimized a bit more I suppose, with a reverse lookup table
- line 124: // TODO: make sure we only handle ONCE?
- line 158: // TODO: this will change to subscribing to cluster events once those land
- line 182: // FIXME: death watcher is incomplete, should handle snapshot!!
- line 205: instance.onActorWatched(by: watcher, remoteNode: remoteNode) // TODO: return and interpret directives
- line 217: instance.onMembershipChanged(change) // TODO: return and interpret directives
- line 221: instance.onMembershipChanged(change) // TODO: return and interpret directives

Sources/DistributedActors/Metrics/Metrics+ActorSystem.swift (9 lines):
- line 47: // TODO: use specific dimensions if shell has it configured, or groups etc
- line 48: // TODO: generalize this such that we can do props -> dimensions -> done, and not special case the system ones
- line 58: fatalError("TODO other actor path roots not supported; Was: \(shell)")
- line 63: // TODO: use specific dimensions if shell has it configured, or groups etc
- line 64: // TODO: generalize this such that we can do props -> dimensions -> done, and not special case the system ones
- line 74: fatalError("TODO other actor path roots not supported; Was: \(shell)")
- line 192: // TODO: note to self: measurements of rate can be done in two ways:
- line 238: // TODO: record message types by type
- line 254: self._cluster_members_removed = .init(label: clusterMembersLabel, dimensions: [("status", Cluster.MemberStatus.removed.rawValue)]) // TODO: this is kind of equal to the number of stored tombstones

Sources/DistributedActors/Supervision.swift (9 lines):
- line 191: case restart(atMost: Int, within: TimeAmount?, backoff: BackoffStrategy?) // TODO: would like to remove the `?` and model more properly
- line 389: case continuation(() throws -> _Behavior) // TODO: make it a Carry type for better debugging
- line 593: // TODO: matters perhaps only for metrics where we'd want to "please count this specific type of error", so leaving this logic as-is
- line 751: // TODO: could be configurable to escalate once restarts are exhausted
- line 757: // TODO: could be configurable to escalate once restarts are exhausted
- line 814: // TODO: make a proper .ordinalString function
- line 816: // TODO: has to modify restart counters here and supervise with the modified supervisor
- line 831: // TODO: complete impl
- line 869: // TODO: don't forget to include config in string repr once we do it
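For line 191's wish to "remove the `?` and model more properly": the optionals in `.restart(atMost:within:backoff:)` can become explicit variants, so that "not set" is a named case rather than `nil`. A hypothetical shape with stand-in types, not the library's API:

```swift
// Stand-ins for the real types, just to make the sketch self-contained.
struct TimeAmount { let nanoseconds: Int64 }
protocol BackoffStrategy {}

enum RestartWindow {
    case within(TimeAmount) // count failures only inside this window
    case unbounded          // count failures over the actor's whole lifetime
}

enum RestartDelay {
    case immediately
    case backoff(BackoffStrategy)
}

enum SupervisionStrategy {
    case stop
    case escalate
    // No optionals: every "not set" case is now a named, documented variant.
    case restart(atMost: Int, window: RestartWindow, delay: RestartDelay)
}
```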
Sources/DistributedActors/Serialization/Serialization+Settings.swift (9 lines):
- line 28: // TODO: Workaround for https://bugs.swift.org/browse/SR-12315 "Extension of nested type does not have access to types it is nested in"
- line 45: // TODO: We are using an internal function here to allow us to automatically enable the more strict mode in release builds.
- line 61: /// // TODO: This should default to some nice binary format rather than JSON.
- line 81: /// // TODO: detailed docs on how to use this for a serialization-changing rollout of a type
- line 121: // TODO: add test for sending raw SwiftProtobuf.Message
- line 163: // FIXME: THIS IS A WORKAROUND UNTIL WE CAN GET MANGLED NAMES https://github.com/apple/swift/pull/30318
- line 164: let hint = hintOverride ?? _typeName(type) // FIXME: _mangledTypeName https://github.com/apple/swift/pull/30318
- line 189: // FIXME: THIS IS A WORKAROUND UNTIL WE CAN GET MANGLED NAMES https://github.com/apple/swift/pull/30318
- line 190: let hint = hintOverride ?? _typeName(type) // FIXME: _mangledTypeName https://github.com/apple/swift/pull/30318

Sources/DistributedActorsGenerator/DistributedActorAnalysis.swift (9 lines):
- line 55: self.imports.append("\(node)") // TODO: more special type, since cross-module etc
- line 74: guard isDistributedProtocol else { // FIXME: detect DistributedActor-constrained protocol
- line 86: // TODO: quite an inefficient way to scan it, though the list is short
- line 254: // TODO: there is no TokenKind.mutatingKeyword in swift-syntax and it's expressed as .identifier("mutating"), could be a bug/omission
- line 265: // TODO: we could require it to be async as well, or something
- line 326: // TODO: in reality should be FQN, for cross-module support
- line 339: // TODO: This could be expressed as some "actorable.implements(protocol)"
- line 442: // FIXME: a complete impl would need to "resolve the types" to know if it happens to be a dist protocol
- line 452: // TODO: get the name more properly

Sources/DistributedActors/Gossip/Gossiper+Shell.swift (8 lines):
- line 167: // TODO: keep and remove logics
- line 176: // TODO: callback into client or not?
- line 184: let allPeers: [AddressableActorRef] = Array(self.peers).map { $0.asAddressable } // TODO: some protocol Addressable so we can avoid this mapping?
- line 192: let selectedPeers = logic.selectPeers(allPeers) // TODO: OrderedSet would be the right thing here...
- line 223: // TODO: signal "gossip round complete" perhaps?
- line 364: // // TODO: implement this rather as "high priority peer to gossip to"
- line 365: // // TODO: remove this most likely
- line 366: // // TODO: or rather, ask the logic if it wants to eagerly push?

Sources/DistributedActors/Refs.swift (8 lines):
- line 176: func _tellOrDeadLetter(_ message: Any, file: String, line: UInt) // TODO: This must die?
- line 219: // TODO: should this always be personalized dead letters instead?
- line 237: // FIXME: can this be removed?
- line 242: return // TODO: "drop" the message rather than dead letter it?
- line 296: // TODO: last resort, print error (system could be going down)
- line 323: return deadLetters.system // FIXME: do we really need this
- line 432: // TODO: we could use this to make TestProbes more "real" rather than wrappers
- line 475: internal struct TheOneWhoHasNoParent: _ReceivesSystemMessages { // FIXME: fix the name

Sources/DistributedActors/ActorLogging.swift (8 lines):
- line 22: // TODO: deprecate, we should not need this explicit type
- line 26: // TODO: want to eventually not have this; also move to more structured logging perhaps...
- line 106: // TODO: implement logging infrastructure - pipe as messages to a dedicated logging actor
- line 151: // TODO: this actually would be dispatching to the logging infra (has ticket)
- line 158: effectiveMetadata: self.context.effectiveMetadata(overrides: metadata), // TODO: should force lazies
- line 168: // TODO: here we can either log... or dispatch to actor... or invoke Logging. etc
- line 236: // TODO: hope to remove this one
- line 255: // TODO: This seems worse to implement since I can't pass through my "reads of lazy cause rendering"
Sources/DistributedActors/Cluster/Association.swift (8 lines):
- line 37: // TODO: Terrible lock which we want to get rid of; it means that every remote send has to contend against all other sends to get this ref
- line 41: // TODO: This style of implementation (queue -> channel swapping) can only ever work with coarse locking and is just temporary
- line 50: case associated(channel: Channel) // TODO: _InternalActorTransport.Node/Peer/Target ???
- line 68: // TODO: This style can only ever work since we lock around the entirety of enqueueing messages and this setting; make it such that we don't need the lock eventually
- line 182: // TODO: Reimplement association such that we don't need locks here
- line 240: /// Tombstones are slightly lighter than a real association, and are kept for a maximum of `settings.cluster.associationTombstoneTTL` TODO: make this a setting (!!!)
- line 247: let removalDeadline: Deadline // TODO: cluster should have a timer to try to remove those periodically
- line 250: // TODO: if we made system carry system.time we could always count from that point in time with a TimeAmount; require Clock and settings then

Sources/DistributedActors/Cluster/HandshakeStateMachine.swift (8 lines):
- line 92: // TODO: maybe store also at what time we sent the handshake, so we can diagnose if we should reject replies for being late etc
- line 100: // TODO: call into a connection error?
- line 101: // TODO: the remote REJECTING must not trigger backoffs
- line 103: self.onConnectionError(HandshakeConnectionError(node: self.remoteNode, message: "Handshake timed out")) // TODO: improve msgs
- line 126: let node: Node // TODO: allow carrying UniqueNode
- line 168: // self.state.log.trace("Done rejecting handshake.") // TODO: something more, terminate the association?
- line 176: // self.state.log.trace("Done rejecting handshake.") // TODO: something more, terminate the association?
- line 182: // self.negotiateCapabilities(...) // TODO: We may want to negotiate other options

Sources/DistributedActors/DeadLetters.swift (8 lines):
- line 30: /// "too late" or be dropped for some other reason, one may mark it using the TODO: "don't log me as dead letter" protocol.
- line 41: // TODO: sender and other metadata
- line 60: // TODO: rather, could we send messages to self._deadLetters with enough info so it handles them properly?
- line 207: // TODO: more metadata (from Envelope) (e.g. sender)
- line 220: // TODO: special handle $ask- replies; those mean we got a reply to an ask which timed out already
- line 224: // FIXME: this should never happen; tombstone must always be taken in by the actor as the last message
- line 225: traceLog_Mailbox(self.address.path, "Tombstone arrived in dead letters. TODO: make sure these don't happen")
- line 226: return true // TODO: would be better to avoid them ending up here at all; this means that likely a double dead letter was sent

Sources/DistributedActors/Cluster/MembershipGossip/Cluster+MembershipGossipLogic.swift (7 lines):
- line 44: // TODO: This can be optimized: it's enough if we keep a digest of the gossips; this way ACKs can just send the digest as well, saving space.
- line 104: // TODO: trim some information?
- line 109: // TODO: Implement stricter round-robin, the same way as our SWIM impl does, see `nextMemberToPing`
- line 121: // TODO: may also want to return "these were removed" if we need to make any internal cleanup
- line 123: // TODO: a bit lazy implementation
- line 164: // FIXME: optimize ack reply; this can contain only rows of seen tables where we are "ahead" (and always "our" row)
- line 189: // effects are signalled via the ClusterShell, not here (it will also perform a merge) // TODO: a bit duplicated, could we maintain it here?
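The "stricter round-robin" at line 109 (as in SWIM's `nextMemberToPing`) is commonly implemented as a shuffled ring plus an index, reshuffled whenever membership changes. A generic sketch under that assumption, with `AddressableActorRef` stood in by a generic peer type:

```swift
/// Round-robin over a shuffled ring of peers: every peer is visited exactly
/// once per "round", in a random per-round order.
struct PeerRing<Peer: Hashable> {
    private var ring: [Peer] = []
    private var index = 0

    mutating func update(peers: Set<Peer>) {
        guard Set(self.ring) != peers else { return }
        // Membership changed: rebuild and reshuffle the ring, restart the round.
        self.ring = Array(peers).shuffled()
        self.index = 0
    }

    mutating func next() -> Peer? {
        guard !self.ring.isEmpty else { return nil }
        if self.index >= self.ring.count {
            self.ring.shuffle() // new round, new order
            self.index = 0
        }
        defer { self.index += 1 }
        return self.ring[self.index]
    }
}
```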
Sources/DistributedActors/ActorRef+Ask.swift (7 lines):
- line 88: // TODO: maybe a specialized one... for ask?
- line 92: // TODO: implement special actor ref instead of using a real actor
- line 201: // TODO: ask errors should be lovely and include where they were asked from (source loc)
- line 234: // FIXME: make this internal (!)
- line 258: // TODO: replace with a special minimal `_ActorRef` that does not require spawning or scheduling.
- line 272: // TODO: could we optimize the case when the target is _local_ and _terminated_ so we don't have to do the watch dance (heavy if we did it always),
- line 287: // FIXME: Hack to stop from subReceive. Should we allow this somehow?

Sources/DistributedActorsGenerator/Decls.swift (7 lines):
- line 62: // TODO: "$box\(self.name)" would be nicer, but it is reserved
- line 117: self.genericParameterDecls.map { $0.parameterDecl } // TODO: must handle where clauses better
- line 214: res.append((nil, "_replyTo", "_ActorRef>")) // TODO: make the same with the error envelope
- line 223: // FIXME: carry the return type raw in the reply enum
- line 273: // TODO: instead analyse the type syntax?
- line 280: // FIXME: this will break with nesting...
- line 303: hasher.combine(self.name) // FIXME: take into account enclosing scope

Sources/DistributedActors/Cluster/Transport/RemoteClusterActorPersonality.swift (7 lines):
- line 30: let system: ActorSystem // TODO: maybe we don't need to store it and can access it via clusterShell?
- line 71: // TODO: move instrumentation into the transport?
- line 88: self.instrumentation = system.settings.instrumentation.makeActorInstrumentation(self, address) // TODO: could be in association, per node
- line 100: // TODO: metric for dead letter: self.instrumentation.deadLetter(message: message, from: nil)
- line 109: // TODO: in case we'd get a new connection, the redeliveries must remain... so we always need to poll for the remote control from the association? the association would keep the buffers?
- line 110: // TODO: would this mean that we cannot implement re-delivery inside the NIO layer as we do today?
- line 116: // TODO: metric for dead letter: self.instrumentation.deadLetter(message: message, from: nil)

Sources/DistributedActors/Cluster/MembershipGossip/Cluster+MembershipGossip.swift (6 lines):
- line 27: // TODO: There is tons of compression opportunity about not having to send full tables around in general, but for now we will just send them around
- line 28: // FIXME: ensure that we never have a seen entry for a non-member
- line 137: let members = self.membership.members(withStatus: [.joining, .up, .leaving]) // FIXME: we should not require joining nodes in convergence, can loosen up a bit here I hope
- line 227: // TODO: func haveNotYetSeen(version: VersionVector): [UniqueNode]
- line 267: /// must be taken on the cluster layer, by using and checking for tombstones. // TODO: make a nasty test for this, a simple one we got; see MembershipGossipSeenTableTests
- line 274: // TODO: test removing a non-existing member

Sources/DistributedActorsTestKit/TestProbes.swift (6 lines):
- line 100: signalQueue: _LinkedBlockingQueue<_SystemMessage>, // TODO: maybe we don't need this one
- line 494: // TODO: would be nice to be able to also intercept system messages hm...
- line 519: // FIXME: implement by scheduling checks rather than spinning
- line 523: // TODO: make more async than seining like this, also with a check interval rather than a spin, or use the blocking queue properly
- line 572: // TODO: find a better name; it is not exactly "fish for message" though, that can ignore messages for a while, this one does not
- line 697: // TODO: expectTermination(of: ...) maybe nicer wording?

Sources/DistributedActors/Cluster/Cluster+Membership.swift (6 lines):
- line 65: /// and dropped, which happens only after an extended period of time. // FIXME: That period of time is not implemented
- line 161: // TODO: make this O(1) by allowing the wrapper type to equality-check only on NodeID
- line 199: // TODO: this could take into account roles, if we do them
- line 600: // TODO: diffing is not super well tested, may lose up numbers
- line 605: // TODO: can likely be optimized more
- line 630: // TODO: maybe conform to Sequence?

Sources/DistributedActors/ActorRefProvider.swift (6 lines):
- line 48: // TODO: consider if we need the abstraction / does it cost us?
- line 54: // TODO: should cache perhaps also associations to inject them eagerly to actor refs?
- line 56: // TODO: restructure it somehow, perhaps we don't need the full abstraction like this
- line 214: // TODO: Would be nice to not need this type at all; though the initialization dance prohibiting self access makes this a bit hard
- line 227: // TODO: duplicates some logic from the _traverse implementation on ActorSystem (due to initialization dances), see if we can remove the duplication
- line 228: // TODO: we may be able to pull this off by implementing the "root" as traversable and then we expose it to the Serialization() impl

Sources/DistributedActorsTestKit/ActorTestKit.swift (5 lines):
- line 87: // TODO: allow configuring the dispatcher for the probe, or always use the calling thread one
- line 124: // TODO: does not handle blocking longer than `within` well
- line 125: // TODO: should use default `within` from TestKit
- line 245: // TODO: how to better hide such more nasty assertions?
- line 246: // TODO: Not optimal since we always do traverseAll rather than follow the Path of the context

Sources/DistributedActors/Backoff.swift (5 lines):
- line 31: // TODO: make nicer for auto completion? (.constant) etc
- line 44: // TODO: implement noLongerThan: .seconds(30), where total time is taken from the actor system clock
- line 149: // TODO: clock + limit "max total wait time" etc
- line 158: // TODO: We could also implement taking a Clock, and using it to see if there's a total limit exceeded
- line 205: // TODO: potentially logic to check the clock if we exceeded the total backoff timeout here (and then return nil)
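Line 44's `noLongerThan:` idea caps the total time spent backing off, not just each interval. A sketch using `DispatchTime` as the clock; the TODOs above note the real implementation should take time from the actor system clock instead:

```swift
import Dispatch

/// Exponential backoff with a total-time budget, per the `noLongerThan:` TODO above.
struct ExponentialBackoff {
    private var nextInterval: Double   // seconds; grows by `factor` per attempt
    private let factor: Double
    private let maxInterval: Double    // per-attempt cap
    private let deadline: DispatchTime // total budget, fixed when the strategy is created

    init(initialInterval: Double, factor: Double = 1.5, maxInterval: Double, noLongerThan total: Double) {
        self.nextInterval = initialInterval
        self.factor = factor
        self.maxInterval = maxInterval
        self.deadline = .now() + total
    }

    /// The next delay in seconds, or nil once the total budget is spent (caller gives up).
    mutating func next() -> Double? {
        guard DispatchTime.now() < self.deadline else { return nil }
        defer { self.nextInterval = min(self.nextInterval * self.factor, self.maxInterval) }
        return min(self.nextInterval, self.maxInterval)
    }
}
```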
Sources/DistributedActors/Cluster/Leadership.swift (5 lines):
- line 104: private var membership: Cluster.Membership // FIXME: we need to ensure the membership is always up to date -- we need the initial snapshot or a diff from a zero state etc.
- line 117: // FIXME: we have to add "own node" since we're not getting the .snapshot... so we have to manually act as if..
- line 153: // TODO: if/when we'd have some election scheme that is async, e.g. "vote", then this timeout should NOT be infinite and should be handled properly
- line 188: /// without a leader to perform the unreachable -> down move. // TODO: keep thinking if we can do better here; we could do a quorum downing IMHO, and remove this impl completely as it's very "bad".
- line 222: // TODO: In situations which need strong guarantees, this leadership election scheme does NOT provide strong enough
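The scheme the TODOs at lines 188/222 criticize amounts to "the lowest reachable member leads, once a minimum number of members is present". A condensed sketch of that election rule with a simplified `Member` stand-in; the library's actual strategy may differ in details:

```swift
struct Member: Equatable {
    let node: String        // stand-in for UniqueNode
    let isReachable: Bool
}

/// Elect the lowest reachable member as leader, but only once enough members
/// are present; with fewer, no leadership decisions (such as downing) are taken.
func selectLeader(members: [Member], minimumNrOfMembers: Int) -> Member? {
    let reachable = members.filter(\.isReachable)
    guard reachable.count >= minimumNrOfMembers else { return nil }
    return reachable.min(by: { $0.node < $1.node })
}
```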
Sources/DistributedActors/Receptionist/Receptionist.swift (5 lines):
- line 32: // Implementation notes: // TODO: Intercept messages to register at remote receptionist, and handle locally?
- line 177: // FIXME: Implement me (!), we need to make the storage a counter
- line 200: // FIXME: improve this to always pass around AddressableActorRef rather than just the address (in the receptionist Subscribe message), remove this trick then
- line 213: var registeredKeys: Set = [] // TODO: OR we store it directly as registeredUnderKeys/subscribedToKeys in the dict
- line 341: // TODO: can we hide this? Relates to: https://bugs.swift.org/browse/SR-5880

Sources/DistributedActors/Cluster/ClusterShellState.swift (5 lines):
- line 27: /// Base backoff strategy to use in handshake retries // TODO: move it around somewhere so only handshake cares?
- line 41: // TODO: maybe move log and settings outside of state into the shell?
- line 368: // TODO: model the states to express this can not happen // there is a client side state machine and a server side one
- line 372: // TODO: validate if it is for the same UID or not; if not, we may be in trouble?
- line 390: // TODO: perhaps we instead just warn and ignore this; since it should be harmless

Sources/DistributedActors/Cluster/SWIM/SWIMActorShell.swift (5 lines):
- line 352: self.handlePingRequestResponse(response: .timeout(target: peerToPing, pingRequestOrigin: context.myself, timeout: pingTimeout, sequenceNumber: 0), pinged: peerToPing, context: context) // FIXME: that sequence number...
- line 414: let event = SWIM.MemberStatusChangedEvent(previousStatus: previousStatus, member: member) // FIXME: make SWIM emit an option of the event
- line 424: () // TODO: revisit logging more details here
- line 514: // FIXME: expose addMember after all
- line 532: // TODO: we are sending the ping here to initiate cluster membership. Once available, this should do a state sync instead

Sources/DistributedActors/Serialization/SerializationPool.swift (5 lines):
- line 23: /// // TODO: the scheme how we configure this may need some more re-thinking.
- line 30: // TODO: This should be internal, but is forced to be public by `_deserializeDeliver` on references.
- line 74: // TODO: also record the delay between submitting and starting serialization work here?
- line 82: // TODO: collapse those two and only use the instrumentation points, also for metrics
- line 140: // TODO: also record the delay between submitting and starting serialization work here?

Sources/DistributedActors/_BehaviorTimers.swift (5 lines):
- line 86: // TODO: eventually replace with our own scheduler implementation
- line 101: // TODO: This will be replaced by proper timer keys which can express such need eventually.
- line 107: for key in self.installedTimers.keys where includeSystemTimers || !key.isSystemTimer { // TODO: represent with a "system timer key" type?
- line 108: // TODO: the reason the `_` keys are not cancelled is because we want to cancel timers in _restartPrepare but we need "our restart timer" to remain
- line 243: // TODO: avoid the box part?

Sources/DistributedActors/Cluster/Reception/OperationLog.swift (4 lines):
- line 16: // TODO: compacting ([+A, -A] -> no need to store A)
- line 22: // TODO: optimize the "op / delta buffer" which this really is, kind of like in Op-based CRDTs
- line 44: // TODO: how to better express this, so it can be maintained by the OpLog itself
- line 47: // TODO: update the min

Sources/DistributedActorsTestKit/Cluster/ClusteredActorSystemsXCTestCase.swift (4 lines):
- line 289: // FIXME: this is a weak workaround around not having "extensions" (unique object per actor system)
- line 290: // FIXME: this can be removed once issue #458 lands
- line 298: system.cluster.ref.tell(.query(.associatedNodes(probe.ref))) // TODO: ask would be nice here
- line 344: let associatedNodes = try probe.expectMessage() // TODO: use interval here

Sources/DistributedActors/Cluster/Transport/WireMessages.swift (4 lines):
- line 34: // TODO: metadata
- line 35: // TODO: "flags" incl. isSystemMessage
- line 41: // TODO: such messages should go over a priority lane
- line 64: // TODO: Maybe offeringToSpeakAtVersion or something like that?

Sources/DistributedActors/Timers+Distributed.swift (4 lines):
- line 44: internal let ownerID: ActorIdentity // TODO: can be just identity
- line 67: log.debug("\(Self.self) deinit, cancelling [\(installedTimers.count)] timers") // TODO: include actor address
- line 79: // TODO: represent with a "system timer key" type?
- line 80: // TODO: the reason the `_` keys are not cancelled is because we want to cancel timers in _restartPrepare but we need "our restart timer" to remain

Sources/DistributedActors/Receptionist/DistributedReceptionist.swift (4 lines):
- line 206: // FIXME: Implement me (!), we need to make the storage a counter
- line 243: var registeredKeys: Set = [] // TODO: OR we store it directly as registeredUnderKeys/subscribedToKeys in the dict
- line 331: /// Offer a new listing to the subscription stream. // FIXME: implement this by offering single elements (!!!)
- line 335: // TODO: It would be lovely to be able to express this in the type system as "actor owned" or "actor local" to some actor instance.

Sources/DistributedActors/Adapters.swift (4 lines):
- line 17: // TODO: can this instead be a CellDelegate?
- line 80: () // ignore all other messages // TODO: why?
- line 281: () // ignore all other messages // TODO: why?
- line 300: return // TODO: "drop" the message

Sources/DistributedActors/Serialization/ActorRef+Serialization.swift (4 lines):
- line 70: fatalError("Can not resolve actor refs without CodingUserInfoKey.actorSerializationContext set!") // TODO: better message
- line 197: // FIXME: encode as authority/URI with an optimized parser here, this will be executed many many times...
- line 219: // FIXME: encode as authority/URI with an optimized parser here, this will be executed many many times...
- line 283: static let watch = 0 // TODO: UNWATCH!?
Sources/DistributedActors/Instrumentation/ActorTransportInstrumentation.swift (4 lines):
- line 18: // TODO: all these to accept trace context or something similar
- line 20: // FIXME: recipient address, not just path
- line 23: // TODO: func remoteActorMessageSerializeFailed
- line 27: // TODO: func remoteActorMessageDeserializeEndFailed

Sources/ActorSingletonPlugin/ActorSingletonProxy.swift (3 lines):
- line 87: // TODO: perhaps we can figure out where `to` is next and hand over gracefully?
- line 161: // FIXME: connecting to the singleton through proxy incurs an extra hop. an optimization would be
- line 200: // TODO: log this warning only "once in a while" after the buffer becomes full

Sources/DistributedActors/ActorShell+Children.swift (3 lines):
- line 269: // TODO: revise surface API what we want to expose; stopping by just name may be okay?
- line 315: try self.validateUniqueName(name) // FIXME: reserve name
- line 322: case .default: dispatcher = self._dispatcher // TODO: this is dispatcher inheritance, not sure about it

Sources/DistributedActors/Serialization/Serialization+Serializers+Codable.swift (3 lines):
- line 27: // TODO: would be nice to be able to abstract over the coders (using TopLevelDecoder-like types), then rename this to `AnyCodableSerializer`
- line 69: // TODO: would be nice to be able to abstract over the coders (using TopLevelDecoder-like types), then rename this to `AnyCodableSerializer`
- line 96: // FIXME: validate format = self.format?

Sources/DistributedActors/Dispatchers.swift (3 lines):
- line 19: // FIXME: MessageDispatcher is going to be replaced by custom Executor types in Swift itself
- line 22: // TODO: we should make it dedicated to dispatch() rather than raw executing perhaps? This way it can take care of fairness things
- line 32: // TODO: discuss naming of `InternalMessageDispatcher`

Sources/DistributedActors/Protobuf/WireProtocol+Serialization.swift (3 lines):
- line 15: import struct Foundation.Data // TODO: would prefer to not go "through" Data as our target always is ByteBuffer
- line 60: // TODO: conversions are naive here, we'd want to express this more nicely...
- line 145: // TODO: worth making it Proto representable or not?

Sources/DistributedActors/Serialization/Serialization+Serializers.swift (3 lines):
- line 27: // TODO: Document, since users need to implement these
- line 35: // TODO: does this stay like this?
- line 70: // FIXME: this is evil?

Sources/DistributedActors/Cluster/ClusterSettings.swift (2 lines):
- line 165: // TODO: do we need to separate server and client sides? Sounds like a reasonable thing to do.
- line 171: MultiThreadedEventLoopGroup(numberOfThreads: System.coreCount) // TODO: share pool with others

Sources/ActorSingletonPlugin/ActorSingletonPlugin.swift (2 lines):
- line 39: // FIXME: document that it may crash, it may right?
- line 84: // TODO: Future; TODO2: no need for this at all now since we have async await

Sources/DistributedActors/Gossip/Gossip+Serialization.swift (2 lines):
- line 43: guard let identifier = identifierAny as? GossipIdentifier else { // FIXME: just force GossipIdentifier to be codable, avoid this hacky dance?
- line 50: // FIXME: sometimes we could encode raw and not via the Data -- think about it and fix it

Sources/DistributedActors/Metrics/Metrics+Actor.swift (2 lines):
- line 22: // TODO: Could apply the dynamic lookup tricks we do in tracing to make this lookup more painless / safe...
- line 86: // TODO: create message processing metrics

Sources/ActorSingletonPlugin/ActorSingletonAllocationStrategy.swift (2 lines):
- line 53: // TODO: "oldest node"
- line 55: // TODO: "race to become the host": all nodes race and try CAS-like to set themselves as leader -- this we could do with cas-paxos perhaps or similar; it is less predictable which node wins, which can be good or bad

Sources/DistributedActors/Instrumentation/ActorInstrumentation.swift (2 lines):
- line 18: // TODO: all these to accept trace context or something similar
- line 28: // TODO: Those read bad, make one that is from/to in params?

Sources/DistributedActors/Protobuf/ActorAddress+Serialization.swift (2 lines):
- line 35: // TODO: make Error
- line 95: self.segments = value.segments.map { $0.value } // TODO: avoiding the mapping could be nice... store segments as strings?

Sources/DistributedActors/Instrumentation/os_signpost/ActorInstrumentation+os_signpost.swift (2 lines):
- line 170: // FIXME: we need the sender() to attach properly
- line 305: // TODO: make an interval so we know the length of how long an actor processes a message

Sources/SwiftBenchmarkTools/DriverUtils.swift (2 lines):
- line 437: // FIXME: there is a Linux PMC API you can use to get this, but it's
- line 472: // FIXME: This current implementation doesn't work on Linux. It is disabled

Sources/DistributedActors/Concurrency/_FixedThreadPool.swift (2 lines):
- line 19: // TODO: Discuss naming of `Worker`
- line 69: // FIXME: We are currently using a timed `poll` instead of indefinitely

Sources/DistributedActors/utils.swift (2 lines):
- line 62: // TODO: Remove this once we're happy with swift-backtrace always printing backtraces (also on macOS)
- line 132: let threadId = pthread_self() // TODO: since pthread_threadid_np is not available, how to get an id?

Sources/DistributedActors/Serialization/Serialization+Codable.swift (2 lines):
- line 24: // TODO: once we can abstract over Coders, all of these could go away most likely (and accept a generic TopLevelCoder)
- line 58: // TODO: once we can abstract over Coders, all of these could go away most likely (and accept a generic TopLevelCoder)

Sources/DistributedActorsTestKit/ShouldMatchers.swift (2 lines):
- line 370: return TestMatchers(it: self, callSite: callSiteInfo).toBeEmpty() // TODO: lazy impl, should get "expected empty" messages etc
- line 375: return TestMatchers(it: self, callSite: callSiteInfo).toBeNotEmpty() // TODO: lazy impl, should get "expected non-empty" messages etc

Package.swift (2 lines):
- line 16: // TODO: currently disabled warnings-as-errors because of Sendable check noise and work in progress on different toolchains
- line 79: // MARK: Plugins // TODO: rename since may be confused with package plugins?
Sources/DistributedActors/ActorMessages.swift (2 lines):
- line 27: public typealias ActorMessage = Codable // FIXME: MAKE THIS SENDABLE: & Sendable
- line 35: // FIXME: we should not add Codable conformance onto a stdlib type, but rather fix this in stdlib

Sources/DistributedActors/Plugins/ActorSystem+Plugins.swift (2 lines):
- line 20: // TODO: move to async function
- line 24: // TODO: move to async function

Sources/DistributedActors/Time.swift (2 lines):
- line 23: // TODO: We have discussed and wanted to "do your own" rather than import the NIO ones, but not entirely sold on the usefulness of replicating them -- ktoso
- line 34: fileprivate init(_ nanoseconds: Value) { // FIXME: Needed the copy since this constructor

Sources/DistributedActors/Serialization/TopLevelBytesBlobCoders.swift (2 lines):
- line 22: // TODO: TopLevelDataEncoder
- line 176: // TODO: TopLevelDataDecoder

Sources/DistributedActors/EventStream.swift (2 lines):
- line 72: // TODO: clean this up since it's used by tests only (e.g., EventStreamConsumer)
- line 103: enum Message: NonTransportableActorMessage { // TODO: make it codable, transportability depends on the Event really

Sources/DistributedActors/StashBuffer.swift (2 lines):
- line 32: // TODO: owner can be used to create metrics for the stash of specific owner
- line 77: // TODO: can we make this honor the run length like `Mailbox` does?

Sources/DistributedActors/Clocks/VersionVector.swift (2 lines):
- line 37: // TODO: should we disallow mixing ReplicaID types somehow?
- line 40: public typealias ReplicaVersion = (replicaID: ReplicaID, version: Version) // TODO: struct?
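Line 40's "TODO: struct?" exists because a tuple cannot conform to protocols; a struct regains Hashable and Codable conformances. A sketch, assuming a `UInt64` version counter (the library's actual `Version` type may differ):

```swift
/// Struct replacement for `(replicaID: ReplicaID, version: Version)`:
/// unlike a tuple, it can conform to Hashable and Codable.
struct ReplicaVersion<ReplicaID: Hashable & Codable>: Hashable, Codable {
    let replicaID: ReplicaID
    var version: UInt64
}
```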
Sources/DistributedActors/Signals.swift (2 lines):
- line 26: public protocol Signal: NonTransportableActorMessage, Sendable {} // FIXME: we could allow them as Codable; we never send them over the wire, but people might manually if they wanted to, I suppose
- line 88: public let nodeTerminated: Bool // TODO: Making this a `Reason` could be nicer.

Protos/WireProtocol.proto (2 lines):
- line 90: // TODO: nicer representation, though it needs an endianness dance since this is then encoded in little endian; revisit how and where to represent once we have a solid handshake -- ktoso
- line 93: // TODO: wasteful representation, keeping for now to iterate on handshake -- ktoso

Sources/DistributedActors/Receptionist/DistributedReception.swift (2 lines):
- line 147: // // TODO: assert the refs match type?
- line 270: // return try! Guest.resolve(self._guest._ref.asAddressable.asAnyActorIdentity, using: system) // FIXME: cleanup these APIs, should never need throws, resolve earlier

Sources/DistributedActors/Protobuf/ProtobufMessage+Extensions.swift (2 lines):
- line 23: // FIXME: Avoid the copying, needs SwiftProtobuf changes
- line 36: // let data = try self.jsonString().data(using: .utf8)! // TODO: allow a "debug mode with json payloads"?

Sources/ActorSingletonPlugin/ActorSingletonManager.swift (2 lines):
- line 62: // TODO: (optimization) tell `ActorSingletonManager` on `from` node that this node is taking over (https://github.com/apple/swift-distributed-actors/issues/329)
- line 67: // TODO: (optimization) tell `ActorSingletonManager` on `to` node that this node is handing off (https://github.com/apple/swift-distributed-actors/issues/329)

Sources/DistributedActors/Cluster/Chaos/FaultyNetworkSimulatingHandler.swift (2 lines):
- line 31: // TODO: in the future it can get more stateful behaviors if we wanted to, or separate inbound/outbound etc.
- line 90: // TODO: When `delay`, we have to be careful not to change the ordering of the messages, meaning that if delayed, then all following messages have to be delayed as well.

Sources/DistributedActors/SystemMessages.swift (2 lines):
- line 59: case terminated(ref: AddressableActorRef, existenceConfirmed: Bool, addressTerminated: Bool) // TODO: more additional info? // TODO: send terminated PATH, not ref; sending to it does not make sense after all
- line 72: case nodeTerminated(UniqueNode) // TODO: more additional info?

Sources/DistributedActors/TimeSpec.swift (2 lines):
- line 27: /// Not intended for general use. TODO: Make internal if possible.
- line 31: // TODO: move to Time.swift?

Sources/DistributedActors/Serialization/Serialization+Manifest.swift (2 lines):
- line 144: // TODO: register types until https://github.com/apple/swift/pull/30318 is merged?
- line 149: // TODO: mark as unsafe-mode only

Sources/DistributedActors/Cluster/Cluster+Event.swift (2 lines):
- line 60: // FIXME: enable these assertions
- line 83: // FIXME: enable these assertions

Sources/DistributedActors/Gossip/Gossiper.swift (2 lines):
- line 72: // FIXME: is there some way to express that actually, Metadata is INSIDE Payload so I only want to pass the "envelope" myself...?
- line 92: // TODO: just force GossipIdentifier to be codable, avoid this dance?

Sources/DistributedActors/Cluster/ClusterShell+LeaderActions.swift (2 lines):
- line 138: // as otherwise the wrapper enforces "vector time moves forward" // TODO: or should we?
- line 161: // TODO: will this "just work" as we removed from membership, so gossip will tell others...?

Sources/DistributedActors/Receptionist/ReceptionistOperations.swift (2 lines):
- line 133: // TODO: can we hide this? Relates to: https://bugs.swift.org/browse/SR-5880
- line 135: // TODO: can we hide this? Relates to: https://bugs.swift.org/browse/SR-5880

Sources/DistributedActors/Gossip/GossipLogic.swift (2 lines):
- line 52: // TODO: OrderedSet would be the right thing here, to be honest...
- line 54: // TODO: make a directive here

Sources/DistributedActors/Cluster/ClusterControl.swift (2 lines):
- line 112: // TODO: (no graceful steps implemented today yet) leave the cluster.
- line 113: // TODO: leave should perhaps return a future or something to await on.

Sources/DistributedActors/Receptionist/Reception.swift (2 lines):
- line 87: // TODO: assert the refs match type?
- line 204: return try! Guest.resolve(self._guest._ref.asAddressable.asAnyActorIdentity, using: system) // FIXME: cleanup these APIs, should never need throws, resolve earlier

Sources/DistributedActors/Collections/Heap.swift (2 lines):
- line 37: // Slightly modified version of NIO's Heap. TODO: upstream changes
- line 270: // TODO: document if cheap (AFAICS yes)

Sources/DistributedActors/ActorSystemSettings.swift (2 lines):
- line 53: // FIXME: should have a more proper config section
- line 112: // TODO: hope to remove this once a StdOutLogHandler lands that has formatting support;

Sources/DistributedActors/DistributedActor+Internal.swift (2 lines):
- line 81: // FIXME: terrible hack, instead just store the id then?
- line 107: // TODO: this is just to prevent a DI crash because of enums without cases and Codable

Sources/DistributedActors/Concurrency/locks.swift (1 line):
- line 22: // FIXME: Why is this here and not in concurrency helpers?

Sources/DistributedActors/Instrumentation/ReceptionistInstrumentation.swift (1 line):
- line 26: // TODO: lookup separately?
Sources/DistributedActors/Instrumentation/os_signpost/InstrumentationProvider+os_signpost.swift (1 line):
- line 22: // TODO: how to guard in iOS etc here?
Sources/DistributedActors/Cluster/Protobuf/Cluster+Serialization.swift (1 line):
- line 23: // FIXME: change this completely
Sources/DistributedActors/Scheduler.swift (1 line):
- line 65: // TODO: this is mostly only a placeholder impl; we'd need a proper wheel timer most likely
Sources/DistributedActors/Protobuf/ActorAddress.pb.swift (1 line):
- line 41: /// TODO oneof { senderNode | recipientNode | node }
Sources/DistributedActors/Serialization/Serialization+PrimitiveSerializers.swift (1 line):
- line 33: let len = message.lengthOfBytes(using: .utf8) // TODO: optimize for ascii? [see sketch below]
Sources/DistributedActors/Protobuf/SystemMessages+Serialization.swift (1 line):
- line 155: // TODO: it is known dead, optimize the resolve?
Sources/DistributedActors/Cluster/DiscoveryShell.swift (1 line):
- line 46: // TODO: would there be cases where we want to reconnect the discovery mechanism instead? (we could handle it here)
Sources/DistributedActorsGenerator/GenerateActors.swift (1 line):
- line 47: // TODO: does not work cross module yet (it would break)
Sources/DistributedActors/Cluster/SWIM/SWIM+Messages.swift (1 line):
- line 66: /// - (TODO: this can just be an actor listening to events once we have events subbing) the shell queries `downingProvider` for decision for downing the node
Sources/DistributedActors/Protobuf/WireProtocol.pb.swift (1 line):
- line 327: /// TODO: wasteful representation, keeping for now to iterate on handshake -- ktoso
Sources/DistributedActors/String+Extensions.swift (1 line):
- line 204: self.appendLiteral("[\(ref.address)]") // TODO: make those address
Sources/DistributedActors/Instrumentation/ActorMailboxInstrumentation.swift (1 line):
- line 18: // TODO: all these to accept trace context or something similar
Sources/DistributedActors/ActorNaming.swift (1 line):
- line 170: // TODO: we could include node ids or similar if we wanted snowflakes...
Sources/DistributedActors/Serialization/TopLevelBytesBlobSerializer.swift (1 line):
- line 33: let encoder = TopLevelBytesBlobEncoder(allocator: self.allocator) // TODO: make it not a class?
Sources/DistributedActorsTestKit/ByteBuffer+Testing.swift (1 line):
- line 23: // TODO: probably remove those?
Sources/DistributedActors/Plugins/ActorSystemSettings+Plugins.swift (1 line):
- line 53: // @available(*, deprecated, message: "use 'actor cluster' transport version instead") // TODO: deprecate
Sources/DistributedActorsTestKit/Data+Testing.swift (1 line):
- line 20: // FIXME: this is obviously not a good idea
Sources/DistributedActors/ActorMessage+Protobuf.swift (1 line):
- line 72: // TODO: Thought; we could detect if we're nested in a top-level JSON that we should encode as json perhaps, since proto can do this?
Sources/DistributedActors/DistributedActor+Messages.swift (1 line):
- line 31: associatedtype Message: Codable // TODO: & Sendable
Sources/DistributedActors/Gossip/PeerSelection.swift (1 line):
- line 23: /// // TODO: implement SWIMs selection in terms of this
Protos/ActorAddress.proto (1 line):
- line 21: UniqueNode node = 1; // TODO oneof { senderNode | recipientNode | node }
Sources/DistributedActors/Version.swift (1 line):
- line 17: // TODO: Exact semantics remain to be defined. Reserved likely to be used for flags "connection modes" etc "don't connect me, I just send 1 message" etc?
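For the Serialization+PrimitiveSerializers.swift `// TODO: optimize for ascii?` entry above, one plausible direction (an assumption, not the library's stated plan): `lengthOfBytes(using:)` goes through the Foundation bridge, while Swift's native UTF-8 view is typically O(1) for contiguously stored strings, and for ASCII-only content the byte count equals the character count, so no separate ASCII path is needed just to compute the length.

```swift
// A hedged sketch, not the library's code: length via the native UTF-8 view.
func utf8SerializedLength(of message: String) -> Int {
    message.utf8.count // typically O(1) for natively stored Swift strings; no bridging
}

// For comparison, the current call site uses:
// let len = message.lengthOfBytes(using: .utf8)
```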
Sources/DistributedActors/Cluster/SWIM/ClusterMembership+Converters.swift (1 line):
- line 32: context.system._resolve(context: .init(address: ._swim(on: self.asUniqueNode!), system: context.system)) // TODO: the ! is not so nice [see sketch below]
Sources/DistributedActorsTestKit/ActorSystemXCTestCase.swift (1 line):
- line 21: // TODO: Document and API guarantees
Sources/DistributedActors/Refs+any.swift (1 line):
- line 138: return // TODO: drop the message
Sources/DistributedActors/Concurrency/AffinityThreadPool.swift (1 line):
- line 82: // TODO: We are doing a timed poll here to guarantee that we
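The ClusterMembership+Converters.swift entry above flags the force-unwrap `self.asUniqueNode!`. A small sketch of the usual `guard let` alternative, with stand-in types since the library's real member and node shapes are not reproduced here:

```swift
// Stand-in types for illustration only; not the library's definitions.
struct UniqueNode {}
struct ClusterMember {
    var asUniqueNode: UniqueNode? // nil while the member lacks a unique identity
}

enum ResolveError: Error {
    case noUniqueNode(ClusterMember)
}

// guard-let surfaces the nil case to the caller instead of trapping on `!`.
func swimNode(of member: ClusterMember) throws -> UniqueNode {
    guard let unique = member.asUniqueNode else {
        throw ResolveError.noUniqueNode(member)
    }
    return unique
}
```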