in Sources/Core/Microsoft.StreamProcessing/Operators/Afa/CompiledPartitionedAfaPipe_SingleEvent.cs [47:506]
/// <summary>
/// Processes one incoming batch of partitioned events through the compiled AFA
/// (augmented finite automaton). For each live data event it (1) advances any
/// active states registered for that event's key and (2) attempts fresh
/// activations from the start states, emitting an output row whenever a final
/// state is reached. Low-watermark and punctuation rows instead trigger cleanup
/// of per-partition bookkeeping (tentative output, seen-event tracking, last
/// sync time) before being forwarded downstream.
/// </summary>
/// <param name="batch">The input message batch; it is freed before this method returns.</param>
public override unsafe void OnNext(StreamMessage<TKey, TPayload> batch)
{
    // Scratch stack used to chase chains of epsilon-arc destinations from a
    // newly reached state; shared across all events in the batch.
    var stack = new Stack<int>();
    var count = batch.Count;
    // Locally cached column arrays of the *output* batch. These must be
    // re-read after every FlushContents() call, which swaps this.batch for a
    // fresh message; stale references would write into a recycled batch.
    var dest_vsync = this.batch.vsync.col;
    var dest_vother = this.batch.vother.col;
    var destkey = this.batch.key.col;
    var dest_hash = this.batch.hash.col;
    var srckey = batch.key.col;
    // Reusable traverser over the active-state map, re-seeded per event via Find().
    var activeFindTraverser = new FastMap<GroupedActiveState<TKey, TRegister>>.FindTraverser(this.activeStates);
    fixed (long* src_bv = batch.bitvector.col, src_vsync = batch.vsync.col, src_vother = batch.vother.col)
    {
        fixed (int* src_hash = batch.hash.col)
        {
            for (int i = 0; i < count; i++)
            {
                // Bit clear in the bitvector => row i is a live data event.
                if ((src_bv[i >> 6] & (1L << (i & 0x3f))) == 0)
                {
                    var key = srckey[i];
                    var partitionKey = this.getPartitionKey(key);
                    int partitionIndex = EnsurePartition(partitionKey);
                    long synctime = src_vsync[i];
                    // When multiple events may share a sync time, results are
                    // buffered per partition ("tentative output") until time
                    // moves forward, because a duplicate event for the same key
                    // at the same time retracts previously produced results.
                    if (!this.IsSyncTimeSimultaneityFree)
                    {
                        int index;
                        var tentativeVisibleTraverser = new FastMap<OutputEvent<TKey, TRegister>>.VisibleTraverser(this.tentativeOutput.entries[partitionIndex].value);
                        if (synctime > this.lastSyncTime.entries[partitionIndex].value) // move time forward
                        {
                            // Time advanced for this partition: duplicate
                            // tracking for this key resets, and the previous
                            // timestamp's tentative output is now final.
                            this.seenEvent.Remove(key);
                            if (this.tentativeOutput.Count > 0)
                            {
                                tentativeVisibleTraverser.currIndex = 0;
                                // Flush all tentative rows for this partition to
                                // the output batch, stamped with the *previous*
                                // sync time at which they were produced.
                                while (tentativeVisibleTraverser.Next(out index, out int hash))
                                {
                                    var elem = this.tentativeOutput.entries[partitionIndex].value.Values[index];
                                    dest_vsync[this.iter] = this.lastSyncTime.entries[partitionIndex].value;
                                    dest_vother[this.iter] = elem.other;
                                    this.batch.payload.col[this.iter] = elem.payload;
                                    destkey[this.iter] = elem.key;
                                    dest_hash[this.iter] = hash;
                                    this.iter++;
                                    if (this.iter == Config.DataBatchSize)
                                    {
                                        // Output batch full: flush and re-cache
                                        // the new batch's column arrays.
                                        FlushContents();
                                        dest_vsync = this.batch.vsync.col;
                                        dest_vother = this.batch.vother.col;
                                        destkey = this.batch.key.col;
                                        dest_hash = this.batch.hash.col;
                                    }
                                }
                                this.tentativeOutput.entries[partitionIndex].value.Clear(); // Clear the tentative output list
                            }
                            this.lastSyncTime.entries[partitionIndex].value = synctime;
                        }
                        // seenEvent values: 1 = key seen once at this sync time,
                        // 2 = duplicate already detected and state purged.
                        if (this.seenEvent.Lookup(srckey[i], out index)) // Incoming event is a simultaneous one
                        {
                            if (this.seenEvent.entries[index].value == 1) // Detecting first duplicate, need to adjust state
                            {
                                this.seenEvent.entries[index].value = 2;
                                // Delete tentative output for that key
                                if (!this.IsSyncTimeSimultaneityFree)
                                {
                                    var tentativeFindTraverser = new FastMap<OutputEvent<TKey, TRegister>>.FindTraverser(this.tentativeOutput.entries[partitionIndex].value);
                                    if (tentativeFindTraverser.Find(src_hash[i]))
                                    {
                                        while (tentativeFindTraverser.Next(out index))
                                        {
                                            // Hash buckets can collide, so confirm the key matches before removal.
                                            if (this.keyEqualityComparer(this.tentativeOutput.entries[partitionIndex].value.Values[index].key, srckey[i]))
                                            {
                                                tentativeFindTraverser.Remove();
                                            }
                                        }
                                    }
                                }
                                // Delete active states for that key
                                if (activeFindTraverser.Find(src_hash[i]))
                                {
                                    while (activeFindTraverser.Next(out index))
                                    {
                                        if (this.keyEqualityComparer(this.activeStates.Values[index].key, srckey[i]))
                                        {
                                            activeFindTraverser.Remove();
                                        }
                                    }
                                }
                            }
                            // Don't process this event: duplicates at the same
                            // sync time invalidate pattern matching for this key.
                            continue;
                        }
                        else
                            this.seenEvent.Insert(srckey[i], 1);
                    }
                    /* (1) Process currently active states */
                    bool ended = true;
                    if (activeFindTraverser.Find(src_hash[i]))
                    {
                        int orig_index;
                        // Track which active states need to be inserted after the current traversal
                        var newActiveStates = new List<GroupedActiveState<TKey, TRegister>>();
                        while (activeFindTraverser.Next(out int index))
                        {
                            orig_index = index;
                            var state = this.activeStates.Values[index];
                            // Skip hash-collided entries belonging to other keys.
                            if (!this.keyEqualityComparer(state.key, srckey[i])) continue;
                            // Only advance the state if the pattern window (MaxDuration) has not expired.
                            if (state.PatternStartTimestamp + this.MaxDuration > synctime)
                            {
                                var currentStateMap = this.singleEventStateMap[state.state];
                                if (currentStateMap != null)
                                {
                                    var m = currentStateMap.Length;
                                    for (int cnt = 0; cnt < m; cnt++)
                                    {
                                        var arcinfo = currentStateMap[cnt];
                                        // Fence = arc guard predicate; Transfer = optional register update.
                                        if (arcinfo.Fence(synctime, batch[i], state.register))
                                        {
                                            var newReg = arcinfo.Transfer == null
                                                ? state.register
                                                : arcinfo.Transfer(synctime, batch[i], state.register);
                                            int ns = arcinfo.toState;
                                            // Walk the target state plus any epsilon-reachable states (via stack).
                                            while (true)
                                            {
                                                if (this.isFinal[ns])
                                                {
                                                    // Match found: end time is capped at the pattern window's end.
                                                    var otherTime = Math.Min(state.PatternStartTimestamp + this.MaxDuration, StreamEvent.InfinitySyncTime);
                                                    if (!this.IsSyncTimeSimultaneityFree)
                                                    {
                                                        // Buffer tentatively; a later duplicate at this sync time may retract it.
                                                        var tentativeOutputEntry = this.tentativeOutput.entries[partitionIndex].value;
                                                        int ind = tentativeOutputEntry.Insert(src_hash[i]);
                                                        tentativeOutputEntry.Values[ind].other = otherTime;
                                                        tentativeOutputEntry.Values[ind].key = srckey[i];
                                                        tentativeOutputEntry.Values[ind].payload = newReg;
                                                    }
                                                    else
                                                    {
                                                        // Safe to emit directly.
                                                        dest_vsync[this.iter] = synctime;
                                                        dest_vother[this.iter] = otherTime;
                                                        this.batch[this.iter] = newReg;
                                                        destkey[this.iter] = srckey[i];
                                                        dest_hash[this.iter] = src_hash[i];
                                                        this.iter++;
                                                        if (this.iter == Config.DataBatchSize)
                                                        {
                                                            FlushContents();
                                                            dest_vsync = this.batch.vsync.col;
                                                            dest_vother = this.batch.vother.col;
                                                            destkey = this.batch.key.col;
                                                            dest_hash = this.batch.hash.col;
                                                        }
                                                    }
                                                }
                                                if (this.hasOutgoingArcs[ns])
                                                {
                                                    // Since we will eventually remove this state/index from activeStates, attempt to reuse this index for the outgoing state instead of deleting/re-adding
                                                    // If index is already -1, this means we've already reused the state and must allocate/insert a new index for the outgoing state.
                                                    if (index != -1)
                                                    {
                                                        this.activeStates.Values[index].key = srckey[i];
                                                        this.activeStates.Values[index].state = ns;
                                                        this.activeStates.Values[index].register = newReg;
                                                        this.activeStates.Values[index].PatternStartTimestamp = state.PatternStartTimestamp;
                                                        index = -1;
                                                    }
                                                    else
                                                    {
                                                        // Do not attempt to insert directly into activeStates, as that could corrupt the traversal state.
                                                        newActiveStates.Add(new GroupedActiveState<TKey, TRegister>
                                                        {
                                                            key = srckey[i],
                                                            state = ns,
                                                            register = newReg,
                                                            PatternStartTimestamp = state.PatternStartTimestamp,
                                                        });
                                                    }
                                                    ended = false;
                                                    // Add epsilon arc destinations to stack
                                                    if (this.epsilonStateMap == null) break;
                                                    if (this.epsilonStateMap[ns] != null)
                                                    {
                                                        for (int cnt2 = 0; cnt2 < this.epsilonStateMap[ns].Length; cnt2++)
                                                            stack.Push(this.epsilonStateMap[ns][cnt2]);
                                                    }
                                                }
                                                if (stack.Count == 0) break;
                                                ns = stack.Pop();
                                            }
                                            if (this.IsDeterministic) break; // We are guaranteed to have only one successful transition
                                        }
                                    }
                                }
                            }
                            // Remove the traversed entry only if its slot was NOT
                            // reused for a successor state (reuse sets index to -1).
                            if (index == orig_index) activeFindTraverser.Remove();
                            if (this.IsDeterministic) break; // We are guaranteed to have only one active state
                        }
                        // Now that we are done traversing the current active states, add any new ones.
                        foreach (var newActiveState in newActiveStates)
                        {
                            this.activeStates.Insert(src_hash[i], newActiveState);
                        }
                    }
                    /* (2) Start new activations from the start state(s) */
                    // If overlapping instances are disallowed, only start a new
                    // activation when no existing activation made progress.
                    if (!this.AllowOverlappingInstances && !ended) continue;
                    for (int counter = 0; counter < this.numStartStates; counter++)
                    {
                        int startState = this.startStates[counter];
                        var startStateMap = this.singleEventStateMap[startState];
                        if (startStateMap != null)
                        {
                            var m = startStateMap.Length;
                            for (int cnt = 0; cnt < m; cnt++)
                            {
                                var arcinfo = startStateMap[cnt];
                                // Start-state arcs are evaluated against the default register.
                                if (arcinfo.Fence(synctime, batch[i], this.defaultRegister))
                                {
                                    var newReg = arcinfo.Transfer == null
                                        ? this.defaultRegister
                                        : arcinfo.Transfer(synctime, batch[i], this.defaultRegister);
                                    int ns = arcinfo.toState;
                                    // Same final-state/epsilon walk as in section (1), but the
                                    // pattern window starts at the current sync time.
                                    while (true)
                                    {
                                        if (this.isFinal[ns])
                                        {
                                            var otherTime = Math.Min(synctime + this.MaxDuration, StreamEvent.InfinitySyncTime);
                                            if (!this.IsSyncTimeSimultaneityFree)
                                            {
                                                var tentativeOutputEntry = this.tentativeOutput.entries[partitionIndex].value;
                                                int ind = tentativeOutputEntry.Insert(src_hash[i]);
                                                tentativeOutputEntry.Values[ind].other = otherTime;
                                                tentativeOutputEntry.Values[ind].key = srckey[i];
                                                tentativeOutputEntry.Values[ind].payload = newReg;
                                            }
                                            else
                                            {
                                                dest_vsync[this.iter] = synctime;
                                                dest_vother[this.iter] = otherTime;
                                                this.batch[this.iter] = newReg;
                                                destkey[this.iter] = srckey[i];
                                                dest_hash[this.iter] = src_hash[i];
                                                this.iter++;
                                                if (this.iter == Config.DataBatchSize)
                                                {
                                                    FlushContents();
                                                    dest_vsync = this.batch.vsync.col;
                                                    dest_vother = this.batch.vother.col;
                                                    destkey = this.batch.key.col;
                                                    dest_hash = this.batch.hash.col;
                                                }
                                            }
                                        }
                                        if (this.hasOutgoingArcs[ns])
                                        {
                                            // Fresh activation: no existing slot to reuse, insert directly.
                                            int index = this.activeStates.Insert(src_hash[i]);
                                            this.activeStates.Values[index].key = srckey[i];
                                            this.activeStates.Values[index].state = ns;
                                            this.activeStates.Values[index].register = newReg;
                                            this.activeStates.Values[index].PatternStartTimestamp = synctime;
                                            // Add epsilon arc destinations to stack
                                            if (this.epsilonStateMap == null) break;
                                            if (this.epsilonStateMap[ns] != null)
                                            {
                                                for (int cnt2 = 0; cnt2 < this.epsilonStateMap[ns].Length; cnt2++)
                                                    stack.Push(this.epsilonStateMap[ns][cnt2]);
                                            }
                                        }
                                        if (stack.Count == 0) break;
                                        ns = stack.Pop();
                                    }
                                    if (this.IsDeterministic) break; // We are guaranteed to have only one successful transition
                                }
                            }
                        }
                        if (this.IsDeterministic) break; // We are guaranteed to have only one start state
                    }
                }
                else if (src_vother[i] == PartitionedStreamEvent.LowWatermarkOtherTime)
                {
                    // Low watermark: no partition can later produce events before
                    // synctime, so state for partitions that are "stale" (strictly
                    // behind the watermark) can be finalized and discarded.
                    long synctime = src_vsync[i];
                    if (!this.IsSyncTimeSimultaneityFree)
                    {
                        // Clean active states for stale partitions
                        int seenEventIndex;
                        if (this.activeStates.Count > 0)
                        {
                            var activeVisibleTraverser = new FastMap<GroupedActiveState<TKey, TRegister>>.VisibleTraverser(this.activeStates);
                            while (activeVisibleTraverser.Next(out int activeStateIndex, out _))
                            {
                                var activeState = this.activeStates.Values[activeStateIndex];
                                // The activation's window has fully elapsed; it can never complete.
                                if (synctime >= activeState.PatternStartTimestamp + this.MaxDuration)
                                {
                                    // Since we know this partition is stale, remove it from seenEvent as well.
                                    this.seenEvent.Remove(activeState.key);
                                    this.activeStates.Remove(activeStateIndex);
                                }
                            }
                        }
                        // Clean seen events from stale partitions. This enumeration is necessary for stale partitions without active state.
                        seenEventIndex = FastDictionary2<TPartitionKey, long>.IteratorStart;
                        int partitionIndex;
                        while (this.seenEvent.Iterate(ref seenEventIndex))
                        {
                            var partitionKey = this.getPartitionKey(this.seenEvent.entries[seenEventIndex].key);
                            if (this.lastSyncTime.Lookup(partitionKey, out partitionIndex) && synctime > this.lastSyncTime.entries[partitionIndex].value)
                            {
                                this.seenEvent.Remove(this.seenEvent.entries[seenEventIndex].key);
                            }
                        }
                        // Clean last synctime and tentative output from stale partitions (these two need to be kept in sync)
                        partitionIndex = FastDictionary2<TPartitionKey, long>.IteratorStart;
                        while (this.lastSyncTime.Iterate(ref partitionIndex))
                        {
                            // Check to see if partition is stale
                            if (synctime > this.lastSyncTime.entries[partitionIndex].value)
                            {
                                // Emit tentative output from stale partitions
                                var tentativeVisibleTraverser = new FastMap<OutputEvent<TKey, TRegister>>.VisibleTraverser(this.tentativeOutput.entries[partitionIndex].value);
                                while (tentativeVisibleTraverser.Next(out int index, out int hash))
                                {
                                    var elem = this.tentativeOutput.entries[partitionIndex].value.Values[index];
                                    this.batch.vsync.col[this.iter] = this.lastSyncTime.entries[partitionIndex].value;
                                    this.batch.vother.col[this.iter] = elem.other;
                                    this.batch.payload.col[this.iter] = elem.payload;
                                    this.batch.key.col[this.iter] = elem.key;
                                    this.batch.hash.col[this.iter] = hash;
                                    this.iter++;
                                    if (this.iter == Config.DataBatchSize)
                                    {
                                        FlushContents();
                                        dest_vsync = this.batch.vsync.col;
                                        dest_vother = this.batch.vother.col;
                                        destkey = this.batch.key.col;
                                        dest_hash = this.batch.hash.col;
                                    }
                                }
                                // Remove the partition
                                var partitionKey = this.lastSyncTime.entries[partitionIndex].key;
                                this.tentativeOutput.Remove(partitionKey);
                                this.lastSyncTime.Remove(partitionKey);
                            }
                        }
                    }
                    // Update dest_* on low watermark in case this event will hit the batch boundary and allocate a new batch
                    OnLowWatermark(synctime);
                    dest_vsync = this.batch.vsync.col;
                    dest_vother = this.batch.vother.col;
                    destkey = this.batch.key.col;
                    dest_hash = this.batch.hash.col;
                }
                else if (src_vother[i] == PartitionedStreamEvent.PunctuationOtherTime)
                {
                    // Punctuation: advances time for a single partition, flushing
                    // that partition's tentative output, then forwards the
                    // punctuation row itself.
                    var key = srckey[i];
                    long synctime = src_vsync[i];
                    if (!this.IsSyncTimeSimultaneityFree)
                    {
                        var partitionKey = this.getPartitionKey(key);
                        int partitionIndex = EnsurePartition(partitionKey);
                        if (synctime > this.lastSyncTime.entries[partitionIndex].value) // move time forward
                        {
                            this.seenEvent.Remove(srckey[i]);
                            var tentativeVisibleTraverser = new FastMap<OutputEvent<TKey, TRegister>>.VisibleTraverser(this.tentativeOutput.entries[partitionIndex].value);
                            if (this.tentativeOutput.Count > 0)
                            {
                                tentativeVisibleTraverser.currIndex = 0;
                                while (tentativeVisibleTraverser.Next(out int index, out int hash))
                                {
                                    var elem = this.tentativeOutput.entries[partitionIndex].value.Values[index];
                                    this.batch.vsync.col[this.iter] = this.lastSyncTime.entries[partitionIndex].value;
                                    this.batch.vother.col[this.iter] = elem.other;
                                    this.batch.payload.col[this.iter] = elem.payload;
                                    this.batch.key.col[this.iter] = elem.key;
                                    this.batch.hash.col[this.iter] = hash;
                                    this.iter++;
                                    if (this.iter == Config.DataBatchSize)
                                    {
                                        FlushContents();
                                        dest_vsync = this.batch.vsync.col;
                                        dest_vother = this.batch.vother.col;
                                        destkey = this.batch.key.col;
                                        dest_hash = this.batch.hash.col;
                                    }
                                }
                                this.tentativeOutput.entries[partitionIndex].value.Clear(); // Clear the tentative output list
                            }
                            this.lastSyncTime.entries[partitionIndex].value = synctime;
                        }
                    }
                    // Forward the punctuation row: vother = long.MinValue and the
                    // bitvector bit set mark it as a non-data row downstream.
                    this.batch.vsync.col[this.iter] = synctime;
                    this.batch.vother.col[this.iter] = long.MinValue;
                    this.batch.payload.col[this.iter] = default;
                    this.batch.key.col[this.iter] = key;
                    this.batch.hash.col[this.iter] = src_hash[i];
                    this.batch.bitvector.col[this.iter >> 6] |= (1L << (this.iter & 0x3f));
                    this.iter++;
                    if (this.iter == Config.DataBatchSize)
                    {
                        FlushContents();
                        dest_vsync = this.batch.vsync.col;
                        dest_vother = this.batch.vother.col;
                        destkey = this.batch.key.col;
                        dest_hash = this.batch.hash.col;
                    }
                }
            }
        }
    }
    // Return the input batch's columns to the pool.
    batch.Free();
}