in packages/recoil/recoil_values/Recoil_selector.js [448:635]
/**
 * Wrap a promise that blocked a selector evaluation — either the promise for
 * a pending (loading) Recoil dependency, or a promise the user's get()
 * callback threw directly — so that when it resolves we can retry evaluating
 * the selector for this execution, and when it rejects we record the error.
 *
 * @param store The Recoil store this execution belongs to.
 * @param promise The pending promise that was caught during evaluation.
 * @param state The tree state the evaluation ran against. NOTE: this may be a
 *   stale or discarded version by the time the promise settles (see inline
 *   comments below).
 * @param existingDeps Dependency values captured before the promise was
 *   thrown; used to record an error loadable if the promise rejects.
 * @param executionId Identifies this in-progress execution so that results
 *   and store notifications are only applied for the latest execution.
 * @param loadingDepsState Records which dep key/promise (if any) caused the
 *   throw, so we can distinguish a pending Recoil dep from a user-thrown
 *   promise.
 * @returns A promise of the selector's resolved value. Throws CANCELED if
 *   the selector was released while the request was in flight, or re-throws
 *   the underlying error on rejection.
 *
 * NOTE(review): the type parameter T in the return type is bound by an
 * enclosing scope not visible in this chunk — presumably the selector
 * factory's value type.
 */
function wrapPendingDependencyPromise(
  store: Store,
  promise: Promise<mixed>,
  state: TreeState,
  existingDeps: DepValues,
  executionId: ExecutionId,
  loadingDepsState: LoadingDepsState,
): Promise<T> {
  return promise
    .then(resolvedDep => {
      if (!selectorIsLive()) {
        // The selector was released since the request began; ignore the response.
        clearExecutionInfo(store);
        throw CANCELED;
      }
      // Check if we are handling a pending Recoil dependency or if the user
      // threw their own Promise to "suspend" a selector evaluation. We need
      // to check that the loadingDepPromise actually matches the promise that
      // we caught in case the selector happened to catch the promise we threw
      // for a pending Recoil dependency from `getRecoilValue()` and threw
      // their own promise instead.
      if (
        loadingDepsState.loadingDepKey != null &&
        loadingDepsState.loadingDepPromise === promise
      ) {
        /**
         * Note for async atoms, this means we are changing the atom's value
         * in the store for the given version. This should be alright because
         * the version of state is now stale and a new version will have
         * already been triggered by the atom being resolved (see this logic
         * in Recoil_atom.js)
         */
        state.atomValues.set(
          loadingDepsState.loadingDepKey,
          loadableWithValue(resolvedDep),
        );
      } else {
        /**
         * If resolvedDepKey is not defined, the promise was a user-thrown
         * promise. User-thrown promises are an advanced feature and they
         * should be avoided in almost all cases. Using `loadable.map()` inside
         * of selectors for loading loadables and then throwing that mapped
         * loadable's promise is an example of a user-thrown promise.
         *
         * When we hit a user-thrown promise, we have to bail out of an optimization
         * where we bypass calculating selector cache keys for selectors that
         * have been previously seen for a given state (these selectors are saved in
         * state.atomValues) to avoid stale state as we have no way of knowing
         * what state changes happened (if any) as a result of the promise resolving.
         *
         * Ideally we would only bail out selectors that are in the chain of
         * dependencies for this selector, but there's currently no way to get
         * a full list of a selector's downstream nodes because the state that
         * is executing may be a discarded tree (so store.getGraph(state.version)
         * will be empty), and the full dep tree may not be in the selector
         * caches in the case where the selector's cache was cleared. To solve
         * for this we would have to keep track of all running selector
         * executions and their downstream deps. Because this only covers edge
         * cases, that complexity might not be justifiable.
         */
        store.getState().knownSelectors.forEach(nodeKey => {
          state.atomValues.delete(nodeKey);
        });
      }
      /**
       * Optimization: Now that the dependency has resolved, let's try hitting
       * the cache in case the dep resolved to a value we have previously seen.
       *
       * TODO:
       * Note this optimization is not perfect because it only prevents re-executions
       * _after_ the point where an async dependency is found. Any code leading
       * up to the async dependency may have run unnecessarily. The ideal case
       * would be to wait for the async dependency to resolve first, check the
       * cache, and prevent _any_ execution of the selector if the resulting
       * value of the dependency leads to a path that is found in the cache.
       * The ideal case is more difficult to implement as it would require that
       * we capture and wait for the async dependency right after checking
       * the cache. The current approach takes advantage of the fact that running
       * the selector already has a code path that lets us exit early when
       * an async dep resolves.
       */
      const cachedLoadable = getValFromCacheAndUpdatedDownstreamDeps(
        store,
        state,
      );
      if (cachedLoadable && cachedLoadable.state !== 'loading') {
        /**
         * This has to notify stores of a resolved async, even if there is no
         * current pending execution for the following case:
         * 1) A component renders with this pending loadable.
         * 2) The upstream dependency resolves.
         * 3) While processing some other selector it reads this one, such as
         *    while traversing its dependencies.  At this point it gets the
         *    new resolved value synchronously and clears the current
         *    execution ID.  The component wasn't getting the value itself,
         *    though, so it still has the pending loadable.
         * 4) When this code executes the current execution id was cleared
         *    and it wouldn't notify the component of the new value.
         *
         * I think this is only an issue with "early" rendering since the
         * components got their value using the in-progress execution.
         * We don't have a unit test for this case yet.  I'm not sure it is
         * necessary with recoil_concurrent_support mode.
         */
        if (
          isLatestExecution(store, executionId) ||
          getExecutionInfo(store) == null
        ) {
          notifyStoresOfResolvedAsync(store, executionId);
        }
        // A settled cached loadable is either 'hasValue' or 'hasError';
        // mirror its outcome by returning the value or throwing the error.
        if (cachedLoadable.state === 'hasValue') {
          return cachedLoadable.contents;
        } else {
          throw cachedLoadable.contents;
        }
      }
      /**
       * If this execution is stale, let's check to see if there is some in
       * progress execution with a matching state. If we find a match, then
       * we can take the value from that in-progress execution. Note this may
       * sound like an edge case, but may be very common in cases where a
       * loading dependency resolves from loading to having a value (thus
       * possibly triggering a re-render), and React re-renders before the
       * chained .then() functions run, thus starting a new execution as the
       * dep has changed value. Without this check we will run the selector
       * twice (once in the new execution and once again in this .then(), so
       * this check is necessary to keep unnecessary re-executions to a
       * minimum).
       *
       * Also note this code does not check across all executions that may be
       * running. It only optimizes for the _latest_ execution per store as
       * we currently do not maintain a list of all currently running executions.
       * This means in some cases we may run selectors more than strictly
       * necessary when there are multiple executions running for the same
       * selector. This may be a valid tradeoff as checking for dep changes
       * across all in-progress executions may take longer than just
       * re-running the selector. This will be app-dependent, and maybe in the
       * future we can make the behavior configurable. An ideal fix may be
       * to extend the tree cache to support caching loading states.
       */
      if (!isLatestExecution(store, executionId)) {
        const executionInfo = getExecutionInfoOfInProgressExecution(state);
        if (executionInfo?.latestLoadable.state === 'loading') {
          /**
           * Returning promise here without wrapping as the wrapper logic was
           * already done upstream when this promise was generated.
           */
          return executionInfo.latestLoadable.contents;
        }
      }
      // Retry the selector evaluation now that the dependency has resolved
      const [loadable, depValues] = evaluateSelectorGetter(
        store,
        state,
        executionId,
      );
      updateExecutionInfoDepValues(store, executionId, depValues);
      // Only a settled result can be committed; a still-loading result means
      // we hit another async dep and its own wrapper will resolve it later.
      if (loadable.state !== 'loading') {
        resolveAsync(store, state, executionId, loadable, depValues);
      }
      if (loadable.state === 'hasError') {
        throw loadable.contents;
      }
      // Either a resolved value, or a promise for the next pending dep
      // (already wrapped by evaluateSelectorGetter's code path).
      return loadable.contents;
    })
    .catch(error => {
      // The selector was released since the request began; ignore the response.
      if (error instanceof Canceled) {
        throw CANCELED;
      }
      if (!selectorIsLive()) {
        clearExecutionInfo(store);
        throw CANCELED;
      }
      // Record the rejection as an error loadable using the dep values we
      // had captured before the promise was thrown, then re-throw so
      // downstream consumers observe the failure.
      const loadable = loadableWithError(error);
      resolveAsync(store, state, executionId, loadable, existingDeps);
      throw error;
    });
}