in neuron_viewer/src/TransformerDebugger/requests/inferenceDataFetcher.ts [158:180]
/**
 * Runs a batched TDB inference for the given sub-requests and dispatches each
 * sub-response to its corresponding React state setter, in order.
 *
 * On success, clears any previously displayed activation-server error message.
 * Any failure — network error, count mismatch, or a throwing setter — is routed
 * to handleInferenceError together with setActivationServerErrorMessage.
 *
 * @param subRequests - One TdbRequestSpec per inference sub-request.
 * @param setResponseFns - State setters, expected to be parallel to subRequests.
 */
function performInference(
  subRequests: TdbRequestSpec[],
  setResponseFns: React.Dispatch<
    React.SetStateAction<InferenceResponseAndResponseDict | null>
  >[]
): void {
  batchedTdb({ subRequests })
    .then((responseData) => {
      const subResponses = responseData.inferenceSubResponses;
      if (subResponses.length !== subRequests.length) {
        throw new Error(
          `Expected exactly ${subRequests.length} inferenceSubResponses, but got ${subResponses.length}`
        );
      }
      // Guard against a caller passing a setter array that is not parallel to
      // subRequests; without this, setResponseFns[i] would be undefined and the
      // call below would fail with an opaque "is not a function" TypeError.
      if (setResponseFns.length !== subRequests.length) {
        throw new Error(
          `Expected exactly ${subRequests.length} setResponseFns, but got ${setResponseFns.length}`
        );
      }
      for (let i = 0; i < subResponses.length; i++) {
        setResponseFns[i](subResponses[i]);
      }
      // All sub-responses applied successfully — clear any stale error banner.
      setActivationServerErrorMessage(null);
    })
    .catch((error) => handleInferenceError(error, setActivationServerErrorMessage));
}