in src/common/chat/gitlab_chat_api.ts [349:408]
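/**
 * Subscribes to AI completion responses for the current user and forwards
 * each streamed chunk and the final message to `messageCallback`.
 * When `subscriptionId` is provided, the cable is disconnected automatically
 * once the full message arrives; otherwise the caller owns the returned
 * cable and should disconnect it when the chat session ends.
 */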
async subscribeToUpdates(
messageCallback: (message: AiCompletionResponseMessageType) => Promise<void>,
subscriptionId?: string,
): Promise<Cable> {
const [platform, additionalContextEnabled] = await Promise.all([
this.#currentPlatform(),
this.#aiContextManager.isAdditionalContextEnabled(),
]);
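// The current user's ID is needed to build the GraphQL global ID that
// scopes the completion channel to this user.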
const currentUser = await platform.fetchFromApi(currentUserRequest);
log.debug(
`GitLabChatApi: subscribeToUpdates, additionalContextEnabled: ${additionalContextEnabled}`,
);
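// The optional clientSubscriptionId ties the channel to a single prompt;
// when set, the cable is torn down after that prompt's full message (below).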
const channel = new AiCompletionResponseChannel(
{
htmlResponse: false,
userId: `gid://gitlab/User/${currentUser.id}`,
aiAction: 'CHAT',
clientSubscriptionId: subscriptionId,
},
additionalContextEnabled,
);
const cable = await platform.connectToCable();
// This flag works around https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/1397:
// a chunk can arrive after the full message, which previously corrupted the chat.
let fullMessageReceived = false;
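// Each chunk carries a partial piece of the streamed response; forward it
// unless the stream was cancelled or has already been finalized.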
channel.on('newChunk', async msg => {
if (fullMessageReceived) {
log.info('CHAT-DEBUG: full message received, ignoring chunk');
return;
}
if (this.#canceledPromptRequestIds.includes(msg.requestId)) {
log.info('CHAT-DEBUG: stream cancelled, ignoring chunk');
return;
}
await messageCallback(msg);
});
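// The full message ends the stream: mark it so stale chunks are dropped,
// honour cancellation, and tear the cable down when this subscription was
// scoped to a single prompt.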
channel.on('fullMessage', async message => {
fullMessageReceived = true;
if (this.#canceledPromptRequestIds.includes(message.requestId)) {
log.info('CHAT-DEBUG: stream cancelled, ignoring full message');
cable.disconnect();
return;
}
await messageCallback(message);
if (subscriptionId) {
cable.disconnect();
}
});
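// Attach the channel only after both handlers are registered so no early
// events are missed.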
cable.subscribe(channel);
return cable;
}
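// Usage sketch (illustrative only; `chatApi`, `renderResponse`, and
// `promptSubscriptionId` are hypothetical names, not defined in this file):
//
//   const cable = await chatApi.subscribeToUpdates(async message => {
//     renderResponse(message);
//   }, promptSubscriptionId);
//   // ...later, e.g. when the chat view is disposed:
//   cable.disconnect();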