async function getCompletionMetadata()

in src/lib/utils/business.svelte.ts [121:178]


/**
 * Builds the client and request arguments needed to run a completion for the
 * given conversation, dispatching between OpenAI-compatible custom endpoints
 * and Hugging Face inference providers.
 *
 * @param conversation - Either the reactive ConversationClass wrapper or a plain Conversation.
 * @param signal - Optional abort signal. NOTE(review): it is only wired into
 *   the custom-endpoint (OpenAI) fetch here; the Hugging Face path does not
 *   consume it — confirm the caller passes it to the inference call instead.
 * @returns Metadata describing which client type to use and the args to send.
 */
async function getCompletionMetadata(
	conversation: ConversationClass | Conversation,
	signal?: AbortSignal
): Promise<CompletionMetadata> {
	// Unwrap the underlying data whether we received the reactive class or the raw object.
	const data = conversation instanceof ConversationClass ? conversation.data : conversation;
	const model = conversation.model;
	const systemMessage = projects.current?.systemMessage;

	// Prepend the project-level system prompt only when the model supports one
	// and the prompt is non-empty.
	const messages: ConversationMessage[] = [
		...(isSystemPromptSupported(model) && systemMessage?.length ? [{ role: "system", content: systemMessage }] : []),
		...data.messages,
	];
	const parsed = await Promise.all(messages.map(parseMessage));

	// Request payload shared by both client types.
	const baseArgs = {
		...data.config,
		messages: parsed,
		model: model.id,
		response_format: getResponseFormatObj(conversation),
		// eslint-disable-next-line @typescript-eslint/no-explicit-any
	} as any;

	// Handle OpenAI-compatible models: route the abort signal through a custom
	// fetch so in-flight requests can be cancelled.
	if (isCustomModel(model)) {
		const openai = new OpenAI({
			apiKey: model.accessToken,
			baseURL: model.endpointUrl,
			dangerouslyAllowBrowser: true,
			fetch: (...args: Parameters<typeof fetch>) => {
				return fetch(args[0], { ...args[1], signal });
			},
		});

		return {
			type: "openai",
			client: openai,
			// baseArgs is local and never reused, so no defensive copy is needed.
			args: baseArgs,
		};
	}

	// Handle HuggingFace models: the provider is selected per-request via args.
	return {
		type: "huggingface",
		client: new InferenceClient(token.value),
		args: {
			...baseArgs,
			provider: data.provider,
			// max_tokens: maxAllowedTokens(conversation) - currTokens,
			// eslint-disable-next-line @typescript-eslint/no-explicit-any
		} as any,
	};
}