// export async function activate() — from src/extension.ts [35:214]
/**
 * Extension entry point. Starts the llm-ls language server over stdio,
 * registers the login/logout/attribution/afterInsert commands, and wires up
 * the inline-completion provider that forwards requests to the server.
 *
 * @param context - extension context supplied by VS Code; all disposables
 *   created here are pushed onto `context.subscriptions` so they are cleaned
 *   up on deactivation.
 */
export async function activate(context: vscode.ExtensionContext) {
	ctx = context;
	handleConfigTemplateChange(ctx);
	const config = vscode.workspace.getConfiguration("llm");
	// TODO: support TransportKind.socket
	// Resolve the server binary: explicit setting wins, otherwise use the
	// binary bundled with the extension (platform-specific extension).
	const binaryPath: string | null = config.get("lsp.binaryPath") as string | null;
	let command: string;
	if (binaryPath) {
		command = binaryPath;
	} else {
		const ext = process.platform === "win32" ? ".exe" : "";
		command = vscode.Uri.joinPath(context.extensionUri, "server", `llm-ls${ext}`).fsPath;
	}
	// Expand a leading "~/" manually — the shell is not involved here.
	if (command.startsWith("~/")) {
		command = homedir() + command.slice("~".length);
	}

	// run and debug share the same environment; build it once.
	const serverEnv = {
		"RUST_BACKTRACE": "1",
		"LLM_LOG_LEVEL": config.get("lsp.logLevel") as string,
	};
	const serverOptions: ServerOptions = {
		run: {
			command,
			transport: TransportKind.stdio,
			options: { env: serverEnv },
		},
		debug: {
			command,
			transport: TransportKind.stdio,
			options: { env: serverEnv },
		}
	};

	const outputChannel = vscode.window.createOutputChannel('LLM VS Code', { log: true });
	const clientOptions: LanguageClientOptions = {
		documentSelector: [{ scheme: "*" }],
		outputChannel,
	};
	client = new LanguageClient(
		'llm',
		'LLM VS Code',
		serverOptions,
		clientOptions
	);

	loadingIndicator = createLoadingIndicator();

	await client.start();

	// Fired by the inline-completion item's command after an insert is
	// accepted; reports the acceptance back to llm-ls for telemetry.
	const afterInsert = vscode.commands.registerCommand('llm.afterInsert', async (response: CompletionResponse) => {
		const { request_id, completions } = response;
		const params = {
			requestId: request_id,
			acceptedCompletion: 0,
			shownCompletions: [0],
			completions,
		};
		await client.sendRequest("llm-ls/acceptCompletion", params);
	});
	ctx.subscriptions.push(afterInsert);

	const login = vscode.commands.registerCommand('llm.login', async (...args) => {
		const apiToken = await ctx.secrets.get('apiToken');
		if (apiToken !== undefined) {
			vscode.window.showInformationMessage('Llm: Already logged in');
			return;
		}
		// Try the huggingface_hub CLI token cache first; path.join inserts
		// separators itself, no need for explicit path.sep segments.
		const tokenPath = path.join(homedir(), ".cache", "huggingface", "token");
		const token: string | undefined = await new Promise((res) => {
			readFile(tokenPath, (err, data) => {
				if (err) {
					// Missing/unreadable cache file is expected; fall through
					// to the manual prompt. The early return is required:
					// `data` is undefined here and toString() would throw.
					res(undefined);
					return;
				}
				res(data.toString().trim());
			});
		});
		if (token !== undefined) {
			await ctx.secrets.store('apiToken', token);
			vscode.window.showInformationMessage(`Llm: Logged in from cache: ~/.cache/huggingface/token ${tokenPath}`);
			return;
		}
		const input = await vscode.window.showInputBox({
			prompt: 'Please enter your API token (find yours at hf.co/settings/token):',
			placeHolder: 'Your token goes here ...'
		});
		if (input !== undefined) {
			await ctx.secrets.store('apiToken', input);
			vscode.window.showInformationMessage('Llm: Logged in successfully');
		}
	});
	ctx.subscriptions.push(login);
	const logout = vscode.commands.registerCommand('llm.logout', async (...args) => {
		await ctx.secrets.delete('apiToken');
		vscode.window.showInformationMessage('Llm: Logged out');
	});
	ctx.subscriptions.push(logout);

	const attribution = vscode.commands.registerTextEditorCommand('llm.attribution', () => {
		void highlightStackAttributions();
	});
	ctx.subscriptions.push(attribution);

	const provider: vscode.InlineCompletionItemProvider = {
		async provideInlineCompletionItems(document, position, context, token) {
			// Re-read configuration on every request so setting changes take
			// effect without a reload.
			const config = vscode.workspace.getConfiguration("llm");
			const autoSuggest = config.get("enableAutoSuggest") as boolean;
			const requestDelay = config.get("requestDelay") as number;
			if (context.triggerKind === vscode.InlineCompletionTriggerKind.Automatic && !autoSuggest) {
				return;
			}
			if (position.line < 0) {
				return;
			}
			// Debounce: wait before firing; bail out if the request was
			// cancelled while waiting.
			if (requestDelay > 0) {
				const cancelled = await delay(requestDelay, token);
				if (cancelled) {
					return;
				}
			}
			let tokenizerConfig: any = config.get("tokenizer");
			// Inject the stored API token so llm-ls can fetch a gated
			// tokenizer, unless the user configured one explicitly.
			if (tokenizerConfig != null && tokenizerConfig.repository != null && tokenizerConfig.api_token == null) {
				tokenizerConfig.api_token = await ctx.secrets.get('apiToken');
			}
			let params = {
				position,
				textDocument: client.code2ProtocolConverter.asTextDocumentIdentifier(document),
				model: config.get("modelId") as string,
				backend: config.get("backend") as string,
				url: config.get("url") as string | null,
				tokensToClear: config.get("tokensToClear") as string[],
				apiToken: await ctx.secrets.get('apiToken'),
				requestBody: config.get("requestBody") as object,
				fim: config.get("fillInTheMiddle") as number,
				contextWindow: config.get("contextWindow") as number,
				tlsSkipVerifyInsecure: config.get("tlsSkipVerifyInsecure") as boolean,
				ide: "vscode",
				tokenizerConfig,
				disableUrlPathCompletion: config.get("disableUrlPathCompletion") as boolean,
			};
			try {
				loadingIndicator.show();
				const response: CompletionResponse = await client.sendRequest("llm-ls/getCompletions", params, token);
				loadingIndicator.hide();

				const items = [];
				for (const completion of response.completions) {
					items.push({
						insertText: completion.generated_text,
						range: new vscode.Range(position, position),
						command: {
							title: 'afterInsert',
							command: 'llm.afterInsert',
							arguments: [response],
						}
					});
				}

				return {
					items,
				};
			} catch (e) {
				const err_msg = (e as Error).message;
				// "is currently loading" means the hosted model is cold —
				// warn rather than error; "Canceled" is routine and silent.
				if (err_msg.includes("is currently loading")) {
					vscode.window.showWarningMessage(err_msg);
				} else if (err_msg !== "Canceled") {
					vscode.window.showErrorMessage(err_msg);
				}
			}

		},

	};
	const documentFilter = config.get("documentFilter") as DocumentFilter | DocumentFilter[];
	// Keep the disposable so the provider is unregistered on deactivation,
	// matching every other registration above.
	ctx.subscriptions.push(
		vscode.languages.registerInlineCompletionItemProvider(documentFilter, provider)
	);
}