in llama-3.2-webgpu/src/worker.js [32:94]
async function generate(messages) {
  // Retrieve the text-generation pipeline.
  const [tokenizer, model] = await TextGenerationPipeline.getInstance();

  // Tokenize the chat messages using the model's chat template.
  const inputs = tokenizer.apply_chat_template(messages, {
    add_generation_prompt: true,
    return_dict: true,
  });

  // Track decoding throughput (tokens per second), starting the clock
  // when the first token is produced.
  let startTime;
  let numTokens = 0;
  let tps;
  const token_callback_function = () => {
    startTime ??= performance.now();
    if (numTokens++ > 0) {
      tps = (numTokens / (performance.now() - startTime)) * 1000;
    }
  };
  // Forward each streamed chunk of text to the main thread, along with
  // the current throughput stats.
  const callback_function = (output) => {
    self.postMessage({
      status: "update",
      output,
      tps,
      numTokens,
    });
  };

  // Stream tokens as they are generated, skipping the prompt and any
  // special tokens.
  const streamer = new TextStreamer(tokenizer, {
    skip_prompt: true,
    skip_special_tokens: true,
    callback_function,
    token_callback_function,
  });
  // Tell the main thread we are starting
  self.postMessage({ status: "start" });

  const { past_key_values, sequences } = await model.generate({
    ...inputs,
    // TODO: Add when model is fixed
    // past_key_values: past_key_values_cache,

    // Sampling: greedy decoding, capped at 1024 new tokens.
    do_sample: false,
    max_new_tokens: 1024,

    streamer,
    stopping_criteria,
    return_dict_in_generate: true,
  });
  // past_key_values_cache = past_key_values;

  // Decode the full generated sequences.
  const decoded = tokenizer.batch_decode(sequences, {
    skip_special_tokens: true,
  });

  // Send the output back to the main thread
  self.postMessage({
    status: "complete",
    output: decoded,
  });
}
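
For context, the main thread consumes the "start" / "update" / "complete" messages this function posts. The sketch below is an illustrative consumer, not part of worker.js: the worker URL and the { type: "generate", data } request shape are assumptions about how the app drives this worker, and the real handler may differ.

// Illustrative main-thread sketch (assumed wiring, not part of worker.js).
const worker = new Worker(new URL("./worker.js", import.meta.url), {
  type: "module",
});

worker.addEventListener("message", (e) => {
  const { status, output, tps, numTokens } = e.data;
  switch (status) {
    case "start":
      // Generation has begun; clear any previous partial output.
      break;
    case "update":
      // `output` is the latest streamed text chunk; `tps` and `numTokens`
      // report decoding throughput so far.
      console.log(output, { tps, numTokens });
      break;
    case "complete":
      // `output` is the array of fully decoded sequences.
      console.log(output);
      break;
  }
});

// Assumed request shape: a "generate" message carrying the chat history.
worker.postMessage({
  type: "generate",
  data: [{ role: "user", content: "Hello!" }],
});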