in zyphra-zr1-webgpu/src/worker.js [51:128]
/**
 * Run chat text generation and stream progress back to the main thread.
 *
 * Worker -> main message protocol:
 *   { status: "start" }                                   — generation began
 *   { status: "update", output, tps, numTokens, state }   — streamed text chunk
 *   { status: "complete", output }                        — final decoded sequences
 *
 * `state` is "thinking" while the model is inside its <think>…</think>
 * reasoning span, and flips to "answering" once </think> is emitted.
 *
 * @param {Array<{role: string, content: string}>} messages - chat history in the
 *   shape expected by `tokenizer.apply_chat_template` (assumed role/content
 *   records — confirm against the main-thread sender).
 */
async function generate(messages) {
  // Retrieve the (cached) text-generation pipeline: tokenizer + model.
  const [tokenizer, model] = await TextGenerationPipeline.getInstance();

  const inputs = tokenizer.apply_chat_template(messages, {
    add_generation_prompt: true,
    return_dict: true,
  });

  // Encode the reasoning delimiters once so the token callback can detect
  // the transition from the "thinking" phase to the final answer.
  // 151648: <think>
  // 151649: </think>
  const [START_THINKING_TOKEN_ID, END_THINKING_TOKEN_ID] = tokenizer.encode(
    "<think></think>",
    { add_special_tokens: false },
  );

  let state = "thinking"; // 'thinking' or 'answering'
  let startTime;
  let numTokens = 0;
  let tps; // tokens per second, surfaced to the UI via "update" messages

  const token_callback_function = (tokens) => {
    // Start the clock on the first generated token.
    startTime ??= performance.now();
    // Skip the very first token so tps isn't computed over a ~0ms window.
    if (numTokens++ > 0) {
      tps = (numTokens / (performance.now() - startTime)) * 1000;
    }
    // Streamed token ids may arrive as BigInt while tokenizer.encode
    // returns Numbers; normalize explicitly instead of relying on the
    // implicit coercion of `==` (BigInt === Number is always false).
    if (Number(tokens[0]) === END_THINKING_TOKEN_ID) {
      state = "answering";
    }
  };

  const callback_function = (output) => {
    self.postMessage({
      status: "update",
      output,
      tps,
      numTokens,
      state,
    });
  };

  const streamer = new TextStreamer(tokenizer, {
    skip_prompt: true,
    skip_special_tokens: true,
    callback_function,
    token_callback_function,
  });

  // Tell the main thread we are starting
  self.postMessage({ status: "start" });

  const { past_key_values, sequences } = await model.generate({
    ...inputs,
    // TODO: Add back when fixed
    // past_key_values: past_key_values_cache,

    // Sampling
    do_sample: false,
    // repetition_penalty: 1.1,
    // top_k: 3,
    // temperature: 0.2,

    max_new_tokens: 2048,
    streamer,
    stopping_criteria,
    return_dict_in_generate: true,
  });

  // Cache the KV state for (currently disabled) prompt reuse on the next turn.
  past_key_values_cache = past_key_values;

  const decoded = tokenizer.batch_decode(sequences, {
    skip_special_tokens: true,
  });

  // Send the output back to the main thread
  self.postMessage({
    status: "complete",
    output: decoded,
  });
}