in deepseek-r1-webgpu/src/worker.js [130:152]
/**
 * Loads the text-generation pipeline and warms it up, streaming status
 * updates back to the main thread via postMessage.
 *
 * Message protocol (worker -> main):
 *   { status: "loading", data: <string> }  — human-readable phase text
 *   <progress event>                       — forwarded from getInstance
 *   { status: "ready" }                    — model is ready for requests
 */
async function load() {
  self.postMessage({
    status: "loading",
    data: "Loading model...",
  });

  // Obtain (or reuse) the singleton pipeline. Every progress event from
  // the loader is forwarded verbatim so the UI can render download state.
  const forwardProgress = (progress) => self.postMessage(progress);
  const [tokenizer, model] =
    await TextGenerationPipeline.getInstance(forwardProgress);

  self.postMessage({
    status: "loading",
    data: "Compiling shaders and warming up model...",
  });

  // Generate a single token from a dummy prompt: this forces shader
  // compilation up front so the first real request isn't penalized.
  const warmupInputs = tokenizer("a");
  await model.generate({ ...warmupInputs, max_new_tokens: 1 });

  self.postMessage({ status: "ready" });
}