llama-3.2-reasoning-webgpu/src/worker.js (2 lines):
- line 39: dtype: "q4f16", // TODO: use "q4" as fallback when fixed
- line 111: // TODO: Add back when fixed
janus-webgpu/src/worker.js (1 line):
- line 66: prepare_inputs_embeds: "wasm", // TODO use "webgpu" when bug is fixed
gemma-2-2b-jpn-webgpu/src/worker.js (1 line):
- line 73: // TODO: Add when model is fixed
semantic-audio-search/worker.js (1 line):
- line 25: // TODO allow user to select quantized or not
zyphra-zr1-webgpu/src/worker.js (1 line):
- line 103: // TODO: Add back when fixed
phi-3.5-webgpu/src/worker.js (1 line):
- line 74: // TODO: Enable once model is fixed
conversational-webgpu/src/worker.js (1 line):
- line 184: do_sample: false, // TODO: do_sample: true is bugged (invalid data location on topk sample)
semantic-audio-search/index.js (1 line):
- line 37: // TODO Add colours
janus-pro-webgpu/src/worker.js (1 line):
- line 66: prepare_inputs_embeds: "wasm", // TODO use "webgpu" when bug is fixed
llama-3.2-webgpu/src/worker.js (1 line):
- line 72: // TODO: Add when model is fixed
tinyswallow-webgpu/src/worker.js (1 line):
- line 92: // TODO: Add back when fixed
video-background-removal/main.js (1 line):
- line 27: dtype: "fp32", // TODO: add fp16 support
deepseek-r1-webgpu/src/worker.js (1 line):
- line 103: // TODO: Add back when fixed
text-to-speech-webgpu/src/App.jsx (1 line):
- line 37: // TODO: Display error on screen
smolvlm-webgpu/src/worker.js (1 line):
- line 112: // TODO: Add back when fixed
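Most of these TODOs describe the same situation: a preferred dtype/device combination (e.g. "q4f16" on "webgpu") that has to be downgraded to a more conservative one ("q4", "wasm", "fp32") while an upstream bug is open. The sketch below shows one way such a fallback could be written with @huggingface/transformers; it is a minimal illustration, not code from any of the listed workers, and the model id and candidate list are assumptions chosen for the example.

```js
import { AutoTokenizer, AutoModelForCausalLM } from "@huggingface/transformers";

// Placeholder model id, for illustration only.
const MODEL_ID = "onnx-community/Llama-3.2-1B-Instruct-q4f16";

// Preferred configuration first, then progressively more conservative
// fallbacks -- the downgrade path the "use 'q4' as fallback when fixed"
// and "use 'webgpu' when bug is fixed" TODOs point at.
const CANDIDATES = [
  { dtype: "q4f16", device: "webgpu" },
  { dtype: "q4", device: "webgpu" },
  { dtype: "q4", device: "wasm" },
];

async function loadModel() {
  let lastError;
  for (const { dtype, device } of CANDIDATES) {
    try {
      return await AutoModelForCausalLM.from_pretrained(MODEL_ID, { dtype, device });
    } catch (err) {
      // e.g. the dtype or backend is not supported on this browser/GPU.
      lastError = err;
    }
  }
  throw lastError;
}

const tokenizer = await AutoTokenizer.from_pretrained(MODEL_ID);
const model = await loadModel();

// Greedy decoding; do_sample: true is what the conversational-webgpu TODO
// flags as currently bugged, so sampling stays disabled here as well.
const inputs = tokenizer("What is WebGPU?");
const output = await model.generate({ ...inputs, max_new_tokens: 64, do_sample: false });
console.log(tokenizer.batch_decode(output, { skip_special_tokens: true }));
```

Ordering the candidates from most to least aggressive keeps the fast path as the default, so resolving an upstream fix only requires deleting the now-unneeded fallback entries rather than rewriting the loading code.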