// export default async function inference()
// in src/lib/inference.tsx [4:41]

/**
 * Run a single-turn chat completion against the Hugging Face Inference API
 * (fireworks-ai provider) and return the assistant's reply message.
 *
 * @param prompt    - User prompt sent as a single `user` message.
 * @param model     - Model id; defaults to "Qwen/Qwen3-235B-A22B".
 * @param apiKey    - Optional HF access token. When omitted, the token is read
 *                    from `localStorage["huggingface_access_token"]` (browser only).
 * @param maxTokens - Completion length cap; defaults to 512.
 * @returns The first choice's message object from the chat completion.
 * @throws Error when no API key is provided and none is stored, or when the
 *         provider returns no choices.
 */
export default async function inference({
  prompt,
  model = "Qwen/Qwen3-235B-A22B",
  apiKey,
  maxTokens = 512
}: {
  prompt: string,
  model?: string,
  apiKey?: string,
  maxTokens?: number
}): Promise<{ role: string; content?: string }> {
  // Resolve the token without mutating the caller's argument.
  let token = apiKey;
  if (!token) {
    const stored = window.localStorage.getItem("huggingface_access_token");
    if (!stored) {
      throw new Error("You must be signed in to use the inference API!");
    }
    token = stored;
  }

  // Never log the API key — it is a secret credential.
  console.log("Inference", prompt, model);
  const client = new InferenceClient(token);

  const chatCompletion = await client.chatCompletion({
    provider: "fireworks-ai",
    model: model,
    messages: [
      {
        role: "user",
        content: prompt,
      },
    ],
    max_tokens: maxTokens,
  });

  // Guard against an empty choices array so callers get a clear error
  // instead of a TypeError on `.message` of undefined.
  const choice = chatCompletion.choices[0];
  if (!choice) {
    throw new Error("Inference API returned no choices");
  }

  console.log("Inference response", choice.message);
  return choice.message;
}