in src/lib/inference.tsx [43:89]
/**
 * React hook exposing a stateful wrapper around the module's `inference` call.
 *
 * @param apiKey - API key forwarded verbatim to `inference`.
 *   NOTE(review): assumed to be a string — confirm against `inference`'s signature.
 * @returns An object with:
 *   - `status`: `"thinking"` while a request is in flight, `"error"` after a
 *     failure, `"done"` otherwise;
 *   - `partialText`: intermediate text state;
 *   - `inferenceResult`: the last successful result's `content`;
 *   - `error`: the last error message, or `null`;
 *   - `inference`: async function that starts a request and resolves with the
 *     result content, or `null` on failure (errors are captured, not rethrown).
 */
export function useInferenceOld({ apiKey }: { apiKey: string }) {
  const [isLoading, setIsLoading] = useState(false);
  const [partialText, setPartialText] = useState("");
  const [inferenceResult, setInferenceResult] = useState("");
  const [error, setError] = useState<string | null>(null);

  const inferenceInternal = async ({
    prompt,
    model,
    maxTokens,
  }: {
    prompt: string;
    model: string;
    maxTokens: number;
  }): Promise<string | null> => {
    setIsLoading(true);
    // Clear any stale error so `status` doesn't keep reporting "error"
    // from a previous failed call once this call succeeds.
    setError(null);
    // NOTE(review): debug placeholder left in place — presumably real
    // streaming should update this; kept byte-identical to avoid a
    // behavior change. Confirm before shipping.
    setPartialText("boop boop partial text");
    try {
      const result = await inference({
        prompt,
        model,
        apiKey,
        maxTokens,
      });
      setInferenceResult(result.content);
      return result.content;
    } catch (err: unknown) {
      // `err` (not `error`) avoids shadowing the error state variable;
      // narrow before reading `.message` (required under strict catch typing).
      console.error("Error in inference", err);
      setError(err instanceof Error ? err.message : String(err));
      return null;
    } finally {
      // Single reset point covers both the success and failure paths.
      setIsLoading(false);
    }
  };

  const status = isLoading ? "thinking" : error ? "error" : "done";
  return {
    status,
    partialText,
    inferenceResult,
    error,
    inference: inferenceInternal,
  };
}