packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.hf-inference.py (27 lines of code) (raw):

"""Describe an image with a vision-language model through the Hugging Face
Inference router, using the OpenAI-compatible chat completions API."""

import os

from openai import OpenAI

# The router exposes an OpenAI-style endpoint per model; authentication uses
# the HF_TOKEN environment variable (a missing variable raises KeyError here).
hf_client = OpenAI(
    base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1",
    api_key=os.environ["HF_TOKEN"],
)

# Single user turn combining a text instruction with an image URL — the
# multimodal "content parts" form of the chat messages payload.
response = hf_client.chat.completions.create(
    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Describe this image in one sentence.",
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                    },
                },
            ],
        }
    ],
)

print(response.choices[0].message)