hugegraph-llm/src/hugegraph_llm/models/llms/ollama.py [48:60]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                model=self.model,
                messages=messages,
            )
            # Ollama reports token counts in the *_eval_count response fields.
            usage = {
                "prompt_tokens": response["prompt_eval_count"],
                "completion_tokens": response["eval_count"],
                "total_tokens": response["prompt_eval_count"] + response["eval_count"],
            }
            log.info("Token usage: %s", json.dumps(usage))
            return response["message"]["content"]
        except Exception as e:
            # Log the failure and re-raise so any outer retry logic can handle it.
            log.warning("Retrying LLM call: %s", e)
            raise
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



hugegraph-llm/src/hugegraph_llm/models/llms/ollama.py [74:86]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                model=self.model,
                messages=messages,
            )
            # Ollama reports token counts in the *_eval_count response fields.
            usage = {
                "prompt_tokens": response["prompt_eval_count"],
                "completion_tokens": response["eval_count"],
                "total_tokens": response["prompt_eval_count"] + response["eval_count"],
            }
            log.info("Token usage: %s", json.dumps(usage))
            return response["message"]["content"]
        except Exception as e:
            # Log the failure and re-raise so any outer retry logic can handle it.
            log.warning("Retrying LLM call: %s", e)
            raise
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
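
The two regions above are verbatim duplicates. A minimal deduplication sketch,
assuming the shared accounting logic can be hoisted into a module-level helper
(the `log_token_usage` name is illustrative, not the repository's actual API):

    import json
    import logging

    log = logging.getLogger(__name__)

    def log_token_usage(response: dict) -> str:
        """Log Ollama token usage and return the message content.

        Assumes the response carries Ollama's `prompt_eval_count`,
        `eval_count`, and `message.content` fields, as in the blocks above.
        """
        prompt_tokens = response["prompt_eval_count"]
        completion_tokens = response["eval_count"]
        usage = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        }
        log.info("Token usage: %s", json.dumps(usage))
        return response["message"]["content"]

Both call sites would then reduce to `return log_token_usage(response)` inside
their respective try blocks, leaving only the chat invocation and error
handling at each site.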



