pyproject.toml

[tool.black]
line-length = 240

[build-system]
requires = ["setuptools>=42", "wheel", "setuptools_scm[tomli]>=6.3"]
build-backend = "setuptools.build_meta"

[project]
name = "lmms_eval"
version = "0.1.1"
authors = [
    { name = "LMMs-Lab Evaluation Team", email = "lmms_eval@outlook.com" },
]
description = "A framework for evaluating large multi-modality language models"
readme = "README.md"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.8"
license = { text = "MIT" }
dependencies = [
    "accelerate>=0.21.0",
    "black==24.1.0",
    "datasets==2.16.1",
    "evaluate>=0.4.0",
    "jsonlines",
    "numexpr",
    "peft>=0.2.0",
    "pybind11>=2.6.2",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "openai>=1.0.0",
    "pycocoevalcap",
    "tqdm-multiprocess",
    "transformers",
    "zstandard",
    "pillow",
    "pyyaml",
    "sympy",
    "mpmath",
    "Jinja2",
    "openpyxl",
    "Levenshtein",
    "hf_transfer",
    "tenacity",
    "wandb>=0.16.0",
    "transformers-stream-generator",
    "tiktoken",
    "pre-commit",
    "pydantic",
]

[tool.setuptools.packages.find]
include = ["lmms_eval*"]

[tool.setuptools.package-data]
lmms_eval = ["**/*.yaml", "tasks/**/*"]

[project.scripts]
# Both the hyphenated and underscored command names install the same CLI.
lmms-eval = "lmms_eval.__main__:cli_evaluate"
lmms_eval = "lmms_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://lmms-lab.github.io/lmms-eval-blog/"
Repository = "https://github.com/EvolvingLMMs-Lab/lmms-eval"
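For reference, the [project.scripts] table registers two console commands, lmms-eval and lmms_eval, both resolving to the cli_evaluate callable in lmms_eval/__main__.py. Below is a minimal sketch of what such an entry-point module could look like; only the module path and function name come from this file, and the argument handling is purely hypothetical:

# Hypothetical sketch of lmms_eval/__main__.py. Only the module path and
# the entry-point name `cli_evaluate` are taken from pyproject.toml; the
# CLI flags below are illustrative, not the project's real interface.
import argparse
import sys


def cli_evaluate() -> None:
    # Target of both console scripts declared under [project.scripts].
    parser = argparse.ArgumentParser(prog="lmms-eval")
    parser.add_argument("--model", help="model identifier (illustrative flag)")
    parser.add_argument("--tasks", help="comma-separated task names (illustrative flag)")
    args = parser.parse_args()
    print(f"would evaluate model={args.model!r} on tasks={args.tasks!r}")


if __name__ == "__main__":
    # `python -m lmms_eval` reaches the same function that pip wires up
    # for the `lmms-eval` and `lmms_eval` commands at install time.
    sys.exit(cli_evaluate())

After installation (e.g. pip install -e .), both command names are registered under the console_scripts entry-point group and can be listed with importlib.metadata.entry_points().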