pyproject.toml
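
# Build configuration: setuptools (>=61.0, the first release with PEP 621
# metadata support) is used as the PEP 517 build backend.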
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
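
# Core project metadata (PEP 621).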
[project]
name = "llama_models"
version = "0.2.0"
authors = [
]
description = "Llama models"
readme = "README.md"
requires-python = ">=3.10"
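# Runtime dependencies. Heavyweight packages such as torch are kept out of
# the default install and live in the optional "torch" extra below.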
dependencies = [
"PyYAML",
"jinja2>=3.1.6",
"tiktoken",
"pydantic>=2",
"Pillow",
"rich",
]
classifiers = []
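
# Project links displayed on the package's PyPI page.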
[project.urls]
Homepage = "https://github.com/meta-llama/llama-models"
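
# Console entry points: each key becomes an executable command on PATH that
# invokes the named module's main() function. A usage sketch (assumes model
# checkpoints are available locally and the "torch" extra is installed; the
# exact CLI arguments are defined by each script's main()):
#
#   pip install "llama_models[torch]"
#   example_chat_completion <checkpoint_dir>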
[project.scripts]
multimodal_example_chat_completion = "llama_models.scripts.multimodal_example_chat_completion:main"
multimodal_example_text_completion = "llama_models.scripts.multimodal_example_text_completion:main"
example_chat_completion = "llama_models.scripts.example_chat_completion:main"
example_text_completion = "llama_models.scripts.example_text_completion:main"
llama4_completion = "llama_models.llama4.scripts.completion:main"
llama4_chat_completion = "llama_models.llama4.scripts.chat_completion:main"
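
# Optional dependency groups, installable as pip "extras", e.g.:
#
#   pip install "llama_models[dev]"    # linting, formatting, and test tools
#   pip install "llama_models[torch]"  # PyTorch inference stack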
[project.optional-dependencies]
dev = [
"pytest",
"black",
"isort",
"mypy",
"ruff",
]
torch = [
"torch",
"torchvision",
"fairscale",
"fire",
"blobfile",
"fbgemm-gpu-genai==1.1.2",
]
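
# setuptools-specific packaging options: map the top-level "llama_models"
# package to the llama_models/ directory and auto-discover its subpackages.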
[tool.setuptools]
package-dir = {"llama_models" = "llama_models"}
packages = {find = {}}