[tool.poetry]
name = "llama-cpp-scripts"
version = "0.0.0"
description = "Scripts that ship with llama.cpp"
authors = ["GGML <[email protected]>"]
readme = "README.md"
homepage = "https://ggml.ai"
repository = "https://github.com/ggerganov/llama.cpp"
keywords = ["ggml", "gguf", "llama.cpp"]
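# Bundle every top-level *.py script from the repository root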
packages = [{ include = "*.py", from = "." }]
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]

[tool.poetry.dependencies]
python = ">=3.9"
numpy = "^1.25.0"
sentencepiece = ">=0.1.98,<=0.2.0"
transformers = ">=4.35.2,<5.0.0"
protobuf = ">=4.21.0,<5.0.0"
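# Install gguf from the in-tree gguf-py package rather than the PyPI wheel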
gguf = { path = "./gguf-py" }
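# Resolve torch against the CPU-only index declared in [[tool.poetry.source]] below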
torch = { version = "^2.2.0", source = "pytorch" }

[tool.poetry.dev-dependencies]
pytest = "^5.2"

# Force wheel + cpu
# For discussion and context see https://github.com/python-poetry/poetry/issues/6409
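# An "explicit" source is only consulted by dependencies that opt in via source = "pytorch"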
[[tool.poetry.source]]
name = "pytorch"
url = "https://download.pytorch.org/whl/cpu"
priority = "explicit"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.poetry.scripts]
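# Console entry points: each command maps to "module:function" and is generated at install time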
llama-convert-hf-to-gguf = "convert_hf_to_gguf:main"
llama-convert-llama-ggml-to-gguf = "convert_llama_ggml_to_gguf:main"
llama-ggml-vk-generate-shaders = "ggml_vk_generate_shaders:main"
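# After `poetry install`, the commands above should be available in the
# project environment, e.g.: poetry run llama-convert-hf-to-gguf --help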