[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "avp"
dynamic = ["version"]
description = "Multi-agent text handoffs discard KV-cache and attention state. AVP transfers that state directly: 51-78% fewer tokens, 1.5-5x faster."
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.10"
authors = [{ name = "VectorArc" }]
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
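# Minimal runtime deps; presumably numpy backs tensor buffers, protobuf the
# wire schema, and zstandard payload compression (roles inferred from names).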
dependencies = [
"numpy>=1.24",
"protobuf>=4.21",
"zstandard>=0.21",
]

[project.urls]
Homepage = "https://github.com/VectorArc/avp-python"
Documentation = "https://github.com/VectorArc/avp-python#documentation"
Repository = "https://github.com/VectorArc/avp-python"
Issues = "https://github.com/VectorArc/avp-python/issues"
Changelog = "https://github.com/VectorArc/avp-python/blob/main/CHANGELOG.md"

[project.optional-dependencies]
# Engine connectors (pick one or more)
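# e.g. pip install "avp[hf]"; extras combine, e.g. pip install "avp[hf,transport]"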
hf = ["torch>=2.0", "transformers>=5.0"]
huggingface = ["avp[hf]"]
vllm = ["vllm>=0.17.0,<0.19.0", "safetensors>=0.4", "huggingface-hub>=1.0"]
llamacpp = ["llama-cpp-python>=0.3", "gguf>=0.6"]
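# The ollama extra mirrors the llama.cpp (GGUF) stack; no ollama-specific package is pinned.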
ollama = ["llama-cpp-python>=0.3", "gguf>=0.6"]

# Framework integrations (each requires HF engine)
langchain = ["avp[hf]", "langchain-core>=1.0"]
crewai = ["avp[hf]", "crewai>=0.86"]
autogen = ["avp[hf]", "autogen-core>=0.4"]

# Reference transport binding
transport = ["httpx[http2]>=0.25"]
server = ["avp[transport]", "fastapi>=0.104", "uvicorn[standard]>=0.24"]

# Convenience bundles
all = ["avp[hf,llamacpp,langchain,crewai,autogen,transport,server]"]
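# Note: the vllm and ollama extras are not part of this bundle.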

# Development
benchmark = ["avp[hf]", "datasets>=2.14"]
dev = [
"avp[hf,transport,server]",
"pytest>=7.0",
"pytest-asyncio>=0.21",
"ruff>=0.1",
"grpcio-tools>=1.59",
]

[project.entry-points."vllm.general_plugins"]
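# vLLM loads each "vllm.general_plugins" entry point at startup and calls it,
# so installing avp next to vLLM registers the latent connector automatically.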
avp_latent = "avp.connectors.vllm_model_plugin:register"

[tool.hatch.version]
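# hatchling fills the dynamic "version" field by reading __version__ from this file.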
path = "src/avp/version.py"

[tool.hatch.build.targets.wheel]
packages = ["src/avp"]

[tool.pytest.ini_options]
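# "auto" lets pytest-asyncio collect async test functions without explicit markers.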
asyncio_mode = "auto"
testpaths = ["tests"]
markers = [
"requires_torch: marks tests that require torch",
"requires_transformers: marks tests that require transformers",
"requires_vllm: marks tests that require vLLM + CUDA GPU (deselected by default)",
]
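# GPU tests are deselected by default; select them with "pytest -m requires_vllm"
# (a -m given on the command line overrides the one in addopts).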
addopts = "-m 'not requires_vllm'"

[tool.ruff]
line-length = 99
target-version = "py310"