From 2284b1cfc2873f4cfbd847baa955fdf35c0b760d Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Thu, 12 Mar 2026 14:05:51 +0000
Subject: [PATCH] chore: add .agents/environment.yaml for automated dev setup

Co-Authored-By: Joseph Gross
---
 .agents/environment.yaml | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 .agents/environment.yaml

diff --git a/.agents/environment.yaml b/.agents/environment.yaml
new file mode 100644
index 00000000000..718bb9fea4f
--- /dev/null
+++ b/.agents/environment.yaml
@@ -0,0 +1,32 @@
+# See spec at https://docs.devin.ai/onboard-devin/repo-setup
+initialize: |
+  # Install build tools and dependencies for llama.cpp (C/C++ project using CMake)
+  sudo apt-get update -qq
+  sudo apt-get install -y -qq cmake ccache libcurl4-openssl-dev
+  pip install pre-commit
+maintenance: |
+  # Set up pre-commit hooks and build the project
+  pre-commit install
+  cmake -B build
+  cmake --build build --config Release -j $(nproc)
+knowledge:
+  - name: lint
+    contents: |
+      # Pre-commit hooks handle linting (trailing whitespace, end-of-file, yaml check, flake8)
+      pre-commit run --all-files
+  - name: test
+    contents: |
+      # Run the test suite via CTest
+      cd build && ctest --output-on-failure
+  - name: build
+    contents: |
+      # CPU build (default, no GPU)
+      cmake -B build
+      cmake --build build --config Release -j $(nproc)
+      # Binaries are output to build/bin/
+  - name: startup
+    contents: |
+      # Run the CLI (requires a GGUF model file)
+      ./build/bin/llama-cli -m path/to/model.gguf
+      # Or launch the OpenAI-compatible API server
+      ./build/bin/llama-server -m path/to/model.gguf