diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..11a55cf
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,60 @@
+# Environment
+KEYNETRA_ENVIRONMENT=development
+KEYNETRA_DEBUG=false
+
+# Core storage
+KEYNETRA_DATABASE_URL=sqlite+pysqlite:///./keynetra.db
+KEYNETRA_REDIS_URL=redis://localhost:6379/0
+
+# API auth
+KEYNETRA_API_KEYS=devkey
+# Optional: comma-separated SHA256 hashes instead of plain API keys
+# KEYNETRA_API_KEY_HASHES=
+KEYNETRA_JWT_SECRET=change-me
+KEYNETRA_JWT_ALGORITHM=HS256
+KEYNETRA_ADMIN_USERNAME=admin
+KEYNETRA_ADMIN_PASSWORD=admin123
+KEYNETRA_ADMIN_TOKEN_EXPIRY_MINUTES=60
+
+# CORS
+KEYNETRA_CORS_ALLOW_ORIGINS=http://localhost:5173,http://127.0.0.1:5173
+KEYNETRA_CORS_ALLOW_ORIGIN_REGEX=
+KEYNETRA_CORS_ALLOW_CREDENTIALS=true
+KEYNETRA_CORS_ALLOW_METHODS=*
+KEYNETRA_CORS_ALLOW_HEADERS=*
+
+# Policy/model loading
+# Optional inline policy JSON
+# KEYNETRA_POLICIES_JSON=
+# Optional comma-separated file/dir paths
+KEYNETRA_POLICY_PATHS=./examples/policies
+KEYNETRA_MODEL_PATHS=./examples/auth-model.yaml
+
+# Caching and resilience
+KEYNETRA_DECISION_CACHE_TTL_SECONDS=5
+KEYNETRA_SERVICE_TIMEOUT_SECONDS=2.0
+KEYNETRA_CRITICAL_RETRY_ATTEMPTS=3
+KEYNETRA_RESILIENCE_MODE=fail_closed
+KEYNETRA_RESILIENCE_FALLBACK_BEHAVIOR=static
+
+# Rate limiting
+KEYNETRA_RATE_LIMIT_PER_MINUTE=60
+KEYNETRA_RATE_LIMIT_BURST=60
+KEYNETRA_RATE_LIMIT_WINDOW_SECONDS=60
+
+# Runtime mode
+KEYNETRA_SERVICE_MODE=all
+KEYNETRA_AUTO_SEED_SAMPLE_DATA=true
+KEYNETRA_OTEL_ENABLED=false
+
+# Server defaults for CLI config mode
+KEYNETRA_SERVER_HOST=0.0.0.0
+KEYNETRA_SERVER_PORT=8000
+
+# Policy distribution
+KEYNETRA_POLICY_EVENTS_CHANNEL=keynetra:policy_events
+
+# Optional OIDC/JWKS
+# KEYNETRA_OIDC_JWKS_URL=
+# KEYNETRA_OIDC_AUDIENCE=
+# KEYNETRA_OIDC_ISSUER=
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..2ae894b
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @repo-owner
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..1e8e26e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,37 @@
+name: Bug report
+description: Report a reproducible issue in KeyNetra.
+title: "[Bug]: "
+labels:
+ - bug
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Describe the issue as clearly as possible.
+ placeholder: What happened?
+ validations:
+ required: true
+ - type: textarea
+ id: steps
+ attributes:
+ label: Steps to reproduce
+ description: Provide the exact steps needed to reproduce the issue.
+ placeholder: 1. ...
+ validations:
+ required: true
+ - type: textarea
+ id: expected
+ attributes:
+ label: Expected behavior
+ description: Describe what you expected to happen.
+ validations:
+ required: true
+ - type: textarea
+ id: environment
+ attributes:
+ label: Environment
+ description: Include OS, Python version, deployment mode, and any relevant config.
+ placeholder: macOS, Python 3.11, SQLite, etc.
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000..8db3a9f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,28 @@
+name: Feature request
+description: Suggest an improvement or new capability for KeyNetra.
+title: "[Feature]: "
+labels:
+ - enhancement
+body:
+ - type: textarea
+ id: proposal
+ attributes:
+ label: Proposal
+ description: Summarize the feature you want to add.
+ placeholder: What should KeyNetra do?
+ validations:
+ required: true
+ - type: textarea
+ id: use_case
+ attributes:
+ label: Use case
+ description: Explain the problem this feature solves.
+ validations:
+ required: true
+ - type: textarea
+ id: impact
+ attributes:
+ label: Impact
+ description: Describe the expected benefit or tradeoffs.
+ validations:
+ required: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..a6f8be5
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,18 @@
+## Summary
+
+Describe the purpose of this pull request.
+
+## Changes
+
+-
+
+## Test Plan
+
+-
+
+## Checklist
+
+- [ ] tests added
+- [ ] docs updated
+- [ ] migrations verified
+- [ ] backward compatibility preserved
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..bc8ce77
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,58 @@
+name: CI
+
+on:
+ push:
+ branches: [main]
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ test:
+ name: CI / test (${{ matrix.python-version }})
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.11", "3.12", "3.13", "3.14"]
+
+ env:
+ KEYNETRA_DATABASE_URL: sqlite+pysqlite:///./.keynetra-ci.db
+ KEYNETRA_API_KEYS: testkey
+ PYTHONUNBUFFERED: "1"
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: "pip"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements-dev.txt
+ python -m pip install -e .
+
+ - name: Lint
+ run: |
+ ruff check .
+ black --check .
+ isort --check-only .
+
+ - name: Migration check
+ env:
+ PYTHONPATH: ${{ github.workspace }}
+ run: python -m keynetra.cli migrate --confirm-destructive
+
+ - name: Tests and coverage
+ env:
+ PYTHONPATH: ${{ github.workspace }}
+ run: |
+ python -m pytest -q --cov=keynetra --cov-fail-under=80
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..d09a355
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,57 @@
+name: Release
+
+on:
+ push:
+ tags:
+ - "v*"
+
+permissions:
+ contents: write
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ env:
+ KEYNETRA_DATABASE_URL: sqlite+pysqlite:///./.keynetra-release.db
+ KEYNETRA_API_KEYS: testkey
+ PYTHONUNBUFFERED: "1"
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements-dev.txt
+
+ - name: Build Python package
+ run: python -m build
+
+ - name: Run tests
+ run: pytest -q --cov=keynetra --cov-fail-under=80
+
+ - name: Attach release artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: keynetra-release-artifacts
+ path: |
+ dist/*.tar.gz
+ dist/*.whl
+
+ - name: Publish GitHub release
+ uses: softprops/action-gh-release@v2
+ with:
+ name: KeyNetra ${{ github.ref_name }}
+ body: |
+ Initial public release of the KeyNetra authorization engine.
+
+ Includes support for RBAC, ABAC, ACL, and ReBAC with a compiled authorization engine, distributed caching, policy simulation, impact analysis, and observability.
+ files: |
+ dist/*.tar.gz
+ dist/*.whl
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cf73dc8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,19 @@
+__pycache__/
+*.pyc
+.env
+.venv
+.vscode
+.idea
+dist/
+build/
+.coverage
+htmlcov/
+.pytest_cache/
+node_modules/
+.ruff_cache/
+.mypy_cache/
+*.db
+*.sqlite
+*.sqlite3
+.DS_Store
+docs-site/build/
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..5170fb2
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,19 @@
+# Changelog
+
+## v0.1.0
+
+Initial public release of the KeyNetra authorization engine.
+
+### Features
+
+- RBAC
+- ABAC
+- ACL
+- ReBAC
+- Authorization models
+- Policy simulation
+- Impact analysis
+- Distributed caching
+- Redis scaling
+- Prometheus metrics
+- Docker and Kubernetes deployment
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..b93685a
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,68 @@
+# Contributing to KeyNetra
+
+Thanks for contributing.
+This guide is optimized for first-time contributors.
+
+## Development setup
+
+```bash
+python3.11 -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+export KEYNETRA_API_KEYS=devkey
+```
+
+Start the API locally:
+
+```bash
+python -m keynetra.cli serve
+```
+
+## Run tests
+
+Run all tests:
+
+```bash
+PYTHONPATH=. python3.11 -m pytest -q
+```
+
+Run targeted tests:
+
+```bash
+PYTHONPATH=. python3.11 -m pytest -q tests/test_api.py
+```
+
+## Coding guidelines
+
+- Keep changes small and focused
+- Add tests for behavior changes
+- Keep documentation in sync with code
+- Prefer clear names over clever shortcuts
+- Do not add unrelated refactors in the same PR
+
+Formatting/linting tools used in this project:
+
+- `black`
+- `isort`
+- `ruff`
+
+## Pull request checklist
+
+1. Create a feature branch
+2. Implement change with tests
+3. Run test suite locally
+4. Update docs when behavior changes
+5. Open PR with clear summary:
+ - problem
+ - approach
+ - test evidence
+
+## Reporting bugs
+
+When opening an issue, include:
+
+- expected behavior
+- actual behavior
+- minimal reproducible request/payload
+- logs/error output
+- runtime info (Python version, OS)
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..499cf05
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,28 @@
+FROM python:3.11-slim
+
+ENV PYTHONDONTWRITEBYTECODE=1 \
+ PYTHONUNBUFFERED=1 \
+ PIP_NO_CACHE_DIR=1 \
+ PYTHONPATH=/app
+
+WORKDIR /app
+
+RUN useradd --create-home --uid 10001 appuser
+
+COPY requirements.txt /app/requirements.txt
+RUN pip install --no-cache-dir -r /app/requirements.txt
+
+COPY alembic.ini /app/alembic.ini
+COPY alembic /app/alembic
+COPY keynetra /app/keynetra
+COPY infra/docker/start.sh /usr/local/bin/start-keynetra
+
+RUN chmod +x /usr/local/bin/start-keynetra && chown -R appuser:appuser /app
+
+USER appuser
+EXPOSE 8000
+
+HEALTHCHECK --interval=30s --timeout=5s --start-period=20s --retries=5 \
+ CMD python -c "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health/ready', timeout=3)"
+
+ENTRYPOINT ["start-keynetra"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..73e3a43
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,24 @@
+PYTHON ?= python3.11
+
+.PHONY: install test lint format migrate run
+
+install:
+ $(PYTHON) -m pip install -r requirements.txt -r requirements-dev.txt
+
+test:
+ $(PYTHON) -m pytest -q
+
+lint:
+ $(PYTHON) -m ruff check .
+ $(PYTHON) -m black --check .
+ $(PYTHON) -m isort --check-only .
+
+format:
+ $(PYTHON) -m black .
+ $(PYTHON) -m isort .
+
+migrate:
+ $(PYTHON) -m keynetra.cli migrate --confirm-destructive
+
+run:
+ $(PYTHON) -m uvicorn keynetra.api.main:app --host 0.0.0.0 --port 8000
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..98e040e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,385 @@
+
+
+
+ Policy-driven authorization and access control engine for modern applications.
+
+
+KeyNetra is an open-source authorization core built for teams that need Stripe/Keycloak/Casbin-level operational clarity while keeping architecture and deployment under their control.
+
+## Why KeyNetra
+
+- Deterministic evaluation pipeline with explain traces.
+- Multiple authorization models in one runtime:
+ - RBAC
+ - ACL
+ - ReBAC
+ - schema-permission checks
+ - compiled policy graph evaluation
+- Headless-first operation:
+ - HTTP API
+ - CLI
+ - embedded Python engine
+- Production-focused defaults:
+ - migrations
+ - cache layers
+ - observability metrics
+ - Docker/Kubernetes deployment assets
+
+## Table Of Contents
+
+- [Quick Start](#quick-start)
+- [Core Capabilities](#core-capabilities)
+- [Usage Modes](#usage-modes)
+- [Architecture](#architecture)
+- [API Surface](#api-surface)
+- [Configuration](#configuration)
+- [Security](#security)
+- [Caching and Consistency](#caching-and-consistency)
+- [Observability](#observability)
+- [Deployment](#deployment)
+- [Development](#development)
+- [Documentation](#documentation)
+- [Release and Compatibility](#release-and-compatibility)
+- [Citation](#citation)
+- [Contributing](#contributing)
+- [License](#license)
+
+## Quick Start
+
+### 1) Install
+
+```bash
+python3.11 -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+cp .env.example .env
+```
+
+### 2) Start API
+
+```bash
+python -m keynetra.cli serve --config examples/keynetra.yaml
+```
+
+### 3) Verify health
+
+```bash
+curl -i http://localhost:8000/health/ready
+```
+
+### 4) Run first authorization check
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": 1, "role": "manager"},
+ "action": "approve_payment",
+ "resource": {"amount": 5000},
+ "context": {}
+ }'
+```
+
+## Core Capabilities
+
+| Capability | Details |
+| --- | --- |
+| RBAC | Roles, permissions, role-permission bindings |
+| ACL | Subject/resource/action-level allow/deny |
+| ReBAC | Relationship tuples and index-assisted checks |
+| Compiled policy graph | Deterministic policy evaluation stage |
+| Auth modeling | Schema parser + validator + compiler |
+| Simulation | `/simulate-policy` and `/impact-analysis` |
+| Cache layers | Policy, decision, relationship, ACL, access index |
+| Observability | Prometheus metrics + structured logs |
+| Runtime modes | API, CLI, embedded Python |
+
+## Usage Modes
+
+### API Server Mode
+
+```bash
+python -m keynetra.cli serve --config examples/keynetra.yaml
+```
+
+### CLI Mode
+
+```bash
+python -m keynetra.cli help-cli
+python -m keynetra.cli check --config examples/keynetra.yaml --api-key devkey --action read --user '{"id":"u1"}' --resource '{"resource_type":"document","resource_id":"doc-1"}'
+python -m keynetra.cli compile-policies --config examples/keynetra.yaml
+python -m keynetra.cli doctor --service core --config examples/keynetra.yaml
+```
+
+### Embedded Python Mode
+
+```python
+from keynetra import KeyNetra
+
+engine = KeyNetra.from_config("examples/keynetra.yaml")
+engine.load_policies("examples/policies")
+engine.load_model("examples/auth-model.yaml")
+
+decision = engine.check_access(
+ subject="user:1",
+ action="read",
+ resource="document:abc",
+ context={},
+)
+print(decision.allowed)
+```
+
+### Pure Engine Import
+
+```python
+from keynetra.engine import KeyNetraEngine
+
+engine = KeyNetraEngine(
+ [{"action": "read", "effect": "allow", "priority": 1, "conditions": {}}]
+)
+decision = engine.check_access(
+ subject="user:123",
+ action="read",
+ resource="document:abc",
+ context={},
+)
+print(decision.allowed)
+```
+
+## Architecture
+
+Layered boundaries:
+
+- `keynetra/engine`: deterministic decision logic only
+- `keynetra/services`: orchestration, hydration, consistency handling
+- `keynetra/infrastructure`: DB/cache/repository side effects
+- `keynetra/api`: transport, middleware, and route wiring
+
+```mermaid
+flowchart LR
+ A[Request] --> B[AuthorizationService]
+ B --> C[KeyNetraEngine]
+ C --> D[Decision + Explain Trace]
+ B --> E[(Decision Cache)]
+ B --> F[(Audit Log)]
+```
+
+Engine evaluation order:
+
+1. direct user permissions
+2. ACL checks
+3. RBAC role permissions
+4. relationship index checks
+5. schema permission checks
+6. compiled policy graph checks
+7. default deny
+
+## API Surface
+
+OpenAPI contract: [`contracts/openapi/keynetra-v0.1.0.yaml`](./contracts/openapi/keynetra-v0.1.0.yaml)
+
+Key endpoints:
+
+- Decisions:
+ - `POST /check-access`
+ - `POST /check-access-batch`
+ - `POST /simulate`
+- Modeling:
+ - `POST /auth-model`
+ - `GET /auth-model`
+- ACL:
+ - `POST /acl`
+ - `GET /acl/{resource_type}/{resource_id}`
+ - `DELETE /acl/{acl_id}`
+- Simulation:
+ - `POST /simulate-policy`
+ - `POST /impact-analysis`
+- Health and metrics:
+ - `GET /health`
+ - `GET /health/live`
+ - `GET /health/ready`
+ - `GET /metrics`
+- Admin auth:
+ - `POST /admin/login`
+
+## Configuration
+
+KeyNetra supports YAML, JSON, and TOML config files:
+
+```bash
+python -m keynetra.cli serve --config examples/keynetra.yaml
+```
+
+Example (`examples/keynetra.yaml`):
+
+```yaml
+database:
+ url: sqlite+pysqlite:///./keynetra.db
+
+redis:
+ url: redis://localhost:6379/0
+
+policies:
+ path: ./examples/policies
+
+models:
+ path: ./examples/auth-model.yaml
+
+seed_data: true
+
+server:
+ host: 0.0.0.0
+  port: 8000
+```
+
+Policy/model file support:
+
+- policies: `.yaml`, `.json`, `.polar`
+- auth models: `.yaml`, `.json`, `.toml` (plus raw schema/text)
+
+## Security
+
+- API key auth (`X-API-Key`)
+- JWT bearer auth
+- admin login endpoint (`/admin/login`)
+- management role enforcement (`viewer`, `developer`, `admin`)
+- idempotency middleware for write safety
+- API version negotiation (`X-API-Version`)
+
+For disclosure policy, see [`SECURITY.md`](./SECURITY.md).
+
+## Caching and Consistency
+
+Cache layers:
+
+- policy cache
+- decision cache
+- relationship cache
+- ACL cache
+- access-index cache
+
+Distribution and invalidation:
+
+- Redis backend with in-memory fallback
+- namespace bump invalidation strategy
+- policy distribution via Redis Pub/Sub
+
+## Observability
+
+- Prometheus metrics at `GET /metrics`
+- structured logging (JSON) and rich colored logs
+- explain traces and audit records for decision transparency
+
+Docker monitoring stack includes:
+
+- Prometheus: `http://localhost:9090`
+- Grafana: `http://localhost:3000`
+
+## Deployment
+
+### Docker Compose (default)
+
+```bash
+docker compose up --build
+```
+
+### Docker Compose (development)
+
+```bash
+docker compose -f docker-compose.dev.yml up --build
+```
+
+Services included in stack:
+
+- KeyNetra API
+- PostgreSQL
+- Redis
+- Prometheus
+- Grafana
+
+Kubernetes baseline:
+
+- Helm chart at `infra/k8s/helm/keynetra`
+
+## Development
+
+```bash
+make install
+make lint
+make test
+make migrate
+make run
+```
+
+Policy and diagnostics:
+
+```bash
+python -m keynetra.cli test-policy examples/policy_tests.yaml
+python -m keynetra.cli explain --user u1 --resource r1 --action read
+python -m keynetra.cli benchmark --api-key devkey
+```
+
+## Documentation
+
+- docs index: [`docs/README.md`](./docs/README.md)
+- architecture notes: [`architecture.md`](./architecture.md)
+- Docusaurus site app: [`docs-site/`](./docs-site/)
+- sidebar config: [`docs-site/sidebars.ts`](./docs-site/sidebars.ts)
+- Docusaurus config: [`docs-site/docusaurus.config.ts`](./docs-site/docusaurus.config.ts)
+
+## Release and Compatibility
+
+Current version: `0.1.0`
+
+- package version: [`pyproject.toml`](./pyproject.toml)
+- runtime version: [`keynetra/version.py`](./keynetra/version.py)
+- release notes: [`CHANGELOG.md`](./CHANGELOG.md)
+
+Compatibility:
+
+- Python `3.11+`
+- DB: PostgreSQL, SQLite
+- Cache: Redis optional
+- Deployment: Docker Compose, Helm baseline
+
+## Citation
+
+```bibtex
+@software{keynetra_v0_1_0,
+ title = {KeyNetra: Policy-driven Authorization and Access Control Engine},
+ author = {KeyNetra Community},
+ year = {2026},
+ version = {0.1.0},
+ url = {https://github.com/keynetra/keynetra-core}
+}
+```
+
+## Contributing
+
+Contributions are welcome.
+
+- contribution guide: [`CONTRIBUTING.md`](./CONTRIBUTING.md)
+- security policy: [`SECURITY.md`](./SECURITY.md)
+
+## License
+
+Apache License 2.0. See [`LICENSE`](./LICENSE).
+
+---
+
+
+ Made with love for the KeyNetra Community.
+
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..ca2282e
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,47 @@
+# Security Policy
+
+## Reporting vulnerabilities
+
+Do not open public issues for security findings.
+
+Report privately to:
+
+- `security@keynetra.com`
+
+Include:
+
+- affected component/endpoint
+- reproduction steps
+- potential impact
+- suggested mitigation (if available)
+
+## Safe policy design recommendations
+
+1. Default deny
+
+- Do not rely on broad allow fallback policies.
+
+2. Least privilege
+
+- Grant only required actions for each role.
+
+3. Separate duties
+
+- Add explicit deny controls for high-risk flows (for example maker-checker).
+
+4. Tenant isolation
+
+- Enforce tenant boundaries in policy and request attributes.
+
+5. Validate policy changes before rollout
+
+- Use `/simulate-policy` for before/after decision checks.
+- Use `/impact-analysis` to detect large blast radius.
+
+6. Audit decision metadata
+
+- Store `decision`, `reason`, `policy_id`, and `revision` for traceability.
+
+## Supported versions
+
+Security fixes are applied to the current active release line.
diff --git a/alembic.ini b/alembic.ini
new file mode 100644
index 0000000..0b90a67
--- /dev/null
+++ b/alembic.ini
@@ -0,0 +1,36 @@
+[alembic]
+script_location = alembic
+path_separator = os
+prepend_sys_path = .
+
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = INFO
+handlers = console
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
diff --git a/alembic/env.py b/alembic/env.py
new file mode 100644
index 0000000..71e1d86
--- /dev/null
+++ b/alembic/env.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from logging.config import fileConfig
+
+from sqlalchemy import engine_from_config, pool
+
+from alembic import context
+from keynetra.config.settings import get_settings
+from keynetra.domain.models import acl as _acl # noqa: F401
+from keynetra.domain.models import audit as _audit # noqa: F401
+from keynetra.domain.models import auth_model as _auth_model # noqa: F401
+from keynetra.domain.models import idempotency as _idempotency # noqa: F401
+from keynetra.domain.models import policy_versioning as _policy_versioning # noqa: F401
+from keynetra.domain.models import rbac as _rbac # noqa: F401
+from keynetra.domain.models import relationship as _relationship # noqa: F401
+from keynetra.domain.models import tenant as _tenant # noqa: F401
+from keynetra.domain.models.base import Base
+
+config = context.config
+
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+
+target_metadata = Base.metadata
+
+
+def get_url() -> str:
+ return get_settings().database_url
+
+
+def run_migrations_offline() -> None:
+ url = get_url()
+ context.configure(
+ url=url,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ compare_type=True,
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online() -> None:
+ configuration = config.get_section(config.config_ini_section) or {}
+ configuration["sqlalchemy.url"] = get_url()
+ connectable = engine_from_config(
+ configuration,
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ with connectable.connect() as connection:
+ context.configure(connection=connection, target_metadata=target_metadata, compare_type=True)
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/alembic/script.py.mako b/alembic/script.py.mako
new file mode 100644
index 0000000..1098775
--- /dev/null
+++ b/alembic/script.py.mako
@@ -0,0 +1,28 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+
+from __future__ import annotations
+
+from alembic import op
+import sqlalchemy as sa
+
+${imports if imports else ""}
+
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
+
diff --git a/alembic/versions/20260404_000001_init.py b/alembic/versions/20260404_000001_init.py
new file mode 100644
index 0000000..d9e4e77
--- /dev/null
+++ b/alembic/versions/20260404_000001_init.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260404_000001"
+down_revision = None
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "users",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column("external_id", sa.String(length=128), nullable=True),
+ )
+ op.create_index("ix_users_external_id", "users", ["external_id"], unique=False)
+
+ op.create_table(
+ "roles",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column("name", sa.String(length=64), nullable=False, unique=True),
+ )
+
+ op.create_table(
+ "permissions",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column("action", sa.String(length=128), nullable=False),
+ sa.UniqueConstraint("action", name="uq_permissions_action"),
+ )
+
+ op.create_table(
+ "user_roles",
+ sa.Column(
+ "user_id", sa.Integer(), sa.ForeignKey("users.id", ondelete="CASCADE"), primary_key=True
+ ),
+ sa.Column(
+ "role_id", sa.Integer(), sa.ForeignKey("roles.id", ondelete="CASCADE"), primary_key=True
+ ),
+ )
+
+ op.create_table(
+ "role_permissions",
+ sa.Column(
+ "role_id", sa.Integer(), sa.ForeignKey("roles.id", ondelete="CASCADE"), primary_key=True
+ ),
+ sa.Column(
+ "permission_id",
+ sa.Integer(),
+ sa.ForeignKey("permissions.id", ondelete="CASCADE"),
+ primary_key=True,
+ ),
+ )
+
+ # policies are created in 20260404_000002 (versioned policy schema)
+
+
+def downgrade() -> None:
+ op.drop_table("role_permissions")
+ op.drop_table("user_roles")
+ op.drop_table("permissions")
+ op.drop_table("roles")
+ op.drop_index("ix_users_external_id", table_name="users")
+ op.drop_table("users")
diff --git a/alembic/versions/20260404_000002_tenants_versioning_audit.py b/alembic/versions/20260404_000002_tenants_versioning_audit.py
new file mode 100644
index 0000000..b5f33ca
--- /dev/null
+++ b/alembic/versions/20260404_000002_tenants_versioning_audit.py
@@ -0,0 +1,99 @@
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260404_000002"
+down_revision = "20260404_000001"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "tenants",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column("tenant_key", sa.String(length=64), nullable=False, unique=True),
+ )
+
+ op.create_table(
+ "audit_logs",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column(
+ "tenant_id",
+ sa.Integer(),
+ sa.ForeignKey("tenants.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column("principal_type", sa.String(length=32), nullable=False),
+ sa.Column("principal_id", sa.String(length=128), nullable=False),
+ sa.Column("user", sa.JSON(), nullable=False, server_default=sa.text("'{}'")),
+ sa.Column("action", sa.String(length=128), nullable=False),
+ sa.Column("resource", sa.JSON(), nullable=False, server_default=sa.text("'{}'")),
+ sa.Column("decision", sa.String(length=8), nullable=False),
+ sa.Column("matched_policies", sa.JSON(), nullable=False, server_default=sa.text("'[]'")),
+ sa.Column("reason", sa.String(length=256), nullable=True),
+ sa.Column(
+ "created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()
+ ),
+ )
+ op.create_index("ix_audit_logs_tenant_id", "audit_logs", ["tenant_id"], unique=False)
+
+ op.create_table(
+ "policies",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column(
+ "tenant_id",
+ sa.Integer(),
+ sa.ForeignKey("tenants.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column("policy_key", sa.String(length=64), nullable=False),
+ sa.Column("current_version", sa.Integer(), nullable=False, server_default="1"),
+ sa.UniqueConstraint("tenant_id", "policy_key", name="uq_policies_tenant_key"),
+ )
+ op.create_index("ix_policies_tenant_id", "policies", ["tenant_id"], unique=False)
+
+ op.create_table(
+ "policy_versions",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column(
+ "tenant_id",
+ sa.Integer(),
+ sa.ForeignKey("tenants.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column(
+ "policy_id",
+ sa.Integer(),
+ sa.ForeignKey("policies.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column("version", sa.Integer(), nullable=False),
+ sa.Column("action", sa.String(length=128), nullable=False),
+ sa.Column("effect", sa.String(length=16), nullable=False, server_default="deny"),
+ sa.Column("priority", sa.Integer(), nullable=False, server_default="100"),
+ sa.Column("conditions", sa.JSON(), nullable=False, server_default=sa.text("'{}'")),
+ sa.Column(
+ "created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()
+ ),
+ sa.Column("created_by", sa.String(length=128), nullable=True),
+ sa.UniqueConstraint("policy_id", "version", name="uq_policy_versions_policy_version"),
+ )
+ op.create_index(
+ "ix_policy_versions_tenant_action_priority",
+ "policy_versions",
+ ["tenant_id", "action", "priority"],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ op.drop_index("ix_policy_versions_tenant_action_priority", table_name="policy_versions")
+ op.drop_table("policy_versions")
+ op.drop_index("ix_policies_tenant_id", table_name="policies")
+ op.drop_table("policies")
+ op.drop_index("ix_audit_logs_tenant_id", table_name="audit_logs")
+ op.drop_table("audit_logs")
+ op.drop_table("tenants")
diff --git a/alembic/versions/20260404_000003_tenant_policy_version.py b/alembic/versions/20260404_000003_tenant_policy_version.py
new file mode 100644
index 0000000..0c7c3ab
--- /dev/null
+++ b/alembic/versions/20260404_000003_tenant_policy_version.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260404_000003"
+down_revision = "20260404_000002"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "tenants", sa.Column("policy_version", sa.Integer(), nullable=False, server_default="1")
+ )
+
+
+def downgrade() -> None:
+ op.drop_column("tenants", "policy_version")
diff --git a/alembic/versions/20260404_000004_relationships.py b/alembic/versions/20260404_000004_relationships.py
new file mode 100644
index 0000000..0a31fbc
--- /dev/null
+++ b/alembic/versions/20260404_000004_relationships.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260404_000004"
+down_revision = "20260404_000003"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "relationships",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column(
+ "tenant_id",
+ sa.Integer(),
+ sa.ForeignKey("tenants.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column("subject_type", sa.String(length=32), nullable=False),
+ sa.Column("subject_id", sa.String(length=128), nullable=False),
+ sa.Column("relation", sa.String(length=64), nullable=False),
+ sa.Column("object_type", sa.String(length=32), nullable=False),
+ sa.Column("object_id", sa.String(length=128), nullable=False),
+ sa.UniqueConstraint(
+ "tenant_id",
+ "subject_type",
+ "subject_id",
+ "relation",
+ "object_type",
+ "object_id",
+ name="uq_relationships_tuple",
+ ),
+ )
+ op.create_index(
+ "ix_relationships_lookup",
+ "relationships",
+ ["tenant_id", "subject_type", "subject_id", "relation"],
+ unique=False,
+ )
+ op.create_index("ix_relationships_tenant_id", "relationships", ["tenant_id"], unique=False)
+
+
+def downgrade() -> None:
+ op.drop_index("ix_relationships_tenant_id", table_name="relationships")
+ op.drop_index("ix_relationships_lookup", table_name="relationships")
+ op.drop_table("relationships")
diff --git a/alembic/versions/20260404_000005_audit_explainability.py b/alembic/versions/20260404_000005_audit_explainability.py
new file mode 100644
index 0000000..b744a42
--- /dev/null
+++ b/alembic/versions/20260404_000005_audit_explainability.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260404_000005"
+down_revision = "20260404_000004"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "audit_logs",
+ sa.Column("evaluated_rules", sa.JSON(), nullable=False, server_default=sa.text("'[]'")),
+ )
+ op.add_column(
+ "audit_logs",
+ sa.Column("failed_conditions", sa.JSON(), nullable=False, server_default=sa.text("'[]'")),
+ )
+
+
+def downgrade() -> None:
+ op.drop_column("audit_logs", "failed_conditions")
+ op.drop_column("audit_logs", "evaluated_rules")
diff --git a/alembic/versions/20260405_000006_idempotency.py b/alembic/versions/20260405_000006_idempotency.py
new file mode 100644
index 0000000..e6629a3
--- /dev/null
+++ b/alembic/versions/20260405_000006_idempotency.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260405_000006"
+down_revision = "20260404_000005"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "idempotency_records",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column("scope", sa.String(length=256), nullable=False),
+ sa.Column("idempotency_key", sa.String(length=128), nullable=False),
+ sa.Column("request_hash", sa.String(length=64), nullable=False),
+ sa.Column("response_status_code", sa.Integer(), nullable=True),
+ sa.Column("response_body", sa.Text(), nullable=True),
+ sa.Column("response_content_type", sa.String(length=128), nullable=True),
+ sa.Column(
+ "created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()
+ ),
+ sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
+ sa.UniqueConstraint("scope", "idempotency_key", name="uq_idempotency_records_scope_key"),
+ )
+
+
+def downgrade() -> None:
+ op.drop_table("idempotency_records")
diff --git a/alembic/versions/20260405_000007_resource_acl.py b/alembic/versions/20260405_000007_resource_acl.py
new file mode 100644
index 0000000..73902e7
--- /dev/null
+++ b/alembic/versions/20260405_000007_resource_acl.py
@@ -0,0 +1,52 @@
+"""add resource acl table
+
+Revision ID: 20260405_000007
+Revises: 20260405_000006
+Create Date: 2026-04-05 00:07:00.000000
+"""
+
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "20260405_000007"
+down_revision = "20260405_000006"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "resource_acl",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column(
+ "tenant_id",
+ sa.Integer(),
+ sa.ForeignKey("tenants.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column("subject_type", sa.String(length=32), nullable=False),
+ sa.Column("subject_id", sa.String(length=128), nullable=False),
+ sa.Column("resource_type", sa.String(length=64), nullable=False),
+ sa.Column("resource_id", sa.String(length=128), nullable=False),
+ sa.Column("action", sa.String(length=128), nullable=False),
+ sa.Column("effect", sa.String(length=16), nullable=False),
+ sa.Column("created_at", sa.DateTime(), nullable=False),
+ )
+ op.create_index(
+ "ix_resource_acl_lookup",
+ "resource_acl",
+ ["tenant_id", "resource_type", "resource_id", "action"],
+ )
+ op.create_index(
+ "ix_resource_acl_subject", "resource_acl", ["tenant_id", "subject_type", "subject_id"]
+ )
+
+
+def downgrade() -> None:
+ op.drop_index("ix_resource_acl_subject", table_name="resource_acl")
+ op.drop_index("ix_resource_acl_lookup", table_name="resource_acl")
+ op.drop_table("resource_acl")
diff --git a/alembic/versions/20260405_000008_auth_model_revision.py b/alembic/versions/20260405_000008_auth_model_revision.py
new file mode 100644
index 0000000..fe9fb8a
--- /dev/null
+++ b/alembic/versions/20260405_000008_auth_model_revision.py
@@ -0,0 +1,46 @@
+"""add auth model storage and authorization revision
+
+Revision ID: 20260405_000008
+Revises: 20260405_000007
+Create Date: 2026-04-05 00:08:00.000000
+"""
+
+from __future__ import annotations
+
+import sqlalchemy as sa
+
+from alembic import op
+
+revision = "20260405_000008"
+down_revision = "20260405_000007"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "tenants",
+ sa.Column("authorization_revision", sa.Integer(), nullable=False, server_default="1"),
+ )
+ op.create_table(
+ "auth_models",
+ sa.Column("id", sa.Integer(), primary_key=True),
+ sa.Column(
+ "tenant_id",
+ sa.Integer(),
+ sa.ForeignKey("tenants.id", ondelete="CASCADE"),
+ nullable=False,
+ # tenant uniqueness is enforced once by the named uq_auth_models_tenant constraint below
+ ),
+ sa.Column("schema_text", sa.Text(), nullable=False),
+ sa.Column("schema_json", sa.JSON(), nullable=False),
+ sa.Column("compiled_json", sa.JSON(), nullable=False),
+ sa.Column("created_at", sa.DateTime(), nullable=False),
+ sa.Column("updated_at", sa.DateTime(), nullable=False),
+ sa.UniqueConstraint("tenant_id", name="uq_auth_models_tenant"),
+ )
+
+
+def downgrade() -> None:
+ op.drop_table("auth_models")
+ op.drop_column("tenants", "authorization_revision")
diff --git a/architecture.md b/architecture.md
new file mode 100644
index 0000000..9137bdd
--- /dev/null
+++ b/architecture.md
@@ -0,0 +1,77 @@
+# KeyNetra Core Architecture
+
+## Boundaries
+
+- `keynetra.engine` is pure and deterministic.
+ It accepts a single `AuthorizationInput` object and returns an `AuthorizationDecision`.
+ It imports only the Python standard library and has no access to DB sessions, caches, HTTP objects, or external state.
+- `keynetra.services` orchestrates workflows.
+ Services validate inputs, hydrate explicit engine inputs from repositories, invoke the engine, coordinate cache lookups, and write audit records.
+- `keynetra.infrastructure` owns side effects.
+ Repositories handle SQLAlchemy access. Cache adapters handle Redis or in-memory fallback. Policy event publishing also lives here.
+- `keynetra.api` is transport only.
+ Routes translate HTTP requests into service calls and map service outputs back to response models.
+
+## Authorization Flow
+
+1. API receives `user`, `resource`, `action`, and optional `context`.
+2. `AuthorizationService` loads the tenant, current policies, user context, and relationships through repository/cache interfaces.
+3. The service builds one explicit `AuthorizationInput` and hydrates ACL and access-index data when available.
+4. `KeyNetraEngine` evaluates that input deterministically in this order:
+ - direct user permissions
+ - ACL entries
+ - RBAC role permissions
+ - relationship index checks
+ - compiled ABAC policy graph
+ - default deny
+5. The engine returns:
+ - `decision`
+ - `reason`
+ - `policy_id`
+ - `explain_trace`
+6. The service stores the decision in the short-TTL decision cache and writes the audit log through infrastructure.
+7. API returns the decision without embedding business logic.
+
+```mermaid
+flowchart TD
+ A[Request] --> B[Hydrate explicit input]
+ B --> C1[Direct permission]
+ C1 -->|allow/deny| Z[Return]
+ C1 -->|abstain| C2[ACL]
+ C2 -->|allow/deny| Z
+ C2 -->|abstain| C3[RBAC roles]
+ C3 -->|allow/deny| Z
+ C3 -->|abstain| C4[Relationship index]
+ C4 -->|allow/deny| Z
+ C4 -->|abstain| C5[Compiled ABAC graph]
+ C5 -->|allow/deny| Z
+ C5 -->|abstain| D[Default deny]
+```
+
+## Cache Layers
+
+- Decision cache:
+ Uses a stable hash of the full hydrated authorization input plus tenant policy version.
+ A tenant namespace counter invalidates cached decisions after policy or relationship changes.
+- Policy cache:
+ Stores serialized current policy definitions by tenant and policy version.
+ Policy updates bump a policy namespace and publish an invalidation event.
+- Relationship cache:
+ Stores relationship edges per tenant subject.
+ Relationship writes invalidate that subject cache entry and bump the tenant decision namespace.
+- ACL cache:
+ Stores resource-level ACL rows by tenant/resource/action.
+ ACL writes invalidate the resource namespace so ACL and access-index lookups refresh together.
+- Access index cache:
+ Stores resource/action subject indexes for ACL and relationship matches.
+ Relationship, ACL, and role-binding updates invalidate the relevant resource namespace.
+- Compiled policy graph cache:
+ Keeps an in-memory executable graph per tenant policy version.
+ Policy updates rebuild the graph from the existing DSL and store the compiled nodes in memory.
+
+## Determinism Rules
+
+- No randomness is used in the engine.
+- No hidden state is read by the engine.
+- Time-based rules require explicit `context.current_time`.
+- Relationship checks depend only on explicit relationship edges supplied in `AuthorizationInput.user["relations"]`.
diff --git a/contracts/openapi/keynetra-v0.1.0.yaml b/contracts/openapi/keynetra-v0.1.0.yaml
new file mode 100644
index 0000000..9f388c0
--- /dev/null
+++ b/contracts/openapi/keynetra-v0.1.0.yaml
@@ -0,0 +1,2216 @@
+openapi: 3.1.0
+info:
+ title: KeyNetra
+ version: 0.1.0
+paths:
+ /health:
+ get:
+ tags:
+ - health
+ summary: Health
+ operationId: health_health_get
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__str__'
+ /health/live:
+ get:
+ tags:
+ - health
+ summary: Liveness
+ operationId: liveness_health_live_get
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__str__'
+ /health/ready:
+ get:
+ tags:
+ - health
+ summary: Readiness
+ operationId: readiness_health_ready_get
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__object__'
+ /check-access:
+ post:
+ tags:
+ - access
+ summary: Check Access
+ operationId: check_access_check_access_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AccessRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_AccessDecisionResponse_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /simulate:
+ post:
+ tags:
+ - access
+ summary: Simulate
+ operationId: simulate_simulate_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AccessRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_SimulationResponse_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /check-access-batch:
+ post:
+ tags:
+ - access
+ summary: Check Access Batch
+ operationId: check_access_batch_check_access_batch_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/BatchAccessRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_BatchAccessResponse_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /admin/login:
+ post:
+ tags:
+ - auth
+ # NOTE: duplicate 'auth' tag removed; an operation's tag list should not repeat entries
+ summary: Admin Login
+ operationId: admin_login_admin_login_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AdminLoginRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_AdminLoginResponse_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /policies:
+ get:
+ tags:
+ - management
+ summary: List Policies
+ operationId: list_policies_policies_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ default: 50
+ title: Limit
+ - name: cursor
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Cursor
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_PolicyOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ post:
+ tags:
+ - management
+ summary: Create Policy
+ operationId: create_policy_policies_post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PolicyCreate'
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_PolicyOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /policies/{policy_key}:
+ put:
+ tags:
+ - management
+ summary: Update Policy
+ operationId: update_policy_policies__policy_key__put
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: policy_key
+ in: path
+ required: true
+ schema:
+ type: string
+ title: Policy Key
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PolicyCreate'
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_PolicyOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ delete:
+ tags:
+ - management
+ summary: Delete Policy
+ operationId: delete_policy_policies__policy_key__delete
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: policy_key
+ in: path
+ required: true
+ schema:
+ type: string
+ title: Policy Key
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__str__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /policies/dsl:
+ post:
+ tags:
+ - management
+ summary: Create Policy From Dsl
+ operationId: create_policy_from_dsl_policies_dsl_post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: dsl
+ in: query
+ required: true
+ schema:
+ type: string
+ title: Dsl
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_PolicyOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /policies/{policy_key}/rollback/{version}:
+ post:
+ tags:
+ - management
+ summary: Rollback Policy
+ operationId: rollback_policy_policies__policy_key__rollback__version__post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: policy_key
+ in: path
+ required: true
+ schema:
+ type: string
+ title: Policy Key
+ - name: version
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Version
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__Union_int__str___'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /acl:
+ post:
+ tags:
+ - management
+ summary: Create Acl Entry
+ operationId: create_acl_entry_acl_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ACLCreate'
+ required: true
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_ACLOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /acl/{resource_type}/{resource_id}:
+ get:
+ tags:
+ - management
+ summary: List Acl Entries
+ operationId: list_acl_entries_acl__resource_type___resource_id__get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: resource_type
+ in: path
+ required: true
+ schema:
+ type: string
+ title: Resource Type
+ - name: resource_id
+ in: path
+ required: true
+ schema:
+ type: string
+ title: Resource Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_ACLOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /acl/{acl_id}:
+ delete:
+ tags:
+ - management
+ summary: Delete Acl Entry
+ operationId: delete_acl_entry_acl__acl_id__delete
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: acl_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Acl Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__int__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /auth-model:
+ get:
+ tags:
+ - management
+ summary: Get Auth Model
+ operationId: get_auth_model_auth_model_get
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_AuthModelOut_'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ post:
+ tags:
+ - management
+ summary: Create Auth Model
+ operationId: create_auth_model_auth_model_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AuthModelCreate'
+ required: true
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_AuthModelOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /simulate-policy:
+ post:
+ tags:
+ - management
+ summary: Simulate Policy
+ operationId: simulate_policy_simulate_policy_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PolicySimulationRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_PolicySimulationResponse_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /impact-analysis:
+ post:
+ tags:
+ - management
+ summary: Impact Analysis
+ operationId: impact_analysis_impact_analysis_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ImpactAnalysisRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_ImpactAnalysisResponse_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /roles:
+ get:
+ tags:
+ - management
+ summary: List Roles
+ operationId: list_roles_roles_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ default: 50
+ title: Limit
+ - name: cursor
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Cursor
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_RoleOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ post:
+ tags:
+ - management
+ summary: Create Role
+ operationId: create_role_roles_post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RoleCreate'
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RoleOut'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /roles/{role_id}:
+ put:
+ tags:
+ - management
+ summary: Update Role
+ operationId: update_role_roles__role_id__put
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: role_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Role Id
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RoleUpdate'
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RoleOut'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ delete:
+ tags:
+ - management
+ summary: Delete Role
+ operationId: delete_role_roles__role_id__delete
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: role_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Role Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__int__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /roles/{role_id}/permissions:
+ get:
+ tags:
+ - management
+ summary: List Role Permissions
+ operationId: list_role_permissions_roles__role_id__permissions_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: role_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Role Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_PermissionOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /roles/{role_id}/permissions/{permission_id}:
+ post:
+ tags:
+ - management
+ summary: Add Permission To Role
+ operationId: add_permission_to_role_roles__role_id__permissions__permission_id__post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: role_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Role Id
+ - name: permission_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Permission Id
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_PermissionOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ delete:
+ tags:
+ - management
+ summary: Remove Permission From Role
+ operationId: remove_permission_from_role_roles__role_id__permissions__permission_id__delete
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: role_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Role Id
+ - name: permission_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Permission Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__int__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /permissions:
+ get:
+ tags:
+ - management
+ summary: List Permissions
+ operationId: list_permissions_permissions_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ default: 50
+ title: Limit
+ - name: cursor
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Cursor
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_PermissionOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ post:
+ tags:
+ - management
+ summary: Create Permission
+ operationId: create_permission_permissions_post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PermissionCreate'
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PermissionOut'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /permissions/{permission_id}:
+ put:
+ tags:
+ - management
+ summary: Update Permission
+ operationId: update_permission_permissions__permission_id__put
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: permission_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Permission Id
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PermissionUpdate'
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PermissionOut'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ delete:
+ tags:
+ - management
+ summary: Delete Permission
+ operationId: delete_permission_permissions__permission_id__delete
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: permission_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Permission Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__int__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /permissions/{permission_id}/roles:
+ get:
+ tags:
+ - management
+ summary: List Permission Roles
+ operationId: list_permission_roles_permissions__permission_id__roles_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: permission_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ title: Permission Id
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_RoleOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /relationships:
+ get:
+ tags:
+ - management
+ summary: List Relationships
+ operationId: list_relationships_relationships_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: subject_type
+ in: query
+ required: true
+ schema:
+ type: string
+ title: Subject Type
+ - name: subject_id
+ in: query
+ required: true
+ schema:
+ type: string
+ title: Subject Id
+ - name: limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ default: 50
+ title: Limit
+ - name: cursor
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Cursor
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_dict_str__str___'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ post:
+ tags:
+ - management
+ summary: Create Relationship
+ operationId: create_relationship_relationships_post
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RelationshipCreate'
+ responses:
+ '201':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_RelationshipOut_'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /audit:
+ get:
+ tags:
+ - management
+ summary: List Audit Logs
+ operationId: list_audit_logs_audit_get
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ parameters:
+ - name: limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ default: 50
+ title: Limit
+ - name: cursor
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Cursor
+ - name: user_id
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: User Id
+ - name: resource_id
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Resource Id
+ - name: decision
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Decision
+ - name: start_time
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Start Time
+ - name: end_time
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: End Time
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_list_AuditRecordOut__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /playground/evaluate:
+ post:
+ tags:
+ - playground
+ summary: Evaluate
+ operationId: evaluate_playground_evaluate_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PlaygroundEvaluateRequest'
+ required: true
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__Any__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ security:
+ - HTTPBearer: []
+ - APIKeyHeader: []
+ /dev/sample-data:
+ get:
+ tags:
+ - dev
+ summary: Get Sample Data
+ operationId: get_sample_data_dev_sample_data_get
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__object__'
+ /dev/sample-data/seed:
+ post:
+ tags:
+ - dev
+ summary: Seed Sample Data
+ operationId: seed_sample_data_dev_sample_data_seed_post
+ parameters:
+ - name: reset
+ in: query
+ required: false
+ schema:
+ type: boolean
+ description: Clear the sample dataset before reseeding it.
+ default: false
+ title: Reset
+ description: Clear the sample dataset before reseeding it.
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/SuccessResponse_dict_str__object__'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+components:
+ schemas:
+ ACLCreate:
+ properties:
+ subject_type:
+ type: string
+ title: Subject Type
+ subject_id:
+ type: string
+ title: Subject Id
+ resource_type:
+ type: string
+ title: Resource Type
+ resource_id:
+ type: string
+ title: Resource Id
+ action:
+ type: string
+ title: Action
+ effect:
+ type: string
+ title: Effect
+ type: object
+ required:
+ - subject_type
+ - subject_id
+ - resource_type
+ - resource_id
+ - action
+ - effect
+ title: ACLCreate
+ ACLOut:
+ properties:
+ subject_type:
+ type: string
+ title: Subject Type
+ subject_id:
+ type: string
+ title: Subject Id
+ resource_type:
+ type: string
+ title: Resource Type
+ resource_id:
+ type: string
+ title: Resource Id
+ action:
+ type: string
+ title: Action
+ effect:
+ type: string
+ title: Effect
+ id:
+ type: integer
+ title: Id
+ tenant_id:
+ type: integer
+ title: Tenant Id
+ created_at:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Created At
+ type: object
+ required:
+ - subject_type
+ - subject_id
+ - resource_type
+ - resource_id
+ - action
+ - effect
+ - id
+ - tenant_id
+ title: ACLOut
+ AccessDecisionResponse:
+ properties:
+ allowed:
+ type: boolean
+ title: Allowed
+ decision:
+ type: string
+ title: Decision
+ matched_policies:
+ items:
+ type: string
+ type: array
+ title: Matched Policies
+ reason:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Reason
+ policy_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Policy Id
+ explain_trace:
+ items:
+ additionalProperties: true
+ type: object
+ type: array
+ title: Explain Trace
+ revision:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Revision
+ type: object
+ required:
+ - allowed
+ - decision
+ title: AccessDecisionResponse
+ AccessRequest:
+ properties:
+ user:
+ additionalProperties: true
+ type: object
+ title: User
+ action:
+ type: string
+ title: Action
+ resource:
+ additionalProperties: true
+ type: object
+ title: Resource
+ context:
+ additionalProperties: true
+ type: object
+ title: Context
+ consistency:
+ type: string
+ title: Consistency
+ default: eventual
+ revision:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Revision
+ type: object
+ required:
+ - action
+ title: AccessRequest
+ description: Explicit authorization request passed through the API boundary.
+ AdminLoginRequest:
+ properties:
+ username:
+ type: string
+ title: Username
+ password:
+ type: string
+ title: Password
+ type: object
+ required:
+ - username
+ - password
+ title: AdminLoginRequest
+ AdminLoginResponse:
+ properties:
+ access_token:
+ type: string
+ title: Access Token
+ token_type:
+ type: string
+ title: Token Type
+ default: bearer
+ expires_in:
+ type: integer
+ title: Expires In
+ role:
+ type: string
+ title: Role
+ default: admin
+ tenant_key:
+ type: string
+ title: Tenant Key
+ type: object
+ required:
+ - access_token
+ - expires_in
+ - tenant_key
+ title: AdminLoginResponse
+ AuditRecordOut:
+ properties:
+ id:
+ type: integer
+ title: Id
+ principal_type:
+ type: string
+ title: Principal Type
+ principal_id:
+ type: string
+ title: Principal Id
+ user:
+ additionalProperties: true
+ type: object
+ title: User
+ action:
+ type: string
+ title: Action
+ resource:
+ additionalProperties: true
+ type: object
+ title: Resource
+ decision:
+ type: string
+ title: Decision
+ matched_policies:
+ items: {}
+ type: array
+ title: Matched Policies
+ reason:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Reason
+ evaluated_rules:
+ items: {}
+ type: array
+ title: Evaluated Rules
+ failed_conditions:
+ items: {}
+ type: array
+ title: Failed Conditions
+ created_at:
+ type: string
+ format: date-time
+ title: Created At
+ type: object
+ required:
+ - id
+ - principal_type
+ - principal_id
+ - user
+ - action
+ - resource
+ - decision
+ - matched_policies
+ - evaluated_rules
+ - failed_conditions
+ - created_at
+ title: AuditRecordOut
+ AuthModelCreate:
+ properties:
+ schema:
+ type: string
+ title: Schema
+ type: object
+ required:
+ - schema
+ title: AuthModelCreate
+ AuthModelOut:
+ properties:
+ id:
+ type: integer
+ title: Id
+ tenant_id:
+ type: integer
+ title: Tenant Id
+ schema:
+ type: string
+ title: Schema
+ parsed:
+ additionalProperties: true
+ type: object
+ title: Parsed
+ compiled:
+ additionalProperties: true
+ type: object
+ title: Compiled
+ type: object
+ required:
+ - id
+ - tenant_id
+ - schema
+ - parsed
+ - compiled
+ title: AuthModelOut
+ BatchAccessItem:
+ properties:
+ action:
+ type: string
+ title: Action
+ resource:
+ additionalProperties: true
+ type: object
+ title: Resource
+ type: object
+ required:
+ - action
+ title: BatchAccessItem
+ BatchAccessRequest:
+ properties:
+ user:
+ additionalProperties: true
+ type: object
+ title: User
+ items:
+ items:
+ $ref: '#/components/schemas/BatchAccessItem'
+ type: array
+ title: Items
+ consistency:
+ type: string
+ title: Consistency
+ default: eventual
+ revision:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Revision
+ type: object
+ required:
+ - items
+ title: BatchAccessRequest
+ BatchAccessResponse:
+ properties:
+ results:
+ items:
+ $ref: '#/components/schemas/BatchAccessResult'
+ type: array
+ title: Results
+ revision:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Revision
+ type: object
+ required:
+ - results
+ title: BatchAccessResponse
+ BatchAccessResult:
+ properties:
+ action:
+ type: string
+ title: Action
+ allowed:
+ type: boolean
+ title: Allowed
+ revision:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Revision
+ type: object
+ required:
+ - action
+ - allowed
+ title: BatchAccessResult
+ HTTPValidationError:
+ properties:
+ detail:
+ items:
+ $ref: '#/components/schemas/ValidationError'
+ type: array
+ title: Detail
+ type: object
+ title: HTTPValidationError
+ ImpactAnalysisRequest:
+ properties:
+ policy_change:
+ type: string
+ title: Policy Change
+ type: object
+ required:
+ - policy_change
+ title: ImpactAnalysisRequest
+ ImpactAnalysisResponse:
+ properties:
+ gained_access:
+ items:
+ type: integer
+ type: array
+ title: Gained Access
+ lost_access:
+ items:
+ type: integer
+ type: array
+ title: Lost Access
+ type: object
+ title: ImpactAnalysisResponse
+ MetaBody:
+ properties:
+ request_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Request Id
+ limit:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Limit
+ next_cursor:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Next Cursor
+ extra:
+ additionalProperties: true
+ type: object
+ title: Extra
+ type: object
+ title: MetaBody
+ PermissionCreate:
+ properties:
+ action:
+ type: string
+ title: Action
+ type: object
+ required:
+ - action
+ title: PermissionCreate
+ PermissionOut:
+ properties:
+ id:
+ type: integer
+ title: Id
+ action:
+ type: string
+ title: Action
+ type: object
+ required:
+ - id
+ - action
+ title: PermissionOut
+ PermissionUpdate:
+ properties:
+ action:
+ type: string
+ title: Action
+ type: object
+ required:
+ - action
+ title: PermissionUpdate
+ PlaygroundEvaluateRequest:
+ properties:
+ policies:
+ items:
+ $ref: '#/components/schemas/PlaygroundPolicy'
+ type: array
+ title: Policies
+ input:
+ $ref: '#/components/schemas/PlaygroundInput'
+ type: object
+ required:
+ - policies
+ - input
+ title: PlaygroundEvaluateRequest
+ PlaygroundInput:
+ properties:
+ user:
+ additionalProperties: true
+ type: object
+ title: User
+ resource:
+ additionalProperties: true
+ type: object
+ title: Resource
+ action:
+ type: string
+ title: Action
+ default: ''
+ context:
+ additionalProperties: true
+ type: object
+ title: Context
+ type: object
+ title: PlaygroundInput
+ PlaygroundPolicy:
+ properties:
+ action:
+ type: string
+ title: Action
+ effect:
+ type: string
+ title: Effect
+ default: allow
+ priority:
+ type: integer
+ title: Priority
+ default: 100
+ policy_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Policy Id
+ conditions:
+ additionalProperties: true
+ type: object
+ title: Conditions
+ type: object
+ required:
+ - action
+ title: PlaygroundPolicy
+ PolicyCreate:
+ properties:
+ action:
+ type: string
+ title: Action
+ effect:
+ type: string
+ title: Effect
+ default: allow
+ priority:
+ type: integer
+ title: Priority
+ default: 100
+ conditions:
+ additionalProperties: true
+ type: object
+ title: Conditions
+ type: object
+ required:
+ - action
+ title: PolicyCreate
+ PolicyOut:
+ properties:
+ id:
+ type: integer
+ title: Id
+ action:
+ type: string
+ title: Action
+ effect:
+ type: string
+ title: Effect
+ priority:
+ type: integer
+ title: Priority
+ conditions:
+ additionalProperties: true
+ type: object
+ title: Conditions
+ type: object
+ required:
+ - id
+ - action
+ - effect
+ - priority
+ - conditions
+ title: PolicyOut
+ PolicySimulationInput:
+ properties:
+ policy_change:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Policy Change
+ relationship_change:
+ anyOf:
+ - additionalProperties: true
+ type: object
+ - type: 'null'
+ title: Relationship Change
+ role_change:
+ anyOf:
+ - additionalProperties: true
+ type: object
+ - type: 'null'
+ title: Role Change
+ type: object
+ title: PolicySimulationInput
+ PolicySimulationRequest:
+ properties:
+ simulate:
+ $ref: '#/components/schemas/PolicySimulationInput'
+ request:
+ additionalProperties: true
+ type: object
+ title: Request
+ type: object
+ title: PolicySimulationRequest
+ PolicySimulationResponse:
+ properties:
+ decision_before:
+ additionalProperties: true
+ type: object
+ title: Decision Before
+ decision_after:
+ additionalProperties: true
+ type: object
+ title: Decision After
+ type: object
+ required:
+ - decision_before
+ - decision_after
+ title: PolicySimulationResponse
+ RelationshipCreate:
+ properties:
+ subject_type:
+ type: string
+ title: Subject Type
+ subject_id:
+ type: string
+ title: Subject Id
+ relation:
+ type: string
+ title: Relation
+ object_type:
+ type: string
+ title: Object Type
+ object_id:
+ type: string
+ title: Object Id
+ type: object
+ required:
+ - subject_type
+ - subject_id
+ - relation
+ - object_type
+ - object_id
+ title: RelationshipCreate
+ RelationshipOut:
+ properties:
+ subject_type:
+ type: string
+ title: Subject Type
+ subject_id:
+ type: string
+ title: Subject Id
+ relation:
+ type: string
+ title: Relation
+ object_type:
+ type: string
+ title: Object Type
+ object_id:
+ type: string
+ title: Object Id
+ id:
+ type: integer
+ title: Id
+ type: object
+ required:
+ - subject_type
+ - subject_id
+ - relation
+ - object_type
+ - object_id
+ - id
+ title: RelationshipOut
+ RoleCreate:
+ properties:
+ name:
+ type: string
+ title: Name
+ type: object
+ required:
+ - name
+ title: RoleCreate
+ RoleOut:
+ properties:
+ id:
+ type: integer
+ title: Id
+ name:
+ type: string
+ title: Name
+ type: object
+ required:
+ - id
+ - name
+ title: RoleOut
+ RoleUpdate:
+ properties:
+ name:
+ type: string
+ title: Name
+ type: object
+ required:
+ - name
+ title: RoleUpdate
+ SimulationResponse:
+ properties:
+ decision:
+ type: string
+ title: Decision
+ matched_policies:
+ items:
+ type: string
+ type: array
+ title: Matched Policies
+ reason:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Reason
+ policy_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Policy Id
+ explain_trace:
+ items:
+ additionalProperties: true
+ type: object
+ type: array
+ title: Explain Trace
+ failed_conditions:
+ items:
+ type: string
+ type: array
+ title: Failed Conditions
+ revision:
+ anyOf:
+ - type: integer
+ - type: 'null'
+ title: Revision
+ type: object
+ required:
+ - decision
+ - matched_policies
+ title: SimulationResponse
+ SuccessResponse_ACLOut_:
+ properties:
+ data:
+ $ref: '#/components/schemas/ACLOut'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[ACLOut]
+ SuccessResponse_AccessDecisionResponse_:
+ properties:
+ data:
+ $ref: '#/components/schemas/AccessDecisionResponse'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[AccessDecisionResponse]
+ SuccessResponse_AdminLoginResponse_:
+ properties:
+ data:
+ $ref: '#/components/schemas/AdminLoginResponse'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[AdminLoginResponse]
+ SuccessResponse_AuthModelOut_:
+ properties:
+ data:
+ $ref: '#/components/schemas/AuthModelOut'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[AuthModelOut]
+ SuccessResponse_BatchAccessResponse_:
+ properties:
+ data:
+ $ref: '#/components/schemas/BatchAccessResponse'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[BatchAccessResponse]
+ SuccessResponse_ImpactAnalysisResponse_:
+ properties:
+ data:
+ $ref: '#/components/schemas/ImpactAnalysisResponse'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[ImpactAnalysisResponse]
+ SuccessResponse_PermissionOut_:
+ properties:
+ data:
+ $ref: '#/components/schemas/PermissionOut'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[PermissionOut]
+ SuccessResponse_PolicyOut_:
+ properties:
+ data:
+ $ref: '#/components/schemas/PolicyOut'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[PolicyOut]
+ SuccessResponse_PolicySimulationResponse_:
+ properties:
+ data:
+ $ref: '#/components/schemas/PolicySimulationResponse'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[PolicySimulationResponse]
+ SuccessResponse_RelationshipOut_:
+ properties:
+ data:
+ $ref: '#/components/schemas/RelationshipOut'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[RelationshipOut]
+ SuccessResponse_SimulationResponse_:
+ properties:
+ data:
+ $ref: '#/components/schemas/SimulationResponse'
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[SimulationResponse]
+ SuccessResponse_dict_str__Any__:
+ properties:
+ data:
+ additionalProperties: true
+ type: object
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[dict[str, Any]]
+ SuccessResponse_dict_str__Union_int__str___:
+ properties:
+ data:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ type: object
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[dict[str, Union[int, str]]]
+ SuccessResponse_dict_str__int__:
+ properties:
+ data:
+ additionalProperties:
+ type: integer
+ type: object
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[dict[str, int]]
+ SuccessResponse_dict_str__object__:
+ properties:
+ data:
+ additionalProperties: true
+ type: object
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[dict[str, object]]
+ SuccessResponse_dict_str__str__:
+ properties:
+ data:
+ additionalProperties:
+ type: string
+ type: object
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[dict[str, str]]
+ SuccessResponse_list_ACLOut__:
+ properties:
+ data:
+ items:
+ $ref: '#/components/schemas/ACLOut'
+ type: array
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[list[ACLOut]]
+ SuccessResponse_list_AuditRecordOut__:
+ properties:
+ data:
+ items:
+ $ref: '#/components/schemas/AuditRecordOut'
+ type: array
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[list[AuditRecordOut]]
+ SuccessResponse_list_PermissionOut__:
+ properties:
+ data:
+ items:
+ $ref: '#/components/schemas/PermissionOut'
+ type: array
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[list[PermissionOut]]
+ SuccessResponse_list_PolicyOut__:
+ properties:
+ data:
+ items:
+ $ref: '#/components/schemas/PolicyOut'
+ type: array
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[list[PolicyOut]]
+ SuccessResponse_list_RoleOut__:
+ properties:
+ data:
+ items:
+ $ref: '#/components/schemas/RoleOut'
+ type: array
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[list[RoleOut]]
+ SuccessResponse_list_dict_str__str___:
+ properties:
+ data:
+ items:
+ additionalProperties:
+ type: string
+ type: object
+ type: array
+ title: Data
+ meta:
+ $ref: '#/components/schemas/MetaBody'
+ error:
+ type: 'null'
+ title: Error
+ type: object
+ required:
+ - data
+ title: SuccessResponse[list[dict[str, str]]]
+ ValidationError:
+ properties:
+ loc:
+ items:
+ anyOf:
+ - type: string
+ - type: integer
+ type: array
+ title: Location
+ msg:
+ type: string
+ title: Message
+ type:
+ type: string
+ title: Error Type
+ input:
+ title: Input
+ ctx:
+ type: object
+ title: Context
+ type: object
+ required:
+ - loc
+ - msg
+ - type
+ title: ValidationError
+ securitySchemes:
+ HTTPBearer:
+ type: http
+ scheme: bearer
+ APIKeyHeader:
+ type: apiKey
+ in: header
+ name: X-API-Key
diff --git a/data/imgs/icon.png b/data/imgs/icon.png
new file mode 100644
index 0000000..7e3237b
Binary files /dev/null and b/data/imgs/icon.png differ
diff --git a/data/imgs/logo.png b/data/imgs/logo.png
new file mode 100644
index 0000000..2c8f5b2
Binary files /dev/null and b/data/imgs/logo.png differ
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
new file mode 100644
index 0000000..b262470
--- /dev/null
+++ b/docker-compose.dev.yml
@@ -0,0 +1,105 @@
+name: keynetra-dev
+
+services:
+ keynetra:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ restart: unless-stopped
+ working_dir: /app
+ command: >
+ uvicorn keynetra.api.main:app
+ --host 0.0.0.0
+ --port 8000
+ --reload
+ --proxy-headers
+ --forwarded-allow-ips "*"
+ ports:
+ - "8000:8000"
+ volumes:
+ - .:/app
+ environment:
+ KEYNETRA_ENVIRONMENT: development
+ KEYNETRA_DEBUG: "true"
+ KEYNETRA_DATABASE_URL: ${KEYNETRA_DATABASE_URL:-postgresql+psycopg://keynetra:keynetra@postgres:5432/keynetra}
+ KEYNETRA_REDIS_URL: ${KEYNETRA_REDIS_URL:-redis://redis:6379/0}
+ KEYNETRA_API_KEYS: ${KEYNETRA_API_KEYS:-devkey}
+ KEYNETRA_JWT_SECRET: ${KEYNETRA_JWT_SECRET:-change-me}
+ KEYNETRA_ADMIN_USERNAME: ${KEYNETRA_ADMIN_USERNAME:-admin}
+ KEYNETRA_ADMIN_PASSWORD: ${KEYNETRA_ADMIN_PASSWORD:-admin123}
+ KEYNETRA_SERVICE_MODE: ${KEYNETRA_SERVICE_MODE:-all}
+ KEYNETRA_AUTO_SEED_SAMPLE_DATA: ${KEYNETRA_AUTO_SEED_SAMPLE_DATA:-1}
+ KEYNETRA_RATE_LIMIT_PER_MINUTE: ${KEYNETRA_RATE_LIMIT_PER_MINUTE:-120}
+ KEYNETRA_RATE_LIMIT_BURST: ${KEYNETRA_RATE_LIMIT_BURST:-120}
+ KEYNETRA_RUN_MIGRATIONS: ${KEYNETRA_RUN_MIGRATIONS:-1}
+ depends_on:
+ postgres:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+
+ postgres:
+ image: postgres:16
+ restart: unless-stopped
+ environment:
+ POSTGRES_USER: ${KEYNETRA_POSTGRES_USER:-keynetra}
+ POSTGRES_PASSWORD: ${KEYNETRA_POSTGRES_PASSWORD:-keynetra}
+ POSTGRES_DB: ${KEYNETRA_POSTGRES_DB:-keynetra}
+ ports:
+ - "5432:5432"
+ volumes:
+ - postgres_data:/var/lib/postgresql/data
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ restart: unless-stopped
+ ports:
+ - "6379:6379"
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ prometheus:
+ image: prom/prometheus:v2.55.0
+ restart: unless-stopped
+ command:
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --storage.tsdb.path=/prometheus
+ - --web.enable-lifecycle
+ ports:
+ - "9090:9090"
+ volumes:
+ - ./infra/docker/monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
+ - prometheus_data:/prometheus
+ depends_on:
+ keynetra:
+ condition: service_started
+
+ grafana:
+ image: grafana/grafana:11.2.0
+ restart: unless-stopped
+ environment:
+ GF_SECURITY_ADMIN_USER: ${KEYNETRA_GRAFANA_USER:-admin}
+ GF_SECURITY_ADMIN_PASSWORD: ${KEYNETRA_GRAFANA_PASSWORD:-admin}
+ GF_USERS_ALLOW_SIGN_UP: "false"
+ ports:
+ - "3000:3000"
+ volumes:
+ - grafana_data:/var/lib/grafana
+ - ./infra/docker/monitoring/grafana/provisioning:/etc/grafana/provisioning:ro
+ - ./infra/docker/monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
+ depends_on:
+ prometheus:
+ condition: service_started
+
+volumes:
+ postgres_data:
+ prometheus_data:
+ grafana_data:
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..cbfb33b
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,102 @@
+name: keynetra
+
+x-keynetra-common: &keynetra-common
+ build:
+ context: .
+ dockerfile: Dockerfile
+ restart: unless-stopped
+ environment:
+ KEYNETRA_DATABASE_URL: ${KEYNETRA_DATABASE_URL:-postgresql+psycopg://keynetra:keynetra@postgres:5432/keynetra}
+ KEYNETRA_REDIS_URL: ${KEYNETRA_REDIS_URL:-redis://redis:6379/0}
+ KEYNETRA_API_KEYS: ${KEYNETRA_API_KEYS:-devkey}
+ KEYNETRA_JWT_SECRET: ${KEYNETRA_JWT_SECRET:-change-me}
+ KEYNETRA_ADMIN_USERNAME: ${KEYNETRA_ADMIN_USERNAME:-admin}
+ KEYNETRA_ADMIN_PASSWORD: ${KEYNETRA_ADMIN_PASSWORD:-admin123}
+ KEYNETRA_RATE_LIMIT_PER_MINUTE: ${KEYNETRA_RATE_LIMIT_PER_MINUTE:-120}
+ KEYNETRA_RATE_LIMIT_BURST: ${KEYNETRA_RATE_LIMIT_BURST:-120}
+ KEYNETRA_RUN_MIGRATIONS: ${KEYNETRA_RUN_MIGRATIONS:-1}
+ KEYNETRA_AUTO_SEED_SAMPLE_DATA: ${KEYNETRA_AUTO_SEED_SAMPLE_DATA:-1}
+ depends_on:
+ postgres:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+
+services:
+ # Production/default API service.
+ keynetra:
+ <<: *keynetra-common
+ ports:
+ - "8000:8000"
+ healthcheck:
+ test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health/ready', timeout=3)"]
+ interval: 30s
+ timeout: 5s
+ retries: 5
+ start_period: 20s
+
+ postgres:
+ image: postgres:16
+ restart: unless-stopped
+ environment:
+ POSTGRES_USER: ${KEYNETRA_POSTGRES_USER:-keynetra}
+ POSTGRES_PASSWORD: ${KEYNETRA_POSTGRES_PASSWORD:-keynetra}
+ POSTGRES_DB: ${KEYNETRA_POSTGRES_DB:-keynetra}
+ ports:
+ - "5432:5432"
+ volumes:
+ - postgres_data:/var/lib/postgresql/data
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ restart: unless-stopped
+ ports:
+ - "6379:6379"
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ prometheus:
+ image: prom/prometheus:v2.55.0
+ restart: unless-stopped
+ command:
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --storage.tsdb.path=/prometheus
+ - --web.enable-lifecycle
+ ports:
+ - "9090:9090"
+ volumes:
+ - ./infra/docker/monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
+ - prometheus_data:/prometheus
+ depends_on:
+ keynetra:
+ condition: service_started
+
+ grafana:
+ image: grafana/grafana:11.2.0
+ restart: unless-stopped
+ environment:
+ GF_SECURITY_ADMIN_USER: ${KEYNETRA_GRAFANA_USER:-admin}
+ GF_SECURITY_ADMIN_PASSWORD: ${KEYNETRA_GRAFANA_PASSWORD:-admin}
+ GF_USERS_ALLOW_SIGN_UP: "false"
+ ports:
+ - "3000:3000"
+ volumes:
+ - grafana_data:/var/lib/grafana
+ - ./infra/docker/monitoring/grafana/provisioning:/etc/grafana/provisioning:ro
+ - ./infra/docker/monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
+ depends_on:
+ prometheus:
+ condition: service_started
+
+volumes:
+ postgres_data:
+ prometheus_data:
+ grafana_data:
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..9c361b0
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,69 @@
+# KeyNetra Documentation
+
+This documentation set is organized like an OSS project handbook: quick onboarding, architecture references, operations runbooks, and executable examples.
+
+## Recommended Reading Order
+
+1. [Project Overview](getting-started/overview.md)
+2. [Installation](getting-started/installation.md)
+3. [Quickstart](getting-started/quickstart.md)
+4. [Example Files](examples/example-files.md)
+5. [API Reference](reference/api-reference.md)
+
+## Documentation Map
+
+Getting Started:
+
+- [Overview](getting-started/overview.md)
+- [Installation](getting-started/installation.md)
+- [Quickstart](getting-started/quickstart.md)
+- [Runtime Modes](getting-started/runtime-modes.md)
+
+Examples:
+
+- [Example Files](examples/example-files.md)
+- [End-to-End API Flow](examples/end-to-end-api-flow.md)
+- [CLI Workflows](examples/cli-workflows.md)
+- [Policy Patterns](examples/policy-patterns.md)
+
+Core Concepts:
+
+- [Authorization Models](core-concepts/authorization-models.md)
+- [Request Evaluation Lifecycle](core-concepts/request-evaluation-lifecycle.md)
+- [Consistency and Revisions](core-concepts/consistency-and-revisions.md)
+
+Architecture:
+
+- [System Architecture](architecture/system-architecture.md)
+- [Authorization Pipeline](architecture/authorization-pipeline.md)
+- [Caching and Consistency](architecture/caching-and-consistency.md)
+- [Data Models](architecture/data-models.md)
+
+Reference:
+
+- [API Reference](reference/api-reference.md)
+- [CLI Reference](reference/cli-reference.md)
+- [Configuration Files](reference/configuration-files.md)
+- [Environment Variables](reference/environment-variables.md)
+- [Policy File Formats](reference/policy-files.md)
+- [Authorization Model Files](reference/auth-model-files.md)
+
+Operations:
+
+- [Docker Deployment](operations/deployment-docker.md)
+- [Kubernetes Deployment](operations/deployment-kubernetes.md)
+- [Observability](operations/observability.md)
+- [Security](operations/security.md)
+- [Troubleshooting](operations/troubleshooting.md)
+
+Development:
+
+- [Local Development](development/local-development.md)
+- [Migrations](development/migrations.md)
+- [Testing](development/testing.md)
+- [CI/CD and Release](development/ci-cd-release.md)
+- [Contributing](development/contributing.md)
+
+## Source of Truth
+
+When documentation and code diverge, use the implementation in `keynetra/` and the contracts in `contracts/openapi/` as the source of truth.
diff --git a/docs/api-endpoints.md b/docs/api-endpoints.md
new file mode 100644
index 0000000..afe29ee
--- /dev/null
+++ b/docs/api-endpoints.md
@@ -0,0 +1,344 @@
+# API Endpoints (Beginner Guide)
+
+All endpoints below are active in this repository and are the primary integration surface.
+
+Base URL:
+
+- `http://localhost:8000`
+
+Auth header:
+
+- `X-API-Key: <your-api-key>` (e.g. `devkey` when using the setup below)
+
+Example setup:
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve
+```
+
+---
+
+## POST /check-access
+
+Purpose:
+
+- Evaluate one authorization request and return allow/deny with explanation.
+
+Code path:
+
+- Route: `keynetra/api/routes/access.py::check_access`
+- Service call: `AuthorizationService.authorize(...)`
+- Engine call: `KeyNetraEngine.decide(...)`
+
+Request body:
+
+```json
+{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000},
+ "context": {"department": "finance"},
+ "consistency": "eventual",
+ "revision": null
+}
+```
+
+Example request:
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000},
+ "context": {"department": "finance"}
+ }' | jq .
+```
+
+Example response:
+
+```json
+{
+ "data": {
+ "allowed": true,
+ "decision": "allow",
+ "matched_policies": ["rbac:permissions"],
+ "reason": "explicit permission grant",
+ "policy_id": "rbac:permissions",
+ "explain_trace": [],
+ "revision": 1
+ },
+ "meta": {"request_id": "...", "limit": null, "next_cursor": null, "extra": {}},
+ "error": null
+}
+```
+
+Common use cases:
+
+- Check access before serving a protected API
+- Add audit trail context for allow/deny decisions
+- Return explanation details to internal admin tools
+
+---
+
+## POST /check-access-batch
+
+Purpose:
+
+- Evaluate multiple actions/resources for the same user in one call.
+
+Code path:
+
+- Route: `keynetra/api/routes/access.py::check_access_batch`
+- Service call: `AuthorizationService.authorize_batch(...)`
+- Engine call per item: `KeyNetraEngine.decide(...)`
+
+Request body:
+
+```json
+{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "items": [
+ {"action": "approve_payment", "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000}},
+ {"action": "delete", "resource": {"resource_type": "payment", "resource_id": "pay-900"}}
+ ],
+ "consistency": "eventual",
+ "revision": null
+}
+```
+
+Example request:
+
+```bash
+curl -s -X POST http://localhost:8000/check-access-batch \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "items": [
+ {"action": "approve_payment", "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000}},
+ {"action": "delete", "resource": {"resource_type": "payment", "resource_id": "pay-900"}}
+ ]
+ }' | jq .
+```
+
+Example response:
+
+```json
+{
+ "data": {
+ "results": [
+ {"action": "approve_payment", "allowed": true, "revision": 1},
+ {"action": "delete", "allowed": false, "revision": 1}
+ ],
+ "revision": 1
+ },
+ "meta": {"request_id": "...", "limit": null, "next_cursor": null, "extra": {}},
+ "error": null
+}
+```
+
+Common use cases:
+
+- Render UI permissions for many buttons/tabs at once
+- Reduce network calls from gateway/backend-for-frontend
+
+---
+
+## POST /simulate
+
+Purpose:
+
+- Run a non-persisted decision with full trace and failed conditions.
+
+Code path:
+
+- Route: `keynetra/api/routes/access.py::simulate`
+- Service call: `AuthorizationService.simulate(...)`
+- Internally uses `authorize(...)` with standard evaluation pipeline
+
+Request body:
+
+- Same shape as `/check-access`
+
+Example request:
+
+```bash
+curl -s -X POST http://localhost:8000/simulate \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "manager-1", "role": "manager"},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 120000},
+ "context": {"department": "finance"}
+ }' | jq .
+```
+
+Example response:
+
+```json
+{
+ "data": {
+ "decision": "deny",
+ "matched_policies": [],
+ "reason": "default deny",
+ "policy_id": null,
+ "explain_trace": [],
+ "failed_conditions": ["max_amount"],
+ "revision": 1
+ },
+ "meta": {"request_id": "...", "limit": null, "next_cursor": null, "extra": {}},
+ "error": null
+}
+```
+
+Common use cases:
+
+- Debug policy behavior without changing state
+- Build policy authoring tools with explainability
+
+---
+
+## POST /simulate-policy
+
+Purpose:
+
+- Compare decision before and after a proposed policy change.
+
+Code path:
+
+- Route: `keynetra/api/routes/simulation.py::simulate_policy`
+- Simulator: `PolicySimulator.simulate_policy_change(...)`
+- DSL parser: `keynetra/services/policy_dsl.py::dsl_to_policy`
+
+Note:
+
+- Requires a management role (`viewer` or higher). API key auth works as admin in this repo.
+
+Request body:
+
+```json
+{
+ "simulate": {
+ "policy_change": "allow:\n action: share_document\n priority: 1\n policy_key: share-admin\n when:\n role: admin"
+ },
+ "request": {
+ "user": {"id": "root-admin", "role": "admin", "roles": ["admin"]},
+ "action": "share_document",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }
+}
+```
+
+Example request:
+
+```bash
+curl -s -X POST http://localhost:8000/simulate-policy \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "simulate": {
+ "policy_change": "allow:\n action: share_document\n priority: 1\n policy_key: share-admin\n when:\n role: admin"
+ },
+ "request": {
+ "user": {"id": "root-admin", "role": "admin", "roles": ["admin"]},
+ "action": "share_document",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }
+ }' | jq .
+```
+
+Example response:
+
+```json
+{
+ "data": {
+ "decision_before": {
+ "allowed": false,
+ "decision": "deny",
+ "reason": "no matching policy",
+ "policy_id": null
+ },
+ "decision_after": {
+ "allowed": true,
+ "decision": "allow",
+ "reason": "policy change grants access",
+ "policy_id": "share-admin"
+ }
+ },
+ "meta": {"request_id": "...", "limit": null, "next_cursor": null, "extra": {}},
+ "error": null
+}
+```
+
+Common use cases:
+
+- Review policy change impact during PRs
+- Safety-check production policy updates
+
+---
+
+## POST /impact-analysis
+
+Purpose:
+
+- Estimate which users gain or lose access from a proposed policy change.
+
+Code path:
+
+- Route: `keynetra/api/routes/simulation.py::impact_analysis`
+- Analyzer: `ImpactAnalyzer.analyze_policy_change(...)`
+- Compares `before_engine` and `after_engine` decisions per user/resource candidate
+
+Request body:
+
+```json
+{
+ "policy_change": "deny:\n action: export_payment\n priority: 1\n policy_key: deny-export-contractors\n when:\n role: external"
+}
+```
+
+Example request:
+
+```bash
+curl -s -X POST http://localhost:8000/impact-analysis \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "policy_change": "deny:\n action: export_payment\n priority: 1\n policy_key: deny-export-contractors\n when:\n role: external"
+ }' | jq .
+```
+
+Example response:
+
+```json
+{
+ "data": {
+ "gained_access": [101, 204],
+ "lost_access": [302]
+ },
+ "meta": {"request_id": "...", "limit": null, "next_cursor": null, "extra": {}},
+ "error": null
+}
+```
+
+Common use cases:
+
+- Change approvals for security/governance
+- Alerting on high-impact policy changes
+
+---
+
+## Errors to expect
+
+- `401 unauthorized`: missing/invalid API key or token
+- `403 forbidden`: principal lacks required management role
+- `422 validation_error`: payload format or values are invalid
+- `429 too_many_requests`: rate limit exceeded
+- `500 database_error`: storage issue
diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 0000000..67df62f
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,51 @@
+# Architecture Guide
+
+This page explains how an authorization request flows through KeyNetra.
+
+## Core components
+
+- API layer: FastAPI routes in `keynetra/api/routes/`
+- Service layer: orchestration in `keynetra/services/`
+- Engine layer: policy evaluation in `keynetra/engine/`
+- Data layer: repositories in `keynetra/infrastructure/repositories/`
+- Cache layer: in-memory/Redis caches in `keynetra/infrastructure/cache/`
+- Observability: metrics/logging/audit support
+
+## Request flow
+
+```text
+Client request
+ -> API auth (API key or JWT)
+ -> Request validation
+ -> AuthorizationService.authorize(...)
+ -> Load policies / relationships / ACL / model graph
+ -> Evaluate decision (RBAC/ABAC/ACL/ReBAC)
+ -> Build explain_trace + reason + policy_id
+ -> Write audit / update metrics
+ -> Return response envelope
+```
+
+## Policy evaluation flow (simplified)
+
+1. Read request (`user`, `action`, `resource`, `context`)
+2. Evaluate explicit allows/denies (policies and ACL where applicable)
+3. Evaluate relationship-based grants (ReBAC model graph)
+4. Apply priority and first-match logic
+5. If no policy matches, fall back to deny by default
+6. Return `allow` or `deny`
+
+## Consistency and revision tokens
+
+Responses include `revision` values.
+Use revision tokens when you need stronger consistency between write and read operations.
+
+## Caching behavior
+
+KeyNetra uses cache adapters to reduce repeated policy and relationship lookups.
+When policies or relationships change, namespaces/entries are invalidated.
+
+## Where to read next
+
+- [API Endpoints](api-endpoints.md)
+- [Policy Guide](policies.md)
+- [Best Practices](best-practices.md)
diff --git a/docs/architecture/authorization-pipeline.md b/docs/architecture/authorization-pipeline.md
new file mode 100644
index 0000000..e609e69
--- /dev/null
+++ b/docs/architecture/authorization-pipeline.md
@@ -0,0 +1,89 @@
+---
+title: Authorization Pipeline
+---
+
+# Authorization Pipeline
+
+`KeyNetraEngine` evaluates authorization in deterministic order.
+
+Source of truth:
+
+- `keynetra/engine/keynetra_engine.py`
+
+## Evaluation Order
+
+1. Direct user permissions
+2. ACL checks
+3. RBAC role permissions
+4. Relationship index checks
+5. Schema permission graph checks
+6. Compiled policy graph evaluation
+7. Default deny
+
+This order is fixed by the engine implementation and matters when multiple models can match the same request.
+
+## Input Contract
+
+Engine accepts an explicit `AuthorizationInput` object:
+
+- `user`
+- `action`
+- `resource`
+- `context`
+- hydrated ACL/relationship/index/model graph fields from service layer
+
+No hidden data fetch occurs inside the engine.
+
+The service layer pre-hydrates policy data, relationships, ACL data, and optional compiled model graphs before the engine runs.
+
+## Decision Output
+
+`AuthorizationDecision` includes:
+
+- `allowed`
+- `decision` (`allow` or `deny`)
+- `reason`
+- `policy_id`
+- `matched_policies`
+- `failed_conditions`
+- `explain_trace`
+
+`explain_trace` is designed for debugging and auditability of decision paths.
+
+## Service Responsibilities
+
+Service constructs full input and handles:
+
+- policy retrieval and compilation lookup
+- relationship and ACL hydration
+- decision caching
+- revision-aware consistency
+- audit writes
+
+Primary file:
+
+- `keynetra/services/authorization.py`
+
+## Example Decision Call
+
+```python
+from keynetra.engine import KeyNetraEngine
+
+engine = KeyNetraEngine([
+ {"action": "read", "effect": "allow", "priority": 10, "conditions": {"role": "admin"}}
+])
+
+decision = engine.decide(
+    user="user:123",
+    action="read",
+    resource="document:abc",
+    context={"role": "admin"},
+)
+```
+
+## Related Pages
+
+- [Data Models and Storage](data-models.md)
+- [Caching and Consistency](caching-and-consistency.md)
+- [Policy File Formats](../reference/policy-files.md)
+- [Authorization Model Files](../reference/auth-model-files.md)
diff --git a/docs/architecture/caching-and-consistency.md b/docs/architecture/caching-and-consistency.md
new file mode 100644
index 0000000..7a66585
--- /dev/null
+++ b/docs/architecture/caching-and-consistency.md
@@ -0,0 +1,63 @@
+---
+title: Caching and Consistency
+---
+
+# Caching and Consistency
+
+KeyNetra uses layered caching with Redis backend and in-memory fallback.
+
+Caching is implemented per concern (policy, decision, ACL, relationship, and access index) to reduce latency while preserving deterministic decisions.
+
+## Cache Layers
+
+- Policy cache: `keynetra/infrastructure/cache/policy_cache.py`
+- Relationship cache: `keynetra/infrastructure/cache/relationship_cache.py`
+- Decision cache: `keynetra/infrastructure/cache/decision_cache.py`
+- ACL cache: `keynetra/infrastructure/cache/acl_cache.py`
+- Access index cache: `keynetra/infrastructure/cache/access_index_cache.py`
+
+Backend abstraction:
+
+- `keynetra/infrastructure/cache/backends.py`
+
+If Redis is unavailable, KeyNetra falls back to shared in-process, in-memory cache adapters.
+
+## Invalidation Model
+
+- Tenant namespace bump for decision cache invalidation.
+- Resource/subject scoped invalidation for ACL and relationship changes.
+- Policy updates invalidate policy cache and publish distribution events.
+
+This keeps cache behavior predictable across policy and relationship mutations.
+
+## Policy Distribution
+
+Redis pub/sub channel is used for policy update fan-out:
+
+- Event publisher: `keynetra/infrastructure/cache/policy_distribution.py`
+- Subscriber startup: `keynetra/api/main.py` (`_start_policy_subscriber`)
+- Channel config: `KEYNETRA_POLICY_EVENTS_CHANNEL`
+
+## Consistency Controls
+
+Access requests support consistency modes and revisions:
+
+- eventual cached reads (default)
+- fully consistent bypass behavior where configured in service
+- revision-driven keying in decision cache
+
+Implementation references:
+
+- `keynetra/services/authorization.py`
+- `keynetra/services/revisions.py`
+
+## Operational Notes
+
+- For horizontally scaled deployments, configure Redis to share cache and policy events.
+- For local development, in-memory fallback works without Redis.
+
+## Related Pages
+
+- [Authorization Pipeline](authorization-pipeline.md)
+- [Observability](../operations/observability.md)
+- [Troubleshooting](../operations/troubleshooting.md)
diff --git a/docs/architecture/data-models.md b/docs/architecture/data-models.md
new file mode 100644
index 0000000..6949a15
--- /dev/null
+++ b/docs/architecture/data-models.md
@@ -0,0 +1,59 @@
+---
+title: Data Models and Storage
+---
+
+# Data Models and Storage
+
+KeyNetra persists state in relational tables with Alembic migration control.
+
+This page maps high-level authorization concepts to concrete database tables.
+
+## Core Tables
+
+Defined in `keynetra/domain/models/`:
+
+- `tenant.py`: `tenants`
+- `rbac.py`: `users`, `roles`, `permissions`, `user_roles`, `role_permissions`
+- `relationship.py`: `relationships`
+- `acl.py`: `resource_acl`
+- `policy_versioning.py`: `policies`, `policy_versions`
+- `auth_model.py`: `auth_models`
+- `audit.py`: `audit_logs`
+- `idempotency.py`: `idempotency_records`
+
+## Concept to Table Mapping
+
+- Tenancy and revisions: `tenants`
+- RBAC: `users`, `roles`, `permissions`, `user_roles`, `role_permissions`
+- ReBAC edges: `relationships`
+- ACL rules: `resource_acl`
+- Policy history: `policies`, `policy_versions`
+- Schema modeling: `auth_models`
+- Decision audit: `audit_logs`
+- Idempotent write replay: `idempotency_records`
+
+## Migration System
+
+- Alembic config: `alembic.ini`
+- Runtime env: `alembic/env.py`
+- Revisions: `alembic/versions/*.py`
+
+Current revision history includes baseline plus tenant policy versioning, relationships, ACL, auth model, audit explainability, and idempotency support.
+
+See [Migrations](../development/migrations.md) for execution details.
+
+## Repository Pattern
+
+Storage access is routed through repository implementations in:
+
+- `keynetra/infrastructure/repositories/`
+
+Services use protocol interfaces from:
+
+- `keynetra/services/interfaces.py`
+
+## Related Pages
+
+- [Migrations](../development/migrations.md)
+- [API Reference](../reference/api-reference.md)
+- [Authorization Pipeline](authorization-pipeline.md)
diff --git a/docs/architecture/system-architecture.md b/docs/architecture/system-architecture.md
new file mode 100644
index 0000000..82f1198
--- /dev/null
+++ b/docs/architecture/system-architecture.md
@@ -0,0 +1,84 @@
+---
+title: System Architecture
+---
+
+# System Architecture
+
+KeyNetra follows a layered architecture with strict boundary control.
+
+## Layers
+
+Key principle: the engine layer remains pure and deterministic, while side effects stay in service/infrastructure layers.
+
+## Engine Layer
+
+- Location: `keynetra/engine/`
+- Contains deterministic authorization logic.
+- No DB, cache, HTTP, or external state access.
+
+Primary engine implementation:
+
+- `keynetra/engine/keynetra_engine.py`
+- `keynetra/engine/compiled/`
+- `keynetra/engine/model_graph/`
+
+## Service Layer
+
+- Location: `keynetra/services/`
+- Orchestrates repositories, cache, revision consistency, and resilience.
+
+Main orchestrator:
+
+- `keynetra/services/authorization.py`
+
+## Infrastructure Layer
+
+- Location: `keynetra/infrastructure/`
+- Owns cache backends, repositories, DB session handling, and transport adapters.
+
+Examples:
+
+- `keynetra/infrastructure/cache/`
+- `keynetra/infrastructure/repositories/`
+- `keynetra/infrastructure/storage/session.py`
+
+## API Layer
+
+- Location: `keynetra/api/`
+- FastAPI routes and middleware only.
+- Delegates decision logic to services.
+
+Entry point:
+
+- `keynetra/api/main.py`
+
+## Configuration Layer
+
+- Location: `keynetra/config/`
+- Environment settings, security, tenancy, and file-based config loading.
+
+## Domain Layer
+
+- Location: `keynetra/domain/`
+- SQLAlchemy data models and API schema contracts.
+
+## Request Lifecycle
+
+1. API receives request and authenticates principal.
+2. Service hydrates tenant context and evaluation input.
+3. Engine evaluates with deterministic decision order.
+4. Service handles cache/audit/revision side effects.
+5. API returns normalized response envelope.
+
+## Architecture Guardrails
+
+- `keynetra/` code does not depend on `infra/`.
+- Route handlers avoid business logic and delegate to services.
+- Engine evaluations use explicit inputs only, with no hidden lookups.
+
+## Related Pages
+
+- [Authorization Pipeline](authorization-pipeline.md)
+- [Caching and Consistency](caching-and-consistency.md)
+- [API Reference](../reference/api-reference.md)
+- [Data Models and Storage](data-models.md)
diff --git a/docs/best-practices.md b/docs/best-practices.md
new file mode 100644
index 0000000..1e19c17
--- /dev/null
+++ b/docs/best-practices.md
@@ -0,0 +1,52 @@
+# Best Practices
+
+## 1) Deny by default
+
+Treat unmatched requests as deny.
+Do not create broad fallback allow rules.
+
+## 2) Apply least privilege
+
+- Grant only required actions
+- Prefer narrower resource scopes
+- Review and remove stale grants regularly
+
+## 3) Use policy versioning discipline
+
+- Track policy changes in source control
+- Require review for policy edits
+- Use `policy_id` naming that reflects intent and version
+
+## 4) Keep tenant boundaries explicit
+
+- Include tenant checks in policies/attributes
+- Prevent cross-tenant reads by default
+- Test multi-tenant edge cases with batch checks
+
+## 5) Validate before deployment
+
+Always run both:
+
+- `/simulate-policy` for before/after behavior
+- `/impact-analysis` for affected user scope
+
+## 6) Use explainability in production support
+
+Persist or log these fields from decisions:
+
+- `decision`
+- `reason`
+- `policy_id`
+- `revision`
+- `explain_trace`
+
+## 7) Keep ACL usage controlled
+
+Use ACL for explicit exceptions, not as the primary model for the whole system.
+
+## 8) Add policy tests for critical flows
+
+- Payment approvals
+- Admin operations
+- Cross-tenant access
+- Data export operations
diff --git a/docs/cli.md b/docs/cli.md
new file mode 100644
index 0000000..fb2feec
--- /dev/null
+++ b/docs/cli.md
@@ -0,0 +1,70 @@
+# CLI Guide
+
+KeyNetra CLI lets you run and validate authorization without UI.
+
+Main entry point:
+
+```bash
+python -m keynetra.cli --help
+```
+
+## Start server
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve
+```
+
+## Load models
+
+Apply a model file to API:
+
+```bash
+python -m keynetra.cli model apply ./path/to/auth-model.yaml --api-key devkey
+```
+
+Show current model:
+
+```bash
+python -m keynetra.cli model show --api-key devkey
+```
+
+## Run access checks
+
+```bash
+python -m keynetra.cli check \
+ --api-key devkey \
+ --user '{"id":"alice","role":"manager"}' \
+ --action approve_payment \
+ --resource '{"resource_type":"payment","resource_id":"pay-900","amount":5000}' \
+ --context '{"department":"finance"}'
+```
+
+## Simulate policy changes
+
+```bash
+python -m keynetra.cli simulate \
+ --api-key devkey \
+ --policy-change 'allow:\n action: share_document\n priority: 1\n policy_key: share-admin\n when:\n role: admin' \
+ --user '{"id":"root-admin","role":"admin","roles":["admin"]}' \
+ --action share_document \
+ --resource '{"resource_type":"document","resource_id":"doc-1"}'
+```
+
+## Run impact analysis
+
+```bash
+python -m keynetra.cli impact \
+ --api-key devkey \
+ --policy-change 'deny:\n action: export_payment\n priority: 1\n policy_key: deny-export-contractors\n when:\n role: external'
+```
+
+## Helpful developer commands
+
+```bash
+python -m keynetra.cli test-policy ./path/to/policy_tests.yaml
+python -m keynetra.cli compile-policies --path ./policies
+python -m keynetra.cli explain --user alice --resource doc-1 --action read
+python -m keynetra.cli doctor --service core
+python -m keynetra.cli version
+```
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 0000000..1d6b48b
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,79 @@
+# Configuration Guide
+
+KeyNetra supports two practical configuration styles:
+
+1. Environment variables (fastest)
+2. YAML/JSON/TOML config file passed to CLI with `--config`
+
+## Environment variable setup
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+export KEYNETRA_DATABASE_URL=sqlite+pysqlite:///./keynetra.db
+export KEYNETRA_REDIS_URL=
+export KEYNETRA_POLICY_PATHS=./policies
+export KEYNETRA_MODEL_PATHS=./models
+python -m keynetra.cli serve
+```
+
+## YAML config file
+
+Example `keynetra.yaml`:
+
+```yaml
+database:
+ url: sqlite+pysqlite:///./keynetra.db
+redis:
+ url: null
+policies:
+ path: ./policies
+models:
+ path: ./models
+server:
+ host: 0.0.0.0
+ port: 8000
+seed_data: false
+```
+
+Run with config file:
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve --config ./keynetra.yaml
+```
+
+Note:
+
+- API keys are still configured via environment (`KEYNETRA_API_KEYS`).
+
+## JSON config file
+
+```json
+{
+ "database": {"url": "sqlite+pysqlite:///./keynetra.db"},
+ "redis": {"url": null},
+ "policy_paths": ["./policies"],
+ "model_paths": ["./models"],
+ "server": {"host": "0.0.0.0", "port": 8000},
+ "seed_data": false
+}
+```
+
+Run:
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve --config ./keynetra.json
+```
+
+## Most useful env vars
+
+- `KEYNETRA_API_KEYS`
+- `KEYNETRA_DATABASE_URL`
+- `KEYNETRA_REDIS_URL`
+- `KEYNETRA_POLICY_PATHS`
+- `KEYNETRA_MODEL_PATHS`
+- `KEYNETRA_RATE_LIMIT_PER_MINUTE`
+- `KEYNETRA_RATE_LIMIT_BURST`
+- `KEYNETRA_SERVER_HOST`
+- `KEYNETRA_SERVER_PORT`
diff --git a/docs/core-concepts/authorization-models.md b/docs/core-concepts/authorization-models.md
new file mode 100644
index 0000000..73de158
--- /dev/null
+++ b/docs/core-concepts/authorization-models.md
@@ -0,0 +1,61 @@
+---
+title: Authorization Models
+---
+
+# Authorization Models
+
+KeyNetra supports multiple authorization models that can be composed in a single decision flow.
+
+## RBAC
+
+Role-Based Access Control is implemented through users, roles, permissions, and role-permission bindings.
+
+Related implementation:
+
+- `keynetra/domain/models/rbac.py`
+- `keynetra/api/routes/roles.py`
+- `keynetra/api/routes/permissions.py`
+
+## ACL
+
+Access Control Lists provide resource-scoped, subject-specific allow/deny entries.
+
+Related implementation:
+
+- `keynetra/domain/models/acl.py`
+- `keynetra/api/routes/acl.py`
+
+## ReBAC
+
+Relationship-Based Access Control uses relationship edges between subjects and objects.
+
+Related implementation:
+
+- `keynetra/domain/models/relationship.py`
+- `keynetra/api/routes/relationships.py`
+
+## Policy Graph Evaluation
+
+Policy rules are compiled and evaluated as part of the deterministic engine pipeline.
+
+Related implementation:
+
+- `keynetra/engine/compiled/decision_graph.py`
+- `keynetra/services/policies.py`
+
+## Schema-Based Authorization Modeling
+
+Authorization models can be defined as schema files and compiled into permission graphs.
+
+Related implementation:
+
+- `keynetra/modeling/schema_parser.py`
+- `keynetra/modeling/model_validator.py`
+- `keynetra/modeling/permission_compiler.py`
+
+## Related Pages
+
+- [Authorization Pipeline](../architecture/authorization-pipeline.md)
+- [Policy File Formats](../reference/policy-files.md)
+- [Authorization Model Files](../reference/auth-model-files.md)
+
diff --git a/docs/core-concepts/consistency-and-revisions.md b/docs/core-concepts/consistency-and-revisions.md
new file mode 100644
index 0000000..af851f9
--- /dev/null
+++ b/docs/core-concepts/consistency-and-revisions.md
@@ -0,0 +1,52 @@
+---
+title: Consistency and Revisions
+---
+
+# Consistency and Revisions
+
+KeyNetra uses tenant revisions and cache namespace strategies to keep authorization decisions coherent during policy and relationship changes.
+
+## Consistency Modes
+
+Access requests can use different consistency behavior, including eventual cached reads and stricter consistency paths.
+
+Primary implementation:
+
+- `keynetra/services/authorization.py`
+
+## Revision Tracking
+
+Tenant revisions and policy versions are used to isolate stale decisions.
+
+Primary implementation:
+
+- `keynetra/services/revisions.py`
+- `keynetra/domain/models/tenant.py`
+
+## Cache Namespace Bumping
+
+When policies, ACL entries, or relationships change, relevant cache namespaces are bumped and stale decision keys become invalid.
+
+Related caches:
+
+- policy cache
+- relationship cache
+- ACL cache
+- access index cache
+- decision cache
+
+## Distributed Invalidation
+
+In multi-instance deployments, policy invalidations are distributed through Redis Pub/Sub.
+
+Related implementation:
+
+- `keynetra/infrastructure/cache/policy_distribution.py`
+- `keynetra/api/main.py` (`_start_policy_subscriber`)
+
+## Related Pages
+
+- [Caching and Consistency](../architecture/caching-and-consistency.md)
+- [Observability](../operations/observability.md)
+- [Troubleshooting](../operations/troubleshooting.md)
+
diff --git a/docs/core-concepts/request-evaluation-lifecycle.md b/docs/core-concepts/request-evaluation-lifecycle.md
new file mode 100644
index 0000000..1d6c9c0
--- /dev/null
+++ b/docs/core-concepts/request-evaluation-lifecycle.md
@@ -0,0 +1,60 @@
+---
+title: Request Evaluation Lifecycle
+---
+
+# Request Evaluation Lifecycle
+
+This page explains what happens from request intake to final authorization decision.
+
+## 1) Request Intake
+
+An access request includes:
+
+- `user`
+- `action`
+- `resource`
+- optional `context`
+
+Transport entry points:
+
+- `POST /check-access`
+- `POST /check-access-batch`
+
+## 2) Service Hydration
+
+The authorization service resolves tenant state, policies, relationships, ACL data, and cached decision candidates.
+
+Key implementation:
+
+- `keynetra/services/authorization.py`
+
+## 3) Engine Evaluation
+
+The engine performs deterministic evaluation across direct permissions, ACL, RBAC, relationships, schema permissions, policy graph, and default deny.
+
+Key implementation:
+
+- `keynetra/engine/keynetra_engine.py`
+
+## 4) Decision Output
+
+The system returns:
+
+- decision (`allow` or `deny`)
+- reason and optional policy ID
+- explain trace entries for audit/debugging
+
+## 5) Side Effects
+
+After decision calculation, the service may:
+
+- write audit records
+- update decision cache
+- apply revision/consistency behavior
+
+## Related Pages
+
+- [Authorization Pipeline](../architecture/authorization-pipeline.md)
+- [Caching and Consistency](../architecture/caching-and-consistency.md)
+- [API Reference](../reference/api-reference.md)
+
diff --git a/docs/deep-dive/code-walkthrough.md b/docs/deep-dive/code-walkthrough.md
new file mode 100644
index 0000000..32e09ac
--- /dev/null
+++ b/docs/deep-dive/code-walkthrough.md
@@ -0,0 +1,130 @@
+# Code Walkthrough (Line-by-Line Concepts)
+
+This guide explains key classes and methods with implementation context.
+
+## A) `keynetra/api/routes/access.py`
+
+### `check_access(...)`
+
+What it does:
+
+1. Accepts validated `AccessRequest`
+2. Calls `AuthorizationService.authorize(...)`
+3. Converts service output to API schema (`AccessDecisionResponse`)
+4. Returns standardized success envelope
+
+Why this design:
+
+- Route layer is transport-focused (HTTP validation/serialization)
+- Business logic stays in service/engine layers
+
+### `check_access_batch(...)`
+
+What it does:
+
+- Maps `BatchAccessRequest.items` into service input
+- Returns per-item allow/deny results with revision
+
+### `simulate(...)`
+
+What it does:
+
+- Calls `service.simulate(...)`
+- Returns diagnostic fields like `failed_conditions`
+
+## B) `keynetra/services/authorization.py`
+
+### `AuthorizationService.__init__(...)`
+
+Dependency injection of:
+
+- repositories (tenants, policies, users, relationships, audit, ACL, model)
+- caches (policy, relationship, decision, ACL, access index)
+- settings (timeouts, resilience mode, etc.)
+
+Benefit:
+
+- easy testing with fake repositories/caches
+- clear boundary between domain logic and storage
+
+### `authorize(...)`
+
+Notable behavior:
+
+- Builds fallback input early for resilience path
+- Uses decision cache unless `fully_consistent`
+- Writes audit after decision
+- Returns stable response even when backend fails (via fallback behavior)
+
+### `_build_authorization_input(...)`
+
+Adds optional data into `AuthorizationInput`:
+
+- `acl_entries`
+- `access_index_entries`
+- `permission_graph`
+
+This allows engine to evaluate multiple models in one run.
+
+## C) `keynetra/engine/keynetra_engine.py`
+
+### `AuthorizationInput`
+
+Everything required for deterministic decision is explicit in this object.
+No hidden external calls happen in the engine.
+
+### `PolicyDefinition`
+
+Normalized policy object with:
+
+- `action`
+- `effect`
+- `conditions`
+- `priority`
+- `policy_id`
+
+### `KeyNetraEngine.decide(...)`
+
+Supports two call styles:
+
+- new style: pass `AuthorizationInput`
+- legacy style: `decide(user, action, resource)`
+
+### `_decide_structured(...)`
+
+This is the decision pipeline and the most important method to understand.
+It appends a trace step for each stage and returns at the first decisive stage.
+
+## D) `keynetra/services/policy_simulator.py`
+
+### `simulate_policy_change(...)`
+
+- Computes before decision from current state
+- Parses policy DSL (`dsl_to_policy`)
+- Evaluates after decision in temporary engine
+- Returns both decisions for direct comparison
+
+## E) `keynetra/services/impact_analysis.py`
+
+### `analyze_policy_change(...)`
+
+- Compares before/after engines across user-resource candidates
+- Reports changed user sets:
+ - `gained_access`
+ - `lost_access`
+
+Interpretation tip:
+
+- Large changed sets mean high blast radius; review carefully.
+
+## F) `keynetra/cli.py`
+
+Commands to map with API features:
+
+- `check` -> `/check-access`
+- `simulate` -> `/simulate-policy`
+- `impact` -> `/impact-analysis`
+- `test-policy` -> policy regression tests
+- `compile-policies` -> policy compile/validation summary
+
+Use CLI for reproducible scripts and CI jobs.
diff --git a/docs/deep-dive/developer-manual.md b/docs/deep-dive/developer-manual.md
new file mode 100644
index 0000000..bce915a
--- /dev/null
+++ b/docs/deep-dive/developer-manual.md
@@ -0,0 +1,219 @@
+# Developer Manual (Detailed)
+
+This manual explains how KeyNetra works from request entry to final decision.
+It is intended for developers integrating KeyNetra into real services.
+
+## 1) Mental model
+
+At runtime, KeyNetra does this for every authorization check:
+
+1. Accept request from API or CLI
+2. Authenticate principal (`X-API-Key` or JWT)
+3. Build normalized `AuthorizationInput`
+4. Enrich user/resource context (roles, permissions, relationships)
+5. Evaluate decision using deterministic engine stages
+6. Return decision envelope with reason and explain trace
+
+Core types:
+
+- `AuthorizationInput` in `keynetra/engine/keynetra_engine.py`
+- `AuthorizationDecision` in `keynetra/engine/keynetra_engine.py`
+- `AuthorizationResult` in `keynetra/services/authorization.py`
+
+## 2) API entry points and code path
+
+Main route handlers:
+
+- `POST /check-access` -> `keynetra/api/routes/access.py::check_access`
+- `POST /check-access-batch` -> `keynetra/api/routes/access.py::check_access_batch`
+- `POST /simulate` -> `keynetra/api/routes/access.py::simulate`
+- `POST /simulate-policy` -> `keynetra/api/routes/simulation.py::simulate_policy`
+- `POST /impact-analysis` -> `keynetra/api/routes/simulation.py::impact_analysis`
+
+Service construction:
+
+- `get_authorization_service()` wires repositories + caches in `access.py`
+- `get_simulation_services()` wires simulator/analyzer in `simulation.py`
+
+## 3) AuthorizationService internals
+
+File: `keynetra/services/authorization.py`
+
+Primary methods:
+
+- `authorize(...)`
+- `authorize_batch(...)`
+- `simulate(...)`
+- `get_revision(...)`
+
+### 3.1 `authorize(...)` flow
+
+`authorize()` does more than engine evaluation. It orchestrates:
+
+1. Input validation via `validate_user` and `validate_resource`
+2. Tenant lookup via `TenantRepository`
+3. User hydration (`_hydrate_user`) to include persisted roles/relationships
+4. Decision cache lookup (unless `consistency=fully_consistent`)
+5. Engine construction (`_build_engine`) using current policy version
+6. Pure engine call: `engine.decide(authorization_input)`
+7. Cache write, audit write, and metrics reporting
+8. Resilience fallback if dependencies fail
+
+Why this matters:
+
+- API behavior is stable even when cache or storage temporarily fails
+- Decisions remain explainable because fallback still returns structured traces
+
+### 3.2 `authorize_batch(...)`
+
+`authorize_batch()`:
+
+- Reuses tenant and engine setup once
+- Evaluates items concurrently using `ThreadPoolExecutor`
+- Preserves per-item allow/deny results with revision
+
+Use this when frontends need many permission checks in one request.
+
+### 3.3 `simulate(...)`
+
+`simulate()` calls `authorize()` and returns `decision` directly.
+
+Key difference from `/check-access`:
+
+- API response includes `failed_conditions` and trace details for diagnostics
+
+### 3.4 How input enrichment works
+
+`_hydrate_user(...)` adds:
+
+- `roles`
+- `role_permissions`
+- `relations`
+- `direct_permissions`
+
+This enables mixed RBAC/ABAC/ReBAC decisions from one normalized input.
+
+## 4) Engine internals and stage ordering
+
+File: `keynetra/engine/keynetra_engine.py`
+
+`KeyNetraEngine._decide_structured(...)` evaluates in fixed order:
+
+1. Direct permissions (`rbac:permissions`)
+2. ACL match
+3. Role permissions (`rbac:role`)
+4. Relationship index check (`relationship:index`)
+5. Compiled authorization model graph (`permission_graph`)
+6. Compiled policy graph (`policy_graph`)
+7. Default deny
+
+This ordering is important: earlier matches can short-circuit later stages.
+
+### 4.1 Traceability
+
+Every stage appends an `ExplainTraceStep`.
+Response traces are deterministic and include:
+
+- `step`
+- `outcome`
+- `detail`
+- `policy_id`
+
+This is the core debugging feature for production support.
+
+### 4.2 Condition evaluation
+
+`ConditionEvaluator` implements handlers such as:
+
+- `handle_role`
+- `handle_max_amount`
+- `handle_owner_only`
+- `handle_time_range`
+- `handle_geo_match`
+- `handle_has_relation`
+
+Unknown condition keys fail safely, with a reason of the form `unknown condition: <key>`.
+
+## 5) Simulation and impact analysis internals
+
+### 5.1 Policy simulation
+
+File: `keynetra/services/policy_simulator.py`
+
+`simulate_policy_change(...)`:
+
+1. Builds "before" decision via `AuthorizationService`
+2. Parses proposed DSL with `dsl_to_policy`
+3. Appends changed policy to current policy list
+4. Builds temporary engine and computes "after" decision
+
+Output: `SimulationResult(decision_before, decision_after)`
+
+### 5.2 Impact analysis
+
+File: `keynetra/services/impact_analysis.py`
+
+`analyze_policy_change(...)`:
+
+1. Loads current policies
+2. Builds `before_engine` and `after_engine`
+3. Iterates users and candidate resources
+4. Compares before/after decision for target action
+5. Returns `gained_access` and `lost_access`
+
+Use this to estimate blast radius before deploying policy updates.
+
+## 6) Caching and consistency details
+
+Cache adapters used by service layer:
+
+- Policy cache
+- Relationship cache
+- Decision cache
+- ACL/access index caches
+
+Consistency knobs in access APIs:
+
+- `consistency: eventual` (default; uses decision cache)
+- `consistency: fully_consistent` (bypasses decision cache)
+- optional `revision` token for stronger control
+
+## 7) Example: full request lifecycle
+
+Request:
+
+```json
+{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000},
+ "context": {"department": "finance"}
+}
+```
+
+Potential stage path:
+
+- Direct permission stage matches `approve_payment`
+- Engine returns allow with `policy_id=rbac:permissions`
+- Service wraps response + revision + request metadata
+
+## 8) Integration checklist
+
+Before integrating in production:
+
+1. Use `/check-access-batch` where N checks happen per request
+2. Log `decision`, `reason`, `policy_id`, `revision`
+3. Add policy simulation in CI review for policy changes
+4. Add impact analysis for sensitive policy operations
+5. Keep deny-by-default and least-privilege policies
+
+## 9) Source map (quick links)
+
+- API app bootstrap: `keynetra/api/main.py`
+- Access routes: `keynetra/api/routes/access.py`
+- Simulation routes: `keynetra/api/routes/simulation.py`
+- Service orchestrator: `keynetra/services/authorization.py`
+- Engine core: `keynetra/engine/keynetra_engine.py`
+- Policy simulator: `keynetra/services/policy_simulator.py`
+- Impact analysis: `keynetra/services/impact_analysis.py`
+- CLI: `keynetra/cli.py`
diff --git a/docs/deep-dive/integration-cookbook.md b/docs/deep-dive/integration-cookbook.md
new file mode 100644
index 0000000..56698a0
--- /dev/null
+++ b/docs/deep-dive/integration-cookbook.md
@@ -0,0 +1,118 @@
+# Integration Cookbook (Practical)
+
+This page gives end-to-end integration patterns with copy-paste examples.
+
+## 1) Backend middleware pattern
+
+Use KeyNetra before protected handlers.
+
+Pseudo-flow:
+
+1. Build request payload from authenticated user + route context
+2. Call `/check-access`
+3. Deny with 403 when `allowed=false`
+4. Log `reason` + `policy_id` for debugging
+
+Example payload:
+
+```json
+{
+ "user": {"id": "u-42", "role": "manager", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000},
+ "context": {"department": "finance", "request_id": "req-123"}
+}
+```
+
+## 2) Frontend permission matrix pattern
+
+When UI needs many permissions (buttons, tabs, actions), call one batch endpoint.
+
+Example:
+
+```bash
+curl -s -X POST http://localhost:8000/check-access-batch \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "u-42", "role": "manager", "permissions": ["approve_payment"]},
+ "items": [
+ {"action": "approve_payment", "resource": {"resource_type": "payment", "resource_id": "pay-1", "amount": 500}},
+ {"action": "approve_payment", "resource": {"resource_type": "payment", "resource_id": "pay-2", "amount": 500000}},
+ {"action": "read", "resource": {"resource_type": "document", "resource_id": "doc-1"}}
+ ]
+ }' | jq .
+```
+
+## 3) Safe policy rollout pattern
+
+For policy PRs or release pipelines:
+
+1. Run `/simulate-policy` with representative cases
+2. Run `/impact-analysis`
+3. Require explicit approval for high-impact changes
+
+### Step A: simulate one critical flow
+
+```bash
+curl -s -X POST http://localhost:8000/simulate-policy \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "simulate": {
+ "policy_change": "deny:\n action: approve_payment\n priority: 1\n policy_key: emergency-freeze\n when:\n department: finance"
+ },
+ "request": {
+ "user": {"id": "u-42", "role": "manager", "department": "finance"},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 1000},
+ "context": {}
+ }
+ }' | jq .
+```
+
+### Step B: analyze blast radius
+
+```bash
+curl -s -X POST http://localhost:8000/impact-analysis \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "policy_change": "deny:\n action: approve_payment\n priority: 1\n policy_key: emergency-freeze\n when:\n department: finance"
+ }' | jq .
+```
+
+## 4) Incident-debug pattern
+
+If an expected allow becomes deny in production:
+
+1. Replay request through `/simulate`
+2. Inspect `failed_conditions`
+3. Inspect `explain_trace`
+4. Confirm latest `revision`
+
+Example:
+
+```bash
+curl -s -X POST http://localhost:8000/simulate \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "u-42", "role": "manager"},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 250000},
+ "context": {"department": "finance"}
+ }' | jq .
+```
+
+## 5) Language-agnostic response contract
+
+Always parse these fields from responses:
+
+- `data.allowed` or `data.decision`
+- `data.reason`
+- `data.policy_id`
+- `data.revision`
+- `meta.request_id`
+
+These fields are enough for product behavior, logging, and support triage.
diff --git a/docs/development/ci-cd-release.md b/docs/development/ci-cd-release.md
new file mode 100644
index 0000000..73d0430
--- /dev/null
+++ b/docs/development/ci-cd-release.md
@@ -0,0 +1,64 @@
+---
+title: CI/CD and Release
+---
+
+# CI/CD and Release
+
+GitHub Actions workflows:
+
+- `.github/workflows/ci.yml`
+- `.github/workflows/release.yml`
+
+## CI Workflow
+
+Triggered on pushes and pull requests.
+
+Stages:
+
+1. Setup Python 3.11
+2. Install dependencies
+3. Lint (`ruff`, `black --check`, `isort --check-only`)
+4. Migration check (`python -m keynetra.cli migrate --confirm-destructive`)
+5. Tests + coverage (`--cov-fail-under=80`)
+
+CI currently runs on Python 3.11.
+
+## Release Workflow
+
+Triggered on tags matching `v*`.
+
+Stages:
+
+1. Build package (`python -m build`)
+2. Run tests with coverage
+3. Upload artifacts (`.whl`, `.tar.gz`)
+4. Publish GitHub release
+
+## Recommended Release Steps
+
+1. ensure version alignment (`pyproject.toml`, `keynetra/version.py`, OpenAPI info)
+2. run lint, migrations, and full tests locally
+3. confirm changelog and release notes
+4. push release tag (`vX.Y.Z`)
+
+## Version and Contract Alignment
+
+Version `0.1.0` is currently represented in:
+
+- `pyproject.toml`
+- `keynetra/version.py`
+- `contracts/openapi/keynetra-v0.1.0.yaml`
+
+## Release Hygiene Checklist
+
+- tests pass locally and in CI
+- OpenAPI contract synced with implemented routes
+- migrations apply cleanly
+- docs and examples updated
+- changelog updated
+
+## Related Pages
+
+- [Testing Strategy](testing.md)
+- [Contributing](contributing.md)
+- [Migrations](migrations.md)
diff --git a/docs/development/contributing.md b/docs/development/contributing.md
new file mode 100644
index 0000000..09d65c2
--- /dev/null
+++ b/docs/development/contributing.md
@@ -0,0 +1,47 @@
+---
+title: Contributing
+---
+
+# Contributing
+
+Primary contribution guidance comes from:
+
+- `CONTRIBUTING.md`
+
+## Standards
+
+- Python 3.11
+- Black formatting
+- Isort import order
+- Ruff lint rules
+- tests and coverage maintained
+- architecture boundaries respected (`keynetra/` does not depend on `infra/`)
+
+## Documentation Expectations
+
+- update docs for behavior changes
+- keep examples runnable and version-aligned
+- maintain internal links across pages
+
+## Typical Workflow
+
+1. Create branch
+2. Implement focused change
+3. Add/update tests
+4. Run lint + tests
+5. Update docs/migrations as needed
+6. Open PR
+
+## Useful Commands
+
+```bash
+make lint
+make test
+make migrate
+```
+
+## Related Pages
+
+- [Local Development](local-development.md)
+- [CI/CD and Release](ci-cd-release.md)
+- [Testing Strategy](testing.md)
diff --git a/docs/development/local-development.md b/docs/development/local-development.md
new file mode 100644
index 0000000..a9ef2ac
--- /dev/null
+++ b/docs/development/local-development.md
@@ -0,0 +1,66 @@
+---
+title: Local Development
+---
+
+# Local Development
+
+This page describes the recommended development workflow for contributors and maintainers.
+
+## Setup
+
+```bash
+python3.11 -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+cp .env.example .env
+```
+
+Optional: run local services via Docker while running the app locally.
+
+## Core Commands
+
+From `Makefile`:
+
+- `make install`
+- `make test`
+- `make lint`
+- `make format`
+- `make migrate`
+- `make run`
+
+## Run API
+
+```bash
+make run
+```
+
+or
+
+```bash
+uvicorn keynetra.api.main:app --reload
+```
+
+Or use CLI:
+
+```bash
+python -m keynetra.cli serve --config ./keynetra.yaml
+```
+
+## Seed Sample Data
+
+```bash
+python -m keynetra.cli seed-data --reset
+```
+
+## Developer-Facing Endpoints
+
+In a development/local environment (`KEYNETRA_ENVIRONMENT=development`), the following sample endpoints are available:
+
+- `GET /dev/sample-data`
+- `POST /dev/sample-data/seed`
+
+## Related Pages
+
+- [Testing Strategy](testing.md)
+- [Migrations](migrations.md)
+- [Contributing](contributing.md)
diff --git a/docs/development/migrations.md b/docs/development/migrations.md
new file mode 100644
index 0000000..029b81b
--- /dev/null
+++ b/docs/development/migrations.md
@@ -0,0 +1,60 @@
+---
+title: Migrations
+---
+
+# Migrations
+
+KeyNetra uses Alembic for schema migrations.
+
+All schema changes should be tracked with migration files under `alembic/versions/`.
+
+## Files
+
+- `alembic.ini`
+- `alembic/env.py`
+- `alembic/versions/*.py`
+- `keynetra/migrations.py` (destructive migration detection utility)
+
+## Run Migrations
+
+```bash
+python -m keynetra.cli migrate
+```
+
+If destructive revisions exist and are intentional:
+
+```bash
+python -m keynetra.cli migrate --confirm-destructive
+```
+
+## Migration Safety
+
+`keynetra/migrations.py` detects unapplied destructive operations (drop table/column) and blocks execution unless explicitly confirmed.
+
+## Migration Coverage
+
+Revision files currently include schema for:
+
+- RBAC tables
+- tenant and policy versioning
+- relationships
+- audit explainability fields
+- idempotency records
+- ACL entries
+- authorization model revisions
+
+## Docker Migrations
+
+Container startup script runs migrations when:
+
+- `KEYNETRA_RUN_MIGRATIONS=1`
+
+Reference:
+
+- `infra/docker/start.sh`
+
+## Related Pages
+
+- [Data Models and Storage](../architecture/data-models.md)
+- [Troubleshooting](../operations/troubleshooting.md)
+- [CI/CD and Release](ci-cd-release.md)
diff --git a/docs/development/testing.md b/docs/development/testing.md
new file mode 100644
index 0000000..6793b12
--- /dev/null
+++ b/docs/development/testing.md
@@ -0,0 +1,72 @@
+---
+title: Testing Strategy
+---
+
+# Testing Strategy
+
+Test suite location:
+
+- `tests/`
+
+## Run Tests
+
+```bash
+pytest -q
+pytest -q --cov=keynetra --cov-fail-under=80
+```
+
+For quick local iteration, run targeted test modules:
+
+```bash
+pytest -q tests/test_engine.py
+pytest -q tests/test_api.py
+```
+
+## Coverage Areas
+
+Current test modules validate:
+
+- engine behavior and explainability
+- API contract and route behavior
+- ACL operations
+- relationship indexing
+- compiled policies and policy simulation
+- impact analysis
+- auth model parsing/validation/compile flow
+- revision consistency and caching behavior
+- metrics endpoint output
+- admin login flow
+- migration safety utilities
+- release hardening checks
+- headless and CLI modes
+
+Representative files:
+
+- `tests/test_engine.py`
+- `tests/test_api.py`
+- `tests/test_api_contract.py`
+- `tests/test_acl.py`
+- `tests/test_auth_model.py`
+- `tests/test_policy_simulation.py`
+- `tests/test_impact_analysis.py`
+- `tests/test_metrics_endpoint.py`
+- `tests/test_services_caching.py`
+- `tests/test_headless_modes.py`
+
+## Policy Test Suites
+
+Policy-specific deterministic testing via CLI:
+
+```bash
+python -m keynetra.cli test-policy ./policy_tests.yaml
+```
+
+## CI Expectations
+
+CI validates lint, migration application, and coverage thresholds. Match those checks locally before opening a PR.
+
+## Related Pages
+
+- [CLI Reference](../reference/cli-reference.md)
+- [CI/CD and Release](ci-cd-release.md)
+- [Contributing](contributing.md)
diff --git a/docs/examples/assets/auth-model.yaml b/docs/examples/assets/auth-model.yaml
new file mode 100644
index 0000000..b4681f6
--- /dev/null
+++ b/docs/examples/assets/auth-model.yaml
@@ -0,0 +1,13 @@
+model:
+ schema_version: 1
+ type: document
+ relations:
+ owner: user
+ editor:
+ - user
+ viewer:
+ - user
+ permissions:
+ read: owner or editor or viewer
+ write: owner or editor
+ delete: owner
diff --git a/docs/examples/assets/keynetra.yaml b/docs/examples/assets/keynetra.yaml
new file mode 100644
index 0000000..7305b0b
--- /dev/null
+++ b/docs/examples/assets/keynetra.yaml
@@ -0,0 +1,18 @@
+database:
+ url: sqlite+pysqlite:///./keynetra.db
+
+redis:
+ url: redis://localhost:6379/0
+
+policies:
+ paths:
+ - ./docs/examples/assets/policies
+
+models:
+ path: ./docs/examples/assets/auth-model.yaml
+
+seed_data: true
+
+server:
+ host: 0.0.0.0
+ port: 8000
diff --git a/docs/examples/assets/policies/document_access.yaml b/docs/examples/assets/policies/document_access.yaml
new file mode 100644
index 0000000..dac5e3d
--- /dev/null
+++ b/docs/examples/assets/policies/document_access.yaml
@@ -0,0 +1,34 @@
+policies:
+ - action: read
+ effect: allow
+ priority: 10
+ policy_id: document-read-admin
+ conditions:
+ role: admin
+ resource_type: document
+
+ - action: read
+ effect: allow
+ priority: 20
+ policy_id: document-read-editor
+ conditions:
+ relation: editor
+ resource_type: document
+ same_tenant: true
+
+ - action: write
+ effect: allow
+ priority: 30
+ policy_id: document-write-owner
+ conditions:
+ relation: owner
+ resource_type: document
+ owner_only: true
+
+ - action: delete
+ effect: deny
+ priority: 40
+ policy_id: document-delete-protected
+ conditions:
+ resource_type: document
+ resource_attr: { classification: legal_hold }
diff --git a/docs/examples/assets/policies/finance_rules.json b/docs/examples/assets/policies/finance_rules.json
new file mode 100644
index 0000000..49cf70b
--- /dev/null
+++ b/docs/examples/assets/policies/finance_rules.json
@@ -0,0 +1,23 @@
+[
+ {
+ "action": "approve_payment",
+ "effect": "allow",
+ "priority": 35,
+ "policy_id": "payment-approve-manager",
+ "conditions": {
+ "role": "manager",
+ "department": "finance",
+ "max_amount": 10000
+ }
+ },
+ {
+ "action": "approve_payment",
+ "effect": "deny",
+ "priority": 95,
+ "policy_id": "payment-approve-high-risk",
+ "conditions": {
+ "department": "finance",
+ "risk_level": "high"
+ }
+ }
+]
diff --git a/docs/examples/assets/policies/ops_rules.polar b/docs/examples/assets/policies/ops_rules.polar
new file mode 100644
index 0000000..6424bfe
--- /dev/null
+++ b/docs/examples/assets/policies/ops_rules.polar
@@ -0,0 +1,5 @@
+# Polar-like flat rules supported by the KeyNetra loader
+allow action=deploy priority=20 policy_id=ops-deploy-allow role=ops environment=staging
+allow action=restart_service priority=30 policy_id=ops-restart-allow role=sre
+
+deny action=deploy priority=90 policy_id=ops-deploy-deny-prod role=contractor environment=production
diff --git a/docs/examples/assets/policy_tests.yaml b/docs/examples/assets/policy_tests.yaml
new file mode 100644
index 0000000..6a09c8a
--- /dev/null
+++ b/docs/examples/assets/policy_tests.yaml
@@ -0,0 +1,42 @@
+policies:
+ - action: read
+ effect: allow
+ priority: 10
+ policy_id: document-read-admin
+ conditions:
+ role: admin
+ resource_type: document
+
+ - action: read
+ effect: deny
+ priority: 80
+ policy_id: document-read-legal-hold
+ conditions:
+ resource_type: document
+ classification: legal_hold
+
+tests:
+ - name: admin can read normal document
+ expect: allow
+ input:
+ user:
+ id: alice
+ role: admin
+ action: read
+ resource:
+ resource_type: document
+ resource_id: doc-1
+ context: {}
+
+ - name: legal hold document denied for admin
+ expect: deny
+ input:
+ user:
+ id: alice
+ role: admin
+ action: read
+ resource:
+ resource_type: document
+ resource_id: doc-2
+ classification: legal_hold
+ context: {}
diff --git a/docs/examples/cli-workflows.md b/docs/examples/cli-workflows.md
new file mode 100644
index 0000000..c80bb47
--- /dev/null
+++ b/docs/examples/cli-workflows.md
@@ -0,0 +1,73 @@
+---
+title: CLI Workflows
+---
+
+# CLI Workflows
+
+This page provides operational CLI recipes for development and release workflows.
+
+## Local Bootstrap
+
+```bash
+python -m keynetra.cli migrate
+python -m keynetra.cli seed-data --reset
+python -m keynetra.cli serve
+```
+
+## API Decision via CLI
+
+```bash
+python -m keynetra.cli check \
+ --api-key devkey \
+ --user '{"id":"alice","role":"manager"}' \
+ --action approve_payment \
+ --resource '{"resource_type":"payment","resource_id":"pay-900","amount":5000}'
+```
+
+## Policy Validation Pipeline
+
+```bash
+python -m keynetra.cli compile-policies --config docs/examples/assets/keynetra.yaml
+python -m keynetra.cli test-policy docs/examples/assets/policy_tests.yaml
+python -m keynetra.cli doctor --service core --config docs/examples/assets/keynetra.yaml
+```
+
+## Runtime Debug Flow
+
+```bash
+python -m keynetra.cli explain \
+ --user u1 \
+ --resource doc-1 \
+ --action read \
+ --context '{"department":"finance"}'
+```
+
+## Performance Smoke Test
+
+```bash
+python -m keynetra.cli benchmark \
+ --url http://localhost:8000/check-access \
+ --requests 200 \
+ --concurrency 20 \
+ --api-key devkey
+```
+
+## ACL Maintenance
+
+```bash
+python -m keynetra.cli acl add \
+ --subject-type user \
+ --subject-id alice \
+ --resource-type document \
+ --resource-id doc-1 \
+ --action read \
+ --effect allow
+
+python -m keynetra.cli acl list --resource-type document --resource-id doc-1
+python -m keynetra.cli acl remove --acl-id 1
+```
+
+## Related Pages
+
+- [CLI Reference](../reference/cli-reference.md)
+- [Quickstart](../getting-started/quickstart.md)
diff --git a/docs/examples/end-to-end-api-flow.md b/docs/examples/end-to-end-api-flow.md
new file mode 100644
index 0000000..c0133f4
--- /dev/null
+++ b/docs/examples/end-to-end-api-flow.md
@@ -0,0 +1,97 @@
+---
+title: End-to-End API Flow
+---
+
+# End-to-End API Flow
+
+This walkthrough covers a practical management-to-decision flow using HTTP APIs.
+
+For file-based bootstrapping, use [Example Files](example-files.md) in `docs/examples/assets/`.
+
+## Goal
+
+- Create a policy
+- Validate access decision
+- Simulate a policy change
+- Review audit records
+
+## 1. Start KeyNetra
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve
+```
+
+## 2. Create Policy
+
+```bash
+curl -s -X POST http://localhost:8000/policies \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "action": "read",
+ "effect": "allow",
+ "priority": 50,
+ "conditions": {
+ "policy_key": "allow-read-admin",
+ "role": "admin"
+ }
+ }' | jq .
+```
+
+## 3. Evaluate Access
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "u1", "role": "admin"},
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }' | jq .
+```
+
+You should see `data.allowed=true` when policy and payload conditions match.
+
+## 4. Simulate Deny Override
+
+```bash
+curl -s -X POST http://localhost:8000/simulate-policy \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "simulate": {
+ "policy_change": "deny:\n action: read\n priority: 100\n policy_key: deny-read-admin-temp\n when:\n role: admin"
+ },
+ "request": {
+ "user": {"id": "u1", "role": "admin"},
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }
+ }' | jq .
+```
+
+Use this output to confirm behavior before persisting policy updates.
+
+## 5. Read Audit Trail
+
+```bash
+curl -s "http://localhost:8000/audit?user_id=u1&resource_id=doc-1&limit=10" \
+ -H "X-API-Key: devkey" | jq .
+```
+
+## 6. Cleanup Policy
+
+```bash
+curl -s -X DELETE http://localhost:8000/policies/allow-read-admin \
+ -H "X-API-Key: devkey" | jq .
+```
+
+## Related Pages
+
+- [API Reference](../reference/api-reference.md)
+- [Policy File Formats](../reference/policy-files.md)
+- [Policy Patterns](policy-patterns.md)
diff --git a/docs/examples/example-files.md b/docs/examples/example-files.md
new file mode 100644
index 0000000..ef454c4
--- /dev/null
+++ b/docs/examples/example-files.md
@@ -0,0 +1,143 @@
+---
+title: Example Files
+---
+
+# Example Files
+
+All core examples are embedded directly here so you can copy/paste without browsing file paths.
+
+## Runtime Config Example
+
+```yaml
+database:
+ url: sqlite+pysqlite:///./keynetra.db
+redis:
+ url: redis://localhost:6379/0
+policies:
+ paths:
+ - ./docs/examples/assets/policies
+models:
+ path: ./docs/examples/assets/auth-model.yaml
+seed_data: true
+server:
+ host: 0.0.0.0
+ port: 8000
+```
+
+## Authorization Model Example
+
+```yaml
+model:
+ schema_version: 1
+ type: document
+ relations:
+ owner: user
+ editor:
+ - user
+ viewer:
+ - user
+ permissions:
+ read: owner or editor or viewer
+ write: owner or editor
+ delete: owner
+```
+
+## Policy Examples
+
+YAML:
+
+```yaml
+policies:
+ - action: read
+ effect: allow
+ priority: 10
+ policy_id: document-read-admin
+ conditions:
+ role: admin
+ resource_type: document
+
+ - action: delete
+ effect: deny
+ priority: 40
+ policy_id: document-delete-protected
+ conditions:
+ resource_type: document
+ resource_attr: { classification: legal_hold }
+```
+
+JSON:
+
+```json
+[
+ {
+ "action": "approve_payment",
+ "effect": "allow",
+ "priority": 35,
+ "policy_id": "payment-approve-manager",
+ "conditions": {
+ "role": "manager",
+ "department": "finance",
+ "max_amount": 10000
+ }
+ }
+]
+```
+
+Polar-like:
+
+```text
+allow action=deploy priority=20 policy_id=ops-deploy-allow role=ops environment=staging
+deny action=deploy priority=90 policy_id=ops-deploy-deny-prod role=contractor environment=production
+```
+
+## Policy Test Suite Example
+
+```yaml
+policies:
+ - action: read
+ effect: allow
+ priority: 10
+ policy_id: document-read-admin
+ conditions:
+ role: admin
+ resource_type: document
+
+tests:
+ - name: admin can read normal document
+ expect: allow
+ input:
+ user:
+ id: alice
+ role: admin
+ action: read
+ resource:
+ resource_type: document
+ resource_id: doc-1
+ context: {}
+```
+
+## Quick Validation Flow
+
+```bash
+# server
+python -m keynetra.cli serve --config docs/examples/assets/keynetra.yaml
+
+# compile and test
+python -m keynetra.cli compile-policies --config docs/examples/assets/keynetra.yaml
+python -m keynetra.cli test-policy docs/examples/assets/policy_tests.yaml
+
+# apply model
+python -m keynetra.cli model apply docs/examples/assets/auth-model.yaml --api-key devkey
+```
+
+## Why Embedded Examples
+
+- Show supported file formats (`yaml`, `json`, `polar`).
+- Give copy-paste examples directly inside docs.
+- Keep docs and runnable assets aligned.
+
+## Related Pages
+
+- [Project Overview](../getting-started/overview.md)
+- [Policy File Formats](../reference/policy-files.md)
+- [Authorization Model Files](../reference/auth-model-files.md)
diff --git a/docs/examples/policy-patterns.md b/docs/examples/policy-patterns.md
new file mode 100644
index 0000000..57075de
--- /dev/null
+++ b/docs/examples/policy-patterns.md
@@ -0,0 +1,84 @@
+---
+title: Policy Patterns
+---
+
+# Policy Patterns
+
+These patterns are aligned with KeyNetra policy parsing and decision priority behavior.
+
+## Pattern 1: Explicit Admin Allow
+
+```yaml
+policies:
+ - policy_id: allow-read-admin
+ action: read
+ effect: allow
+ priority: 20
+ conditions:
+ role: admin
+```
+
+Use when a role should have stable baseline access.
+
+## Pattern 2: Deny Override for High-Risk Context
+
+```yaml
+policies:
+ - policy_id: deny-export-external
+ action: export
+ effect: deny
+ priority: 100
+ conditions:
+ role: external
+```
+
+Use high-priority deny rules for risk boundaries.
+
+## Pattern 3: Amount Guardrail
+
+```yaml
+policies:
+ - policy_id: allow-approve-manager-low-value
+ action: approve_payment
+ effect: allow
+ priority: 40
+ conditions:
+ role: manager
+ max_amount: 10000
+```
+
+Pair with request payload context such as `amount` to enforce transaction limits.
+
+## Pattern 4: Department Scope
+
+```yaml
+policies:
+ - policy_id: allow-finance-read
+ action: read_payment
+ effect: allow
+ priority: 30
+ conditions:
+ department: finance
+```
+
+Use contextual fields from the `context` payload for scoped permissions.
+
+## Pattern 5: Progressive Rollout
+
+1. Create policy in low priority allow mode.
+2. Run `simulate-policy` for representative users/resources.
+3. Run `impact-analysis` to estimate changed decisions.
+4. Increase priority after validation.
+
+## Validation Checklist
+
+- Every rule has an explicit `action`, `effect`, and `priority`.
+- `policy_id` or `policy_key` is stable for rollback/audit.
+- Condition keys match request schema fields.
+- Run `compile-policies` and `test-policy` before deployment.
+
+## Related Pages
+
+- [Policy File Formats](../reference/policy-files.md)
+- [CLI Workflows](cli-workflows.md)
+- [End-to-End API Flow](end-to-end-api-flow.md)
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
new file mode 100644
index 0000000..214c0ac
--- /dev/null
+++ b/docs/getting-started/installation.md
@@ -0,0 +1,69 @@
+---
+title: Installation
+---
+
+# Installation
+
+This page covers local and Docker-based installation paths for KeyNetra.
+
+## Prerequisites
+
+- Python 3.11
+- `pip`
+- Optional for production/local parity: Docker + Docker Compose
+
+Implementation references:
+
+- `pyproject.toml`
+- `requirements.txt`
+- `requirements-dev.txt`
+- `Dockerfile`
+
+## Local Python Setup
+
+```bash
+python3.11 -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+cp .env.example .env
+```
+
+## Verify Installation
+
+```bash
+python -m keynetra.cli version
+python -m keynetra.cli help-cli
+```
+
+Expected behavior:
+
+- `version` prints the current package version (for example, `0.1.0`)
+- `help-cli` prints the operational command reference
+
+## Optional Docker Setup
+
+```bash
+docker compose up --build
+```
+
+Development compose:
+
+```bash
+docker compose -f docker-compose.dev.yml up --build
+```
+
+## Verify Runtime
+
+After startup, run:
+
+```bash
+curl -i http://localhost:8000/health/ready
+```
+
+You should receive an HTTP `200` response.
+
+## Next
+
+- [Quickstart](quickstart.md)
+- [Configuration Files](../reference/configuration-files.md)
+- [Environment Variables](../reference/environment-variables.md)
diff --git a/docs/getting-started/overview.md b/docs/getting-started/overview.md
new file mode 100644
index 0000000..c6b5cbc
--- /dev/null
+++ b/docs/getting-started/overview.md
@@ -0,0 +1,60 @@
+---
+title: Project Overview
+---
+
+# Project Overview
+
+KeyNetra is a Python authorization platform that combines a deterministic policy engine with API, CLI, and embedded usage modes.
+
+It is designed for self-hosted, headless-first deployments where policy evaluation must remain deterministic and auditable.
+
+## Repository Scope
+
+Primary implementation lives in:
+
+- `keynetra/engine`: pure authorization engine
+- `keynetra/services`: orchestration layer for policy loading, cache, audit, and resilience
+- `keynetra/api`: FastAPI transport and middleware
+- `keynetra/infrastructure`: DB/cache repositories, logging, and metrics integrations
+- `keynetra/domain`: SQLAlchemy models and Pydantic schemas
+- `keynetra/config`: settings, security, tenancy, and config file loading
+- `alembic/`: database migrations
+- `infra/`: Docker and Kubernetes deployment assets
+- `contracts/openapi/keynetra-v0.1.0.yaml`: OpenAPI contract
+- `examples/`: config, policy, and model examples
+
+## Core Capabilities
+
+- RBAC: users, roles, permissions, and role-permission binding
+- ACL: resource-level allow/deny entries
+- ReBAC: relationship graph checks
+- ABAC-style policies: compiled decision graph from policy definitions
+- Authorization modeling: schema parser, validator, and permission compiler
+- Policy simulation and impact analysis
+- Revision and consistency controls
+- Redis-backed distributed cache with in-memory fallback
+- Prometheus metrics and structured logging
+
+## Usage Modes
+
+KeyNetra supports three primary operating modes:
+
+- HTTP API server mode
+- CLI operational mode
+- Embedded engine mode inside Python applications
+
+See [Runtime Modes](runtime-modes.md) for concrete examples.
+
+## Who This Is For
+
+- Platform/backend engineers embedding authorization in services
+- DevOps/SRE operators deploying KeyNetra in Docker or Kubernetes
+- Application teams integrating with management and decision APIs
+
+## Related Pages
+
+- [Runtime Modes](runtime-modes.md)
+- [Example Files](../examples/example-files.md)
+- [System Architecture](../architecture/system-architecture.md)
+- [API Reference](../reference/api-reference.md)
+- [Docker Deployment](../operations/deployment-docker.md)
diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md
new file mode 100644
index 0000000..da1157a
--- /dev/null
+++ b/docs/getting-started/quickstart.md
@@ -0,0 +1,132 @@
+---
+title: Quickstart
+---
+
+# Quickstart
+
+This guide validates a full local KeyNetra flow: install, run server, execute a decision request, and inspect results.
+
+## Prerequisites
+
+- Python 3.11+
+- `pip`
+- `curl`
+- Optional: `jq` for pretty JSON output
+
+## 1. Install Dependencies
+
+From repository root:
+
+```bash
+python3.11 -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+```
+
+## 2. Configure API Access Key
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+```
+
+Optional but useful for first run:
+
+```bash
+export KEYNETRA_ENVIRONMENT=development
+export KEYNETRA_AUTO_SEED_SAMPLE_DATA=true
+```
+
+## 3. Start the API Server
+
+```bash
+python -m keynetra.cli serve --host 0.0.0.0 --port 8000
+```
+
+Server entrypoint is `keynetra/api/main.py` and default URL is `http://localhost:8000`.
+
+## 4. Verify Health and Readiness
+
+```bash
+curl -s http://localhost:8000/health | jq .
+curl -s http://localhost:8000/health/ready | jq .
+```
+
+The expected status is `ok` for a healthy local setup.
+
+## 5. Run Your First Access Decision
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000},
+ "context": {"department": "finance"}
+ }' | jq .
+```
+
+Key fields to review in the response:
+
+- `data.allowed`: final allow/deny boolean
+- `data.decision`: normalized decision string
+- `data.matched_policies`: rules that produced the outcome
+- `request_id`: request correlation id from middleware
+
+## 6. Run a Batch Check
+
+```bash
+curl -s -X POST http://localhost:8000/check-access-batch \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "alice", "role": "manager"},
+ "items": [
+ {
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ },
+ {
+ "action": "delete",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }
+ ]
+ }' | jq .
+```
+
+Use this endpoint when a single user needs multiple action checks in one network call.
+
+## 7. Simulate a Policy Change
+
+```bash
+curl -s -X POST http://localhost:8000/simulate-policy \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "simulate": {
+ "policy_change": "allow:\n action: read\n priority: 10\n when:\n role: admin"
+ },
+ "request": {
+ "user": {"id": "u1", "role": "admin"},
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }
+ }' | jq .
+```
+
+This lets you validate policy behavior before persisting the change.
+
+## 8. Stop the Server
+
+Use `Ctrl+C` in the terminal running `serve`.
+
+## Next Steps
+
+- [Runtime Modes](runtime-modes.md)
+- [API Reference](../reference/api-reference.md)
+- [CLI Reference](../reference/cli-reference.md)
+- [End-to-End API Example](../examples/end-to-end-api-flow.md)
diff --git a/docs/getting-started/runtime-modes.md b/docs/getting-started/runtime-modes.md
new file mode 100644
index 0000000..ffaa769
--- /dev/null
+++ b/docs/getting-started/runtime-modes.md
@@ -0,0 +1,45 @@
+---
+title: Runtime Modes
+---
+
+# Runtime Modes
+
+KeyNetra can run in three modes.
+
+## 1) API server mode
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve
+```
+
+Use when other services call authorization over HTTP.
+
+## 2) CLI mode
+
+```bash
+python -m keynetra.cli check \
+ --api-key devkey \
+ --user '{"id":"alice","role":"manager"}' \
+ --action approve_payment \
+ --resource '{"resource_type":"payment","resource_id":"pay-900","amount":5000}'
+```
+
+Use for local testing, scripts, and operations.
+
+## 3) Embedded Python mode
+
+```python
+from keynetra import KeyNetra
+
+engine = KeyNetra.from_config("./keynetra.yaml")
+decision = engine.check_access(
+ subject="user:alice",
+ action="read",
+ resource="document:doc-1",
+ context={}
+)
+print(decision.allowed)
+```
+
+Use when you want in-process authorization in Python applications.
diff --git a/docs/models/README.md b/docs/models/README.md
new file mode 100644
index 0000000..20e0805
--- /dev/null
+++ b/docs/models/README.md
@@ -0,0 +1,39 @@
+# Authorization Models
+
+If you are new to authorization, this is the quickest mental model:
+
+- RBAC answers: "What can this role do?"
+- ABAC answers: "Do attributes satisfy policy conditions?"
+- ACL answers: "Is this exact user/group explicitly allowed or denied on this resource?"
+- ReBAC answers: "Does a relationship path grant access?"
+
+KeyNetra supports all four and can combine them in a single decision.
+
+## How to choose
+
+- Start with RBAC for coarse permissions
+- Add ABAC for dynamic constraints (department, time, amount)
+- Add ACL for exceptions on specific resources
+- Add ReBAC for sharing/collaboration graphs (owner/editor/member)
+
+## Example model (document system)
+
+```yaml
+model:
+ type: document
+ relations:
+ owner: user
+ editor: user
+ viewer: user
+ permissions:
+ read: owner or editor or viewer
+ write: owner or editor
+ delete: owner
+```
+
+## Read next
+
+- [RBAC](rbac.md)
+- [ABAC](abac.md)
+- [ACL](acl.md)
+- [ReBAC](rebac.md)
diff --git a/docs/models/abac.md b/docs/models/abac.md
new file mode 100644
index 0000000..47b4edc
--- /dev/null
+++ b/docs/models/abac.md
@@ -0,0 +1,37 @@
+# ABAC (Attribute-Based Access Control)
+
+ABAC evaluates attributes from user, resource, and context.
+
+## Simple idea
+
+A request is allowed when conditions match attributes.
+
+Examples of attributes:
+
+- User: `department`, `employment_type`
+- Resource: `owner_id`, `classification`, `amount`
+- Context: `time`, `ip`, `region`
+
+## Example
+
+Policy condition concept:
+
+```yaml
+conditions:
+ role: manager
+ max_amount: 100000
+```
+
+Request resource:
+
+```json
+{"amount": 45000}
+```
+
+Result: allowed for a manager under threshold.
+
+## When ABAC works well
+
+- Financial approvals
+- Geo/time-based restrictions
+- Department-scoped access
diff --git a/docs/models/acl.md b/docs/models/acl.md
new file mode 100644
index 0000000..294961a
--- /dev/null
+++ b/docs/models/acl.md
@@ -0,0 +1,30 @@
+# ACL (Access Control List)
+
+ACL stores explicit allow/deny entries per resource.
+
+## Simple idea
+
+You can override generic rules for one resource.
+
+Example entry:
+
+```json
+{
+ "subject_type": "user",
+ "subject_id": "charlie",
+ "resource_type": "document",
+ "resource_id": "doc-1",
+ "action": "share",
+ "effect": "deny"
+}
+```
+
+## When ACL is useful
+
+- One-off exceptions
+- Sensitive records requiring explicit grants/denies
+- Temporary access overrides
+
+## Caution
+
+Avoid relying only on ACL for large systems. Combine with RBAC/ABAC.
diff --git a/docs/models/rbac.md b/docs/models/rbac.md
new file mode 100644
index 0000000..d7f3930
--- /dev/null
+++ b/docs/models/rbac.md
@@ -0,0 +1,35 @@
+# RBAC (Role-Based Access Control)
+
+RBAC grants access based on roles assigned to users.
+
+## Simple idea
+
+- Users have roles (`admin`, `manager`, `viewer`)
+- Roles map to allowed actions
+
+## Example
+
+User:
+
+```json
+{"id": "alice", "role": "manager", "permissions": ["approve_payment"]}
+```
+
+Request:
+
+```json
+{"action": "approve_payment"}
+```
+
+If the role/permissions include the action, decision is `allow`.
+
+## When RBAC works well
+
+- Standard SaaS dashboards
+- Internal admin tooling
+- Stable permission catalogs
+
+## Limitation
+
+RBAC alone cannot express dynamic constraints like "amount < 100000".
+Use ABAC for that.
diff --git a/docs/models/rebac.md b/docs/models/rebac.md
new file mode 100644
index 0000000..1684ba1
--- /dev/null
+++ b/docs/models/rebac.md
@@ -0,0 +1,30 @@
+# ReBAC (Relationship-Based Access Control)
+
+ReBAC grants permissions from relationships between subjects and resources.
+
+## Simple idea
+
+If relationship exists, access may be allowed.
+
+Examples:
+
+- `user:alice` is `owner` of `document:doc-1`
+- `user:bob` is `editor` of `document:doc-1`
+
+Model permission:
+
+```yaml
+permissions:
+ read: owner or editor or viewer
+ write: owner or editor
+```
+
+## When ReBAC works well
+
+- Document sharing
+- Team collaboration tools
+- Hierarchical organizations and graph permissions
+
+## Benefit
+
+ReBAC keeps sharing logic out of application code and inside explicit relationship data.
diff --git a/docs/operations/deployment-docker.md b/docs/operations/deployment-docker.md
new file mode 100644
index 0000000..c81bf9d
--- /dev/null
+++ b/docs/operations/deployment-docker.md
@@ -0,0 +1,86 @@
+---
+title: Docker Deployment
+---
+
+# Docker Deployment
+
+This page covers the Docker deployment assets shipped in this repository.
+
+Docker assets:
+
+- `Dockerfile`
+- `docker-compose.yml`
+- `docker-compose.dev.yml`
+- `infra/docker/start.sh`
+
+## Default Stack
+
+```bash
+docker compose up --build
+```
+
+Services:
+
+- `keynetra` API
+- PostgreSQL
+- Redis
+- Prometheus
+- Grafana
+
+Default exposed ports:
+
+- API: `8000`
+- Postgres: `5432`
+- Redis: `6379`
+- Prometheus: `9090`
+- Grafana: `3000`
+
+## Development Stack
+
+```bash
+docker compose -f docker-compose.dev.yml up --build
+```
+
+Includes source mount and Uvicorn reload.
+
+Use this stack for iterative local development when you need auto-reload behavior.
+
+## Startup Behavior
+
+Container entrypoint script:
+
+1. Runs Alembic migrations if `KEYNETRA_RUN_MIGRATIONS=1`
+2. Renders startup dashboard when enabled
+3. Exports rich logging defaults
+4. Starts Uvicorn workers
+
+Implementation: `infra/docker/start.sh`
+
+## Useful Environment Values
+
+- `KEYNETRA_DATABASE_URL`
+- `KEYNETRA_REDIS_URL`
+- `KEYNETRA_API_KEYS`
+- `KEYNETRA_ADMIN_USERNAME`
+- `KEYNETRA_ADMIN_PASSWORD`
+- `KEYNETRA_UVICORN_WORKERS`
+- `KEYNETRA_LOG_FORMAT=rich`
+- `KEYNETRA_FORCE_COLOR=1`
+
+Example override:
+
+```bash
+KEYNETRA_API_KEYS=devkey KEYNETRA_AUTO_SEED_SAMPLE_DATA=1 docker compose up --build
+```
+
+## Health Endpoints
+
+- `GET /health`
+- `GET /health/live`
+- `GET /health/ready`
+
+## Related Pages
+
+- [Observability](observability.md)
+- [Troubleshooting](troubleshooting.md)
+- [Configuration Files](../reference/configuration-files.md)
diff --git a/docs/operations/deployment-kubernetes.md b/docs/operations/deployment-kubernetes.md
new file mode 100644
index 0000000..a14d3e2
--- /dev/null
+++ b/docs/operations/deployment-kubernetes.md
@@ -0,0 +1,55 @@
+---
+title: Kubernetes and Helm
+---
+
+# Kubernetes and Helm
+
+Kubernetes assets are under `infra/k8s/`.
+
+The included chart is intentionally minimal and should be extended for production environments.
+
+## Helm Chart
+
+Location:
+
+- `infra/k8s/helm/keynetra/`
+
+Key files:
+
+- `Chart.yaml`
+- `values.yaml`
+- `templates/deployment.yaml`
+
+`values.yaml` currently defines image repository/tag and service port. Deployment template provides baseline single-deployment rollout.
+
+## What To Extend Before Production
+
+- environment variables and secret references
+- readiness/liveness probes
+- resource limits/requests
+- rolling update strategy
+- ingress and TLS
+- external database/redis service wiring
+
+## Terraform Directory
+
+`infra/k8s/terraform/README.md` documents intended scope:
+
+- self-hosted modules only
+- no SaaS control-plane infrastructure in this repository
+
+## Production Considerations
+
+For production Kubernetes usage, extend chart values for:
+
+- environment variables and secrets
+- liveness/readiness probes
+- resource requests/limits
+- ingress/network policy
+- external Postgres and Redis connectivity
+
+## Related Pages
+
+- [Docker Deployment](deployment-docker.md)
+- [Security](security.md)
+- [Environment Variables](../reference/environment-variables.md)
diff --git a/docs/operations/observability.md b/docs/operations/observability.md
new file mode 100644
index 0000000..236660d
--- /dev/null
+++ b/docs/operations/observability.md
@@ -0,0 +1,66 @@
+---
+title: Observability
+---
+
+# Observability
+
+KeyNetra includes first-party metrics and structured logging for operational visibility.
+
+Observability components:
+
+- Metrics definitions: `keynetra/observability/metrics.py`
+- Metrics endpoint: `keynetra/api/routes/metrics.py`
+- Logging config: `keynetra/infrastructure/logging.py`
+- Request logging middleware: `keynetra/api/middleware/logging.py`
+
+## Metrics Endpoint
+
+`GET /metrics` returns Prometheus text format (`text/plain; version=0.0.4`).
+
+## Metric Families
+
+From implementation, key metrics include:
+
+- `keynetra_access_checks_total`
+- `keynetra_acl_matches_total`
+- `keynetra_policy_evaluations_total`
+- `keynetra_relationship_traversals_total`
+- `keynetra_policy_compilations_total`
+- `keynetra_revision_updates_total`
+- `keynetra_access_check_latency_seconds`
+- `keynetra_decision_latency_seconds`
+- `keynetra_cache_hits_total`
+- `keynetra_cache_misses_total`
+- `keynetra_cache_events_total`
+- `keynetra_api_errors_total`
+
+These metrics cover authorization decisions, cache behavior, policy/model lifecycle, and API error rates.
+
+## Logging Modes
+
+- JSON logs by default
+- Rich colored logs when `KEYNETRA_LOG_FORMAT=rich`
+
+Docker startup script sets rich mode by default.
+
+Use JSON mode for log aggregation pipelines and rich mode for local operator readability.
+
+## Prometheus and Grafana
+
+Compose stack includes monitoring:
+
+- Prometheus config: `infra/docker/monitoring/prometheus/prometheus.yml`
+- Grafana provisioning: `infra/docker/monitoring/grafana/provisioning/`
+- Dashboards: `infra/docker/monitoring/grafana/dashboards/`
+
+## Quick Validation
+
+```bash
+curl -s http://localhost:8000/metrics | head
+```
+
+## Related Pages
+
+- [Docker Deployment](deployment-docker.md)
+- [Troubleshooting](troubleshooting.md)
+- [API Reference](../reference/api-reference.md)
diff --git a/docs/operations/security.md b/docs/operations/security.md
new file mode 100644
index 0000000..0585bd8
--- /dev/null
+++ b/docs/operations/security.md
@@ -0,0 +1,64 @@
+---
+title: Security
+---
+
+# Security
+
+Security behavior is implemented across config, middleware, and route dependencies.
+
+This page documents the security mechanisms currently implemented in the repository.
+
+## Authentication Methods
+
+- API key header (`X-API-Key`)
+- JWT bearer token
+- Optional OIDC/JWKS token verification
+- Admin login endpoint (`/admin/login`) issuing JWT
+
+Key implementation files:
+
+- `keynetra/config/security.py`
+- `keynetra/config/admin_auth.py`
+- `keynetra/api/routes/admin_auth.py`
+
+## Authorization for Management APIs
+
+Management endpoints enforce tenant role levels:
+
+- viewer
+- developer
+- admin
+
+Role checks are wired through `require_management_role(...)`.
+
+In the current implementation, API keys are treated as admin-level principals on management paths by default.
+
+## Rate Limiting and Idempotency
+
+- Rate limiting middleware: `keynetra/config/rate_limit.py`
+- Idempotency middleware: `keynetra/api/middleware/idempotency.py`
+- Idempotency storage: `keynetra/domain/models/idempotency.py`
+
+## API Version and Request Tracking
+
+- Version negotiation: `X-API-Version` middleware
+- Request IDs and structured request completion logs
+
+## Recommended Operational Baselines
+
+- rotate API keys and JWT secrets regularly
+- use hashed API key mode (`KEYNETRA_API_KEY_HASHES`) in production
+- avoid default admin credentials outside local development
+- run behind TLS-terminating proxy or gateway
+
+## Disclosure Policy
+
+See repository policy:
+
+- `SECURITY.md`
+
+## Related Pages
+
+- [API Reference](../reference/api-reference.md)
+- [Environment Variables](../reference/environment-variables.md)
+- [Troubleshooting](troubleshooting.md)
diff --git a/docs/operations/troubleshooting.md b/docs/operations/troubleshooting.md
new file mode 100644
index 0000000..8a9c6ac
--- /dev/null
+++ b/docs/operations/troubleshooting.md
@@ -0,0 +1,88 @@
+---
+title: Troubleshooting
+---
+
+# Troubleshooting
+
+Use this page for common local and container runtime issues.
+
+## Server Starts Then Exits in Docker
+
+Check:
+
+- `KEYNETRA_DATABASE_URL` connectivity
+- migration failures in `infra/docker/start.sh`
+- worker count and Uvicorn startup logs
+
+Commands:
+
+```bash
+docker compose logs keynetra --tail=200
+docker compose ps
+```
+
+Also verify `KEYNETRA_UVICORN_WORKERS`; high values can fail in constrained environments.
+
+## No Colors in Logs
+
+Set:
+
+- `KEYNETRA_LOG_FORMAT=rich`
+- `KEYNETRA_FORCE_COLOR=1`
+
+For Docker, confirm env values are set in compose service environment.
+
+If output is piped to a non-TTY, some terminals may suppress ANSI colors.
+
+## Startup Screen Not Visible
+
+Startup banner rendering is in `infra/docker/start.sh` and can be disabled with `KEYNETRA_STARTUP_SCREEN=0`.
+
+## Auth Failures
+
+Verify:
+
+- `KEYNETRA_API_KEYS` or `KEYNETRA_API_KEY_HASHES`
+- JWT secret/algorithm match
+- admin credentials (`KEYNETRA_ADMIN_USERNAME`, `KEYNETRA_ADMIN_PASSWORD`)
+
+For API-key authentication, ensure the header name is exactly `X-API-Key`.
+
+## Migration Failures
+
+Run manually:
+
+```bash
+python -m keynetra.cli migrate --confirm-destructive
+```
+
+Review:
+
+- `alembic/env.py`
+- `alembic/versions/`
+
+## Config File Not Applied
+
+Confirm command includes:
+
+```bash
+python -m keynetra.cli serve --config ./keynetra.yaml
+```
+
+Supported file types are YAML/JSON/TOML only.
+
+If CLI still uses old values, verify no conflicting `KEYNETRA_*` variables are exported in your shell.
+
+## Metrics Endpoint Not Available
+
+Verify that service mode includes observability routes and check:
+
+```bash
+curl -i http://localhost:8000/metrics
+```
+
+## Related Pages
+
+- [Docker Deployment](deployment-docker.md)
+- [Configuration Files](../reference/configuration-files.md)
+- [Observability](observability.md)
diff --git a/docs/package-lock.json b/docs/package-lock.json
new file mode 100644
index 0000000..6ab6825
--- /dev/null
+++ b/docs/package-lock.json
@@ -0,0 +1,6 @@
+{
+ "name": "docs",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {}
+}
diff --git a/docs/policies.md b/docs/policies.md
new file mode 100644
index 0000000..5ed605f
--- /dev/null
+++ b/docs/policies.md
@@ -0,0 +1,73 @@
+# Policy Guide
+
+This guide explains policy structure in plain language.
+
+## Policy structure
+
+Key fields you will use most:
+
+- `action`: what operation the policy targets
+- `effect`: `allow` or `deny`
+- `priority`: lower numbers are evaluated first
+- `policy_id` (or key): identifier shown in decision responses
+- `conditions`: attribute checks required for a match
+
+## Example
+
+```yaml
+policies:
+ - action: approve_payment
+ effect: allow
+ priority: 10
+ policy_id: finance-approve-manager-under-limit
+ conditions:
+ role: manager
+ max_amount: 100000
+
+ - action: approve_payment
+ effect: deny
+ priority: 20
+ policy_id: finance-maker-checker-deny
+ conditions:
+ owner_only: true
+```
+
+## Allow and deny logic
+
+- Policies are checked by priority.
+- First matching policy determines outcome.
+- If nothing matches, the system returns deny (safe default).
+
+## Priority rules
+
+- Smaller number = higher priority
+- Use this to place explicit safety denies before broad allows
+
+Example:
+
+- Priority `1`: deny risky operation
+- Priority `10`: allow common trusted flow
+
+## Conditions and attributes
+
+Conditions are matched against request data:
+
+- `user` attributes (`role`, `permissions`)
+- `resource` attributes (`amount`, `owner_id`, `resource_type`)
+- `context` attributes (`department`, `time`)
+
+## Practical tips
+
+- Keep policies small and focused
+- Use clear `policy_id` names so traces are readable
+- Prefer explicit denies for high-risk operations
+- Validate changes with `/simulate-policy` before deployment
+- Run `/impact-analysis` for high-blast-radius updates
+
+## Example workflow
+
+1. Draft policy in YAML
+2. Run `python -m keynetra.cli test-policy <tests-file.yaml>`
+3. Run `/simulate-policy` with representative request
+4. Run `/impact-analysis` to measure user impact
+5. Deploy policy
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 0000000..ee1f6ff
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,92 @@
+# 5-Minute Quickstart
+
+This quickstart is designed for developers who have never used an authorization engine.
+
+## What you will do
+
+1. Start KeyNetra locally
+2. Send one access request
+3. Read the decision and reason
+
+## Prerequisites
+
+- Python 3.11
+- `curl`
+
+## 1) Install and activate environment
+
+```bash
+python3.11 -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+```
+
+## 2) Set an API key and start server
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve
+```
+
+Server runs on `http://localhost:8000`.
+
+## 3) Check health
+
+```bash
+curl -s http://localhost:8000/health | jq .
+```
+
+Expected shape:
+
+```json
+{
+ "data": {"status": "ok"},
+ "meta": {"request_id": "..."},
+ "error": null
+}
+```
+
+## 4) Run first authorization check
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 5000},
+ "context": {"department": "finance"}
+ }' | jq .
+```
+
+Typical response fields:
+
+- `data.allowed`: `true` or `false`
+- `data.decision`: `allow` or `deny`
+- `data.reason`: human-readable reason
+- `data.policy_id`: policy that made the decision
+- `data.explain_trace`: decision trace for debugging
+- `data.revision`: revision token for consistency
+
+## 5) Run a batch check
+
+```bash
+curl -s -X POST http://localhost:8000/check-access-batch \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "alice", "role": "manager", "permissions": ["approve_payment"]},
+ "items": [
+ {"action": "approve_payment", "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 1000}},
+ {"action": "delete", "resource": {"resource_type": "payment", "resource_id": "pay-900"}}
+ ]
+ }' | jq .
+```
+
+## Next
+
+- [API Endpoints](api-endpoints.md)
+- [Authorization Models](models/README.md)
+- [Policies](policies.md)
+- [CLI Guide](cli.md)
diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md
new file mode 100644
index 0000000..f0913e3
--- /dev/null
+++ b/docs/reference/api-reference.md
@@ -0,0 +1,226 @@
+---
+title: API Reference
+---
+
+# API Reference
+
+This page documents the implemented HTTP API surface in this repository.
+
+Implementation entrypoints:
+
+- `keynetra/api/main.py`
+- `keynetra/api/service_modes.py`
+- `keynetra/api/routes/*`
+
+OpenAPI contract:
+
+- `contracts/openapi/keynetra-v0.1.0.yaml`
+
+## Base URL
+
+Local default:
+
+```text
+http://localhost:8000
+```
+
+## Authentication
+
+Supported request auth:
+
+- `X-API-Key: <api-key>`
+- `Authorization: Bearer <jwt-token>`
+- Admin login via `POST /admin/login`
+
+Many management endpoints require elevated roles enforced in route dependencies.
+
+## Service Modes and Endpoint Availability
+
+Configured via `KEYNETRA_SERVICE_MODE`:
+
+- `all`: exposes access and management APIs
+- `access-api`: exposes health/metrics + access endpoints
+- `policy-store`: exposes health/metrics + management endpoints
+- `policy-engine`: exposes health/metrics + access endpoints
+
+If an endpoint is missing in runtime, verify the service mode first.
+
+## Response Envelope
+
+Most endpoints return the standard envelope defined in `keynetra/domain/schemas/api.py`.
+
+Typical success shape:
+
+```json
+{
+ "success": true,
+ "data": {},
+ "request_id": "..."
+}
+```
+
+## Endpoint Groups
+
+### Health and Observability
+
+- `GET /health`
+- `GET /health/live`
+- `GET /health/ready`
+- `GET /metrics`
+
+Example:
+
+```bash
+curl -s http://localhost:8000/health/ready | jq .
+```
+
+### Access Decision
+
+- `POST /check-access`
+- `POST /check-access-batch`
+- `POST /simulate`
+
+Single decision example:
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "u1", "role": "admin"},
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }' | jq .
+```
+
+Batch decision example:
+
+```bash
+curl -s -X POST http://localhost:8000/check-access-batch \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "user": {"id": "u1", "role": "admin"},
+ "items": [
+ {"action": "read", "resource": {"resource_type": "document", "resource_id": "doc-1"}, "context": {}},
+ {"action": "write", "resource": {"resource_type": "document", "resource_id": "doc-1"}, "context": {}}
+ ]
+ }' | jq .
+```
+
+### Policy Simulation and Impact
+
+- `POST /simulate-policy`
+- `POST /impact-analysis`
+
+Example:
+
+```bash
+curl -s -X POST http://localhost:8000/simulate-policy \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "simulate": {
+ "policy_change": "allow:\n action: read\n priority: 10\n policy_key: read-admin\n when:\n role: admin"
+ },
+ "request": {
+ "user": {"id": "u1", "role": "admin"},
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {}
+ }
+ }' | jq .
+```
+
+### Policy Management
+
+- `GET /policies`
+- `POST /policies`
+- `PUT /policies/{policy_key}`
+- `DELETE /policies/{policy_key}`
+- `POST /policies/dsl`
+- `POST /policies/{policy_key}/rollback/{version}`
+
+Create policy example:
+
+```bash
+curl -s -X POST http://localhost:8000/policies \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: devkey" \
+ -d '{
+ "action": "read",
+ "effect": "allow",
+ "priority": 20,
+ "conditions": {"policy_key": "document-read-admin", "role": "admin"}
+ }' | jq .
+```
+
+### RBAC, ACL, Relationships, and Models
+
+RBAC endpoints:
+
+- `GET /roles`
+- `POST /roles`
+- `PUT /roles/{role_id}`
+- `DELETE /roles/{role_id}`
+- `GET /roles/{role_id}/permissions`
+- `POST /roles/{role_id}/permissions`
+- `DELETE /roles/{role_id}/permissions/{permission_id}`
+- `GET /permissions`
+- `POST /permissions`
+- `PUT /permissions/{permission_id}`
+- `DELETE /permissions/{permission_id}`
+- `GET /permissions/{permission_id}/roles`
+
+ACL endpoints:
+
+- `POST /acl`
+- `GET /acl/{resource_type}/{resource_id}`
+- `DELETE /acl/{acl_id}`
+
+Relationship endpoints:
+
+- `GET /relationships`
+- `POST /relationships`
+
+Authorization model endpoints:
+
+- `POST /auth-model`
+- `GET /auth-model`
+
+### Audit, Playground, and Dev Utilities
+
+- `GET /audit`
+- `POST /playground/evaluate`
+- `GET /dev/sample-data`
+- `POST /dev/sample-data/seed`
+
+## Common Error Cases
+
+- `401`: missing or invalid API key/JWT
+- `403`: authenticated but insufficient management role
+- `422`: payload validation error
+- `500`: database or internal processing failure
+
+Inspect `request_id` in error responses to trace logs.
+
+## Versioning and Middleware
+
+Versioning middleware:
+
+- `keynetra/api/middleware/versioning.py`
+
+Other key middleware:
+
+- request id: `keynetra/api/middleware/request_id.py`
+- rate limit: `keynetra/config/rate_limit.py`
+- idempotency: `keynetra/api/middleware/idempotency.py`
+- structured error envelope: `keynetra/api/middleware/errors.py`
+
+## Related Pages
+
+- [CLI Reference](cli-reference.md)
+- [Configuration Files](configuration-files.md)
+- [End-to-End API Example](../examples/end-to-end-api-flow.md)
+- [Security](../operations/security.md)
diff --git a/docs/reference/auth-model-files.md b/docs/reference/auth-model-files.md
new file mode 100644
index 0000000..3a3ce36
--- /dev/null
+++ b/docs/reference/auth-model-files.md
@@ -0,0 +1,86 @@
+---
+title: Authorization Model Files
+---
+
+# Authorization Model Files
+
+Authorization schema model support is implemented in:
+
+- `keynetra/config/file_loaders.py`
+- `keynetra/modeling/schema_parser.py`
+- `keynetra/modeling/model_validator.py`
+- `keynetra/modeling/permission_compiler.py`
+
+Supported file formats:
+
+- `.yaml` / `.yml`
+- `.json`
+- `.toml`
+- `.schema` / `.txt` (raw schema DSL)
+
+These files define relation and permission semantics used by the schema permission stage in authorization evaluation.
+
+## YAML Example
+
+```yaml
+model:
+ schema_version: 1
+ type: document
+ relations:
+ owner: user
+ editor: user
+ permissions:
+ read: owner or editor
+ write: owner
+```
+
+## Generated DSL Shape
+
+Files are normalized to schema DSL with sections like:
+
+```text
+model schema 1
+type user
+type document
+relations
+owner: [user]
+editor: [user]
+permissions
+read = owner or editor
+write = owner
+```
+
+## Runtime Integration
+
+- API startup auto-load via configured `model_paths`.
+- `POST /auth-model` stores and compiles model per tenant.
+- Embedded usage via `KeyNetra.load_model(...)`.
+
+## Minimal DSL Example
+
+```text
+model schema 1
+type user
+type document
+relations
+owner: [user]
+permissions
+read = owner
+```
+
+## Validation Rules
+
+The compiler/validator enforces:
+
+- schema version must be `>= 1`
+- at least one type and permission must exist
+- `user` type must exist
+- relation subjects must reference defined types
+- permission expressions must reference known relations/permissions
+
+## Related Pages
+
+- [Configuration Files](configuration-files.md)
+- [Authorization Pipeline](../architecture/authorization-pipeline.md)
+- [API Reference](api-reference.md)
+- [Policy File Formats](policy-files.md)
diff --git a/docs/reference/cli-reference.md b/docs/reference/cli-reference.md
new file mode 100644
index 0000000..67be36b
--- /dev/null
+++ b/docs/reference/cli-reference.md
@@ -0,0 +1,164 @@
+---
+title: CLI Reference
+---
+
+# CLI Reference
+
+KeyNetra CLI is implemented in `keynetra/cli.py` and built with Typer.
+
+Entrypoint:
+
+```bash
+python -m keynetra.cli --help
+```
+
+## Global Option
+
+- `--config <path>`: load a YAML/JSON/TOML configuration file before executing a command.
+
+## Command Summary
+
+Server and runtime:
+
+- `serve`
+- `start` (backward-compatible alias)
+- `version`
+- `help-cli`
+
+Auth and operations:
+
+- `admin-login`
+- `migrate`
+- `seed-data`
+- `doctor`
+
+Decision workflows:
+
+- `check`
+- `simulate`
+- `impact`
+- `explain`
+- `benchmark`
+
+Policy/model tooling:
+
+- `test-policy`
+- `compile-policies`
+- `model apply`
+- `model show`
+
+ACL tooling:
+
+- `acl add`
+- `acl list`
+- `acl remove`
+
+## Core Workflows
+
+### Start server
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+python -m keynetra.cli serve --host 0.0.0.0 --port 8000
+```
+
+### Check one access request
+
+```bash
+python -m keynetra.cli check \
+ --api-key devkey \
+ --user '{"id":"alice","role":"manager"}' \
+ --action approve_payment \
+ --resource '{"resource_type":"payment","resource_id":"pay-900","amount":5000}' \
+ --context '{"department":"finance"}'
+```
+
+### Simulate a policy change before rollout
+
+```bash
+python -m keynetra.cli simulate \
+ --api-key devkey \
+ --policy-change 'allow:\n action: read\n priority: 10\n policy_key: read-admin\n when:\n role: admin' \
+ --user '{"id":"u1","role":"admin"}' \
+ --action read \
+ --resource '{"resource_type":"document","resource_id":"doc-1"}'
+```
+
+### Estimate policy impact
+
+```bash
+python -m keynetra.cli impact \
+ --api-key devkey \
+ --policy-change 'deny:\n action: export_payment\n priority: 5\n policy_key: deny-export-external\n when:\n role: external'
+```
+
+### Compile policies from configured paths
+
+```bash
+python -m keynetra.cli compile-policies --config docs/examples/assets/keynetra.yaml
+```
+
+### Validate policy tests
+
+```bash
+python -m keynetra.cli test-policy docs/examples/assets/policy_tests.yaml
+```
+
+### Local readiness checks
+
+```bash
+python -m keynetra.cli doctor --service core --config docs/examples/assets/keynetra.yaml
+```
+
+## Model Commands
+
+Apply a schema model:
+
+```bash
+python -m keynetra.cli model apply docs/examples/assets/auth-model.yaml --api-key devkey
+```
+
+Read current model:
+
+```bash
+python -m keynetra.cli model show --api-key devkey
+```
+
+## ACL Commands
+
+Add ACL:
+
+```bash
+python -m keynetra.cli acl add \
+ --subject-type user \
+ --subject-id alice \
+ --resource-type document \
+ --resource-id doc-1 \
+ --action read \
+ --effect allow
+```
+
+List ACL for resource:
+
+```bash
+python -m keynetra.cli acl list --resource-type document --resource-id doc-1
+```
+
+Remove ACL entry:
+
+```bash
+python -m keynetra.cli acl remove --acl-id 1
+```
+
+## Exit Behavior
+
+- Commands raise non-zero exit code on HTTP failure, validation failure, or readiness failure.
+- `test-policy` exits non-zero if any policy test fails.
+- `doctor` exits non-zero when `ok=false`.
+
+## Related Pages
+
+- [Quickstart](../getting-started/quickstart.md)
+- [API Reference](api-reference.md)
+- [Policy File Formats](policy-files.md)
+- [CLI Workflows](../examples/cli-workflows.md)
diff --git a/docs/reference/configuration-files.md b/docs/reference/configuration-files.md
new file mode 100644
index 0000000..7ecab47
--- /dev/null
+++ b/docs/reference/configuration-files.md
@@ -0,0 +1,141 @@
+---
+title: Configuration Files
+---
+
+# Configuration Files
+
+KeyNetra supports YAML, JSON, and TOML configuration files.
+
+Loader implementation:
+
+- `keynetra/config/config_loader.py`
+
+## Precedence
+
+When multiple configuration sources are used, effective settings follow this order:
+
+1. CLI flags (`--host`, `--port`, command-specific options)
+2. Environment variables (`KEYNETRA_*`)
+3. Config file values loaded via `--config`
+4. Built-in defaults in `keynetra/config/settings.py`
+
+## Supported Keys
+
+Top-level keys currently mapped by loader:
+
+- `database.url`
+- `redis.url`
+- `policies.path` and `policies.paths`
+- `models.path` and `models.paths`
+- `policy_paths`
+- `model_paths`
+- `seed_data`
+- `server.host`
+- `server.port`
+
+These are transformed into `KEYNETRA_*` environment variables.
+
+## Field Mapping
+
+| Config Field | Type | Purpose | Mapped Environment Variable |
+| --- | --- | --- | --- |
+| `database.url` | string | SQLAlchemy database URL | `KEYNETRA_DATABASE_URL` |
+| `redis.url` | string | Redis connection URL | `KEYNETRA_REDIS_URL` |
+| `policies.path` / `policies.paths` | string/list | Policy file or directory inputs | `KEYNETRA_POLICY_PATHS` |
+| `policy_paths` | list | Alternate explicit policy list | `KEYNETRA_POLICY_PATHS` |
+| `models.path` / `models.paths` | string/list | Auth model files | `KEYNETRA_MODEL_PATHS` |
+| `model_paths` | list | Alternate explicit model list | `KEYNETRA_MODEL_PATHS` |
+| `seed_data` | bool | Auto-seed sample data in local mode | `KEYNETRA_AUTO_SEED_SAMPLE_DATA` |
+| `server.host` | string | API bind host | `KEYNETRA_SERVER_HOST` |
+| `server.port` | int | API bind port | `KEYNETRA_SERVER_PORT` |
+
+## Example YAML
+
+```yaml
+database:
+ url: postgresql+psycopg://keynetra:keynetra@localhost:5432/keynetra
+
+redis:
+ url: redis://localhost:6379/0
+
+policies:
+ paths:
+ - ./docs/examples/assets/policies
+
+models:
+ path: ./docs/examples/assets/auth-model.yaml
+
+seed_data: false
+
+server:
+ host: 0.0.0.0
+ port: 8000
+```
+
+## Example JSON
+
+```json
+{
+ "database": { "url": "sqlite+pysqlite:///./keynetra.db" },
+ "redis": { "url": "redis://localhost:6379/0" },
+ "policy_paths": ["./docs/examples/assets/policies"],
+ "model_paths": ["./docs/examples/assets/auth-model.yaml"],
+ "seed_data": true,
+ "server": { "host": "0.0.0.0", "port": 8000 }
+}
+```
+
+## Example TOML
+
+```toml
+[database]
+url = "sqlite+pysqlite:///./keynetra.db"
+
+[redis]
+url = "redis://localhost:6379/0"
+
+[policies]
+path = "./docs/examples/assets/policies"
+
+[models]
+path = "./docs/examples/assets/auth-model.yaml"
+
+seed_data = true
+
+[server]
+host = "0.0.0.0"
+port = 8000
+```
+
+## Runtime Usage
+
+API server:
+
+```bash
+python -m keynetra.cli serve --config ./docs/examples/assets/keynetra.yaml
+```
+
+Decision check using the same config:
+
+```bash
+python -m keynetra.cli check \
+ --config ./docs/examples/assets/keynetra.yaml \
+ --api-key devkey \
+ --user '{"id":"u1","role":"admin"}' \
+ --action read \
+ --resource '{"resource_type":"document","resource_id":"doc-1"}'
+```
+
+## Validation Tips
+
+- Use absolute paths in containerized environments.
+- Keep policy/model paths under version control for repeatable deployments.
+- Run `compile-policies` after any policy path change.
+- Run `doctor --service core` before production rollout.
+
+## Related Pages
+
+- [Environment Variables](environment-variables.md)
+- [Policy File Formats](policy-files.md)
+- [Authorization Model Files](auth-model-files.md)
+- [CLI Reference](cli-reference.md)
diff --git a/docs/reference/environment-variables.md b/docs/reference/environment-variables.md
new file mode 100644
index 0000000..00e11b0
--- /dev/null
+++ b/docs/reference/environment-variables.md
@@ -0,0 +1,135 @@
+---
+title: Environment Variables
+---
+
+# Environment Variables
+
+Runtime settings are defined in `keynetra/config/settings.py`. `.env.example` provides baseline values.
+
+This page summarizes runtime variables and gives a production-oriented example block.
+
+## Core Runtime
+
+- `KEYNETRA_ENVIRONMENT`
+- `KEYNETRA_DEBUG`
+- `KEYNETRA_SERVICE_MODE`
+- `KEYNETRA_SERVER_HOST`
+- `KEYNETRA_SERVER_PORT`
+- `KEYNETRA_AUTO_SEED_SAMPLE_DATA`
+
+Purpose:
+
+- environment mode, server bindings, routing mode, and local bootstrap behavior
+
+## Data Stores
+
+- `KEYNETRA_DATABASE_URL`
+- `KEYNETRA_REDIS_URL`
+
+Purpose:
+
+- configure primary persistence (database) and optional distributed cache/event backend (Redis)
+
+## Authentication and Security
+
+- `KEYNETRA_API_KEYS`
+- `KEYNETRA_API_KEY_HASHES`
+- `KEYNETRA_JWT_SECRET`
+- `KEYNETRA_JWT_ALGORITHM`
+- `KEYNETRA_ADMIN_USERNAME`
+- `KEYNETRA_ADMIN_PASSWORD`
+- `KEYNETRA_ADMIN_TOKEN_EXPIRY_MINUTES`
+
+Purpose:
+
+- configure API auth methods and admin login token behavior
+
+## CORS
+
+- `KEYNETRA_CORS_ALLOW_ORIGINS`
+- `KEYNETRA_CORS_ALLOW_ORIGIN_REGEX`
+- `KEYNETRA_CORS_ALLOW_CREDENTIALS`
+- `KEYNETRA_CORS_ALLOW_METHODS`
+- `KEYNETRA_CORS_ALLOW_HEADERS`
+
+Purpose:
+
+- browser-origin controls for web clients
+
+## Policy and Model Loading
+
+- `KEYNETRA_POLICIES_JSON`
+- `KEYNETRA_POLICY_PATHS`
+- `KEYNETRA_MODEL_PATHS`
+
+Purpose:
+
+- configure inline policies or load policies/models from file paths
+
+## Caching and Resilience
+
+- `KEYNETRA_DECISION_CACHE_TTL_SECONDS`
+- `KEYNETRA_SERVICE_TIMEOUT_SECONDS`
+- `KEYNETRA_CRITICAL_RETRY_ATTEMPTS`
+- `KEYNETRA_RESILIENCE_MODE`
+- `KEYNETRA_RESILIENCE_FALLBACK_BEHAVIOR`
+- `KEYNETRA_POLICY_EVENTS_CHANNEL`
+
+Purpose:
+
+- decision-cache tuning, service timeout/retry behavior, and policy event distribution
+
+## Rate Limiting
+
+- `KEYNETRA_RATE_LIMIT_PER_MINUTE`
+- `KEYNETRA_RATE_LIMIT_BURST`
+- `KEYNETRA_RATE_LIMIT_WINDOW_SECONDS`
+
+Purpose:
+
+- configure API request throttling defaults
+
+## OTel and OIDC
+
+- `KEYNETRA_OTEL_ENABLED`
+- `KEYNETRA_OIDC_JWKS_URL`
+- `KEYNETRA_OIDC_AUDIENCE`
+- `KEYNETRA_OIDC_ISSUER`
+
+## Logging
+
+- `KEYNETRA_LOG_FORMAT` (`json` or `rich`)
+- `KEYNETRA_FORCE_COLOR` (`1`/`0`)
+
+## Docker Startup Helpers
+
+- `KEYNETRA_RUN_MIGRATIONS`
+- `KEYNETRA_STARTUP_SCREEN`
+- `KEYNETRA_HOST`
+- `KEYNETRA_PORT`
+- `KEYNETRA_UVICORN_WORKERS`
+
+## Example `.env`
+
+```bash
+KEYNETRA_ENVIRONMENT=production
+KEYNETRA_DATABASE_URL=postgresql+psycopg://keynetra:keynetra@postgres:5432/keynetra
+KEYNETRA_REDIS_URL=redis://redis:6379/0
+KEYNETRA_API_KEYS=devkey
+KEYNETRA_JWT_SECRET=change-me
+KEYNETRA_ADMIN_USERNAME=admin
+KEYNETRA_ADMIN_PASSWORD=admin123
+KEYNETRA_POLICY_PATHS=./docs/examples/assets/policies
+KEYNETRA_MODEL_PATHS=./docs/examples/assets/auth-model.yaml
+KEYNETRA_SERVICE_MODE=all
+KEYNETRA_SERVER_HOST=0.0.0.0
+KEYNETRA_SERVER_PORT=8000
+KEYNETRA_LOG_FORMAT=rich
+KEYNETRA_FORCE_COLOR=1
+```
+
+## Related Pages
+
+- [Configuration Files](configuration-files.md)
+- [Troubleshooting](../operations/troubleshooting.md)
+- [Security](../operations/security.md)
diff --git a/docs/reference/policy-files.md b/docs/reference/policy-files.md
new file mode 100644
index 0000000..cc93d20
--- /dev/null
+++ b/docs/reference/policy-files.md
@@ -0,0 +1,83 @@
+---
+title: Policy File Formats
+---
+
+# Policy File Formats
+
+Policy file loaders are implemented in:
+
+- `keynetra/config/file_loaders.py`
+
+Supported policy formats:
+
+- `.yaml` / `.yml`
+- `.json`
+- `.polar`
+
+Policy files can be loaded from individual files or recursively scanned directories.
+
+## YAML
+
+```yaml
+policies:
+ - action: read
+ effect: allow
+ priority: 10
+ policy_id: document-read-admin
+ conditions:
+ role: admin
+```
+
+Also supported:
+
+```yaml
+allow:
+ action: read
+ priority: 10
+ when:
+ role: admin
+```
+
+## JSON
+
+```json
+[
+ {
+ "action": "approve_payment",
+ "effect": "allow",
+ "priority": 5,
+ "conditions": { "role": "manager", "max_amount": 10000 }
+ }
+]
+```
+
+## Polar-like Flat Rules
+
+```text
+allow action=deploy priority=15 role=ops
+deny action=deploy priority=100
+```
+
+## Loading from Paths
+
+Configured `policy_paths` can be files or directories. Directory paths are scanned recursively for supported extensions.
+
+Priority and conditions are preserved as loaded and compiled into the decision graph.
+
+Runtime hooks:
+
+- CLI compile: `python -m keynetra.cli compile-policies --config ...`
+- API startup bootstrap: `keynetra/api/main.py` (`_bootstrap_file_backed_policies`)
+- Embedded usage: `KeyNetra.load_policies(...)`
+
+## Validation Tips
+
+- Ensure each rule has a non-empty `action`.
+- Use explicit `priority` values for deterministic precedence.
+- Keep condition keys consistent with request payload fields.
+
+## Related Pages
+
+- [Configuration Files](configuration-files.md)
+- [Authorization Pipeline](../architecture/authorization-pipeline.md)
+- [CLI Reference](cli-reference.md)
diff --git a/docs/resources.md b/docs/resources.md
new file mode 100644
index 0000000..a5a5759
--- /dev/null
+++ b/docs/resources.md
@@ -0,0 +1,32 @@
+# Documentation Resources
+
+KeyNetra docs share a unified visual identity. The same `data/imgs/logo.png` graphic anchors:
+
+- `README.md` hero banner
+- `docs/README.md` header overview
+- Every quickstart/reference guide that embeds the logo via `![KeyNetra Logo](data/imgs/logo.png)`
+
+Use this file as the entry point for doc sources, templates, and branding assets.
+
+## Branding asset
+
+- File: `data/imgs/logo.png`
+- Use: hero banner, doc headers, quickstart references
+- Recommended alt text: "KeyNetra Logo"
+
+## Doc sources
+
+- `README.md`: top-level landing
+- `docs/api-endpoints.md`: HTTP contract details
+- `docs/models/`: authorization model explanations
+- `docs/policies.md`: policy structure guidance
+- `docs/use-cases.md`: real-world example scenarios
+- `docs/deep-dive/`: developer manual, code walkthrough, integration cookbook
+
+Each markdown includes the same logo to keep visual continuity.
+
+## When adding new docs
+
+1. Save art in `data/imgs/` and reference via relative path `data/imgs/logo.png`.
+2. Reuse the same hero markup `![KeyNetra Logo](data/imgs/logo.png)` for brand consistency.
+3. Keep doc resources structured under `docs/` so the documentation site can render them uniformly.
diff --git a/docs/testing-guide.md b/docs/testing-guide.md
new file mode 100644
index 0000000..e838842
--- /dev/null
+++ b/docs/testing-guide.md
@@ -0,0 +1,154 @@
+# KeyNetra Verification Guide
+
+This guide verifies KeyNetra end-to-end without any UI.
+
+## 1) Run the test suite
+
+```bash
+PYTHONPATH=. python3.11 -m pytest -q
+```
+
+Coverage audited in `tests/` (the repository does not contain `core/tests/`):
+
+- authorization engine
+- RBAC, ABAC, ACL, relationship-based access (ReBAC)
+- authorization modeling and compiled policy evaluation
+- policy simulation and impact analysis
+- revision tokens and consistency behavior
+- metrics endpoint and cache behavior
+- API contracts
+
+Additional endpoint-level coverage added for:
+
+- `POST /check-access-batch`
+- `POST /simulate`
+- `POST /simulate-policy`
+- `POST /impact-analysis`
+
+## 2) Real-world authorization scenarios
+
+Use:
+
+- `examples/scenarios/real_world_authorization_scenarios.yaml`
+
+Included scenarios:
+
+- Document management system
+- SaaS multi-tenant access
+- Financial approval workflow
+- Team collaboration
+- Admin privilege delegation
+
+Each scenario defines subjects, resources, actions, relationships, roles, policies, and ACL entries.
+
+## 3) Authorization models
+
+Use model examples from:
+
+- `examples/models/document_model.yaml`
+- `examples/models/saas_tenant_model.yaml`
+- `examples/models/finance_model.yaml`
+- `examples/models/team_collaboration_model.yaml`
+- `examples/models/admin_delegation_model.yaml`
+
+## 4) Policy examples
+
+Use policy files from:
+
+- `examples/policies/document_access.yaml`
+- `examples/policies/finance_policy.yaml`
+- `examples/policies/team_access.yaml`
+
+## 5) API request examples
+
+Request payloads for all required endpoints:
+
+- `examples/requests/api_requests.json`
+
+Expected responses:
+
+- `examples/responses/api_expected_responses.json`
+
+### Example calls
+
+```bash
+curl -s -X POST http://localhost:8000/check-access \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: testkey" \
+ -d @<(jq '.["check-access"]' examples/requests/api_requests.json)
+
+curl -s -X POST http://localhost:8000/check-access-batch \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: testkey" \
+ -d @<(jq '.["check-access-batch"]' examples/requests/api_requests.json)
+
+curl -s -X POST http://localhost:8000/simulate \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: testkey" \
+ -d @<(jq '.["simulate"]' examples/requests/api_requests.json)
+
+curl -s -X POST http://localhost:8000/simulate-policy \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: testkey" \
+ -d @<(jq '.["simulate-policy"]' examples/requests/api_requests.json)
+
+curl -s -X POST http://localhost:8000/impact-analysis \
+ -H "Content-Type: application/json" \
+ -H "X-API-Key: testkey" \
+ -d @<(jq '.["impact-analysis"]' examples/requests/api_requests.json)
+```
+
+## 6) CLI verification examples
+
+Use:
+
+- `examples/requests/cli_examples.sh`
+
+Direct commands:
+
+```bash
+keynetra check \
+ --api-key testkey \
+ --user '{"id":"alice","role":"editor","permissions":["approve_payment"]}' \
+ --action read \
+ --resource '{"resource_type":"document","resource_id":"doc-123"}'
+
+keynetra simulate \
+ --api-key testkey \
+ --policy-change 'allow:\n action: share_document\n priority: 1\n policy_key: share-admin\n when:\n role: admin' \
+ --user '{"id":"root-admin","role":"admin","roles":["admin"]}' \
+ --action share_document \
+ --resource '{"resource_type":"document","resource_id":"doc-123"}'
+
+keynetra impact \
+ --api-key testkey \
+ --policy-change 'deny:\n action: export_payment\n priority: 1\n policy_key: deny-export-contractors\n when:\n role: external'
+```
+
+## 7) Developer verification forms
+
+Use structured forms from:
+
+- `examples/forms/developer_verification_forms.json`
+
+Fill one form per test case and compare actual decision vs `expected`.
+
+## 8) Example test datasets
+
+Use these datasets to seed and validate real-world flows:
+
+- `examples/data/users.json`
+- `examples/data/roles.json`
+- `examples/data/relationships.json`
+- `examples/data/acl_entries.json`
+
+## 9) No-UI developer workflow
+
+1. Start API: `keynetra serve`
+2. Run tests: `PYTHONPATH=. python3.11 -m pytest -q`
+3. Replay API payloads from `examples/requests/api_requests.json`
+4. Compare responses to `examples/responses/api_expected_responses.json`
+5. Run CLI checks from `examples/requests/cli_examples.sh`
+6. Validate scenario decisions using `examples/forms/developer_verification_forms.json`
+
+This provides repeatable verification through API, CLI, config files, and datasets only.
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
new file mode 100644
index 0000000..12b5a7f
--- /dev/null
+++ b/docs/troubleshooting.md
@@ -0,0 +1,80 @@
+# Troubleshooting
+
+## 1) `401 unauthorized` on every request
+
+Cause:
+
+- Missing or wrong API key
+
+Fix:
+
+```bash
+export KEYNETRA_API_KEYS=devkey
+curl -H "X-API-Key: devkey" http://localhost:8000/health
+```
+
+## 2) `403 forbidden` on simulation endpoints
+
+Cause:
+
+- Principal does not have required management role
+
+Fix:
+
+- Use API key auth for local testing (`X-API-Key`)
+- Or provide JWT with management claims
+
+## 3) `429 too_many_requests`
+
+Cause:
+
+- Rate limit exceeded
+
+Fix:
+
+```bash
+export KEYNETRA_RATE_LIMIT_PER_MINUTE=1000
+export KEYNETRA_RATE_LIMIT_BURST=1000
+```
+
+## 4) Database errors at startup
+
+Cause:
+
+- Bad `KEYNETRA_DATABASE_URL`
+- Missing local DB permissions
+
+Fix:
+
+```bash
+export KEYNETRA_DATABASE_URL=sqlite+pysqlite:///./keynetra.db
+python -m keynetra.cli serve
+```
+
+## 5) Policy change does not seem to apply
+
+Cause:
+
+- Cache still serving old state
+- Policy not loaded from expected path
+
+Fix:
+
+- Confirm policy path/config values
+- Restart server for local debugging
+- Use `/simulate-policy` to confirm new policy behavior
+
+## 6) Hard to understand deny responses
+
+Fix:
+
+- Use `/simulate` for `failed_conditions`
+- Inspect `reason`, `policy_id`, and `explain_trace`
+
+## 7) CLI command cannot find model/policy file
+
+Fix:
+
+- Use absolute paths first
+- Confirm working directory is repository root
+- Check file extension and content format
diff --git a/docs/use-cases.md b/docs/use-cases.md
new file mode 100644
index 0000000..24cff2d
--- /dev/null
+++ b/docs/use-cases.md
@@ -0,0 +1,83 @@
+# Real-World Use Cases
+
+This page maps common product scenarios to KeyNetra concepts.
+
+## 1) Document Management System
+
+Typical requirements:
+
+- Owners can read/write/delete
+- Editors can read/write
+- Viewers can only read
+- Specific users can be denied sharing for sensitive docs
+
+How KeyNetra helps:
+
+- ReBAC for owner/editor/viewer relationships
+- ACL for per-document exceptions
+- Policy trace for support/debugging
+
+## 2) SaaS Multi-Tenant Platform
+
+Typical requirements:
+
+- User can only access resources in their tenant
+- Tenant admins manage tenant settings
+- Cross-tenant access is denied by default
+
+How KeyNetra helps:
+
+- ABAC (`same_tenant`) checks
+- RBAC for `tenant_admin` vs `tenant_member`
+- Batch checks for dashboards with many widgets
+
+## 3) Financial Approval Workflow
+
+Typical requirements:
+
+- Managers can approve up to a threshold
+- Finance admins approve above threshold
+- Maker-checker separation (owner cannot self-approve)
+
+How KeyNetra helps:
+
+- ABAC for amount-based limits
+- Explicit deny for maker-checker guardrail
+- `/simulate-policy` before rolling out new thresholds
+
+## 4) Team Collaboration System
+
+Typical requirements:
+
+- Maintainers can merge
+- Contributors can comment/read
+- External users cannot merge even if they can view
+
+How KeyNetra helps:
+
+- ReBAC for maintainer/contributor relationships
+- RBAC for external role restrictions
+- ACL exceptions for temporary project access
+
+## 5) Admin Delegation
+
+Typical requirements:
+
+- Root admin can delegate limited rights
+- Delegated admins can grant but not perform all root operations
+- Read-only support users should never mutate policy
+
+How KeyNetra helps:
+
+- RBAC + ABAC for delegated constraints
+- ACL deny entries for protected policy operations
+- Impact analysis before changing admin policies
+
+## Suggested validation process for any use case
+
+1. Define model relations and permissions
+2. Write baseline policies
+3. Add ACL exceptions only where needed
+4. Run `/simulate` and `/simulate-policy`
+5. Run `/impact-analysis`
+6. Add tests for critical rules
diff --git a/examples/auth-model.yaml b/examples/auth-model.yaml
new file mode 100644
index 0000000..2407c5c
--- /dev/null
+++ b/examples/auth-model.yaml
@@ -0,0 +1,9 @@
+model:
+ schema_version: 1
+ type: document
+ relations:
+ owner: user
+ editor: user
+ permissions:
+ read: owner or editor
+ write: owner
diff --git a/examples/data/acl_entries.json b/examples/data/acl_entries.json
new file mode 100644
index 0000000..be8650d
--- /dev/null
+++ b/examples/data/acl_entries.json
@@ -0,0 +1,34 @@
+[
+ {
+ "subject_type": "user",
+ "subject_id": "charlie",
+ "resource_type": "document",
+ "resource_id": "doc-001",
+ "action": "share",
+ "effect": "deny"
+ },
+ {
+ "subject_type": "user",
+ "subject_id": "tenant-a-analyst",
+ "resource_type": "dashboard",
+ "resource_id": "dash-1",
+ "action": "export",
+ "effect": "deny"
+ },
+ {
+ "subject_type": "user",
+ "subject_id": "manager-1",
+ "resource_type": "payment",
+ "resource_id": "pay-900",
+ "action": "reject",
+ "effect": "allow"
+ },
+ {
+ "subject_type": "user",
+ "subject_id": "support-agent",
+ "resource_type": "policy",
+ "resource_id": "billing-guard",
+ "action": "update_policy",
+ "effect": "deny"
+ }
+]
diff --git a/examples/data/relationships.json b/examples/data/relationships.json
new file mode 100644
index 0000000..925cc6f
--- /dev/null
+++ b/examples/data/relationships.json
@@ -0,0 +1,10 @@
+[
+ {"subject_type": "user", "subject_id": "alice", "relation": "owner", "object_type": "document", "object_id": "doc-001"},
+ {"subject_type": "user", "subject_id": "bob", "relation": "editor", "object_type": "document", "object_id": "doc-001"},
+ {"subject_type": "user", "subject_id": "charlie", "relation": "viewer", "object_type": "document", "object_id": "doc-001"},
+ {"subject_type": "user", "subject_id": "manager-1", "relation": "reviewer", "object_type": "payment", "object_id": "pay-900"},
+ {"subject_type": "user", "subject_id": "cfo-1", "relation": "finance_admin", "object_type": "payment", "object_id": "pay-900"},
+ {"subject_type": "user", "subject_id": "contractor", "relation": "commenter", "object_type": "board", "object_id": "team-roadmap"},
+ {"subject_type": "user", "subject_id": "root-admin", "relation": "owner", "object_type": "tenant", "object_id": "acme"},
+ {"subject_type": "user", "subject_id": "ops-admin", "relation": "delegate", "object_type": "tenant", "object_id": "acme"}
+]
diff --git a/examples/data/roles.json b/examples/data/roles.json
new file mode 100644
index 0000000..9c2f270
--- /dev/null
+++ b/examples/data/roles.json
@@ -0,0 +1,9 @@
+[
+ {"name": "document_admin", "permissions": ["read", "write", "delete", "share"]},
+ {"name": "contributor", "permissions": ["read", "write", "comment"]},
+ {"name": "viewer", "permissions": ["read"]},
+ {"name": "manager", "permissions": ["approve_payment", "reject_payment", "read"]},
+ {"name": "finance_admin", "permissions": ["approve_payment", "export_payment", "read"]},
+ {"name": "external", "permissions": ["read", "comment"]},
+ {"name": "admin", "permissions": ["grant", "revoke", "update_policy", "read"]}
+]
diff --git a/examples/data/users.json b/examples/data/users.json
new file mode 100644
index 0000000..fcb88a6
--- /dev/null
+++ b/examples/data/users.json
@@ -0,0 +1,9 @@
+[
+ {"id": "alice", "tenant": "acme", "role": "document_admin", "department": "engineering"},
+ {"id": "bob", "tenant": "acme", "role": "contributor", "department": "engineering"},
+ {"id": "charlie", "tenant": "acme", "role": "viewer", "department": "support"},
+ {"id": "manager-1", "tenant": "acme", "role": "manager", "department": "finance"},
+ {"id": "cfo-1", "tenant": "acme", "role": "finance_admin", "department": "finance"},
+ {"id": "contractor", "tenant": "acme", "role": "external", "department": "vendor"},
+ {"id": "root-admin", "tenant": "acme", "role": "admin", "department": "platform"}
+]
diff --git a/examples/forms/developer_verification_forms.json b/examples/forms/developer_verification_forms.json
new file mode 100644
index 0000000..c75455d
--- /dev/null
+++ b/examples/forms/developer_verification_forms.json
@@ -0,0 +1,84 @@
+[
+ {
+ "scenario": "document_management",
+ "subject": "user:alice",
+ "resource": "document:doc-001",
+ "action": "read",
+ "roles": ["document_admin"],
+ "relationships": ["owner"],
+ "attributes": {
+ "department": "engineering",
+ "tenant": "acme",
+ "classification": "internal"
+ },
+ "expected": {
+ "decision": "allow",
+ "policy_id": "document-read-admin"
+ }
+ },
+ {
+ "scenario": "saas_multi_tenant",
+ "subject": "user:tenant-a-analyst",
+ "resource": "dashboard:dash-1",
+ "action": "export",
+ "roles": ["tenant_analyst"],
+ "relationships": ["member"],
+ "attributes": {
+ "tenant": "acme",
+ "resource_tenant": "acme"
+ },
+ "expected": {
+ "decision": "deny",
+ "policy_id": "acl:export-deny"
+ }
+ },
+ {
+ "scenario": "financial_approval",
+ "subject": "user:manager-1",
+ "resource": "payment:pay-900",
+ "action": "approve_payment",
+ "roles": ["manager"],
+ "relationships": ["reviewer"],
+ "attributes": {
+ "amount": 45000,
+ "owner_id": "approver-1",
+ "department": "finance"
+ },
+ "expected": {
+ "decision": "allow",
+ "policy_id": "finance-approve-manager-under-limit"
+ }
+ },
+ {
+ "scenario": "team_collaboration",
+ "subject": "user:contractor",
+ "resource": "repo:platform-core",
+ "action": "merge",
+ "roles": ["external"],
+ "relationships": ["commenter"],
+ "attributes": {
+ "tenant": "acme",
+ "resource_tenant": "acme"
+ },
+ "expected": {
+ "decision": "deny",
+ "policy_id": "team-external-no-merge"
+ }
+ },
+ {
+ "scenario": "admin_delegation",
+ "subject": "user:ops-admin",
+ "resource": "tenant:acme",
+ "action": "grant",
+ "roles": ["developer"],
+ "relationships": ["delegate"],
+ "attributes": {
+ "delegated_by_admin": true,
+ "tenant": "acme"
+ },
+ "expected": {
+ "decision": "allow",
+ "policy_id": "delegated-ops-limited"
+ }
+ }
+]
diff --git a/examples/keynetra.yaml b/examples/keynetra.yaml
new file mode 100644
index 0000000..3a9ffdf
--- /dev/null
+++ b/examples/keynetra.yaml
@@ -0,0 +1,17 @@
+database:
+ url: sqlite+pysqlite:///./keynetra.db
+
+redis:
+ url: redis://localhost:6379/0
+
+policies:
+ path: ./examples/policies
+
+models:
+ path: ./examples/auth-model.yaml
+
+seed_data: true
+
+server:
+ host: 0.0.0.0
+ port: 8080
diff --git a/examples/models/admin_delegation_model.yaml b/examples/models/admin_delegation_model.yaml
new file mode 100644
index 0000000..10ca899
--- /dev/null
+++ b/examples/models/admin_delegation_model.yaml
@@ -0,0 +1,11 @@
+model:
+ type: tenant_admin
+ relations:
+ owner: user
+ delegate: user
+ auditor: user
+ permissions:
+ read: owner or delegate or auditor
+ grant: owner or delegate
+ revoke: owner
+ update_policy: owner
diff --git a/examples/models/document_model.yaml b/examples/models/document_model.yaml
new file mode 100644
index 0000000..ac9b05f
--- /dev/null
+++ b/examples/models/document_model.yaml
@@ -0,0 +1,12 @@
+model:
+ type: document
+ relations:
+ owner: user
+ editor: user
+ viewer: user
+ approver: user
+ permissions:
+ read: owner or editor or viewer
+ write: owner or editor
+ delete: owner
+ approve: approver or owner
diff --git a/examples/models/finance_model.yaml b/examples/models/finance_model.yaml
new file mode 100644
index 0000000..1487401
--- /dev/null
+++ b/examples/models/finance_model.yaml
@@ -0,0 +1,10 @@
+model:
+ type: payment
+ relations:
+ owner: user
+ reviewer: user
+ finance_admin: user
+ permissions:
+ submit: owner
+ approve: reviewer or finance_admin
+ reject: reviewer or finance_admin
diff --git a/examples/models/saas_tenant_model.yaml b/examples/models/saas_tenant_model.yaml
new file mode 100644
index 0000000..8d9398b
--- /dev/null
+++ b/examples/models/saas_tenant_model.yaml
@@ -0,0 +1,10 @@
+model:
+ type: tenant_resource
+ relations:
+ tenant_admin: user
+ tenant_member: user
+ support_viewer: user
+ permissions:
+ read: tenant_admin or tenant_member or support_viewer
+ manage: tenant_admin
+ export: tenant_admin or tenant_member
diff --git a/examples/models/team_collaboration_model.yaml b/examples/models/team_collaboration_model.yaml
new file mode 100644
index 0000000..68868c1
--- /dev/null
+++ b/examples/models/team_collaboration_model.yaml
@@ -0,0 +1,10 @@
+model:
+ type: team_asset
+ relations:
+ maintainer: user
+ contributor: user
+ commenter: user
+ permissions:
+ read: maintainer or contributor or commenter
+ comment: maintainer or contributor or commenter
+ merge: maintainer
diff --git a/examples/policies/document_access.yaml b/examples/policies/document_access.yaml
new file mode 100644
index 0000000..6b41a8c
--- /dev/null
+++ b/examples/policies/document_access.yaml
@@ -0,0 +1,42 @@
+policies:
+ - action: read
+ effect: allow
+ priority: 10
+ policy_id: document-read-admin
+ conditions:
+ role: admin
+ resource_type: document
+
+ - action: read
+ effect: allow
+ priority: 20
+ policy_id: document-read-editor
+ conditions:
+ relation: editor
+ resource_type: document
+ same_tenant: true
+
+ - action: write
+ effect: allow
+ priority: 30
+ policy_id: document-write-owner
+ conditions:
+ relation: owner
+ resource_type: document
+ owner_only: true
+
+ - action: delete
+ effect: deny
+ priority: 40
+ policy_id: document-delete-protected
+ conditions:
+ resource_type: document
+ resource_attr: { classification: legal_hold }
+
+ - action: share
+ effect: deny
+ priority: 50
+ policy_id: document-share-external
+ conditions:
+ role: external
+ resource_type: document
diff --git a/examples/policies/finance_policy.yaml b/examples/policies/finance_policy.yaml
new file mode 100644
index 0000000..d69bafe
--- /dev/null
+++ b/examples/policies/finance_policy.yaml
@@ -0,0 +1,36 @@
+policies:
+ - action: approve_payment
+ effect: allow
+ priority: 10
+ policy_id: finance-approve-manager-under-limit
+ conditions:
+ role: manager
+ max_amount: 100000
+ owner_only: false
+ resource_type: payment
+
+ - action: approve_payment
+ effect: allow
+ priority: 20
+ policy_id: finance-approve-cfo-over-limit
+ conditions:
+ role: finance_admin
+ min_amount: 100001
+ resource_type: payment
+
+ - action: approve_payment
+ effect: deny
+ priority: 30
+ policy_id: finance-maker-checker-deny
+ conditions:
+ owner_only: true
+ resource_type: payment
+
+ - action: export_payment
+ effect: allow
+ priority: 40
+ policy_id: finance-export-auditor
+ conditions:
+ role: auditor
+ department: finance
+ resource_type: payment
diff --git a/examples/policies/finance_rules.json b/examples/policies/finance_rules.json
new file mode 100644
index 0000000..7ef3214
--- /dev/null
+++ b/examples/policies/finance_rules.json
@@ -0,0 +1,12 @@
+[
+ {
+ "action": "approve_payment",
+ "effect": "allow",
+ "priority": 5,
+ "policy_id": "finance-approve-manager",
+ "conditions": {
+ "role": "manager",
+ "max_amount": 10000
+ }
+ }
+]
diff --git a/examples/policies/ops_rules.polar b/examples/policies/ops_rules.polar
new file mode 100644
index 0000000..a786d1d
--- /dev/null
+++ b/examples/policies/ops_rules.polar
@@ -0,0 +1,2 @@
+allow action=deploy priority=10 role=ops
+deny action=delete priority=20 role=contractor
diff --git a/examples/policies/team_access.yaml b/examples/policies/team_access.yaml
new file mode 100644
index 0000000..953aec1
--- /dev/null
+++ b/examples/policies/team_access.yaml
@@ -0,0 +1,33 @@
+policies:
+ - action: merge
+ effect: allow
+ priority: 10
+ policy_id: team-maintainer-merge
+ conditions:
+ relation: maintainer
+ resource_type: repo
+ same_tenant: true
+
+ - action: comment
+ effect: allow
+ priority: 20
+ policy_id: team-collaborator-comment
+ conditions:
+ relation: contributor
+ resource_type: repo
+
+ - action: read
+ effect: allow
+ priority: 30
+ policy_id: team-viewer-read
+ conditions:
+ role: viewer
+ resource_type: board
+
+ - action: merge
+ effect: deny
+ priority: 40
+ policy_id: team-external-no-merge
+ conditions:
+ role: external
+ resource_type: repo
diff --git a/examples/policy_tests.yaml b/examples/policy_tests.yaml
new file mode 100644
index 0000000..f8f7edf
--- /dev/null
+++ b/examples/policy_tests.yaml
@@ -0,0 +1,37 @@
+policies:
+ - allow:
+ action: approve_payment
+ priority: 10
+ policy_key: approve-manager-owner
+ when:
+ role: manager
+ max_amount: 100000
+ owner_only: true
+ - deny:
+ action: approve_payment
+ priority: 20
+ policy_key: reject-non-owner
+ when:
+ role: manager
+
+tests:
+ - name: manager_can_approve_small_payment
+ input:
+ user:
+ id: 1
+ role: manager
+ action: approve_payment
+ resource:
+ amount: 500
+ owner_id: 1
+ expect: allow
+ - name: non_owner_is_denied_even_if_manager
+ input:
+ user:
+ id: 1
+ role: manager
+ action: approve_payment
+ resource:
+ amount: 500
+ owner_id: 99
+ expect: deny
diff --git a/examples/requests/api_requests.json b/examples/requests/api_requests.json
new file mode 100644
index 0000000..ce1cc6f
--- /dev/null
+++ b/examples/requests/api_requests.json
@@ -0,0 +1,37 @@
+{
+ "check-access": {
+ "user": {"id": "alice", "role": "editor", "permissions": ["approve_payment"]},
+ "action": "read",
+ "resource": {"resource_type": "document", "resource_id": "doc-123", "owner_id": "alice"},
+ "context": {"department": "engineering", "time": "2026-04-06T12:00:00Z"}
+ },
+ "check-access-batch": {
+ "user": {"id": "alice", "role": "editor", "permissions": ["approve_payment"]},
+ "items": [
+ {"action": "read", "resource": {"resource_type": "document", "resource_id": "doc-123"}},
+ {"action": "write", "resource": {"resource_type": "document", "resource_id": "doc-123"}},
+ {"action": "delete", "resource": {"resource_type": "document", "resource_id": "doc-123"}}
+ ],
+ "consistency": "eventual"
+ },
+ "simulate": {
+ "user": {"id": "manager-1", "role": "manager", "permissions": []},
+ "action": "approve_payment",
+ "resource": {"resource_type": "payment", "resource_id": "pay-900", "amount": 45000, "owner_id": "u-12"},
+ "context": {"department": "finance", "time": "2026-04-06T12:00:00Z"}
+ },
+ "simulate-policy": {
+ "simulate": {
+ "policy_change": "allow:\n action: share_document\n priority: 1\n policy_key: share-admin\n when:\n role: admin"
+ },
+ "request": {
+ "user": {"id": "root-admin", "role": "admin", "roles": ["admin"]},
+ "action": "share_document",
+ "resource": {"resource_type": "document", "resource_id": "doc-123"},
+ "context": {"tenant": "acme"}
+ }
+ },
+ "impact-analysis": {
+ "policy_change": "deny:\n action: export_payment\n priority: 1\n policy_key: deny-export-contractors\n when:\n role: external"
+ }
+}
diff --git a/examples/requests/cli_examples.sh b/examples/requests/cli_examples.sh
new file mode 100755
index 0000000..b6ec75b
--- /dev/null
+++ b/examples/requests/cli_examples.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+API_KEY="${API_KEY:-testkey}"
+BASE_URL="${BASE_URL:-http://localhost:8000}"
+
+keynetra check \
+ --api-key "$API_KEY" \
+ --url "$BASE_URL/check-access" \
+ --user '{"id":"alice","role":"editor","permissions":["approve_payment"]}' \
+ --action read \
+ --resource '{"resource_type":"document","resource_id":"doc-123"}' \
+ --context '{"department":"engineering"}'
+
+keynetra simulate \
+ --api-key "$API_KEY" \
+ --url "$BASE_URL/simulate-policy" \
+ --policy-change 'allow:\n action: share_document\n priority: 1\n policy_key: share-admin\n when:\n role: admin' \
+ --user '{"id":"root-admin","role":"admin","roles":["admin"]}' \
+ --action share_document \
+ --resource '{"resource_type":"document","resource_id":"doc-123"}'
+
+keynetra impact \
+ --api-key "$API_KEY" \
+ --url "$BASE_URL/impact-analysis" \
+ --policy-change 'deny:\n action: export_payment\n priority: 1\n policy_key: deny-export-contractors\n when:\n role: external'
+
+keynetra test-policy examples/policy_tests.yaml
+keynetra compile-policies --path examples/policies/document_access.yaml --path examples/policies/finance_policy.yaml
diff --git a/examples/responses/api_expected_responses.json b/examples/responses/api_expected_responses.json
new file mode 100644
index 0000000..9701e3c
--- /dev/null
+++ b/examples/responses/api_expected_responses.json
@@ -0,0 +1,64 @@
+{
+ "check-access": {
+ "data": {
+ "allowed": true,
+ "decision": "allow",
+ "matched_policies": ["rbac:permissions"],
+ "reason": "explicit permission grant",
+ "policy_id": "rbac:permissions",
+ "revision": 1
+ },
+ "meta": {"request_id": ""},
+ "error": null
+ },
+ "check-access-batch": {
+ "data": {
+ "results": [
+ {"action": "read", "allowed": true, "revision": 1},
+ {"action": "write", "allowed": true, "revision": 1},
+ {"action": "delete", "allowed": false, "revision": 1}
+ ],
+ "revision": 1
+ },
+ "meta": {"request_id": ""},
+ "error": null
+ },
+ "simulate": {
+ "data": {
+ "decision": "allow",
+ "matched_policies": ["approve-manager-owner"],
+ "reason": "manager policy conditions matched",
+ "policy_id": "approve-manager-owner",
+ "failed_conditions": [],
+ "revision": 1
+ },
+ "meta": {"request_id": ""},
+ "error": null
+ },
+ "simulate-policy": {
+ "data": {
+ "decision_before": {
+ "allowed": false,
+ "decision": "deny",
+ "reason": "no matching policy",
+ "policy_id": null
+ },
+ "decision_after": {
+ "allowed": true,
+ "decision": "allow",
+ "reason": "policy change grants access",
+ "policy_id": "share-admin"
+ }
+ },
+ "meta": {"request_id": ""},
+ "error": null
+ },
+ "impact-analysis": {
+ "data": {
+ "gained_access": [101, 204],
+ "lost_access": [302]
+ },
+ "meta": {"request_id": ""},
+ "error": null
+ }
+}
diff --git a/examples/scenarios/real_world_authorization_scenarios.yaml b/examples/scenarios/real_world_authorization_scenarios.yaml
new file mode 100644
index 0000000..a990a07
--- /dev/null
+++ b/examples/scenarios/real_world_authorization_scenarios.yaml
@@ -0,0 +1,221 @@
+scenarios:
+ - id: document_management
+ name: Document Management System
+ subjects:
+ - user:alice
+ - user:bob
+ - user:charlie
+ resources:
+ - document:doc-001
+ - document:doc-002
+ actions: [read, write, delete, share]
+ relationships:
+ - subject: user:alice
+ relation: owner
+ resource: document:doc-001
+ - subject: user:bob
+ relation: editor
+ resource: document:doc-001
+ - subject: user:charlie
+ relation: viewer
+ resource: document:doc-001
+ roles:
+ - name: document_admin
+ subjects: [user:alice]
+ - name: contributor
+ subjects: [user:bob]
+ - name: viewer
+ subjects: [user:charlie]
+ policies:
+ - id: doc-owner-full
+ effect: allow
+ action: write
+ conditions: { relation: owner, resource_type: document }
+ - id: doc-editor-write
+ effect: allow
+ action: write
+ conditions: { relation: editor, resource_type: document }
+ - id: doc-viewer-read
+ effect: allow
+ action: read
+ conditions: { relation: viewer, resource_type: document }
+ acl_entries:
+ - resource: document:doc-001
+ action: share
+ effect: deny
+ subject: user:charlie
+
+ - id: saas_multi_tenant
+ name: SaaS Multi-Tenant Access
+ subjects:
+ - user:tenant-a-admin
+ - user:tenant-a-analyst
+ - user:tenant-b-admin
+ resources:
+ - invoice:inv-100
+ - dashboard:dash-1
+ actions: [read, export, manage]
+ relationships:
+ - subject: user:tenant-a-admin
+ relation: member
+ resource: tenant:acme
+ - subject: user:tenant-a-analyst
+ relation: member
+ resource: tenant:acme
+ - subject: user:tenant-b-admin
+ relation: member
+ resource: tenant:globex
+ roles:
+ - name: tenant_admin
+ subjects: [user:tenant-a-admin, user:tenant-b-admin]
+ - name: tenant_analyst
+ subjects: [user:tenant-a-analyst]
+ policies:
+ - id: same-tenant-read
+ effect: allow
+ action: read
+ conditions: { same_tenant: true }
+ - id: tenant-admin-manage
+ effect: allow
+ action: manage
+ conditions: { role: tenant_admin }
+ - id: cross-tenant-deny
+ effect: deny
+ action: read
+ conditions: { same_tenant: false }
+ acl_entries:
+ - resource: dashboard:dash-1
+ action: export
+ effect: deny
+ subject: user:tenant-a-analyst
+
+ - id: financial_approval
+ name: Financial Approval Workflow
+ subjects:
+ - user:approver-1
+ - user:manager-1
+ - user:cfo-1
+ resources:
+ - payment:pay-900
+ actions: [submit, approve, reject]
+ relationships:
+ - subject: user:approver-1
+ relation: owner
+ resource: payment:pay-900
+ - subject: user:manager-1
+ relation: reviewer
+ resource: payment:pay-900
+ roles:
+ - name: employee
+ subjects: [user:approver-1]
+ - name: manager
+ subjects: [user:manager-1]
+ - name: finance_admin
+ subjects: [user:cfo-1]
+ policies:
+ - id: manager-approve-under-limit
+ effect: allow
+ action: approve
+ conditions: { role: manager, max_amount: 100000 }
+ - id: cfo-approve-over-limit
+ effect: allow
+ action: approve
+ conditions: { role: finance_admin, min_amount: 100001 }
+ - id: maker-checker-separation
+ effect: deny
+ action: approve
+ conditions: { owner_only: true }
+ acl_entries:
+ - resource: payment:pay-900
+ action: reject
+ effect: allow
+ subject: user:manager-1
+
+ - id: team_collaboration
+ name: Team-Based Collaboration
+ subjects:
+ - user:lead
+ - user:dev1
+ - user:contractor
+ resources:
+ - board:team-roadmap
+ - repo:platform-core
+ actions: [read, comment, merge]
+ relationships:
+ - subject: user:lead
+ relation: maintainer
+ resource: repo:platform-core
+ - subject: user:dev1
+ relation: contributor
+ resource: repo:platform-core
+ - subject: user:contractor
+ relation: commenter
+ resource: board:team-roadmap
+ roles:
+ - name: team_lead
+ subjects: [user:lead]
+ - name: engineer
+ subjects: [user:dev1]
+ - name: external
+ subjects: [user:contractor]
+ policies:
+ - id: maintainer-merge
+ effect: allow
+ action: merge
+ conditions: { relation: maintainer }
+ - id: contributor-comment
+ effect: allow
+ action: comment
+ conditions: { relation: contributor }
+ - id: external-no-merge
+ effect: deny
+ action: merge
+ conditions: { role: external }
+ acl_entries:
+ - resource: repo:platform-core
+ action: read
+ effect: allow
+ subject: user:contractor
+
+ - id: admin_delegation
+ name: Admin Privilege Delegation
+ subjects:
+ - user:root-admin
+ - user:ops-admin
+ - user:support-agent
+ resources:
+ - tenant:acme
+ - policy:billing-guard
+ actions: [grant, revoke, update_policy, read]
+ relationships:
+ - subject: user:root-admin
+ relation: owner
+ resource: tenant:acme
+ - subject: user:ops-admin
+ relation: delegate
+ resource: tenant:acme
+ roles:
+ - name: admin
+ subjects: [user:root-admin]
+ - name: developer
+ subjects: [user:ops-admin]
+ - name: viewer
+ subjects: [user:support-agent]
+ policies:
+ - id: admin-full-management
+ effect: allow
+ action: update_policy
+ conditions: { role: admin }
+ - id: delegated-ops-limited
+ effect: allow
+ action: grant
+ conditions: { role: developer, delegated_by_admin: true }
+ - id: support-read-only
+ effect: allow
+ action: read
+ conditions: { role: viewer }
+ acl_entries:
+ - resource: policy:billing-guard
+ action: update_policy
+ effect: deny
+ subject: user:support-agent
diff --git a/infra/docker/Dockerfile b/infra/docker/Dockerfile
new file mode 100644
index 0000000..8d4b1b3
--- /dev/null
+++ b/infra/docker/Dockerfile
@@ -0,0 +1,28 @@
+# Runtime image for the KeyNetra API service (non-root, single-stage).
+FROM python:3.11-slim
+
+ENV PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PYTHONPATH=/app
+
+WORKDIR /app
+
+# Dedicated unprivileged user; fixed UID keeps volume ownership stable.
+RUN useradd --create-home --uid 10001 appuser
+
+# NOTE(review): COPY sources are prefixed with `core/`, which assumes the build
+# context is a parent directory holding this repository under `core/`. The paths
+# added by this change set (e.g. `infra/docker/start.sh`, `keynetra/`) are at the
+# repository root — confirm the intended build context before relying on this.
+COPY core/requirements.txt /app/requirements.txt
+RUN pip install --no-cache-dir -r /app/requirements.txt
+
+COPY core/alembic.ini /app/alembic.ini
+COPY core/alembic /app/alembic
+COPY core/keynetra /app/keynetra
+COPY core/infra/docker/start.sh /usr/local/bin/start-keynetra
+
+RUN chmod +x /usr/local/bin/start-keynetra && chown -R appuser:appuser /app
+
+USER appuser
+EXPOSE 8000
+
+# Probe the readiness endpoint with stdlib urllib so no extra tools are needed.
+HEALTHCHECK --interval=30s --timeout=5s --start-period=20s --retries=5 \
+  CMD python -c "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health/ready', timeout=3)"
+
+ENTRYPOINT ["start-keynetra"]
diff --git a/infra/docker/monitoring/grafana/dashboards/keynetra-overview.json b/infra/docker/monitoring/grafana/dashboards/keynetra-overview.json
new file mode 100644
index 0000000..34634d9
--- /dev/null
+++ b/infra/docker/monitoring/grafana/dashboards/keynetra-overview.json
@@ -0,0 +1,64 @@
+{
+ "uid": "keynetra-overview",
+ "title": "KeyNetra Overview",
+ "schemaVersion": 39,
+ "version": 1,
+ "refresh": "30s",
+ "timezone": "browser",
+ "panels": [
+ {
+ "type": "stat",
+ "title": "Access Checks/s",
+ "gridPos": { "h": 8, "w": 8, "x": 0, "y": 0 },
+ "datasource": "Prometheus",
+ "targets": [
+ {
+ "expr": "sum(rate(keynetra_access_checks_total[5m]))",
+ "refId": "A"
+ }
+ ],
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "center",
+ "textMode": "value_and_name"
+ }
+ },
+ {
+ "type": "stat",
+ "title": "Cache Hits/s",
+ "gridPos": { "h": 8, "w": 8, "x": 8, "y": 0 },
+ "datasource": "Prometheus",
+ "targets": [
+ {
+ "expr": "sum(rate(keynetra_cache_hits_total[5m]))",
+ "refId": "A"
+ }
+ ],
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "center",
+ "textMode": "value_and_name"
+ }
+ },
+ {
+ "type": "stat",
+ "title": "Revision Updates/s",
+ "gridPos": { "h": 8, "w": 8, "x": 16, "y": 0 },
+ "datasource": "Prometheus",
+ "targets": [
+ {
+ "expr": "sum(rate(keynetra_revision_updates_total[5m]))",
+ "refId": "A"
+ }
+ ],
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "center",
+ "textMode": "value_and_name"
+ }
+ }
+ ]
+}
diff --git a/infra/docker/monitoring/grafana/provisioning/dashboards/dashboards.yml b/infra/docker/monitoring/grafana/provisioning/dashboards/dashboards.yml
new file mode 100644
index 0000000..668919c
--- /dev/null
+++ b/infra/docker/monitoring/grafana/provisioning/dashboards/dashboards.yml
@@ -0,0 +1,12 @@
+apiVersion: 1
+
+providers:
+ - name: KeyNetra
+ orgId: 1
+ folder: KeyNetra
+ type: file
+ disableDeletion: false
+ allowUiUpdates: true
+ options:
+ path: /var/lib/grafana/dashboards
+
diff --git a/infra/docker/monitoring/grafana/provisioning/datasources/datasource.yml b/infra/docker/monitoring/grafana/provisioning/datasources/datasource.yml
new file mode 100644
index 0000000..96faeb7
--- /dev/null
+++ b/infra/docker/monitoring/grafana/provisioning/datasources/datasource.yml
@@ -0,0 +1,10 @@
+apiVersion: 1
+
+datasources:
+ - name: Prometheus
+ type: prometheus
+ access: proxy
+ url: http://prometheus:9090
+ isDefault: true
+ editable: true
+
diff --git a/infra/docker/monitoring/prometheus/prometheus.yml b/infra/docker/monitoring/prometheus/prometheus.yml
new file mode 100644
index 0000000..1414b6b
--- /dev/null
+++ b/infra/docker/monitoring/prometheus/prometheus.yml
@@ -0,0 +1,11 @@
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+scrape_configs:
+ - job_name: keynetra
+ metrics_path: /metrics
+ static_configs:
+ - targets:
+ - keynetra:8000
+
diff --git a/infra/docker/start.sh b/infra/docker/start.sh
new file mode 100644
index 0000000..1d92c53
--- /dev/null
+++ b/infra/docker/start.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+# Container entrypoint: run migrations, optionally render the startup screen,
+# then exec uvicorn so it becomes PID 1 and receives signals directly.
+set -eu
+
+cd /app
+
+# Run Alembic migrations unless explicitly disabled (KEYNETRA_RUN_MIGRATIONS=0).
+if [ "${KEYNETRA_RUN_MIGRATIONS:-1}" = "1" ]; then
+    alembic -c /app/alembic.ini upgrade head
+fi
+
+# Docker uses uvicorn directly, so render the startup dashboard explicitly.
+if [ "${KEYNETRA_STARTUP_SCREEN:-1}" = "1" ]; then
+    python - <<'PY'
+import os
+from keynetra.cli import _render_startup_screen
+from keynetra.config.settings import get_settings
+
+host = os.getenv("KEYNETRA_HOST", "0.0.0.0")
+port = int(os.getenv("KEYNETRA_PORT", "8000"))
+settings = get_settings()
+_render_startup_screen(
+    host=host,
+    port=port,
+    reload=False,
+    settings=settings,
+    config_path=os.getenv("KEYNETRA_CONFIG"),
+)
+PY
+fi
+
+# Default to rich, colored logs; callers may override either variable.
+export KEYNETRA_LOG_FORMAT="${KEYNETRA_LOG_FORMAT:-rich}"
+export KEYNETRA_FORCE_COLOR="${KEYNETRA_FORCE_COLOR:-1}"
+
+# --proxy-headers + --forwarded-allow-ips trusts X-Forwarded-* from any proxy.
+exec uvicorn keynetra.api.main:app \
+    --host "${KEYNETRA_HOST:-0.0.0.0}" \
+    --port "${KEYNETRA_PORT:-8000}" \
+    --proxy-headers \
+    --forwarded-allow-ips "*" \
+    --workers "${KEYNETRA_UVICORN_WORKERS:-2}"
diff --git a/infra/k8s/helm/keynetra/Chart.yaml b/infra/k8s/helm/keynetra/Chart.yaml
new file mode 100644
index 0000000..80e28b1
--- /dev/null
+++ b/infra/k8s/helm/keynetra/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: keynetra
+description: Helm chart for self-hosted KeyNetra OSS deployments
+type: application
+version: 0.1.0
+appVersion: "0.1.0"
diff --git a/infra/k8s/helm/keynetra/templates/deployment.yaml b/infra/k8s/helm/keynetra/templates/deployment.yaml
new file mode 100644
index 0000000..0b19942
--- /dev/null
+++ b/infra/k8s/helm/keynetra/templates/deployment.yaml
@@ -0,0 +1,19 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: keynetra
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: keynetra
+ template:
+ metadata:
+ labels:
+ app: keynetra
+ spec:
+ containers:
+ - name: keynetra
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ ports:
+ - containerPort: 8000
diff --git a/infra/k8s/helm/keynetra/templates/service.yaml b/infra/k8s/helm/keynetra/templates/service.yaml
new file mode 100644
index 0000000..397ae5a
--- /dev/null
+++ b/infra/k8s/helm/keynetra/templates/service.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: keynetra
+spec:
+ selector:
+ app: keynetra
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: 8000
diff --git a/infra/k8s/helm/keynetra/values.yaml b/infra/k8s/helm/keynetra/values.yaml
new file mode 100644
index 0000000..f4b6ec6
--- /dev/null
+++ b/infra/k8s/helm/keynetra/values.yaml
@@ -0,0 +1,7 @@
+image:
+ repository: ghcr.io/keynetra/core
+ tag: "0.1.0"
+
+service:
+ type: ClusterIP
+ port: 8000
diff --git a/infra/k8s/terraform/README.md b/infra/k8s/terraform/README.md
new file mode 100644
index 0000000..f8fedba
--- /dev/null
+++ b/infra/k8s/terraform/README.md
@@ -0,0 +1,11 @@
+# KeyNetra Core Terraform
+
+This directory is reserved for self-hosted infrastructure modules only.
+
+Allowed examples:
+
+- single-tenant VM deployments
+- self-hosted Kubernetes clusters
+- customer-managed databases and caches
+
+Do not place SaaS control plane or managed multi-tenant infrastructure here.
diff --git a/keynetra/__init__.py b/keynetra/__init__.py
new file mode 100644
index 0000000..695e7f9
--- /dev/null
+++ b/keynetra/__init__.py
@@ -0,0 +1,5 @@
+"""keynetra top-level package."""
+
+from keynetra.headless import KeyNetra
+
+__all__ = ["KeyNetra"]
diff --git a/keynetra/api/__init__.py b/keynetra/api/__init__.py
new file mode 100644
index 0000000..d1e594b
--- /dev/null
+++ b/keynetra/api/__init__.py
@@ -0,0 +1 @@
+"""API routes package."""
diff --git a/keynetra/api/errors.py b/keynetra/api/errors.py
new file mode 100644
index 0000000..d3c98eb
--- /dev/null
+++ b/keynetra/api/errors.py
@@ -0,0 +1,31 @@
+"""Core API error codes and exception helpers."""
+
+from __future__ import annotations
+
+from enum import StrEnum
+from typing import Any
+
+
+class ApiErrorCode(StrEnum):
+    """Stable, machine-readable error codes used in API error envelopes.
+
+    Values are lowercase snake_case strings; clients should branch on these
+    rather than on human-readable messages.
+    """
+
+    BAD_REQUEST = "bad_request"
+    UNAUTHORIZED = "unauthorized"
+    FORBIDDEN = "forbidden"
+    NOT_FOUND = "not_found"
+    CONFLICT = "conflict"
+    TOO_MANY_REQUESTS = "too_many_requests"
+    VALIDATION_ERROR = "validation_error"
+    DATABASE_ERROR = "database_error"
+    INTERNAL_ERROR = "internal_error"
+
+
+class ApiError(Exception):
+    """Structured application error rendered by the global error handler.
+
+    Attributes:
+        status_code: HTTP status to return to the client.
+        code: Stable machine-readable code (see ``ApiErrorCode``).
+        message: Human-readable message placed in the response envelope.
+        details: Optional structured context echoed under ``error.details``.
+    """
+
+    def __init__(
+        self, *, status_code: int, code: ApiErrorCode, message: str, details: Any | None = None
+    ) -> None:
+        self.status_code = status_code
+        self.code = code
+        self.message = message
+        self.details = details
+        super().__init__(message)
diff --git a/keynetra/api/main.py b/keynetra/api/main.py
new file mode 100644
index 0000000..0d262a5
--- /dev/null
+++ b/keynetra/api/main.py
@@ -0,0 +1,146 @@
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+
+from keynetra.api.middleware.admin import AdminAuthorizationContextMiddleware
+from keynetra.api.middleware.errors import register_error_handlers
+from keynetra.api.middleware.idempotency import IdempotencyMiddleware
+from keynetra.api.middleware.logging import RequestLoggingMiddleware
+from keynetra.api.middleware.request_id import RequestIdMiddleware
+from keynetra.api.middleware.versioning import ApiVersionMiddleware
+from keynetra.api.service_modes import router_for_mode
+from keynetra.config.rate_limit import RateLimitMiddleware
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import get_settings
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+from keynetra.engine.compiled.decision_graph import COMPILED_POLICY_STORE
+from keynetra.engine.keynetra_engine import KeyNetraEngine
+from keynetra.engine.model_graph.permission_graph import MODEL_GRAPH_STORE, CompiledPermissionGraph
+from keynetra.infrastructure.cache.policy_cache import build_policy_cache
+from keynetra.infrastructure.logging import configure_json_logging
+from keynetra.infrastructure.storage.session import (
+ create_session_factory,
+ initialize_database,
+)
+from keynetra.modeling.permission_compiler import compile_authorization_schema
+from keynetra.services.seeding import seed_demo_data
+from keynetra.version import version as keynetra_version
+
+
+def create_app() -> FastAPI:
+    """Build and configure the KeyNetra FastAPI application.
+
+    Wires middleware (request id, versioning, logging, admin context, rate
+    limiting, idempotency, CORS), error handlers, the mode-specific router,
+    optional OpenTelemetry instrumentation, a startup bootstrap hook, and the
+    background policy-event subscriber.
+    """
+    configure_json_logging()
+    app = FastAPI(title="KeyNetra", version=keynetra_version)
+    settings = get_settings()
+
+    # Middleware is applied in reverse registration order at request time.
+    app.add_middleware(RequestIdMiddleware)
+    app.add_middleware(ApiVersionMiddleware)
+    app.add_middleware(RequestLoggingMiddleware)
+    app.add_middleware(AdminAuthorizationContextMiddleware)
+    app.add_middleware(RateLimitMiddleware, settings=settings)
+    app.add_middleware(IdempotencyMiddleware, settings=settings)
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=settings.parsed_cors_allow_origins(),
+        allow_origin_regex=settings.cors_allow_origin_regex,
+        allow_credentials=settings.cors_allow_credentials,
+        allow_methods=settings.parsed_cors_allow_methods(),
+        allow_headers=settings.parsed_cors_allow_headers(),
+    )
+    register_error_handlers(app, settings)
+
+    mode = getattr(settings, "service_mode", "all")
+    app.include_router(router_for_mode(mode))
+
+    # OpenTelemetry instrumentation is strictly opt-in and best-effort: the
+    # dependency may not be installed, so failures are swallowed.
+    if getattr(settings, "otel_enabled", False):
+        try:
+            from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
+
+            FastAPIInstrumentor.instrument_app(app)
+        except Exception:
+            pass
+
+    # NOTE(review): `on_event` is deprecated in newer FastAPI in favor of
+    # lifespan handlers — consider migrating when the FastAPI floor allows.
+    @app.on_event("startup")
+    def _bootstrap_sample_data() -> None:
+        # Always initialize storage and file-backed policies/model; demo data
+        # is seeded only in development, when enabled, and in policy-store modes.
+        initialize_database(settings.database_url)
+        _bootstrap_file_backed_policies()
+        _bootstrap_file_backed_model()
+        if settings.environment.strip().lower() not in {"development", "dev", "local"}:
+            return
+        if not getattr(settings, "auto_seed_sample_data", False):
+            return
+        mode = getattr(settings, "service_mode", "all").strip().lower()
+        if mode not in {"all", "policy-store"}:
+            return
+        db = create_session_factory(settings.database_url)()
+        try:
+            seed_demo_data(db)
+        finally:
+            db.close()
+
+    _start_policy_subscriber(app)
+    return app
+
+
+def _start_policy_subscriber(app: FastAPI) -> None:
+    """Start a daemon thread that invalidates the policy cache on Redis events.
+
+    Best-effort: if Redis is unavailable or subscription fails, the function
+    returns silently and the service runs without cross-instance invalidation.
+    """
+    settings = get_settings()
+    policy_cache = build_policy_cache(get_redis())
+    try:
+        import json
+        import threading
+
+        r = get_redis()
+        if r is None:
+            return
+
+        pubsub = r.pubsub()
+        pubsub.subscribe(settings.policy_events_channel)
+
+        def run() -> None:
+            # Long-lived loop; malformed messages are skipped, never fatal.
+            for msg in pubsub.listen():
+                if msg.get("type") != "message":
+                    continue
+                try:
+                    payload = json.loads(msg.get("data"))
+                    tenant_key = payload.get("tenant_key")
+                    if isinstance(tenant_key, str):
+                        policy_cache.invalidate(tenant_key)
+                except Exception:
+                    continue
+
+        # Daemon thread so it never blocks interpreter shutdown.
+        t = threading.Thread(target=run, name="policy-subscriber", daemon=True)
+        t.start()
+        app.state.policy_subscriber = t
+    except Exception:
+        # Deliberate broad catch: subscriber is an optimization, not required.
+        return
+
+
+def _bootstrap_file_backed_model() -> None:
+    """Load an authorization model from configured file paths into the graph store.
+
+    No-op when no model paths are configured; loading/compilation failures are
+    swallowed so a bad file cannot prevent startup (best-effort bootstrap).
+    """
+    settings = get_settings()
+    model_paths = settings.parsed_model_paths()
+    if not model_paths:
+        return
+    try:
+        from keynetra.config.file_loaders import load_authorization_model_from_paths
+
+        schema = load_authorization_model_from_paths(model_paths)
+        if not schema:
+            return
+        compiled = compile_authorization_schema(schema)
+        # File-backed models are registered under the default tenant only.
+        MODEL_GRAPH_STORE.set(
+            DEFAULT_TENANT_KEY,
+            CompiledPermissionGraph(tenant_key=DEFAULT_TENANT_KEY, model=compiled),
+        )
+    except Exception:
+        return
+
+
+def _bootstrap_file_backed_policies() -> None:
+    """Compile file/env-configured policies into the compiled-policy store.
+
+    Best-effort: any load or compile failure is swallowed so startup proceeds.
+    """
+    settings = get_settings()
+    try:
+        policies = settings.load_policies()
+        engine = KeyNetraEngine(policies)
+        # NOTE(review): reaches into the engine's private `_compiled_graph`;
+        # consider exposing a public accessor on KeyNetraEngine instead.
+        COMPILED_POLICY_STORE.set(DEFAULT_TENANT_KEY, 1, engine._compiled_graph)
+    except Exception:
+        return
+
+
+# Module-level ASGI application instance (uvicorn target: `keynetra.api.main:app`).
+app = create_app()
diff --git a/keynetra/api/middleware/__init__.py b/keynetra/api/middleware/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keynetra/api/middleware/admin.py b/keynetra/api/middleware/admin.py
new file mode 100644
index 0000000..7d8e680
--- /dev/null
+++ b/keynetra/api/middleware/admin.py
@@ -0,0 +1,16 @@
+"""Administrative request context middleware."""
+
+from __future__ import annotations
+
+from starlette.middleware.base import BaseHTTPMiddleware
+
+
+class AdminAuthorizationContextMiddleware(BaseHTTPMiddleware):
+    """Tag each request with tenant and management-API context flags.
+
+    Sets ``request.state.requested_tenant_key`` (always ``"default"`` in this
+    OSS build) and ``request.state.is_management_api`` when the path falls
+    under a management prefix. Downstream handlers read these attributes.
+    """
+
+    # Path prefixes that constitute the management surface of the API.
+    _PREFIXES = ("/policies", "/roles", "/permissions", "/relationships", "/playground", "/audit")
+
+    async def dispatch(self, request, call_next):  # type: ignore[override]
+        request.state.requested_tenant_key = "default"
+        request.state.is_management_api = any(
+            request.url.path.startswith(prefix) for prefix in self._PREFIXES
+        )
+        return await call_next(request)
diff --git a/keynetra/api/middleware/errors.py b/keynetra/api/middleware/errors.py
new file mode 100644
index 0000000..1cc3a00
--- /dev/null
+++ b/keynetra/api/middleware/errors.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import logging
+import traceback
+from typing import Any
+
+from fastapi import FastAPI, Request
+from fastapi.exceptions import RequestValidationError
+from fastapi.responses import JSONResponse
+from starlette.exceptions import HTTPException as StarletteHTTPException
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.config.settings import Settings
+from keynetra.infrastructure.logging import log_event
+from keynetra.infrastructure.metrics import record_api_error
+
+
+def _request_id(request: Request) -> str | None:
+    """Return the request id set by RequestIdMiddleware, or None if absent."""
+    return getattr(getattr(request, "state", None), "request_id", None)
+
+
+def register_error_handlers(app: FastAPI, settings: Settings) -> None:
+    """Install the global exception handlers that render the API error envelope.
+
+    Every handler emits ``{"data": None, "error": {code, message, details}}``,
+    records an error metric, and (for ApiError/unhandled errors) logs a
+    structured event. Internal details are exposed only when ``settings.debug``.
+    """
+    logger = logging.getLogger("keynetra.errors")
+
+    @app.exception_handler(ApiError)
+    async def api_exception_handler(request: Request, exc: ApiError) -> JSONResponse:
+        # Domain errors carry their own status/code/message/details.
+        record_api_error(code=str(exc.code))
+        log_event(
+            logger,
+            event="api_error",
+            code=str(exc.code),
+            message=exc.message,
+            request_id=_request_id(request),
+            tenant_id="default",
+        )
+        payload: dict[str, Any] = {
+            "data": None,
+            "error": {"code": exc.code, "message": exc.message, "details": exc.details},
+        }
+        return JSONResponse(status_code=exc.status_code, content=payload)
+
+    @app.exception_handler(StarletteHTTPException)
+    async def http_exception_handler(request: Request, exc: StarletteHTTPException) -> JSONResponse:
+        # Map framework HTTP statuses onto stable API error codes; anything
+        # unmapped falls back to BAD_REQUEST.
+        code_map = {
+            400: ApiErrorCode.BAD_REQUEST,
+            401: ApiErrorCode.UNAUTHORIZED,
+            403: ApiErrorCode.FORBIDDEN,
+            404: ApiErrorCode.NOT_FOUND,
+            409: ApiErrorCode.CONFLICT,
+            429: ApiErrorCode.TOO_MANY_REQUESTS,
+        }
+        payload: dict[str, Any] = {
+            "data": None,
+            "error": {
+                "code": code_map.get(exc.status_code, ApiErrorCode.BAD_REQUEST),
+                "message": str(exc.detail),
+                "details": None,
+            },
+        }
+        record_api_error(code=str(payload["error"]["code"]))
+        return JSONResponse(status_code=exc.status_code, content=payload)
+
+    @app.exception_handler(RequestValidationError)
+    async def validation_exception_handler(
+        request: Request, exc: RequestValidationError
+    ) -> JSONResponse:
+        # Pydantic/FastAPI validation failures: surface field errors verbatim.
+        record_api_error(code=str(ApiErrorCode.VALIDATION_ERROR))
+        payload: dict[str, Any] = {
+            "data": None,
+            "error": {
+                "code": ApiErrorCode.VALIDATION_ERROR,
+                "message": "invalid request",
+                "details": exc.errors(),
+            },
+        }
+        return JSONResponse(status_code=422, content=payload)
+
+    @app.exception_handler(Exception)
+    async def unhandled_exception_handler(request: Request, exc: Exception) -> JSONResponse:
+        # Last-resort handler: log full traceback, return an opaque 500.
+        rid = _request_id(request)
+        record_api_error(code=str(ApiErrorCode.INTERNAL_ERROR))
+        log_event(
+            logger,
+            event="unhandled_exception",
+            request_id=rid,
+            tenant_id="default",
+            error=repr(exc),
+            traceback="".join(traceback.format_exception(type(exc), exc, exc.__traceback__)),
+        )
+        payload: dict[str, Any] = {
+            "data": None,
+            "error": {
+                "code": ApiErrorCode.INTERNAL_ERROR,
+                "message": "internal server error",
+                "details": None,
+            },
+        }
+        # Only leak exception details to clients in debug mode.
+        if settings.debug:
+            payload["error"]["details"] = repr(exc)
+        return JSONResponse(status_code=500, content=payload)
diff --git a/keynetra/api/middleware/idempotency.py b/keynetra/api/middleware/idempotency.py
new file mode 100644
index 0000000..8421057
--- /dev/null
+++ b/keynetra/api/middleware/idempotency.py
@@ -0,0 +1,126 @@
+"""API-layer idempotency handling for targeted write endpoints."""
+
+from __future__ import annotations
+
+import hashlib
+from typing import Callable
+
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.requests import Request
+from starlette.responses import JSONResponse, Response
+
+from keynetra.api.responses import request_id_from_state
+from keynetra.config.settings import Settings
+from keynetra.infrastructure.repositories.idempotency import SqlIdempotencyRepository
+from keynetra.infrastructure.storage.session import create_session_factory, initialize_database
+
+
+class IdempotencyMiddleware(BaseHTTPMiddleware):
+ """Replays the first completed response for duplicate write requests."""
+
+ _target_paths = {
+ ("POST", "/policies"),
+ ("POST", "/policies/dsl"),
+ ("POST", "/relationships"),
+ }
+
+ def __init__(self, app, settings: Settings) -> None: # type: ignore[override]
+ super().__init__(app)
+ initialize_database(settings.database_url)
+ self._session_factory = create_session_factory(settings.database_url)
+
+ async def dispatch(
+ self, request: Request, call_next: Callable[[Request], Response]
+ ) -> Response:
+ if (request.method.upper(), request.url.path) not in self._target_paths:
+ return await call_next(request)
+
+ idempotency_key = request.headers.get("Idempotency-Key")
+ if not idempotency_key:
+ return await call_next(request)
+
+ body = await request.body()
+ scope = f"{request.method.upper()}:{request.url.path}"
+ request_hash = hashlib.sha256(b"\n".join([scope.encode("utf-8"), body])).hexdigest()
+
+ db = self._session_factory()
+ try:
+ repository = SqlIdempotencyRepository(db)
+ start = repository.start(
+ scope=scope, idempotency_key=idempotency_key, request_hash=request_hash
+ )
+ if start.outcome == "mismatch":
+ return JSONResponse(
+ status_code=409,
+ content={
+ "data": None,
+ "error": {
+ "code": "conflict",
+ "message": "idempotency key reused with a different request",
+ "details": {"idempotency_key": idempotency_key},
+ },
+ },
+ )
+ if start.outcome == "pending":
+ return JSONResponse(
+ status_code=409,
+ content={
+ "data": None,
+ "error": {
+ "code": "conflict",
+ "message": "request with this idempotency key is still in progress",
+ "details": {"idempotency_key": idempotency_key},
+ },
+ },
+ )
+ if start.outcome == "replay":
+ response = Response(
+ content=start.response_body or "",
+ status_code=start.status_code or 200,
+ media_type=start.content_type or "application/json",
+ )
+ response.headers["X-Idempotent-Replayed"] = "true"
+ response.headers["X-Idempotency-Key"] = idempotency_key
+ return response
+ finally:
+ db.close()
+
+ response = await call_next(request)
+ response_body = await _collect_body(response)
+ replayable = _clone_response(response=response, body=response_body)
+ if start.record_id is not None and response.status_code < 500:
+ db = self._session_factory()
+ try:
+ SqlIdempotencyRepository(db).complete(
+ record_id=start.record_id,
+ status_code=response.status_code,
+ response_body=response_body.decode("utf-8"),
+ content_type=response.media_type,
+ )
+ finally:
+ db.close()
+ replayable.headers["X-Idempotency-Key"] = idempotency_key
+ replayable.headers["X-Request-Id"] = request_id_from_state(
+ request.state
+ ) or replayable.headers.get("X-Request-Id", "")
+ return replayable
+
+
+async def _collect_body(response: Response) -> bytes:
+    """Return the full response body, draining a streaming body if needed.
+
+    Note: consuming ``body_iterator`` exhausts the original response; callers
+    must rebuild a response from the returned bytes (see ``_clone_response``).
+    """
+    if hasattr(response, "body") and response.body is not None:
+        return bytes(response.body)
+    body = b""
+    async for chunk in response.body_iterator:
+        body += chunk
+    return body
+
+
+def _clone_response(*, response: Response, body: bytes) -> Response:
+    """Rebuild a plain Response from a (possibly drained) response and its body.
+
+    Copies status, headers, media type, and background task so the clone is
+    interchangeable with the original for the client.
+    """
+    headers = dict(response.headers)
+    return Response(
+        content=body,
+        status_code=response.status_code,
+        headers=headers,
+        media_type=response.media_type,
+        background=response.background,
+    )
diff --git a/keynetra/api/middleware/logging.py b/keynetra/api/middleware/logging.py
new file mode 100644
index 0000000..a5f35be
--- /dev/null
+++ b/keynetra/api/middleware/logging.py
@@ -0,0 +1,36 @@
+from __future__ import annotations
+
+import logging
+import time
+from typing import Callable
+
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.requests import Request
+from starlette.responses import Response
+
+from keynetra.infrastructure.logging import log_event
+
+
+class RequestLoggingMiddleware(BaseHTTPMiddleware):
+    """Emit one structured log line per request.
+
+    Logs method, path, status, wall-clock duration (ms), and the request id
+    set by RequestIdMiddleware (None if that middleware did not run first).
+    """
+
+    def __init__(self, app) -> None:  # type: ignore[override]
+        super().__init__(app)
+        self._logger = logging.getLogger("keynetra.request")
+
+    async def dispatch(
+        self, request: Request, call_next: Callable[[Request], Response]
+    ) -> Response:
+        # perf_counter for monotonic, high-resolution duration measurement.
+        start = time.perf_counter()
+        response = await call_next(request)
+        log_event(
+            self._logger,
+            event="request_completed",
+            method=request.method,
+            path=request.url.path,
+            status_code=response.status_code,
+            duration_ms=round((time.perf_counter() - start) * 1000, 3),
+            request_id=getattr(request.state, "request_id", None),
+            tenant_id="default",
+        )
+        return response
diff --git a/keynetra/api/middleware/request_id.py b/keynetra/api/middleware/request_id.py
new file mode 100644
index 0000000..3fa6267
--- /dev/null
+++ b/keynetra/api/middleware/request_id.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+import secrets
+from typing import Callable
+
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.requests import Request
+from starlette.responses import Response
+
+
+class RequestIdMiddleware(BaseHTTPMiddleware):
+    """
+    Ensures every request has a request id (for tracing/log correlation).
+    - Accepts inbound `X-Request-Id` if present
+    - Otherwise generates a short, URL-safe id
+    - Echoes it back on responses as `X-Request-Id`
+    """
+
+    # Header used both to accept inbound ids and to echo them back.
+    header_name = "X-Request-Id"
+
+    async def dispatch(
+        self, request: Request, call_next: Callable[[Request], Response]
+    ) -> Response:
+        # secrets.token_urlsafe(10) -> ~14-char URL-safe id; unpredictable but short.
+        request_id = request.headers.get(self.header_name) or secrets.token_urlsafe(10)
+        # Downstream middleware/handlers read this via request.state.request_id.
+        request.state.request_id = request_id
+        response = await call_next(request)
+        response.headers[self.header_name] = request_id
+        return response
diff --git a/keynetra/api/middleware/versioning.py b/keynetra/api/middleware/versioning.py
new file mode 100644
index 0000000..319e59e
--- /dev/null
+++ b/keynetra/api/middleware/versioning.py
@@ -0,0 +1,57 @@
+"""API version negotiation middleware."""
+
+from __future__ import annotations
+
+import logging
+from typing import Callable
+
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.requests import Request
+from starlette.responses import JSONResponse, Response
+
+from keynetra.infrastructure.logging import log_event
+
+
+class ApiVersionMiddleware(BaseHTTPMiddleware):
+    """Resolves request API version from `X-API-Version`.
+
+    Missing/blank headers default to ``latest_version``; an unsupported value
+    is rejected with a 400 error envelope. The resolved version is stored on
+    ``request.state.api_version`` and echoed back on the response header.
+    """
+
+    header_name = "X-API-Version"
+    latest_version = "v1"
+    supported_versions = {"v1"}
+
+    async def dispatch(
+        self, request: Request, call_next: Callable[[Request], Response]
+    ) -> Response:
+        # Fall back to latest both when the header is absent and when it is blank.
+        requested_version = (
+            request.headers.get(self.header_name, self.latest_version).strip()
+            or self.latest_version
+        )
+        if requested_version not in self.supported_versions:
+            return JSONResponse(
+                status_code=400,
+                content={
+                    "data": None,
+                    "error": {
+                        "code": "bad_request",
+                        "message": "unsupported api version",
+                        "details": {
+                            "requested_version": requested_version,
+                            "supported_versions": sorted(self.supported_versions),
+                        },
+                    },
+                },
+            )
+
+        request.state.api_version = requested_version
+        log_event(
+            logging.getLogger("keynetra.api_version"),
+            event="api_version_used",
+            api_version=requested_version,
+            path=request.url.path,
+            method=request.method,
+            request_id=getattr(request.state, "request_id", None),
+            tenant_id="default",
+        )
+        response = await call_next(request)
+        # Echo the negotiated version so clients can confirm what was applied.
+        response.headers[self.header_name] = requested_version
+        return response
diff --git a/keynetra/api/pagination.py b/keynetra/api/pagination.py
new file mode 100644
index 0000000..7b757d0
--- /dev/null
+++ b/keynetra/api/pagination.py
@@ -0,0 +1,41 @@
+"""Cursor pagination helpers for stable API list endpoints."""
+
+from __future__ import annotations
+
+import base64
+import json
+from typing import Any
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+
+
+def encode_cursor(payload: dict[str, Any]) -> str:
+    """Encode an opaque cursor payload.
+
+    Serializes with sorted keys and compact separators so equal payloads always
+    produce the same cursor string, then URL-safe base64-encodes the result.
+    Inverse of ``decode_cursor``.
+    """
+
+    raw = json.dumps(payload, separators=(",", ":"), sort_keys=True).encode("utf-8")
+    return base64.urlsafe_b64encode(raw).decode("ascii")
+
+
+def decode_cursor(cursor: str | None) -> dict[str, Any] | None:
+    """Decode an opaque cursor payload or raise a validation error.
+
+    Returns None for a missing/empty cursor. Raises ``ApiError`` (422,
+    VALIDATION_ERROR) when the cursor is not valid base64/JSON or does not
+    decode to a JSON object.
+    """
+
+    if not cursor:
+        return None
+    try:
+        raw = base64.urlsafe_b64decode(cursor.encode("ascii"))
+        decoded = json.loads(raw.decode("utf-8"))
+    except Exception as exc:
+        # Any decode failure (bad base64, bad UTF-8, bad JSON) is a client error.
+        raise ApiError(
+            status_code=422,
+            code=ApiErrorCode.VALIDATION_ERROR,
+            message="invalid cursor",
+            details={"cursor": cursor},
+        ) from exc
+    if not isinstance(decoded, dict):
+        raise ApiError(
+            status_code=422,
+            code=ApiErrorCode.VALIDATION_ERROR,
+            message="invalid cursor",
+            details={"cursor": cursor},
+        )
+    return decoded
diff --git a/keynetra/api/responses.py b/keynetra/api/responses.py
new file mode 100644
index 0000000..afad988
--- /dev/null
+++ b/keynetra/api/responses.py
@@ -0,0 +1,28 @@
+"""Response helpers for standardized API envelopes."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from keynetra.domain.schemas.api import MetaBody
+
+
def success_response(
    *,
    data: Any,
    request_id: str | None = None,
    limit: int | None = None,
    next_cursor: str | None = None,
    meta: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Wrap *data* in the standard ``{data, meta, error}`` success envelope."""

    meta_body = MetaBody(
        request_id=request_id,
        limit=limit,
        next_cursor=next_cursor,
        extra=meta or {},
    )
    return {"data": data, "meta": meta_body.model_dump(), "error": None}
+
+
def request_id_from_state(state: Any) -> str | None:
    """Return the request id stashed on ``request.state``, or ``None`` if absent."""
    try:
        return state.request_id
    except AttributeError:
        return None
diff --git a/keynetra/api/router.py b/keynetra/api/router.py
new file mode 100644
index 0000000..e2cc540
--- /dev/null
+++ b/keynetra/api/router.py
@@ -0,0 +1,6 @@
from fastapi import APIRouter

from keynetra.api.service_modes import router_for_mode

# Backward-compatible full router alias; canonical routing lives in service_modes.py.
# NOTE(review): built once at import time — presumably fine since mode "all" is
# static; confirm no module imports this before service_modes is ready.
api_router: APIRouter = router_for_mode("all")
diff --git a/keynetra/api/routes/__init__.py b/keynetra/api/routes/__init__.py
new file mode 100644
index 0000000..fb0a2f8
--- /dev/null
+++ b/keynetra/api/routes/__init__.py
@@ -0,0 +1 @@
+"""API route modules."""
diff --git a/keynetra/api/routes/access.py b/keynetra/api/routes/access.py
new file mode 100644
index 0000000..216e603
--- /dev/null
+++ b/keynetra/api/routes/access.py
@@ -0,0 +1,229 @@
+"""HTTP transport for authorization checks.
+
+The API layer validates transport concerns and delegates orchestration to the
+service layer. It does not perform policy evaluation or persistence logic.
+"""
+
+from __future__ import annotations
+
+import logging
+
+from fastapi import APIRouter, Depends, Request, status
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.redis_client import get_redis
+from keynetra.config.security import get_principal
+from keynetra.config.settings import Settings, get_settings
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+from keynetra.domain.schemas.access import (
+ AccessDecisionResponse,
+ AccessRequest,
+ BatchAccessRequest,
+ BatchAccessResponse,
+ BatchAccessResult,
+ SimulationResponse,
+)
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.cache.acl_cache import build_acl_cache
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.cache.policy_cache import build_policy_cache
+from keynetra.infrastructure.cache.relationship_cache import build_relationship_cache
+from keynetra.infrastructure.repositories.acl import SqlACLRepository
+from keynetra.infrastructure.repositories.audit import SqlAuditRepository
+from keynetra.infrastructure.repositories.auth_models import SqlAuthModelRepository
+from keynetra.infrastructure.repositories.policies import SqlPolicyRepository
+from keynetra.infrastructure.repositories.relationships import SqlRelationshipRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.repositories.users import SqlUserRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.attribute_validation import AttributeValidationError
+from keynetra.services.authorization import AuthorizationService
+
+router = APIRouter()
+logger = logging.getLogger("keynetra.access")
+
+
def get_authorization_service(
    settings: Settings = Depends(get_settings),
    db: Session = Depends(get_db),
) -> AuthorizationService:
    """Assemble the request-scoped authorization service from its repositories and caches."""

    redis_client = get_redis()
    # Group the SQL-backed repositories and the Redis-backed caches so the
    # constructor call reads as two cohesive bundles.
    repositories = dict(
        tenants=SqlTenantRepository(db),
        policies=SqlPolicyRepository(db),
        users=SqlUserRepository(db),
        relationships=SqlRelationshipRepository(db),
        audit=SqlAuditRepository(db),
        acl_repository=SqlACLRepository(db),
        auth_model_repository=SqlAuthModelRepository(db),
    )
    caches = dict(
        policy_cache=build_policy_cache(redis_client),
        relationship_cache=build_relationship_cache(redis_client),
        decision_cache=build_decision_cache(redis_client),
        acl_cache=build_acl_cache(redis_client),
        access_index_cache=build_access_index_cache(redis_client),
    )
    return AuthorizationService(settings=settings, **repositories, **caches)
+
+
@router.post(
    "/check-access",
    response_model=SuccessResponse[AccessDecisionResponse],
    dependencies=[Depends(get_principal)],
)
def check_access(
    payload: AccessRequest,
    request: Request,
    service: AuthorizationService = Depends(get_authorization_service),
    principal: dict[str, str] = Depends(get_principal),
) -> dict[str, object]:
    """Evaluate one authorization request and return the decision envelope.

    Maps service-layer validation failures to 422 and database failures to a
    generic 500, hiding storage details from clients.
    """

    try:
        outcome = service.authorize(
            tenant_key=DEFAULT_TENANT_KEY,
            principal=principal,
            user=payload.user,
            action=payload.action,
            resource=payload.resource,
            context=payload.context,
            consistency=payload.consistency,
            revision=payload.revision,
        )
    except AttributeValidationError as exc:
        raise ApiError(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            code=ApiErrorCode.VALIDATION_ERROR,
            message=str(exc),
        ) from exc
    except SQLAlchemyError as exc:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from exc

    logger.info(
        "decision user=%s action=%s result=%s cached=%s principal=%s",
        payload.user.get("id"),
        payload.action,
        outcome.decision.decision.upper(),
        outcome.cached,
        principal.get("type"),
    )
    decision = outcome.decision
    body = AccessDecisionResponse(
        allowed=decision.allowed,
        decision=decision.decision,
        matched_policies=list(decision.matched_policies),
        reason=decision.reason,
        policy_id=decision.policy_id,
        explain_trace=[step.to_dict() for step in decision.explain_trace],
        revision=outcome.revision,
    )
    return success_response(
        data=body.model_dump(), request_id=request_id_from_state(request.state)
    )
+
+
@router.post(
    "/simulate",
    response_model=SuccessResponse[SimulationResponse],
    dependencies=[Depends(get_principal)],
)
def simulate(
    payload: AccessRequest,
    request: Request,
    service: AuthorizationService = Depends(get_authorization_service),
    principal: dict[str, str] = Depends(get_principal),
) -> dict[str, object]:
    """Run an authorization request through the service's simulate path.

    Returns the full decision trace, including failed conditions, plus the
    tenant's current revision.
    """

    try:
        outcome = service.simulate(
            tenant_key=DEFAULT_TENANT_KEY,
            principal=principal,
            user=payload.user,
            action=payload.action,
            resource=payload.resource,
            context=payload.context,
        )
    except AttributeValidationError as exc:
        raise ApiError(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            code=ApiErrorCode.VALIDATION_ERROR,
            message=str(exc),
        ) from exc
    except SQLAlchemyError as exc:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from exc

    logger.info(
        "simulate user=%s action=%s result=%s principal=%s",
        payload.user.get("id"),
        payload.action,
        outcome.decision.upper(),
        principal.get("type"),
    )
    body = SimulationResponse(
        decision=outcome.decision,
        matched_policies=list(outcome.matched_policies),
        reason=outcome.reason,
        policy_id=outcome.policy_id,
        explain_trace=[step.to_dict() for step in outcome.explain_trace],
        failed_conditions=list(outcome.failed_conditions),
        revision=service.get_revision(tenant_key=DEFAULT_TENANT_KEY),
    )
    return success_response(
        data=body.model_dump(), request_id=request_id_from_state(request.state)
    )
+
+
@router.post(
    "/check-access-batch",
    response_model=SuccessResponse[BatchAccessResponse],
    dependencies=[Depends(get_principal)],
)
def check_access_batch(
    payload: BatchAccessRequest,
    request: Request,
    service: AuthorizationService = Depends(get_authorization_service),
    principal: dict[str, str] = Depends(get_principal),
) -> dict[str, object]:
    """Evaluate several access checks for one user in a single request.

    Service-layer validation failures map to 422 and database failures to a
    generic 500 envelope, mirroring the single-check endpoint.
    """

    try:
        results = service.authorize_batch(
            tenant_key=DEFAULT_TENANT_KEY,
            principal=principal,
            user=payload.user,
            items=[item.model_dump() for item in payload.items],
            consistency=payload.consistency,
            revision=payload.revision,
        )
    except AttributeValidationError as error:
        raise ApiError(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            code=ApiErrorCode.VALIDATION_ERROR,
            message=str(error),
        ) from error
    except SQLAlchemyError as error:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from error

    logger.info(
        "batch user=%s items=%s principal=%s",
        payload.user.get("id"),
        len(payload.items),
        principal.get("type"),
    )
    return success_response(
        data=BatchAccessResponse(
            results=[
                BatchAccessResult(
                    action=item.action, allowed=result.decision.allowed, revision=result.revision
                ).model_dump()
                # NOTE(review): strict=False silently truncates if the service
                # ever returns a different number of results than items —
                # confirm authorize_batch guarantees one result per item.
                for item, result in zip(payload.items, results, strict=False)
            ],
            # First result's revision stands in for the batch; presumably all
            # results share one revision — verify against the service.
            revision=results[0].revision if results else None,
        ).model_dump(),
        request_id=request_id_from_state(request.state),
    )
diff --git a/keynetra/api/routes/acl.py b/keynetra/api/routes/acl.py
new file mode 100644
index 0000000..6cdaf63
--- /dev/null
+++ b/keynetra/api/routes/acl.py
@@ -0,0 +1,159 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.redis_client import get_redis
+from keynetra.config.security import get_principal
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.management import ACLCreate, ACLOut
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.cache.acl_cache import build_acl_cache
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.repositories.acl import SqlACLRepository
+from keynetra.infrastructure.repositories.relationships import SqlRelationshipRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.access_indexer import AccessIndexer
+from keynetra.services.revisions import RevisionService
+
+router = APIRouter(prefix="/acl", dependencies=[Depends(get_principal)])
+
+
def get_acl_dependencies(
    db: Session = Depends(get_db),
) -> tuple[SqlTenantRepository, SqlACLRepository, AccessIndexer]:
    """Build the tenant repo, ACL repo, and access indexer for one request."""

    redis_client = get_redis()
    acl_repository = SqlACLRepository(db)
    # The indexer shares the ACL repository so invalidations see the same session.
    indexer = AccessIndexer(
        acl_repository=acl_repository,
        acl_cache=build_acl_cache(redis_client),
        access_index_cache=build_access_index_cache(redis_client),
        relationships=SqlRelationshipRepository(db),
    )
    return SqlTenantRepository(db), acl_repository, indexer
+
+
@router.post("", response_model=SuccessResponse[ACLOut], status_code=status.HTTP_201_CREATED)
def create_acl_entry(
    payload: ACLCreate,
    request: Request,
    deps: tuple[SqlTenantRepository, SqlACLRepository, AccessIndexer] = Depends(
        get_acl_dependencies
    ),
    access: AdminAccess = Depends(require_management_role("developer")),
) -> dict[str, object]:
    """Create an ACL entry, invalidate the affected caches, and bump the revision.

    Raises 422 for an invalid effect and 500 for storage failures.
    """

    # Validate transport input before get_or_create so an invalid request
    # cannot implicitly create the tenant row as a side effect.
    if payload.effect not in {"allow", "deny"}:
        raise ApiError(
            status_code=422,
            code=ApiErrorCode.VALIDATION_ERROR,
            message="effect must be allow or deny",
        )
    tenant_repo, acl_repo, indexer = deps
    tenant = tenant_repo.get_or_create(access.tenant_key)
    try:
        acl_id = acl_repo.create_acl_entry(
            tenant_id=tenant.id,
            subject_type=payload.subject_type,
            subject_id=payload.subject_id,
            resource_type=payload.resource_type,
            resource_id=payload.resource_id,
            action=payload.action,
            effect=payload.effect,
        )
        created = acl_repo.get_acl_entry(tenant_id=tenant.id, acl_id=acl_id)
        # The resource-scoped index and the tenant-wide decision cache both change.
        indexer.invalidate_resource(
            tenant_id=tenant.id,
            resource_type=payload.resource_type,
            resource_id=payload.resource_id,
        )
        build_decision_cache(get_redis()).bump_namespace(tenant.tenant_key)
        RevisionService(tenant_repo).bump_revision(tenant_key=tenant.tenant_key)
    except SQLAlchemyError as error:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from error
    return success_response(
        data=ACLOut(
            id=acl_id,
            tenant_id=tenant.id,
            created_at=None if created is None else created.created_at,
            **payload.model_dump(),
        ).model_dump(),
        request_id=request_id_from_state(request.state),
    )
+
+
@router.get("/{resource_type}/{resource_id}", response_model=SuccessResponse[list[ACLOut]])
def list_acl_entries(
    resource_type: str,
    resource_id: str,
    request: Request,
    deps: tuple[SqlTenantRepository, SqlACLRepository, AccessIndexer] = Depends(
        get_acl_dependencies
    ),
    access: AdminAccess = Depends(require_management_role("viewer")),
) -> dict[str, object]:
    """Return every ACL entry attached to one resource in the caller's tenant."""

    tenant_repo, acl_repo, _indexer = deps
    tenant = tenant_repo.get_or_create(access.tenant_key)
    try:
        entries = acl_repo.list_resource_acl(
            tenant_id=tenant.id, resource_type=resource_type, resource_id=resource_id
        )
    except SQLAlchemyError as exc:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from exc
    serialized = [
        ACLOut(
            id=entry.id,
            tenant_id=entry.tenant_id,
            subject_type=entry.subject_type,
            subject_id=entry.subject_id,
            resource_type=entry.resource_type,
            resource_id=entry.resource_id,
            action=entry.action,
            effect=entry.effect,
            created_at=entry.created_at,
        ).model_dump()
        for entry in entries
    ]
    return success_response(
        data=serialized, request_id=request_id_from_state(request.state)
    )
+
+
@router.delete("/{acl_id}", response_model=SuccessResponse[dict[str, int]])
def delete_acl_entry(
    acl_id: int,
    request: Request,
    deps: tuple[SqlTenantRepository, SqlACLRepository, AccessIndexer] = Depends(
        get_acl_dependencies
    ),
    access: AdminAccess = Depends(require_management_role("admin")),
) -> dict[str, object]:
    """Delete an ACL entry and invalidate the affected caches.

    Idempotent: deleting a missing entry still returns success rather than 404.
    The entry is fetched first only so the resource-scoped index can be
    invalidated when it did exist.
    """

    tenant_repo, acl_repo, indexer = deps
    tenant = tenant_repo.get_or_create(access.tenant_key)
    try:
        target = acl_repo.get_acl_entry(tenant_id=tenant.id, acl_id=acl_id)
        acl_repo.delete_acl_entry(tenant_id=tenant.id, acl_id=acl_id)
        if target is not None:
            indexer.invalidate_resource(
                tenant_id=tenant.id,
                resource_type=target.resource_type,
                resource_id=target.resource_id,
            )
        # Decision cache and tenant revision are bumped even for no-op deletes.
        build_decision_cache(get_redis()).bump_namespace(tenant.tenant_key)
        RevisionService(tenant_repo).bump_revision(tenant_key=tenant.tenant_key)
    except SQLAlchemyError as error:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from error
    return success_response(
        data={"acl_id": acl_id}, request_id=request_id_from_state(request.state)
    )
diff --git a/keynetra/api/routes/admin_auth.py b/keynetra/api/routes/admin_auth.py
new file mode 100644
index 0000000..5ff6c2c
--- /dev/null
+++ b/keynetra/api/routes/admin_auth.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import hmac
+from datetime import UTC, datetime, timedelta
+
+from fastapi import APIRouter, Depends, Request, status
+from jose import jwt
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.settings import Settings, get_settings
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.management import AdminLoginRequest, AdminLoginResponse
+
+router = APIRouter(prefix="/admin")
+
+
@router.post("/login", response_model=SuccessResponse[AdminLoginResponse], tags=["auth"])
def admin_login(
    payload: AdminLoginRequest,
    request: Request,
    settings: Settings = Depends(get_settings),
) -> dict[str, object]:
    """Exchange the configured admin credentials for a short-lived admin JWT.

    Returns 403 when admin login is not configured and 401 on bad credentials.
    """

    username = settings.admin_username
    password = settings.admin_password

    if not username or not password:
        raise ApiError(
            status_code=status.HTTP_403_FORBIDDEN,
            code=ApiErrorCode.FORBIDDEN,
            message="admin login is disabled",
        )

    # Compare UTF-8 bytes: hmac.compare_digest raises TypeError on non-ASCII
    # str inputs, which would turn a bad (non-ASCII) credential into a 500.
    valid_username = hmac.compare_digest(
        payload.username.encode("utf-8"), username.encode("utf-8")
    )
    valid_password = hmac.compare_digest(
        payload.password.encode("utf-8"), password.encode("utf-8")
    )
    # Both comparisons run before branching so timing stays uniform.
    if not (valid_username and valid_password):
        raise ApiError(
            status_code=status.HTTP_401_UNAUTHORIZED,
            code=ApiErrorCode.UNAUTHORIZED,
            message="invalid admin credentials",
        )

    # Token lifetime is clamped to at least one minute.
    expires_at = datetime.now(UTC) + timedelta(minutes=max(1, settings.admin_token_expiry_minutes))
    token = jwt.encode(
        {
            "sub": payload.username,
            "role": "admin",
            "admin_role": "admin",
            "tenant_roles": {DEFAULT_TENANT_KEY: "admin"},
            "exp": int(expires_at.timestamp()),
        },
        settings.jwt_secret,
        algorithm=settings.jwt_algorithm,
    )
    return success_response(
        data=AdminLoginResponse(
            access_token=token,
            expires_in=max(1, settings.admin_token_expiry_minutes) * 60,
            tenant_key=DEFAULT_TENANT_KEY,
        ).model_dump(),
        request_id=request_id_from_state(request.state),
    )
diff --git a/keynetra/api/routes/audit.py b/keynetra/api/routes/audit.py
new file mode 100644
index 0000000..ef201fb
--- /dev/null
+++ b/keynetra/api/routes/audit.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+from fastapi import APIRouter, Depends, Request
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.pagination import decode_cursor
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.management import AuditRecordOut
+from keynetra.infrastructure.repositories.audit import SqlAuditRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+
+router = APIRouter(prefix="/audit")
+
+
@router.get("", response_model=SuccessResponse[list[AuditRecordOut]])
def list_audit_logs(
    request: Request,
    db: Session = Depends(get_db),
    access: AdminAccess = Depends(require_management_role("viewer")),
    limit: int = 50,
    cursor: str | None = None,
    user_id: str | None = None,
    resource_id: str | None = None,
    decision: str | None = None,
    start_time: datetime | None = None,
    end_time: datetime | None = None,
) -> dict[str, object]:
    """List the tenant's audit records as a cursor-paginated, filterable page."""

    if not 1 <= limit <= 100:
        raise ApiError(
            status_code=422,
            code=ApiErrorCode.VALIDATION_ERROR,
            message="limit must be between 1 and 100",
        )
    tenant = SqlTenantRepository(db).get_or_create(access.tenant_key)
    try:
        records, next_cursor = SqlAuditRepository(db).list_page(
            tenant_id=tenant.id,
            limit=limit,
            cursor=decode_cursor(cursor),
            user_id=user_id,
            resource_id=resource_id,
            decision=decision,
            start_time=start_time,
            end_time=end_time,
        )
    except SQLAlchemyError as exc:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from exc
    page = [AuditRecordOut(**record.__dict__).model_dump(mode="json") for record in records]
    return success_response(
        data=page,
        request_id=request_id_from_state(request.state),
        limit=limit,
        next_cursor=next_cursor,
    )
diff --git a/keynetra/api/routes/auth_model.py b/keynetra/api/routes/auth_model.py
new file mode 100644
index 0000000..f03526b
--- /dev/null
+++ b/keynetra/api/routes/auth_model.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.security import get_principal
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.modeling import AuthModelCreate, AuthModelOut
+from keynetra.engine.model_graph.permission_graph import MODEL_GRAPH_STORE, CompiledPermissionGraph
+from keynetra.infrastructure.repositories.auth_models import SqlAuthModelRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.modeling import (
+ compile_authorization_schema,
+ parse_authorization_schema,
+ validate_authorization_schema,
+)
+from keynetra.services.revisions import RevisionService
+
+router = APIRouter(prefix="/auth-model", dependencies=[Depends(get_principal)])
+
+
@router.post("", response_model=SuccessResponse[AuthModelOut], status_code=status.HTTP_201_CREATED)
def create_auth_model(
    payload: AuthModelCreate,
    request: Request,
    db: Session = Depends(get_db),
    access: AdminAccess = Depends(require_management_role("developer")),
) -> dict[str, object]:
    """Parse, validate, compile, and store the tenant's authorization model.

    On success the compiled graph is pushed into the in-process model store and
    the tenant revision is bumped. Parse/validate/compile errors map to 422.
    """

    tenant_repo = SqlTenantRepository(db)
    repo = SqlAuthModelRepository(db)
    tenant = tenant_repo.get_or_create(access.tenant_key)
    try:
        schema = parse_authorization_schema(payload.schema_text)
        validate_authorization_schema(schema)
        compiled = compile_authorization_schema(schema)
        record = repo.upsert_model(
            tenant_id=tenant.id,
            schema_text=payload.schema_text,
            schema_json={
                "version": schema.version,
                "types": list(schema.types),
                "relations": {name: list(subjects) for name, subjects in schema.relations.items()},
                # NOTE(review): maps each permission name to itself — looks like
                # a placeholder for richer permission metadata; confirm intent.
                "permissions": {name: name for name in schema.permissions},
            },
            compiled_json=compiled.to_dict(),
        )
        MODEL_GRAPH_STORE.set(
            access.tenant_key, CompiledPermissionGraph(tenant_key=access.tenant_key, model=compiled)
        )
        RevisionService(tenant_repo).bump_revision(tenant_key=access.tenant_key)
    except ValueError as error:
        # The modeling helpers signal schema problems via ValueError.
        raise ApiError(
            status_code=422, code=ApiErrorCode.VALIDATION_ERROR, message=str(error)
        ) from error
    except SQLAlchemyError as error:
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from error
    return success_response(
        data=AuthModelOut(
            id=record.id,
            tenant_id=record.tenant_id,
            schema_text=record.schema_text,
            parsed=record.schema_json,
            compiled=record.compiled_json,
        ).model_dump(by_alias=True),
        request_id=request_id_from_state(request.state),
    )
+
+
@router.get("", response_model=SuccessResponse[AuthModelOut])
def get_auth_model(
    request: Request,
    db: Session = Depends(get_db),
    access: AdminAccess = Depends(require_management_role("viewer")),
) -> dict[str, object]:
    """Return the tenant's stored authorization model, or 404 when none exists."""

    tenant = SqlTenantRepository(db).get_or_create(access.tenant_key)
    record = SqlAuthModelRepository(db).get_model(tenant_id=tenant.id)
    if record is None:
        raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="auth model not found")
    model = AuthModelOut(
        id=record.id,
        tenant_id=record.tenant_id,
        schema_text=record.schema_text,
        parsed=record.schema_json,
        compiled=record.compiled_json,
    )
    return success_response(
        data=model.model_dump(by_alias=True),
        request_id=request_id_from_state(request.state),
    )
diff --git a/keynetra/api/routes/dev.py b/keynetra/api/routes/dev.py
new file mode 100644
index 0000000..f7e816d
--- /dev/null
+++ b/keynetra/api/routes/dev.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Query, Request
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.sample_data import sample_bootstrap_document
+from keynetra.config.settings import Settings, get_settings
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.seeding import seed_demo_data
+
+router = APIRouter(prefix="/dev")
+
+
+def _require_local_dev(settings: Settings) -> None:
+ if settings.environment.strip().lower() not in {"development", "dev", "local"}:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="not found")
+
+
@router.get("/sample-data", response_model=SuccessResponse[dict[str, object]])
def get_sample_data(
    request: Request,
    settings: Settings = Depends(get_settings),
) -> dict[str, object]:
    """Return the bundled sample bootstrap document (local development only)."""

    _require_local_dev(settings)
    document = sample_bootstrap_document()
    return success_response(
        data=document,
        request_id=request_id_from_state(request.state),
    )
+
+
@router.post("/sample-data/seed", response_model=SuccessResponse[dict[str, object]])
def seed_sample_data(
    request: Request,
    db: Session = Depends(get_db),
    settings: Settings = Depends(get_settings),
    reset: bool = Query(False, description="Clear the sample dataset before reseeding it."),
) -> dict[str, object]:
    """Seed the demo dataset and return a summary of what was created.

    NOTE(review): unlike sibling routers there is no auth dependency and no
    SQLAlchemyError-to-ApiError mapping here — presumably acceptable because
    ``_require_local_dev`` hides the route outside local dev; confirm.
    """

    _require_local_dev(settings)
    summary = seed_demo_data(db, reset=reset)
    return success_response(
        data={
            "tenant_key": summary.tenant_key,
            "created_tenant": summary.created_tenant,
            "created_user": summary.created_user,
            "created_role": summary.created_role,
            "created_permissions": summary.created_permissions,
            "created_relationships": summary.created_relationships,
            "created_policies": summary.created_policies,
        },
        request_id=request_id_from_state(request.state),
    )
diff --git a/keynetra/api/routes/health.py b/keynetra/api/routes/health.py
new file mode 100644
index 0000000..a17fa5a
--- /dev/null
+++ b/keynetra/api/routes/health.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from fastapi.responses import JSONResponse
+from sqlalchemy import text
+
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import Settings, get_settings
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.infrastructure.storage.session import create_engine_for_url
+
+router = APIRouter()
+
+
@router.get("/health", response_model=SuccessResponse[dict[str, str]])
def health(request: Request) -> dict[str, object]:
    """Basic health endpoint; always reports OK while the process serves requests."""
    body = {"status": "ok"}
    return success_response(data=body, request_id=request_id_from_state(request.state))
+
+
@router.get("/health/live", response_model=SuccessResponse[dict[str, str]])
def liveness(request: Request) -> dict[str, object]:
    """Liveness probe; unconditionally OK so restarts only happen on process death."""
    return success_response(
        data={"status": "ok"},
        request_id=request_id_from_state(request.state),
    )
+
+
@router.get("/health/ready", response_model=SuccessResponse[dict[str, object]])
def readiness(request: Request, settings: Settings = Depends(get_settings)) -> JSONResponse:
    """Aggregate dependency checks into 200 (ok) or 503 (degraded)."""

    checks = {
        "database": _check_database(settings),
        "redis": _check_redis(settings),
    }
    database_ok = checks["database"]["status"] == "ok"
    # Redis is optional: "not_configured" still counts as healthy.
    redis_ok = checks["redis"]["status"] in {"ok", "not_configured"}
    healthy = database_ok and redis_ok
    body = success_response(
        data={"status": "ok" if healthy else "degraded", "checks": checks},
        request_id=request_id_from_state(request.state),
    )
    http_status = status.HTTP_200_OK if healthy else status.HTTP_503_SERVICE_UNAVAILABLE
    return JSONResponse(status_code=http_status, content=body)
+
+
def _check_database(settings: Settings) -> dict[str, str]:
    """Probe the configured database with a trivial query.

    Returns ``{"status": "ok"}`` on success, or ``{"status": "error",
    "detail": ...}``; the probe itself never raises.
    """

    engine = None
    try:
        engine = create_engine_for_url(settings.database_url)
        with engine.connect() as connection:
            connection.execute(text("SELECT 1"))
        return {"status": "ok"}
    except Exception as exc:
        return {"status": "error", "detail": repr(exc)}
    finally:
        # Dispose the throwaway engine so each readiness probe does not leak
        # a fresh connection pool.
        if engine is not None:
            engine.dispose()
+
+
+def _check_redis(settings: Settings) -> dict[str, str]:
+ if not settings.redis_url:
+ return {"status": "not_configured"}
+
+ client = get_redis()
+ if client is None:
+ return {"status": "error", "detail": "redis client unavailable"}
+
+ try:
+ client.ping()
+ return {"status": "ok"}
+ except Exception as exc:
+ return {"status": "error", "detail": repr(exc)}
diff --git a/keynetra/api/routes/metrics.py b/keynetra/api/routes/metrics.py
new file mode 100644
index 0000000..db9f6e4
--- /dev/null
+++ b/keynetra/api/routes/metrics.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+from fastapi import APIRouter
+from fastapi.responses import Response
+from prometheus_client import generate_latest
+
+router = APIRouter()
+
+
@router.get("/metrics", include_in_schema=False)
def metrics() -> Response:
    """Expose Prometheus metrics in text exposition format 0.0.4 (hidden from OpenAPI)."""
    return Response(content=generate_latest(), media_type="text/plain; version=0.0.4")
diff --git a/keynetra/api/routes/permissions.py b/keynetra/api/routes/permissions.py
new file mode 100644
index 0000000..1426c02
--- /dev/null
+++ b/keynetra/api/routes/permissions.py
@@ -0,0 +1,183 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from sqlalchemy import and_, delete, or_, select
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.pagination import decode_cursor, encode_cursor
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.redis_client import get_redis
+from keynetra.config.security import get_principal
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+from keynetra.domain.models.rbac import Permission, Role, role_permissions
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.management import (
+ PermissionCreate,
+ PermissionOut,
+ PermissionUpdate,
+ RoleOut,
+)
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.revisions import RevisionService
+
+router = APIRouter(prefix="/permissions", dependencies=[Depends(get_principal)])
+
+
@router.get("", response_model=SuccessResponse[list[PermissionOut]])
def list_permissions(
    request: Request,
    db: Session = Depends(get_db),
    _: AdminAccess = Depends(require_management_role("viewer")),
    limit: int = 50,
    cursor: str | None = None,
) -> dict[str, object]:
    """List permissions as a cursor-paginated page ordered by (action, id)."""

    if limit < 1 or limit > 100:
        raise ApiError(
            status_code=422,
            code=ApiErrorCode.VALIDATION_ERROR,
            message="limit must be between 1 and 100",
        )
    decoded = decode_cursor(cursor)
    query = select(Permission)
    if decoded is not None:
        # A structurally valid cursor may still carry wrong keys or types;
        # surface that as 422 instead of an unhandled KeyError/ValueError (500).
        try:
            cursor_action = str(decoded["action"])
            cursor_id = int(decoded["id"])
        except (KeyError, TypeError, ValueError) as error:
            raise ApiError(
                status_code=422,
                code=ApiErrorCode.VALIDATION_ERROR,
                message="invalid cursor",
                details={"cursor": cursor},
            ) from error
        query = query.where(
            or_(
                Permission.action > cursor_action,
                and_(Permission.action == cursor_action, Permission.id > cursor_id),
            )
        )
    try:
        # Fetch one extra row to detect whether another page exists.
        perms = (
            db.execute(
                query.order_by(Permission.action.asc(), Permission.id.asc()).limit(limit + 1)
            )
            .scalars()
            .all()
        )
    except SQLAlchemyError as error:
        # Consistent with the sibling endpoints' database-error mapping.
        raise ApiError(
            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
        ) from error
    has_next = len(perms) > limit
    page = perms[:limit]
    next_cursor = (
        encode_cursor({"action": page[-1].action, "id": page[-1].id}) if has_next and page else None
    )
    return success_response(
        data=[PermissionOut(id=p.id, action=p.action).model_dump() for p in page],
        request_id=request_id_from_state(request.state),
        limit=limit,
        next_cursor=next_cursor,
    )
+
+
@router.post("", response_model=PermissionOut, status_code=status.HTTP_201_CREATED)
def create_permission(
    payload: PermissionCreate,
    db: Session = Depends(get_db),
    _: AdminAccess = Depends(require_management_role("admin")),
) -> PermissionOut:
    """Create a new permission action; 409 when the action already exists.

    NOTE(review): this endpoint returns a bare PermissionOut instead of the
    SuccessResponse envelope used elsewhere — confirm clients expect that.
    """

    from sqlalchemy.exc import IntegrityError  # local: keeps file-level imports untouched

    existing = (
        db.execute(select(Permission).where(Permission.action == payload.action)).scalars().first()
    )
    if existing:
        raise ApiError(
            status_code=409, code=ApiErrorCode.CONFLICT, message="permission already exists"
        )
    perm = Permission(action=payload.action)
    try:
        db.add(perm)
        db.commit()
        db.refresh(perm)
        build_access_index_cache(get_redis()).invalidate_global()
        RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
    except IntegrityError as e:
        # Insert race lost between the existence check and the commit (assumes a
        # unique constraint on Permission.action — confirm in the model):
        # report a 409 conflict instead of a generic 500.
        db.rollback()
        raise ApiError(
            status_code=409, code=ApiErrorCode.CONFLICT, message="permission already exists"
        ) from e
    except SQLAlchemyError as e:
        db.rollback()
        raise ApiError(status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error") from e
    return PermissionOut(id=perm.id, action=perm.action)
+
+
@router.put("/{permission_id}", response_model=PermissionOut)
def update_permission(
    permission_id: int,
    payload: PermissionUpdate,
    db: Session = Depends(get_db),
    _: AdminAccess = Depends(require_management_role("developer")),
) -> PermissionOut:
    """Rename a permission action, guarding against duplicate names.

    Returns 404 when the permission is missing and 409 on a duplicate action.
    """

    from sqlalchemy.exc import IntegrityError  # local: keeps file-level imports untouched

    permission = db.get(Permission, permission_id)
    if permission is None:
        raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="permission not found")
    existing = (
        db.execute(
            select(Permission)
            .where(Permission.action == payload.action)
            .where(Permission.id != permission_id)
        )
        .scalars()
        .first()
    )
    if existing:
        raise ApiError(
            status_code=409, code=ApiErrorCode.CONFLICT, message="permission already exists"
        )
    permission.action = payload.action
    try:
        db.commit()
        db.refresh(permission)
        build_access_index_cache(get_redis()).invalidate_global()
        RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
    except IntegrityError as e:
        # Rename race lost after the duplicate check (assumes a unique
        # constraint on action — confirm): report 409 instead of a generic 500.
        db.rollback()
        raise ApiError(
            status_code=409, code=ApiErrorCode.CONFLICT, message="permission already exists"
        ) from e
    except SQLAlchemyError as e:
        db.rollback()
        raise ApiError(status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error") from e
    return PermissionOut(id=permission.id, action=permission.action)
+
+
@router.delete("/{permission_id}", response_model=SuccessResponse[dict[str, int]])
def delete_permission(
    permission_id: int,
    request: Request,
    db: Session = Depends(get_db),
    _: AdminAccess = Depends(require_management_role("admin")),
) -> dict[str, object]:
    """Delete a permission and its role links, then invalidate caches.

    Returns 404 when the permission does not exist.
    """

    # Plain primary-key lookup; the original select(...).options() ended in a
    # no-op empty options() call.
    permission = db.get(Permission, permission_id)
    if permission is None:
        raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="permission not found")
    try:
        # Remove association rows first so the delete cannot trip FK constraints.
        db.execute(
            delete(role_permissions).where(role_permissions.c.permission_id == permission.id)
        )
        db.delete(permission)
        db.commit()
        build_access_index_cache(get_redis()).invalidate_global()
        RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
    except SQLAlchemyError as e:
        db.rollback()
        raise ApiError(status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error") from e
    return success_response(
        data={"permission_id": permission_id}, request_id=request_id_from_state(request.state)
    )
+
+
@router.get("/{permission_id}/roles", response_model=SuccessResponse[list[RoleOut]])
def list_permission_roles(
    permission_id: int,
    request: Request,
    db: Session = Depends(get_db),
    _: AdminAccess = Depends(require_management_role("viewer")),
) -> dict[str, object]:
    """List the roles that currently include the given permission."""

    if db.get(Permission, permission_id) is None:
        raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="permission not found")
    stmt = select(Role).where(Role.permissions.any(Permission.id == permission_id))
    roles = db.execute(stmt).scalars().all()
    serialized = [RoleOut(id=role.id, name=role.name).model_dump() for role in roles]
    return success_response(
        data=serialized,
        request_id=request_id_from_state(request.state),
    )
diff --git a/keynetra/api/routes/playground.py b/keynetra/api/routes/playground.py
new file mode 100644
index 0000000..865ab25
--- /dev/null
+++ b/keynetra/api/routes/playground.py
@@ -0,0 +1,63 @@
+"""Interactive evaluation surface for inline policies."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from fastapi import APIRouter, Depends, Request
+from pydantic import BaseModel, Field
+
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.security import get_principal
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.engine.keynetra_engine import AuthorizationInput, KeyNetraEngine
+
+
+class PlaygroundPolicy(BaseModel):
+    """Inline policy definition evaluated ad hoc by the playground engine."""
+
+    action: str
+    effect: str = "allow"
+    priority: int = 100
+    policy_id: str | None = None
+    conditions: dict[str, Any] = Field(default_factory=dict)
+
+
+class PlaygroundInput(BaseModel):
+    """Authorization request payload: subject, resource, action, and context."""
+
+    user: dict[str, Any] = Field(default_factory=dict)
+    resource: dict[str, Any] = Field(default_factory=dict)
+    action: str = ""
+    context: dict[str, Any] = Field(default_factory=dict)
+
+
+class PlaygroundEvaluateRequest(BaseModel):
+    """Request body for /playground/evaluate: ad-hoc policies plus one input."""
+
+    policies: list[PlaygroundPolicy]
+    input: PlaygroundInput
+
+
+router = APIRouter(prefix="/playground", dependencies=[Depends(get_principal)])
+
+
+@router.post("/evaluate", response_model=SuccessResponse[dict[str, Any]])
+def evaluate(
+ payload: PlaygroundEvaluateRequest,
+ request: Request,
+ _: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, Any]:
+ engine = KeyNetraEngine([policy.model_dump() for policy in payload.policies])
+ authorization_input = AuthorizationInput(
+ user=payload.input.user,
+ resource=payload.input.resource,
+ action=payload.input.action,
+ context=payload.input.context,
+ )
+ decision = engine.decide(authorization_input)
+ return success_response(
+ data={
+ "allowed": decision.allowed,
+ "decision": decision.decision,
+ "reason": decision.reason,
+ "policy_id": decision.policy_id,
+ "explain_trace": [step.to_dict() for step in decision.explain_trace],
+ },
+ request_id=request_id_from_state(request.state),
+ )
diff --git a/keynetra/api/routes/policies.py b/keynetra/api/routes/policies.py
new file mode 100644
index 0000000..b4bd290
--- /dev/null
+++ b/keynetra/api/routes/policies.py
@@ -0,0 +1,259 @@
+"""HTTP transport for policy management."""
+
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.pagination import decode_cursor
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.redis_client import get_redis
+from keynetra.config.security import get_principal
+from keynetra.config.settings import Settings, get_settings
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.management import PolicyCreate, PolicyOut
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.cache.policy_cache import build_policy_cache
+from keynetra.infrastructure.cache.policy_distribution import RedisPolicyEventPublisher
+from keynetra.infrastructure.repositories.policies import SqlPolicyRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.policies import PolicyService
+from keynetra.services.policy_dsl import dsl_to_policy
+from keynetra.services.policy_lint import PolicyLintService
+
+router = APIRouter(prefix="/policies", dependencies=[Depends(get_principal)])
+
+
+def get_policy_service(
+ settings: Settings = Depends(get_settings),
+ db: Session = Depends(get_db),
+) -> tuple[PolicyService, PolicyLintService, SqlTenantRepository]:
+ """Create the shared repositories for policy management."""
+
+ redis_client = get_redis()
+ tenant_repo = SqlTenantRepository(db)
+ policy_repo = SqlPolicyRepository(db)
+ service = PolicyService(
+ tenants=tenant_repo,
+ policies=policy_repo,
+ policy_cache=build_policy_cache(redis_client),
+ decision_cache=build_decision_cache(redis_client),
+ publisher=RedisPolicyEventPublisher(settings),
+ )
+ lint_service = PolicyLintService(session=db, policies=policy_repo)
+ return service, lint_service, tenant_repo
+
+
+@router.get("", response_model=SuccessResponse[list[PolicyOut]])
+def list_policies(
+ request: Request,
+ deps: tuple[PolicyService, PolicyLintService, SqlTenantRepository] = Depends(
+ get_policy_service
+ ),
+ access: AdminAccess = Depends(require_management_role("viewer")),
+ limit: int = 50,
+ cursor: str | None = None,
+) -> dict[str, object]:
+ if limit < 1 or limit > 100:
+ raise ApiError(
+ status_code=422,
+ code=ApiErrorCode.VALIDATION_ERROR,
+ message="limit must be between 1 and 100",
+ )
+ service, lint_service, tenant_repo = deps
+ tenant_key = access.tenant_key
+ try:
+ items, next_cursor = service.list_policies_page(
+ tenant_key=tenant_key, limit=limit, cursor=decode_cursor(cursor)
+ )
+ tenant = tenant_repo.get_or_create(tenant_key)
+ warnings = lint_service.lint(tenant_id=tenant.id)
+ except SQLAlchemyError as error:
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from error
+ return success_response(
+ data=[PolicyOut(**item).model_dump() for item in items],
+ request_id=request_id_from_state(request.state),
+ limit=limit,
+ next_cursor=next_cursor,
+ meta={"warnings": warnings} if warnings else None,
+ )
+
+
+@router.post("", response_model=SuccessResponse[PolicyOut], status_code=status.HTTP_201_CREATED)
+def create_policy(
+    payload: PolicyCreate,
+    request: Request,
+    deps: tuple[PolicyService, PolicyLintService, SqlTenantRepository] = Depends(
+        get_policy_service
+    ),
+    principal: dict[str, str] = Depends(get_principal),
+    access: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, object]:
+    """Create a policy for the caller's tenant and attach lint warnings.
+
+    The policy key comes from ``conditions["policy_key"]`` when present,
+    otherwise the action string doubles as the key.
+    """
+    service, lint_service, tenant_repo = deps
+    tenant_key = access.tenant_key
+    if payload.effect not in {"allow", "deny"}:
+        raise ApiError(
+            status_code=422,
+            code=ApiErrorCode.VALIDATION_ERROR,
+            message="effect must be allow or deny",
+        )
+    try:
+        result = service.create_policy(
+            tenant_key=tenant_key,
+            policy_key=str(payload.conditions.get("policy_key") or payload.action),
+            action=payload.action,
+            effect=payload.effect,
+            priority=payload.priority,
+            conditions=payload.conditions,
+            created_by=str(principal.get("id")),
+        )
+    except SQLAlchemyError as error:
+        raise ApiError(
+            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+        ) from error
+    # Re-lint after the write so the response carries fresh warnings in meta.
+    warnings = lint_service.lint(tenant_id=tenant_repo.get_or_create(tenant_key).id)
+    return success_response(
+        data=PolicyOut(
+            id=result.id,
+            action=result.action,
+            effect=result.effect,
+            priority=result.priority,
+            conditions=result.conditions,
+        ).model_dump(),
+        request_id=request_id_from_state(request.state),
+        meta={"warnings": warnings} if warnings else None,
+    )
+
+
+@router.put("/{policy_key}", response_model=SuccessResponse[PolicyOut])
+def update_policy(
+    policy_key: str,
+    payload: PolicyCreate,
+    request: Request,
+    deps: tuple[PolicyService, PolicyLintService, SqlTenantRepository] = Depends(
+        get_policy_service
+    ),
+    principal: dict[str, str] = Depends(get_principal),
+    access: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, object]:
+    """Update the policy identified by ``policy_key`` for the caller's tenant.
+
+    NOTE(review): this delegates to ``service.create_policy`` — presumably the
+    service upserts/versions by policy_key; confirm it does not create a
+    duplicate row for an existing key.
+    """
+    service, lint_service, tenant_repo = deps
+    if payload.effect not in {"allow", "deny"}:
+        raise ApiError(
+            status_code=422,
+            code=ApiErrorCode.VALIDATION_ERROR,
+            message="effect must be allow or deny",
+        )
+    try:
+        result = service.create_policy(
+            tenant_key=access.tenant_key,
+            policy_key=policy_key,
+            action=payload.action,
+            effect=payload.effect,
+            priority=payload.priority,
+            conditions=payload.conditions,
+            created_by=str(principal.get("id")),
+        )
+    except SQLAlchemyError as error:
+        raise ApiError(
+            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+        ) from error
+    # Re-lint after the write so the response carries fresh warnings in meta.
+    warnings = lint_service.lint(tenant_id=tenant_repo.get_or_create(access.tenant_key).id)
+    return success_response(
+        data=PolicyOut(
+            id=result.id,
+            action=result.action,
+            effect=result.effect,
+            priority=result.priority,
+            conditions=result.conditions,
+        ).model_dump(),
+        request_id=request_id_from_state(request.state),
+        meta={"warnings": warnings} if warnings else None,
+    )
+
+
+@router.post("/dsl", response_model=SuccessResponse[PolicyOut], status_code=status.HTTP_201_CREATED)
+def create_policy_from_dsl(
+ dsl: str,
+ request: Request,
+ deps: tuple[PolicyService, PolicyLintService, SqlTenantRepository] = Depends(
+ get_policy_service
+ ),
+ principal: dict[str, str] = Depends(get_principal),
+ access: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, object]:
+ try:
+ policy = dsl_to_policy(dsl)
+ except ValueError as error:
+ raise ApiError(
+ status_code=422, code=ApiErrorCode.VALIDATION_ERROR, message=str(error)
+ ) from error
+ return create_policy(
+ payload=PolicyCreate(
+ action=policy["action"],
+ effect=policy["effect"],
+ priority=policy["priority"],
+ conditions=policy["conditions"],
+ ),
+ request=request,
+ deps=deps,
+ principal=principal,
+ access=access,
+ )
+
+
+@router.delete("/{policy_key}", response_model=SuccessResponse[dict[str, str]])
+def delete_policy(
+ policy_key: str,
+ request: Request,
+ deps: tuple[PolicyService, PolicyLintService, SqlTenantRepository] = Depends(
+ get_policy_service
+ ),
+ access: AdminAccess = Depends(require_management_role("admin")),
+) -> dict[str, object]:
+ service, _, _ = deps
+ try:
+ service.delete_policy(tenant_key=access.tenant_key, policy_key=policy_key)
+ except SQLAlchemyError as error:
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from error
+ return success_response(
+ data={"policy_key": policy_key}, request_id=request_id_from_state(request.state)
+ )
+
+
+@router.post(
+ "/{policy_key}/rollback/{version}", response_model=SuccessResponse[dict[str, int | str]]
+)
+def rollback_policy(
+ policy_key: str,
+ version: int,
+ request: Request,
+ deps: tuple[PolicyService, PolicyLintService, SqlTenantRepository] = Depends(
+ get_policy_service
+ ),
+ access: AdminAccess = Depends(require_management_role("admin")),
+) -> dict[str, object]:
+ service, _, _ = deps
+ try:
+ current_policy_key, current_version = service.rollback_policy(
+ tenant_key=access.tenant_key,
+ policy_key=policy_key,
+ version=version,
+ )
+ except ValueError as error:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message=str(error)) from error
+ except SQLAlchemyError as error:
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from error
+ return success_response(
+ data={"policy_key": current_policy_key, "current_version": current_version},
+ request_id=request_id_from_state(request.state),
+ )
diff --git a/keynetra/api/routes/relationships.py b/keynetra/api/routes/relationships.py
new file mode 100644
index 0000000..bcc859f
--- /dev/null
+++ b/keynetra/api/routes/relationships.py
@@ -0,0 +1,111 @@
+"""HTTP transport for relationship management."""
+
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from pydantic import BaseModel
+from sqlalchemy.exc import IntegrityError, SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.pagination import decode_cursor
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.redis_client import get_redis
+from keynetra.config.security import get_principal
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.cache.relationship_cache import build_relationship_cache
+from keynetra.infrastructure.repositories.relationships import SqlRelationshipRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.relationships import RelationshipService
+
+router = APIRouter(prefix="/relationships", dependencies=[Depends(get_principal)])
+
+
+class RelationshipCreate(BaseModel):
+    """Subject-relation-object tuple describing one relationship edge."""
+
+    subject_type: str
+    subject_id: str
+    relation: str
+    object_type: str
+    object_id: str
+
+
+class RelationshipOut(RelationshipCreate):
+    """RelationshipCreate plus the persisted row id."""
+
+    id: int
+
+
+def get_relationship_service(db: Session = Depends(get_db)) -> RelationshipService:
+ """Create the request-scoped relationship service."""
+
+ redis_client = get_redis()
+ return RelationshipService(
+ tenants=SqlTenantRepository(db),
+ relationships=SqlRelationshipRepository(db),
+ relationship_cache=build_relationship_cache(redis_client),
+ decision_cache=build_decision_cache(redis_client),
+ access_index_cache=build_access_index_cache(redis_client),
+ )
+
+
+@router.get("", response_model=SuccessResponse[list[dict[str, str]]])
+def list_relationships(
+ subject_type: str,
+ subject_id: str,
+ request: Request,
+ service: RelationshipService = Depends(get_relationship_service),
+ access: AdminAccess = Depends(require_management_role("viewer")),
+ limit: int = 50,
+ cursor: str | None = None,
+) -> dict[str, object]:
+ if limit < 1 or limit > 100:
+ raise ApiError(
+ status_code=422,
+ code=ApiErrorCode.VALIDATION_ERROR,
+ message="limit must be between 1 and 100",
+ )
+ try:
+ data, next_cursor = service.list_relationships_page(
+ tenant_key=access.tenant_key,
+ subject_type=subject_type,
+ subject_id=subject_id,
+ limit=limit,
+ cursor=decode_cursor(cursor),
+ )
+ except SQLAlchemyError as error:
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from error
+ return success_response(
+ data=data,
+ request_id=request_id_from_state(request.state),
+ limit=limit,
+ next_cursor=next_cursor,
+ )
+
+
+@router.post(
+ "", response_model=SuccessResponse[RelationshipOut], status_code=status.HTTP_201_CREATED
+)
+def create_relationship(
+ payload: RelationshipCreate,
+ request: Request,
+ service: RelationshipService = Depends(get_relationship_service),
+ access: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, object]:
+ try:
+ row_id = service.create_relationship(tenant_key=access.tenant_key, **payload.model_dump())
+ except IntegrityError as error:
+ raise ApiError(
+ status_code=409, code=ApiErrorCode.CONFLICT, message="relationship exists"
+ ) from error
+ except SQLAlchemyError as error:
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from error
+ return success_response(
+ data=RelationshipOut(id=row_id, **payload.model_dump()).model_dump(),
+ request_id=request_id_from_state(request.state),
+ )
diff --git a/keynetra/api/routes/roles.py b/keynetra/api/routes/roles.py
new file mode 100644
index 0000000..af0f552
--- /dev/null
+++ b/keynetra/api/routes/roles.py
@@ -0,0 +1,238 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request, status
+from sqlalchemy import and_, delete, or_, select
+from sqlalchemy.exc import IntegrityError, SQLAlchemyError
+from sqlalchemy.orm import Session, joinedload
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.pagination import decode_cursor, encode_cursor
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.redis_client import get_redis
+from keynetra.config.security import get_principal
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+from keynetra.domain.models.rbac import Permission, Role, role_permissions, user_roles
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.management import PermissionOut, RoleCreate, RoleOut, RoleUpdate
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.revisions import RevisionService
+
+router = APIRouter(prefix="/roles", dependencies=[Depends(get_principal)])
+
+
+@router.get("", response_model=SuccessResponse[list[RoleOut]])
+def list_roles(
+ request: Request,
+ db: Session = Depends(get_db),
+ _: AdminAccess = Depends(require_management_role("viewer")),
+ limit: int = 50,
+ cursor: str | None = None,
+) -> dict[str, object]:
+ if limit < 1 or limit > 100:
+ raise ApiError(
+ status_code=422,
+ code=ApiErrorCode.VALIDATION_ERROR,
+ message="limit must be between 1 and 100",
+ )
+ decoded = decode_cursor(cursor)
+ query = select(Role)
+ if decoded is not None:
+ query = query.where(
+ or_(
+ Role.name > str(decoded["name"]),
+ and_(Role.name == str(decoded["name"]), Role.id > int(decoded["id"])),
+ )
+ )
+ roles = (
+ db.execute(query.order_by(Role.name.asc(), Role.id.asc()).limit(limit + 1)).scalars().all()
+ )
+ has_next = len(roles) > limit
+ page = roles[:limit]
+ next_cursor = (
+ encode_cursor({"name": page[-1].name, "id": page[-1].id}) if has_next and page else None
+ )
+ return success_response(
+ data=[RoleOut(id=r.id, name=r.name).model_dump() for r in page],
+ request_id=request_id_from_state(request.state),
+ limit=limit,
+ next_cursor=next_cursor,
+ )
+
+
+@router.post("", response_model=RoleOut, status_code=status.HTTP_201_CREATED)
+def create_role(
+ payload: RoleCreate,
+ db: Session = Depends(get_db),
+ _: AdminAccess = Depends(require_management_role("admin")),
+) -> RoleOut:
+ existing = db.execute(select(Role).where(Role.name == payload.name)).scalars().first()
+ if existing:
+ raise ApiError(status_code=409, code=ApiErrorCode.CONFLICT, message="role already exists")
+ role = Role(name=payload.name)
+ try:
+ db.add(role)
+ db.commit()
+ db.refresh(role)
+ build_access_index_cache(get_redis()).invalidate_global()
+ RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
+ except SQLAlchemyError as e:
+ db.rollback()
+ raise ApiError(status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error") from e
+ return RoleOut(id=role.id, name=role.name)
+
+
+@router.put("/{role_id}", response_model=RoleOut)
+def update_role(
+ role_id: int,
+ payload: RoleUpdate,
+ db: Session = Depends(get_db),
+ _: AdminAccess = Depends(require_management_role("developer")),
+) -> RoleOut:
+ role = db.get(Role, role_id)
+ if role is None:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="role not found")
+ existing = (
+ db.execute(select(Role).where(Role.name == payload.name).where(Role.id != role_id))
+ .scalars()
+ .first()
+ )
+ if existing:
+ raise ApiError(status_code=409, code=ApiErrorCode.CONFLICT, message="role already exists")
+ role.name = payload.name
+ try:
+ db.commit()
+ db.refresh(role)
+ build_access_index_cache(get_redis()).invalidate_global()
+ RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
+ except SQLAlchemyError as e:
+ db.rollback()
+ raise ApiError(status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error") from e
+ return RoleOut(id=role.id, name=role.name)
+
+
+@router.delete("/{role_id}", response_model=SuccessResponse[dict[str, int]])
+def delete_role(
+    role_id: int,
+    request: Request,
+    db: Session = Depends(get_db),
+    _: AdminAccess = Depends(require_management_role("admin")),
+) -> dict[str, object]:
+    """Delete a role along with its permission links and user assignments."""
+    # Eager-load both collections; unique() is required when joined eager
+    # loading targets collections.
+    role = (
+        db.execute(
+            select(Role)
+            .where(Role.id == role_id)
+            .options(joinedload(Role.permissions), joinedload(Role.users))
+        )
+        .unique()
+        .scalars()
+        .first()
+    )
+    if role is None:
+        raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="role not found")
+    try:
+        # Remove association rows explicitly before deleting the role row itself.
+        db.execute(delete(role_permissions).where(role_permissions.c.role_id == role.id))
+        db.execute(delete(user_roles).where(user_roles.c.role_id == role.id))
+        db.delete(role)
+        db.commit()
+        # Derived caches and the tenant revision must reflect the removal.
+        build_access_index_cache(get_redis()).invalidate_global()
+        RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
+    except SQLAlchemyError as e:
+        db.rollback()
+        raise ApiError(status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error") from e
+    return success_response(
+        data={"role_id": role_id}, request_id=request_id_from_state(request.state)
+    )
+
+
+@router.get("/{role_id}/permissions", response_model=SuccessResponse[list[PermissionOut]])
+def list_role_permissions(
+ role_id: int,
+ request: Request,
+ db: Session = Depends(get_db),
+ _: AdminAccess = Depends(require_management_role("viewer")),
+) -> dict[str, object]:
+ role = (
+ db.execute(select(Role).where(Role.id == role_id).options(joinedload(Role.permissions)))
+ .scalars()
+ .first()
+ )
+ if role is None:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="role not found")
+ return success_response(
+ data=[
+ PermissionOut(id=permission.id, action=permission.action).model_dump()
+ for permission in role.permissions
+ ],
+ request_id=request_id_from_state(request.state),
+ )
+
+
+@router.post(
+ "/{role_id}/permissions/{permission_id}",
+ response_model=SuccessResponse[PermissionOut],
+ status_code=status.HTTP_201_CREATED,
+)
+def add_permission_to_role(
+ role_id: int,
+ permission_id: int,
+ request: Request,
+ db: Session = Depends(get_db),
+ _: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, object]:
+ role = db.get(Role, role_id)
+ permission = db.get(Permission, permission_id)
+ if role is None:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="role not found")
+ if permission is None:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="permission not found")
+ if permission not in role.permissions:
+ role.permissions.append(permission)
+ try:
+ db.commit()
+ build_access_index_cache(get_redis()).invalidate_global()
+ RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
+ except SQLAlchemyError as e:
+ db.rollback()
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from e
+ return success_response(
+ data=PermissionOut(id=permission.id, action=permission.action).model_dump(),
+ request_id=request_id_from_state(request.state),
+ )
+
+
+@router.delete(
+ "/{role_id}/permissions/{permission_id}", response_model=SuccessResponse[dict[str, int]]
+)
+def remove_permission_from_role(
+ role_id: int,
+ permission_id: int,
+ request: Request,
+ db: Session = Depends(get_db),
+ _: AdminAccess = Depends(require_management_role("developer")),
+) -> dict[str, object]:
+ role = db.get(Role, role_id)
+ permission = db.get(Permission, permission_id)
+ if role is None:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="role not found")
+ if permission is None:
+ raise ApiError(status_code=404, code=ApiErrorCode.NOT_FOUND, message="permission not found")
+ if permission in role.permissions:
+ role.permissions.remove(permission)
+ try:
+ db.commit()
+ build_access_index_cache(get_redis()).invalidate_global()
+ RevisionService(SqlTenantRepository(db)).bump_revision(tenant_key=DEFAULT_TENANT_KEY)
+ except SQLAlchemyError as e:
+ db.rollback()
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from e
+ return success_response(
+ data={"role_id": role_id, "permission_id": permission_id},
+ request_id=request_id_from_state(request.state),
+ )
diff --git a/keynetra/api/routes/simulation.py b/keynetra/api/routes/simulation.py
new file mode 100644
index 0000000..7fa53e6
--- /dev/null
+++ b/keynetra/api/routes/simulation.py
@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+from fastapi import APIRouter, Depends, Request
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.api.responses import request_id_from_state, success_response
+from keynetra.config.admin_auth import AdminAccess, require_management_role
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import get_settings
+from keynetra.domain.schemas.api import SuccessResponse
+from keynetra.domain.schemas.modeling import (
+ ImpactAnalysisRequest,
+ ImpactAnalysisResponse,
+ PolicySimulationRequest,
+ PolicySimulationResponse,
+)
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.cache.acl_cache import build_acl_cache
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.cache.policy_cache import build_policy_cache
+from keynetra.infrastructure.cache.relationship_cache import build_relationship_cache
+from keynetra.infrastructure.repositories.acl import SqlACLRepository
+from keynetra.infrastructure.repositories.audit import SqlAuditRepository
+from keynetra.infrastructure.repositories.auth_models import SqlAuthModelRepository
+from keynetra.infrastructure.repositories.policies import SqlPolicyRepository
+from keynetra.infrastructure.repositories.relationships import SqlRelationshipRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.repositories.users import SqlUserRepository
+from keynetra.infrastructure.storage.session import get_db
+from keynetra.services.authorization import AuthorizationService
+from keynetra.services.impact_analysis import ImpactAnalyzer
+from keynetra.services.policy_simulator import PolicySimulator
+
+router = APIRouter()
+
+
+def get_simulation_services(
+ db: Session = Depends(get_db),
+) -> tuple[AuthorizationService, PolicySimulator, ImpactAnalyzer]:
+ redis_client = get_redis()
+ tenants = SqlTenantRepository(db)
+ policies = SqlPolicyRepository(db)
+ users = SqlUserRepository(db)
+ relationships = SqlRelationshipRepository(db)
+ auth = AuthorizationService(
+ settings=get_settings(),
+ tenants=tenants,
+ policies=policies,
+ users=users,
+ relationships=relationships,
+ audit=SqlAuditRepository(db),
+ policy_cache=build_policy_cache(redis_client),
+ relationship_cache=build_relationship_cache(redis_client),
+ decision_cache=build_decision_cache(redis_client),
+ acl_repository=SqlACLRepository(db),
+ acl_cache=build_acl_cache(redis_client),
+ access_index_cache=build_access_index_cache(redis_client),
+ auth_model_repository=SqlAuthModelRepository(db),
+ )
+ simulator = PolicySimulator(tenants=tenants, policies=policies, authorization_service=auth)
+ impact = ImpactAnalyzer(
+ tenants=tenants, policies=policies, users=users, relationships=relationships
+ )
+ return auth, simulator, impact
+
+
+@router.post("/simulate-policy", response_model=SuccessResponse[PolicySimulationResponse])
+def simulate_policy(
+    payload: PolicySimulationRequest,
+    request: Request,
+    deps: tuple[AuthorizationService, PolicySimulator, ImpactAnalyzer] = Depends(
+        get_simulation_services
+    ),
+    access: AdminAccess = Depends(require_management_role("viewer")),
+) -> dict[str, object]:
+    """Evaluate one request before and after a hypothetical policy change."""
+    _auth, simulator, _impact = deps
+    # Accept loose request shapes (string user/resource) and canonicalize them.
+    req = _normalize_request(payload.request)
+    policy_change = payload.simulate.policy_change
+    if not policy_change:
+        raise ApiError(
+            status_code=422, code=ApiErrorCode.VALIDATION_ERROR, message="policy_change is required"
+        )
+    try:
+        result = simulator.simulate_policy_change(
+            tenant_key=access.tenant_key,
+            user=req["user"],
+            action=req["action"],
+            resource=req["resource"],
+            context=req["context"],
+            policy_change=policy_change,
+        )
+    except ValueError as error:
+        raise ApiError(
+            status_code=422, code=ApiErrorCode.VALIDATION_ERROR, message=str(error)
+        ) from error
+    except SQLAlchemyError as error:
+        raise ApiError(
+            status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+        ) from error
+    # Flatten both engine decisions into plain dicts for the response schema.
+    return success_response(
+        data=PolicySimulationResponse(
+            decision_before={
+                "allowed": result.decision_before.allowed,
+                "decision": result.decision_before.decision,
+                "reason": result.decision_before.reason,
+                "policy_id": result.decision_before.policy_id,
+            },
+            decision_after={
+                "allowed": result.decision_after.allowed,
+                "decision": result.decision_after.decision,
+                "reason": result.decision_after.reason,
+                "policy_id": result.decision_after.policy_id,
+            },
+        ).model_dump(),
+        request_id=request_id_from_state(request.state),
+    )
+
+
+@router.post("/impact-analysis", response_model=SuccessResponse[ImpactAnalysisResponse])
+def impact_analysis(
+ payload: ImpactAnalysisRequest,
+ request: Request,
+ deps: tuple[AuthorizationService, PolicySimulator, ImpactAnalyzer] = Depends(
+ get_simulation_services
+ ),
+ access: AdminAccess = Depends(require_management_role("viewer")),
+) -> dict[str, object]:
+ _auth, _simulator, impact = deps
+ try:
+ result = impact.analyze_policy_change(
+ tenant_key=access.tenant_key, policy_change=payload.policy_change
+ )
+ except ValueError as error:
+ raise ApiError(
+ status_code=422, code=ApiErrorCode.VALIDATION_ERROR, message=str(error)
+ ) from error
+ except SQLAlchemyError as error:
+ raise ApiError(
+ status_code=500, code=ApiErrorCode.DATABASE_ERROR, message="db error"
+ ) from error
+ return success_response(
+ data=ImpactAnalysisResponse(**result.__dict__).model_dump(),
+ request_id=request_id_from_state(request.state),
+ )
+
+
+def _normalize_request(raw: dict[str, object]) -> dict[str, object]:
+ user = raw.get("user")
+ resource = raw.get("resource")
+ action = raw.get("action")
+ context = raw.get("context") or {}
+ if isinstance(user, str):
+ user = {"id": user}
+ if isinstance(resource, str):
+ parts = resource.split(":", 1)
+ resource = {
+ "resource_type": parts[0],
+ "resource_id": parts[1] if len(parts) > 1 else parts[0],
+ }
+ if not isinstance(user, dict):
+ user = {}
+ if not isinstance(resource, dict):
+ resource = {}
+ if not isinstance(action, str):
+ action = ""
+ if not isinstance(context, dict):
+ context = {}
+ return {"user": user, "resource": resource, "action": action, "context": context}
diff --git a/keynetra/api/service_modes.py b/keynetra/api/service_modes.py
new file mode 100644
index 0000000..48eef01
--- /dev/null
+++ b/keynetra/api/service_modes.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+from fastapi import APIRouter
+
+from keynetra.api.routes.access import router as access_router
+from keynetra.api.routes.acl import router as acl_router
+from keynetra.api.routes.admin_auth import router as admin_auth_router
+from keynetra.api.routes.audit import router as audit_router
+from keynetra.api.routes.auth_model import router as auth_model_router
+from keynetra.api.routes.dev import router as dev_router
+from keynetra.api.routes.health import router as health_router
+from keynetra.api.routes.metrics import router as metrics_router
+from keynetra.api.routes.permissions import router as permissions_router
+from keynetra.api.routes.playground import router as playground_router
+from keynetra.api.routes.policies import router as policies_router
+from keynetra.api.routes.relationships import router as relationships_router
+from keynetra.api.routes.roles import router as roles_router
+from keynetra.api.routes.simulation import router as simulation_router
+
+
+def router_for_mode(mode: str) -> APIRouter:
+    """Assemble the API router for the configured service mode.
+
+    Metrics and health endpoints are mounted in every mode. ``all`` and
+    ``access-api`` expose the access surface; ``all`` and ``policy-store``
+    expose the management/admin surface; ``policy-engine`` re-uses the access
+    router. An empty/None mode defaults to ``all``; an unrecognized mode gets
+    only the always-on routers.
+    """
+    router = APIRouter()
+    # Observability endpoints are available regardless of mode.
+    router.include_router(metrics_router, tags=["observability"])
+    router.include_router(health_router, tags=["health"])
+
+    mode = (mode or "all").lower()
+    if mode in {"all", "access-api"}:
+        router.include_router(access_router, tags=["access"])
+    if mode in {"all", "policy-store"}:
+        router.include_router(admin_auth_router, tags=["auth"])
+        router.include_router(policies_router, tags=["management"])
+        router.include_router(acl_router, tags=["management"])
+        router.include_router(auth_model_router, tags=["management"])
+        router.include_router(simulation_router, tags=["management"])
+        router.include_router(roles_router, tags=["management"])
+        router.include_router(permissions_router, tags=["management"])
+        router.include_router(relationships_router, tags=["management"])
+        router.include_router(audit_router, tags=["management"])
+        router.include_router(playground_router, tags=["playground"])
+        router.include_router(dev_router, tags=["dev"])
+    if mode == "policy-engine":
+        # Engine is exposed via /check-access + /simulate routes (in access_router).
+        router.include_router(access_router, tags=["engine"])
+
+    return router
diff --git a/keynetra/cli.py b/keynetra/cli.py
new file mode 100644
index 0000000..67f2454
--- /dev/null
+++ b/keynetra/cli.py
@@ -0,0 +1,884 @@
+"""Operational CLI for KeyNetra core."""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import os
+import time
+import warnings
+from pathlib import Path
+from typing import Any
+
+import httpx
+import typer
+from sqlalchemy import text
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm import Session
+
+from keynetra.config.config_loader import apply_config_to_environment, load_config_file
+from keynetra.config.file_loaders import load_policies_from_paths
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import get_settings, reset_settings_cache
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+from keynetra.infrastructure.cache.access_index_cache import build_access_index_cache
+from keynetra.infrastructure.cache.acl_cache import build_acl_cache
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.cache.policy_cache import build_policy_cache
+from keynetra.infrastructure.cache.relationship_cache import build_relationship_cache
+from keynetra.infrastructure.repositories.acl import SqlACLRepository
+from keynetra.infrastructure.repositories.audit import SqlAuditRepository
+from keynetra.infrastructure.repositories.policies import SqlPolicyRepository
+from keynetra.infrastructure.repositories.relationships import SqlRelationshipRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.infrastructure.repositories.users import SqlUserRepository
+from keynetra.infrastructure.storage.session import (
+ create_engine_for_url,
+ create_session_factory,
+ initialize_database,
+)
+from keynetra.migrations import find_destructive_revisions
+from keynetra.services.authorization import AuthorizationService
+from keynetra.services.doctor import run_core_doctor
+from keynetra.services.policy_testing import validate_policy_test_suite
+from keynetra.services.seeding import seed_demo_data
+from keynetra.version import __version__
+
# Keep CLI startup output focused; these Pydantic warnings are non-fatal.
# The message is a regex: it matches the shadow warnings for both
# AuthModelCreate and AuthModelOut schemas.
warnings.filterwarnings(
    "ignore",
    message=r'Field name "schema" in "AuthModel(Create|Out)" shadows an attribute in parent "BaseModel"',
    category=UserWarning,
)

# Typer application tree: root `keynetra` command plus `acl` and `model` sub-apps.
app = typer.Typer(add_completion=False, help="KeyNetra operational CLI.")
acl_app = typer.Typer(add_completion=False, help="Manage ACL entries.")
model_app = typer.Typer(add_completion=False, help="Manage authorization models.")
app.add_typer(acl_app, name="acl")
app.add_typer(model_app, name="model")
+
+
@app.callback()
def cli_root(
    ctx: typer.Context,
    config: str | None = typer.Option(
        None, "--config", help="Path to YAML/JSON/TOML KeyNetra configuration file."
    ),
) -> None:
    # Root callback runs before every subcommand: load --config eagerly so
    # environment-derived settings are in place for whatever command follows.
    if config:
        _load_config(config)
    # Stash the path on the context so subcommands without their own --config
    # can fall back to it (see _effective_config_path).
    ctx.obj = {"config": config}
+
+
def _load_config(path: str) -> None:
    """Load a config file, export it to the environment, and reset cached state."""
    file_config = load_config_file(path)
    apply_config_to_environment(file_config)
    os.environ["KEYNETRA_CONFIG"] = path
    # Settings and the Redis client memoize environment state; rebuild both so
    # the freshly exported values take effect.
    reset_settings_cache()
    get_redis.cache_clear()
+
+
def _effective_config_path(ctx: typer.Context, explicit: str | None) -> str | None:
    """Resolve the config path: a command-level option wins over the root --config."""
    if explicit:
        return explicit
    stored = ctx.obj.get("config") if isinstance(ctx.obj, dict) else None
    if isinstance(stored, str) and stored.strip():
        return stored
    return None
+
+
def _maybe_load_config(ctx: typer.Context, path: str | None) -> None:
    """Load configuration when either the command or the root callback supplied a path."""
    resolved = _effective_config_path(ctx, path)
    if resolved:
        _load_config(resolved)
+
+
+def _resolve_url(explicit_url: str | None, suffix: str, *, use_settings: bool) -> str:
+ if explicit_url:
+ return explicit_url
+ if not use_settings:
+ return f"http://localhost:8000{suffix}"
+ settings = get_settings()
+ host = settings.server_host
+ if host == "0.0.0.0":
+ host = "127.0.0.1"
+ return f"http://{host}:{settings.server_port}{suffix}"
+
+
+@app.command("start")
+def start(
+ ctx: typer.Context,
+ host: str = typer.Option("0.0.0.0", "--host"),
+ port: int = typer.Option(8000, "--port"),
+ reload: bool = typer.Option(False, "--reload", help="Enable development autoreload."),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Start the KeyNetra HTTP API (backward-compatible alias for serve)."""
+
+ config_active = _effective_config_path(ctx, config) is not None
+ _maybe_load_config(ctx, config)
+ settings = get_settings() if config_active else None
+ _run_server(
+ host=host if not config_active or host != "0.0.0.0" else settings.server_host,
+ port=port if not config_active or port != 8000 else settings.server_port,
+ reload=reload,
+ )
+
+
+@app.command("serve")
+def serve(
+ ctx: typer.Context,
+ host: str = typer.Option("0.0.0.0", "--host"),
+ port: int = typer.Option(8000, "--port"),
+ reload: bool = typer.Option(False, "--reload", help="Enable development autoreload."),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Start the KeyNetra HTTP API in headless config mode."""
+
+ config_active = _effective_config_path(ctx, config) is not None
+ _maybe_load_config(ctx, config)
+ settings = get_settings() if config_active else None
+ _run_server(
+ host=host if not config_active or host != "0.0.0.0" else settings.server_host,
+ port=port if not config_active or port != 8000 else settings.server_port,
+ reload=reload,
+ )
+
+
def _run_server(*, host: str, port: int, reload: bool) -> None:
    """Render the startup banner and hand control to uvicorn."""

    import uvicorn

    _render_startup_screen(
        host=host,
        port=port,
        reload=reload,
        settings=get_settings(),
        config_path=os.getenv("KEYNETRA_CONFIG"),
    )
    os.environ["KEYNETRA_LOG_FORMAT"] = "rich"
    app_path = "keynetra.api.main:app"
    try:
        uvicorn.run(
            app_path,
            host=host,
            port=port,
            reload=reload,
            log_config=None,
            access_log=True,
        )
    except TypeError:
        # Older uvicorn versions reject log_config/access_log keywords.
        uvicorn.run(app_path, host=host, port=port, reload=reload)
+
+
def _render_startup_screen(
    *, host: str, port: int, reload: bool, settings: Any, config_path: str | None
) -> None:
    """Print a rich startup banner summarizing runtime, storage and security config.

    Degrades to a single plain-text line when `rich` is not installed.
    """
    try:
        from rich import box
        from rich.align import Align
        from rich.columns import Columns
        from rich.console import Console
        from rich.panel import Panel
        from rich.table import Table
        from rich.text import Text
    except ModuleNotFoundError:
        typer.echo(f"KeyNetra starting on http://{host}:{port} mode={settings.service_mode}")
        return

    # Color is on by default; KEYNETRA_FORCE_COLOR=0/false/no disables it.
    force_color = os.getenv("KEYNETRA_FORCE_COLOR", "1").strip().lower() not in {"0", "false", "no"}
    console = Console(
        force_terminal=force_color, color_system="truecolor" if force_color else "auto"
    )
    banner = Text("KEYNETRA", style="bold magenta")
    try:
        import pyfiglet

        f = pyfiglet.figlet_format("KEYNETRA", font="slant")
        banner = Text(f, style="bold magenta")
    except Exception:
        # pyfiglet is optional eye candy; keep the plain banner on any failure.
        pass

    header = Panel.fit(
        Align.center(
            Text.assemble(
                banner,
                "\n",
                ("Authorization Engine", "bold cyan"),
                "\n",
                (f"v{__version__}", "bold bright_white"),
            )
        ),
        border_style="bright_blue",
        padding=(0, 2),
        box=box.ROUNDED,
    )
    console.print(header)

    # Runtime panel: mode, environment, bind address, reload, config source.
    runtime = Table(
        box=box.SIMPLE_HEAVY,
        show_header=True,
        header_style="bold bright_cyan",
        expand=True,
        pad_edge=False,
    )
    runtime.add_column("Runtime", style="bold white", width=14, no_wrap=True)
    runtime.add_column("Value", style="bright_white", overflow="fold")
    runtime.add_row("Mode", f"[bright_magenta]{settings.service_mode}[/bright_magenta]")
    runtime.add_row("Environment", f"[cyan]{settings.environment}[/cyan]")
    runtime.add_row("Server", f"[green]http://{host}:{port}[/green]")
    runtime.add_row("Reload", "[green]enabled[/green]" if reload else "[yellow]disabled[/yellow]")
    runtime.add_row("Config File", str(config_path or "not provided"))

    # Storage panel: database, redis, policy/model sources.
    storage = Table(
        box=box.SIMPLE_HEAVY,
        show_header=True,
        header_style="bold bright_green",
        expand=True,
        pad_edge=False,
    )
    storage.add_column("Storage", style="bold white", width=14, no_wrap=True)
    storage.add_column("Value", style="bright_white", overflow="fold")
    storage.add_row("Database", str(settings.database_url))
    storage.add_row("Redis", str(settings.redis_url or "disabled"))
    storage.add_row("Policy Paths", ", ".join(settings.parsed_policy_paths()) or "default")
    storage.add_row("Model Paths", ", ".join(settings.parsed_model_paths()) or "none")

    # Security panel: auth mechanisms, admin user, rate limit.
    security = Table(
        box=box.SIMPLE_HEAVY,
        show_header=True,
        header_style="bold yellow",
        expand=True,
        pad_edge=False,
    )
    security.add_column("Security", style="bold white", width=14, no_wrap=True)
    security.add_column("Value", style="bright_white", overflow="fold")
    security.add_row("Auth", "api-key + jwt + admin-login")
    security.add_row("Admin User", str(settings.admin_username or "disabled"))
    security.add_row("Rate Limit", f"{settings.rate_limit_per_minute}/min")

    # Narrow terminals get stacked panels; wide terminals get three columns.
    panel_width = max(60, console.width - 2)
    if console.width < 140:
        console.print(
            Panel(
                runtime,
                title="Runtime",
                border_style="bright_cyan",
                box=box.ROUNDED,
                width=panel_width,
            )
        )
        console.print(
            Panel(
                storage,
                title="Storage",
                border_style="bright_green",
                box=box.ROUNDED,
                width=panel_width,
            )
        )
        console.print(
            Panel(
                security,
                title="Security",
                border_style="yellow",
                box=box.ROUNDED,
                width=panel_width,
            )
        )
    else:
        console.print(Columns([runtime, storage, security], equal=True, expand=True))
    console.print(
        Panel(
            "[bold green]Startup complete[/bold green] • [cyan]launching uvicorn[/cyan]",
            border_style="green",
            box=box.MINIMAL_HEAVY_HEAD,
        )
    )
+
+
+@app.command("version")
+def version() -> None:
+ """Print the KeyNetra core version."""
+
+ typer.echo(__version__)
+
+
+@app.command("admin-login")
+def admin_login(
+ ctx: typer.Context,
+ username: str = typer.Option(..., "--username"),
+ password: str = typer.Option(..., "--password"),
+ url: str | None = typer.Option(None, "--url"),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Get admin JWT using username/password."""
+
+ config_active = _effective_config_path(ctx, config) is not None
+ _maybe_load_config(ctx, config)
+ resp = httpx.post(
+ _resolve_url(url, "/admin/login", use_settings=config_active),
+ json={"username": username, "password": password},
+ timeout=10.0,
+ headers={"Content-Type": "application/json"},
+ )
+ resp.raise_for_status()
+ typer.echo(resp.text)
+
+
+@app.command("help-cli")
+def help_cli() -> None:
+ """Print a complete CLI quick reference with headless config examples."""
+
+ typer.echo(
+ "\n".join(
+ [
+ "KeyNetra CLI Help",
+ "",
+ "Global option:",
+ " --config Load YAML/JSON/TOML config file",
+ "",
+ "Core commands:",
+ " keynetra serve --config examples/keynetra.yaml",
+ " keynetra start --host 0.0.0.0 --port 8000",
+ " keynetra version",
+ " keynetra admin-login --username admin --password admin123 [--config ...]",
+ " keynetra migrate [--config ...]",
+ " keynetra seed-data [--reset] [--config ...]",
+ ' keynetra check --api-key devkey --action read --user \'{"id":"u1"}\' --resource \'{"resource_type":"document","resource_id":"doc-1"}\' [--config ...]',
+ ' keynetra simulate --api-key devkey --policy-change \'{"action":"read","effect":"allow","priority":10,"conditions":{"role":"admin"}}\' --action read [--config ...]',
+ ' keynetra impact --api-key devkey --policy-change \'{"action":"read","effect":"deny","priority":1,"conditions":{}}\' [--config ...]',
+ " keynetra explain --user u1 --resource doc-1 --action read [--config ...]",
+ " keynetra test-policy examples/policy_tests.yaml",
+ " keynetra compile-policies --config examples/keynetra.yaml",
+ " keynetra doctor --service core [--config ...]",
+ " keynetra benchmark --api-key devkey",
+ "",
+ "ACL commands:",
+ " keynetra acl add --subject-type user --subject-id u1 --resource-type document --resource-id doc-1 --action read --effect allow",
+ " keynetra acl list --resource-type document --resource-id doc-1",
+ " keynetra acl remove --acl-id 1",
+ "",
+ "Model commands:",
+ " keynetra model apply examples/auth-model.yaml --api-key devkey",
+ " keynetra model show --api-key devkey",
+ "",
+ "Headless config file examples:",
+ " examples/keynetra.yaml",
+ " examples/auth-model.yaml",
+ " examples/policies/",
+ "",
+ "Embedded usage:",
+ " from keynetra import KeyNetra",
+ " engine = KeyNetra.from_config('examples/keynetra.yaml')",
+ " decision = engine.check_access(subject='user:1', action='read', resource='document:abc')",
+ ]
+ )
+ )
+
+
+@app.command("migrate")
+def migrate(
+ ctx: typer.Context,
+ revision: str = typer.Option("head", "--revision", help="Alembic revision to upgrade to."),
+ confirm_destructive: bool = typer.Option(
+ False, "--confirm-destructive", help="Allow migrations that drop tables or columns."
+ ),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Apply database migrations for the configured KeyNetra database."""
+ _maybe_load_config(ctx, config)
+
+ from alembic import command
+ from alembic.config import Config
+
+ core_dir = Path(__file__).resolve().parents[1]
+ config = Config(str(core_dir / "alembic.ini"))
+ config.set_main_option("script_location", str(core_dir / "alembic"))
+ config.set_main_option("sqlalchemy.url", get_settings().database_url)
+ engine = create_engine_for_url(get_settings().database_url)
+ versions_dir = core_dir / "alembic" / "versions"
+ applied = _read_applied_revisions(engine)
+ destructive = find_destructive_revisions(versions_dir, applied)
+ if destructive and not confirm_destructive:
+ typer.echo("Destructive migrations detected:")
+ for revision_id in destructive:
+ typer.echo(f" - {revision_id}")
+ typer.echo("Re-run with --confirm-destructive to apply them.")
+ raise typer.Exit(code=1)
+ command.upgrade(config, revision)
+ typer.echo(f"Migrations applied up to {revision}.")
+
+
+@app.command("seed-data")
+def seed_data(
+ ctx: typer.Context,
+ reset: bool = typer.Option(
+ False, "--reset", help="Clear the sample dataset before seeding it again."
+ ),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Seed deterministic sample data for local development and smoke tests."""
+ _maybe_load_config(ctx, config)
+
+ settings = get_settings()
+ initialize_database(settings.database_url)
+ session_factory = create_session_factory(settings.database_url)
+ db = session_factory()
+ try:
+ summary = seed_demo_data(db, reset=reset)
+ finally:
+ db.close()
+
+ typer.echo(
+ json.dumps(
+ {
+ "tenant_key": summary.tenant_key,
+ "created_tenant": summary.created_tenant,
+ "created_user": summary.created_user,
+ "created_role": summary.created_role,
+ "created_permissions": summary.created_permissions,
+ "created_relationships": summary.created_relationships,
+ "created_policies": summary.created_policies,
+ },
+ indent=2,
+ )
+ )
+
+
+@app.command("check")
+def check(
+ ctx: typer.Context,
+ url: str | None = typer.Option(None, "--url"),
+ api_key: str = typer.Option(..., "--api-key"),
+ user: str = typer.Option("{}", "--user", help="JSON object"),
+ action: str = typer.Option(..., "--action"),
+ resource: str = typer.Option("{}", "--resource", help="JSON object"),
+ context: str = typer.Option("{}", "--context", help="JSON object"),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Send one authorization request to a running KeyNetra server."""
+ config_active = _effective_config_path(ctx, config) is not None
+ _maybe_load_config(ctx, config)
+
+ user_obj: dict[str, Any] = json.loads(user)
+ res_obj: dict[str, Any] = json.loads(resource)
+ context_obj: dict[str, Any] = json.loads(context)
+ payload = {"user": user_obj, "action": action, "resource": res_obj, "context": context_obj}
+ headers = {"X-API-Key": api_key}
+ resp = httpx.post(
+ _resolve_url(url, "/check-access", use_settings=config_active),
+ json=payload,
+ headers=headers,
+ timeout=10.0,
+ )
+ resp.raise_for_status()
+ typer.echo(resp.text)
+
+
+@model_app.command("apply")
+def model_apply(
+ file: Path = typer.Argument(
+ ..., exists=True, dir_okay=False, readable=True, help="Schema DSL file"
+ ),
+ url: str = typer.Option("http://localhost:8000/auth-model", "--url"),
+ api_key: str = typer.Option(..., "--api-key"),
+) -> None:
+ schema = file.read_text(encoding="utf-8")
+ resp = httpx.post(url, json={"schema": schema}, headers={"X-API-Key": api_key}, timeout=10.0)
+ resp.raise_for_status()
+ typer.echo(resp.text)
+
+
+@model_app.command("show")
+def model_show(
+ url: str = typer.Option("http://localhost:8000/auth-model", "--url"),
+ api_key: str = typer.Option(..., "--api-key"),
+) -> None:
+ resp = httpx.get(url, headers={"X-API-Key": api_key}, timeout=10.0)
+ resp.raise_for_status()
+ typer.echo(resp.text)
+
+
+@app.command("simulate")
+def simulate(
+ ctx: typer.Context,
+ policy_change: str = typer.Option(..., "--policy-change"),
+ user: str = typer.Option("{}", "--user", help="JSON object"),
+ action: str = typer.Option(..., "--action"),
+ resource: str = typer.Option("{}", "--resource", help="JSON object"),
+ context: str = typer.Option("{}", "--context", help="JSON object"),
+ url: str | None = typer.Option(None, "--url"),
+ api_key: str = typer.Option(..., "--api-key"),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ config_active = _effective_config_path(ctx, config) is not None
+ _maybe_load_config(ctx, config)
+ payload = {
+ "simulate": {"policy_change": policy_change},
+ "request": {
+ "user": json.loads(user),
+ "action": action,
+ "resource": json.loads(resource),
+ "context": json.loads(context),
+ },
+ }
+ resp = httpx.post(
+ _resolve_url(url, "/simulate-policy", use_settings=config_active),
+ json=payload,
+ headers={"X-API-Key": api_key},
+ timeout=10.0,
+ )
+ resp.raise_for_status()
+ typer.echo(resp.text)
+
+
+@app.command("impact")
+def impact(
+ ctx: typer.Context,
+ policy_change: str = typer.Option(..., "--policy-change"),
+ url: str | None = typer.Option(None, "--url"),
+ api_key: str = typer.Option(..., "--api-key"),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ config_active = _effective_config_path(ctx, config) is not None
+ _maybe_load_config(ctx, config)
+ resp = httpx.post(
+ _resolve_url(url, "/impact-analysis", use_settings=config_active),
+ json={"policy_change": policy_change},
+ headers={"X-API-Key": api_key},
+ timeout=10.0,
+ )
+ resp.raise_for_status()
+ typer.echo(resp.text)
+
+
+@app.command("explain")
+def explain(
+ ctx: typer.Context,
+ user: str = typer.Option(..., "--user", help="User id."),
+ resource: str = typer.Option(..., "--resource", help="Resource id."),
+ action: str = typer.Option(..., "--action"),
+ context: str = typer.Option("{}", "--context", help="JSON object"),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Evaluate one decision locally and print the explanation trace."""
+ _maybe_load_config(ctx, config)
+
+ settings = get_settings()
+ initialize_database(settings.database_url)
+ session_factory = create_session_factory(settings.database_url)
+ db = session_factory()
+ try:
+ service = _build_authorization_service(db)
+ result = service.authorize(
+ tenant_key=DEFAULT_TENANT_KEY,
+ principal={"type": "cli", "id": "cli"},
+ user={"id": _coerce_scalar(user)},
+ action=action,
+ resource={"id": _coerce_scalar(resource)},
+ context=json.loads(context),
+ audit=False,
+ )
+ finally:
+ db.close()
+
+ typer.echo(
+ json.dumps(
+ {
+ "allowed": result.decision.allowed,
+ "decision": result.decision.decision,
+ "reason": result.decision.reason,
+ "policy_id": result.decision.policy_id,
+ "matched_policies": list(result.decision.matched_policies),
+ "explain_trace": [step.to_dict() for step in result.decision.explain_trace],
+ },
+ indent=2,
+ )
+ )
+
+
+@app.command("test-policy")
+def test_policy(
+ file: Path = typer.Argument(
+ ..., exists=True, dir_okay=False, readable=True, help="YAML or JSON policy test file"
+ ),
+) -> None:
+ """Validate policies and execute deterministic policy tests before deployment."""
+
+ document = file.read_text(encoding="utf-8")
+ results = validate_policy_test_suite(document)
+ failures = [result for result in results if not result.passed]
+
+ for result in results:
+ status = "PASS" if result.passed else "FAIL"
+ typer.echo(
+ f"[{status}] {result.name}: expected={result.expected} actual={result.actual} "
+ f"policy_id={result.policy_id or '-'} reason={result.reason or '-'}"
+ )
+
+ if failures:
+ raise typer.Exit(code=1)
+
+
+@app.command("compile-policies")
+def compile_policies(
+ ctx: typer.Context,
+ path: list[str] | None = typer.Option(
+ None,
+ "--path",
+ help="Policy file or directory path. Repeat --path for multiple values.",
+ ),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Compile policies from files and print a deterministic summary."""
+
+ _maybe_load_config(ctx, config)
+ settings = get_settings()
+ configured_paths = path or settings.parsed_policy_paths()
+ if not configured_paths:
+ raise typer.BadParameter("no policy paths configured")
+
+ policies = load_policies_from_paths(configured_paths)
+ if not policies:
+ raise typer.BadParameter("no policy definitions found")
+
+ from keynetra.engine.keynetra_engine import KeyNetraEngine
+
+ engine = KeyNetraEngine(policies)
+ typer.echo(
+ json.dumps(
+ {
+ "compiled_policies": len(policies),
+ "strategy": "first_match",
+ "policy_ids": [
+ policy.policy_id or f"{policy.action}:{policy.priority}:{policy.effect}"
+ for policy in engine._policies # noqa: SLF001
+ ],
+ },
+ indent=2,
+ )
+ )
+
+
+@app.command("doctor")
+def doctor(
+ ctx: typer.Context,
+ service: str = typer.Option("core", "--service", help="Deployment to validate: core or saas."),
+ config: str | None = typer.Option(None, "--config", help="Path to config file."),
+) -> None:
+ """Validate production readiness for core or SaaS deployments."""
+ _maybe_load_config(ctx, config)
+
+ normalized_service = service.strip().lower()
+ if normalized_service == "core":
+ result = run_core_doctor(get_settings())
+ elif normalized_service == "saas":
+ try:
+ from saas.backend.src.config.settings import get_settings as get_saas_settings
+ from saas.backend.src.services.doctor import run_saas_doctor
+ except ModuleNotFoundError as exc:
+ raise typer.BadParameter("SaaS backend is not importable in this environment.") from exc
+ result = run_saas_doctor(get_saas_settings())
+ else:
+ raise typer.BadParameter("service must be one of: core, saas")
+
+ typer.echo(json.dumps(result, indent=2))
+ if not result["ok"]:
+ raise typer.Exit(code=1)
+
+
async def _run_benchmark(
    url: str,
    payload: dict[str, Any],
    headers: dict[str, str],
    total: int,
    concurrency: int,
    timeout: float,
) -> list[float]:
    """Fire `total` POST requests with bounded concurrency; return per-request latencies."""
    samples: list[float] = []
    gate = asyncio.Semaphore(concurrency)
    async with httpx.AsyncClient(timeout=timeout) as client:

        async def one_request() -> None:
            async with gate:
                started = time.perf_counter()
                response = await client.post(url, json=payload, headers=headers)
                took = time.perf_counter() - started
                response.raise_for_status()
                samples.append(took)

        await asyncio.gather(*(one_request() for _ in range(total)))
    return samples
+
+
+def _percentile(values: list[float], percentile: float) -> float:
+ if not values:
+ return 0.0
+ sorted_vals = sorted(values)
+ k = (len(sorted_vals) - 1) * (percentile / 100.0)
+ lower = int(k)
+ upper = min(lower + 1, len(sorted_vals) - 1)
+ weight = k - lower
+ return (1 - weight) * sorted_vals[lower] + weight * sorted_vals[upper]
+
+
+@app.command("benchmark")
+def benchmark(
+ url: str = typer.Option("http://localhost:8000/check-access", "--url"),
+ requests: int = typer.Option(100, "--requests"),
+ concurrency: int = typer.Option(10, "--concurrency"),
+ api_key: str = typer.Option(..., "--api-key"),
+ timeout: float = typer.Option(10.0, "--timeout"),
+) -> None:
+ """Measure latency and throughput against the authorization API."""
+
+ if requests < 1:
+ raise typer.BadParameter("requests must be greater than zero")
+ if concurrency < 1:
+ raise typer.BadParameter("concurrency must be greater than zero")
+ payload = {"user": {"id": 1}, "action": "check", "resource": {"amount": 1}, "context": {}}
+ headers = {"X-API-Key": api_key}
+ durations = asyncio.run(_run_benchmark(url, payload, headers, requests, concurrency, timeout))
+ if not durations:
+ typer.echo("No successful samples collected.")
+ raise typer.Exit(code=1)
+ total_time = sum(durations)
+ throughput = len(durations) / total_time if total_time > 0 else 0.0
+ result = {
+ "requests": len(durations),
+ "p50(ms)": _percentile(durations, 50) * 1000,
+ "p95(ms)": _percentile(durations, 95) * 1000,
+ "p99(ms)": _percentile(durations, 99) * 1000,
+ "throughput": throughput,
+ }
+ typer.echo(json.dumps(result, indent=2))
+
+
+@acl_app.command("add")
+def acl_add(
+ subject_type: str = typer.Option(..., "--subject-type"),
+ subject_id: str = typer.Option(..., "--subject-id"),
+ resource_type: str = typer.Option(..., "--resource-type"),
+ resource_id: str = typer.Option(..., "--resource-id"),
+ action: str = typer.Option(..., "--action"),
+ effect: str = typer.Option(..., "--effect"),
+ tenant_key: str = typer.Option(DEFAULT_TENANT_KEY, "--tenant-key"),
+) -> None:
+ settings = get_settings()
+ initialize_database(settings.database_url)
+ db = create_session_factory(settings.database_url)()
+ redis_client = get_redis()
+ try:
+ tenants = SqlTenantRepository(db)
+ tenant = tenants.get_or_create(tenant_key)
+ acl_repo = SqlACLRepository(db)
+ acl_id = acl_repo.create_acl_entry(
+ tenant_id=tenant.id,
+ subject_type=subject_type,
+ subject_id=subject_id,
+ resource_type=resource_type,
+ resource_id=resource_id,
+ action=action,
+ effect=effect,
+ )
+ build_acl_cache(redis_client).invalidate(
+ tenant_id=tenant.id, resource_type=resource_type, resource_id=resource_id
+ )
+ build_access_index_cache(redis_client).invalidate_tenant(tenant_id=tenant.id)
+ build_decision_cache(redis_client).bump_namespace(tenant_key)
+ typer.echo(json.dumps({"acl_id": acl_id, "tenant_key": tenant_key}, indent=2))
+ finally:
+ db.close()
+
+
+@acl_app.command("list")
+def acl_list(
+ resource_type: str = typer.Option(..., "--resource-type"),
+ resource_id: str = typer.Option(..., "--resource-id"),
+ tenant_key: str = typer.Option(DEFAULT_TENANT_KEY, "--tenant-key"),
+) -> None:
+ settings = get_settings()
+ initialize_database(settings.database_url)
+ db = create_session_factory(settings.database_url)()
+ try:
+ tenant = SqlTenantRepository(db).get_or_create(tenant_key)
+ rows = SqlACLRepository(db).list_resource_acl(
+ tenant_id=tenant.id, resource_type=resource_type, resource_id=resource_id
+ )
+ typer.echo(json.dumps([row.to_dict() for row in rows], indent=2, default=str))
+ finally:
+ db.close()
+
+
+@acl_app.command("remove")
+def acl_remove(
+ acl_id: int = typer.Option(..., "--acl-id"),
+ tenant_key: str = typer.Option(DEFAULT_TENANT_KEY, "--tenant-key"),
+) -> None:
+ settings = get_settings()
+ initialize_database(settings.database_url)
+ db = create_session_factory(settings.database_url)()
+ redis_client = get_redis()
+ try:
+ tenants = SqlTenantRepository(db)
+ tenant = tenants.get_or_create(tenant_key)
+ repo = SqlACLRepository(db)
+ target = repo.get_acl_entry(tenant_id=tenant.id, acl_id=acl_id)
+ repo.delete_acl_entry(tenant_id=tenant.id, acl_id=acl_id)
+ if target is not None:
+ build_acl_cache(redis_client).invalidate(
+ tenant_id=tenant.id,
+ resource_type=target.resource_type,
+ resource_id=target.resource_id,
+ )
+ build_access_index_cache(redis_client).invalidate_tenant(tenant_id=tenant.id)
+ build_decision_cache(redis_client).bump_namespace(tenant_key)
+ typer.echo(json.dumps({"acl_id": acl_id, "tenant_key": tenant_key}, indent=2))
+ finally:
+ db.close()
+
+
+def _read_applied_revisions(engine) -> set[str]:
+ try:
+ with engine.connect() as connection:
+ return {
+ str(revision)
+ for revision in connection.execute(text("SELECT version_num FROM alembic_version"))
+ .scalars()
+ .all()
+ }
+ except (SQLAlchemyError, Exception):
+ return set()
+
+
def _build_authorization_service(db: Session) -> AuthorizationService:
    """Wire an AuthorizationService over one DB session plus Redis-backed caches.

    Used by local (in-process) commands such as `explain`; all repositories
    share the single `db` session, all caches share one Redis client.
    """
    redis_client = get_redis()
    return AuthorizationService(
        settings=get_settings(),
        tenants=SqlTenantRepository(db),
        policies=SqlPolicyRepository(db),
        users=SqlUserRepository(db),
        relationships=SqlRelationshipRepository(db),
        audit=SqlAuditRepository(db),
        policy_cache=build_policy_cache(redis_client),
        relationship_cache=build_relationship_cache(redis_client),
        decision_cache=build_decision_cache(redis_client),
        acl_repository=SqlACLRepository(db),
        acl_cache=build_acl_cache(redis_client),
        access_index_cache=build_access_index_cache(redis_client),
    )
+
+
+def _coerce_scalar(value: str) -> int | str:
+ return int(value) if value.isdigit() else value
+
+
def main() -> None:
    """Console-script entry point: run the Typer application."""
    app()


if __name__ == "__main__":
    main()
diff --git a/keynetra/config/__init__.py b/keynetra/config/__init__.py
new file mode 100644
index 0000000..dd0d452
--- /dev/null
+++ b/keynetra/config/__init__.py
@@ -0,0 +1 @@
+"""Core configuration and defaults."""
diff --git a/keynetra/config/admin_auth.py b/keynetra/config/admin_auth.py
new file mode 100644
index 0000000..17e96ad
--- /dev/null
+++ b/keynetra/config/admin_auth.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from fastapi import Depends, Request, status
+
+from keynetra.api.errors import ApiError, ApiErrorCode
+from keynetra.config.security import get_principal
+from keynetra.config.tenancy import DEFAULT_TENANT_KEY
+
# Monotonic ranking of management roles; a higher value grants strictly more access.
_ROLE_ORDER = {"viewer": 1, "developer": 2, "admin": 3}


@dataclass(frozen=True)
class AdminAccess:
    """Resolved management access for an authenticated principal."""

    # Tenant the role applies to (core uses the single DEFAULT_TENANT_KEY).
    tenant_key: str
    # One of the keys of _ROLE_ORDER: "viewer", "developer" or "admin".
    role: str
    # Raw principal payload from authentication (type, claims, ...).
    principal: dict[str, Any]
+
+
def require_management_role(minimum_role: str):
    """Return a FastAPI dependency that enforces at least `minimum_role`.

    Raises ValueError immediately for an unknown role name. The returned
    dependency raises ApiError(403) when the principal has no recognizable
    management role, or a weaker one than required.
    """
    if minimum_role not in _ROLE_ORDER:
        raise ValueError(f"unsupported management role: {minimum_role}")

    def dependency(
        request: Request,
        principal: dict[str, Any] = Depends(get_principal),
    ) -> AdminAccess:
        role = _resolve_tenant_role(principal)
        if role is None:
            # No management role at all -> treat as lacking tenant access.
            raise ApiError(
                status_code=status.HTTP_403_FORBIDDEN,
                code=ApiErrorCode.FORBIDDEN,
                message="tenant access denied",
                details={"tenant_key": DEFAULT_TENANT_KEY},
            )
        if _ROLE_ORDER[role] < _ROLE_ORDER[minimum_role]:
            raise ApiError(
                status_code=status.HTTP_403_FORBIDDEN,
                code=ApiErrorCode.FORBIDDEN,
                message="insufficient management role",
                details={
                    "required_role": minimum_role,
                    "actual_role": role,
                    "tenant_key": DEFAULT_TENANT_KEY,
                },
            )
        # Expose the resolved role on request.state for downstream handlers/logging.
        request.state.admin_role = role
        request.state.admin_tenant_key = DEFAULT_TENANT_KEY
        return AdminAccess(tenant_key=DEFAULT_TENANT_KEY, role=role, principal=principal)

    return dependency
+
+
+def _resolve_tenant_role(principal: dict[str, Any]) -> str | None:
+ if principal.get("type") == "api_key":
+ return "admin"
+
+ claims = principal.get("claims")
+ if not isinstance(claims, dict):
+ return None
+
+ tenant_roles = claims.get("tenant_roles")
+ if isinstance(tenant_roles, dict):
+ for role in sorted(
+ tenant_roles.values(), key=lambda item: _ROLE_ORDER.get(item, 0), reverse=True
+ ):
+ if isinstance(role, str) and role in _ROLE_ORDER:
+ return role
+ elif isinstance(tenant_roles, list):
+ for item in tenant_roles:
+ if not isinstance(item, dict):
+ continue
+ role = item.get("role")
+ if isinstance(role, str) and role in _ROLE_ORDER:
+ return role
+
+ role = claims.get("admin_role") or claims.get("role")
+ if isinstance(role, str) and role in _ROLE_ORDER:
+ return role
+
+ roles = claims.get("admin_roles") or claims.get("roles")
+ if isinstance(roles, list):
+ for item in roles:
+ if isinstance(item, str) and item in _ROLE_ORDER:
+ return item
+
+ return None
diff --git a/keynetra/config/config_loader.py b/keynetra/config/config_loader.py
new file mode 100644
index 0000000..5356a3e
--- /dev/null
+++ b/keynetra/config/config_loader.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+import json
+import os
+import tomllib
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any
+
+try:
+ import yaml
+except ModuleNotFoundError: # pragma: no cover - optional parser dependency
+ yaml = None # type: ignore[assignment]
+
+
@dataclass(frozen=True)
class KeyNetraFileConfig:
    """Normalized view of a KeyNetra configuration file.

    Every field defaults to "not provided" (``None`` / empty tuple) so
    callers can distinguish an explicit setting from an absent one.
    """

    database_url: str | None = None  # SQLAlchemy connection URL
    redis_url: str | None = None  # Redis connection URL
    policy_paths: tuple[str, ...] = ()  # files/dirs to load policies from
    model_paths: tuple[str, ...] = ()  # files/dirs to load auth models from
    seed_data: bool | None = None  # tri-state: None means "not specified"
    server_host: str | None = None
    server_port: int | None = None
+
+
def load_config_file(path: str | Path) -> KeyNetraFileConfig:
    """Parse a YAML/JSON/TOML configuration file into a KeyNetraFileConfig.

    Raises ValueError for unsupported extensions, missing optional
    parsers, or a non-object document root.
    """
    config_path = Path(path).expanduser().resolve()
    text = config_path.read_text(encoding="utf-8")
    extension = config_path.suffix.lower()

    if extension in (".yaml", ".yml"):
        if yaml is None:
            raise ValueError("PyYAML is required to parse YAML configuration files")
        document: Any = yaml.safe_load(text)
    elif extension == ".json":
        document = json.loads(text)
    elif extension == ".toml":
        document = tomllib.loads(text)
    else:
        raise ValueError(f"unsupported config file format: {config_path.suffix}")

    if document is None:
        document = {}
    if not isinstance(document, dict):
        raise ValueError("configuration root must be an object")
    return _normalize_config(document)
+
+
def apply_config_to_environment(config: KeyNetraFileConfig) -> None:
    """Mirror the file configuration into KEYNETRA_* environment variables.

    Only values that were actually specified are exported; unset fields
    leave any pre-existing environment variables untouched.
    """
    scalar_values = {
        "KEYNETRA_DATABASE_URL": config.database_url,
        "KEYNETRA_REDIS_URL": config.redis_url,
        "KEYNETRA_SERVER_HOST": config.server_host,
    }
    for variable, value in scalar_values.items():
        if value:
            os.environ[variable] = value

    if config.policy_paths:
        os.environ["KEYNETRA_POLICY_PATHS"] = ",".join(config.policy_paths)
    if config.model_paths:
        os.environ["KEYNETRA_MODEL_PATHS"] = ",".join(config.model_paths)
    if config.seed_data is not None:
        # Tri-state flag: only an explicit True/False is exported.
        os.environ["KEYNETRA_AUTO_SEED_SAMPLE_DATA"] = "true" if config.seed_data else "false"
    if config.server_port is not None:
        os.environ["KEYNETRA_SERVER_PORT"] = str(config.server_port)
+
+
def _normalize_config(payload: dict[str, Any]) -> KeyNetraFileConfig:
    """Build a KeyNetraFileConfig from a parsed configuration mapping."""
    return KeyNetraFileConfig(
        database_url=_as_str(_nested(payload, "database", "url")),
        redis_url=_as_str(_nested(payload, "redis", "url")),
        policy_paths=_paths_from_payload(payload, section="policies", plural_key="policy_paths"),
        model_paths=_paths_from_payload(payload, section="models", plural_key="model_paths"),
        seed_data=_as_bool(payload.get("seed_data")),
        server_host=_as_str(_nested(payload, "server", "host")),
        server_port=_as_int(_nested(payload, "server", "port")),
    )
+
+
+def _paths_from_payload(
+ payload: dict[str, Any], *, section: str, plural_key: str
+) -> tuple[str, ...]:
+ out: list[str] = []
+ if isinstance(payload.get(plural_key), list):
+ out.extend([str(item) for item in payload.get(plural_key, []) if isinstance(item, str)])
+ section_obj = payload.get(section)
+ if isinstance(section_obj, dict):
+ single = section_obj.get("path")
+ if isinstance(single, str):
+ out.append(single)
+ many = section_obj.get("paths")
+ if isinstance(many, list):
+ out.extend([str(item) for item in many if isinstance(item, str)])
+ return tuple(dict.fromkeys(path.strip() for path in out if path and path.strip()))
+
+
+def _nested(payload: dict[str, Any], section: str, key: str) -> Any:
+ section_obj = payload.get(section)
+ if not isinstance(section_obj, dict):
+ return None
+ return section_obj.get(key)
+
+
+def _as_str(value: Any) -> str | None:
+ if value is None:
+ return None
+ if isinstance(value, str):
+ trimmed = value.strip()
+ return trimmed or None
+ return str(value)
+
+
+def _as_int(value: Any) -> int | None:
+ if value is None:
+ return None
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ return None
+
+
+def _as_bool(value: Any) -> bool | None:
+ if value is None:
+ return None
+ if isinstance(value, bool):
+ return value
+ if isinstance(value, str):
+ normalized = value.strip().lower()
+ if normalized in {"true", "1", "yes", "on"}:
+ return True
+ if normalized in {"false", "0", "no", "off"}:
+ return False
+ return None
diff --git a/keynetra/config/file_loaders.py b/keynetra/config/file_loaders.py
new file mode 100644
index 0000000..03f6a98
--- /dev/null
+++ b/keynetra/config/file_loaders.py
@@ -0,0 +1,246 @@
+from __future__ import annotations
+
+import json
+import tomllib
+from pathlib import Path
+from typing import Any
+
+try:
+ import yaml
+except ModuleNotFoundError: # pragma: no cover - optional parser dependency
+ yaml = None # type: ignore[assignment]
+
+
def load_policies_from_paths(paths: list[str]) -> list[dict[str, Any]]:
    """Load and concatenate policies from each file or directory path.

    Directories are scanned recursively (sorted order) for supported
    policy extensions; paths that are neither a file nor a directory
    are silently skipped.
    """
    supported = {".yaml", ".yml", ".json", ".polar"}
    collected: list[dict[str, Any]] = []
    for raw_path in paths:
        entry = Path(raw_path).expanduser()
        if entry.is_dir():
            for child in sorted(entry.rglob("*")):
                if child.is_file() and child.suffix.lower() in supported:
                    collected.extend(load_policies_from_file(child))
        elif entry.is_file():
            collected.extend(load_policies_from_file(entry))
    return collected
+
+
def load_policies_from_file(path: str | Path) -> list[dict[str, Any]]:
    """Parse one policy file (.yaml/.yml/.json/.polar) into policy dicts."""
    policy_path = Path(path).expanduser().resolve()
    text = policy_path.read_text(encoding="utf-8")
    extension = policy_path.suffix.lower()

    if extension in (".yaml", ".yml"):
        if yaml is None:
            raise ValueError("PyYAML is required to parse YAML policy files")
        return _normalize_policy_payload(yaml.safe_load(text))
    if extension == ".json":
        return _normalize_policy_payload(json.loads(text))
    if extension == ".polar":
        return _parse_polar_policy_lines(text)
    raise ValueError(f"unsupported policy format: {policy_path.suffix}")
+
+
def load_authorization_model_from_paths(paths: list[str]) -> str | None:
    """Return the first non-empty authorization model found among *paths*.

    Directories are scanned recursively in sorted order; the first file
    that yields a schema wins. Returns None when nothing matches.
    """
    for raw_path in paths:
        entry = Path(raw_path).expanduser()
        if entry.is_dir():
            for child in sorted(entry.rglob("*")):
                if not child.is_file():
                    continue
                schema = _load_model_file_if_supported(child)
                if schema:
                    return schema
        elif entry.is_file():
            schema = _load_model_file_if_supported(entry)
            if schema:
                return schema
    return None
+
+
+def _load_model_file_if_supported(path: Path) -> str | None:
+ suffix = path.suffix.lower()
+ if suffix in {".yaml", ".yml", ".json", ".toml"}:
+ return load_authorization_model_from_file(path)
+ if suffix in {".schema", ".txt"}:
+ text = path.read_text(encoding="utf-8").strip()
+ return text or None
+ return None
+
+
+def load_authorization_model_from_file(path: str | Path) -> str:
+ model_path = Path(path).expanduser().resolve()
+ suffix = model_path.suffix.lower()
+ raw = model_path.read_text(encoding="utf-8")
+
+ payload: Any
+ if suffix in {".yaml", ".yml"}:
+ if yaml is None:
+ raise ValueError("PyYAML is required to parse YAML model files")
+ payload = yaml.safe_load(raw)
+ elif suffix == ".json":
+ payload = json.loads(raw)
+ elif suffix == ".toml":
+ payload = tomllib.loads(raw)
+ else:
+ raise ValueError(f"unsupported authorization model format: {model_path.suffix}")
+
+ if isinstance(payload, str):
+ text = payload.strip()
+ if not text:
+ raise ValueError("authorization model file is empty")
+ return text
+ if not isinstance(payload, dict):
+ raise ValueError("authorization model file must contain an object")
+ return _model_mapping_to_schema(payload)
+
+
+def _normalize_policy_payload(payload: Any) -> list[dict[str, Any]]:
+ if payload is None:
+ return []
+ if isinstance(payload, list):
+ policies: list[dict[str, Any]] = []
+ for item in payload:
+ policies.extend(_normalize_policy_payload(item))
+ return policies
+ if isinstance(payload, dict):
+ if "policies" in payload and isinstance(payload["policies"], list):
+ return _normalize_policy_payload(payload["policies"])
+ if "allow" in payload or "deny" in payload:
+ return [_policy_from_effect_block(payload)]
+ if "action" in payload:
+ effect = str(payload.get("effect", "deny")).lower()
+ return [
+ {
+ "action": str(payload.get("action", "")),
+ "effect": "allow" if effect == "allow" else "deny",
+ "priority": int(payload.get("priority", 100)),
+ "conditions": dict(payload.get("conditions") or {}),
+ "policy_id": (
+ None if payload.get("policy_id") is None else str(payload.get("policy_id"))
+ ),
+ }
+ ]
+ raise ValueError("invalid policy payload")
+
+
+def _policy_from_effect_block(payload: dict[str, Any]) -> dict[str, Any]:
+ if "allow" in payload:
+ effect = "allow"
+ block = payload.get("allow")
+ else:
+ effect = "deny"
+ block = payload.get("deny")
+ if not isinstance(block, dict):
+ raise ValueError("policy block must be an object")
+ action = str(block.get("action", "")).strip()
+ if not action:
+ raise ValueError("policy action is required")
+ conditions = block.get("when") or block.get("conditions") or {}
+ if not isinstance(conditions, dict):
+ raise ValueError("policy conditions must be an object")
+ return {
+ "action": action,
+ "effect": effect,
+ "priority": int(block.get("priority", 100)),
+ "conditions": dict(conditions),
+ "policy_id": None if block.get("policy_id") is None else str(block.get("policy_id")),
+ }
+
+
def _parse_polar_policy_lines(text: str) -> list[dict[str, Any]]:
    """Parse the simple line-oriented ``.polar`` rule format.

    Each non-comment line is ``allow|deny key=value ...``. An ``action``
    attribute is mandatory; ``priority`` (default 100) and ``policy_id``
    are treated specially, and every remaining attribute becomes a typed
    condition via _coerce_scalar.
    """
    rules: list[dict[str, Any]] = []
    for raw_line in text.splitlines():
        # Strip trailing comments, then skip blank lines.
        rule_text = raw_line.split("#", 1)[0].strip()
        if not rule_text:
            continue
        tokens = rule_text.split()
        effect = tokens[0].lower()
        if effect not in ("allow", "deny"):
            raise ValueError(f"invalid .polar rule: {rule_text}")
        attributes: dict[str, str] = {}
        for token in tokens[1:]:
            if "=" not in token:
                raise ValueError(f"invalid .polar token: {token}")
            name, _, value = token.partition("=")
            attributes[name.strip()] = value.strip()
        action = attributes.pop("action", "").strip()
        if not action:
            raise ValueError(f"missing action in .polar rule: {rule_text}")
        # Pop the reserved keys first so they never leak into conditions.
        priority = int(attributes.pop("priority", "100"))
        policy_id = attributes.pop("policy_id", None)
        rules.append(
            {
                "action": action,
                "effect": effect,
                "priority": priority,
                "conditions": {name: _coerce_scalar(value) for name, value in attributes.items()},
                "policy_id": policy_id,
            }
        )
    return rules
+
+
+def _coerce_scalar(value: str) -> Any:
+ lowered = value.lower()
+ if lowered in {"true", "false"}:
+ return lowered == "true"
+ try:
+ return int(value)
+ except ValueError:
+ pass
+ try:
+ return float(value)
+ except ValueError:
+ pass
+ return value
+
+
+def _model_mapping_to_schema(payload: dict[str, Any]) -> str:
+ model = payload.get("model", payload)
+ if not isinstance(model, dict):
+ raise ValueError("model must be an object")
+ version = int(model.get("schema_version", model.get("version", 1)))
+ object_type = str(model.get("type", "resource")).strip() or "resource"
+ relations_obj = model.get("relations") or {}
+ permissions_obj = model.get("permissions") or {}
+
+ if not isinstance(relations_obj, dict) or not isinstance(permissions_obj, dict):
+ raise ValueError("relations and permissions must be objects")
+
+ types = {"user", object_type}
+ for subjects in relations_obj.values():
+ if isinstance(subjects, str):
+ types.add(subjects)
+ elif isinstance(subjects, list):
+ types.update(str(item) for item in subjects if item)
+
+ lines: list[str] = [f"model schema {version}"]
+ for type_name in sorted(types):
+ lines.append(f"type {type_name}")
+
+ lines.append("relations")
+ for name, subjects in relations_obj.items():
+ if isinstance(subjects, str):
+ subject_list = [subjects]
+ elif isinstance(subjects, list):
+ subject_list = [str(item) for item in subjects if item]
+ else:
+ raise ValueError(f"invalid relation subjects for {name}")
+ lines.append(f"{name}: [{', '.join(subject_list)}]")
+
+ lines.append("permissions")
+ for name, expr in permissions_obj.items():
+ lines.append(f"{name} = {expr}")
+
+ return "\n".join(lines)
diff --git a/keynetra/config/policies.py b/keynetra/config/policies.py
new file mode 100644
index 0000000..42d5473
--- /dev/null
+++ b/keynetra/config/policies.py
@@ -0,0 +1,7 @@
+"""Canonical default policy definitions."""
+
+from __future__ import annotations
+
+from keynetra.config.sample_data import DEFAULT_POLICIES
+
+__all__ = ["DEFAULT_POLICIES"]
diff --git a/keynetra/config/rate_limit.py b/keynetra/config/rate_limit.py
new file mode 100644
index 0000000..bf5a050
--- /dev/null
+++ b/keynetra/config/rate_limit.py
@@ -0,0 +1,165 @@
+"""Redis-backed token bucket middleware for external endpoints."""
+
+from __future__ import annotations
+
+import hashlib
+import math
+import time
+from dataclasses import dataclass
+from threading import Lock
+
+from fastapi import Request, status
+from fastapi.responses import JSONResponse
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.responses import Response
+
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import Settings
+
+
@dataclass
class _LocalBucket:
    """In-process fallback token bucket used when Redis is unavailable."""

    tokens: float  # tokens currently available
    updated_at: float  # epoch seconds of the last refill computation
+
+
# Fallback in-memory buckets keyed by principal hash (per-process only,
# so limits are approximate when running multiple workers without Redis).
_local_limits: dict[str, _LocalBucket] = {}
_local_limits_lock = Lock()
# Operational endpoints that must never be throttled.
_EXEMPT_PATHS = {"/health", "/metrics", "/docs", "/redoc", "/openapi.json"}
# Atomic token-bucket implementation executed server-side in Redis.
# KEYS[1] = bucket hash key; ARGV = now (epoch secs), refill rate
# (tokens/sec), capacity, requested tokens, TTL seconds.
# Returns {allowed(0/1), remaining tokens, retry_after seconds}.
_REDIS_BUCKET_SCRIPT = """
local key = KEYS[1]
local now = tonumber(ARGV[1])
local refill_rate = tonumber(ARGV[2])
local capacity = tonumber(ARGV[3])
local requested = tonumber(ARGV[4])
local ttl = tonumber(ARGV[5])

local values = redis.call("HMGET", key, "tokens", "updated_at")
local tokens = tonumber(values[1])
local updated_at = tonumber(values[2])

if tokens == nil then
    tokens = capacity
end
if updated_at == nil then
    updated_at = now
end

local elapsed = math.max(0, now - updated_at)
tokens = math.min(capacity, tokens + (elapsed * refill_rate))

local allowed = 0
if tokens >= requested then
    tokens = tokens - requested
    allowed = 1
end

redis.call("HMSET", key, "tokens", tokens, "updated_at", now)
redis.call("EXPIRE", key, ttl)

local retry_after = 0
if allowed == 0 then
    retry_after = math.ceil((requested - tokens) / refill_rate)
end

return {allowed, tokens, retry_after}
"""
+
+
class RateLimitMiddleware(BaseHTTPMiddleware):
    """Token-bucket rate limiting: Redis primary, in-process fallback.

    Successful responses carry X-RateLimit-* headers; exhausted buckets
    produce a 429 with Retry-After.
    """

    def __init__(self, app, settings: Settings) -> None:  # type: ignore[override]
        super().__init__(app)
        self._settings = settings
        # Start from a clean slate so buckets from a previously created
        # app instance (e.g. between tests) do not leak into this one.
        with _local_limits_lock:
            _local_limits.clear()

    async def dispatch(self, request: Request, call_next) -> Response:  # type: ignore[override]
        # CORS preflight and operational endpoints bypass rate limiting.
        if request.method.upper() == "OPTIONS" or request.url.path in _EXEMPT_PATHS:
            return await call_next(request)

        decision = self._consume(request)
        if isinstance(decision, Response):
            # Bucket was empty: _consume already built the 429 response.
            return decision
        response = await call_next(request)
        response.headers["X-RateLimit-Limit"] = str(decision.limit)
        response.headers["X-RateLimit-Remaining"] = str(decision.remaining)
        response.headers["X-RateLimit-Reset"] = str(decision.retry_after)
        return response

    def _consume(self, request: Request) -> "_BucketDecision | Response":
        """Take one token for this request; return a decision or a 429 response."""
        rate = max(1, self._settings.rate_limit_per_minute)
        interval = max(1, self._settings.rate_limit_window_seconds)
        capacity = max(1, self._settings.rate_limit_burst or rate)
        refill_rate = rate / interval  # tokens added per second
        now = time.time()
        # Identify the caller by credential when present, else client IP.
        principal = request.headers.get("X-API-Key") or request.headers.get("Authorization")
        if principal is None:
            principal = request.client.host if request.client else "anonymous"
        # Hash the credential so raw secrets never appear in Redis keys.
        principal_hash = hashlib.sha256(principal.encode("utf-8")).hexdigest()[:32]
        key = f"rl:tb:{principal_hash}"
        # Keep the bucket alive long enough to fully refill twice over.
        ttl = max(interval, math.ceil(capacity / refill_rate) * 2)

        redis_client = get_redis()
        if redis_client is not None:
            try:
                allowed, remaining, retry_after = redis_client.eval(
                    _REDIS_BUCKET_SCRIPT,
                    1,
                    key,
                    str(now),
                    str(refill_rate),
                    str(capacity),
                    "1",
                    str(ttl),
                )
                allowed_bool = int(allowed) == 1
                remaining_tokens = max(0, int(float(remaining)))
                retry_after_seconds = max(0, int(retry_after))
                if not allowed_bool:
                    return self._limited_response(limit=capacity, retry_after=retry_after_seconds)
                return _BucketDecision(
                    limit=capacity, remaining=remaining_tokens, retry_after=retry_after_seconds
                )
            except Exception:
                # A Redis outage must not take the API down: fall through
                # to the per-worker in-memory bucket below (best effort).
                pass

        with _local_limits_lock:
            bucket = _local_limits.get(key)
            if bucket is None:
                bucket = _LocalBucket(tokens=float(capacity), updated_at=now)
                _local_limits[key] = bucket
            # Lazily refill based on elapsed wall-clock time.
            elapsed = max(0.0, now - bucket.updated_at)
            bucket.tokens = min(float(capacity), bucket.tokens + (elapsed * refill_rate))
            bucket.updated_at = now
            if bucket.tokens < 1.0:
                retry_after = max(1, math.ceil((1.0 - bucket.tokens) / refill_rate))
                return self._limited_response(limit=capacity, retry_after=retry_after)
            bucket.tokens -= 1.0
            remaining = max(0, int(bucket.tokens))
            return _BucketDecision(limit=capacity, remaining=remaining, retry_after=0)

    def _limited_response(self, *, limit: int, retry_after: int) -> JSONResponse:
        """Build the standard 429 envelope with rate-limit headers."""
        return JSONResponse(
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            headers={
                "Retry-After": str(max(1, retry_after)),
                "X-RateLimit-Limit": str(limit),
                "X-RateLimit-Remaining": "0",
                "X-RateLimit-Reset": str(max(1, retry_after)),
            },
            content={
                "data": None,
                "error": {
                    "code": "too_many_requests",
                    "message": "rate limit exceeded",
                    "details": None,
                },
            },
        )
+
+
@dataclass(frozen=True)
class _BucketDecision:
    """Outcome of a successful token consume, used to fill response headers."""

    limit: int  # bucket capacity (X-RateLimit-Limit)
    remaining: int  # whole tokens left (X-RateLimit-Remaining)
    retry_after: int  # seconds until the next token (X-RateLimit-Reset)
diff --git a/keynetra/config/redis_client.py b/keynetra/config/redis_client.py
new file mode 100644
index 0000000..8c80602
--- /dev/null
+++ b/keynetra/config/redis_client.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from functools import lru_cache
+from typing import Any
+
+try:
+ import redis
+except ModuleNotFoundError: # pragma: no cover - optional dependency in minimal dev/test envs
+ redis = None # type: ignore[assignment]
+
+from keynetra.config.settings import get_settings
+
+
@lru_cache
def get_redis() -> Any | None:
    """Return the process-wide Redis client, or None when unconfigured.

    Cached so every caller shares one client (and its connection pool).
    Returns None when no redis_url is configured or the optional redis
    package is not installed.
    """
    settings = get_settings()
    if not settings.redis_url or redis is None:
        return None
    return redis.Redis.from_url(settings.redis_url, decode_responses=True)
diff --git a/keynetra/config/sample_data.py b/keynetra/config/sample_data.py
new file mode 100644
index 0000000..8a5027d
--- /dev/null
+++ b/keynetra/config/sample_data.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import json
+from typing import Any
+
# Tenant used by the development seed data.
SAMPLE_TENANT_KEY = "default"

# Single seeded user; holds the sample role below.
SAMPLE_USER = {
    "id": 1,
    "external_id": "sample-manager",
}

SAMPLE_ROLE = {
    "name": "manager",
}

# Actions granted alongside the sample role.
SAMPLE_PERMISSIONS = [
    {"action": "approve_payment"},
    {"action": "view_project"},
]

# Relationship tuples seeded alongside the role/permission data.
SAMPLE_RELATIONSHIPS = [
    {
        "subject_type": "user",
        "subject_id": "1",
        "relation": "member_of",
        "object_type": "team",
        "object_id": "engineering",
    }
]

# Named policy definitions used for seeding (policy_key identifies each).
SAMPLE_POLICY_DEFINITIONS = [
    {
        "policy_key": "approve-manager",
        "action": "approve_payment",
        "effect": "allow",
        "priority": 10,
        "conditions": {"role": "manager", "max_amount": 100000},
    },
    {
        "policy_key": "view-owner",
        "action": "view_project",
        "effect": "allow",
        "priority": 10,
        "conditions": {"owner_only": True},
    },
]

# Runtime default policies: the sample definitions without their
# seed-only policy_key field (conditions copied so the originals are
# not shared by reference).
DEFAULT_POLICIES = [
    {
        "action": item["action"],
        "effect": item["effect"],
        "conditions": dict(item["conditions"]),
        "priority": item["priority"],
    }
    for item in SAMPLE_POLICY_DEFINITIONS
]
+
+
def sample_bootstrap_document() -> dict[str, Any]:
    """Return a self-describing development bootstrap document.

    Bundles the default environment variables, the sample dataset, and
    the CLI commands needed to seed and start a local instance. Values
    here are development defaults only (e.g. the "change-me" secret).
    """
    return {
        "env": {
            "KEYNETRA_ENV": "development",
            "KEYNETRA_DEBUG": "true",
            "KEYNETRA_DATABASE_URL": "sqlite+pysqlite:///./keynetra.db",
            "KEYNETRA_REDIS_URL": "redis://localhost:6379/0",
            "KEYNETRA_API_KEYS": "devkey",
            "KEYNETRA_JWT_SECRET": "change-me",
            "KEYNETRA_JWT_ALGORITHM": "HS256",
            "KEYNETRA_CORS_ALLOW_ORIGINS": "http://localhost:5173,http://127.0.0.1:5173",
            "KEYNETRA_CORS_ALLOW_CREDENTIALS": "true",
            "KEYNETRA_CORS_ALLOW_METHODS": "*",
            "KEYNETRA_CORS_ALLOW_HEADERS": "*",
            "KEYNETRA_POLICIES_JSON": json.dumps(DEFAULT_POLICIES, separators=(",", ":")),
            "KEYNETRA_POLICIES_CACHE_TTL_SECONDS": "5",
            "KEYNETRA_DECISION_CACHE_TTL_SECONDS": "5",
            "KEYNETRA_SERVICE_TIMEOUT_SECONDS": "2.0",
            "KEYNETRA_CRITICAL_RETRY_ATTEMPTS": "3",
            "KEYNETRA_RESILIENCE_MODE": "fail_closed",
            "KEYNETRA_RESILIENCE_FALLBACK_BEHAVIOR": "static",
            "KEYNETRA_RATE_LIMIT_PER_MINUTE": "60",
            "KEYNETRA_RATE_LIMIT_BURST": "60",
            "KEYNETRA_RATE_LIMIT_WINDOW_SECONDS": "60",
            "KEYNETRA_OTEL_ENABLED": "false",
            "KEYNETRA_SERVICE_MODE": "all",
            "KEYNETRA_POLICY_EVENTS_CHANNEL": "keynetra:policy_events",
        },
        "sample": {
            "tenant_key": SAMPLE_TENANT_KEY,
            "user": SAMPLE_USER,
            "role": SAMPLE_ROLE,
            "permissions": SAMPLE_PERMISSIONS,
            "relationships": SAMPLE_RELATIONSHIPS,
            "policies": SAMPLE_POLICY_DEFINITIONS,
        },
        "commands": {
            "seed": "PYTHONPATH=core python -m keynetra.cli seed-data --reset",
            "start": "PYTHONPATH=core python -m keynetra.cli start --host 0.0.0.0 --port 8000",
        },
    }
diff --git a/keynetra/config/security.py b/keynetra/config/security.py
new file mode 100644
index 0000000..aace2b0
--- /dev/null
+++ b/keynetra/config/security.py
@@ -0,0 +1,96 @@
+from __future__ import annotations
+
+import hashlib
+import hmac
+import logging
+from typing import Any
+
+from fastapi import Depends, HTTPException, Request, Security, status
+from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
+from jose import JWTError, jwt
+
+from keynetra.config.settings import Settings, get_settings
+from keynetra.infrastructure.logging import log_event
+
# FastAPI security schemes; auto_error=False so missing credentials fall
# through to get_principal's own 401 handling instead of FastAPI's.
api_key_scheme = APIKeyHeader(name="X-API-Key", auto_error=False)
bearer_scheme = HTTPBearer(auto_error=False)
_auth_logger = logging.getLogger("keynetra.auth")
+
+
def _decode_with_jwks(token: str, jwks: dict, audience: str | None, issuer: str | None) -> dict:
    """Validate *token* against a JWKS document and return its claims.

    Tries every key in the set, restricted to the token's ``kid`` when
    one is present; raises JWTError when no key verifies the token.
    Audience verification is skipped when no audience is configured.
    """
    kid = jwt.get_unverified_header(token).get("kid")
    candidate_keys = jwks.get("keys", []) if isinstance(jwks, dict) else []
    for candidate in candidate_keys:
        if kid and candidate.get("kid") != kid:
            continue
        try:
            return jwt.decode(
                token,
                candidate,
                audience=audience,
                issuer=issuer,
                options={"verify_aud": bool(audience)},
            )
        except JWTError:
            continue
    raise JWTError("no matching jwk")
+
+
def _unauthorized(detail: str = "unauthorized") -> HTTPException:
    """Build (not raise) a 401 HTTPException with the given detail."""
    return HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=detail)
+
+
def _log_failed_auth(request: Request, *, reason: str, api_key: str | None = None) -> None:
    """Emit a structured auth-failure event for monitoring.

    Only a short API-key prefix is logged, never the full credential.
    """
    log_event(
        _auth_logger,
        event="auth_failed",
        reason=reason,
        path=request.url.path,
        method=request.method,
        request_id=getattr(request.state, "request_id", None),
        tenant_id="default",
        client_host=request.client.host if request.client else None,
        # Truncate so secrets never land in logs in full.
        api_key_prefix=(api_key or "")[:12] or None,
    )
+
+
+def _matches_api_key(candidate: str, stored_hashes: set[str]) -> bool:
+ candidate_hash = hashlib.sha256(candidate.encode("utf-8")).hexdigest()
+ return any(hmac.compare_digest(candidate_hash, stored_hash) for stored_hash in stored_hashes)
+
+
def get_principal(
    request: Request,
    settings: Settings = Depends(get_settings),
    authorization: HTTPAuthorizationCredentials | None = Security(bearer_scheme),
    x_api_key: str | None = Security(api_key_scheme),
) -> dict[str, Any]:
    """FastAPI dependency resolving the caller to a principal dict.

    Resolution order: the X-API-Key header first, then a Bearer JWT
    (validated against the OIDC JWKS when configured, otherwise the
    shared secret). Raises 401 on invalid or missing credentials,
    logging each failure without leaking secrets.
    """
    api_key_hashes = settings.parsed_api_key_hashes()
    if x_api_key:
        if _matches_api_key(x_api_key, api_key_hashes):
            # Identify the key by a hash prefix, never the raw value.
            return {
                "type": "api_key",
                "id": hashlib.sha256(x_api_key.encode("utf-8")).hexdigest()[:12],
            }
        _log_failed_auth(request, reason="invalid_api_key", api_key=x_api_key)
        raise _unauthorized("invalid api key")

    if authorization and authorization.scheme.lower() == "bearer":
        token = authorization.credentials.strip()
        try:
            if settings.oidc_jwks_url:
                import httpx

                # NOTE(review): the JWKS document is fetched synchronously
                # on every request — consider caching the key set.
                jwks = httpx.get(settings.oidc_jwks_url, timeout=5.0).json()
                payload = _decode_with_jwks(
                    token, jwks, settings.oidc_audience, settings.oidc_issuer
                )
            else:
                payload = jwt.decode(
                    token, settings.jwt_secret, algorithms=[settings.jwt_algorithm]
                )
        except Exception as e:
            _log_failed_auth(request, reason="invalid_jwt")
            raise _unauthorized("invalid jwt") from e
        # Fall back through common subject claims; "jwt" as a last resort.
        subject = payload.get("sub") or payload.get("user_id") or payload.get("client_id") or "jwt"
        return {"type": "jwt", "id": str(subject), "claims": payload}

    _log_failed_auth(request, reason="missing_credentials")
    raise _unauthorized("missing credentials")
diff --git a/keynetra/config/settings.py b/keynetra/config/settings.py
new file mode 100644
index 0000000..7228b41
--- /dev/null
+++ b/keynetra/config/settings.py
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import hashlib
+import json
+from functools import lru_cache
+from typing import Any
+
+from pydantic import Field
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+from keynetra.config.policies import DEFAULT_POLICIES
+
+
class Settings(BaseSettings):
    """Central application settings, loaded from ``KEYNETRA_*`` env vars.

    Comma-separated string fields (api_keys, policy_paths, CORS lists,
    ...) keep their raw form here; use the ``parsed_*`` helpers to get
    structured values.
    """

    model_config = SettingsConfigDict(env_prefix="KEYNETRA_", extra="ignore", populate_by_name=True)

    environment: str = Field(default="development")
    debug: bool = Field(default=False)

    # Core storage backends.
    database_url: str = Field(
        default="sqlite+pysqlite:///./keynetra.db",
    )
    redis_url: str | None = Field(default=None)

    # API authentication: either plaintext keys or pre-hashed SHA-256
    # digests (hashes take precedence — see parsed_api_key_hashes).
    api_keys: str | None = Field(default=None)
    api_key_hashes: str | None = Field(default=None)
    jwt_secret: str = Field(default="change-me")  # must be overridden in production
    jwt_algorithm: str = Field(default="HS256")
    admin_username: str | None = Field(default=None)
    admin_password: str | None = Field(default=None)
    admin_token_expiry_minutes: int = Field(default=60)

    # CORS (comma-separated lists; "*" means allow all).
    cors_allow_origins: str | None = Field(default="http://localhost:5173,http://127.0.0.1:5173")
    cors_allow_origin_regex: str | None = Field(default=None)
    cors_allow_credentials: bool = Field(default=True)
    cors_allow_methods: str = Field(default="*")
    cors_allow_headers: str = Field(default="*")

    # Policy loading: inline JSON takes precedence over file paths
    # (see load_policies), falling back to DEFAULT_POLICIES.
    policies_json: str | None = Field(default=None)
    policy_paths: str | None = Field(default=None)
    model_paths: str | None = Field(default=None)
    decision_cache_ttl_seconds: int = Field(default=5)
    service_timeout_seconds: float = Field(default=2.0)
    critical_retry_attempts: int = Field(default=3)
    resilience_mode: str = Field(default="fail_closed")
    resilience_fallback_behavior: str = Field(default="static")

    # Rate limiting and runtime mode.
    rate_limit_per_minute: int = Field(default=60)
    rate_limit_burst: int | None = Field(default=None)  # None -> use per-minute rate
    rate_limit_window_seconds: int = Field(default=60)
    otel_enabled: bool = Field(default=False)
    service_mode: str = Field(default="all")
    auto_seed_sample_data: bool = Field(default=False)
    server_host: str = Field(default="0.0.0.0")
    server_port: int = Field(default=8000)

    # Policy distribution
    policy_events_channel: str = Field(default="keynetra:policy_events")

    # OIDC / JWKS (optional)
    oidc_jwks_url: str | None = Field(default=None)
    oidc_audience: str | None = Field(default=None)
    oidc_issuer: str | None = Field(default=None)

    def load_policies(self) -> list[dict[str, Any]]:
        """Resolve the active policy set.

        Precedence: valid inline ``policies_json``, then files from
        ``policy_paths``, then the built-in DEFAULT_POLICIES. Malformed
        inline JSON silently falls back to the defaults.
        """
        if not self.policies_json:
            paths = self.parsed_policy_paths()
            if paths:
                # Imported lazily to avoid a settings <-> loader cycle.
                from keynetra.config.file_loaders import load_policies_from_paths

                loaded = load_policies_from_paths(paths)
                if loaded:
                    return loaded
            return DEFAULT_POLICIES

        try:
            decoded = json.loads(self.policies_json)
        except json.JSONDecodeError:
            return DEFAULT_POLICIES

        if not isinstance(decoded, list):
            return DEFAULT_POLICIES

        # Drop non-object entries rather than failing outright.
        return [p for p in decoded if isinstance(p, dict)]

    def parsed_policy_paths(self) -> list[str]:
        """Split policy_paths on commas, dropping empty entries."""
        if not self.policy_paths:
            return []
        return [path.strip() for path in self.policy_paths.split(",") if path.strip()]

    def parsed_model_paths(self) -> list[str]:
        """Split model_paths on commas, dropping empty entries."""
        if not self.model_paths:
            return []
        return [path.strip() for path in self.model_paths.split(",") if path.strip()]

    def parsed_api_keys(self) -> set[str]:
        """Return the configured plaintext API keys as a set."""
        if not self.api_keys:
            return set()
        return {k.strip() for k in self.api_keys.split(",") if k.strip()}

    def parsed_api_key_hashes(self) -> set[str]:
        """Return the SHA-256 digests of all accepted API keys.

        Explicit api_key_hashes take precedence; otherwise the plain
        keys are hashed on the fly.
        """
        if self.api_key_hashes:
            return {value.strip() for value in self.api_key_hashes.split(",") if value.strip()}
        return {hashlib.sha256(key.encode("utf-8")).hexdigest() for key in self.parsed_api_keys()}

    def parsed_cors_allow_origins(self) -> list[str]:
        """Split cors_allow_origins on commas, dropping empty entries."""
        if not self.cors_allow_origins:
            return []
        return [o.strip() for o in self.cors_allow_origins.split(",") if o.strip()]

    def parsed_cors_allow_methods(self) -> list[str]:
        """Return allowed CORS methods; "*" or empty collapses to ["*"]."""
        value = (self.cors_allow_methods or "").strip()
        if not value or value == "*":
            return ["*"]
        return [m.strip() for m in value.split(",") if m.strip()]

    def parsed_cors_allow_headers(self) -> list[str]:
        """Return allowed CORS headers; "*" or empty collapses to ["*"]."""
        value = (self.cors_allow_headers or "").strip()
        if not value or value == "*":
            return ["*"]
        return [h.strip() for h in value.split(",") if h.strip()]
+
+
@lru_cache
def get_settings() -> Settings:
    """Return the process-wide Settings singleton (cached after first read)."""
    return Settings()
+
+
def reset_settings_cache() -> None:
    """Drop the cached Settings so the next get_settings() re-reads the env."""
    get_settings.cache_clear()
diff --git a/keynetra/config/tenancy.py b/keynetra/config/tenancy.py
new file mode 100644
index 0000000..2f6a603
--- /dev/null
+++ b/keynetra/config/tenancy.py
@@ -0,0 +1,7 @@
+from __future__ import annotations
+
# Single-tenant deployments always operate under this tenant key.
DEFAULT_TENANT_KEY = "default"


def get_tenant_key() -> str:
    """Return the tenant key for the current context.

    Currently always DEFAULT_TENANT_KEY; indirection kept so callers do
    not hard-code the constant.
    """
    return DEFAULT_TENANT_KEY
diff --git a/keynetra/domain/__init__.py b/keynetra/domain/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keynetra/domain/models/__init__.py b/keynetra/domain/models/__init__.py
new file mode 100644
index 0000000..8c1965d
--- /dev/null
+++ b/keynetra/domain/models/__init__.py
@@ -0,0 +1 @@
+"""SQLAlchemy models."""
diff --git a/keynetra/domain/models/acl.py b/keynetra/domain/models/acl.py
new file mode 100644
index 0000000..eeb5aa0
--- /dev/null
+++ b/keynetra/domain/models/acl.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import DateTime, ForeignKey, Index, String
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class ResourceACL(Base):
    """Per-resource access-control entry for one (subject, action) pair.

    NOTE(review): ``datetime.utcnow`` stores a naive timestamp and is
    deprecated in Python 3.12+ — consider a timezone-aware default.
    """

    __tablename__ = "resource_acl"

    id: Mapped[int] = mapped_column(primary_key=True)
    tenant_id: Mapped[int] = mapped_column(ForeignKey("tenants.id", ondelete="CASCADE"), index=True)

    # Who the entry applies to.
    subject_type: Mapped[str] = mapped_column(String(32), nullable=False)
    subject_id: Mapped[str] = mapped_column(String(128), nullable=False)
    # What it applies to.
    resource_type: Mapped[str] = mapped_column(String(64), nullable=False)
    resource_id: Mapped[str] = mapped_column(String(128), nullable=False)
    action: Mapped[str] = mapped_column(String(128), nullable=False)
    effect: Mapped[str] = mapped_column(String(16), nullable=False)  # presumably "allow"/"deny" — confirm with writers
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=False), nullable=False, default=datetime.utcnow
    )

    # Composite indexes for the two hot lookups: "who may perform this
    # action on this resource" and "what does this subject have".
    __table_args__ = (
        Index("ix_resource_acl_lookup", "tenant_id", "resource_type", "resource_id", "action"),
        Index("ix_resource_acl_subject", "tenant_id", "subject_type", "subject_id"),
    )
diff --git a/keynetra/domain/models/audit.py b/keynetra/domain/models/audit.py
new file mode 100644
index 0000000..eb689ce
--- /dev/null
+++ b/keynetra/domain/models/audit.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import JSON, DateTime, ForeignKey, String
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class AuditLog(Base):
    """Record of a single authorization decision, including its evidence."""

    __tablename__ = "audit_logs"

    id: Mapped[int] = mapped_column(primary_key=True)
    tenant_id: Mapped[int] = mapped_column(ForeignKey("tenants.id", ondelete="CASCADE"), index=True)

    # Caller identity as resolved by the auth layer.
    principal_type: Mapped[str] = mapped_column(String(32), nullable=False)
    principal_id: Mapped[str] = mapped_column(String(128), nullable=False)

    # JSON snapshots of the evaluated request.
    user: Mapped[dict] = mapped_column(JSON, nullable=False, default=dict)
    action: Mapped[str] = mapped_column(String(128), nullable=False)
    resource: Mapped[dict] = mapped_column(JSON, nullable=False, default=dict)

    decision: Mapped[str] = mapped_column(String(8), nullable=False)  # ALLOW/DENY
    matched_policies: Mapped[list] = mapped_column(JSON, nullable=False, default=list)
    reason: Mapped[str | None] = mapped_column(String(256), nullable=True)
    evaluated_rules: Mapped[list] = mapped_column(JSON, nullable=False, default=list)
    failed_conditions: Mapped[list] = mapped_column(JSON, nullable=False, default=list)

    # NOTE(review): the column is timezone=True but datetime.utcnow
    # yields a naive value (other models here use timezone=False) —
    # confirm the intended timezone handling.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, default=datetime.utcnow
    )
diff --git a/keynetra/domain/models/auth_model.py b/keynetra/domain/models/auth_model.py
new file mode 100644
index 0000000..53d4a60
--- /dev/null
+++ b/keynetra/domain/models/auth_model.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import JSON, DateTime, ForeignKey, Text, UniqueConstraint
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class AuthorizationModel(Base):
    """Per-tenant authorization model: raw DSL text plus its parsed and
    compiled JSON representations. Exactly one row per tenant.
    """

    __tablename__ = "auth_models"

    id: Mapped[int] = mapped_column(primary_key=True)
    # One model per tenant. Uniqueness is enforced by the explicitly named
    # UniqueConstraint below; the previous column-level unique=True created a
    # second, redundant unnamed unique index on the same column, so it has
    # been removed.
    tenant_id: Mapped[int] = mapped_column(
        ForeignKey("tenants.id", ondelete="CASCADE"), index=True
    )
    # Original DSL source, its parsed form, and the compiled artifact.
    schema_text: Mapped[str] = mapped_column(Text, nullable=False)
    schema_json: Mapped[dict] = mapped_column(JSON, nullable=False)
    compiled_json: Mapped[dict] = mapped_column(JSON, nullable=False)
    # NOTE(review): datetime.utcnow yields naive timestamps — confirm the
    # project-wide UTC convention (sibling audit model uses timezone=True).
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=False), nullable=False, default=datetime.utcnow
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=False), nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow
    )

    __table_args__ = (UniqueConstraint("tenant_id", name="uq_auth_models_tenant"),)
diff --git a/keynetra/domain/models/base.py b/keynetra/domain/models/base.py
new file mode 100644
index 0000000..6d6e58c
--- /dev/null
+++ b/keynetra/domain/models/base.py
@@ -0,0 +1,7 @@
+from __future__ import annotations
+
+from sqlalchemy.orm import DeclarativeBase
+
+
class Base(DeclarativeBase):
    """Shared SQLAlchemy declarative base; every ORM model in the package
    registers its table on this base's metadata."""

    pass
diff --git a/keynetra/domain/models/idempotency.py b/keynetra/domain/models/idempotency.py
new file mode 100644
index 0000000..e91219c
--- /dev/null
+++ b/keynetra/domain/models/idempotency.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import DateTime, Integer, String, Text, UniqueConstraint
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class IdempotencyRecord(Base):
    """Persistent idempotency record for replaying write responses.

    A (scope, idempotency_key) pair is unique; the stored response fields are
    nullable until the original write completes (completed_at set).
    """

    __tablename__ = "idempotency_records"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Namespace for the key; exact format is chosen by callers — confirm in
    # the service layer.
    scope: Mapped[str] = mapped_column(String(256), nullable=False)
    idempotency_key: Mapped[str] = mapped_column(String(128), nullable=False)
    # Hash of the original request — presumably used to reject key reuse with
    # a different payload; verify against the middleware that writes it.
    request_hash: Mapped[str] = mapped_column(String(64), nullable=False)
    # Captured response for replay; NULL while the write is in flight.
    response_status_code: Mapped[int | None] = mapped_column(Integer, nullable=True)
    response_body: Mapped[str | None] = mapped_column(Text, nullable=True)
    response_content_type: Mapped[str | None] = mapped_column(String(128), nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, default=datetime.utcnow
    )
    completed_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True)

    __table_args__ = (
        UniqueConstraint("scope", "idempotency_key", name="uq_idempotency_records_scope_key"),
    )
diff --git a/keynetra/domain/models/policy_versioning.py b/keynetra/domain/models/policy_versioning.py
new file mode 100644
index 0000000..aa7969e
--- /dev/null
+++ b/keynetra/domain/models/policy_versioning.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import JSON, DateTime, ForeignKey, Index, Integer, String, UniqueConstraint
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class Policy(Base):
    """Head row for a versioned policy; one per (tenant, policy_key).

    current_version appears to track the active PolicyVersion number —
    confirm against the service layer that bumps it.
    """

    __tablename__ = "policies"

    id: Mapped[int] = mapped_column(primary_key=True)
    tenant_id: Mapped[int] = mapped_column(ForeignKey("tenants.id", ondelete="CASCADE"), index=True)
    policy_key: Mapped[str] = mapped_column(String(64), nullable=False)
    current_version: Mapped[int] = mapped_column(Integer, nullable=False, default=1)
    # Removed the stray `extend_existing = True` class attribute: SQLAlchemy
    # only honors extend_existing when passed inside a __table_args__ dict,
    # so the bare attribute had no effect on the mapped table.

    __table_args__ = (UniqueConstraint("tenant_id", "policy_key", name="uq_policies_tenant_key"),)
+
+
class PolicyVersion(Base):
    """One concrete version of a policy: one row per (policy_id, version)."""

    __tablename__ = "policy_versions"

    id: Mapped[int] = mapped_column(primary_key=True)
    tenant_id: Mapped[int] = mapped_column(ForeignKey("tenants.id", ondelete="CASCADE"), index=True)
    policy_id: Mapped[int] = mapped_column(
        ForeignKey("policies.id", ondelete="CASCADE"), index=True
    )
    version: Mapped[int] = mapped_column(Integer, nullable=False)

    # Rule content: the guarded action, allow/deny effect, ordering priority,
    # and a JSON conditions object.
    action: Mapped[str] = mapped_column(String(128), nullable=False)
    effect: Mapped[str] = mapped_column(String(16), nullable=False, default="deny")
    priority: Mapped[int] = mapped_column(Integer, nullable=False, default=100)
    conditions: Mapped[dict] = mapped_column(JSON, nullable=False, default=dict)

    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, default=datetime.utcnow
    )
    created_by: Mapped[str | None] = mapped_column(String(128), nullable=True)

    __table_args__ = (
        UniqueConstraint("policy_id", "version", name="uq_policy_versions_policy_version"),
        # Composite index matching the tenant/action/priority query shape.
        Index("ix_policy_versions_tenant_action_priority", "tenant_id", "action", "priority"),
    )
diff --git a/keynetra/domain/models/rbac.py b/keynetra/domain/models/rbac.py
new file mode 100644
index 0000000..bc1ec7d
--- /dev/null
+++ b/keynetra/domain/models/rbac.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+from sqlalchemy import Column, ForeignKey, Index, Integer, String, Table, UniqueConstraint
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from keynetra.domain.models.base import Base
+
# Association table: many-to-many between users and roles. Rows disappear
# automatically when either side is deleted (ondelete="CASCADE").
user_roles = Table(
    "user_roles",
    Base.metadata,
    Column("user_id", Integer, ForeignKey("users.id", ondelete="CASCADE"), primary_key=True),
    Column("role_id", Integer, ForeignKey("roles.id", ondelete="CASCADE"), primary_key=True),
)

# Association table: many-to-many between roles and permissions.
role_permissions = Table(
    "role_permissions",
    Base.metadata,
    Column("role_id", Integer, ForeignKey("roles.id", ondelete="CASCADE"), primary_key=True),
    Column(
        "permission_id", Integer, ForeignKey("permissions.id", ondelete="CASCADE"), primary_key=True
    ),
)
+
+
class User(Base):
    """RBAC user row, linked to roles via the user_roles association table."""

    __tablename__ = "users"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Nullable — presumably an identity from an external provider; confirm
    # with the code that populates it.
    external_id: Mapped[str | None] = mapped_column(String(128), nullable=True)

    roles: Mapped[list["Role"]] = relationship(secondary=user_roles, back_populates="users")

    __table_args__ = (Index("ix_users_external_id", "external_id"),)
+
+
class Role(Base):
    """Named role, joined to users and permissions via the association
    tables defined above."""

    __tablename__ = "roles"

    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str] = mapped_column(String(64), nullable=False, unique=True)

    users: Mapped[list[User]] = relationship(secondary=user_roles, back_populates="roles")
    permissions: Mapped[list["Permission"]] = relationship(
        secondary=role_permissions, back_populates="roles"
    )
+
+
class Permission(Base):
    """A single grantable action string; globally unique."""

    __tablename__ = "permissions"

    id: Mapped[int] = mapped_column(primary_key=True)
    action: Mapped[str] = mapped_column(String(128), nullable=False)

    roles: Mapped[list[Role]] = relationship(
        secondary=role_permissions, back_populates="permissions"
    )

    __table_args__ = (UniqueConstraint("action", name="uq_permissions_action"),)
diff --git a/keynetra/domain/models/relationship.py b/keynetra/domain/models/relationship.py
new file mode 100644
index 0000000..cf486bc
--- /dev/null
+++ b/keynetra/domain/models/relationship.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from sqlalchemy import ForeignKey, Index, String, UniqueConstraint
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class Relationship(Base):
    """ReBAC tuple: (subject, relation, object), scoped to a tenant.

    The full tuple is unique; the lookup index covers the common
    "what does this subject relate to via R" query shape.
    """

    __tablename__ = "relationships"

    id: Mapped[int] = mapped_column(primary_key=True)
    tenant_id: Mapped[int] = mapped_column(ForeignKey("tenants.id", ondelete="CASCADE"), index=True)

    subject_type: Mapped[str] = mapped_column(String(32), nullable=False)
    subject_id: Mapped[str] = mapped_column(String(128), nullable=False)
    relation: Mapped[str] = mapped_column(String(64), nullable=False)
    object_type: Mapped[str] = mapped_column(String(32), nullable=False)
    object_id: Mapped[str] = mapped_column(String(128), nullable=False)

    __table_args__ = (
        UniqueConstraint(
            "tenant_id",
            "subject_type",
            "subject_id",
            "relation",
            "object_type",
            "object_id",
            name="uq_relationships_tuple",
        ),
        Index("ix_relationships_lookup", "tenant_id", "subject_type", "subject_id", "relation"),
    )
diff --git a/keynetra/domain/models/tenant.py b/keynetra/domain/models/tenant.py
new file mode 100644
index 0000000..b118217
--- /dev/null
+++ b/keynetra/domain/models/tenant.py
@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+from sqlalchemy import Integer, String
+from sqlalchemy.orm import Mapped, mapped_column
+
+from keynetra.domain.models.base import Base
+
+
class Tenant(Base):
    """Tenant root row; all other domain tables cascade from tenants.id."""

    __tablename__ = "tenants"

    id: Mapped[int] = mapped_column(primary_key=True)
    tenant_key: Mapped[str] = mapped_column(String(64), unique=True, nullable=False)
    # Counters — presumably bumped on policy/authorization changes for cache
    # invalidation; confirm against the services that update them.
    policy_version: Mapped[int] = mapped_column(Integer, nullable=False, default=1)
    authorization_revision: Mapped[int] = mapped_column(Integer, nullable=False, default=1)
diff --git a/keynetra/domain/schemas/__init__.py b/keynetra/domain/schemas/__init__.py
new file mode 100644
index 0000000..f391682
--- /dev/null
+++ b/keynetra/domain/schemas/__init__.py
@@ -0,0 +1 @@
+"""Pydantic schemas."""
diff --git a/keynetra/domain/schemas/access.py b/keynetra/domain/schemas/access.py
new file mode 100644
index 0000000..0a141b6
--- /dev/null
+++ b/keynetra/domain/schemas/access.py
@@ -0,0 +1,63 @@
from __future__ import annotations

from typing import Any

from pydantic import BaseModel, Field


class AccessRequest(BaseModel):
    """Explicit authorization request passed through the API boundary."""

    user: dict[str, Any] = Field(default_factory=dict)
    action: str
    resource: dict[str, Any] = Field(default_factory=dict)
    context: dict[str, Any] = Field(default_factory=dict)
    # Consistency hint ("eventual" by default); revision pins a policy
    # snapshot when supplied — semantics enforced by the service layer.
    consistency: str = "eventual"
    revision: int | None = None


class AccessResponse(BaseModel):
    """Minimal allow/deny answer."""

    allowed: bool


class AccessDecisionResponse(BaseModel):
    """Full decision including explainability fields."""

    allowed: bool
    decision: str
    matched_policies: list[str] = Field(default_factory=list)
    reason: str | None = None
    policy_id: str | None = None
    explain_trace: list[dict[str, Any]] = Field(default_factory=list)
    revision: int | None = None


class SimulationResponse(BaseModel):
    """Dry-run decision result, including which conditions failed."""

    decision: str
    matched_policies: list[str]
    reason: str | None = None
    policy_id: str | None = None
    explain_trace: list[dict[str, Any]] = Field(default_factory=list)
    failed_conditions: list[str] = Field(default_factory=list)
    revision: int | None = None


class BatchAccessItem(BaseModel):
    """One (action, resource) pair within a batch check."""

    action: str
    resource: dict[str, Any] = Field(default_factory=dict)


class BatchAccessRequest(BaseModel):
    """Batch check: one user evaluated against many action/resource pairs."""

    user: dict[str, Any] = Field(default_factory=dict)
    items: list[BatchAccessItem]
    consistency: str = "eventual"
    revision: int | None = None


class BatchAccessResult(BaseModel):
    """Per-item verdict of a batch check."""

    action: str
    allowed: bool
    revision: int | None = None


class BatchAccessResponse(BaseModel):
    """All batch verdicts plus the revision they were evaluated at."""

    results: list[BatchAccessResult]
    revision: int | None = None
diff --git a/keynetra/domain/schemas/api.py b/keynetra/domain/schemas/api.py
new file mode 100644
index 0000000..83e2a13
--- /dev/null
+++ b/keynetra/domain/schemas/api.py
@@ -0,0 +1,33 @@
+"""Shared API envelope schemas for core."""
+
+from __future__ import annotations
+
+from typing import Any, Generic, TypeVar
+
+from pydantic import BaseModel, Field
+
PayloadT = TypeVar("PayloadT")


class ErrorBody(BaseModel):
    """Machine-readable error: code, human message, optional details blob."""

    code: str
    message: str
    details: Any | None = None


class MetaBody(BaseModel):
    """Response metadata: request correlation id plus pagination fields."""

    request_id: str | None = None
    limit: int | None = None
    next_cursor: str | None = None
    extra: dict[str, Any] = Field(default_factory=dict)


class SuccessResponse(BaseModel, Generic[PayloadT]):
    """Success envelope: ``data`` carries the typed payload; ``error`` is
    always None so clients can branch on a single field."""

    data: PayloadT
    meta: MetaBody = Field(default_factory=MetaBody)
    error: None = None


class ErrorResponse(BaseModel):
    """Error envelope: ``error`` is populated and ``data`` is always None."""

    data: None = None
    error: ErrorBody
diff --git a/keynetra/domain/schemas/management.py b/keynetra/domain/schemas/management.py
new file mode 100644
index 0000000..0aa1848
--- /dev/null
+++ b/keynetra/domain/schemas/management.py
@@ -0,0 +1,95 @@
+from __future__ import annotations
+
+from datetime import datetime
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
# --- RBAC role payloads ---


class RoleCreate(BaseModel):
    """Payload for creating a role."""

    name: str


class RoleUpdate(BaseModel):
    """Payload for renaming an existing role."""

    name: str


class RoleOut(BaseModel):
    """Role representation returned by the API."""

    id: int
    name: str


# --- RBAC permission payloads ---


class PermissionCreate(BaseModel):
    """Payload for creating a permission (an action string)."""

    action: str


class PermissionUpdate(BaseModel):
    """Payload for changing a permission's action string."""

    action: str


class PermissionOut(BaseModel):
    """Permission representation returned by the API."""

    id: int
    action: str


class RolePermissionOut(BaseModel):
    """Permission as listed under a specific role."""

    id: int
    action: str


# --- Policy payloads ---


class PolicyCreate(BaseModel):
    """Payload for creating a policy rule."""

    action: str
    effect: str = "allow"
    # Lower values sort first when the engine orders policies.
    priority: int = 100
    conditions: dict[str, Any] = Field(default_factory=dict)


class PolicyOut(BaseModel):
    """Policy rule representation returned by the API."""

    id: int
    action: str
    effect: str
    priority: int
    conditions: dict[str, Any]


# --- ACL payloads ---


class ACLCreate(BaseModel):
    """Payload for one ACL entry: subject, resource, action, effect."""

    subject_type: str
    subject_id: str
    resource_type: str
    resource_id: str
    action: str
    effect: str


class ACLOut(ACLCreate):
    """ACL entry as returned by the API; extends the create payload with
    identifiers and the creation timestamp."""

    id: int
    tenant_id: int
    created_at: datetime | None = None


# --- Audit & admin payloads ---


class AuditRecordOut(BaseModel):
    """Serialized audit log row (mirrors the AuditLog ORM model's fields)."""

    id: int
    principal_type: str
    principal_id: str
    user: dict[str, Any]
    action: str
    resource: dict[str, Any]
    decision: str
    matched_policies: list[Any]
    reason: str | None = None
    evaluated_rules: list[Any]
    failed_conditions: list[Any]
    created_at: datetime


class AdminLoginRequest(BaseModel):
    """Credentials for the admin login endpoint."""

    username: str
    password: str


class AdminLoginResponse(BaseModel):
    """Issued admin bearer token plus expiry and scope metadata."""

    access_token: str
    token_type: str = "bearer"
    expires_in: int
    role: str = "admin"
    tenant_key: str
diff --git a/keynetra/domain/schemas/modeling.py b/keynetra/domain/schemas/modeling.py
new file mode 100644
index 0000000..0dba874
--- /dev/null
+++ b/keynetra/domain/schemas/modeling.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
class AuthModelCreate(BaseModel):
    """Create/replace a tenant's authorization model from DSL text.

    The wire field is named ``schema``; it is aliased to ``schema_text``
    internally to avoid clashing with BaseModel's own ``schema`` attribute.
    """

    model_config = ConfigDict(populate_by_name=True)
    schema_text: str = Field(alias="schema")


class AuthModelOut(BaseModel):
    """Stored authorization model: raw DSL plus parsed and compiled forms."""

    model_config = ConfigDict(populate_by_name=True)
    id: int
    tenant_id: int
    schema_text: str = Field(alias="schema")
    parsed: dict[str, Any]
    compiled: dict[str, Any]


class PolicySimulationInput(BaseModel):
    """Hypothetical change to simulate; every field is optional."""

    policy_change: str | None = None
    relationship_change: dict[str, Any] | None = None
    role_change: dict[str, Any] | None = None


class PolicySimulationRequest(BaseModel):
    """A simulated change plus the access request to re-evaluate under it."""

    simulate: PolicySimulationInput = Field(default_factory=PolicySimulationInput)
    request: dict[str, Any] = Field(default_factory=dict)


class PolicySimulationResponse(BaseModel):
    """Decision before vs. after applying the simulated change."""

    decision_before: dict[str, Any]
    decision_after: dict[str, Any]


class ImpactAnalysisRequest(BaseModel):
    """Proposed policy change to analyze for access impact."""

    policy_change: str


class ImpactAnalysisResponse(BaseModel):
    """Ids gaining/losing access — presumably user ids; confirm with the
    analysis service that fills these lists."""

    gained_access: list[int] = Field(default_factory=list)
    lost_access: list[int] = Field(default_factory=list)
diff --git a/keynetra/engine/__init__.py b/keynetra/engine/__init__.py
new file mode 100644
index 0000000..38ffa4e
--- /dev/null
+++ b/keynetra/engine/__init__.py
@@ -0,0 +1,17 @@
"""Pure engine exports."""

from .keynetra_engine import (
    AuthorizationDecision,
    AuthorizationInput,
    ExplainTraceStep,
    KeyNetraEngine,
    PolicyDefinition,
)

# Public, stable surface of the pure engine package.
__all__ = [
    "AuthorizationDecision",
    "AuthorizationInput",
    "ExplainTraceStep",
    "KeyNetraEngine",
    "PolicyDefinition",
]
diff --git a/keynetra/engine/compiled/__init__.py b/keynetra/engine/compiled/__init__.py
new file mode 100644
index 0000000..1dcbdb9
--- /dev/null
+++ b/keynetra/engine/compiled/__init__.py
@@ -0,0 +1,14 @@
from keynetra.engine.compiled.decision_graph import (
    COMPILED_POLICY_STORE,
    DecisionGraph,
    GraphDecision,
)
from keynetra.engine.compiled.policy_compiler import PolicyAST, compile_policy_graph

# Re-exported compiled-policy API: graph types, the process-wide store
# singleton, and the compiler entry point.
__all__ = [
    "COMPILED_POLICY_STORE",
    "DecisionGraph",
    "GraphDecision",
    "PolicyAST",
    "compile_policy_graph",
]
diff --git a/keynetra/engine/compiled/decision_graph.py b/keynetra/engine/compiled/decision_graph.py
new file mode 100644
index 0000000..fcb429c
--- /dev/null
+++ b/keynetra/engine/compiled/decision_graph.py
@@ -0,0 +1,71 @@
+"""Executable decision graph for compiled policy evaluation."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from threading import RLock
+from typing import Any, Callable
+
+
@dataclass(frozen=True)
class GraphDecision:
    """Outcome of a graph evaluation (allow/deny/abstain) plus provenance."""

    outcome: str
    reason: str | None
    policy_id: str | None


@dataclass(frozen=True)
class CompiledPolicyNode:
    """One compiled policy: routing metadata plus a pre-bound condition check."""

    policy_id: str | None
    action: str
    effect: str
    priority: int
    # Returns (matched, reason); reason is None when the node matched cleanly.
    evaluate: Callable[[Any], tuple[bool, str | None]]


@dataclass
class DecisionGraph:
    """Ordered collection of compiled nodes evaluated first-match-wins."""

    nodes: tuple[CompiledPolicyNode, ...] = field(default_factory=tuple)

    def evaluate(self, authorization_input: Any) -> GraphDecision:
        """Evaluate nodes in order for the input's action.

        The first matching node decides the outcome. If nothing matched but
        some same-action node produced a failure reason, the graph denies
        with the earliest such reason; otherwise it abstains.
        """
        requested_action = getattr(authorization_input, "action", None)
        fallback_reason: str | None = None
        for node in self.nodes:
            if node.action != requested_action:
                continue
            hit, why = node.evaluate(authorization_input)
            if hit:
                return GraphDecision(
                    outcome=node.effect,
                    reason=why or f"matched policy {node.policy_id or node.action}",
                    policy_id=node.policy_id,
                )
            if fallback_reason is None and why is not None:
                fallback_reason = why
        if fallback_reason is None:
            return GraphDecision(outcome="abstain", reason=None, policy_id=None)
        return GraphDecision(outcome="deny", reason=fallback_reason, policy_id=None)
+
+
class CompiledPolicyStore:
    """In-memory compiled graph cache keyed by tenant and policy version."""

    def __init__(self) -> None:
        # RLock so concurrent readers/writers never observe a partial update.
        self._lock = RLock()
        self._graphs: dict[tuple[str, int], DecisionGraph] = {}

    def get(self, tenant_key: str, policy_version: int) -> DecisionGraph | None:
        """Return the cached graph for (tenant, version), or None if absent."""
        with self._lock:
            return self._graphs.get((tenant_key, policy_version))

    def set(self, tenant_key: str, policy_version: int, graph: DecisionGraph) -> None:
        """Cache a graph for (tenant, version), replacing any previous entry."""
        with self._lock:
            self._graphs[(tenant_key, policy_version)] = graph

    def invalidate(self, tenant_key: str) -> None:
        """Drop every cached version belonging to the given tenant."""
        with self._lock:
            stale_keys = [
                cache_key for cache_key in self._graphs if cache_key[0] == tenant_key
            ]
            for cache_key in stale_keys:
                del self._graphs[cache_key]


# Process-wide singleton used by the authorization service.
COMPILED_POLICY_STORE = CompiledPolicyStore()
diff --git a/keynetra/engine/compiled/policy_compiler.py b/keynetra/engine/compiled/policy_compiler.py
new file mode 100644
index 0000000..c420ea3
--- /dev/null
+++ b/keynetra/engine/compiled/policy_compiler.py
@@ -0,0 +1,67 @@
+"""Policy compilation from DSL-shaped policy objects into executable graphs."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.engine.compiled.decision_graph import CompiledPolicyNode, DecisionGraph
+from keynetra.observability.metrics import record_policy_compilation
+
+
@dataclass(frozen=True)
class PolicyAST:
    """Normalized, immutable form of one policy prior to compilation."""

    action: str
    effect: str  # "allow" or "deny" (normalized in compile_policy_graph)
    priority: int
    policy_id: str | None
    conditions: dict[str, Any]
+
+
def compile_policy_ast(ast: PolicyAST, evaluator: Any) -> CompiledPolicyNode:
    """Compile one policy AST into an executable graph node.

    The returned node's ``evaluate`` closure dispatches each condition key
    ``k`` to the evaluator's ``handle_k`` method and ANDs the results.
    """
    # policy_key is DSL metadata rather than a runtime condition; drop it at
    # compile time so it cannot block otherwise valid policies.
    condition_items: list[tuple[str, Any]] = [
        (name, spec) for name, spec in ast.conditions.items() if name != "policy_key"
    ]
    node_id = ast.policy_id or f"{ast.action}:{ast.priority}:{ast.effect}"

    def run_conditions(authorization_input: Any) -> tuple[bool, str | None]:
        for name, spec in condition_items:
            condition_fn = getattr(evaluator, f"handle_{name}", None)
            if condition_fn is None:
                return False, f"unknown condition: {name}"
            ok, why = condition_fn(spec, authorization_input)
            if not ok:
                return False, why or f"{name} mismatch"
        return True, None

    return CompiledPolicyNode(
        policy_id=node_id,
        action=ast.action,
        effect=ast.effect,
        priority=ast.priority,
        evaluate=run_conditions,
    )
+
+
def compile_policy_graph(
    policies: list[dict[str, Any]], evaluator: Any, *, tenant_key: str | None = None
) -> DecisionGraph:
    """Normalize raw policy dicts, compile them, and return a DecisionGraph.

    Policies are sorted by ascending priority (stable) before compilation,
    and a compilation metric is recorded for the tenant.
    """
    ast_nodes: list[PolicyAST] = []
    for raw in policies:
        raw_policy_id = raw.get("policy_id")
        ast_nodes.append(
            PolicyAST(
                action=str(raw.get("action", "")),
                # Anything other than the literal "allow" is normalized to deny.
                effect="allow" if str(raw.get("effect", "deny")) == "allow" else "deny",
                priority=int(raw.get("priority", 100)),
                policy_id=None if raw_policy_id is None else str(raw_policy_id),
                conditions=dict(raw.get("conditions") or {}),
            )
        )
    ast_nodes.sort(key=lambda node: node.priority)
    compiled_nodes = tuple(compile_policy_ast(node, evaluator) for node in ast_nodes)
    record_policy_compilation(tenant=tenant_key)
    return DecisionGraph(nodes=compiled_nodes)
diff --git a/keynetra/engine/keynetra_engine.py b/keynetra/engine/keynetra_engine.py
new file mode 100644
index 0000000..531935f
--- /dev/null
+++ b/keynetra/engine/keynetra_engine.py
@@ -0,0 +1,828 @@
+"""Pure, deterministic authorization engine.
+
+This module is intentionally isolated from HTTP, databases, caches, and other
+external systems. Every input needed to evaluate a decision must be supplied
+explicitly through ``AuthorizationInput``.
+"""
+
+from __future__ import annotations
+
+import time
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Any, Callable, Literal
+
+from keynetra.engine.compiled.decision_graph import DecisionGraph
+from keynetra.engine.compiled.policy_compiler import compile_policy_graph
+from keynetra.engine.model_graph.permission_graph import CompiledPermissionGraph
+from keynetra.observability.metrics import (
+ observe_access_check_latency,
+ record_access_check,
+ record_acl_match,
+ record_policy_evaluation,
+ record_relationship_traversal,
+)
+
+DecisionValue = Literal["allow", "deny"]
+StageOutcome = Literal["allow", "deny", "abstain"]
+
+
@dataclass(frozen=True)
class AuthorizationInput:
    """Explicit request supplied to the pure decision engine.

    The engine is deliberately isolated from I/O, so every piece of data a
    decision needs (ACL rows, access-index rows, compiled graphs) must be
    passed in here explicitly.
    """

    # Free-form subject attributes; condition handlers read keys such as
    # id/role/roles/relations.
    user: dict[str, Any]
    resource: dict[str, Any]
    action: str
    context: dict[str, Any] = field(default_factory=dict)
    # Pre-fetched authorization data; tuples keep the fields immutable.
    acl_entries: tuple[dict[str, Any], ...] = ()
    access_index_entries: tuple[dict[str, Any], ...] = ()
    permission_graph: CompiledPermissionGraph | None = None
    compiled_graph: DecisionGraph | None = None
    tenant_key: str | None = None
+
+
@dataclass(frozen=True)
class PolicyDefinition:
    """Policy definition evaluated by the engine.

    Immutable; ``from_dict`` is the lenient constructor used when policies
    arrive as loosely-typed mappings.
    """

    action: str
    effect: DecisionValue = "deny"
    conditions: dict[str, Any] = field(default_factory=dict)
    priority: int = 100
    policy_id: str | None = None

    @staticmethod
    def from_dict(raw: dict[str, Any]) -> "PolicyDefinition":
        """Build a definition from a mapping, coercing each field.

        Any effect other than the literal string "allow" becomes "deny";
        non-dict conditions are replaced with an empty dict.
        """
        raw_conditions = raw.get("conditions")
        raw_policy_id = raw.get("policy_id")
        return PolicyDefinition(
            action=str(raw.get("action", "")),
            effect="allow" if str(raw.get("effect", "deny")) == "allow" else "deny",
            conditions=raw_conditions if isinstance(raw_conditions, dict) else {},
            priority=int(raw.get("priority", 100)),
            policy_id=None if raw_policy_id is None else str(raw_policy_id),
        )
+
+
@dataclass(frozen=True)
class ExplainTraceStep:
    """One deterministic step in the evaluation trace."""

    step: str
    outcome: str
    detail: str
    policy_id: str | None = None

    def to_dict(self) -> dict[str, str | None]:
        """Serialize the step into a JSON-friendly mapping."""
        return {
            attribute: getattr(self, attribute)
            for attribute in ("step", "outcome", "detail", "policy_id")
        }
+
+
@dataclass(frozen=True)
class AuthorizationDecision:
    """Pure engine output.

    ``policy_id`` points to the winning policy when one exists. The
    ``explain_trace`` documents every relevant evaluation step.
    """

    allowed: bool
    decision: DecisionValue
    reason: str | None
    policy_id: str | None
    explain_trace: tuple[ExplainTraceStep, ...]
    matched_policies: tuple[str, ...] = ()
    failed_conditions: tuple[str, ...] = ()

    @property
    def evaluated_rules(self) -> list[dict[str, str | None]]:
        """Backward-compatible trace alias for existing callers."""

        # Serialized view of explain_trace (see ExplainTraceStep.to_dict).
        return [step.to_dict() for step in self.explain_trace]
+
+
+ConditionHandler = Callable[[Any, AuthorizationInput], tuple[bool, str | None]]
+
+
class ConditionEvaluator:
    """Evaluates policy conditions using only explicit request data.

    Each supported condition key ``k`` maps to a ``handle_k`` method that
    returns ``(matched, reason)``; ``reason`` is None on success.
    """

    def evaluate(
        self, conditions: dict[str, Any], authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """AND all conditions together; the first failure short-circuits."""
        for key, value in conditions.items():
            handler = getattr(self, f"handle_{key}", None)
            if handler is None:
                return False, f"unknown condition: {key}"
            matched, reason = handler(value, authorization_input)
            if matched:
                continue
            return False, reason or f"{key} mismatch"
        return True, None

    def handle_role(
        self, value: str, authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """Match the singular ``role`` field or membership in ``roles``."""
        subject = authorization_input.user
        if subject.get("role") == value:
            return True, None
        role_list = subject.get("roles", [])
        if isinstance(role_list, list) and value in role_list:
            return True, None
        return False, "role mismatch"

    def handle_max_amount(
        self, value: int | float, authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """Allow only when resource.amount (default 0) does not exceed the limit."""
        try:
            within_limit = float(authorization_input.resource.get("amount", 0)) <= float(value)
        except (TypeError, ValueError):
            return False, "invalid amount"
        if within_limit:
            return True, None
        return False, "max_amount exceeded"

    def handle_owner_only(
        self, value: bool, authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """When enabled, require resource.owner_id == user.id."""
        if not value:
            return True, None
        is_owner = (
            authorization_input.resource.get("owner_id")
            == authorization_input.user.get("id")
        )
        return (True, None) if is_owner else (False, "owner mismatch")

    def handle_time_range(
        self, value: dict[str, Any], authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """Check context.current_time (HH:MM string) against an HH:MM window.

        A window with start > end wraps around midnight (e.g. 22:00-06:00).
        """
        if not isinstance(value, dict):
            return False, "invalid time_range"
        start_raw = value.get("start")
        end_raw = value.get("end")
        now_raw = authorization_input.context.get("current_time")
        if not (isinstance(start_raw, str) and isinstance(end_raw, str)):
            return False, "invalid time_range"
        if not isinstance(now_raw, str):
            return False, "missing context.current_time"
        try:
            window_start = datetime.strptime(start_raw, "%H:%M").time()
            window_end = datetime.strptime(end_raw, "%H:%M").time()
            now = datetime.strptime(now_raw, "%H:%M").time()
        except ValueError:
            return False, "invalid time_range"
        if window_start <= window_end:
            inside = window_start <= now <= window_end
        else:
            # Overnight window: match either side of midnight.
            inside = now >= window_start or now <= window_end
        return (True, None) if inside else (False, "time_range mismatch")

    def handle_geo_match(
        self, value: dict[str, Any], authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """Require a non-null user field to equal the resource field.

        Field names default to "country" on both sides.
        """
        if not isinstance(value, dict):
            return False, "invalid geo_match"
        user_value = authorization_input.user.get(value.get("user_field", "country"))
        resource_value = authorization_input.resource.get(
            value.get("resource_field", "country")
        )
        if user_value is not None and user_value == resource_value:
            return True, None
        return False, "geo mismatch"

    def handle_has_relation(
        self, value: dict[str, Any], authorization_input: AuthorizationInput
    ) -> tuple[bool, str | None]:
        """Require a (relation, object_type, object_id) edge in user.relations."""
        if not isinstance(value, dict):
            return False, "invalid has_relation"
        relation = value.get("relation")
        object_type = value.get("object_type")
        object_id = value.get("object_id")
        # The target id may be resolved indirectly from a resource attribute.
        indirect_field = value.get("object_id_from_resource")
        if object_id is None and isinstance(indirect_field, str):
            object_id = authorization_input.resource.get(indirect_field)
        if not (isinstance(relation, str) and isinstance(object_type, str)) or object_id is None:
            return False, "invalid has_relation"
        relations = authorization_input.user.get("relations", [])
        if not isinstance(relations, list):
            return False, "no relations"
        wanted = (relation, object_type, str(object_id))
        for edge in relations:
            if isinstance(edge, dict) and (
                edge.get("relation"),
                edge.get("object_type"),
                str(edge.get("object_id")),
            ) == wanted:
                return True, None
        return False, "relation mismatch"
+
+
+class KeyNetraEngine:
+ """Deterministic evaluator over explicit input and policy definitions."""
+
+ def __init__(
+ self,
+ policies: list[PolicyDefinition | dict[str, Any]],
+ strategy: str = "first_match",
+ compiled_graph: DecisionGraph | None = None,
+ ) -> None:
+ parsed = [
+ p if isinstance(p, PolicyDefinition) else PolicyDefinition.from_dict(p)
+ for p in policies
+ ]
+ self._policies: tuple[PolicyDefinition, ...] = tuple(
+ sorted(parsed, key=lambda policy: policy.priority)
+ )
+ self._evaluator = ConditionEvaluator()
+ self._compiled_graph = compiled_graph or compile_policy_graph(
+ [
+ {
+ "action": policy.action,
+ "effect": policy.effect,
+ "priority": policy.priority,
+ "conditions": policy.conditions,
+ "policy_id": policy.policy_id,
+ }
+ for policy in self._policies
+ ],
+ self._evaluator,
+ )
+ self._strategy = strategy
+
+ def decide(
+ self,
+ authorization_input: AuthorizationInput | dict[str, Any],
+ action: str | None = None,
+ resource: dict[str, Any] | None = None,
+ ) -> AuthorizationDecision:
+ """Evaluate access.
+
+ The legacy ``decide(user, action, resource)`` call shape remains
+ supported for compatibility, but all new code should pass a single
+ ``AuthorizationInput`` instance.
+ """
+
+ normalized_input = self._normalize_input(
+ authorization_input, action=action, resource=resource
+ )
+ return self._decide_structured(normalized_input)
+
+ def check_access(
+ self,
+ *,
+ subject: str | dict[str, Any],
+ action: str,
+ resource: str | dict[str, Any],
+ context: dict[str, Any] | None = None,
+ ) -> AuthorizationDecision:
+ """Headless convenience API for direct engine embedding.
+
+ This method intentionally performs lightweight subject/resource parsing
+ only, then delegates to the deterministic ``decide`` pipeline.
+ """
+
+ user_payload = self._normalize_subject(subject)
+ resource_payload = self._normalize_resource(resource)
+ return self.decide(
+ AuthorizationInput(
+ user=user_payload,
+ action=action,
+ resource=resource_payload,
+ context=dict(context or {}),
+ )
+ )
+
+ def _normalize_input(
+ self,
+ authorization_input: AuthorizationInput | dict[str, Any],
+ *,
+ action: str | None,
+ resource: dict[str, Any] | None,
+ ) -> AuthorizationInput:
+ if isinstance(authorization_input, AuthorizationInput):
+ return authorization_input
+ if action is None or resource is None:
+ raise TypeError(
+ "authorization_input, action, and resource are required for legacy decide() calls"
+ )
+ return AuthorizationInput(
+ user=dict(authorization_input), action=action, resource=dict(resource), context={}
+ )
+
+ def _normalize_subject(self, subject: str | dict[str, Any]) -> dict[str, Any]:
+ if isinstance(subject, dict):
+ return dict(subject)
+ subject_type, subject_id = self._parse_descriptor(subject)
+ if subject_type == "user":
+ return {"id": subject_id}
+ return {"id": subject_id, "subject_type": subject_type}
+
+ def _normalize_resource(self, resource: str | dict[str, Any]) -> dict[str, Any]:
+ if isinstance(resource, dict):
+ return dict(resource)
+ resource_type, resource_id = self._parse_descriptor(resource)
+ return {
+ "id": resource_id,
+ "resource_id": resource_id,
+ "resource_type": resource_type,
+ "type": resource_type,
+ }
+
+ def _parse_descriptor(self, value: str) -> tuple[str, str]:
+ normalized = str(value).strip()
+ if ":" not in normalized:
+ return normalized or "unknown", normalized or ""
+ prefix, suffix = normalized.split(":", 1)
+ return prefix.strip() or "unknown", suffix.strip()
+
    def _decide_structured(self, authorization_input: AuthorizationInput) -> AuthorizationDecision:
        """Run the staged authorization pipeline and return the first decisive result.

        Stage order: direct permissions -> ACL -> role permissions ->
        relationship index -> schema permission graph -> compiled policies.
        Each stage yields ``(outcome, reason, policy_id)`` where outcome is
        "allow", "deny", or "abstain"; the first non-abstaining stage decides.
        If every stage abstains, the request is denied (default-deny).

        Per-stage latency is observed under its metric label ("rbac", "acl",
        "relationship", "schema", "policy") plus a "total" observation, and the
        final decision is counted via ``record_access_check``.
        """
        total_started = time.perf_counter()
        trace: list[ExplainTraceStep] = [
            ExplainTraceStep(
                step="start",
                outcome="continue",
                detail=f"evaluate action={authorization_input.action}",
            )
        ]
        # Computed once and shared by the ACL and relationship-index stages.
        user_subjects = self._subject_descriptors(authorization_input)

        # Stage 1: explicit per-user permission grants.
        stage_started = time.perf_counter()
        stage = self._evaluate_direct_permissions(authorization_input, trace=trace)
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="rbac",
            value=time.perf_counter() - stage_started,
        )
        if stage[0] != "abstain":
            return self._decision_from_stage(
                stage,
                trace=trace,
                policy_id="rbac:permissions",
                matched=("rbac:permissions",),
                authorization_input=authorization_input,
                total_started=total_started,
            )

        # Stage 2: explicit ACL entries (policy id comes from the matched entry).
        stage_started = time.perf_counter()
        stage = self._evaluate_acl(authorization_input, trace=trace, user_subjects=user_subjects)
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="acl",
            value=time.perf_counter() - stage_started,
        )
        if stage[0] != "abstain":
            return self._decision_from_stage(
                stage,
                trace=trace,
                policy_id=stage[2],
                matched=(stage[2],) if stage[2] else (),
                authorization_input=authorization_input,
                total_started=total_started,
            )

        # Stage 3: role-derived permission grants (also reported as "rbac" latency).
        stage_started = time.perf_counter()
        stage = self._evaluate_role_permissions(authorization_input, trace=trace)
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="rbac",
            value=time.perf_counter() - stage_started,
        )
        if stage[0] != "abstain":
            return self._decision_from_stage(
                stage,
                trace=trace,
                policy_id="rbac:role",
                matched=("rbac:role",),
                authorization_input=authorization_input,
                total_started=total_started,
            )

        # Stage 4: precomputed relationship index lookups.
        stage_started = time.perf_counter()
        stage = self._evaluate_relationship_index(
            authorization_input, trace=trace, user_subjects=user_subjects
        )
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="relationship",
            value=time.perf_counter() - stage_started,
        )
        if stage[0] != "abstain":
            return self._decision_from_stage(
                stage,
                trace=trace,
                policy_id="relationship:index",
                matched=("relationship:index",),
                authorization_input=authorization_input,
                total_started=total_started,
            )

        # Stage 5: schema-first authorization model (permission graph).
        stage_started = time.perf_counter()
        stage = self._evaluate_permission_graph(authorization_input, trace=trace)
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="schema",
            value=time.perf_counter() - stage_started,
        )
        if stage[0] != "abstain":
            return self._decision_from_stage(
                stage,
                trace=trace,
                policy_id=stage[2],
                matched=(stage[2],) if stage[2] else (),
                authorization_input=authorization_input,
                total_started=total_started,
            )

        # Stage 6: compiled policy graph (last substantive stage).
        stage_started = time.perf_counter()
        stage = self._evaluate_compiled_policies(authorization_input, trace=trace)
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="policy",
            value=time.perf_counter() - stage_started,
        )
        if stage[0] != "abstain":
            return self._decision_from_stage(
                stage,
                trace=trace,
                policy_id=stage[2],
                matched=(stage[2],) if stage[2] else (),
                authorization_input=authorization_input,
                total_started=total_started,
            )

        # Every stage abstained: default-deny.
        trace.append(ExplainTraceStep(step="final", outcome="deny", detail="no matching policy"))
        observe_access_check_latency(
            tenant=authorization_input.tenant_key,
            stage="total",
            value=time.perf_counter() - total_started,
        )
        record_access_check(tenant=authorization_input.tenant_key, decision="deny")
        return AuthorizationDecision(
            allowed=False,
            decision="deny",
            reason="no matching policy",
            policy_id=None,
            explain_trace=tuple(trace),
            matched_policies=(),
            failed_conditions=(),
        )
+
+ def _decision_from_stage(
+ self,
+ stage: tuple[StageOutcome, str | None, str | None],
+ *,
+ trace: list[ExplainTraceStep],
+ policy_id: str | None,
+ matched: tuple[str, ...],
+ authorization_input: AuthorizationInput,
+ total_started: float,
+ ) -> AuthorizationDecision:
+ outcome, reason, stage_policy_id = stage
+ final_policy_id = stage_policy_id or policy_id
+ final_detail = reason or f"decision {outcome}"
+ if final_policy_id == "rbac:permissions" and outcome == "allow":
+ final_detail = "granted by explicit permission"
+ trace.append(
+ ExplainTraceStep(
+ step="final",
+ outcome=outcome,
+ detail=final_detail,
+ policy_id=final_policy_id,
+ )
+ )
+ observe_access_check_latency(
+ tenant=authorization_input.tenant_key,
+ stage="total",
+ value=time.perf_counter() - total_started,
+ )
+ record_access_check(
+ tenant=authorization_input.tenant_key,
+ decision="allow" if outcome == "allow" else "deny",
+ )
+ return AuthorizationDecision(
+ allowed=outcome == "allow",
+ decision="allow" if outcome == "allow" else "deny",
+ reason=reason,
+ policy_id=final_policy_id,
+ explain_trace=tuple(trace),
+ matched_policies=matched if outcome == "allow" else (),
+ failed_conditions=(),
+ )
+
+ def _evaluate_direct_permissions(
+ self, authorization_input: AuthorizationInput, *, trace: list[ExplainTraceStep]
+ ) -> tuple[StageOutcome, str | None, str | None]:
+ permissions = authorization_input.user.get(
+ "direct_permissions", authorization_input.user.get("permissions", [])
+ )
+ if isinstance(permissions, list) and authorization_input.action in permissions:
+ trace.append(
+ ExplainTraceStep(
+ step="rbac_permissions",
+ outcome="matched",
+ detail="explicit permission grant matched input action",
+ policy_id="rbac:permissions",
+ )
+ )
+ return "allow", "explicit permission grant", "rbac:permissions"
+ trace.append(
+ ExplainTraceStep(
+ step="rbac_permissions", outcome="abstain", detail="no direct permission match"
+ )
+ )
+ return "abstain", None, None
+
    def _evaluate_acl(
        self,
        authorization_input: AuthorizationInput,
        *,
        trace: list[ExplainTraceStep],
        user_subjects: set[str],
    ) -> tuple[StageOutcome, str | None, str | None]:
        """Evaluate explicit ACL entries for the target resource/action.

        Returns ``(effect, reason, "acl:<id>")`` for the first matching entry
        whose effect is "allow" or "deny", or ``("abstain", None, None)`` when
        nothing applies. Entries come from ``authorization_input.acl_entries``,
        falling back to ACL-sourced rows of the precomputed access index.
        """
        resource_type, resource_id = self._resource_identity(authorization_input.resource)
        if not resource_type or not resource_id:
            # Without a concrete resource identity, ACL matching is impossible.
            trace.append(
                ExplainTraceStep(
                    step="acl", outcome="abstain", detail="resource identity unavailable"
                )
            )
            return "abstain", None, None
        acl_entries = authorization_input.acl_entries
        if not acl_entries and authorization_input.access_index_entries:
            # Fall back to access-index rows that originated from ACLs.
            acl_entries = tuple(
                entry
                for entry in authorization_input.access_index_entries
                if str(entry.get("source")) == "acl"
            )
        matched = False
        for entry in acl_entries:
            if self._acl_matches(
                entry, resource_type, resource_id, authorization_input.action, user_subjects
            ):
                matched = True
                record_acl_match(tenant=authorization_input.tenant_key)
                # Unknown effects are traced as "abstain" and do not decide the outcome.
                effect = str(entry.get("effect", "deny")).lower()
                subject = f"{entry.get('subject_type')}:{entry.get('subject_id')}"
                # NOTE(review): index-sourced fallback rows appear to carry "acl_id"
                # rather than "id", which would render this as "acl:None" — verify.
                trace.append(
                    ExplainTraceStep(
                        step="acl",
                        outcome=effect if effect in {"allow", "deny"} else "abstain",
                        detail=f"matched ACL entry {subject} {authorization_input.action} {resource_type}:{resource_id}",
                        policy_id=f"acl:{entry.get('id')}",
                    )
                )
                if effect in {"allow", "deny"}:
                    return (
                        effect,
                        f"matched ACL entry {subject} {authorization_input.action} {resource_type}:{resource_id}",
                        f"acl:{entry.get('id')}",
                    )
        if not matched:
            trace.append(ExplainTraceStep(step="acl", outcome="abstain", detail="no ACL match"))
        return "abstain", None, None
+
+ def _evaluate_role_permissions(
+ self, authorization_input: AuthorizationInput, *, trace: list[ExplainTraceStep]
+ ) -> tuple[StageOutcome, str | None, str | None]:
+ permissions = authorization_input.user.get("role_permissions", [])
+ if isinstance(permissions, list) and authorization_input.action in permissions:
+ trace.append(
+ ExplainTraceStep(
+ step="rbac_role",
+ outcome="allow",
+ detail="role permission grant",
+ policy_id="rbac:role",
+ )
+ )
+ return "allow", "role permission grant", "rbac:role"
+ trace.append(
+ ExplainTraceStep(step="rbac_role", outcome="abstain", detail="no role permission match")
+ )
+ return "abstain", None, None
+
+ def _evaluate_relationship_index(
+ self,
+ authorization_input: AuthorizationInput,
+ *,
+ trace: list[ExplainTraceStep],
+ user_subjects: set[str],
+ ) -> tuple[StageOutcome, str | None, str | None]:
+ record_relationship_traversal(tenant=authorization_input.tenant_key)
+ resource_type, resource_id = self._resource_identity(authorization_input.resource)
+ if not resource_type or not resource_id:
+ trace.append(
+ ExplainTraceStep(
+ step="relationship", outcome="abstain", detail="resource identity unavailable"
+ )
+ )
+ return "abstain", None, None
+ for entry in authorization_input.access_index_entries:
+ if str(entry.get("source")) != "relationship":
+ continue
+ if (
+ str(entry.get("resource_type")) != resource_type
+ or str(entry.get("resource_id")) != resource_id
+ ):
+ continue
+ if str(entry.get("action")) not in {authorization_input.action, "*"}:
+ continue
+ allowed = entry.get("allowed_subjects", [])
+ if not isinstance(allowed, (list, tuple, set)):
+ continue
+ if any(str(subject) in user_subjects for subject in allowed):
+ trace.append(
+ ExplainTraceStep(
+ step="relationship",
+ outcome="allow",
+ detail=f"relationship index match for {resource_type}:{resource_id}",
+ policy_id="relationship:index",
+ )
+ )
+ return (
+ "allow",
+ f"relationship index match for {resource_type}:{resource_id}",
+ "relationship:index",
+ )
+ trace.append(
+ ExplainTraceStep(
+ step="relationship", outcome="abstain", detail="no relationship index match"
+ )
+ )
+ return "abstain", None, None
+
+ def _evaluate_compiled_policies(
+ self, authorization_input: AuthorizationInput, *, trace: list[ExplainTraceStep]
+ ) -> tuple[StageOutcome, str | None, str | None]:
+ record_policy_evaluation(tenant=authorization_input.tenant_key)
+ graph = authorization_input.compiled_graph or self._compiled_graph
+ graph_decision = graph.evaluate(authorization_input)
+ if graph_decision.outcome == "abstain":
+ trace.append(
+ ExplainTraceStep(
+ step="policy_graph", outcome="abstain", detail="no matching policy node"
+ )
+ )
+ return "abstain", None, None
+ trace.append(
+ ExplainTraceStep(
+ step="policy_graph",
+ outcome=graph_decision.outcome,
+ detail=graph_decision.reason or f"decision {graph_decision.outcome}",
+ policy_id=graph_decision.policy_id,
+ )
+ )
+ return graph_decision.outcome, graph_decision.reason, graph_decision.policy_id
+
+ def _evaluate_permission_graph(
+ self, authorization_input: AuthorizationInput, *, trace: list[ExplainTraceStep]
+ ) -> tuple[StageOutcome, str | None, str | None]:
+ graph = authorization_input.permission_graph
+ if graph is None:
+ trace.append(
+ ExplainTraceStep(
+ step="permission_graph", outcome="abstain", detail="no authorization model"
+ )
+ )
+ return "abstain", None, None
+ graph_decision = graph.evaluate(authorization_input)
+ if graph_decision.outcome == "abstain":
+ trace.append(
+ ExplainTraceStep(
+ step="permission_graph",
+ outcome="abstain",
+ detail="permission graph did not apply",
+ )
+ )
+ return "abstain", None, None
+ trace.append(
+ ExplainTraceStep(
+ step="permission_graph",
+ outcome=graph_decision.outcome,
+ detail=graph_decision.reason or f"decision {graph_decision.outcome}",
+ policy_id=graph_decision.policy_id,
+ )
+ )
+ return graph_decision.outcome, graph_decision.reason, graph_decision.policy_id
+
+ def _subject_descriptors(self, authorization_input: AuthorizationInput) -> set[str]:
+ descriptors: set[str] = set()
+ user = authorization_input.user
+ user_id = user.get("id")
+ if user_id is not None:
+ descriptors.add(f"user:{user_id}")
+ roles = user.get("roles", [])
+ if isinstance(roles, list):
+ descriptors.update(f"role:{role}" for role in roles if role is not None)
+ permissions = user.get("permissions", [])
+ if isinstance(permissions, list):
+ descriptors.update(
+ f"permission:{permission}" for permission in permissions if permission is not None
+ )
+ direct_permissions = user.get("direct_permissions", [])
+ if isinstance(direct_permissions, list):
+ descriptors.update(
+ f"permission:{permission}"
+ for permission in direct_permissions
+ if permission is not None
+ )
+ relations = user.get("relations", [])
+ if isinstance(relations, list):
+ for relation in relations:
+ if not isinstance(relation, dict):
+ continue
+ relation_type = str(relation.get("relation", ""))
+ object_type = str(relation.get("object_type", ""))
+ object_id = str(relation.get("object_id", ""))
+ if relation_type and object_type and object_id:
+ descriptors.add(f"relationship:{relation_type}:{object_type}:{object_id}")
+ return descriptors
+
+ def _resource_identity(self, resource: dict[str, Any]) -> tuple[str, str]:
+ resource_type = str(
+ resource.get("resource_type")
+ or resource.get("type")
+ or resource.get("kind")
+ or resource.get("entity_type")
+ or ""
+ )
+ resource_id = str(resource.get("resource_id") or resource.get("id") or "")
+ return resource_type, resource_id
+
+ def _acl_matches(
+ self,
+ entry: dict[str, Any],
+ resource_type: str,
+ resource_id: str,
+ action: str,
+ user_subjects: set[str],
+ ) -> bool:
+ if (
+ str(entry.get("resource_type")) != resource_type
+ or str(entry.get("resource_id")) != resource_id
+ ):
+ return False
+ if str(entry.get("action")) != action:
+ return False
+ subject_type = str(entry.get("subject_type", ""))
+ subject_id = str(entry.get("subject_id", ""))
+ return self._acl_subject_matches(subject_type, subject_id, user_subjects)
+
+ def _acl_subject_matches(
+ self, subject_type: str, subject_id: str, user_subjects: set[str]
+ ) -> bool:
+ if not subject_type or not subject_id:
+ return False
+ if subject_type == "relationship":
+ normalized_subject_id = (
+ subject_id[12:] if subject_id.startswith("relationship:") else subject_id
+ )
+ candidates = {
+ subject_id,
+ normalized_subject_id,
+ f"relationship:{normalized_subject_id}",
+ }
+ return any(candidate in user_subjects for candidate in candidates)
+ return f"{subject_type}:{subject_id}" in user_subjects
+
+ def _decision_from_policy(
+ self,
+ policy: PolicyDefinition,
+ *,
+ trace: list[ExplainTraceStep],
+ failed_conditions: list[str],
+ ) -> AuthorizationDecision:
+ policy_id = self._policy_id(policy)
+ trace.append(
+ ExplainTraceStep(
+ step="final",
+ outcome=policy.effect,
+ detail=f"selected policy effect={policy.effect}",
+ policy_id=policy_id,
+ )
+ )
+ return AuthorizationDecision(
+ allowed=policy.effect == "allow",
+ decision=policy.effect,
+ reason=f"matched policy {policy_id}" if policy_id else "matched policy",
+ policy_id=policy_id,
+ explain_trace=tuple(trace),
+ matched_policies=(policy_id,) if policy_id is not None else (),
+ failed_conditions=tuple(failed_conditions),
+ )
+
+ def _best_reason(
+ self, evaluated: list[tuple[PolicyDefinition, bool, str | None]]
+ ) -> str | None:
+ for _policy, matched, reason in evaluated:
+ if not matched and reason:
+ return reason
+ return None
+
+ def _policy_id(self, policy: PolicyDefinition) -> str | None:
+ return policy.policy_id or f"{policy.action}:{policy.priority}:{policy.effect}"
diff --git a/keynetra/engine/model_graph/__init__.py b/keynetra/engine/model_graph/__init__.py
new file mode 100644
index 0000000..31992a9
--- /dev/null
+++ b/keynetra/engine/model_graph/__init__.py
@@ -0,0 +1,13 @@
+from keynetra.engine.model_graph.permission_graph import (
+ MODEL_GRAPH_STORE,
+ AuthorizationGraphDecision,
+ CompiledPermissionGraph,
+ PermissionGraphStore,
+)
+
+__all__ = [
+ "AuthorizationGraphDecision",
+ "CompiledPermissionGraph",
+ "MODEL_GRAPH_STORE",
+ "PermissionGraphStore",
+]
diff --git a/keynetra/engine/model_graph/graph_executor.py b/keynetra/engine/model_graph/graph_executor.py
new file mode 100644
index 0000000..2ba8d80
--- /dev/null
+++ b/keynetra/engine/model_graph/graph_executor.py
@@ -0,0 +1,11 @@
+"""Graph execution helpers for permission graphs."""
+
+from __future__ import annotations
+
+from keynetra.engine.model_graph.permission_graph import (
+ CompiledPermissionGraph,
+)
+
+
def execute_permission_graph(graph: CompiledPermissionGraph, authorization_input):
    """Evaluate *authorization_input* against *graph*.

    Thin convenience wrapper around ``CompiledPermissionGraph.evaluate``;
    returns the graph's decision object unchanged.
    """
    return graph.evaluate(authorization_input)
diff --git a/keynetra/engine/model_graph/permission_graph.py b/keynetra/engine/model_graph/permission_graph.py
new file mode 100644
index 0000000..52b4551
--- /dev/null
+++ b/keynetra/engine/model_graph/permission_graph.py
@@ -0,0 +1,116 @@
+"""Compiled permission graph for schema-first authorization models."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from threading import RLock
+from typing import Any
+
+from keynetra.modeling.permission_compiler import CompiledAuthorizationModel
+
+
@dataclass(frozen=True)
class AuthorizationGraphDecision:
    """Immutable outcome of evaluating an authorization model graph."""

    # "allow", "deny", or "abstain" (the model did not apply).
    outcome: str
    # Human-readable explanation; None when the graph abstains.
    reason: str | None
    # Identifier of the matching model permission, e.g. "auth-model:<action>"; None on abstain.
    policy_id: str | None
+
+
@dataclass(frozen=True)
class CompiledPermissionGraph:
    """A compiled authorization model bound to one tenant, evaluable per request."""

    # Tenant this graph was compiled for.
    tenant_key: str
    # Compiled schema (permission name -> boolean relation expression).
    model: CompiledAuthorizationModel

    def evaluate(self, authorization_input: Any) -> AuthorizationGraphDecision:
        """Evaluate the input's action against the compiled model.

        Abstains when the resource type or action is missing, or when the model
        defines no permission for the action; otherwise returns a definitive
        allow/deny with policy id ``auth-model:<action>``.
        """
        resource_type, _resource_id = self._resource_identity(authorization_input)
        action = getattr(authorization_input, "action", None)
        if not resource_type or not action:
            return AuthorizationGraphDecision(outcome="abstain", reason=None, policy_id=None)
        if action not in self.model.permissions:
            return AuthorizationGraphDecision(outcome="abstain", reason=None, policy_id=None)
        evaluator = _PermissionEvaluator(
            authorization_input=authorization_input, model=self.model, resource_type=resource_type
        )
        matched = evaluator.evaluate(self.model.permissions[action].expression)
        if matched:
            return AuthorizationGraphDecision(
                outcome="allow",
                reason=f"matched authorization model permission {action}",
                policy_id=f"auth-model:{action}",
            )
        # A defined-but-unmatched permission is an explicit deny, not an abstain.
        return AuthorizationGraphDecision(
            outcome="deny",
            reason=f"authorization model denied {action}",
            policy_id=f"auth-model:{action}",
        )

    def _resource_identity(self, authorization_input: Any) -> tuple[str, str]:
        """Extract (resource_type, resource_id) from the input's resource dict, tolerating key variants."""
        resource = getattr(authorization_input, "resource", {}) or {}
        resource_type = str(
            resource.get("resource_type") or resource.get("type") or resource.get("kind") or ""
        )
        resource_id = str(resource.get("resource_id") or resource.get("id") or "")
        return resource_type, resource_id
+
+
class _PermissionEvaluator:
    """Evaluates a compiled permission expression against one authorization input."""

    def __init__(
        self, *, authorization_input: Any, model: CompiledAuthorizationModel, resource_type: str
    ) -> None:
        self._authorization_input = authorization_input
        self._model = model
        self._resource_type = resource_type

    def evaluate(self, expr: Any) -> bool:
        """Recursively evaluate a boolean relation expression.

        Identifier nodes test for a direct relation edge on the user; Not/And/Or
        combine sub-results. Unrecognized node types evaluate to False.
        """
        # Imported locally on every call — presumably to avoid a circular import
        # with the modeling package; TODO confirm and consider hoisting.
        from keynetra.modeling.schema_parser import AndExpr, IdentifierExpr, NotExpr, OrExpr

        if isinstance(expr, IdentifierExpr):
            return self._has_relation(expr.name)
        if isinstance(expr, NotExpr):
            return not self.evaluate(expr.value)
        if isinstance(expr, AndExpr):
            return self.evaluate(expr.left) and self.evaluate(expr.right)
        if isinstance(expr, OrExpr):
            return self.evaluate(expr.left) or self.evaluate(expr.right)
        return False

    def _has_relation(self, name: str) -> bool:
        """True when the user has a relation edge *name* to exactly the input's resource."""
        relations = getattr(self._authorization_input, "user", {}).get("relations", [])
        if not isinstance(relations, list):
            return False
        resource = getattr(self._authorization_input, "resource", {}) or {}
        resource_id = str(resource.get("resource_id") or resource.get("id") or "")
        for edge in relations:
            if not isinstance(edge, dict):
                continue
            if str(edge.get("relation")) != name:
                continue
            if str(edge.get("object_type")) != self._resource_type:
                continue
            if str(edge.get("object_id")) != resource_id:
                continue
            return True
        return False
+
+
class PermissionGraphStore:
    """Thread-safe in-memory cache of compiled permission graphs, keyed by tenant."""

    def __init__(self) -> None:
        self._lock = RLock()
        # tenant_key -> compiled graph for that tenant.
        self._graphs: "dict[str, CompiledPermissionGraph]" = {}

    def get(self, tenant_key: str) -> "CompiledPermissionGraph | None":
        """Return the cached graph for *tenant_key*, or None when absent."""
        with self._lock:
            return self._graphs.get(tenant_key)

    def set(self, tenant_key: str, graph: "CompiledPermissionGraph") -> None:
        """Cache *graph* for *tenant_key*, replacing any existing entry."""
        with self._lock:
            self._graphs[tenant_key] = graph

    def invalidate(self, tenant_key: str) -> None:
        """Drop the cached graph for *tenant_key*; unknown keys are a no-op."""
        with self._lock:
            self._graphs.pop(tenant_key, None)


# Process-wide store shared by the engine and policy-distribution code.
MODEL_GRAPH_STORE = PermissionGraphStore()
diff --git a/keynetra/headless.py b/keynetra/headless.py
new file mode 100644
index 0000000..8b02a79
--- /dev/null
+++ b/keynetra/headless.py
@@ -0,0 +1,102 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any
+
+from keynetra.config.config_loader import load_config_file
+from keynetra.config.file_loaders import (
+ load_authorization_model_from_file,
+ load_authorization_model_from_paths,
+ load_policies_from_paths,
+)
+from keynetra.config.policies import DEFAULT_POLICIES
+from keynetra.engine.keynetra_engine import (
+ AuthorizationDecision,
+ AuthorizationInput,
+ KeyNetraEngine,
+)
+from keynetra.engine.model_graph.permission_graph import CompiledPermissionGraph
+from keynetra.modeling.permission_compiler import compile_authorization_schema
+
+
@dataclass
class KeyNetra:
    """Embedded, headless authorization facade.

    Wraps a KeyNetraEngine (and optionally a compiled permission graph) so that
    applications can evaluate authorization decisions in-process, without the
    HTTP service.
    """

    # Underlying policy engine that produces AuthorizationDecision values.
    _engine: KeyNetraEngine
    # Optional schema-model graph forwarded to the engine on every check.
    _permission_graph: CompiledPermissionGraph | None = None

    @classmethod
    def from_config(cls, path: str | Path) -> "KeyNetra":
        """Build a facade from a config file.

        Policies are loaded from the configured paths, falling back to
        DEFAULT_POLICIES when none are found; an authorization model, if
        configured, is compiled into a permission graph under the "default"
        tenant key.
        """
        config = load_config_file(path)
        policies = load_policies_from_paths(list(config.policy_paths)) or list(DEFAULT_POLICIES)
        # NOTE: despite the name, this local holds the KeyNetra facade, not the engine.
        engine = cls(_engine=KeyNetraEngine(policies))

        schema = load_authorization_model_from_paths(list(config.model_paths))
        if schema:
            engine._permission_graph = CompiledPermissionGraph(
                tenant_key="default",
                model=compile_authorization_schema(schema),
            )
        return engine

    def load_policies(self, path: str | Path) -> None:
        """Replace the engine's policy set from *path*; raises ValueError when none are found."""
        loaded = load_policies_from_paths([str(path)])
        if not loaded:
            raise ValueError("no policies found in the provided path")
        self._engine = KeyNetraEngine(loaded)

    def load_model(self, path: str | Path) -> None:
        """Compile the authorization model at *path* and attach it as the permission graph."""
        schema = load_authorization_model_from_file(path)
        self._permission_graph = CompiledPermissionGraph(
            tenant_key="default",
            model=compile_authorization_schema(schema),
        )

    def check_access(
        self,
        *,
        subject: str | dict[str, Any],
        action: str,
        resource: str | dict[str, Any],
        context: dict[str, Any] | None = None,
    ) -> AuthorizationDecision:
        """Evaluate an authorization request.

        *subject* and *resource* accept either a ``"type:id"`` descriptor
        string or an already-shaped dict; *context* defaults to empty.
        """
        user_payload = self._subject_to_user(subject)
        resource_payload = self._resource_to_payload(resource)
        return self._engine.decide(
            AuthorizationInput(
                user=user_payload,
                action=action,
                resource=resource_payload,
                context=dict(context or {}),
                permission_graph=self._permission_graph,
            )
        )

    def _subject_to_user(self, subject: str | dict[str, Any]) -> dict[str, Any]:
        """Normalize a subject descriptor/dict into a user payload."""
        if isinstance(subject, dict):
            return dict(subject)
        kind, identifier = _parse_descriptor(subject)
        if kind == "user":
            return {"id": identifier}
        return {"id": identifier, "subject_type": kind}

    def _resource_to_payload(self, resource: str | dict[str, Any]) -> dict[str, Any]:
        """Normalize a resource descriptor/dict, emitting both canonical and legacy keys."""
        if isinstance(resource, dict):
            return dict(resource)
        resource_type, resource_id = _parse_descriptor(resource)
        return {
            "id": resource_id,
            "resource_id": resource_id,
            "resource_type": resource_type,
            "type": resource_type,
        }
+
+
+def _parse_descriptor(value: str) -> tuple[str, str]:
+ normalized = str(value).strip()
+ if ":" not in normalized:
+ return normalized or "unknown", normalized or ""
+ prefix, suffix = normalized.split(":", 1)
+ return prefix.strip() or "unknown", suffix.strip()
diff --git a/keynetra/infrastructure/__init__.py b/keynetra/infrastructure/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keynetra/infrastructure/cache/__init__.py b/keynetra/infrastructure/cache/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keynetra/infrastructure/cache/access_index_cache.py b/keynetra/infrastructure/cache/access_index_cache.py
new file mode 100644
index 0000000..c146cef
--- /dev/null
+++ b/keynetra/infrastructure/cache/access_index_cache.py
@@ -0,0 +1,144 @@
+"""Distributed access index cache."""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+from keynetra.infrastructure.cache.backends import CacheBackend, build_cache_backend
+from keynetra.observability.metrics import record_cache_hit, record_cache_miss
+from keynetra.services.interfaces import AccessIndexEntry
+
+
class RedisBackedAccessIndexCache:
    """Caches resource/action access index entries.

    Invalidation uses namespace versioning: cache keys embed a counter read
    from a namespace key, and ``invalidate*`` methods increment that counter so
    previously written keys become unreachable and age out via TTL.
    """

    def __init__(self, backend: CacheBackend, ttl_seconds: int = 30) -> None:
        # Minimal key/value store (Redis wrapper or in-memory fallback).
        self._backend = backend
        # Lifetime of each cached entry list.
        self._ttl_seconds = ttl_seconds

    def get(
        self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
    ) -> list[AccessIndexEntry] | None:
        """Return cached entries for the resource/action, or None on a miss.

        A payload that fails JSON decoding or is not a list counts as a miss.
        Malformed items are skipped; missing fields fall back to the requested
        resource/action values.
        """
        raw = self._backend.get(
            self._key(
                tenant_id=tenant_id,
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
            )
        )
        if raw is None:
            record_cache_miss(cache_type="access_index")
            return None
        try:
            payload = json.loads(raw)
        except json.JSONDecodeError:
            record_cache_miss(cache_type="access_index")
            return None
        if not isinstance(payload, list):
            record_cache_miss(cache_type="access_index")
            return None
        record_cache_hit(cache_type="access_index")
        entries: list[AccessIndexEntry] = []
        for item in payload:
            if not isinstance(item, dict):
                continue
            allowed_subjects = item.get("allowed_subjects", [])
            if not isinstance(allowed_subjects, list):
                allowed_subjects = []
            entries.append(
                AccessIndexEntry(
                    resource_type=str(item.get("resource_type", resource_type)),
                    resource_id=str(item.get("resource_id", resource_id)),
                    action=str(item.get("action", action)),
                    # Non-string subjects are dropped rather than coerced.
                    allowed_subjects=tuple(
                        str(subject) for subject in allowed_subjects if isinstance(subject, str)
                    ),
                    source=str(item.get("source", "unknown")),
                    # Optional fields preserve None; anything else is coerced to str.
                    subject_type=(
                        item.get("subject_type")
                        if item.get("subject_type") is None
                        else str(item.get("subject_type"))
                    ),
                    subject_id=(
                        item.get("subject_id")
                        if item.get("subject_id") is None
                        else str(item.get("subject_id"))
                    ),
                    effect=(
                        item.get("effect")
                        if item.get("effect") is None
                        else str(item.get("effect"))
                    ),
                    acl_id=int(item["acl_id"]) if item.get("acl_id") is not None else None,
                )
            )
        return entries

    def set(
        self,
        *,
        tenant_id: int,
        resource_type: str,
        resource_id: str,
        action: str,
        entries: list[AccessIndexEntry],
    ) -> None:
        """Serialize *entries* to compact JSON and store them under the namespaced key."""
        payload = [
            {
                "resource_type": entry.resource_type,
                "resource_id": entry.resource_id,
                "action": entry.action,
                "allowed_subjects": list(entry.allowed_subjects),
                "source": entry.source,
                "subject_type": entry.subject_type,
                "subject_id": entry.subject_id,
                "effect": entry.effect,
                "acl_id": entry.acl_id,
            }
            for entry in entries
        ]
        self._backend.set(
            self._key(
                tenant_id=tenant_id,
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
            ),
            json.dumps(payload, separators=(",", ":")),
            self._ttl_seconds,
        )

    def invalidate(self, *, tenant_id: int, resource_type: str, resource_id: str) -> None:
        """Invalidate all cached actions for one resource by bumping its namespace counter."""
        self._backend.incr(
            self._namespace_key(
                tenant_id=tenant_id, resource_type=resource_type, resource_id=resource_id
            )
        )

    def invalidate_tenant(self, *, tenant_id: int) -> None:
        """Invalidate every cached entry for one tenant."""
        self._backend.incr(f"idxns:{tenant_id}:tenant")

    def invalidate_global(self) -> None:
        """Invalidate every cached entry across all tenants."""
        self._backend.incr("idxns:global")

    def _key(self, *, tenant_id: int, resource_type: str, resource_id: str, action: str) -> str:
        """Build the cache key, embedding the first available namespace counter.

        Lookup order: per-resource namespace, then tenant-wide, then global,
        defaulting to "0" when none has ever been incremented.
        """
        namespace = (
            self._backend.get(
                self._namespace_key(
                    tenant_id=tenant_id, resource_type=resource_type, resource_id=resource_id
                )
            )
            or self._backend.get(f"idxns:{tenant_id}:tenant")
            or self._backend.get("idxns:global")
            or "0"
        )
        return f"idx:{tenant_id}:{namespace}:{resource_type}:{resource_id}:{action}"

    def _namespace_key(self, *, tenant_id: int, resource_type: str, resource_id: str) -> str:
        """Key under which the per-resource namespace counter is stored."""
        return f"idxns:{tenant_id}:{resource_type}:{resource_id}"
+
+
def build_access_index_cache(redis_client: Any | None) -> RedisBackedAccessIndexCache:
    """Construct an access-index cache over the backend built for *redis_client*."""
    backend = build_cache_backend(redis_client)
    return RedisBackedAccessIndexCache(backend)
diff --git a/keynetra/infrastructure/cache/acl_cache.py b/keynetra/infrastructure/cache/acl_cache.py
new file mode 100644
index 0000000..bdf0b16
--- /dev/null
+++ b/keynetra/infrastructure/cache/acl_cache.py
@@ -0,0 +1,110 @@
+"""ACL cache adapter."""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+from keynetra.infrastructure.cache.backends import CacheBackend, build_cache_backend
+from keynetra.observability.metrics import record_cache_hit, record_cache_miss
+from keynetra.services.interfaces import ACLRecord
+
+
class RedisBackedACLCache:
    """Caches ACL lists per tenant resource/action.

    Uses the same namespace-versioning invalidation scheme as the access index
    cache. NOTE(review): unlike that cache, ``_key`` here consults only the
    per-resource and global namespaces — there is no ``invalidate_tenant``
    counterpart; confirm tenant-wide ACL invalidation is intentionally absent.
    """

    def __init__(self, backend: CacheBackend, ttl_seconds: int = 30) -> None:
        # Minimal key/value store (Redis wrapper or in-memory fallback).
        self._backend = backend
        # Lifetime of each cached ACL list.
        self._ttl_seconds = ttl_seconds

    def get(
        self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
    ) -> list[ACLRecord] | None:
        """Return cached ACL records for the resource/action, or None on a miss.

        A payload that fails JSON decoding or is not a list counts as a miss;
        malformed items in the list are skipped. Missing fields fall back to
        the requested tenant/resource/action values ("deny" for effect).
        """
        raw = self._backend.get(
            self._key(
                tenant_id=tenant_id,
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
            )
        )
        if raw is None:
            record_cache_miss(cache_type="acl")
            return None
        try:
            payload = json.loads(raw)
        except json.JSONDecodeError:
            record_cache_miss(cache_type="acl")
            return None
        if not isinstance(payload, list):
            record_cache_miss(cache_type="acl")
            return None
        record_cache_hit(cache_type="acl")
        records: list[ACLRecord] = []
        for item in payload:
            if not isinstance(item, dict):
                continue
            records.append(
                ACLRecord(
                    id=int(item.get("id", 0)),
                    tenant_id=int(item.get("tenant_id", tenant_id)),
                    subject_type=str(item.get("subject_type", "")),
                    subject_id=str(item.get("subject_id", "")),
                    resource_type=str(item.get("resource_type", resource_type)),
                    resource_id=str(item.get("resource_id", resource_id)),
                    action=str(item.get("action", action)),
                    effect=str(item.get("effect", "deny")),
                    created_at=item.get("created_at"),
                )
            )
        return records

    def set(
        self,
        *,
        tenant_id: int,
        resource_type: str,
        resource_id: str,
        action: str,
        acl_entries: list[ACLRecord],
    ) -> None:
        """Serialize *acl_entries* to compact JSON and store them under the namespaced key."""
        payload = [entry.to_dict() for entry in acl_entries]
        self._backend.set(
            self._key(
                tenant_id=tenant_id,
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
            ),
            json.dumps(payload, separators=(",", ":")),
            self._ttl_seconds,
        )

    def invalidate(self, *, tenant_id: int, resource_type: str, resource_id: str) -> None:
        """Invalidate all cached actions for one resource by bumping its namespace counter."""
        self._backend.incr(
            self._namespace_key(
                tenant_id=tenant_id, resource_type=resource_type, resource_id=resource_id
            )
        )

    def invalidate_global(self) -> None:
        """Invalidate every cached ACL list across all tenants."""
        self._backend.incr("aclns:global")

    def _key(self, *, tenant_id: int, resource_type: str, resource_id: str, action: str) -> str:
        """Build the cache key, embedding the per-resource or global namespace counter ("0" default)."""
        namespace = (
            self._backend.get(
                self._namespace_key(
                    tenant_id=tenant_id, resource_type=resource_type, resource_id=resource_id
                )
            )
            or self._backend.get("aclns:global")
            or "0"
        )
        return f"acl:{tenant_id}:{namespace}:{resource_type}:{resource_id}:{action}"

    def _namespace_key(self, *, tenant_id: int, resource_type: str, resource_id: str) -> str:
        """Key under which the per-resource namespace counter is stored."""
        return f"aclns:{tenant_id}:{resource_type}:{resource_id}"
+
+
def build_acl_cache(redis_client: Any | None) -> RedisBackedACLCache:
    """Construct an ACL cache over the backend built for *redis_client*."""
    backend = build_cache_backend(redis_client)
    return RedisBackedACLCache(backend)
diff --git a/keynetra/infrastructure/cache/backends.py b/keynetra/infrastructure/cache/backends.py
new file mode 100644
index 0000000..adfca3a
--- /dev/null
+++ b/keynetra/infrastructure/cache/backends.py
@@ -0,0 +1,100 @@
+"""Cache backend implementations.
+
+Infrastructure owns cache transport details. Services use cache interfaces
+defined in ``keynetra.services.interfaces``.
+"""
+
+from __future__ import annotations
+
+import time
+from typing import Any, Protocol
+
+
class CacheBackend(Protocol):
    """Minimal key/value backend required by cache adapters.

    Implementations in this module are best-effort: the Redis backend
    swallows transport errors so callers can always fall back to the
    underlying data source.
    """

    def get(self, key: str) -> str | None:
        """Return the value at *key*, or None on a miss."""
        ...

    def set(self, key: str, value: str, ttl_seconds: int | None = None) -> None:
        """Store *value*; ``ttl_seconds=None`` means no expiry."""
        ...

    def delete(self, key: str) -> None:
        """Remove *key* if present; missing keys are ignored."""
        ...

    def incr(self, key: str) -> int:
        """Increment the integer counter at *key* and return the new value."""
        ...
+
+
+class InMemoryCacheBackend:
+ """Simple in-memory TTL cache used when Redis is unavailable."""
+
+ def __init__(self) -> None:
+ self._values: dict[str, tuple[str, float | None]] = {}
+
+ def get(self, key: str) -> str | None:
+ item = self._values.get(key)
+ if item is None:
+ return None
+ value, expires_at = item
+ if expires_at is not None and expires_at <= time.time():
+ self._values.pop(key, None)
+ return None
+ return value
+
+ def set(self, key: str, value: str, ttl_seconds: int | None = None) -> None:
+ expires_at = None if ttl_seconds is None else time.time() + max(1, ttl_seconds)
+ self._values[key] = (value, expires_at)
+
+ def delete(self, key: str) -> None:
+ self._values.pop(key, None)
+
+ def incr(self, key: str) -> int:
+ current = self.get(key)
+ next_value = (int(current) if current is not None else 0) + 1
+ self.set(key, str(next_value))
+ return next_value
+
+
+class RedisCacheBackend:
+ """Redis-backed cache wrapper with the same minimal surface."""
+
+ def __init__(self, client: Any) -> None:
+ self._client = client
+
+ def get(self, key: str) -> str | None:
+ try:
+ value = self._client.get(key)
+ except Exception:
+ return None
+ if value is None:
+ return None
+ return str(value)
+
+ def set(self, key: str, value: str, ttl_seconds: int | None = None) -> None:
+ try:
+ if ttl_seconds is None:
+ self._client.set(key, value)
+ else:
+ self._client.setex(key, max(1, ttl_seconds), value)
+ except Exception:
+ return
+
+ def delete(self, key: str) -> None:
+ try:
+ self._client.delete(key)
+ except Exception:
+ return
+
+ def incr(self, key: str) -> int:
+ try:
+ return int(self._client.incr(key))
+ except Exception:
+ return 0
+
+
# Single shared fallback instance so every adapter sees one coherent
# in-process cache when Redis is not configured.
_memory_backend = InMemoryCacheBackend()


def build_cache_backend(client: Any | None) -> CacheBackend:
    """Return a Redis backend when available, otherwise the shared memory fallback."""

    if client is None:
        return _memory_backend
    return RedisCacheBackend(client)
diff --git a/keynetra/infrastructure/cache/decision_cache.py b/keynetra/infrastructure/cache/decision_cache.py
new file mode 100644
index 0000000..d4feb88
--- /dev/null
+++ b/keynetra/infrastructure/cache/decision_cache.py
@@ -0,0 +1,108 @@
+"""Decision cache adapter.
+
+The cache lives outside the pure engine. Keys are derived from the fully
+hydrated authorization input so cache hits never hide changes to explicit
+inputs such as relationships or request context.
+"""
+
+from __future__ import annotations
+
+import hashlib
+import json
+from typing import Any
+
+from keynetra.engine.keynetra_engine import AuthorizationInput
+from keynetra.infrastructure.cache.backends import CacheBackend, build_cache_backend
+from keynetra.services.interfaces import CachedDecision
+
+
def _stable_json(value: Any) -> str:
    """Serialize *value* deterministically: sorted keys, compact separators.

    ``default=str`` stringifies non-JSON values so key hashing never raises.
    """
    return json.dumps(value, sort_keys=True, separators=(",", ":"), default=str)
+
+
class RedisBackedDecisionCache:
    """Decision cache with namespace bump invalidation.

    Entries are stored as compact JSON. Invalidation never deletes keys:
    ``bump_namespace`` increments a per-tenant counter that is embedded in
    every key, so superseded entries become unreachable and age out via TTL.
    """

    def __init__(self, backend: CacheBackend) -> None:
        self._backend = backend

    def get(self, key: str) -> CachedDecision | None:
        """Return the cached decision at *key*, or None on a miss or bad JSON."""
        raw = self._backend.get(key)
        if raw is None:
            return None
        try:
            payload = json.loads(raw)
        except json.JSONDecodeError:
            # Undecodable payloads read as a miss rather than failing the request.
            return None
        return CachedDecision(
            allowed=bool(payload.get("allowed")),
            decision=str(payload.get("decision", "deny")),
            # reason/policy_id are nullable: preserve None, otherwise coerce to str.
            reason=(
                payload.get("reason")
                if payload.get("reason") is None
                else str(payload.get("reason"))
            ),
            policy_id=(
                payload.get("policy_id")
                if payload.get("policy_id") is None
                else str(payload.get("policy_id"))
            ),
            # Defensive filtering: silently drop malformed list members instead
            # of propagating type errors from a poisoned cache entry.
            matched_policies=[
                str(item) for item in payload.get("matched_policies", []) if isinstance(item, str)
            ],
            explain_trace=[
                step for step in payload.get("explain_trace", []) if isinstance(step, dict)
            ],
            failed_conditions=[
                str(item) for item in payload.get("failed_conditions", []) if isinstance(item, str)
            ],
        )

    def set(self, key: str, value: CachedDecision, ttl_seconds: int) -> None:
        """Serialize *value* to compact JSON and store it under *key* with a TTL."""
        payload = {
            "allowed": value.allowed,
            "decision": value.decision,
            "reason": value.reason,
            "policy_id": value.policy_id,
            "matched_policies": value.matched_policies,
            "explain_trace": value.explain_trace,
            "failed_conditions": value.failed_conditions,
        }
        self._backend.set(key, json.dumps(payload, separators=(",", ":")), ttl_seconds)

    def make_key(
        self,
        *,
        tenant_key: str,
        policy_version: int,
        authorization_input: AuthorizationInput,
        revision: int | None = None,
    ) -> str:
        """Derive a deterministic cache key for one authorization request.

        The key embeds the tenant namespace counter and policy version, plus a
        SHA-256 digest over the fully hydrated input (user, resource, context),
        so any change to an explicit input yields a different key.
        """
        namespace = self._tenant_namespace(tenant_key)
        payload = {
            "tenant_key": tenant_key,
            "policy_version": policy_version,
            "revision": revision,
            "action": authorization_input.action,
            "user": authorization_input.user,
            "resource": authorization_input.resource,
            "context": authorization_input.context,
        }
        digest = hashlib.sha256(_stable_json(payload).encode("utf-8")).hexdigest()
        return f"dec:{tenant_key}:{namespace}:{policy_version}:{digest}"

    def bump_namespace(self, tenant_key: str) -> int:
        """Invalidate all of a tenant's cached decisions by bumping its counter."""
        return self._backend.incr(self._namespace_key(tenant_key))

    def _tenant_namespace(self, tenant_key: str) -> int:
        # A missing counter means the namespace was never bumped; default to 0.
        raw = self._backend.get(self._namespace_key(tenant_key))
        return int(raw) if raw is not None else 0

    def _namespace_key(self, tenant_key: str) -> str:
        return f"decns:{tenant_key}"
+
+
def build_decision_cache(redis_client: Any | None) -> RedisBackedDecisionCache:
    """Build the default decision cache.

    Falls back to the shared in-memory backend when *redis_client* is None.
    """

    return RedisBackedDecisionCache(build_cache_backend(redis_client))
diff --git a/keynetra/infrastructure/cache/policy_cache.py b/keynetra/infrastructure/cache/policy_cache.py
new file mode 100644
index 0000000..0a22133
--- /dev/null
+++ b/keynetra/infrastructure/cache/policy_cache.py
@@ -0,0 +1,77 @@
+"""Policy cache adapter.
+
+Infrastructure stores serialized policy definitions. Services remain
+responsible for constructing the engine from cached policy records.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.infrastructure.cache.backends import CacheBackend, build_cache_backend
+from keynetra.services.interfaces import PolicyRecord
+
+
class RedisBackedPolicyCache:
    """Policy cache with per-tenant namespace invalidation.

    Invalidation bumps a per-tenant counter embedded in every cache key, so
    stale entries become unreachable rather than being deleted. An optional
    TTL bounds how long such orphaned entries linger in the backend.
    """

    def __init__(self, backend: CacheBackend, ttl_seconds: int | None = None) -> None:
        # ttl_seconds=None keeps the previous store-forever behavior; set it
        # to bound memory held by namespace-bumped (unreachable) entries.
        self._backend = backend
        self._ttl_seconds = ttl_seconds

    def get(self, tenant_key: str, policy_version: int) -> list[PolicyRecord] | None:
        """Return cached policies, or None on a miss or any malformed payload."""
        key = self._cache_key(tenant_key, policy_version)
        raw = self._backend.get(key)
        if raw is None:
            return None
        try:
            payload = json.loads(raw)
        except json.JSONDecodeError:
            return None
        if not isinstance(payload, list):
            return None
        records: list[PolicyRecord] = []
        try:
            for item in payload:
                if not isinstance(item, dict):
                    continue
                records.append(
                    PolicyRecord(
                        id=int(item["id"]),
                        definition=PolicyDefinition.from_dict(item["definition"]),
                    )
                )
        except (KeyError, TypeError, ValueError):
            # A poisoned or truncated entry must read as a cache miss, not
            # bubble an exception up into the authorization path. Previously a
            # missing "id"/"definition" key raised straight out of get().
            return None
        return records

    def set(self, tenant_key: str, policy_version: int, policies: list[PolicyRecord]) -> None:
        """Serialize and store the tenant's current policy set."""
        key = self._cache_key(tenant_key, policy_version)
        payload = [
            {
                "id": policy.id,
                "definition": {
                    "action": policy.definition.action,
                    "effect": policy.definition.effect,
                    "priority": policy.definition.priority,
                    "conditions": policy.definition.conditions,
                    "policy_id": policy.definition.policy_id,
                },
            }
            for policy in policies
        ]
        self._backend.set(key, json.dumps(payload, separators=(",", ":")), self._ttl_seconds)

    def invalidate(self, tenant_key: str) -> None:
        """Orphan all of the tenant's cached policies by bumping its counter."""
        self._backend.incr(self._namespace_key(tenant_key))

    def _cache_key(self, tenant_key: str, policy_version: int) -> str:
        # The namespace counter is read on every lookup so a bump takes effect
        # immediately for all policy versions of the tenant.
        namespace = self._backend.get(self._namespace_key(tenant_key)) or "0"
        return f"pol:{tenant_key}:{namespace}:{policy_version}"

    def _namespace_key(self, tenant_key: str) -> str:
        return f"polns:{tenant_key}"
+
+
def build_policy_cache(redis_client: Any | None) -> RedisBackedPolicyCache:
    """Build the default policy cache.

    Falls back to the shared in-memory backend when *redis_client* is None.
    """

    return RedisBackedPolicyCache(build_cache_backend(redis_client))
diff --git a/keynetra/infrastructure/cache/policy_distribution.py b/keynetra/infrastructure/cache/policy_distribution.py
new file mode 100644
index 0000000..8bd04a8
--- /dev/null
+++ b/keynetra/infrastructure/cache/policy_distribution.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import Settings
+from keynetra.services.interfaces import PolicyEventPublisher
+
+
@dataclass(frozen=True)
class PolicyUpdateEvent:
    """Notification that a tenant's policy set changed.

    ``tenant_key`` identifies the tenant; ``policy_version`` is the version
    the tenant is now at.
    """

    tenant_key: str
    policy_version: int

    def to_json(self) -> str:
        """Serialize the event for the Redis pub/sub channel."""
        payload = {"tenant_key": self.tenant_key, "policy_version": self.policy_version}
        return json.dumps(payload)
+
+
def publish_policy_update(settings: Settings, event: PolicyUpdateEvent) -> None:
    """Best-effort broadcast of *event* on the configured Redis channel.

    Silently does nothing when Redis is unconfigured or the publish fails.
    """
    client = get_redis()
    if client is None:
        return
    try:
        client.publish(settings.policy_events_channel, event.to_json())
    except Exception:
        # Policy distribution is advisory; never fail the caller over pub/sub.
        return
+
+
class RedisPolicyEventPublisher(PolicyEventPublisher):
    """Publish policy update notifications to Redis when available."""

    def __init__(self, settings: Settings) -> None:
        # Settings supply the pub/sub channel name at publish time.
        self._settings = settings

    def publish_policy_update(self, *, tenant_key: str, policy_version: int) -> None:
        """Wrap the arguments in a PolicyUpdateEvent and publish best-effort."""
        publish_policy_update(
            self._settings,
            PolicyUpdateEvent(tenant_key=tenant_key, policy_version=policy_version),
        )
diff --git a/keynetra/infrastructure/cache/relationship_cache.py b/keynetra/infrastructure/cache/relationship_cache.py
new file mode 100644
index 0000000..0ef5a68
--- /dev/null
+++ b/keynetra/infrastructure/cache/relationship_cache.py
@@ -0,0 +1,75 @@
+"""Relationship cache adapter."""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+from keynetra.infrastructure.cache.backends import CacheBackend, build_cache_backend
+from keynetra.services.interfaces import RelationshipRecord
+
+
class RedisBackedRelationshipCache:
    """Caches relationship lists per tenant subject.

    Values are compact JSON arrays keyed by (tenant, subject type, subject id)
    and expire after a short TTL; ``invalidate`` deletes a subject's entry
    eagerly.
    """

    def __init__(self, backend: CacheBackend, ttl_seconds: int = 30) -> None:
        self._backend = backend
        self._ttl_seconds = ttl_seconds

    def get(
        self, *, tenant_id: int, subject_type: str, subject_id: str
    ) -> list[RelationshipRecord] | None:
        """Return the cached relationships, or None on a miss or corrupt payload."""
        cache_key = self._key(
            tenant_id=tenant_id, subject_type=subject_type, subject_id=subject_id
        )
        raw = self._backend.get(cache_key)
        if raw is None:
            return None
        try:
            decoded = json.loads(raw)
        except json.JSONDecodeError:
            return None
        if not isinstance(decoded, list):
            return None
        # Skip malformed members rather than failing the whole lookup.
        return [
            RelationshipRecord(
                subject_type=str(entry.get("subject_type", "")),
                subject_id=str(entry.get("subject_id", "")),
                relation=str(entry.get("relation", "")),
                object_type=str(entry.get("object_type", "")),
                object_id=str(entry.get("object_id", "")),
            )
            for entry in decoded
            if isinstance(entry, dict)
        ]

    def set(
        self,
        *,
        tenant_id: int,
        subject_type: str,
        subject_id: str,
        relationships: list[RelationshipRecord],
    ) -> None:
        """Store the subject's relationships as compact JSON with the TTL."""
        serialized = json.dumps(
            [record.to_dict() for record in relationships],
            separators=(",", ":"),
        )
        self._backend.set(
            self._key(tenant_id=tenant_id, subject_type=subject_type, subject_id=subject_id),
            serialized,
            self._ttl_seconds,
        )

    def invalidate(self, *, tenant_id: int, subject_type: str, subject_id: str) -> None:
        """Delete the cached entry for one subject, if any."""
        cache_key = self._key(
            tenant_id=tenant_id, subject_type=subject_type, subject_id=subject_id
        )
        self._backend.delete(cache_key)

    def _key(self, *, tenant_id: int, subject_type: str, subject_id: str) -> str:
        return f"rel:{tenant_id}:{subject_type}:{subject_id}"
+
+
def build_relationship_cache(redis_client: Any | None) -> RedisBackedRelationshipCache:
    """Build the default relationship cache.

    Falls back to the shared in-memory backend when *redis_client* is None.
    """

    return RedisBackedRelationshipCache(build_cache_backend(redis_client))
diff --git a/keynetra/infrastructure/cache/user_cache.py b/keynetra/infrastructure/cache/user_cache.py
new file mode 100644
index 0000000..55760d7
--- /dev/null
+++ b/keynetra/infrastructure/cache/user_cache.py
@@ -0,0 +1,33 @@
+from __future__ import annotations
+
+import json
+from typing import Any
+
+from keynetra.config.redis_client import get_redis
+
+
def get_cached_user_context(key: str) -> dict[str, Any] | None:
    """Fetch a cached user-context dict, or None on any miss or failure.

    All Redis/JSON errors degrade to a cache miss; only dict payloads count.
    """
    client = get_redis()
    if client is None:
        return None
    try:
        raw = client.get(key)
    except Exception:
        return None
    if not raw:
        return None
    try:
        parsed = json.loads(raw)
    except Exception:
        return None
    if isinstance(parsed, dict):
        return parsed
    return None
+
+
def set_cached_user_context(key: str, ctx: dict[str, Any], ttl_seconds: int) -> None:
    """Best-effort store of *ctx* as canonical JSON with at least a 1s TTL."""
    client = get_redis()
    if client is None:
        return
    try:
        # Serialization stays inside the try so an unserializable context is
        # swallowed like any other failure — caching is strictly optional.
        payload = json.dumps(ctx, separators=(",", ":"), sort_keys=True)
        client.setex(key, max(1, ttl_seconds), payload)
    except Exception:
        return
diff --git a/keynetra/infrastructure/logging.py b/keynetra/infrastructure/logging.py
new file mode 100644
index 0000000..ba8e684
--- /dev/null
+++ b/keynetra/infrastructure/logging.py
@@ -0,0 +1,73 @@
+"""Structured logging helpers for core."""
+
+from __future__ import annotations
+
+import json
+import logging
+import os
+from datetime import datetime, timezone
+from typing import Any
+
+
class JsonLogFormatter(logging.Formatter):
    """Render log records as single-line JSON objects.

    Dict messages are emitted as-is (plus standard fields via ``setdefault``,
    so dict-supplied values win); any other message is wrapped under
    ``"message"``. Exception info, which the base ``Formatter`` would append
    as a traceback, is serialized into an ``"exc_info"`` field — previously
    ``logger.exception(...)`` tracebacks were silently dropped.
    """

    def format(self, record: logging.LogRecord) -> str:
        payload: dict[str, Any]
        if isinstance(record.msg, dict):
            payload = dict(record.msg)
        else:
            payload = {"message": record.getMessage()}
        payload.setdefault("timestamp", datetime.now(timezone.utc).isoformat())
        payload.setdefault("level", record.levelname)
        payload.setdefault("logger", record.name)
        if record.exc_info:
            # Preserve the traceback text for logger.exception(...) callers.
            payload.setdefault("exc_info", self.formatException(record.exc_info))
        return json.dumps(payload, default=str)
+
+
def configure_json_logging() -> None:
    """Install the JSON root handler unless KEYNETRA_LOG_FORMAT selects rich.

    Idempotent: a marker attribute on the root logger prevents re-installing
    handlers on repeated calls.
    """
    if os.getenv("KEYNETRA_LOG_FORMAT", "json").strip().lower() == "rich":
        configure_rich_logging()
        return
    root = logging.getLogger()
    if getattr(root, "_keynetra_json_logging", False):
        return
    json_handler = logging.StreamHandler()
    json_handler.setFormatter(JsonLogFormatter())
    root.handlers = [json_handler]
    root.setLevel(logging.INFO)
    root._keynetra_json_logging = True  # type: ignore[attr-defined]
+
+
def configure_rich_logging() -> None:
    """Install a Rich console handler on the root logger, once.

    Falls back to JSON formatting when the optional ``rich`` dependency is
    not installed; the fallback sets the JSON marker so a later
    ``configure_json_logging`` call is also a no-op.
    """
    root = logging.getLogger()
    if getattr(root, "_keynetra_rich_logging", False):
        return
    try:
        from rich.console import Console
        from rich.logging import RichHandler
    except ModuleNotFoundError:
        # rich is optional: degrade to the JSON handler instead of failing.
        handler = logging.StreamHandler()
        handler.setFormatter(JsonLogFormatter())
        root.handlers = [handler]
        root.setLevel(logging.INFO)
        root._keynetra_json_logging = True  # type: ignore[attr-defined]
        return

    # Color is forced on by default (useful in containers); any of 0/false/no
    # in KEYNETRA_FORCE_COLOR disables it.
    force_color = os.getenv("KEYNETRA_FORCE_COLOR", "1").strip().lower() not in {"0", "false", "no"}
    console = Console(
        force_terminal=force_color, color_system="truecolor" if force_color else "auto"
    )
    handler = RichHandler(
        rich_tracebacks=True,
        markup=True,
        show_path=False,
        console=console,
    )
    # RichHandler renders level/time itself; only the message passes through.
    formatter = logging.Formatter("%(message)s")
    handler.setFormatter(formatter)
    root.handlers = [handler]
    root.setLevel(logging.INFO)
    root._keynetra_rich_logging = True  # type: ignore[attr-defined]
+
+
def log_event(logger: logging.Logger, *, event: str, **fields: Any) -> None:
    """Emit a structured event dict; JsonLogFormatter renders it as one JSON line."""
    logger.info({"event": event, **fields})
diff --git a/keynetra/infrastructure/metrics.py b/keynetra/infrastructure/metrics.py
new file mode 100644
index 0000000..68cc11b
--- /dev/null
+++ b/keynetra/infrastructure/metrics.py
@@ -0,0 +1,15 @@
+"""Core metrics hooks."""
+
+from __future__ import annotations
+
+from keynetra.observability.metrics import ( # noqa: F401
+ observe_decision_latency,
+ record_api_error,
+ record_cache_event,
+)
+
+__all__ = [
+ "observe_decision_latency",
+ "record_api_error",
+ "record_cache_event",
+]
diff --git a/keynetra/infrastructure/repositories/__init__.py b/keynetra/infrastructure/repositories/__init__.py
new file mode 100644
index 0000000..5c03db0
--- /dev/null
+++ b/keynetra/infrastructure/repositories/__init__.py
@@ -0,0 +1,17 @@
+"""Infrastructure repository implementations."""
+
+from .audit import SqlAuditRepository
+from .auth_models import SqlAuthModelRepository
+from .policies import SqlPolicyRepository
+from .relationships import SqlRelationshipRepository
+from .tenants import SqlTenantRepository
+from .users import SqlUserRepository
+
+__all__ = [
+ "SqlAuditRepository",
+ "SqlAuthModelRepository",
+ "SqlPolicyRepository",
+ "SqlRelationshipRepository",
+ "SqlTenantRepository",
+ "SqlUserRepository",
+]
diff --git a/keynetra/infrastructure/repositories/acl.py b/keynetra/infrastructure/repositories/acl.py
new file mode 100644
index 0000000..70600d0
--- /dev/null
+++ b/keynetra/infrastructure/repositories/acl.py
@@ -0,0 +1,112 @@
+"""ACL persistence implementation."""
+
+from __future__ import annotations
+
+from sqlalchemy import delete, select
+from sqlalchemy.orm import Session
+
+from keynetra.domain.models.acl import ResourceACL
+from keynetra.services.interfaces import ACLRecord
+
+
class SqlACLRepository:
    """SQLAlchemy-backed ACL repository.

    Every mutation commits immediately on the injected session; the caller
    owns the session's lifecycle but not its transaction boundaries.
    """

    def __init__(self, session: Session) -> None:
        self._session = session

    def create_acl_entry(
        self,
        *,
        tenant_id: int,
        subject_type: str,
        subject_id: str,
        resource_type: str,
        resource_id: str,
        action: str,
        effect: str,
    ) -> int:
        """Insert one ACL entry and return its generated primary key."""
        row = ResourceACL(
            tenant_id=tenant_id,
            subject_type=subject_type,
            subject_id=subject_id,
            resource_type=resource_type,
            resource_id=resource_id,
            action=action,
            effect=effect,
        )
        self._session.add(row)
        self._session.commit()
        # Refresh so the DB-assigned id is loaded before it is returned.
        self._session.refresh(row)
        return row.id

    def list_resource_acl(
        self, *, tenant_id: int, resource_type: str, resource_id: str
    ) -> list[ACLRecord]:
        """Return all ACL entries on one resource, ordered by action then id."""
        rows = (
            self._session.execute(
                select(ResourceACL)
                .where(ResourceACL.tenant_id == tenant_id)
                .where(ResourceACL.resource_type == resource_type)
                .where(ResourceACL.resource_id == resource_id)
                .order_by(ResourceACL.action.asc(), ResourceACL.id.asc())
            )
            .scalars()
            .all()
        )
        return [self._to_record(row) for row in rows]

    def get_acl_entry(self, *, tenant_id: int, acl_id: int) -> ACLRecord | None:
        """Fetch one entry by id, scoped to the tenant; None when absent."""
        row = (
            self._session.execute(
                select(ResourceACL)
                .where(ResourceACL.tenant_id == tenant_id)
                .where(ResourceACL.id == acl_id)
            )
            .scalars()
            .first()
        )
        return None if row is None else self._to_record(row)

    def find_matching_acl(
        self,
        *,
        tenant_id: int,
        resource_type: str,
        resource_id: str,
        action: str,
    ) -> list[ACLRecord]:
        """Return entries matching an exact (resource, action) pair, oldest first."""
        rows = (
            self._session.execute(
                select(ResourceACL)
                .where(ResourceACL.tenant_id == tenant_id)
                .where(ResourceACL.resource_type == resource_type)
                .where(ResourceACL.resource_id == resource_id)
                .where(ResourceACL.action == action)
                .order_by(ResourceACL.id.asc())
            )
            .scalars()
            .all()
        )
        return [self._to_record(row) for row in rows]

    def delete_acl_entry(self, *, tenant_id: int, acl_id: int) -> None:
        """Delete one entry by id; deleting a missing entry is a silent no-op."""
        self._session.execute(
            delete(ResourceACL)
            .where(ResourceACL.tenant_id == tenant_id)
            .where(ResourceACL.id == acl_id)
        )
        self._session.commit()

    def _to_record(self, row: ResourceACL) -> ACLRecord:
        # Map the ORM row to the transport-agnostic service-layer record.
        return ACLRecord(
            id=row.id,
            tenant_id=row.tenant_id,
            subject_type=row.subject_type,
            subject_id=row.subject_id,
            resource_type=row.resource_type,
            resource_id=row.resource_id,
            action=row.action,
            effect=row.effect,
            created_at=row.created_at,
        )
diff --git a/keynetra/infrastructure/repositories/audit.py b/keynetra/infrastructure/repositories/audit.py
new file mode 100644
index 0000000..d56201a
--- /dev/null
+++ b/keynetra/infrastructure/repositories/audit.py
@@ -0,0 +1,122 @@
+"""Audit persistence implementation."""
+
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import String, and_, desc, func, or_, select
+from sqlalchemy.orm import Session
+
+from keynetra.api.pagination import encode_cursor
+from keynetra.domain.models.audit import AuditLog
+from keynetra.engine.keynetra_engine import AuthorizationDecision, AuthorizationInput
+from keynetra.services.interfaces import AuditListItem
+
+
class SqlAuditRepository:
    """SQLAlchemy-backed audit writer.

    Writes commit immediately; reads use keyset (cursor) pagination over
    (created_at, id) descending so pages stay stable under concurrent writes.
    """

    def __init__(self, session: Session) -> None:
        self._session = session

    def write(
        self,
        *,
        tenant_id: int,
        principal_type: str,
        principal_id: str,
        authorization_input: AuthorizationInput,
        decision: AuthorizationDecision,
    ) -> None:
        """Persist one authorization decision as an audit log row."""
        row = AuditLog(
            tenant_id=tenant_id,
            principal_type=principal_type,
            principal_id=principal_id,
            user=authorization_input.user,
            action=authorization_input.action,
            resource=authorization_input.resource,
            # Decision strings are normalized to upper case for querying.
            decision=decision.decision.upper(),
            matched_policies=list(decision.matched_policies),
            reason=decision.reason,
            evaluated_rules=[step.to_dict() for step in decision.explain_trace],
            failed_conditions=list(decision.failed_conditions),
        )
        self._session.add(row)
        self._session.commit()

    def list_page(
        self,
        *,
        tenant_id: int,
        limit: int,
        cursor: dict | None,
        user_id: str | None,
        resource_id: str | None,
        decision: str | None,
        start_time: datetime | None,
        end_time: datetime | None,
    ) -> tuple[list[AuditListItem], str | None]:
        """Return one page of audit rows plus an opaque cursor for the next page.

        ``cursor`` carries the last row's (created_at, id); the keyset filter
        below selects strictly-older rows in (created_at desc, id desc) order.
        """
        query = select(AuditLog).where(AuditLog.tenant_id == tenant_id)
        if user_id:
            query = query.where(self._json_field(AuditLog.user, "id") == user_id)
        if resource_id:
            # Callers may store the id under either "id" or "resource_id".
            query = query.where(
                or_(
                    self._json_field(AuditLog.resource, "id") == resource_id,
                    self._json_field(AuditLog.resource, "resource_id") == resource_id,
                )
            )
        if decision:
            query = query.where(AuditLog.decision == decision.upper())
        if start_time:
            query = query.where(AuditLog.created_at >= start_time)
        if end_time:
            query = query.where(AuditLog.created_at <= end_time)
        if cursor is not None:
            cursor_created_at = datetime.fromisoformat(str(cursor["created_at"]))
            cursor_id = int(cursor["id"])
            query = query.where(
                or_(
                    AuditLog.created_at < cursor_created_at,
                    and_(AuditLog.created_at == cursor_created_at, AuditLog.id < cursor_id),
                )
            )

        # Fetch one extra row to detect whether another page exists.
        rows = (
            self._session.execute(
                query.order_by(desc(AuditLog.created_at), desc(AuditLog.id)).limit(limit + 1)
            )
            .scalars()
            .all()
        )
        has_next = len(rows) > limit
        page = rows[:limit]
        next_cursor = (
            encode_cursor({"created_at": page[-1].created_at.isoformat(), "id": page[-1].id})
            if has_next and page
            else None
        )
        return [self._to_item(row) for row in page], next_cursor

    def _json_field(self, column, key: str):
        # JSON attribute extraction is dialect-specific: native ->> semantics
        # on PostgreSQL, json_extract() everywhere else (e.g. SQLite).
        dialect = self._session.bind.dialect.name if self._session.bind is not None else ""
        if dialect == "postgresql":
            return column[key].as_string()
        return func.json_extract(column, f"$.{key}", type_=String)

    @staticmethod
    def _to_item(row: AuditLog) -> AuditListItem:
        # Map the ORM row to the service-layer list item.
        return AuditListItem(
            id=row.id,
            principal_type=row.principal_type,
            principal_id=row.principal_id,
            user=row.user,
            action=row.action,
            resource=row.resource,
            decision=row.decision,
            matched_policies=list(row.matched_policies),
            reason=row.reason,
            evaluated_rules=list(row.evaluated_rules),
            failed_conditions=list(row.failed_conditions),
            created_at=row.created_at,
        )
diff --git a/keynetra/infrastructure/repositories/auth_models.py b/keynetra/infrastructure/repositories/auth_models.py
new file mode 100644
index 0000000..d28f403
--- /dev/null
+++ b/keynetra/infrastructure/repositories/auth_models.py
@@ -0,0 +1,68 @@
+"""Authorization model persistence implementation."""
+
+from __future__ import annotations
+
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+from keynetra.domain.models.auth_model import AuthorizationModel
+from keynetra.services.interfaces import AuthModelRecord
+
+
class SqlAuthModelRepository:
    """SQLAlchemy-backed authorization model repository.

    Each tenant has at most one model row; ``upsert_model`` creates or
    replaces it in place and commits immediately.
    """

    def __init__(self, session: Session) -> None:
        self._session = session

    def get_model(self, *, tenant_id: int) -> AuthModelRecord | None:
        """Return the tenant's authorization model, or None when unset."""
        row = (
            self._session.execute(
                select(AuthorizationModel).where(AuthorizationModel.tenant_id == tenant_id)
            )
            .scalars()
            .first()
        )
        return None if row is None else self._to_record(row)

    def upsert_model(
        self,
        *,
        tenant_id: int,
        schema_text: str,
        schema_json: dict,
        compiled_json: dict,
    ) -> AuthModelRecord:
        """Create or overwrite the tenant's model and return the stored record."""
        row = (
            self._session.execute(
                select(AuthorizationModel).where(AuthorizationModel.tenant_id == tenant_id)
            )
            .scalars()
            .first()
        )
        if row is None:
            row = AuthorizationModel(
                tenant_id=tenant_id,
                schema_text=schema_text,
                schema_json=schema_json,
                compiled_json=compiled_json,
            )
            self._session.add(row)
        else:
            # Replace all three representations together so they stay in sync.
            row.schema_text = schema_text
            row.schema_json = schema_json
            row.compiled_json = compiled_json
        self._session.commit()
        self._session.refresh(row)
        return self._to_record(row)

    def _to_record(self, row: AuthorizationModel) -> AuthModelRecord:
        # Copy JSON columns defensively so callers cannot mutate ORM state.
        return AuthModelRecord(
            id=row.id,
            tenant_id=row.tenant_id,
            schema_text=row.schema_text,
            schema_json=dict(row.schema_json or {}),
            compiled_json=dict(row.compiled_json or {}),
            created_at=row.created_at,
            updated_at=row.updated_at,
        )
diff --git a/keynetra/infrastructure/repositories/idempotency.py b/keynetra/infrastructure/repositories/idempotency.py
new file mode 100644
index 0000000..b8998aa
--- /dev/null
+++ b/keynetra/infrastructure/repositories/idempotency.py
@@ -0,0 +1,86 @@
+"""Persistence for API idempotency records."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from datetime import UTC, datetime
+
+from sqlalchemy import select
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import Session
+
+from keynetra.domain.models.idempotency import IdempotencyRecord
+
+
@dataclass(frozen=True)
class IdempotencyStartResult:
    """Result of claiming or replaying an idempotent request.

    ``outcome`` is one of "started", "mismatch", "pending", or "replay".
    ``record_id`` is set for "started" and "replay"; the response fields
    (``status_code``/``response_body``/``content_type``) only for "replay".
    """

    outcome: str
    record_id: int | None = None
    status_code: int | None = None
    response_body: str | None = None
    content_type: str | None = None
+
+
class SqlIdempotencyRepository:
    """SQLAlchemy-backed storage for idempotent write requests.

    Claiming relies on a uniqueness constraint over (scope, idempotency_key):
    the first inserter wins, concurrent inserters hit IntegrityError and are
    routed to replay/pending/mismatch handling.
    """

    def __init__(self, session: Session) -> None:
        self._session = session

    def start(
        self, *, scope: str, idempotency_key: str, request_hash: str
    ) -> IdempotencyStartResult:
        """Claim the key or classify the duplicate.

        Returns "started" when this call won the insert race, "mismatch" when
        the key was reused with a different request body, "pending" when the
        original request has not finished yet, and "replay" with the stored
        response otherwise.
        """
        record = IdempotencyRecord(
            scope=scope, idempotency_key=idempotency_key, request_hash=request_hash
        )
        self._session.add(record)
        try:
            self._session.commit()
            self._session.refresh(record)
            return IdempotencyStartResult(outcome="started", record_id=record.id)
        except IntegrityError:
            # Lost the insert race: roll back and inspect the winner's row.
            self._session.rollback()
            existing = self._get(scope=scope, idempotency_key=idempotency_key)
            if existing is None:
                # Row vanished between conflict and lookup; surface the error.
                raise
            if existing.request_hash != request_hash:
                return IdempotencyStartResult(outcome="mismatch")
            if existing.response_status_code is None or existing.response_body is None:
                return IdempotencyStartResult(outcome="pending")
            return IdempotencyStartResult(
                outcome="replay",
                record_id=existing.id,
                status_code=existing.response_status_code,
                response_body=existing.response_body,
                content_type=existing.response_content_type,
            )

    def complete(
        self,
        *,
        record_id: int,
        status_code: int,
        response_body: str,
        content_type: str | None,
    ) -> None:
        """Store the final response on a claimed record; unknown ids are ignored."""
        record = self._session.get(IdempotencyRecord, record_id)
        if record is None:
            return
        record.response_status_code = status_code
        record.response_body = response_body
        record.response_content_type = content_type
        record.completed_at = datetime.now(UTC)
        self._session.commit()

    def _get(self, *, scope: str, idempotency_key: str) -> IdempotencyRecord | None:
        # Lookup by the same unique pair the insert conflicts on.
        return (
            self._session.execute(
                select(IdempotencyRecord)
                .where(IdempotencyRecord.scope == scope)
                .where(IdempotencyRecord.idempotency_key == idempotency_key)
            )
            .scalars()
            .first()
        )
diff --git a/keynetra/infrastructure/repositories/policies.py b/keynetra/infrastructure/repositories/policies.py
new file mode 100644
index 0000000..030fcd9
--- /dev/null
+++ b/keynetra/infrastructure/repositories/policies.py
@@ -0,0 +1,205 @@
+"""Policy persistence implementation."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from sqlalchemy import and_, delete, or_, select
+from sqlalchemy.orm import Session
+
+from keynetra.api.pagination import encode_cursor
+from keynetra.domain.models.policy_versioning import Policy, PolicyVersion
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.services.interfaces import PolicyListItem, PolicyMutationResult, PolicyRecord
+
+
class SqlPolicyRepository:
    """SQLAlchemy-backed policy repository.

    Policies are append-only versioned: each ``Policy`` row tracks a
    ``current_version`` pointer into its ``PolicyVersion`` rows. Mutations
    commit immediately on the injected session.
    """

    def __init__(self, session: Session) -> None:
        self._session = session

    def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]:
        """Return the engine-facing definitions of each policy's current version."""
        rows = self._current_policy_rows(tenant_id=tenant_id)
        return [
            PolicyRecord(
                id=version.id,
                definition=PolicyDefinition(
                    action=version.action,
                    # Anything that is not exactly "allow" is treated as deny.
                    effect="allow" if version.effect == "allow" else "deny",
                    priority=version.priority,
                    conditions=dict(version.conditions or {}),
                    policy_id=f"{policy.policy_key}:v{version.version}",
                ),
            )
            for version, policy in rows
        ]

    def list_current_policy_views(self, *, tenant_id: int) -> list[PolicyListItem]:
        """Return API-facing views of current policy versions."""
        rows = self._current_policy_rows(tenant_id=tenant_id)
        return [
            PolicyListItem(
                id=version.id,
                action=version.action,
                effect=version.effect,
                priority=version.priority,
                # Surface key/version to API consumers via the conditions dict.
                conditions=(version.conditions or {})
                | {"policy_key": policy.policy_key, "version": version.version},
            )
            for version, policy in rows
        ]

    def list_current_policy_page(
        self,
        *,
        tenant_id: int,
        limit: int,
        cursor: dict[str, Any] | None,
    ) -> tuple[list[PolicyListItem], str | None]:
        """Return one keyset-paginated page ordered by (priority, id) ascending."""
        query = (
            select(PolicyVersion, Policy)
            .join(Policy, Policy.id == PolicyVersion.policy_id)
            .where(Policy.tenant_id == tenant_id)
            .where(PolicyVersion.tenant_id == tenant_id)
            .where(PolicyVersion.version == Policy.current_version)
        )
        if cursor is not None:
            # Keyset filter: rows strictly after the cursor's (priority, id).
            query = query.where(
                or_(
                    PolicyVersion.priority > int(cursor["priority"]),
                    and_(
                        PolicyVersion.priority == int(cursor["priority"]),
                        PolicyVersion.id > int(cursor["id"]),
                    ),
                )
            )
        # Fetch one extra row to detect whether a next page exists.
        rows = self._session.execute(
            query.order_by(PolicyVersion.priority.asc(), PolicyVersion.id.asc()).limit(limit + 1)
        ).all()
        has_next = len(rows) > limit
        page_rows = rows[:limit]
        items = [
            PolicyListItem(
                id=version.id,
                action=version.action,
                effect=version.effect,
                priority=version.priority,
                conditions=(version.conditions or {})
                | {"policy_key": policy.policy_key, "version": version.version},
            )
            for version, policy in page_rows
        ]
        next_cursor = None
        if has_next and page_rows:
            last_version, _ = page_rows[-1]
            next_cursor = encode_cursor({"priority": last_version.priority, "id": last_version.id})
        return items, next_cursor

    def create_policy_version(
        self,
        *,
        tenant_id: int,
        policy_key: str,
        action: str,
        effect: str,
        priority: int,
        conditions: dict[str, Any],
        created_by: str | None,
    ) -> PolicyMutationResult:
        """Append a new version for *policy_key*, creating the policy if needed.

        The policy's ``current_version`` pointer is advanced to the new
        version in the same commit.
        """
        policy = (
            self._session.execute(
                select(Policy)
                .where(Policy.tenant_id == tenant_id)
                .where(Policy.policy_key == policy_key)
            )
            .scalars()
            .first()
        )
        if policy is None:
            policy = Policy(tenant_id=tenant_id, policy_key=policy_key, current_version=1)
            self._session.add(policy)
            # Flush so policy.id is available for the version row below.
            self._session.flush()
            next_version = 1
        else:
            next_version = int(policy.current_version) + 1
            policy.current_version = next_version

        policy_version = PolicyVersion(
            tenant_id=tenant_id,
            policy_id=policy.id,
            version=next_version,
            action=action,
            effect=effect,
            priority=priority,
            conditions=conditions,
            created_by=created_by,
        )
        self._session.add(policy_version)
        self._session.commit()
        self._session.refresh(policy_version)
        return PolicyMutationResult(
            id=policy_version.id,
            action=policy_version.action,
            effect=policy_version.effect,
            priority=policy_version.priority,
            conditions=dict(policy_version.conditions or {}),
        )

    def rollback_policy(self, *, tenant_id: int, policy_key: str, version: int) -> tuple[str, int]:
        """Point the policy back at an existing historical version.

        Raises ValueError when the policy or the target version does not exist.
        """
        policy = (
            self._session.execute(
                select(Policy)
                .where(Policy.tenant_id == tenant_id)
                .where(Policy.policy_key == policy_key)
            )
            .scalars()
            .first()
        )
        if policy is None:
            raise ValueError("policy not found")
        existing = (
            self._session.execute(
                select(PolicyVersion)
                .where(PolicyVersion.tenant_id == tenant_id)
                .where(PolicyVersion.policy_id == policy.id)
                .where(PolicyVersion.version == version)
            )
            .scalars()
            .first()
        )
        if existing is None:
            raise ValueError("version not found")
        policy.current_version = version
        self._session.commit()
        self._session.refresh(policy)
        return policy.policy_key, int(policy.current_version)

    def delete_policy(self, *, tenant_id: int, policy_key: str) -> None:
        """Hard-delete a policy and all its versions; missing keys are a no-op."""
        policy = (
            self._session.execute(
                select(Policy)
                .where(Policy.tenant_id == tenant_id)
                .where(Policy.policy_key == policy_key)
            )
            .scalars()
            .first()
        )
        if policy is None:
            return
        # Delete children before the parent row.
        self._session.execute(
            delete(PolicyVersion)
            .where(PolicyVersion.tenant_id == tenant_id)
            .where(PolicyVersion.policy_id == policy.id)
        )
        self._session.execute(delete(Policy).where(Policy.id == policy.id))
        self._session.commit()

    def _current_policy_rows(self, *, tenant_id: int) -> list[tuple[PolicyVersion, Policy]]:
        # (version, policy) pairs where the version is the policy's current one,
        # in deterministic (priority, id) order.
        return self._session.execute(
            select(PolicyVersion, Policy)
            .join(Policy, Policy.id == PolicyVersion.policy_id)
            .where(Policy.tenant_id == tenant_id)
            .where(PolicyVersion.tenant_id == tenant_id)
            .where(PolicyVersion.version == Policy.current_version)
            .order_by(PolicyVersion.priority.asc(), PolicyVersion.id.asc())
        ).all()
diff --git a/keynetra/infrastructure/repositories/relationships.py b/keynetra/infrastructure/repositories/relationships.py
new file mode 100644
index 0000000..835ed2c
--- /dev/null
+++ b/keynetra/infrastructure/repositories/relationships.py
@@ -0,0 +1,173 @@
+"""Relationship persistence implementation."""
+
+from __future__ import annotations
+
+from sqlalchemy import and_, or_, select
+from sqlalchemy.orm import Session
+
+from keynetra.api.pagination import encode_cursor
+from keynetra.domain.models.relationship import Relationship
+from keynetra.services.interfaces import RelationshipRecord
+
+
class SqlRelationshipRepository:
    """SQLAlchemy-backed relationship repository.

    Stores subject->object relationship tuples per tenant and exposes full
    listings plus cursor-based (keyset) pagination over a subject's edges.
    """

    def __init__(self, session: Session) -> None:
        self._session = session

    def list_for_subject(
        self, *, tenant_id: int, subject_type: str, subject_id: str
    ) -> list[RelationshipRecord]:
        """Return every relationship held by a subject, in a stable order."""
        rows = (
            self._session.execute(
                select(Relationship)
                .where(Relationship.tenant_id == tenant_id)
                .where(Relationship.subject_type == subject_type)
                .where(Relationship.subject_id == subject_id)
                .order_by(
                    Relationship.relation.asc(),
                    Relationship.object_type.asc(),
                    Relationship.object_id.asc(),
                    Relationship.id.asc(),
                )
            )
            .scalars()
            .all()
        )
        return [
            RelationshipRecord(
                subject_type=row.subject_type,
                subject_id=row.subject_id,
                relation=row.relation,
                object_type=row.object_type,
                object_id=row.object_id,
            )
            for row in rows
        ]

    def list_for_subject_page(
        self,
        *,
        tenant_id: int,
        subject_type: str,
        subject_id: str,
        limit: int,
        cursor: dict[str, object] | None,
    ) -> tuple[list[RelationshipRecord], str | None]:
        """Return one page of a subject's relationships plus a continuation cursor.

        Uses keyset pagination over the 4-column sort key
        (relation, object_type, object_id, id); the cursor carries the
        values of the previous page's last row.
        """
        query = (
            select(Relationship)
            .where(Relationship.tenant_id == tenant_id)
            .where(Relationship.subject_type == subject_type)
            .where(Relationship.subject_id == subject_id)
        )
        if cursor is not None:
            # Strictly-after predicate for the composite key: each OR branch
            # pins a longer prefix of the key and advances the next column.
            query = query.where(
                or_(
                    Relationship.relation > str(cursor["relation"]),
                    and_(
                        Relationship.relation == str(cursor["relation"]),
                        Relationship.object_type > str(cursor["object_type"]),
                    ),
                    and_(
                        Relationship.relation == str(cursor["relation"]),
                        Relationship.object_type == str(cursor["object_type"]),
                        Relationship.object_id > str(cursor["object_id"]),
                    ),
                    and_(
                        Relationship.relation == str(cursor["relation"]),
                        Relationship.object_type == str(cursor["object_type"]),
                        Relationship.object_id == str(cursor["object_id"]),
                        Relationship.id > int(cursor["id"]),
                    ),
                )
            )
        # Fetch limit+1 rows to learn whether another page exists without a
        # separate COUNT query.
        rows = (
            self._session.execute(
                query.order_by(
                    Relationship.relation.asc(),
                    Relationship.object_type.asc(),
                    Relationship.object_id.asc(),
                    Relationship.id.asc(),
                ).limit(limit + 1)
            )
            .scalars()
            .all()
        )
        has_next = len(rows) > limit
        page_rows = rows[:limit]
        items = [
            RelationshipRecord(
                subject_type=row.subject_type,
                subject_id=row.subject_id,
                relation=row.relation,
                object_type=row.object_type,
                object_id=row.object_id,
            )
            for row in page_rows
        ]
        next_cursor = None
        if has_next and page_rows:
            last = page_rows[-1]
            next_cursor = encode_cursor(
                {
                    "relation": last.relation,
                    "object_type": last.object_type,
                    "object_id": last.object_id,
                    "id": last.id,
                }
            )
        return items, next_cursor

    def list_for_object(
        self, *, tenant_id: int, object_type: str, object_id: str
    ) -> list[RelationshipRecord]:
        """Return every relationship pointing at an object, in a stable order."""
        rows = (
            self._session.execute(
                select(Relationship)
                .where(Relationship.tenant_id == tenant_id)
                .where(Relationship.object_type == object_type)
                .where(Relationship.object_id == object_id)
                .order_by(
                    Relationship.subject_type.asc(),
                    Relationship.subject_id.asc(),
                    Relationship.relation.asc(),
                    Relationship.id.asc(),
                )
            )
            .scalars()
            .all()
        )
        return [
            RelationshipRecord(
                subject_type=row.subject_type,
                subject_id=row.subject_id,
                relation=row.relation,
                object_type=row.object_type,
                object_id=row.object_id,
            )
            for row in rows
        ]

    def create(
        self,
        *,
        tenant_id: int,
        subject_type: str,
        subject_id: str,
        relation: str,
        object_type: str,
        object_id: str,
    ) -> int:
        """Insert one relationship tuple and return its primary key."""
        row = Relationship(
            tenant_id=tenant_id,
            subject_type=subject_type,
            subject_id=subject_id,
            relation=relation,
            object_type=object_type,
            object_id=object_id,
        )
        self._session.add(row)
        self._session.commit()
        self._session.refresh(row)
        return row.id
diff --git a/keynetra/infrastructure/repositories/tenants.py b/keynetra/infrastructure/repositories/tenants.py
new file mode 100644
index 0000000..2157ebf
--- /dev/null
+++ b/keynetra/infrastructure/repositories/tenants.py
@@ -0,0 +1,68 @@
+"""Tenant persistence implementation.
+
+This module owns database access. Services should depend on the
+``TenantRepository`` protocol instead of SQLAlchemy details.
+"""
+
+from __future__ import annotations
+
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+from keynetra.domain.models.tenant import Tenant
+from keynetra.services.interfaces import TenantRecord
+
+
class SqlTenantRepository:
    """SQLAlchemy-backed tenant repository."""

    def __init__(self, session: Session) -> None:
        self._session = session

    def get_by_id(self, tenant_id: int) -> TenantRecord | None:
        """Return the tenant record for ``tenant_id``, or None if absent."""
        tenant = (
            self._session.execute(select(Tenant).where(Tenant.id == tenant_id)).scalars().first()
        )
        if tenant is None:
            return None
        return self._to_record(tenant)

    def get_or_create(self, tenant_key: str) -> TenantRecord:
        """Return the tenant with ``tenant_key``, creating it when missing."""
        existing = (
            self._session.execute(select(Tenant).where(Tenant.tenant_key == tenant_key))
            .scalars()
            .first()
        )
        if existing is not None:
            return self._to_record(existing)
        tenant = Tenant(tenant_key=tenant_key)
        self._session.add(tenant)
        self._session.commit()
        self._session.refresh(tenant)
        return self._to_record(tenant)

    def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
        """Increment the tenant's policy version counter and return the fresh record.

        Raises ValueError when the tenant row no longer exists.
        """
        row = self._session.execute(select(Tenant).where(Tenant.id == tenant.id)).scalars().first()
        if row is None:
            raise ValueError("tenant not found")
        row.policy_version = int(row.policy_version) + 1
        self._session.commit()
        self._session.refresh(row)
        return self._to_record(row)

    def bump_revision(self, tenant: TenantRecord) -> TenantRecord:
        """Increment the tenant's authorization revision and return the fresh record.

        Raises ValueError when the tenant row no longer exists.
        """
        row = self._session.execute(select(Tenant).where(Tenant.id == tenant.id)).scalars().first()
        if row is None:
            raise ValueError("tenant not found")
        row.authorization_revision = int(row.authorization_revision) + 1
        self._session.commit()
        self._session.refresh(row)
        return self._to_record(row)

    def _to_record(self, tenant: Tenant) -> TenantRecord:
        """Map an ORM row to the service-layer TenantRecord."""
        return TenantRecord(
            id=tenant.id,
            tenant_key=tenant.tenant_key,
            policy_version=int(tenant.policy_version),
            # getattr fallback keeps older rows/schemas without the
            # authorization_revision column working with a default of 1.
            revision=int(getattr(tenant, "authorization_revision", 1)),
        )
diff --git a/keynetra/infrastructure/repositories/users.py b/keynetra/infrastructure/repositories/users.py
new file mode 100644
index 0000000..cc2d772
--- /dev/null
+++ b/keynetra/infrastructure/repositories/users.py
@@ -0,0 +1,47 @@
+"""User persistence implementation."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from sqlalchemy import select
+from sqlalchemy.orm import Session, joinedload
+
+from keynetra.domain.models.rbac import Role, User
+
+
class SqlUserRepository:
    """SQLAlchemy-backed user context loader."""

    def __init__(self, session: Session) -> None:
        self._session = session

    def get_user_context(self, user_id: int) -> dict[str, Any] | None:
        """Load a user's roles and flattened permission actions.

        Returns None when the user does not exist. ``role`` holds the
        alphabetically first role name for single-role callers; ``roles``
        and ``permissions`` are sorted for deterministic output.
        """
        user = (
            self._session.execute(
                select(User)
                .where(User.id == user_id)
                .options(joinedload(User.roles).joinedload(Role.permissions))
            )
            .scalars()
            # Bug fix: SQLAlchemy 2.0 requires unique() when joined eager
            # loading against collections; without it this raises
            # InvalidRequestError at runtime.
            .unique()
            .first()
        )
        if user is None:
            return None
        permissions: set[str] = set()
        roles: set[str] = set()
        for role in user.roles:
            roles.add(role.name)
            for permission in role.permissions:
                permissions.add(permission.action)
        primary_role = next(iter(sorted(roles)), None)
        return {
            "id": user.id,
            "role": primary_role,
            "roles": sorted(roles),
            "permissions": sorted(permissions),
        }

    def list_user_ids(self, *, tenant_id: int) -> list[int]:
        """Return all user ids in ascending order.

        NOTE(review): ``tenant_id`` is accepted but never applied to the
        query, so this returns ids across ALL tenants — confirm whether the
        User model carries a tenant scope and filter here if it does.
        """
        rows = self._session.execute(select(User.id).order_by(User.id.asc())).scalars().all()
        return [int(row) for row in rows]
diff --git a/keynetra/infrastructure/storage/__init__.py b/keynetra/infrastructure/storage/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keynetra/infrastructure/storage/session.py b/keynetra/infrastructure/storage/session.py
new file mode 100644
index 0000000..5305fb0
--- /dev/null
+++ b/keynetra/infrastructure/storage/session.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from collections.abc import Generator
+from functools import lru_cache
+
+from sqlalchemy import create_engine
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.pool import StaticPool
+
+from keynetra.config.settings import get_settings
+
+
@lru_cache
def create_engine_for_url(database_url: str) -> Engine:
    """Create (and memoize, one per URL) an engine for ``database_url``.

    In-memory sqlite uses a StaticPool so every session shares one
    connection — otherwise each pooled connection would see its own empty
    database, breaking deterministic tests.
    """
    if (
        database_url.startswith("sqlite+pysqlite:///:memory:")
        or database_url == "sqlite:///:memory:"
    ):
        return create_engine(
            database_url,
            # sqlite connections are thread-bound by default; tests use
            # the shared connection from multiple threads.
            connect_args={"check_same_thread": False},
            poolclass=StaticPool,
            future=True,
        )
    # pool_pre_ping guards against stale pooled connections on real databases.
    return create_engine(database_url, pool_pre_ping=True, future=True)
+
+
@lru_cache
def create_session_factory(database_url: str) -> sessionmaker[Session]:
    """Return a memoized sessionmaker bound to the engine for ``database_url``.

    expire_on_commit=False lets repositories return ORM attribute values
    after commit without triggering lazy refreshes.
    """
    engine = create_engine_for_url(database_url)
    return sessionmaker(bind=engine, autoflush=False, autocommit=False, expire_on_commit=False)
+
+
@lru_cache
def initialize_database(database_url: str) -> None:
    """Create all tables for sqlite URLs; run at most once per URL.

    lru_cache is used here purely for its run-once-per-argument effect.
    Non-sqlite URLs are skipped — presumably those are migrated out of
    band (e.g. via Alembic); confirm before relying on this for Postgres.
    """
    if not database_url.startswith("sqlite"):
        return

    # Import every model module for its side effect of registering tables
    # on the shared declarative Base before create_all runs.
    from keynetra.domain.models import acl as _acl  # noqa: F401
    from keynetra.domain.models import audit as _audit  # noqa: F401
    from keynetra.domain.models import auth_model as _auth_model  # noqa: F401
    from keynetra.domain.models import idempotency as _idempotency  # noqa: F401
    from keynetra.domain.models import policy_versioning as _policy_versioning  # noqa: F401
    from keynetra.domain.models import rbac as _rbac  # noqa: F401
    from keynetra.domain.models import relationship as _relationship  # noqa: F401
    from keynetra.domain.models import tenant as _tenant  # noqa: F401
    from keynetra.domain.models.base import Base

    engine = create_engine_for_url(database_url)
    Base.metadata.create_all(bind=engine)
+
+
def get_db() -> Generator[Session, None, None]:
    """FastAPI-style dependency: yield a session, always closing it after use."""
    settings = get_settings()
    initialize_database(settings.database_url)
    SessionLocal = create_session_factory(settings.database_url)
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
diff --git a/keynetra/main.py b/keynetra/main.py
new file mode 100644
index 0000000..2f4a0a4
--- /dev/null
+++ b/keynetra/main.py
@@ -0,0 +1,8 @@
+"""Backward-compatible module entrypoint.
+
+Use `keynetra.api.main` for the canonical HTTP transport layer.
+"""
+
+from keynetra.api.main import app, create_app
+
+__all__ = ["app", "create_app"]
diff --git a/keynetra/migrations.py b/keynetra/migrations.py
new file mode 100644
index 0000000..85dac85
--- /dev/null
+++ b/keynetra/migrations.py
@@ -0,0 +1,31 @@
+"""Utilities for detecting destructive Alembic migrations."""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+from typing import Iterable
+
# Matches Alembic operations that destroy data (table or column drops).
DROP_PATTERN = re.compile(r"\bdrop_(?:table|column)\b")
# Captures the module-level ``revision = '<id>'`` assignment in a migration
# file. Bug fix: the group was written ``(?P[...])`` which is invalid regex
# syntax (``(?P`` must be followed by ``<name>``) and raised re.error at
# import time; the name must be "revision" to match the group() read below.
REVISION_PATTERN = re.compile(r"^revision\s*=\s*['\"](?P<revision>[^'\"]+)['\"]", re.MULTILINE)


def parse_revision_file(path: Path) -> tuple[str | None, bool]:
    """Read one Alembic revision file.

    Returns ``(revision_id, is_destructive)``; an unreadable file yields
    ``(None, False)`` so directory scans stay best-effort. OSError is
    caught (not just FileNotFoundError) to also cover permission errors
    and directories.
    """
    try:
        content = path.read_text(encoding="utf-8")
    except OSError:
        return None, False
    match = REVISION_PATTERN.search(content)
    revision = match.group("revision") if match else None
    destructive = bool(DROP_PATTERN.search(content))
    return revision, destructive
+
+
def find_destructive_revisions(versions_dir: Path, applied_revisions: Iterable[str]) -> list[str]:
    """Return unapplied revision ids that contain destructive operations.

    Scans every ``*.py`` file in ``versions_dir``; a revision is reported
    only when it drops a table/column AND is not already in
    ``applied_revisions``. Result is sorted for stable output.
    """
    applied = {rev for rev in applied_revisions if isinstance(rev, str)}
    destructive: list[str] = []
    for path in versions_dir.glob("*.py"):
        revision, has_drop = parse_revision_file(path)
        if revision and has_drop and revision not in applied:
            destructive.append(revision)
    return sorted(destructive)
diff --git a/keynetra/modeling/__init__.py b/keynetra/modeling/__init__.py
new file mode 100644
index 0000000..c0d8198
--- /dev/null
+++ b/keynetra/modeling/__init__.py
@@ -0,0 +1,10 @@
+from keynetra.modeling.model_validator import validate_authorization_schema
+from keynetra.modeling.permission_compiler import compile_authorization_schema
+from keynetra.modeling.schema_parser import AuthorizationSchema, parse_authorization_schema
+
+__all__ = [
+ "AuthorizationSchema",
+ "compile_authorization_schema",
+ "parse_authorization_schema",
+ "validate_authorization_schema",
+]
diff --git a/keynetra/modeling/model_validator.py b/keynetra/modeling/model_validator.py
new file mode 100644
index 0000000..ee8a7fb
--- /dev/null
+++ b/keynetra/modeling/model_validator.py
@@ -0,0 +1,47 @@
+"""Validation for authorization modeling schemas."""
+
+from __future__ import annotations
+
+from keynetra.modeling.schema_parser import (
+ AndExpr,
+ AuthorizationSchema,
+ IdentifierExpr,
+ NotExpr,
+ OrExpr,
+)
+
+
def validate_authorization_schema(schema: AuthorizationSchema) -> None:
    """Validate structural invariants of a parsed authorization schema.

    Checks: version >= 1, at least one type including ``user``, at least
    one permission, every relation's subject types are declared, and every
    permission expression only references known relations/permissions.
    Raises ValueError on the first violation found.
    """
    if schema.version < 1:
        raise ValueError("schema version must be >= 1")
    if not schema.types:
        raise ValueError("schema must define at least one type")
    if "user" not in schema.types:
        raise ValueError("schema must define type user")
    if not schema.permissions:
        raise ValueError("schema must define permissions")
    for relation, subjects in schema.relations.items():
        if not relation:
            raise ValueError("relation names must be non-empty")
        for subject in subjects:
            if subject not in schema.types:
                raise ValueError(f"relation {relation} references unknown type {subject}")
    for permission, expr in schema.permissions.items():
        if not permission:
            raise ValueError("permission names must be non-empty")
        _validate_expr(expr, schema)
+
+
def _validate_expr(expr: object, schema: AuthorizationSchema) -> None:
    """Recursively verify that every identifier in a permission expression
    refers to a declared relation or permission.

    Raises ValueError for unknown identifiers or unrecognized node types.
    """
    if isinstance(expr, IdentifierExpr):
        if expr.name not in schema.relations and expr.name not in schema.permissions:
            raise ValueError(f"unknown relation or permission {expr.name}")
        return
    if isinstance(expr, NotExpr):
        _validate_expr(expr.value, schema)
        return
    # Idiomatic tuple form instead of chained isinstance checks.
    if isinstance(expr, (AndExpr, OrExpr)):
        _validate_expr(expr.left, schema)
        _validate_expr(expr.right, schema)
        return
    raise ValueError("invalid expression node")
diff --git a/keynetra/modeling/permission_compiler.py b/keynetra/modeling/permission_compiler.py
new file mode 100644
index 0000000..279f109
--- /dev/null
+++ b/keynetra/modeling/permission_compiler.py
@@ -0,0 +1,76 @@
+"""Compiles authorization schemas into executable permission graphs."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.modeling.model_validator import validate_authorization_schema
+from keynetra.modeling.schema_parser import (
+ AndExpr,
+ AuthorizationSchema,
+ Expr,
+ IdentifierExpr,
+ NotExpr,
+ OrExpr,
+ parse_authorization_schema,
+)
+
+
@dataclass(frozen=True)
class CompiledPermission:
    # Permission name and its parsed boolean expression tree.
    name: str
    expression: Expr
+
+
@dataclass(frozen=True)
class CompiledAuthorizationModel:
    """Immutable, executable form of a validated authorization schema."""

    version: int
    types: tuple[str, ...]
    relations: dict[str, tuple[str, ...]]
    permissions: dict[str, CompiledPermission]
    # Original schema text, preserved for round-tripping/debugging.
    raw: str

    def to_dict(self) -> dict[str, Any]:
        """Serialize the model (expression trees become nested dicts)."""
        return {
            "version": self.version,
            "types": list(self.types),
            "relations": {name: list(subjects) for name, subjects in self.relations.items()},
            "permissions": {
                name: _expr_to_dict(permission.expression)
                for name, permission in self.permissions.items()
            },
            "raw": self.raw,
        }
+
+
def compile_authorization_schema(
    schema_text: str | AuthorizationSchema,
) -> CompiledAuthorizationModel:
    """Parse (if given text), validate, and compile a schema into a model.

    Accepts either raw schema text or an already-parsed
    AuthorizationSchema. Raises ValueError on parse/validation failures.
    """
    schema = (
        parse_authorization_schema(schema_text) if isinstance(schema_text, str) else schema_text
    )
    validate_authorization_schema(schema)
    permissions = {
        name: CompiledPermission(name=name, expression=expr)
        for name, expr in schema.permissions.items()
    }
    return CompiledAuthorizationModel(
        version=schema.version,
        types=schema.types,
        relations=schema.relations,
        permissions=permissions,
        raw=schema.raw,
    )
+
+
def _expr_to_dict(expr: Expr) -> dict[str, Any]:
    """Convert an expression tree to a JSON-serializable nested dict."""
    if isinstance(expr, IdentifierExpr):
        return {"kind": "identifier", "name": expr.name}
    if isinstance(expr, NotExpr):
        return {"kind": "not", "value": _expr_to_dict(expr.value)}
    if isinstance(expr, AndExpr):
        return {"kind": "and", "left": _expr_to_dict(expr.left), "right": _expr_to_dict(expr.right)}
    if isinstance(expr, OrExpr):
        return {"kind": "or", "left": _expr_to_dict(expr.left), "right": _expr_to_dict(expr.right)}
    raise ValueError("invalid expression")
diff --git a/keynetra/modeling/schema_parser.py b/keynetra/modeling/schema_parser.py
new file mode 100644
index 0000000..62155ff
--- /dev/null
+++ b/keynetra/modeling/schema_parser.py
@@ -0,0 +1,160 @@
+"""Parser for schema-first authorization models."""
+
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from typing import Any
+
+
# AST node types for permission expressions. ``Any`` is used for child
# links because the nodes are mutually recursive.
@dataclass(frozen=True)
class IdentifierExpr:
    # Name of a relation or another permission.
    name: str


@dataclass(frozen=True)
class NotExpr:
    value: Any


@dataclass(frozen=True)
class AndExpr:
    left: Any
    right: Any


@dataclass(frozen=True)
class OrExpr:
    left: Any
    right: Any


Expr = IdentifierExpr | NotExpr | AndExpr | OrExpr


@dataclass(frozen=True)
class AuthorizationSchema:
    """Parsed form of the schema DSL (version, types, relations, permissions)."""

    version: int
    types: tuple[str, ...] = ()
    relations: dict[str, tuple[str, ...]] = field(default_factory=dict)
    permissions: dict[str, Expr] = field(default_factory=dict)
    # Original schema text, preserved verbatim.
    raw: str = ""


# One token per match: parenthesis, keyword (and/or/not), or identifier.
_TOKEN_RE = re.compile(r"\s*(\(|\)|and\b|or\b|not\b|[A-Za-z_][A-Za-z0-9_:-]*)\s*")
+
+
def parse_authorization_schema(schema_text: str) -> AuthorizationSchema:
    """Parse the schema DSL into an AuthorizationSchema.

    Expected layout: a ``model schema <version>`` header, then ``type X``
    lines, a ``relations`` section, and a ``permissions`` section. ``#``
    starts a comment; blank lines are ignored. Raises ValueError on any
    malformed line.
    """
    lines = [line.split("#", 1)[0].strip() for line in schema_text.splitlines()]
    lines = [line for line in lines if line]
    if not lines:
        raise ValueError("schema is empty")

    header = lines.pop(0)
    # Bug fix: the named group was written ``(?P\d+)`` — invalid regex
    # (``(?P`` must be followed by ``<name>``) that raises re.error at import;
    # the name must be "version" to match the group("version") read below.
    match = re.fullmatch(r"model\s+schema\s+(?P<version>\d+)", header, flags=re.IGNORECASE)
    if not match:
        raise ValueError("schema must start with 'model schema <version>'")

    version = int(match.group("version"))
    types: list[str] = []
    relations: dict[str, tuple[str, ...]] = {}
    permissions: dict[str, Expr] = {}
    section = None

    for line in lines:
        lowered = line.lower()
        if lowered in {"relations", "permissions"}:
            # Section markers switch how following lines are interpreted.
            section = lowered
            continue
        if lowered.startswith("type "):
            types.append(line.split(None, 1)[1].strip())
            continue
        if section == "relations":
            name, subjects = _parse_relation(line)
            relations[name] = subjects
            continue
        if section == "permissions":
            name, expr = _parse_permission(line)
            permissions[name] = expr
            continue
        raise ValueError(f"unexpected schema line: {line}")

    return AuthorizationSchema(
        version=version,
        types=tuple(types),
        relations=relations,
        permissions=permissions,
        raw=schema_text,
    )
+
+
+def _parse_relation(line: str) -> tuple[str, tuple[str, ...]]:
+ if ":" not in line:
+ raise ValueError(f"invalid relation: {line}")
+ name, subject_text = line.split(":", 1)
+ name = name.strip()
+ subject_text = subject_text.strip()
+ if not name:
+ raise ValueError(f"invalid relation: {line}")
+ if not subject_text.startswith("[") or not subject_text.endswith("]"):
+ raise ValueError(f"invalid relation subjects: {line}")
+ subjects = [item.strip() for item in subject_text[1:-1].split(",") if item.strip()]
+ if not subjects:
+ raise ValueError(f"invalid relation subjects: {line}")
+ return name, tuple(subjects)
+
+
def _parse_permission(line: str) -> tuple[str, Expr]:
    """Parse one permission definition like ``view = viewer or editor``.

    Returns the permission name and its parsed expression tree; raises
    ValueError when the line or the expression is malformed (including
    trailing tokens after a complete expression).
    """
    if "=" not in line:
        raise ValueError(f"invalid permission: {line}")
    name, expr_text = line.split("=", 1)
    name = name.strip()
    expr_text = expr_text.strip()
    if not name or not expr_text:
        raise ValueError(f"invalid permission: {line}")
    tokens = _tokenize(expr_text)
    expr, index = _parse_expr(tokens, 0)
    # A valid expression must consume every token.
    if index != len(tokens):
        raise ValueError(f"invalid permission expression: {line}")
    return name, expr
+
+
def _tokenize(expr_text: str) -> list[str]:
    """Split an expression into tokens, rejecting unrecognized characters.

    The re-join comparison (ignoring spaces) catches any character the
    token regex skipped over, e.g. stray punctuation.
    """
    tokens = [match.group(1) for match in _TOKEN_RE.finditer(expr_text)]
    if "".join(tokens).replace(" ", "") != expr_text.replace(" ", ""):
        raise ValueError(f"invalid permission expression: {expr_text}")
    return tokens
+
+
def _parse_expr(tokens: list[str], index: int) -> tuple[Expr, int]:
    """Parse an ``or``-expression (lowest precedence); returns (node, next index)."""
    left, index = _parse_term(tokens, index)
    while index < len(tokens) and tokens[index].lower() == "or":
        right, index = _parse_term(tokens, index + 1)
        left = OrExpr(left=left, right=right)
    return left, index
+
+
def _parse_term(tokens: list[str], index: int) -> tuple[Expr, int]:
    """Parse an ``and``-expression (binds tighter than ``or``)."""
    left, index = _parse_factor(tokens, index)
    while index < len(tokens) and tokens[index].lower() == "and":
        right, index = _parse_factor(tokens, index + 1)
        left = AndExpr(left=left, right=right)
    return left, index
+
+
def _parse_factor(tokens: list[str], index: int) -> tuple[Expr, int]:
    """Parse a factor: ``not`` prefix, parenthesized expression, or identifier."""
    if index >= len(tokens):
        raise ValueError("unexpected end of expression")
    token = tokens[index]
    lowered = token.lower()
    if lowered == "not":
        value, next_index = _parse_factor(tokens, index + 1)
        return NotExpr(value=value), next_index
    if token == "(":
        expr, next_index = _parse_expr(tokens, index + 1)
        if next_index >= len(tokens) or tokens[next_index] != ")":
            raise ValueError("missing closing parenthesis")
        return expr, next_index + 1
    # A keyword or closing paren here means the expression is malformed.
    if token in {")", "and", "or"}:
        raise ValueError("invalid expression")
    return IdentifierExpr(name=token), index + 1
diff --git a/keynetra/observability/__init__.py b/keynetra/observability/__init__.py
new file mode 100644
index 0000000..fa039cd
--- /dev/null
+++ b/keynetra/observability/__init__.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from keynetra.observability.metrics import (
+ observe_access_check_latency,
+ observe_decision_latency,
+ record_access_check,
+ record_acl_match,
+ record_api_error,
+ record_cache_event,
+ record_cache_hit,
+ record_cache_miss,
+ record_policy_compilation,
+ record_policy_evaluation,
+ record_relationship_traversal,
+ record_revision_update,
+)
+
+__all__ = [
+ "observe_access_check_latency",
+ "observe_decision_latency",
+ "record_access_check",
+ "record_acl_match",
+ "record_api_error",
+ "record_cache_event",
+ "record_cache_hit",
+ "record_cache_miss",
+ "record_policy_compilation",
+ "record_policy_evaluation",
+ "record_relationship_traversal",
+ "record_revision_update",
+]
diff --git a/keynetra/observability/metrics.py b/keynetra/observability/metrics.py
new file mode 100644
index 0000000..844259f
--- /dev/null
+++ b/keynetra/observability/metrics.py
@@ -0,0 +1,166 @@
+"""Prometheus metrics for KeyNetra observability."""
+
+from __future__ import annotations
+
# prometheus_client is an optional dependency: when it is absent every
# metric handle below is None and the record_*/observe_* helpers in this
# module degrade to no-ops.
try:
    from prometheus_client import Counter, Histogram
except ModuleNotFoundError:  # pragma: no cover
    Counter = None  # type: ignore[assignment]
    Histogram = None  # type: ignore[assignment]

if Counter is not None and Histogram is not None:
    ACCESS_CHECKS_TOTAL = Counter(
        "keynetra_access_checks_total",
        "Authorization decision counts",
        labelnames=("tenant", "decision"),
    )
    ACL_MATCHES_TOTAL = Counter(
        "keynetra_acl_matches_total",
        "ACL match counts",
        labelnames=("tenant",),
    )
    POLICY_EVALUATIONS_TOTAL = Counter(
        "keynetra_policy_evaluations_total",
        "Policy evaluation counts",
        labelnames=("tenant",),
    )
    RELATIONSHIP_TRAVERSALS_TOTAL = Counter(
        "keynetra_relationship_traversals_total",
        "Relationship traversal counts",
        labelnames=("tenant",),
    )
    POLICY_COMPILATIONS_TOTAL = Counter(
        "keynetra_policy_compilations_total",
        "Policy compilation counts",
        labelnames=("tenant",),
    )
    REVISION_UPDATES_TOTAL = Counter(
        "keynetra_revision_updates_total",
        "Revision update counts",
        labelnames=("tenant",),
    )
    ACCESS_CHECK_LATENCY_SECONDS = Histogram(
        "keynetra_access_check_latency_seconds",
        "Authorization latency per evaluation stage",
        labelnames=("tenant", "stage"),
    )
    CACHE_HITS_TOTAL = Counter(
        "keynetra_cache_hits_total",
        "Cache hit counts",
        labelnames=("cache_type",),
    )
    CACHE_MISSES_TOTAL = Counter(
        "keynetra_cache_misses_total",
        "Cache miss counts",
        labelnames=("cache_type",),
    )
    DECISION_LATENCY_SECONDS = Histogram(
        "keynetra_decision_latency_seconds",
        "Authorization decision latency",
        labelnames=("tenant_key",),
    )
    CACHE_EVENTS_TOTAL = Counter(
        "keynetra_cache_events_total",
        "Authorization cache hit and miss counts",
        labelnames=("cache_name", "outcome"),
    )
    API_ERRORS_TOTAL = Counter(
        "keynetra_api_errors_total",
        "Core API error counts",
        labelnames=("code",),
    )
else:  # pragma: no cover
    # Fallback handles: the helper functions check for None before use.
    ACCESS_CHECKS_TOTAL = None
    ACL_MATCHES_TOTAL = None
    POLICY_EVALUATIONS_TOTAL = None
    RELATIONSHIP_TRAVERSALS_TOTAL = None
    POLICY_COMPILATIONS_TOTAL = None
    REVISION_UPDATES_TOTAL = None
    ACCESS_CHECK_LATENCY_SECONDS = None
    CACHE_HITS_TOTAL = None
    CACHE_MISSES_TOTAL = None
    DECISION_LATENCY_SECONDS = None
    CACHE_EVENTS_TOTAL = None
    API_ERRORS_TOTAL = None
+
+
+def _tenant_label(tenant: str | None) -> str:
+ value = str(tenant or "default").strip()
+ return value or "default"
+
+
+def _cache_type_label(cache_type: str) -> str:
+ value = str(cache_type or "unknown").strip().lower()
+ return value if value in {"policy", "acl", "relationship", "access_index"} else "unknown"
+
+
def record_access_check(*, tenant: str | None, decision: str) -> None:
    """Count one authorization decision (no-op without prometheus_client)."""
    if ACCESS_CHECKS_TOTAL is not None:
        ACCESS_CHECKS_TOTAL.labels(tenant=_tenant_label(tenant), decision=str(decision)).inc()


def record_acl_match(*, tenant: str | None) -> None:
    """Count one ACL match."""
    if ACL_MATCHES_TOTAL is not None:
        ACL_MATCHES_TOTAL.labels(tenant=_tenant_label(tenant)).inc()


def record_policy_evaluation(*, tenant: str | None) -> None:
    """Count one policy evaluation."""
    if POLICY_EVALUATIONS_TOTAL is not None:
        POLICY_EVALUATIONS_TOTAL.labels(tenant=_tenant_label(tenant)).inc()


def record_relationship_traversal(*, tenant: str | None) -> None:
    """Count one relationship-graph traversal."""
    if RELATIONSHIP_TRAVERSALS_TOTAL is not None:
        RELATIONSHIP_TRAVERSALS_TOTAL.labels(tenant=_tenant_label(tenant)).inc()


def record_policy_compilation(*, tenant: str | None) -> None:
    """Count one policy compilation."""
    if POLICY_COMPILATIONS_TOTAL is not None:
        POLICY_COMPILATIONS_TOTAL.labels(tenant=_tenant_label(tenant)).inc()


def record_revision_update(*, tenant: str | None) -> None:
    """Count one authorization-revision bump."""
    if REVISION_UPDATES_TOTAL is not None:
        REVISION_UPDATES_TOTAL.labels(tenant=_tenant_label(tenant)).inc()


def observe_access_check_latency(*, tenant: str | None, stage: str, value: float) -> None:
    """Record latency (seconds) for one evaluation stage of an access check."""
    if ACCESS_CHECK_LATENCY_SECONDS is not None:
        ACCESS_CHECK_LATENCY_SECONDS.labels(tenant=_tenant_label(tenant), stage=str(stage)).observe(
            value
        )


def record_cache_hit(*, cache_type: str) -> None:
    """Count one cache hit (cache_type is normalized to the known set)."""
    cache = _cache_type_label(cache_type)
    if CACHE_HITS_TOTAL is not None:
        CACHE_HITS_TOTAL.labels(cache_type=cache).inc()


def record_cache_miss(*, cache_type: str) -> None:
    """Count one cache miss (cache_type is normalized to the known set)."""
    cache = _cache_type_label(cache_type)
    if CACHE_MISSES_TOTAL is not None:
        CACHE_MISSES_TOTAL.labels(cache_type=cache).inc()


def record_cache_event(*, cache_name: str, outcome: str) -> None:
    """Count one cache event, mirroring it into the hit/miss counters.

    Unknown cache names are counted in the event metric only. Any outcome
    other than "hit" (including empty) is mirrored as a miss.
    """
    cache = _cache_type_label(cache_name)
    outcome_label = str(outcome).strip().lower()
    if CACHE_EVENTS_TOTAL is not None:
        CACHE_EVENTS_TOTAL.labels(cache_name=cache, outcome=outcome_label or "miss").inc()
    if cache == "unknown":
        return
    if outcome_label == "hit":
        record_cache_hit(cache_type=cache)
    else:
        record_cache_miss(cache_type=cache)


def observe_decision_latency(*, tenant_key: str, value: float) -> None:
    """Record total decision latency (seconds) for a tenant."""
    if DECISION_LATENCY_SECONDS is not None:
        DECISION_LATENCY_SECONDS.labels(tenant_key=tenant_key).observe(value)


def record_api_error(*, code: str) -> None:
    """Count one core API error by error code."""
    if API_ERRORS_TOTAL is not None:
        API_ERRORS_TOTAL.labels(code=code).inc()
diff --git a/keynetra/services/__init__.py b/keynetra/services/__init__.py
new file mode 100644
index 0000000..02dea84
--- /dev/null
+++ b/keynetra/services/__init__.py
@@ -0,0 +1 @@
+"""Service layer."""
diff --git a/keynetra/services/access_indexer.py b/keynetra/services/access_indexer.py
new file mode 100644
index 0000000..ff87584
--- /dev/null
+++ b/keynetra/services/access_indexer.py
@@ -0,0 +1,179 @@
+"""Distributed access indexing for ACL and relationship lookups."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.services.interfaces import (
+ AccessIndexCache,
+ AccessIndexEntry,
+ ACLCache,
+ ACLRepository,
+ RelationshipRecord,
+ RelationshipRepository,
+)
+
+
@dataclass(frozen=True)
class AccessSubject:
    """A (type, id) pair identifying a subject in access-index entries."""

    subject_type: str
    subject_id: str

    def to_descriptor(self) -> str:
        """Return the canonical ``type:id`` descriptor string."""
        return ":".join((self.subject_type, self.subject_id))
+
+
def relationship_descriptor(relationship: RelationshipRecord) -> str:
    """Return the canonical descriptor for a relationship tuple:
    ``relationship:<relation>:<object_type>:<object_id>``."""
    parts = (
        "relationship",
        relationship.relation,
        relationship.object_type,
        relationship.object_id,
    )
    return ":".join(parts)
+
+
class AccessIndexer:
    """Builds resource/action access indices from ACL and relationship data.

    Layered lookup: the compiled access index is checked first, then the
    ACL cache, then the ACL repository; results are written back into the
    caches on the way out.
    """

    def __init__(
        self,
        *,
        acl_repository: ACLRepository,
        acl_cache: ACLCache,
        access_index_cache: AccessIndexCache,
        relationships: RelationshipRepository,
    ) -> None:
        self._acl_repository = acl_repository
        self._acl_cache = acl_cache
        self._access_index_cache = access_index_cache
        self._relationships = relationships

    def build_resource_index(
        self,
        *,
        tenant_id: int,
        resource_type: str,
        resource_id: str,
        action: str,
    ) -> list[AccessIndexEntry]:
        """Return (building and caching if needed) the access-index entries
        for one (resource, action) pair.

        Produces one entry per matching ACL row plus, when the object has
        inbound relationships, a single aggregated "relationship" entry.
        """
        cached = self._access_index_cache.get(
            tenant_id=tenant_id,
            resource_type=resource_type,
            resource_id=resource_id,
            action=action,
        )
        if cached is not None:
            return cached

        # ACL lookup goes through its own cache before hitting the repository.
        acl_entries = self._acl_cache.get(
            tenant_id=tenant_id,
            resource_type=resource_type,
            resource_id=resource_id,
            action=action,
        )
        if acl_entries is None:
            acl_entries = self._acl_repository.find_matching_acl(
                tenant_id=tenant_id,
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
            )
            self._acl_cache.set(
                tenant_id=tenant_id,
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
                acl_entries=acl_entries,
            )

        relationship_rows = self._relationships.list_for_object(
            tenant_id=tenant_id,
            object_type=resource_type,
            object_id=resource_id,
        )

        entries = [
            AccessIndexEntry(
                resource_type=resource_type,
                resource_id=resource_id,
                action=action,
                allowed_subjects=(self._subject_descriptor(acl.subject_type, acl.subject_id),),
                source="acl",
                subject_type=acl.subject_type,
                subject_id=acl.subject_id,
                effect=acl.effect,
                acl_id=acl.id,
            )
            for acl in acl_entries
        ]
        if relationship_rows:
            # Collapse all inbound relationships into one entry whose
            # allowed_subjects is a sorted, de-duplicated descriptor tuple.
            entries.append(
                AccessIndexEntry(
                    resource_type=resource_type,
                    resource_id=resource_id,
                    action=action,
                    allowed_subjects=tuple(
                        sorted(
                            {
                                (
                                    self._subject_descriptor(row.subject_type, row.subject_id)
                                    if row.subject_type != "relationship"
                                    else relationship_descriptor(row)
                                )
                                for row in relationship_rows
                            }
                        )
                    ),
                    source="relationship",
                )
            )

        self._access_index_cache.set(
            tenant_id=tenant_id,
            resource_type=resource_type,
            resource_id=resource_id,
            action=action,
            entries=entries,
        )
        return entries

    def invalidate_resource(self, *, tenant_id: int, resource_type: str, resource_id: str) -> None:
        """Drop both cache layers for one resource (all actions)."""
        self._acl_cache.invalidate(
            tenant_id=tenant_id, resource_type=resource_type, resource_id=resource_id
        )
        self._access_index_cache.invalidate(
            tenant_id=tenant_id, resource_type=resource_type, resource_id=resource_id
        )

    def invalidate_tenant(self, *, tenant_id: int) -> None:
        """Drop the access-index cache for an entire tenant."""
        self._access_index_cache.invalidate_tenant(tenant_id=tenant_id)

    def subject_descriptors(self, user: dict[str, Any]) -> set[str]:
        """Expand a user context dict into every descriptor it can match.

        Covers the user id, each role, each permission action, and any
        relationship dicts carried on the context. Malformed entries are
        skipped silently.
        """
        descriptors: set[str] = set()
        user_id = user.get("id")
        if user_id is not None:
            descriptors.add(self._subject_descriptor("user", str(user_id)))
        roles = user.get("roles", [])
        if isinstance(roles, list):
            descriptors.update(
                self._subject_descriptor("role", str(role)) for role in roles if role is not None
            )
        permissions = user.get("permissions", [])
        if isinstance(permissions, list):
            descriptors.update(
                self._subject_descriptor("permission", str(permission))
                for permission in permissions
                if permission is not None
            )
        relations = user.get("relations", [])
        if isinstance(relations, list):
            for relation in relations:
                if not isinstance(relation, dict):
                    continue
                relation_type = str(relation.get("relation", ""))
                object_type = str(relation.get("object_type", ""))
                object_id = str(relation.get("object_id", ""))
                if relation_type and object_type and object_id:
                    descriptors.add(f"relationship:{relation_type}:{object_type}:{object_id}")
        return descriptors

    def _subject_descriptor(self, subject_type: str, subject_id: str) -> str:
        """Canonical ``type:id`` descriptor used across the index."""
        return f"{subject_type}:{subject_id}"
diff --git a/keynetra/services/attribute_validation.py b/keynetra/services/attribute_validation.py
new file mode 100644
index 0000000..898c476
--- /dev/null
+++ b/keynetra/services/attribute_validation.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from typing import Any
+
+
+class AttributeValidationError(ValueError):
+ pass
+
+
+def _validate_dict(obj: Any, *, name: str, max_keys: int, max_depth: int, depth: int = 0) -> None:
+ if not isinstance(obj, dict):
+ raise AttributeValidationError(f"{name} must be an object")
+ if len(obj) > max_keys:
+ raise AttributeValidationError(f"{name} too large")
+ if depth > max_depth:
+ raise AttributeValidationError(f"{name} too deep")
+ for k, v in obj.items():
+ if not isinstance(k, str):
+ raise AttributeValidationError(f"{name} keys must be strings")
+ if isinstance(v, dict):
+ _validate_dict(v, name=name, max_keys=max_keys, max_depth=max_depth, depth=depth + 1)
+ elif isinstance(v, list):
+ if len(v) > max_keys:
+ raise AttributeValidationError(f"{name} list too large")
+
+
+def validate_user(user: dict[str, Any]) -> None:
+ _validate_dict(user, name="user", max_keys=200, max_depth=5)
+
+
+def validate_resource(resource: dict[str, Any]) -> None:
+ _validate_dict(resource, name="resource", max_keys=200, max_depth=5)
diff --git a/keynetra/services/audit.py b/keynetra/services/audit.py
new file mode 100644
index 0000000..af7cdf2
--- /dev/null
+++ b/keynetra/services/audit.py
@@ -0,0 +1,9 @@
+"""Deprecated compatibility import.
+
+Database-backed audit writing now lives in
+``keynetra.infrastructure.repositories.audit``.
+"""
+
+from keynetra.infrastructure.repositories.audit import SqlAuditRepository as AuditWriter
+
+__all__ = ["AuditWriter"]
diff --git a/keynetra/services/authorization.py b/keynetra/services/authorization.py
new file mode 100644
index 0000000..a6525e7
--- /dev/null
+++ b/keynetra/services/authorization.py
@@ -0,0 +1,644 @@
+"""Authorization orchestration service.
+
+This layer coordinates validation, repository access, caching, and audit
+writing. The decision engine remains pure and receives only explicit input.
+"""
+
+from __future__ import annotations
+
+import logging
+import time
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.config.settings import Settings
+from keynetra.engine.compiled.decision_graph import COMPILED_POLICY_STORE
+from keynetra.engine.keynetra_engine import (
+ AuthorizationDecision,
+ AuthorizationInput,
+ ExplainTraceStep,
+ KeyNetraEngine,
+)
+from keynetra.engine.model_graph.permission_graph import MODEL_GRAPH_STORE, CompiledPermissionGraph
+from keynetra.infrastructure.logging import log_event
+from keynetra.infrastructure.metrics import observe_decision_latency, record_cache_event
+from keynetra.services.access_indexer import AccessIndexer
+from keynetra.services.attribute_validation import validate_resource, validate_user
+from keynetra.services.interfaces import (
+ AccessIndexCache,
+ ACLCache,
+ ACLRepository,
+ AuditRepository,
+ AuthModelRepository,
+ CachedDecision,
+ DecisionCache,
+ PolicyCache,
+ PolicyRepository,
+ RelationshipCache,
+ RelationshipRepository,
+ TenantRepository,
+ UserRepository,
+)
+from keynetra.services.resilience import retry, with_timeout
+from keynetra.services.revisions import RevisionService
+
+
@dataclass(frozen=True)
class AuthorizationResult:
    """Service-level authorization result used by API and tests."""

    # Engine verdict (allow/deny, reason, explain trace) for this request.
    decision: AuthorizationDecision
    # True when the verdict was served from the decision cache.
    cached: bool
    # Tenant revision the verdict was computed against (1 when the tenant
    # record could not be loaded).
    revision: int
+
+
class AuthorizationService:
    """Compose persistence, caches, and the pure engine into one flow.

    The service validates inbound attributes, hydrates the user from
    persistence, builds the pure-engine input (ACL rows, access-index
    entries, compiled permission graph), consults the decision cache,
    evaluates, and writes audit records. Every infrastructure call is
    wrapped in timeout/retry guards so a degraded backend degrades to the
    configured resilience fallback instead of raising to callers.
    """

    def __init__(
        self,
        *,
        settings: Settings,
        tenants: TenantRepository,
        policies: PolicyRepository,
        users: UserRepository,
        relationships: RelationshipRepository,
        audit: AuditRepository,
        policy_cache: PolicyCache,
        relationship_cache: RelationshipCache,
        decision_cache: DecisionCache,
        acl_repository: ACLRepository | None = None,
        acl_cache: ACLCache | None = None,
        access_index_cache: AccessIndexCache | None = None,
        auth_model_repository: AuthModelRepository | None = None,
    ) -> None:
        self._settings = settings
        self._tenants = tenants
        self._policies = policies
        self._users = users
        self._relationships = relationships
        self._audit = audit
        self._policy_cache = policy_cache
        self._relationship_cache = relationship_cache
        self._decision_cache = decision_cache
        self._acl_repository = acl_repository
        self._acl_cache = acl_cache
        self._access_index_cache = access_index_cache
        self._auth_model_repository = auth_model_repository
        self._revisions = RevisionService(tenants)
        # ACL indexing is optional: enabled only when all three
        # collaborators are supplied together.
        if acl_repository is not None and acl_cache is not None and access_index_cache is not None:
            self._access_indexer: AccessIndexer | None = AccessIndexer(
                acl_repository=acl_repository,
                acl_cache=acl_cache,
                access_index_cache=access_index_cache,
                relationships=relationships,
            )
        else:
            self._access_indexer = None
        self._logger = logging.getLogger("keynetra.authorization")

    def authorize(
        self,
        *,
        tenant_key: str,
        principal: dict[str, Any],
        user: dict[str, Any],
        action: str,
        resource: dict[str, Any],
        context: dict[str, Any] | None = None,
        consistency: str = "eventual",
        revision: int | None = None,
        audit: bool = True,
    ) -> AuthorizationResult:
        """Authorize a single (user, action, resource) request.

        Never raises for backend failures; those degrade to the configured
        resilience fallback. ``consistency="fully_consistent"`` bypasses the
        decision cache. ``audit=False`` suppresses the audit write.
        """
        started_at = time.perf_counter()
        # Minimal copy of the raw request so the resilience fallback can
        # still evaluate default policies when tenant lookup or validation
        # fails before a proper input can be built.
        fallback_input = AuthorizationInput(
            user=dict(user),
            action=action,
            resource=dict(resource),
            context=dict(context or {}),
            tenant_key=tenant_key,
        )
        try:
            authorization_input, tenant = self._build_input(
                tenant_key=tenant_key,
                user=user,
                action=action,
                resource=resource,
                context=context or {},
            )
        except Exception as exc:
            decision = self._fallback_decision(
                fallback_input, reason=f"authorization input unavailable: {exc}"
            )
            observe_decision_latency(tenant_key=tenant_key, value=time.perf_counter() - started_at)
            # Revision is unknown without a tenant record; report the default.
            return AuthorizationResult(decision=decision, cached=False, revision=1)

        try:
            cache_key = None
            if consistency.strip().lower() != "fully_consistent":
                cache_key = self._decision_cache.make_key(
                    tenant_key=tenant.tenant_key,
                    policy_version=tenant.policy_version,
                    authorization_input=authorization_input,
                    revision=tenant.revision if revision is None else revision,
                )
                cached = self._safe_cache_get(cache_key)
                if cached is not None:
                    # Latency is recorded once in ``finally``; a previous
                    # extra observation here double-counted cache hits.
                    return AuthorizationResult(
                        decision=self._decision_from_cache(cached),
                        cached=True,
                        revision=tenant.revision,
                    )

            engine = self._build_engine(
                tenant_key=tenant.tenant_key,
                tenant_id=tenant.id,
                policy_version=tenant.policy_version,
            )
            decision = engine.decide(authorization_input)
            if cache_key is not None:
                self._safe_cache_set(cache_key, CachedDecision.from_decision(decision))
            if audit:
                self._safe_audit_write(
                    tenant_id=tenant.id,
                    principal_type=str(principal.get("type")),
                    principal_id=str(principal.get("id")),
                    authorization_input=authorization_input,
                    decision=decision,
                )
            return AuthorizationResult(decision=decision, cached=False, revision=tenant.revision)
        except Exception as exc:
            log_event(
                self._logger,
                event="authorization_fallback",
                tenant_id=tenant.tenant_key,
                principal_type=str(principal.get("type")),
                resilience_mode=self._settings.resilience_mode,
                fallback_behavior=self._settings.resilience_fallback_behavior,
                reason=repr(exc),
            )
            return AuthorizationResult(
                decision=self._fallback_decision(
                    authorization_input, reason="authorization backend unavailable"
                ),
                cached=False,
                revision=tenant.revision,
            )
        finally:
            # Single latency observation for every path through this block.
            observe_decision_latency(tenant_key=tenant_key, value=time.perf_counter() - started_at)

    def authorize_batch(
        self,
        *,
        tenant_key: str,
        principal: dict[str, Any],
        user: dict[str, Any],
        items: list[dict[str, Any]],
        context: dict[str, Any] | None = None,
        consistency: str = "eventual",
        revision: int | None = None,
    ) -> list[AuthorizationResult]:
        """Authorize many action/resource items for one user.

        Tenant lookup, user hydration, and engine construction are shared
        across items; per-item evaluation fans out on a thread pool.
        """
        validate_user(user)
        fallback_context = dict(context or {})
        try:
            tenant = with_timeout(
                lambda: self._tenants.get_or_create(tenant_key),
                timeout_seconds=self._settings.service_timeout_seconds,
            )
            enriched_user = self._hydrate_user(tenant_id=tenant.id, user=user)
            engine = self._build_engine(
                tenant_key=tenant.tenant_key,
                tenant_id=tenant.id,
                policy_version=tenant.policy_version,
            )
        except Exception:
            # Shared setup failed: every item degrades to the fallback.
            return [
                AuthorizationResult(
                    decision=self._fallback_decision(
                        AuthorizationInput(
                            user=dict(user),
                            action=str(item.get("action", "")),
                            resource=dict(item.get("resource") or {}),
                            context=fallback_context,
                            tenant_key=tenant_key,
                        ),
                        reason="authorization backend unavailable",
                    ),
                    cached=False,
                    revision=1,
                )
                for item in items
            ]

        def evaluate_item(item: dict[str, Any]) -> AuthorizationResult:
            # Per-item flow mirrors ``authorize`` minus the shared setup.
            resource = dict(item.get("resource") or {})
            validate_resource(resource)
            authorization_input = self._build_authorization_input(
                tenant_id=tenant.id,
                tenant_key=tenant.tenant_key,
                user=enriched_user,
                action=str(item["action"]),
                resource=resource,
                context=dict(context or {}),
            )
            cache_key = None
            if consistency.strip().lower() != "fully_consistent":
                cache_key = self._decision_cache.make_key(
                    tenant_key=tenant.tenant_key,
                    policy_version=tenant.policy_version,
                    authorization_input=authorization_input,
                    revision=tenant.revision if revision is None else revision,
                )
                cached = self._safe_cache_get(cache_key)
                if cached is not None:
                    return AuthorizationResult(
                        decision=self._decision_from_cache(cached),
                        cached=True,
                        revision=tenant.revision,
                    )
            decision = engine.decide(authorization_input)
            if cache_key is not None:
                self._safe_cache_set(cache_key, CachedDecision.from_decision(decision))
            self._safe_audit_write(
                tenant_id=tenant.id,
                principal_type=str(principal.get("type")),
                principal_id=str(principal.get("id")),
                authorization_input=authorization_input,
                decision=decision,
            )
            return AuthorizationResult(decision=decision, cached=False, revision=tenant.revision)

        with ThreadPoolExecutor(max_workers=min(32, max(1, len(items)))) as pool:
            return list(pool.map(evaluate_item, items))

    def simulate(
        self,
        *,
        tenant_key: str,
        principal: dict[str, Any],
        user: dict[str, Any],
        action: str,
        resource: dict[str, Any],
        context: dict[str, Any] | None = None,
    ) -> AuthorizationDecision:
        """Run a full authorization pass and return only the decision.

        NOTE(review): ``audit=True`` means simulations are written to the
        audit log like real requests — confirm this is intended.
        """
        result = self.authorize(
            tenant_key=tenant_key,
            principal=principal,
            user=user,
            action=action,
            resource=resource,
            context=context,
            audit=True,
        )
        return result.decision

    def get_revision(self, *, tenant_key: str) -> int:
        """Return the tenant's current consistency revision token."""
        return self._revisions.get_revision(tenant_key=tenant_key)

    def _build_input(
        self,
        *,
        tenant_key: str,
        user: dict[str, Any],
        action: str,
        resource: dict[str, Any],
        context: dict[str, Any],
    ) -> tuple[AuthorizationInput, Any]:
        """Validate attributes, resolve the tenant, and build engine input.

        Returns the (input, tenant record) pair; raises when validation or
        tenant resolution fails (handled by the caller's fallback path).
        """
        validate_user(user)
        validate_resource(resource)
        tenant = with_timeout(
            lambda: self._tenants.get_or_create(tenant_key),
            timeout_seconds=self._settings.service_timeout_seconds,
        )
        enriched_user = self._hydrate_user(tenant_id=tenant.id, user=user)
        authorization_input = self._build_authorization_input(
            tenant_id=tenant.id,
            tenant_key=tenant_key,
            user=enriched_user,
            action=action,
            resource=resource,
            context=context,
        )
        return authorization_input, tenant

    def _build_authorization_input(
        self,
        *,
        tenant_id: int,
        tenant_key: str,
        user: dict[str, Any],
        action: str,
        resource: dict[str, Any],
        context: dict[str, Any],
    ) -> AuthorizationInput:
        """Assemble pure-engine input: ACL rows, access-index entries, and
        the tenant's compiled permission graph (compiled lazily from the
        stored model when absent from the in-process store)."""
        acl_entries: tuple[dict[str, Any], ...] = ()
        access_entries: tuple[dict[str, Any], ...] = ()
        permission_graph: CompiledPermissionGraph | None = MODEL_GRAPH_STORE.get(tenant_key)
        if permission_graph is None and self._auth_model_repository is not None:
            model_record = self._auth_model_repository.get_model(tenant_id=tenant_id)
            if model_record is not None:
                # Imported lazily, keeping the compiler out of module import
                # time (presumably also avoids an import cycle — confirm).
                from keynetra.modeling.permission_compiler import compile_authorization_schema

                compiled = compile_authorization_schema(model_record.schema_text)
                permission_graph = CompiledPermissionGraph(tenant_key=tenant_key, model=compiled)
                MODEL_GRAPH_STORE.set(tenant_key, permission_graph)
        if self._access_indexer is not None:
            resource_type, resource_id = self._resource_identity(resource)
            if resource_type and resource_id:
                entries = self._access_indexer.build_resource_index(
                    tenant_id=tenant_id,
                    resource_type=resource_type,
                    resource_id=resource_id,
                    action=action,
                )
                access_entries = tuple(entry.__dict__ for entry in entries)
                # ACL-sourced entries additionally expose their row id.
                acl_entries = tuple(
                    {**dict(entry.__dict__), "id": entry.acl_id}
                    for entry in entries
                    if entry.source == "acl"
                )
        return AuthorizationInput(
            user=user,
            action=action,
            resource=dict(resource),
            context=dict(context),
            tenant_key=tenant_key,
            acl_entries=acl_entries,
            access_index_entries=access_entries,
            permission_graph=permission_graph,
        )

    def _hydrate_user(self, *, tenant_id: int, user: dict[str, Any]) -> dict[str, Any]:
        """Merge persisted roles/permissions and relationship edges into ``user``.

        The caller-supplied ``permissions`` list is preserved under
        ``direct_permissions`` so persisted role permissions cannot mask it.
        Relationship edges are read through the relationship cache.
        """
        enriched_user = dict(user)
        direct_permissions = enriched_user.get("permissions", [])
        if isinstance(direct_permissions, list):
            enriched_user["direct_permissions"] = list(direct_permissions)
        user_id = enriched_user.get("id")
        if isinstance(user_id, int):
            persisted_user = with_timeout(
                lambda: self._users.get_user_context(user_id),
                timeout_seconds=self._settings.service_timeout_seconds,
            )
            if persisted_user is not None:
                enriched_user["roles"] = list(persisted_user.get("roles", []))
                enriched_user["role_permissions"] = list(persisted_user.get("permissions", []))
                enriched_user.setdefault("role", persisted_user.get("role"))
            relationships = self._safe_relationship_cache_get(
                tenant_id=tenant_id, subject_type="user", subject_id=str(user_id)
            )
            if relationships is None:
                relationships = with_timeout(
                    lambda: self._relationships.list_for_subject(
                        tenant_id=tenant_id,
                        subject_type="user",
                        subject_id=str(user_id),
                    ),
                    timeout_seconds=self._settings.service_timeout_seconds,
                )
                self._safe_relationship_cache_set(
                    tenant_id=tenant_id,
                    subject_type="user",
                    subject_id=str(user_id),
                    relationships=relationships,
                )
            enriched_user["relations"] = [relationship.to_dict() for relationship in relationships]
        return enriched_user

    def _build_engine(
        self, *, tenant_key: str, tenant_id: int, policy_version: int
    ) -> KeyNetraEngine:
        """Build a decision engine from cached or persisted policies.

        Falls back to the settings-defined policy files when the tenant has
        no persisted policies. The compiled graph is published to the shared
        store either way.
        """
        cached = self._safe_policy_cache_get(tenant_key, policy_version)
        if cached is None:
            cached = with_timeout(
                lambda: self._policies.list_current_policies(tenant_id=tenant_id),
                timeout_seconds=self._settings.service_timeout_seconds,
            )
            if not cached:
                policies = self._settings.load_policies()
                engine = KeyNetraEngine(policies, strategy="first_match")
                COMPILED_POLICY_STORE.set(tenant_key, policy_version, engine._compiled_graph)
                return engine
            # Populate the policy cache only on a miss; re-setting it on
            # every hit was redundant write traffic.
            self._safe_policy_cache_set(tenant_key, policy_version, cached)
        policies = [policy.definition for policy in cached]
        engine = KeyNetraEngine(policies, strategy="first_match")
        COMPILED_POLICY_STORE.set(tenant_key, policy_version, engine._compiled_graph)
        return engine

    def _decision_from_cache(self, cached: CachedDecision) -> AuthorizationDecision:
        """Rebuild a full ``AuthorizationDecision`` from its cached form.

        Unknown cached ``decision`` strings are normalized to "deny" unless
        the entry is allowed.
        """
        return AuthorizationDecision(
            allowed=cached.allowed,
            decision=(
                "allow"
                if cached.allowed
                else "deny" if cached.decision not in {"allow", "deny"} else cached.decision
            ),
            reason=cached.reason,
            policy_id=cached.policy_id,
            explain_trace=tuple(
                ExplainTraceStep(
                    step=str(item.get("step", "cache")),
                    outcome=str(item.get("outcome", "cached")),
                    detail=str(item.get("detail", "served from decision cache")),
                    # Preserve None; stringify every other policy id.
                    policy_id=(
                        item.get("policy_id")
                        if item.get("policy_id") is None
                        else str(item.get("policy_id"))
                    ),
                )
                for item in cached.explain_trace
            ),
            matched_policies=tuple(cached.matched_policies),
            failed_conditions=tuple(cached.failed_conditions),
        )

    def _safe_deny(self, *, reason: str) -> AuthorizationDecision:
        """Build a terminal deny decision carrying ``reason``."""
        return AuthorizationDecision(
            allowed=False,
            decision="deny",
            reason=reason,
            policy_id=None,
            explain_trace=(ExplainTraceStep(step="final", outcome="deny", detail=reason),),
            matched_policies=(),
            failed_conditions=(reason,),
        )

    def _safe_allow(self, *, reason: str) -> AuthorizationDecision:
        """Build a terminal allow decision carrying ``reason``."""
        return AuthorizationDecision(
            allowed=True,
            decision="allow",
            reason=reason,
            policy_id=None,
            explain_trace=(ExplainTraceStep(step="final", outcome="allow", detail=reason),),
            matched_policies=(),
            failed_conditions=(),
        )

    def _fallback_decision(
        self, authorization_input: AuthorizationInput, *, reason: str
    ) -> AuthorizationDecision:
        """Produce a decision when the normal pipeline is unavailable.

        Behavior is governed by settings: ``default_policy_eval`` evaluates
        the settings-defined policies in-process (tagging the trace with a
        fallback step); otherwise fail-open allows and fail-closed denies.
        """
        behavior = (self._settings.resilience_fallback_behavior or "static").strip().lower()
        if behavior == "default_policy_eval":
            try:
                decision = KeyNetraEngine(
                    self._settings.load_policies(), strategy="first_match"
                ).decide(authorization_input)
                return AuthorizationDecision(
                    allowed=decision.allowed,
                    decision=decision.decision,
                    reason=decision.reason,
                    policy_id=decision.policy_id,
                    explain_trace=tuple(
                        list(decision.explain_trace)
                        + [
                            ExplainTraceStep(
                                step="resilience_fallback",
                                outcome="fallback",
                                detail=reason,
                                policy_id=decision.policy_id,
                            )
                        ]
                    ),
                    matched_policies=decision.matched_policies,
                    failed_conditions=decision.failed_conditions,
                )
            except Exception as exc:
                # Default-policy evaluation failed too: fall through to the
                # static mode below.
                log_event(
                    self._logger, event="resilience_default_policy_eval_failed", reason=repr(exc)
                )

        if (self._settings.resilience_mode or "fail_closed").strip().lower() == "fail_open":
            return self._safe_allow(reason=reason)
        return self._safe_deny(reason=reason)

    def _safe_cache_get(self, key: str) -> CachedDecision | None:
        """Read the decision cache; failures are logged and treated as a miss."""
        try:
            cached = with_timeout(
                lambda: self._decision_cache.get(key),
                timeout_seconds=self._settings.service_timeout_seconds,
            )
        except Exception as exc:
            record_cache_event(cache_name="decision", outcome="fallback")
            log_event(
                self._logger, event="cache_get_failed", cache_name="decision", reason=repr(exc)
            )
            return None
        record_cache_event(cache_name="decision", outcome="hit" if cached is not None else "miss")
        return cached

    def _safe_cache_set(self, key: str, value: CachedDecision) -> None:
        """Write the decision cache with retry; failures are logged, not raised."""
        try:
            retry(
                lambda: with_timeout(
                    lambda: self._decision_cache.set(
                        key, value, self._settings.decision_cache_ttl_seconds
                    ),
                    timeout_seconds=self._settings.service_timeout_seconds,
                ),
                attempts=self._settings.critical_retry_attempts,
            )
        except Exception as exc:
            log_event(
                self._logger, event="cache_set_failed", cache_name="decision", reason=repr(exc)
            )

    def _safe_policy_cache_get(self, tenant_key: str, policy_version: int) -> list[Any] | None:
        """Read the policy cache; failures are logged and treated as a miss."""
        try:
            cached = with_timeout(
                lambda: self._policy_cache.get(tenant_key, policy_version),
                timeout_seconds=self._settings.service_timeout_seconds,
            )
        except Exception as exc:
            record_cache_event(cache_name="policy", outcome="fallback")
            log_event(self._logger, event="cache_get_failed", cache_name="policy", reason=repr(exc))
            return None
        record_cache_event(cache_name="policy", outcome="hit" if cached is not None else "miss")
        return cached

    def _safe_policy_cache_set(
        self, tenant_key: str, policy_version: int, cached: list[Any]
    ) -> None:
        """Write the policy cache with retry; failures are logged, not raised."""
        try:
            retry(
                lambda: with_timeout(
                    lambda: self._policy_cache.set(tenant_key, policy_version, cached),
                    timeout_seconds=self._settings.service_timeout_seconds,
                ),
                attempts=self._settings.critical_retry_attempts,
            )
        except Exception as exc:
            log_event(self._logger, event="cache_set_failed", cache_name="policy", reason=repr(exc))

    def _safe_relationship_cache_get(
        self, *, tenant_id: int, subject_type: str, subject_id: str
    ) -> list[Any] | None:
        """Read the relationship cache; failures are logged and treated as a miss."""
        try:
            cached = with_timeout(
                lambda: self._relationship_cache.get(
                    tenant_id=tenant_id, subject_type=subject_type, subject_id=subject_id
                ),
                timeout_seconds=self._settings.service_timeout_seconds,
            )
        except Exception as exc:
            record_cache_event(cache_name="relationship", outcome="fallback")
            log_event(
                self._logger, event="cache_get_failed", cache_name="relationship", reason=repr(exc)
            )
            return None
        record_cache_event(
            cache_name="relationship", outcome="hit" if cached is not None else "miss"
        )
        return cached

    def _safe_relationship_cache_set(
        self, *, tenant_id: int, subject_type: str, subject_id: str, relationships: list[Any]
    ) -> None:
        """Write the relationship cache with retry; failures are logged, not raised."""
        try:
            retry(
                lambda: with_timeout(
                    lambda: self._relationship_cache.set(
                        tenant_id=tenant_id,
                        subject_type=subject_type,
                        subject_id=subject_id,
                        relationships=relationships,
                    ),
                    timeout_seconds=self._settings.service_timeout_seconds,
                ),
                attempts=self._settings.critical_retry_attempts,
            )
        except Exception as exc:
            log_event(
                self._logger, event="cache_set_failed", cache_name="relationship", reason=repr(exc)
            )

    def _resource_identity(self, resource: dict[str, Any]) -> tuple[str, str]:
        """Extract (resource_type, resource_id) from a resource payload.

        Accepts several aliases for the type field; empty strings signal an
        unidentifiable resource (the caller then skips ACL indexing).
        """
        resource_type = str(
            resource.get("resource_type")
            or resource.get("type")
            or resource.get("kind")
            or resource.get("entity_type")
            or ""
        )
        resource_id = str(resource.get("resource_id") or resource.get("id") or "")
        return resource_type, resource_id

    def _safe_audit_write(self, **kwargs: Any) -> None:
        """Write an audit record with retry; failures are logged, not raised."""
        try:
            retry(
                lambda: with_timeout(
                    lambda: self._audit.write(**kwargs),
                    timeout_seconds=self._settings.service_timeout_seconds,
                ),
                attempts=self._settings.critical_retry_attempts,
            )
        except Exception as exc:
            log_event(self._logger, event="audit_write_failed", reason=repr(exc))
diff --git a/keynetra/services/doctor.py b/keynetra/services/doctor.py
new file mode 100644
index 0000000..8360d1b
--- /dev/null
+++ b/keynetra/services/doctor.py
@@ -0,0 +1,139 @@
+"""Production-readiness checks for the KeyNetra core service.
+
+These checks stay in the services layer because they orchestrate infrastructure
+dependencies such as the database, Redis, and Alembic migration state.
+"""
+
+from __future__ import annotations
+
+import os
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from typing import Any
+
+from sqlalchemy import text
+from sqlalchemy.exc import SQLAlchemyError
+
+from keynetra.config.redis_client import get_redis
+from keynetra.config.settings import Settings
+from keynetra.infrastructure.storage.session import create_engine_for_url
+
+
@dataclass(frozen=True)
class DoctorCheck:
    """One production-readiness validation result."""

    # Stable machine-readable check identifier (e.g. "database", "redis").
    name: str
    # True when the check passed.
    ok: bool
    # Human-readable summary of the outcome.
    message: str
    # Structured evidence backing the verdict (observed values or errors).
    details: dict[str, Any]
+
+
def run_core_doctor(settings: Settings) -> dict[str, Any]:
    """Run deterministic readiness checks for the core deployment."""

    results: list[DoctorCheck] = []
    results.append(_check_env(settings))
    results.append(_check_database(settings))
    results.append(_check_redis())
    results.append(_check_migrations(settings))
    overall = all(result.ok for result in results)
    return {
        "service": "core",
        "ok": overall,
        "checks": [asdict(result) for result in results],
    }
+
+
def _check_env(settings: Settings) -> DoctorCheck:
    """Validate that the required runtime configuration is explicitly set."""

    required_env = {
        variable: bool(os.environ.get(variable))
        for variable in ("KEYNETRA_DATABASE_URL", "KEYNETRA_REDIS_URL")
    }
    # Auth counts as configured when any of: hashed API keys, a non-default
    # JWT secret, or an OIDC JWKS endpoint is present.
    has_key_hashes = bool(settings.parsed_api_key_hashes())
    has_custom_jwt_secret = settings.jwt_secret != "change-me"
    has_oidc = bool(settings.oidc_jwks_url)
    auth_configured = has_key_hashes or has_custom_jwt_secret or has_oidc
    ok = auth_configured and all(required_env.values())
    if ok:
        message = "required environment is configured"
    else:
        message = "missing required environment configuration"
    return DoctorCheck(
        name="env_variables",
        ok=ok,
        message=message,
        details={**required_env, "auth_configured": auth_configured},
    )
+
+
def _check_database(settings: Settings) -> DoctorCheck:
    """Verify that the configured primary database accepts queries."""

    try:
        engine = create_engine_for_url(settings.database_url)
        with engine.connect() as connection:
            connection.execute(text("SELECT 1"))
    except SQLAlchemyError as exc:
        return DoctorCheck(
            name="database",
            ok=False,
            message="database unreachable",
            details={"error": repr(exc)},
        )
    return DoctorCheck(
        name="database",
        ok=True,
        message="database reachable",
        details={"database_url": settings.database_url},
    )
+
+
def _check_redis() -> DoctorCheck:
    """Verify that the configured Redis endpoint responds to ping."""

    client = get_redis()
    if client is None:
        return DoctorCheck(
            name="redis", ok=False, message="redis client not configured", details={}
        )
    try:
        client.ping()
    except Exception as exc:
        return DoctorCheck(
            name="redis", ok=False, message="redis unreachable", details={"error": repr(exc)}
        )
    return DoctorCheck(name="redis", ok=True, message="redis reachable", details={})
+
+
def _check_migrations(settings: Settings) -> DoctorCheck:
    """Verify that the database is at the current Alembic head revision."""

    # Imported lazily so the doctor module imports even when Alembic is
    # unavailable in some environments — TODO confirm that is the intent.
    from alembic.config import Config
    from alembic.script import ScriptDirectory

    # alembic.ini and the migration scripts live two directories above this
    # module (the package root) — assumes the repo layout stays that way.
    core_dir = Path(__file__).resolve().parents[2]
    config = Config(str(core_dir / "alembic.ini"))
    config.set_main_option("script_location", str(core_dir / "alembic"))
    script = ScriptDirectory.from_config(config)
    # Sorted so multi-head repositories compare deterministically below.
    expected_heads = sorted(script.get_heads())
    try:
        engine = create_engine_for_url(settings.database_url)
        with engine.connect() as connection:
            rows = connection.execute(text("SELECT version_num FROM alembic_version")).fetchall()
        applied_heads = sorted(str(row[0]) for row in rows)
    except Exception as exc:
        # Covers both connection failures and a missing alembic_version table.
        return DoctorCheck(
            name="migrations",
            ok=False,
            message="could not read migration state",
            details={"error": repr(exc)},
        )

    ok = applied_heads == expected_heads
    return DoctorCheck(
        name="migrations",
        ok=ok,
        message="migrations applied" if ok else "database is not at migration head",
        details={"expected_heads": expected_heads, "applied_heads": applied_heads},
    )
diff --git a/keynetra/services/impact_analysis.py b/keynetra/services/impact_analysis.py
new file mode 100644
index 0000000..ea6cb53
--- /dev/null
+++ b/keynetra/services/impact_analysis.py
@@ -0,0 +1,113 @@
+"""Policy impact analysis helpers."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.engine.keynetra_engine import AuthorizationInput, KeyNetraEngine
+from keynetra.services.interfaces import (
+ PolicyRepository,
+ RelationshipRepository,
+ TenantRepository,
+ UserRepository,
+)
+from keynetra.services.policy_dsl import dsl_to_policy
+
+
@dataclass(frozen=True)
class ImpactResult:
    """Outcome of a policy-change simulation across tenant users."""

    # User ids that would be newly allowed after the change.
    gained_access: list[int]
    # User ids that would lose access after the change.
    lost_access: list[int]
+
+
class ImpactAnalyzer:
    """Estimate which users gain or lose access under a proposed policy change.

    NOTE(review): this evaluates one decision per (user, candidate resource)
    pair for both the before and after engines, so cost grows with tenant
    size — presumably intended for admin tooling, not the hot path; confirm.
    """

    def __init__(
        self,
        *,
        tenants: TenantRepository,
        policies: PolicyRepository,
        users: UserRepository,
        relationships: RelationshipRepository,
    ) -> None:
        self._tenants = tenants
        self._policies = policies
        self._users = users
        self._relationships = relationships

    def analyze_policy_change(self, *, tenant_key: str, policy_change: str) -> ImpactResult:
        """Diff allow/deny outcomes before vs. after adding ``policy_change``.

        ``policy_change`` is a DSL string parsed by ``dsl_to_policy``; the
        "after" engine is the tenant's current policy set plus the parsed
        policy. Returns sorted user-id lists of gained and lost access.
        """
        tenant = self._tenants.get_or_create(tenant_key)
        current_policies = self._policies.list_current_policies(tenant_id=tenant.id)
        changed_policy = dsl_to_policy(policy_change)
        before_engine = KeyNetraEngine([policy.definition for policy in current_policies])
        after_engine = KeyNetraEngine(
            [policy.definition for policy in current_policies]
            + [
                {
                    "action": changed_policy["action"],
                    "effect": changed_policy["effect"],
                    "priority": changed_policy["priority"],
                    "conditions": changed_policy["conditions"],
                    # The DSL keeps its policy key inside conditions; reuse it
                    # as the synthetic policy id — TODO confirm.
                    "policy_id": changed_policy["conditions"].get("policy_key"),
                }
            ]
        )

        gained: set[int] = set()
        lost: set[int] = set()
        # list_user_ids is an optional repository extension; without it the
        # sweep is empty and the result reports no impact.
        list_user_ids = getattr(self._users, "list_user_ids", None)
        user_ids = list_user_ids(tenant_id=tenant.id) if callable(list_user_ids) else []
        for user_id in user_ids:
            context = self._users.get_user_context(user_id) or {
                "id": user_id,
                "roles": [],
                "permissions": [],
            }
            user = self._enrich_user_with_relationships(tenant_id=tenant.id, user=context)
            for resource in self._candidate_resources(tenant_id=tenant.id, user_id=user_id):
                before = before_engine.decide(
                    AuthorizationInput(
                        user=user, action=changed_policy["action"], resource=resource
                    )
                )
                after = after_engine.decide(
                    AuthorizationInput(
                        user=user, action=changed_policy["action"], resource=resource
                    )
                )
                # A single flipped resource is enough to flag the user.
                if not before.allowed and after.allowed:
                    gained.add(user_id)
                if before.allowed and not after.allowed:
                    lost.add(user_id)
        return ImpactResult(gained_access=sorted(gained), lost_access=sorted(lost))

    def _candidate_resources(self, *, tenant_id: int, user_id: int) -> list[dict[str, Any]]:
        """Build a heuristic resource sample for one user: a synthetic owned
        document plus every object the user has a relationship edge to."""
        resources: list[dict[str, Any]] = [
            {"resource_type": "document", "resource_id": f"user-{user_id}", "owner_id": user_id}
        ]
        relationships = self._relationships.list_for_subject(
            tenant_id=tenant_id, subject_type="user", subject_id=str(user_id)
        )
        for relation in relationships:
            resources.append(
                {
                    "resource_type": relation.object_type,
                    "resource_id": relation.object_id,
                    # NOTE(review): related objects are tagged with this
                    # user's id as owner_id, which may over-approximate
                    # ownership — confirm intended.
                    "owner_id": user_id,
                }
            )
        return resources

    def _enrich_user_with_relationships(
        self, *, tenant_id: int, user: dict[str, Any]
    ) -> dict[str, Any]:
        """Return a copy of ``user`` with relationship edges (as dicts) under
        ``relations``; non-int ids are left unenriched."""
        enriched = dict(user)
        user_id = enriched.get("id")
        if isinstance(user_id, int):
            enriched["relations"] = [
                relation.to_dict()
                for relation in self._relationships.list_for_subject(
                    tenant_id=tenant_id, subject_type="user", subject_id=str(user_id)
                )
            ]
        return enriched
diff --git a/keynetra/services/interfaces.py b/keynetra/services/interfaces.py
new file mode 100644
index 0000000..4872c0e
--- /dev/null
+++ b/keynetra/services/interfaces.py
@@ -0,0 +1,447 @@
+"""Service-layer contracts.
+
+Services orchestrate authorization flows against these interfaces. Concrete
+database, cache, and external integrations belong in infrastructure.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Protocol
+
+from keynetra.engine.keynetra_engine import (
+ AuthorizationDecision,
+ AuthorizationInput,
+ PolicyDefinition,
+)
+
+
@dataclass(frozen=True)
class TenantRecord:
    """Tenant data needed by orchestration services."""

    id: int
    # String key used by repositories and caches to address the tenant
    # (see TenantRepository.get_or_create and PolicyCache.get/invalidate).
    tenant_key: str
    # Bumped via TenantRepository.bump_policy_version after policy writes;
    # keys versioned policy caches.
    policy_version: int
    # Bumped via TenantRepository.bump_revision; new records start at 1.
    revision: int = 1
+
+
+@dataclass(frozen=True)
+class RelationshipRecord:
+ """Explicit relationship edge supplied to the engine as input."""
+
+ subject_type: str
+ subject_id: str
+ relation: str
+ object_type: str
+ object_id: str
+
+ def to_dict(self) -> dict[str, str]:
+ return {
+ "subject_type": self.subject_type,
+ "subject_id": self.subject_id,
+ "relation": self.relation,
+ "object_type": self.object_type,
+ "object_id": self.object_id,
+ }
+
+
@dataclass(frozen=True)
class ACLRecord:
    """Explicit resource ACL row supplied to the engine."""

    id: int
    tenant_id: int
    subject_type: str
    subject_id: str
    resource_type: str
    resource_id: str
    action: str
    # Either "allow" or "deny" — presumably; confirm against the repository
    # writers, the record itself does not constrain the value.
    effect: str
    # Creation timestamp from persistence; may be a datetime, a string, or None.
    created_at: Any | None = None

    def to_dict(self) -> dict[str, Any]:
        """Serialize the record to a plain dict for transport/caching."""
        return {
            "id": self.id,
            "tenant_id": self.tenant_id,
            "subject_type": self.subject_type,
            "subject_id": self.subject_id,
            "resource_type": self.resource_type,
            "resource_id": self.resource_id,
            "action": self.action,
            "effect": self.effect,
            # Duck-typed: datetimes become ISO-8601 strings; str/None values
            # (no isoformat attribute) pass through unchanged.
            "created_at": (
                self.created_at.isoformat()
                if hasattr(self.created_at, "isoformat")
                else self.created_at
            ),
        }
+
+
+@dataclass(frozen=True)
+class PolicyRecord:
+ """Policy data loaded from persistence for engine evaluation."""
+
+ id: int
+ definition: PolicyDefinition
+
+
+@dataclass(frozen=True)
+class PolicyMutationResult:
+ """Service-facing result for policy writes."""
+
+ id: int
+ action: str
+ effect: str
+ priority: int
+ conditions: dict[str, Any]
+
+
+@dataclass(frozen=True)
+class PolicyListItem:
+ """Projected policy data for API reads."""
+
+ id: int
+ action: str
+ effect: str
+ priority: int
+ conditions: dict[str, Any]
+
+
+@dataclass(frozen=True)
+class AuditListItem:
+ id: int
+ principal_type: str
+ principal_id: str
+ user: dict[str, Any]
+ action: str
+ resource: dict[str, Any]
+ decision: str
+ matched_policies: list[Any]
+ reason: str | None
+ evaluated_rules: list[Any]
+ failed_conditions: list[Any]
+ created_at: Any
+
+
+@dataclass(frozen=True)
+class AuthModelRecord:
+ """Stored authorization model for a tenant."""
+
+ id: int
+ tenant_id: int
+ schema_text: str
+ schema_json: dict[str, Any]
+ compiled_json: dict[str, Any]
+ created_at: Any | None = None
+ updated_at: Any | None = None
+
+
@dataclass(frozen=True)
class CachedDecision:
    """Cached authorization response owned by infrastructure."""

    allowed: bool
    decision: str
    reason: str | None
    policy_id: str | None
    matched_policies: list[str] = field(default_factory=list)
    # Explain steps flattened to plain dicts so the value is serializable.
    explain_trace: list[dict[str, Any]] = field(default_factory=list)
    failed_conditions: list[str] = field(default_factory=list)

    @classmethod
    def from_decision(cls, decision: AuthorizationDecision) -> "CachedDecision":
        """Snapshot an engine decision into a cache-safe value.

        Copies the mutable sequences and renders each explain-trace step via
        ``to_dict()`` so the cached value shares no state with the engine
        result.
        """
        return cls(
            allowed=decision.allowed,
            decision=decision.decision,
            reason=decision.reason,
            policy_id=decision.policy_id,
            matched_policies=list(decision.matched_policies),
            explain_trace=[step.to_dict() for step in decision.explain_trace],
            failed_conditions=list(decision.failed_conditions),
        )
+
+
+class TenantRepository(Protocol):
+ """Persistence boundary for tenant data."""
+
+ def get_or_create(self, tenant_key: str) -> TenantRecord: ...
+
+ def get_by_id(self, tenant_id: int) -> TenantRecord | None: ...
+
+ def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord: ...
+
+ def bump_revision(self, tenant: TenantRecord) -> TenantRecord: ...
+
+
+class PolicyRepository(Protocol):
+ """Persistence boundary for policy storage."""
+
+ def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]: ...
+
+ def list_current_policy_views(self, *, tenant_id: int) -> list[PolicyListItem]: ...
+
+ def list_current_policy_page(
+ self,
+ *,
+ tenant_id: int,
+ limit: int,
+ cursor: dict[str, Any] | None,
+ ) -> tuple[list[PolicyListItem], str | None]: ...
+
+ def create_policy_version(
+ self,
+ *,
+ tenant_id: int,
+ policy_key: str,
+ action: str,
+ effect: str,
+ priority: int,
+ conditions: dict[str, Any],
+ created_by: str | None,
+ ) -> PolicyMutationResult: ...
+
+ def rollback_policy(
+ self, *, tenant_id: int, policy_key: str, version: int
+ ) -> tuple[str, int]: ...
+
+ def delete_policy(self, *, tenant_id: int, policy_key: str) -> None: ...
+
+
+class AuthModelRepository(Protocol):
+ """Persistence boundary for authorization modeling schemas."""
+
+ def get_model(self, *, tenant_id: int) -> AuthModelRecord | None: ...
+
+ def upsert_model(
+ self,
+ *,
+ tenant_id: int,
+ schema_text: str,
+ schema_json: dict[str, Any],
+ compiled_json: dict[str, Any],
+ ) -> AuthModelRecord: ...
+
+
+class UserRepository(Protocol):
+ """Persistence boundary for user context lookup."""
+
+ def get_user_context(self, user_id: int) -> dict[str, Any] | None: ...
+
+ def list_user_ids(self, *, tenant_id: int) -> list[int]: ...
+
+
+class RelationshipRepository(Protocol):
+ """Persistence boundary for relationship lookup and writes."""
+
+ def list_for_subject(
+ self, *, tenant_id: int, subject_type: str, subject_id: str
+ ) -> list[RelationshipRecord]: ...
+
+ def list_for_subject_page(
+ self,
+ *,
+ tenant_id: int,
+ subject_type: str,
+ subject_id: str,
+ limit: int,
+ cursor: dict[str, Any] | None,
+ ) -> tuple[list[RelationshipRecord], str | None]: ...
+
+ def list_for_object(
+ self,
+ *,
+ tenant_id: int,
+ object_type: str,
+ object_id: str,
+ ) -> list[RelationshipRecord]: ...
+
+ def create(
+ self,
+ *,
+ tenant_id: int,
+ subject_type: str,
+ subject_id: str,
+ relation: str,
+ object_type: str,
+ object_id: str,
+ ) -> int: ...
+
+
+class AuditRepository(Protocol):
+ """Persistence boundary for audit writes."""
+
+ def write(
+ self,
+ *,
+ tenant_id: int,
+ principal_type: str,
+ principal_id: str,
+ authorization_input: AuthorizationInput,
+ decision: AuthorizationDecision,
+ ) -> None: ...
+
+ def list_page(
+ self,
+ *,
+ tenant_id: int,
+ limit: int,
+ cursor: dict[str, Any] | None,
+ user_id: str | None,
+ resource_id: str | None,
+ decision: str | None,
+ start_time: Any | None,
+ end_time: Any | None,
+ ) -> tuple[list[AuditListItem], str | None]: ...
+
+
+class PolicyCache(Protocol):
+ """Cache boundary for policy definitions."""
+
+ def get(self, tenant_key: str, policy_version: int) -> list[PolicyRecord] | None: ...
+
+ def set(self, tenant_key: str, policy_version: int, policies: list[PolicyRecord]) -> None: ...
+
+ def invalidate(self, tenant_key: str) -> None: ...
+
+
+class RelationshipCache(Protocol):
+ """Cache boundary for relationship lookups."""
+
+ def get(
+ self, *, tenant_id: int, subject_type: str, subject_id: str
+ ) -> list[RelationshipRecord] | None: ...
+
+ def set(
+ self,
+ *,
+ tenant_id: int,
+ subject_type: str,
+ subject_id: str,
+ relationships: list[RelationshipRecord],
+ ) -> None: ...
+
+
+class ACLRepository(Protocol):
+ """Persistence boundary for ACL lookup and writes."""
+
+ def create_acl_entry(
+ self,
+ *,
+ tenant_id: int,
+ subject_type: str,
+ subject_id: str,
+ resource_type: str,
+ resource_id: str,
+ action: str,
+ effect: str,
+ ) -> int: ...
+
+ def list_resource_acl(
+ self, *, tenant_id: int, resource_type: str, resource_id: str
+ ) -> list[ACLRecord]: ...
+
+ def get_acl_entry(self, *, tenant_id: int, acl_id: int) -> ACLRecord | None: ...
+
+ def find_matching_acl(
+ self,
+ *,
+ tenant_id: int,
+ resource_type: str,
+ resource_id: str,
+ action: str,
+ ) -> list[ACLRecord]: ...
+
+ def delete_acl_entry(self, *, tenant_id: int, acl_id: int) -> None: ...
+
+
+class ACLCache(Protocol):
+ """Cache boundary for ACL lookups."""
+
+ def get(
+ self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
+ ) -> list[ACLRecord] | None: ...
+
+ def set(
+ self,
+ *,
+ tenant_id: int,
+ resource_type: str,
+ resource_id: str,
+ action: str,
+ acl_entries: list[ACLRecord],
+ ) -> None: ...
+
+ def invalidate(self, *, tenant_id: int, resource_type: str, resource_id: str) -> None: ...
+
+
+@dataclass(frozen=True)
+class AccessIndexEntry:
+ """Cached access index entry for resource/action lookup."""
+
+ resource_type: str
+ resource_id: str
+ action: str
+ allowed_subjects: tuple[str, ...]
+ source: str
+ subject_type: str | None = None
+ subject_id: str | None = None
+ effect: str | None = None
+ acl_id: int | None = None
+
+
+class AccessIndexCache(Protocol):
+ """Cache boundary for distributed access indexing."""
+
+ def get(
+ self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
+ ) -> list[AccessIndexEntry] | None: ...
+
+ def set(
+ self,
+ *,
+ tenant_id: int,
+ resource_type: str,
+ resource_id: str,
+ action: str,
+ entries: list[AccessIndexEntry],
+ ) -> None: ...
+
+ def invalidate(self, *, tenant_id: int, resource_type: str, resource_id: str) -> None: ...
+
+ def invalidate_tenant(self, *, tenant_id: int) -> None: ...
+
+ def invalidate_global(self) -> None: ...
+
+
+class RoleBindingRepository(Protocol):
+ """Optional persistence boundary for role binding state changes."""
+
+ def list_user_ids(self, *, tenant_id: int) -> list[int]: ...
+
+ def invalidate(self, *, tenant_id: int, subject_type: str, subject_id: str) -> None: ...
+
+
+class DecisionCache(Protocol):
+ """Cache boundary for authorization decisions."""
+
+ def get(self, key: str) -> CachedDecision | None: ...
+
+ def set(self, key: str, value: CachedDecision, ttl_seconds: int) -> None: ...
+
+ def make_key(
+ self,
+ *,
+ tenant_key: str,
+ policy_version: int,
+ authorization_input: AuthorizationInput,
+ revision: int | None = None,
+ ) -> str: ...
+
+ def bump_namespace(self, tenant_key: str) -> int: ...
+
+
+class PolicyEventPublisher(Protocol):
+ """External system boundary for policy invalidation fanout."""
+
+ def publish_policy_update(self, *, tenant_key: str, policy_version: int) -> None: ...
diff --git a/keynetra/services/policies.py b/keynetra/services/policies.py
new file mode 100644
index 0000000..74c1798
--- /dev/null
+++ b/keynetra/services/policies.py
@@ -0,0 +1,143 @@
+"""Policy orchestration service."""
+
+from __future__ import annotations
+
+from keynetra.engine.compiled.decision_graph import COMPILED_POLICY_STORE
+from keynetra.engine.compiled.policy_compiler import compile_policy_graph
+from keynetra.engine.keynetra_engine import ConditionEvaluator
+from keynetra.services.interfaces import (
+ DecisionCache,
+ PolicyCache,
+ PolicyEventPublisher,
+ PolicyMutationResult,
+ PolicyRepository,
+ TenantRepository,
+)
+from keynetra.services.revisions import RevisionService
+
+
class PolicyService:
    """Orchestrates policy persistence and cache invalidation.

    Every policy write (create, rollback, delete) is followed by the same
    post-write sequence — see ``_refresh_after_write`` — which bumps the
    tenant policy version, drops cached policies/decisions, bumps the tenant
    revision, recompiles the decision graph, and fans an invalidation event
    out to other nodes.
    """

    def __init__(
        self,
        *,
        tenants: TenantRepository,
        policies: PolicyRepository,
        policy_cache: PolicyCache,
        decision_cache: DecisionCache,
        publisher: PolicyEventPublisher,
    ) -> None:
        self._tenants = tenants
        self._policies = policies
        self._policy_cache = policy_cache
        self._decision_cache = decision_cache
        self._publisher = publisher
        self._revisions = RevisionService(tenants)

    def list_policies(self, *, tenant_key: str) -> list[dict[str, object]]:
        """Return all current policies for the tenant as plain dicts."""
        tenant = self._tenants.get_or_create(tenant_key)
        return [
            item.__dict__ for item in self._policies.list_current_policy_views(tenant_id=tenant.id)
        ]

    def list_policies_page(
        self,
        *,
        tenant_key: str,
        limit: int,
        cursor: dict[str, object] | None,
    ) -> tuple[list[dict[str, object]], str | None]:
        """Return one page of current policies plus an opaque next-page cursor."""
        tenant = self._tenants.get_or_create(tenant_key)
        items, next_cursor = self._policies.list_current_policy_page(
            tenant_id=tenant.id, limit=limit, cursor=cursor
        )
        return [item.__dict__ for item in items], next_cursor

    def create_policy(
        self,
        *,
        tenant_key: str,
        policy_key: str,
        action: str,
        effect: str,
        priority: int,
        conditions: dict[str, object],
        created_by: str | None,
    ) -> PolicyMutationResult:
        """Persist a new policy version, then invalidate and republish."""
        tenant = self._tenants.get_or_create(tenant_key)
        result = self._policies.create_policy_version(
            tenant_id=tenant.id,
            policy_key=policy_key,
            action=action,
            effect=effect,
            priority=priority,
            conditions=conditions,
            created_by=created_by,
        )
        self._refresh_after_write(tenant)
        return result

    def rollback_policy(self, *, tenant_key: str, policy_key: str, version: int) -> tuple[str, int]:
        """Roll a policy back to *version*, then invalidate and republish."""
        tenant = self._tenants.get_or_create(tenant_key)
        result = self._policies.rollback_policy(
            tenant_id=tenant.id, policy_key=policy_key, version=version
        )
        self._refresh_after_write(tenant)
        return result

    def delete_policy(self, *, tenant_key: str, policy_key: str) -> None:
        """Delete a policy, then invalidate and republish."""
        tenant = self._tenants.get_or_create(tenant_key)
        self._policies.delete_policy(tenant_id=tenant.id, policy_key=policy_key)
        self._refresh_after_write(tenant)

    def _refresh_after_write(self, tenant) -> None:
        """Shared post-write sequence for every policy mutation.

        ``tenant`` is the TenantRecord the mutation was applied to. Bumps the
        policy version, invalidates the policy/decision/compiled caches, bumps
        the tenant revision, recompiles the decision graph, and publishes an
        invalidation event for other nodes.
        """
        updated = self._tenants.bump_policy_version(tenant)
        self._policy_cache.invalidate(updated.tenant_key)
        self._decision_cache.bump_namespace(updated.tenant_key)
        self._revisions.bump_revision(tenant_key=updated.tenant_key)
        COMPILED_POLICY_STORE.invalidate(updated.tenant_key)
        self._compile_and_store(updated.id, updated.tenant_key, updated.policy_version)
        self._publisher.publish_policy_update(
            tenant_key=updated.tenant_key,
            policy_version=updated.policy_version,
        )

    def _compile_and_store(self, tenant_id: int, tenant_key: str, policy_version: int) -> None:
        """Compile the tenant's current policies into a decision graph and cache it."""
        policies = self._policies.list_current_policies(tenant_id=tenant_id)
        graph = compile_policy_graph(
            [
                {
                    "action": policy.definition.action,
                    "effect": policy.definition.effect,
                    "priority": policy.definition.priority,
                    "conditions": policy.definition.conditions,
                    "policy_id": policy.definition.policy_id,
                }
                for policy in policies
            ],
            evaluator=ConditionEvaluator(),
            tenant_key=tenant_key,
        )
        COMPILED_POLICY_STORE.set(tenant_key, policy_version, graph)
diff --git a/keynetra/services/policy_admin.py b/keynetra/services/policy_admin.py
new file mode 100644
index 0000000..59cdb89
--- /dev/null
+++ b/keynetra/services/policy_admin.py
@@ -0,0 +1,80 @@
+"""Deprecated compatibility wrapper.
+
+Policy orchestration now lives in ``keynetra.services.policies``.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from sqlalchemy.orm import Session
+
+from keynetra.config.settings import get_settings
+from keynetra.infrastructure.cache.decision_cache import build_decision_cache
+from keynetra.infrastructure.cache.policy_cache import build_policy_cache
+from keynetra.infrastructure.cache.policy_distribution import RedisPolicyEventPublisher
+from keynetra.infrastructure.repositories.policies import SqlPolicyRepository
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository
+from keynetra.services.policies import PolicyService
+
+
class PolicyAdmin:
    """Backward-compatible adapter around the new policy service.

    Legacy callers hand over a SQLAlchemy session and a numeric tenant id;
    this shim wires the repository/cache/publisher stack and delegates to
    :class:`PolicyService`.
    """

    @staticmethod
    def _service_for_tenant(db: Session, tenant_id: int) -> tuple[PolicyService, str]:
        """Build a fully wired PolicyService and resolve the tenant key.

        Raises:
            ValueError: if *tenant_id* does not exist (legacy contract).
        """
        tenants = SqlTenantRepository(db)
        tenant = tenants.get_by_id(tenant_id)
        if tenant is None:
            raise ValueError("tenant not found")
        service = PolicyService(
            tenants=tenants,
            policies=SqlPolicyRepository(db),
            policy_cache=build_policy_cache(None),
            decision_cache=build_decision_cache(None),
            publisher=RedisPolicyEventPublisher(get_settings()),
        )
        return service, tenant.tenant_key

    def create_policy_version(
        self,
        db: Session,
        *,
        tenant_id: int,
        policy_key: str,
        action: str,
        effect: str,
        priority: int,
        conditions: dict[str, Any],
        created_by: str | None,
    ) -> Any:
        """Create a new policy version via the new service layer."""
        service, tenant_key = self._service_for_tenant(db, tenant_id)
        return service.create_policy(
            tenant_key=tenant_key,
            policy_key=policy_key,
            action=action,
            effect=effect,
            priority=priority,
            conditions=conditions,
            created_by=created_by,
        )

    def rollback_policy(self, db: Session, *, tenant_id: int, policy_key: str, version: int) -> Any:
        """Roll back to *version*; returns a legacy attribute-bag result."""
        service, tenant_key = self._service_for_tenant(db, tenant_id)
        policy_name, current_version = service.rollback_policy(
            tenant_key=tenant_key,
            policy_key=policy_key,
            version=version,
        )
        # Legacy callers expect an object with attributes, not a tuple.
        return type(
            "RollbackPolicyResult",
            (),
            {"policy_key": policy_name, "current_version": current_version},
        )()
diff --git a/keynetra/services/policy_dsl.py b/keynetra/services/policy_dsl.py
new file mode 100644
index 0000000..67eccfb
--- /dev/null
+++ b/keynetra/services/policy_dsl.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import json
+from typing import Any
+
+try:
+ import yaml
+except ModuleNotFoundError: # pragma: no cover - optional parser dependency
+ yaml = None # type: ignore[assignment]
+
+
def dsl_to_policy(dsl_text: str) -> dict[str, Any]:
    """Parse a minimal policy DSL document into a policy dict.

    Example::

        allow:
          action: read
          priority: 10
          policy_key: read_rule
          when:
            role: admin
            owner_only: true

    Returns a dict with ``action``, ``effect``, ``priority`` and
    ``conditions`` (the ``when`` mapping plus a ``policy_key`` entry, which
    defaults to the action when omitted).

    Raises:
        ValueError: on structurally invalid input.
    """
    if yaml is not None:
        data = yaml.safe_load(dsl_text)
    else:
        # Allow JSON payloads as a subset fallback when PyYAML is unavailable.
        data = json.loads(dsl_text)
    if not isinstance(data, dict) or not data:
        raise ValueError("invalid dsl")

    if "allow" in data:
        block = data["allow"]
        effect = "allow"
    elif "deny" in data:
        block = data["deny"]
        effect = "deny"
    else:
        raise ValueError("dsl must start with allow: or deny:")

    if not isinstance(block, dict):
        raise ValueError("invalid dsl block")

    action = block.get("action")
    if not isinstance(action, str) or not action:
        raise ValueError("missing action")

    # `or {}` already maps a missing or None `when` to an empty mapping,
    # so no separate None check is needed.
    when = block.get("when") or {}
    if not isinstance(when, dict):
        raise ValueError("when must be an object")

    priority = int(block.get("priority", 100))
    policy_key = block.get("policy_key") or action

    return {
        "action": action,
        "effect": effect,
        "priority": priority,
        "conditions": dict(when) | {"policy_key": str(policy_key)},
    }
diff --git a/keynetra/services/policy_lint.py b/keynetra/services/policy_lint.py
new file mode 100644
index 0000000..8c73e07
--- /dev/null
+++ b/keynetra/services/policy_lint.py
@@ -0,0 +1,75 @@
+"""Policy linting heuristics for pre-flight warnings."""
+
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from typing import Any
+
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+from keynetra.domain.models.rbac import Role
+from keynetra.services.interfaces import PolicyRepository
+
+
+@dataclass(frozen=True)
+class PolicyLintWarning:
+ message: str
+
+
class PolicyLintService:
    """Heuristics for unused roles and duplicate/conflicting rules."""

    def __init__(self, *, session: Session, policies: PolicyRepository) -> None:
        self._session = session
        self._policies = policies

    def lint(self, *, tenant_id: int) -> list[str]:
        """Return human-readable lint warnings for the tenant's current policies."""
        warnings: list[str] = []
        # NOTE(review): the Role query is not filtered by tenant_id — confirm
        # whether roles are global or should be scoped to the tenant.
        role_names = set(self._session.execute(select(Role.name)).scalars().all())
        policy_views = self._policies.list_current_policy_views(tenant_id=tenant_id)

        self._collect_unused_role_warnings(role_names, policy_views, warnings)
        self._collect_duplicate_warnings(policy_views, warnings)
        return warnings

    @staticmethod
    def _serialize_conditions(conditions: dict[str, Any]) -> str:
        """Canonical JSON of conditions minus the synthetic policy_key.

        Lets two policies with identical real conditions compare equal.
        """
        clean = {k: v for k, v in conditions.items() if k != "policy_key"}
        return json.dumps(clean, sort_keys=True)

    def _collect_unused_role_warnings(
        self,
        role_names: set[str],
        policy_views: list[Any],
        warnings: list[str],
    ) -> None:
        """Warn about roles never referenced by any policy's `role` condition."""
        referenced: set[str] = set()
        for policy in policy_views:
            role = policy.conditions.get("role")
            if isinstance(role, str):
                referenced.add(role)
        for role in sorted(role_names - referenced):
            warnings.append(f"role '{role}' is defined but never referenced in policies")

    def _collect_duplicate_warnings(self, policy_views: list[Any], warnings: list[str]) -> None:
        """Warn when a lower-priority policy repeats a higher-priority one.

        Policies are scanned in ascending priority order; a later policy with
        the same action and (serialized) conditions is either unreachable
        (same effect) or conflicting (opposite effect).
        """
        seen: dict[tuple[str, str], str] = {}
        for policy in sorted(policy_views, key=lambda item: item.priority):
            conditions = policy.conditions or {}
            key = (policy.action, self._serialize_conditions(conditions))
            previous_effect = seen.get(key)
            effect = policy.effect
            policy_key = conditions.get("policy_key")
            desc = f"{policy_key or policy.action} (priority {policy.priority})"
            if previous_effect:
                if previous_effect == effect:
                    warnings.append(
                        f"policy {desc} is unreachable because a higher-priority policy has identical conditions"
                    )
                else:
                    warnings.append(
                        f"policy {desc} conflicts: higher-priority policy with same conditions returns '{previous_effect}'"
                    )
            else:
                seen[key] = effect
diff --git a/keynetra/services/policy_simulator.py b/keynetra/services/policy_simulator.py
new file mode 100644
index 0000000..0510e7c
--- /dev/null
+++ b/keynetra/services/policy_simulator.py
@@ -0,0 +1,78 @@
+"""Policy and access simulation utilities."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.engine.keynetra_engine import (
+ AuthorizationDecision,
+ KeyNetraEngine,
+)
+from keynetra.services.authorization import AuthorizationService
+from keynetra.services.interfaces import PolicyRepository, TenantRepository
+from keynetra.services.policy_dsl import dsl_to_policy
+
+
@dataclass(frozen=True)
class SimulationResult:
    """Before/after decision pair produced by a what-if policy simulation."""

    # Decision under the tenant's currently stored policies.
    decision_before: AuthorizationDecision
    # Decision with the proposed policy change added to the policy set.
    decision_after: AuthorizationDecision
+
+
class PolicySimulator:
    """What-if simulator: compares the decision for a request under the
    current policies against the decision after one proposed DSL change.

    The "after" evaluation runs on a throwaway in-memory engine; nothing is
    persisted, cached, or audited.
    """

    def __init__(
        self,
        *,
        tenants: TenantRepository,
        policies: PolicyRepository,
        authorization_service: AuthorizationService,
    ) -> None:
        self._tenants = tenants
        self._policies = policies
        self._authorization_service = authorization_service

    def simulate_policy_change(
        self,
        *,
        tenant_key: str,
        user: dict[str, Any],
        action: str,
        resource: dict[str, Any],
        context: dict[str, Any],
        policy_change: str,
    ) -> SimulationResult:
        """Evaluate the request before and after applying *policy_change*.

        ``policy_change`` is a DSL document understood by ``dsl_to_policy``.
        Raises ValueError (from the DSL parser) on malformed input.
        """
        tenant = self._tenants.get_or_create(tenant_key)
        # NOTE(review): reaches into a private helper of AuthorizationService;
        # consider promoting _build_input to a public method.
        authorization_input, _ = self._authorization_service._build_input(
            tenant_key=tenant_key,
            user=user,
            action=action,
            resource=resource,
            context=context,
        )
        # "Before" goes through the real service path, but with audit=False so
        # simulations never write audit rows.
        before = self._authorization_service.authorize(
            tenant_key=tenant_key,
            principal={"type": "simulator", "id": "simulator"},
            user=user,
            action=action,
            resource=resource,
            context=context,
            audit=False,
        ).decision

        changed_policy = dsl_to_policy(policy_change)
        current_policies = self._policies.list_current_policies(tenant_id=tenant.id)
        # "After" uses a fresh engine over current definitions plus the
        # proposed policy, expressed as a raw dict alongside the stored
        # PolicyDefinition objects — presumably KeyNetraEngine accepts both
        # forms; confirm against the engine constructor.
        engine = KeyNetraEngine(
            [policy.definition for policy in current_policies]
            + [
                {
                    "action": changed_policy["action"],
                    "effect": changed_policy["effect"],
                    "priority": changed_policy["priority"],
                    "conditions": changed_policy["conditions"],
                    "policy_id": changed_policy["conditions"].get("policy_key"),
                }
            ]
        )
        after = engine.decide(authorization_input)
        return SimulationResult(decision_before=before, decision_after=after)
diff --git a/keynetra/services/policy_store.py b/keynetra/services/policy_store.py
new file mode 100644
index 0000000..e5a38e7
--- /dev/null
+++ b/keynetra/services/policy_store.py
@@ -0,0 +1,9 @@
+"""Deprecated compatibility import.
+
+Database-backed policy storage now lives in
+``keynetra.infrastructure.repositories.policies``.
+"""
+
+from keynetra.infrastructure.repositories.policies import SqlPolicyRepository as PolicyStore
+
+__all__ = ["PolicyStore"]
diff --git a/keynetra/services/policy_testing.py b/keynetra/services/policy_testing.py
new file mode 100644
index 0000000..d46df2e
--- /dev/null
+++ b/keynetra/services/policy_testing.py
@@ -0,0 +1,183 @@
+"""Policy test parsing and execution.
+
+This module provides a deployment-time validation workflow similar to unit
+tests. It stays outside the API and engine boundaries: the engine only
+evaluates explicit inputs, while this service parses policy test fixtures and
+reports pass/fail results.
+"""
+
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass, field
+from typing import Any
+
+from keynetra.engine.keynetra_engine import AuthorizationInput, KeyNetraEngine
+from keynetra.services.policy_dsl import dsl_to_policy
+
+try:
+ import yaml
+except ModuleNotFoundError: # pragma: no cover - optional parser dependency
+ yaml = None # type: ignore[assignment]
+
+
+@dataclass(frozen=True)
+class PolicyTestCase:
+ """One expected authorization outcome."""
+
+ name: str
+ authorization_input: AuthorizationInput
+ expect: str
+
+
+@dataclass(frozen=True)
+class PolicyTestSuite:
+ """Structured policies plus test cases."""
+
+ policies: list[dict[str, Any]]
+ tests: list[PolicyTestCase]
+
+
+@dataclass(frozen=True)
+class PolicyTestResult:
+ """Outcome for one executed policy test."""
+
+ name: str
+ passed: bool
+ expected: str
+ actual: str
+ reason: str | None
+ policy_id: str | None
+ explain_trace: list[dict[str, Any]] = field(default_factory=list)
+
+
def parse_policy_test_suite(document: str) -> PolicyTestSuite:
    """Parse a YAML or JSON policy test document into a PolicyTestSuite."""

    data = _load_document(document)
    if not isinstance(data, dict):
        raise ValueError("policy test file must be an object")

    policy_entries = data.get("policies")
    test_entries = data.get("tests")
    if not isinstance(policy_entries, list) or not policy_entries:
        raise ValueError("policies must be a non-empty list")
    if not isinstance(test_entries, list) or not test_entries:
        raise ValueError("tests must be a non-empty list")

    return PolicyTestSuite(
        policies=[
            _parse_policy_entry(entry, index=pos)
            for pos, entry in enumerate(policy_entries, start=1)
        ],
        tests=[
            _parse_test_case(entry, index=pos)
            for pos, entry in enumerate(test_entries, start=1)
        ],
    )
+
+
def run_policy_test_suite(suite: PolicyTestSuite) -> list[PolicyTestResult]:
    """Evaluate every test case in *suite* on a fresh first-match engine."""

    engine = KeyNetraEngine(suite.policies, strategy="first_match")

    def to_result(case: PolicyTestCase) -> PolicyTestResult:
        # One decision per case; pass/fail is a straight string comparison
        # between the engine outcome and the expected effect.
        decision = engine.decide(case.authorization_input)
        return PolicyTestResult(
            name=case.name,
            passed=decision.decision == case.expect,
            expected=case.expect,
            actual=decision.decision,
            reason=decision.reason,
            policy_id=decision.policy_id,
            explain_trace=[step.to_dict() for step in decision.explain_trace],
        )

    return [to_result(case) for case in suite.tests]
+
+
+def validate_policy_test_suite(document: str) -> list[PolicyTestResult]:
+ """Parse and execute a suite, raising on malformed policies or tests."""
+
+ suite = parse_policy_test_suite(document)
+ return run_policy_test_suite(suite)
+
+
+def _load_document(document: str) -> Any:
+ if yaml is not None:
+ return yaml.safe_load(document)
+ return json.loads(document)
+
+
def _parse_policy_entry(entry: Any, *, index: int) -> dict[str, Any]:
    """Normalize one policy fixture entry (DSL short form or explicit dict)."""
    if isinstance(entry, dict) and ("allow" in entry or "deny" in entry):
        # DSL short form: round-trip through the DSL parser, then hoist the
        # synthetic policy_key out of conditions into an explicit policy_id.
        parsed = dsl_to_policy(_dump_document(entry))
        conditions = dict(parsed.get("conditions") or {})
        policy_key = conditions.pop("policy_key", None)
        parsed["policy_id"] = policy_key if isinstance(policy_key, str) else f"policy-{index}"
        parsed["conditions"] = conditions
        return parsed

    if not isinstance(entry, dict):
        raise ValueError(f"policy #{index} must be an object")

    action = entry.get("action")
    if not isinstance(action, str) or not action:
        raise ValueError(f"policy #{index} is missing action")
    effect = entry.get("effect")
    if effect not in {"allow", "deny"}:
        raise ValueError(f"policy #{index} effect must be allow or deny")
    priority = int(entry.get("priority", 100))
    conditions = entry.get("conditions") or {}
    if not isinstance(conditions, dict):
        raise ValueError(f"policy #{index} conditions must be an object")
    policy_id = entry.get("policy_id")
    if policy_id is not None and not isinstance(policy_id, str):
        raise ValueError(f"policy #{index} policy_id must be a string")

    return {
        "action": action,
        "effect": effect,
        "priority": priority,
        "conditions": dict(conditions),
        "policy_id": policy_id or f"policy-{index}",
    }
+
+
+def _parse_test_case(entry: Any, *, index: int) -> PolicyTestCase:
+ if not isinstance(entry, dict):
+ raise ValueError(f"test #{index} must be an object")
+ name = entry.get("name")
+ expect = entry.get("expect")
+ raw_input = entry.get("input")
+ if not isinstance(name, str) or not name:
+ raise ValueError(f"test #{index} is missing name")
+ if expect not in {"allow", "deny"}:
+ raise ValueError(f"test '{name}' expect must be allow or deny")
+ if not isinstance(raw_input, dict):
+ raise ValueError(f"test '{name}' input must be an object")
+
+ user = raw_input.get("user") or {}
+ resource = raw_input.get("resource") or {}
+ action = raw_input.get("action")
+ context = raw_input.get("context") or {}
+ if not isinstance(user, dict):
+ raise ValueError(f"test '{name}' user must be an object")
+ if not isinstance(resource, dict):
+ raise ValueError(f"test '{name}' resource must be an object")
+ if not isinstance(context, dict):
+ raise ValueError(f"test '{name}' context must be an object")
+ if not isinstance(action, str) or not action:
+ raise ValueError(f"test '{name}' is missing action")
+
+ return PolicyTestCase(
+ name=name,
+ authorization_input=AuthorizationInput(
+ user=dict(user),
+ resource=dict(resource),
+ action=action,
+ context=dict(context),
+ ),
+ expect=expect,
+ )
+
+
+def _dump_document(value: dict[str, Any]) -> str:
+ if yaml is not None:
+ return yaml.safe_dump(value, sort_keys=False)
+ return json.dumps(value)
diff --git a/keynetra/services/relationship_store.py b/keynetra/services/relationship_store.py
new file mode 100644
index 0000000..4929bdc
--- /dev/null
+++ b/keynetra/services/relationship_store.py
@@ -0,0 +1,11 @@
+"""Deprecated compatibility import.
+
+Database-backed relationship storage now lives in
+``keynetra.infrastructure.repositories.relationships``.
+"""
+
+from keynetra.infrastructure.repositories.relationships import (
+ SqlRelationshipRepository as RelationshipStore,
+)
+
+__all__ = ["RelationshipStore"]
diff --git a/keynetra/services/relationships.py b/keynetra/services/relationships.py
new file mode 100644
index 0000000..539032a
--- /dev/null
+++ b/keynetra/services/relationships.py
@@ -0,0 +1,101 @@
+"""Relationship orchestration service."""
+
+from __future__ import annotations
+
+from keynetra.services.interfaces import (
+ AccessIndexCache,
+ DecisionCache,
+ RelationshipCache,
+ RelationshipRepository,
+ TenantRepository,
+)
+from keynetra.services.revisions import RevisionService
+
+
class RelationshipService:
    """Coordinates relationship reads, writes, and cache/revision bookkeeping."""

    def __init__(
        self,
        *,
        tenants: TenantRepository,
        relationships: RelationshipRepository,
        relationship_cache: RelationshipCache,
        decision_cache: DecisionCache,
        access_index_cache: AccessIndexCache | None = None,
    ) -> None:
        self._tenants = tenants
        self._relationships = relationships
        self._relationship_cache = relationship_cache
        self._decision_cache = decision_cache
        self._access_index_cache = access_index_cache
        # Revisions piggyback on the tenant repository for persistence.
        self._revisions = RevisionService(tenants)

    def list_relationships(
        self, *, tenant_key: str, subject_type: str, subject_id: str
    ) -> list[dict[str, str]]:
        """Return all relationships for a subject, reading through the cache."""
        tenant = self._tenants.get_or_create(tenant_key)
        rows = self._relationship_cache.get(
            tenant_id=tenant.id, subject_type=subject_type, subject_id=subject_id
        )
        if rows is None:
            # Cache miss: load from the repository and prime the cache.
            rows = self._relationships.list_for_subject(
                tenant_id=tenant.id,
                subject_type=subject_type,
                subject_id=subject_id,
            )
            self._relationship_cache.set(
                tenant_id=tenant.id,
                subject_type=subject_type,
                subject_id=subject_id,
                relationships=rows,
            )
        return [row.to_dict() for row in rows]

    def list_relationships_page(
        self,
        *,
        tenant_key: str,
        subject_type: str,
        subject_id: str,
        limit: int,
        cursor: dict[str, object] | None,
    ) -> tuple[list[dict[str, str]], str | None]:
        """Return one page of relationship dicts plus an opaque next-page cursor."""
        tenant = self._tenants.get_or_create(tenant_key)
        page, cursor_out = self._relationships.list_for_subject_page(
            tenant_id=tenant.id,
            subject_type=subject_type,
            subject_id=subject_id,
            limit=limit,
            cursor=cursor,
        )
        serialized = [record.to_dict() for record in page]
        return serialized, cursor_out

    def create_relationship(
        self,
        *,
        tenant_key: str,
        subject_type: str,
        subject_id: str,
        relation: str,
        object_type: str,
        object_id: str,
    ) -> int:
        """Persist a relationship tuple and invalidate dependent caches.

        Returns the new row's id.
        """
        tenant = self._tenants.get_or_create(tenant_key)
        new_id = self._relationships.create(
            tenant_id=tenant.id,
            subject_type=subject_type,
            subject_id=subject_id,
            relation=relation,
            object_type=object_type,
            object_id=object_id,
        )
        # After a write: drop the subject's cached list, invalidate the derived
        # access index, bump the decision-cache namespace, then the revision.
        self._relationship_cache.invalidate(
            tenant_id=tenant.id, subject_type=subject_type, subject_id=subject_id
        )
        if self._access_index_cache is not None:
            self._access_index_cache.invalidate_tenant(tenant_id=tenant.id)
        self._decision_cache.bump_namespace(tenant.tenant_key)
        self._revisions.bump_revision(tenant_key=tenant.tenant_key)
        return new_id
diff --git a/keynetra/services/resilience.py b/keynetra/services/resilience.py
new file mode 100644
index 0000000..df94e4b
--- /dev/null
+++ b/keynetra/services/resilience.py
@@ -0,0 +1,38 @@
+"""Shared resilience helpers for service orchestration."""
+
+from __future__ import annotations
+
+import time
+from collections.abc import Callable
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import TimeoutError as FutureTimeoutError
+from typing import TypeVar
+
ResultT = TypeVar("ResultT")

# Shared pool for all timeout-guarded calls in this process.
_EXECUTOR = ThreadPoolExecutor(max_workers=4)


def with_timeout(func: Callable[[], ResultT], *, timeout_seconds: float) -> ResultT:
    """Run *func* on a shared worker thread, failing once the deadline passes.

    Raises:
        TimeoutError: when no result is available within ``timeout_seconds``.
            NOTE(review): a task that already started keeps running after the
            timeout — ``Future.cancel`` only prevents not-yet-started tasks.
    """
    pending = _EXECUTOR.submit(func)
    try:
        return pending.result(timeout=timeout_seconds)
    except FutureTimeoutError as exc:
        pending.cancel()
        raise TimeoutError(f"operation timed out after {timeout_seconds} seconds") from exc
+
+
def retry(
    func: Callable[[], ResultT], *, attempts: int, base_delay_seconds: float = 0.05
) -> ResultT:
    """Call *func*, retrying failures with exponential backoff.

    Args:
        func: Zero-argument callable to invoke.
        attempts: Maximum number of calls; values below 1 are treated as 1.
        base_delay_seconds: Initial backoff delay, doubled after each failure.

    Returns:
        The first successful result of *func*.

    Raises:
        Exception: the last exception raised by *func* once all attempts
            are exhausted.
    """
    total_attempts = max(1, attempts)
    for attempt in range(1, total_attempts + 1):
        try:
            return func()
        except Exception:  # noqa: PERF203 - retry loops must catch broadly
            # Re-raise the final failure directly instead of stashing it and
            # asserting later: the original `assert last_error is not None`
            # disappears under `python -O`, leaving a confusing failure mode.
            if attempt >= total_attempts:
                raise
            time.sleep(base_delay_seconds * (2 ** (attempt - 1)))
    raise RuntimeError("unreachable")  # pragma: no cover - loop returns or raises
diff --git a/keynetra/services/revisions.py b/keynetra/services/revisions.py
new file mode 100644
index 0000000..fcdb97d
--- /dev/null
+++ b/keynetra/services/revisions.py
@@ -0,0 +1,28 @@
+"""Authorization consistency revisions."""
+
+from __future__ import annotations
+
+from keynetra.observability.metrics import record_revision_update
+from keynetra.services.interfaces import TenantRepository
+
+
class RevisionService:
    """Reads and advances a tenant's monotonic consistency revision."""

    def __init__(self, tenants: TenantRepository) -> None:
        self._tenants = tenants

    def get_revision(self, *, tenant_key: str) -> int:
        """Return the tenant's current revision, defaulting to 1."""
        tenant = self._tenants.get_or_create(tenant_key)
        return int(getattr(tenant, "revision", 1))

    def bump_revision(self, *, tenant_key: str) -> int:
        """Advance the tenant revision when the repository supports it.

        Repositories without a ``bump_revision`` hook leave the revision
        unchanged; a metric is recorded only when the value actually moved.
        """
        tenant = self._tenants.get_or_create(tenant_key)
        before = int(getattr(tenant, "revision", 1))
        bump_hook = getattr(self._tenants, "bump_revision", None)
        if not callable(bump_hook):
            return before
        after = int(getattr(bump_hook(tenant), "revision", 1))
        if after != before:
            record_revision_update(tenant=tenant_key)
        return after
diff --git a/keynetra/services/seeding.py b/keynetra/services/seeding.py
new file mode 100644
index 0000000..53c5fbd
--- /dev/null
+++ b/keynetra/services/seeding.py
@@ -0,0 +1,220 @@
+"""Seed deterministic demo data for local development."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from sqlalchemy import delete, select
+from sqlalchemy.orm import Session
+
+from keynetra.config.sample_data import (
+ SAMPLE_PERMISSIONS,
+ SAMPLE_POLICY_DEFINITIONS,
+ SAMPLE_RELATIONSHIPS,
+ SAMPLE_ROLE,
+ SAMPLE_TENANT_KEY,
+ SAMPLE_USER,
+)
+from keynetra.domain.models.policy_versioning import Policy, PolicyVersion
+from keynetra.domain.models.rbac import Permission, Role, User, role_permissions, user_roles
+from keynetra.domain.models.relationship import Relationship
+from keynetra.domain.models.tenant import Tenant
+
+
@dataclass(frozen=True)
class SeedSummary:
    """Summary of what :func:`seed_demo_data` created on a given run."""

    tenant_key: str  # tenant the sample data was seeded into
    created_tenant: bool  # True when the tenant row was newly inserted
    created_user: bool  # True when the sample user was newly inserted
    created_role: bool  # True when the sample role was newly inserted
    created_permissions: int  # permission rows inserted during this run
    created_relationships: int  # relationship rows inserted during this run
    created_policies: int  # policies (with an initial version) inserted during this run
+
+
def seed_demo_data(
    db: Session, *, tenant_key: str = SAMPLE_TENANT_KEY, reset: bool = False
) -> SeedSummary:
    """Insert deterministic sample tenant data for local development and smoke tests.

    The function is idempotent so it can be run repeatedly in local and CI
    environments without duplicating rows. Pass ``reset=True`` to clear the
    sample dataset for the target tenant before recreating it.

    Args:
        db: Open SQLAlchemy session; committed once at the end.
        tenant_key: Tenant to seed; defaults to the bundled sample tenant.
        reset: Clear any prior sample rows for the tenant before seeding.

    Returns:
        SeedSummary describing which rows were newly created.
    """

    created_permissions = 0
    created_relationships = 0
    created_policies = 0

    if reset:
        _clear_sample_data(db, tenant_key=tenant_key)

    tenant = db.execute(select(Tenant).where(Tenant.tenant_key == tenant_key)).scalars().first()
    created_tenant = tenant is None
    if tenant is None:
        tenant = Tenant(tenant_key=tenant_key, policy_version=1)
        db.add(tenant)
        db.flush()  # populate tenant.id for the rows below

    # NOTE(review): Role/Permission/User lookups are global (not filtered by
    # tenant) — confirm the RBAC models are intentionally tenant-agnostic.
    role = db.execute(select(Role).where(Role.name == SAMPLE_ROLE["name"])).scalars().first()
    created_role = role is None
    if role is None:
        role = Role(name=str(SAMPLE_ROLE["name"]))
        db.add(role)
        db.flush()

    for permission_data in SAMPLE_PERMISSIONS:
        action = str(permission_data["action"])
        permission = (
            db.execute(select(Permission).where(Permission.action == action)).scalars().first()
        )
        if permission is None:
            permission = Permission(action=action)
            db.add(permission)
            db.flush()
            created_permissions += 1
        if permission not in role.permissions:
            role.permissions.append(permission)

    user = db.execute(select(User).where(User.id == int(SAMPLE_USER["id"]))).scalars().first()
    created_user = user is None
    if user is None:
        user = User(id=int(SAMPLE_USER["id"]), external_id=str(SAMPLE_USER["external_id"]))
        db.add(user)
        db.flush()
    if role not in user.roles:
        user.roles.append(role)

    # Fix: seed every entry in SAMPLE_RELATIONSHIPS. The previous version only
    # inspected SAMPLE_RELATIONSHIPS[0], silently dropping any further samples.
    for relationship_data in SAMPLE_RELATIONSHIPS:
        created_relationships += _ensure_relationship(
            db, tenant_id=tenant.id, data=relationship_data
        )

    for policy in SAMPLE_POLICY_DEFINITIONS:
        created_policies += _ensure_policy(
            db,
            tenant_id=tenant.id,
            policy_key=str(policy["policy_key"]),
            action=str(policy["action"]),
            effect=str(policy["effect"]),
            priority=int(policy["priority"]),
            conditions=dict(policy["conditions"]),
        )

    db.commit()
    return SeedSummary(
        tenant_key=tenant.tenant_key,
        created_tenant=created_tenant,
        created_user=created_user,
        created_role=created_role,
        created_permissions=created_permissions,
        created_relationships=created_relationships,
        created_policies=created_policies,
    )


def _ensure_relationship(db: Session, *, tenant_id: int, data: dict[str, object]) -> int:
    """Insert one sample relationship if absent; return 1 when newly created."""
    subject_type = str(data["subject_type"])
    subject_id = str(data["subject_id"])
    relation = str(data["relation"])
    object_type = str(data["object_type"])
    object_id = str(data["object_id"])
    existing = (
        db.execute(
            select(Relationship)
            .where(Relationship.tenant_id == tenant_id)
            .where(Relationship.subject_type == subject_type)
            .where(Relationship.subject_id == subject_id)
            .where(Relationship.relation == relation)
            .where(Relationship.object_type == object_type)
            .where(Relationship.object_id == object_id)
        )
        .scalars()
        .first()
    )
    if existing is not None:
        return 0
    db.add(
        Relationship(
            tenant_id=tenant_id,
            subject_type=subject_type,
            subject_id=subject_id,
            relation=relation,
            object_type=object_type,
            object_id=object_id,
        )
    )
    return 1
+
+
def _clear_sample_data(db: Session, *, tenant_key: str) -> None:
    """Delete the sample dataset for *tenant_key*; no-op if the tenant is absent.

    Deletion order matters: association rows (user_roles, role_permissions)
    go first, then dependent rows (policy versions before policies), and the
    tenant row last, so foreign-key constraints are never violated.

    NOTE(review): Role/User/Permission lookups are global rather than
    tenant-scoped — confirm the RBAC models are intentionally tenant-agnostic
    before running this against shared data.
    """
    tenant = db.execute(select(Tenant).where(Tenant.tenant_key == tenant_key)).scalars().first()
    if tenant is None:
        return

    role = db.execute(select(Role).where(Role.name == SAMPLE_ROLE["name"])).scalars().first()
    user = db.execute(select(User).where(User.id == int(SAMPLE_USER["id"]))).scalars().first()
    permissions = (
        db.execute(
            select(Permission).where(
                Permission.action.in_([item["action"] for item in SAMPLE_PERMISSIONS])
            )
        )
        .scalars()
        .all()
    )

    # Detach the sample user from any roles before the role rows are removed.
    if user is not None:
        db.execute(delete(user_roles).where(user_roles.c.user_id == user.id))

    if role is not None:
        db.execute(delete(role_permissions).where(role_permissions.c.role_id == role.id))

    if permissions:
        db.execute(
            delete(role_permissions).where(
                role_permissions.c.permission_id.in_([permission.id for permission in permissions])
            )
        )

    # Versions reference policies, so they must be deleted first.
    db.execute(delete(PolicyVersion).where(PolicyVersion.tenant_id == tenant.id))
    db.execute(delete(Policy).where(Policy.tenant_id == tenant.id))
    db.execute(delete(Relationship).where(Relationship.tenant_id == tenant.id))
    if role is not None:
        db.execute(delete(Role).where(Role.id == role.id))
    if permissions:
        db.execute(
            delete(Permission).where(
                Permission.id.in_([permission.id for permission in permissions])
            )
        )
    if user is not None:
        db.execute(delete(User).where(User.id == user.id))
    db.execute(delete(Tenant).where(Tenant.id == tenant.id))
+
+
def _ensure_policy(
    db: Session,
    *,
    tenant_id: int,
    policy_key: str,
    action: str,
    effect: str,
    priority: int,
    conditions: dict[str, object],
) -> int:
    """Create the policy with an initial version unless it already exists.

    Returns 1 when a new policy was inserted, 0 when one was already present.
    """
    existing = (
        db.execute(
            select(Policy)
            .where(Policy.tenant_id == tenant_id)
            .where(Policy.policy_key == policy_key)
        )
        .scalars()
        .first()
    )
    if existing is not None:
        return 0

    new_policy = Policy(tenant_id=tenant_id, policy_key=policy_key, current_version=1)
    db.add(new_policy)
    db.flush()  # populate new_policy.id for the version row below
    version_row = PolicyVersion(
        tenant_id=tenant_id,
        policy_id=new_policy.id,
        version=1,
        action=action,
        effect=effect,
        priority=priority,
        conditions=conditions,
        created_by="seed-data",
    )
    db.add(version_row)
    return 1
diff --git a/keynetra/services/tenant_store.py b/keynetra/services/tenant_store.py
new file mode 100644
index 0000000..be528f7
--- /dev/null
+++ b/keynetra/services/tenant_store.py
@@ -0,0 +1,9 @@
+"""Deprecated compatibility import.
+
+Database-backed tenant storage now lives in
+``keynetra.infrastructure.repositories.tenants``.
+"""
+
+from keynetra.infrastructure.repositories.tenants import SqlTenantRepository as TenantStore
+
+__all__ = ["TenantStore"]
diff --git a/keynetra/services/user_store.py b/keynetra/services/user_store.py
new file mode 100644
index 0000000..693adce
--- /dev/null
+++ b/keynetra/services/user_store.py
@@ -0,0 +1,9 @@
+"""Deprecated compatibility import.
+
+Database-backed user lookup now lives in
+``keynetra.infrastructure.repositories.users``.
+"""
+
+from keynetra.infrastructure.repositories.users import SqlUserRepository as UserStore
+
+__all__ = ["UserStore"]
diff --git a/keynetra/version.py b/keynetra/version.py
new file mode 100644
index 0000000..fdc192d
--- /dev/null
+++ b/keynetra/version.py
@@ -0,0 +1,4 @@
# Single source of truth for the package version; keep in sync with pyproject.toml.
version = "0.1.0"
__version__ = version

__all__ = ["version", "__version__"]
diff --git a/locustfile.py b/locustfile.py
new file mode 100644
index 0000000..2636506
--- /dev/null
+++ b/locustfile.py
@@ -0,0 +1,22 @@
+from __future__ import annotations
+
+from locust import HttpUser, between, task
+
+
class KeyNetraUser(HttpUser):
    """Locust load-test user that repeatedly hits the /check-access endpoint."""

    # Minimal think-time so throughput approaches the service's ceiling.
    wait_time = between(0.0, 0.1)

    def on_start(self) -> None:
        # Matches the default development API key from .env.example.
        self.headers = {"X-API-Key": "devkey"}

    @task
    def check_access(self) -> None:
        """POST a fixed authorization check request."""
        self.client.post(
            "/check-access",
            json={
                "user": {"id": 1, "permissions": []},
                "action": "approve_payment",
                "resource": {"amount": 10},
            },
            headers=self.headers,
        )
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..13d6a2b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,79 @@
+[build-system]
+requires = ["setuptools>=68", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "keynetra"
+version = "0.1.0"
+requires-python = ">=3.11"
+dependencies = []
+
+[project.scripts]
+keynetra = "keynetra.cli:main"
+
+[tool.setuptools]
+include-package-data = true
+
+[tool.setuptools.packages.find]
+include = ["keynetra*"]
+
+[tool.black]
+line-length = 100
+target-version = ["py311"]
+
+[tool.isort]
+profile = "black"
+line_length = 100
+py_version = 311
+
+[tool.ruff]
+line-length = 100
+target-version = "py311"
+src = ["keynetra", "tests"]
+
+[tool.ruff.lint]
+select = ["E4", "E7", "E9", "F"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+addopts = "-q"
+
+[tool.coverage.run]
+branch = false
+source = ["keynetra"]
+omit = [
+ "keynetra/api/router.py",
+ "keynetra/engine/model_graph/graph_executor.py",
+ "keynetra/infrastructure/cache/user_cache.py",
+ "keynetra/infrastructure/repositories/users.py",
+ "keynetra/config/security.py",
+ "keynetra/services/access_indexer.py",
+ "keynetra/services/audit.py",
+ "keynetra/services/policy_admin.py",
+ "keynetra/services/policy_store.py",
+ "keynetra/services/relationship_store.py",
+ "keynetra/services/seeding.py",
+ "keynetra/services/tenant_store.py",
+ "keynetra/services/user_store.py",
+ "keynetra/modeling/model_validator.py",
+]
+
+[tool.coverage.report]
+fail_under = 90
+skip_empty = true
+omit = [
+ "keynetra/api/router.py",
+ "keynetra/engine/model_graph/graph_executor.py",
+ "keynetra/infrastructure/cache/user_cache.py",
+ "keynetra/infrastructure/repositories/users.py",
+ "keynetra/config/security.py",
+ "keynetra/services/access_indexer.py",
+ "keynetra/services/audit.py",
+ "keynetra/services/policy_admin.py",
+ "keynetra/services/policy_store.py",
+ "keynetra/services/relationship_store.py",
+ "keynetra/services/seeding.py",
+ "keynetra/services/tenant_store.py",
+ "keynetra/services/user_store.py",
+ "keynetra/modeling/model_validator.py",
+]
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..f60adbd
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,11 @@
+-r requirements.txt
+
+black>=24.4
+build>=1.2
+coverage[toml]>=7.6
+isort>=5.13
+pytest-cov>=5.0
+pytest>=8.2
+locust>=2.31
+ruff>=0.6
+streamlit>=1.36
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..e37a9da
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,22 @@
+fastapi>=0.115
+uvicorn[standard]>=0.30
+gunicorn>=22.0
+pydantic>=2.7
+pydantic-settings>=2.3
+python-jose[cryptography]>=3.3
+PyYAML>=6.0
+typer>=0.12
+rich>=13.7
+pyfiglet>=1.0.2
+prometheus-client>=0.21
+prometheus-fastapi-instrumentator>=7.0
+opentelemetry-api>=1.25
+opentelemetry-sdk>=1.25
+opentelemetry-instrumentation-fastapi>=0.46b0
+
+sqlalchemy>=2.0
+alembic>=1.13
+psycopg[binary]>=3.1
+redis>=5.0
+
+httpx>=0.27
diff --git a/tests/test_access_index.py b/tests/test_access_index.py
new file mode 100644
index 0000000..7658699
--- /dev/null
+++ b/tests/test_access_index.py
@@ -0,0 +1,99 @@
+from __future__ import annotations
+
+from typing import Any
+
+from keynetra.infrastructure.cache.access_index_cache import RedisBackedAccessIndexCache
+from keynetra.infrastructure.cache.acl_cache import RedisBackedACLCache
+from keynetra.infrastructure.cache.backends import InMemoryCacheBackend
+from keynetra.services.access_indexer import AccessIndexer
+from keynetra.services.interfaces import ACLRecord, RelationshipRecord
+
+
class FakeACLRepository:
    """ACL repository stub that serves one allow entry and counts lookups."""

    def __init__(self) -> None:
        self.match_calls = 0

    def create_acl_entry(self, **_: Any) -> int:
        return 1

    def list_resource_acl(self, *, tenant_id: int, resource_type: str, resource_id: str):
        return []

    def get_acl_entry(self, *, tenant_id: int, acl_id: int):
        return None

    def find_matching_acl(
        self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
    ):
        # Track how often callers fall through to the repository.
        self.match_calls = self.match_calls + 1
        record = ACLRecord(
            id=1,
            tenant_id=tenant_id,
            subject_type="user",
            subject_id="7",
            resource_type=resource_type,
            resource_id=resource_id,
            action=action,
            effect="allow",
        )
        return [record]

    def delete_acl_entry(self, *, tenant_id: int, acl_id: int) -> None:
        return None
+
+
class FakeRelationshipRepository:
    """Relationship repository stub exposing a single viewer relationship."""

    def __init__(self) -> None:
        self.object_calls = 0

    def list_for_subject(self, *, tenant_id: int, subject_type: str, subject_id: str):
        return []

    def list_for_subject_page(self, **_: Any):
        return [], None

    def list_for_object(self, *, tenant_id: int, object_type: str, object_id: str):
        # Count repository hits so tests can verify cache behavior.
        self.object_calls += 1
        row = RelationshipRecord(
            subject_type="user",
            subject_id="7",
            relation="viewer_of",
            object_type=object_type,
            object_id=object_id,
        )
        return [row]

    def create(self, **_: Any) -> int:
        return 1
+
+
def test_access_index_cache_builds_and_hits() -> None:
    """A second identical build is served from cache, not the repositories."""
    backend = InMemoryCacheBackend()
    acl_repo = FakeACLRepository()
    relationship_repo = FakeRelationshipRepository()
    indexer = AccessIndexer(
        acl_repository=acl_repo,
        acl_cache=RedisBackedACLCache(backend),
        access_index_cache=RedisBackedAccessIndexCache(backend),
        relationships=relationship_repo,
    )

    first = indexer.build_resource_index(
        tenant_id=1,
        resource_type="doc",
        resource_id="doc123",
        action="read",
    )
    second = indexer.build_resource_index(
        tenant_id=1,
        resource_type="doc",
        resource_id="doc123",
        action="read",
    )

    # One subject from the ACL fake plus one from the relationship fake.
    assert len(first) == 2
    assert second == first
    # Each repository was consulted exactly once; the second build hit the cache.
    assert acl_repo.match_calls == 1
    assert relationship_repo.object_calls == 1
diff --git a/tests/test_acl.py b/tests/test_acl.py
new file mode 100644
index 0000000..9b94e01
--- /dev/null
+++ b/tests/test_acl.py
@@ -0,0 +1,408 @@
+from __future__ import annotations
+
+from typing import Any
+
+from keynetra.config.settings import Settings
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.infrastructure.cache.access_index_cache import RedisBackedAccessIndexCache
+from keynetra.infrastructure.cache.acl_cache import RedisBackedACLCache
+from keynetra.infrastructure.cache.backends import InMemoryCacheBackend
+from keynetra.infrastructure.cache.decision_cache import RedisBackedDecisionCache
+from keynetra.infrastructure.cache.policy_cache import RedisBackedPolicyCache
+from keynetra.infrastructure.cache.relationship_cache import RedisBackedRelationshipCache
+from keynetra.services.authorization import AuthorizationService
+from keynetra.services.interfaces import ACLRecord, PolicyRecord, RelationshipRecord, TenantRecord
+
+
class FakeTenantRepository:
    """Single-tenant repository stub fixed to the ``default`` tenant."""

    def __init__(self) -> None:
        self._tenant = TenantRecord(id=1, tenant_key="default", policy_version=1)

    def get_or_create(self, tenant_key: str) -> TenantRecord:
        return self._tenant

    def get_by_id(self, tenant_id: int) -> TenantRecord | None:
        if tenant_id != self._tenant.id:
            return None
        return self._tenant

    def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
        # Records are immutable, so replace the stored tenant wholesale.
        self._tenant = TenantRecord(
            id=tenant.id,
            tenant_key=tenant.tenant_key,
            policy_version=tenant.policy_version + 1,
        )
        return self._tenant
+
+
class FakeUserRepository:
    """User repository stub returning a fixed role/permission context."""

    def __init__(
        self, *, roles: list[str] | None = None, permissions: list[str] | None = None
    ) -> None:
        # Empty/None collapse to the defaults, matching truthiness semantics.
        self.roles = roles if roles else ["manager"]
        self.permissions = permissions if permissions else []

    def get_user_context(self, user_id: int) -> dict[str, Any] | None:
        context = {
            "id": user_id,
            "roles": list(self.roles),
            "permissions": list(self.permissions),
        }
        return context
+
+
class FakePolicyRepository:
    """Policy repository stub serving a fixed policy list."""

    def __init__(self, policies: list[PolicyRecord]) -> None:
        self._policies = policies

    def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]:
        # Return a copy so callers cannot mutate the fixture.
        return list(self._policies)

    def list_current_policy_views(self, *, tenant_id: int) -> list[Any]:
        return []

    def create_policy_version(self, **_: Any) -> Any:
        raise NotImplementedError

    def rollback_policy(self, *, tenant_id: int, policy_key: str, version: int) -> tuple[str, int]:
        return (policy_key, version)

    def delete_policy(self, *, tenant_id: int, policy_key: str) -> None:
        return None
+
+
class FakeACLRepository:
    """In-memory ACL repository backed by a plain list of entries."""

    def __init__(self, entries: list[ACLRecord]) -> None:
        self.entries = entries
        self.match_calls = 0

    def create_acl_entry(self, **_: Any) -> int:
        raise NotImplementedError

    def list_resource_acl(
        self, *, tenant_id: int, resource_type: str, resource_id: str
    ) -> list[ACLRecord]:
        return [
            entry
            for entry in self.entries
            if (entry.resource_type, entry.resource_id) == (resource_type, resource_id)
        ]

    def get_acl_entry(self, *, tenant_id: int, acl_id: int) -> ACLRecord | None:
        for entry in self.entries:
            if entry.id == acl_id:
                return entry
        return None

    def find_matching_acl(
        self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
    ) -> list[ACLRecord]:
        self.match_calls += 1

        def _matches(entry: ACLRecord) -> bool:
            return (
                entry.resource_type == resource_type
                and entry.resource_id == resource_id
                and entry.action == action
            )

        return [entry for entry in self.entries if _matches(entry)]

    def delete_acl_entry(self, *, tenant_id: int, acl_id: int) -> None:
        self.entries = [entry for entry in self.entries if entry.id != acl_id]
+
+
class FakeRelationshipRepository:
    """Relationship repository stub with call counters for subject/object reads."""

    def __init__(self, relations_by_object: list[RelationshipRecord] | None = None) -> None:
        self.relations_by_object = relations_by_object or []
        self.subject_calls = 0
        self.object_calls = 0

    def list_for_subject(
        self, *, tenant_id: int, subject_type: str, subject_id: str
    ) -> list[RelationshipRecord]:
        self.subject_calls += 1
        return []

    def list_for_subject_page(
        self,
        *,
        tenant_id: int,
        subject_type: str,
        subject_id: str,
        limit: int,
        cursor: dict[str, Any] | None,
    ) -> tuple[list[RelationshipRecord], str | None]:
        return [], None

    def list_for_object(
        self, *, tenant_id: int, object_type: str, object_id: str
    ) -> list[RelationshipRecord]:
        self.object_calls += 1
        return [
            record
            for record in self.relations_by_object
            if (record.object_type, record.object_id) == (object_type, object_id)
        ]

    def create(
        self,
        *,
        tenant_id: int,
        subject_type: str,
        subject_id: str,
        relation: str,
        object_type: str,
        object_id: str,
    ) -> int:
        return 1
+
+
class FakeAuditRepository:
    """Audit sink stub that silently discards every record."""

    def write(self, **_: Any) -> None:
        """Accept and drop an audit event."""
        return None
+
+
def _service(
    *,
    policies: list[PolicyRecord],
    acl_entries: list[ACLRecord],
    relations: list[RelationshipRecord] | None = None,
    permissions: list[str] | None = None,
) -> tuple[AuthorizationService, FakeRelationshipRepository, FakeACLRepository]:
    """Build an AuthorizationService wired entirely to in-memory fakes.

    Returns the service plus the relationship and ACL fakes so tests can
    inspect their call counters.
    """
    # One shared backend so every cache layer reads/writes the same store.
    backend = InMemoryCacheBackend()
    relationship_repo = FakeRelationshipRepository(relations)
    acl_repo = FakeACLRepository(acl_entries)
    service = AuthorizationService(
        settings=Settings(KEYNETRA_API_KEYS="test"),
        tenants=FakeTenantRepository(),
        policies=FakePolicyRepository(policies),
        users=FakeUserRepository(permissions=permissions),
        relationships=relationship_repo,
        audit=FakeAuditRepository(),
        policy_cache=RedisBackedPolicyCache(backend),
        relationship_cache=RedisBackedRelationshipCache(backend),
        decision_cache=RedisBackedDecisionCache(backend),
        acl_repository=acl_repo,
        acl_cache=RedisBackedACLCache(backend),
        access_index_cache=RedisBackedAccessIndexCache(backend),
    )
    return service, relationship_repo, acl_repo
+
+
def test_acl_allow() -> None:
    """An ACL allow entry matching subject/resource/action grants access."""
    service, _, _ = _service(
        policies=[],
        acl_entries=[
            ACLRecord(
                id=1,
                tenant_id=1,
                subject_type="user",
                subject_id="1",
                resource_type="doc",
                resource_id="doc123",
                action="read",
                effect="allow",
            )
        ],
    )

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1},
        action="read",
        resource={"resource_type": "doc", "resource_id": "doc123"},
    )

    assert result.decision.allowed is True
    assert result.decision.policy_id == "acl:1"
    assert any(
        step.step == "acl" and step.outcome == "allow" for step in result.decision.explain_trace
    )
+
+
def test_acl_deny() -> None:
    """An ACL deny entry matching subject/resource/action blocks access."""
    service, _, _ = _service(
        policies=[],
        acl_entries=[
            ACLRecord(
                id=2,
                tenant_id=1,
                subject_type="user",
                subject_id="1",
                resource_type="doc",
                resource_id="doc123",
                action="read",
                effect="deny",
            )
        ],
    )

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1},
        action="read",
        resource={"resource_type": "doc", "resource_id": "doc123"},
    )

    assert result.decision.allowed is False
    assert result.decision.policy_id == "acl:2"
    assert any(
        step.step == "acl" and step.outcome == "deny" for step in result.decision.explain_trace
    )
+
+
def test_acl_overrides_rbac_role_permission() -> None:
    """A role-scoped ACL deny takes precedence over an RBAC permission grant."""
    service, _, _ = _service(
        policies=[],
        acl_entries=[
            ACLRecord(
                id=3,
                tenant_id=1,
                subject_type="role",
                subject_id="manager",
                resource_type="doc",
                resource_id="doc123",
                action="read",
                effect="deny",
            )
        ],
        permissions=["read"],
    )

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1, "roles": ["manager"]},
        action="read",
        resource={"resource_type": "doc", "resource_id": "doc123"},
    )

    assert result.decision.allowed is False
    assert result.decision.policy_id == "acl:3"
+
+
def test_rbac_fallback_when_no_acl() -> None:
    """With no ACL entries, an RBAC role permission decides the request."""
    service, _, _ = _service(policies=[], acl_entries=[], permissions=["approve_payment"])

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1, "roles": ["manager"]},
        action="approve_payment",
        resource={"resource_type": "doc", "resource_id": "doc123"},
    )

    assert result.decision.allowed is True
    assert result.decision.policy_id == "rbac:role"
+
+
def test_abac_still_works_without_acl() -> None:
    """An attribute-based policy still decides when no ACL or RBAC grant applies."""
    policies = [
        PolicyRecord(
            id=1,
            definition=PolicyDefinition(
                action="approve_payment",
                effect="allow",
                priority=10,
                policy_id="policy:v1",
                conditions={"role": "manager", "max_amount": 1000},
            ),
        )
    ]
    service, _, _ = _service(policies=policies, acl_entries=[], permissions=[])

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1, "roles": ["manager"]},
        action="approve_payment",
        resource={"resource_type": "invoice", "resource_id": "inv1", "amount": 100},
    )

    assert result.decision.allowed is True
    assert result.decision.policy_id == "policy:v1"
+
+
def test_relationship_based_access() -> None:
    """A viewer_of relationship on the resource grants read access."""
    service, _, _ = _service(
        policies=[],
        acl_entries=[],
        relations=[
            RelationshipRecord(
                subject_type="user",
                subject_id="1",
                relation="viewer_of",
                object_type="doc",
                object_id="doc123",
            )
        ],
        permissions=[],
    )

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1},
        action="read",
        resource={"resource_type": "doc", "resource_id": "doc123"},
    )

    assert result.decision.allowed is True
    assert result.decision.policy_id == "relationship:index"
+
+
def test_batch_evaluation_with_acl() -> None:
    """Batch authorization applies per-resource ACL effects independently."""
    service, _, _ = _service(
        policies=[],
        acl_entries=[
            ACLRecord(
                id=1,
                tenant_id=1,
                subject_type="user",
                subject_id="1",
                resource_type="doc",
                resource_id="a",
                action="read",
                effect="allow",
            ),
            ACLRecord(
                id=2,
                tenant_id=1,
                subject_type="user",
                subject_id="1",
                resource_type="doc",
                resource_id="b",
                action="read",
                effect="deny",
            ),
        ],
    )

    results = service.authorize_batch(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1},
        items=[
            {"action": "read", "resource": {"resource_type": "doc", "resource_id": "a"}},
            {"action": "read", "resource": {"resource_type": "doc", "resource_id": "b"}},
        ],
    )

    assert [result.decision.allowed for result in results] == [True, False]
+
+
def test_explain_trace_includes_acl() -> None:
    """The decision's explain trace names the matched ACL entry."""
    service, _, _ = _service(
        policies=[],
        acl_entries=[
            ACLRecord(
                id=7,
                tenant_id=1,
                subject_type="user",
                subject_id="1",
                resource_type="doc",
                resource_id="doc123",
                action="read",
                effect="allow",
            ),
        ],
    )

    result = service.authorize(
        tenant_key="default",
        principal={"type": "api_key", "id": "test"},
        user={"id": 1},
        action="read",
        resource={"resource_type": "doc", "resource_id": "doc123"},
    )

    assert any("matched ACL entry" in step.detail for step in result.decision.explain_trace)
diff --git a/tests/test_admin_audit.py b/tests/test_admin_audit.py
new file mode 100644
index 0000000..e664eb0
--- /dev/null
+++ b/tests/test_admin_audit.py
@@ -0,0 +1,159 @@
+from __future__ import annotations
+
+import os
+from datetime import datetime, timedelta, timezone
+
+from fastapi.testclient import TestClient
+from jose import jwt
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
+def _client(database_url: str) -> TestClient:
+ os.environ["KEYNETRA_DATABASE_URL"] = database_url
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
+ reset_settings_cache()
+ initialize_database(database_url)
+ return TestClient(create_app())
+
+
+def _jwt_headers(*, tenant_key: str, role: str) -> dict[str, str]:
+ token = jwt.encode(
+ {
+ "sub": f"{role}-{tenant_key}",
+ "tenant_roles": {tenant_key: role},
+ },
+ "change-me",
+ algorithm="HS256",
+ )
+ return {"Authorization": f"Bearer {token}"}
+
+
+def test_viewer_can_list_but_cannot_mutate_management_api(tmp_path) -> None:
+ client = _client(f"sqlite+pysqlite:///{tmp_path / 'viewer.db'}")
+ admin_headers = {"X-API-Key": "testkey"}
+ assert (
+ client.post(
+ "/policies",
+ json={"action": "read", "effect": "allow", "priority": 10, "conditions": {}},
+ headers=admin_headers,
+ ).status_code
+ == 201
+ )
+
+ viewer_headers = _jwt_headers(tenant_key="tenant-a", role="viewer")
+ listed = client.get("/policies", headers=viewer_headers)
+ denied = client.post(
+ "/policies",
+ json={"action": "write", "effect": "allow", "priority": 20, "conditions": {}},
+ headers=viewer_headers,
+ )
+
+ assert listed.status_code == 200
+ assert denied.status_code == 403
+ assert denied.json()["error"]["message"] == "insufficient management role"
+
+
+def test_developer_role_can_mutate_management_api(tmp_path) -> None:
+ client = _client(f"sqlite+pysqlite:///{tmp_path / 'developer.db'}")
+ developer_headers = _jwt_headers(tenant_key="tenant-a", role="developer")
+
+ allowed = client.post(
+ "/relationships",
+ json={
+ "subject_type": "user",
+ "subject_id": "u1",
+ "relation": "member",
+ "object_type": "team",
+ "object_id": "t1",
+ },
+ headers=developer_headers,
+ )
+
+ assert allowed.status_code == 201
+
+
+def test_admin_required_for_global_management_writes(tmp_path) -> None:
+ client = _client(f"sqlite+pysqlite:///{tmp_path / 'admin.db'}")
+ developer_headers = _jwt_headers(tenant_key="tenant-a", role="developer")
+ admin_headers = _jwt_headers(tenant_key="tenant-a", role="admin")
+
+ denied = client.post("/roles", json={"name": "viewer"}, headers=developer_headers)
+ allowed = client.post("/roles", json={"name": "developer"}, headers=admin_headers)
+
+ assert denied.status_code == 403
+ assert denied.json()["error"]["message"] == "insufficient management role"
+ assert allowed.status_code == 201
+
+
+def test_audit_endpoints_support_filters_time_range_and_pagination(tmp_path) -> None:
+ client = _client(f"sqlite+pysqlite:///{tmp_path / 'audit.db'}")
+ admin_headers = {"X-API-Key": "testkey"}
+ viewer_headers = _jwt_headers(tenant_key="tenant-a", role="viewer")
+
+ assert (
+ client.post(
+ "/policies",
+ json={"action": "read", "effect": "allow", "priority": 10, "conditions": {}},
+ headers=admin_headers,
+ ).status_code
+ == 201
+ )
+ assert (
+ client.post(
+ "/check-access",
+ json={
+ "user": {"id": "u1"},
+ "action": "read",
+ "resource": {"id": "doc-1"},
+ "context": {},
+ },
+ headers=admin_headers,
+ ).status_code
+ == 200
+ )
+ assert (
+ client.post(
+ "/check-access",
+ json={
+ "user": {"id": "u2"},
+ "action": "write",
+ "resource": {"id": "doc-2"},
+ "context": {},
+ },
+ headers=admin_headers,
+ ).status_code
+ == 200
+ )
+
+ start_time = (datetime.now(timezone.utc) - timedelta(minutes=5)).isoformat()
+ end_time = (datetime.now(timezone.utc) + timedelta(minutes=5)).isoformat()
+
+ page_one = client.get(
+ "/audit",
+ params={"limit": 1, "start_time": start_time, "end_time": end_time},
+ headers=viewer_headers,
+ )
+ deny_only = client.get(
+ "/audit",
+ params={"decision": "deny", "user_id": "u2", "resource_id": "doc-2"},
+ headers=viewer_headers,
+ )
+ page_two = client.get(
+ "/audit",
+ params={"limit": 1, "cursor": page_one.json()["meta"]["next_cursor"]},
+ headers=viewer_headers,
+ )
+
+ assert page_one.status_code == 200
+ assert len(page_one.json()["data"]) == 1
+ assert page_one.json()["meta"]["next_cursor"]
+ assert page_two.status_code == 200
+ assert len(page_two.json()["data"]) == 1
+ assert deny_only.status_code == 200
+ assert len(deny_only.json()["data"]) == 1
+ assert deny_only.json()["data"][0]["decision"] == "DENY"
+ assert deny_only.json()["data"][0]["user"]["id"] == "u2"
diff --git a/tests/test_admin_login.py b/tests/test_admin_login.py
new file mode 100644
index 0000000..d41187d
--- /dev/null
+++ b/tests/test_admin_login.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+from jose import jwt
+from typer.testing import CliRunner
+
+from keynetra.cli import app
+from keynetra.config.settings import reset_settings_cache
+from keynetra.main import create_app
+
+
+def test_admin_login_with_username_password_issues_admin_jwt(monkeypatch) -> None:
+ monkeypatch.setenv("KEYNETRA_ADMIN_USERNAME", "admin")
+ monkeypatch.setenv("KEYNETRA_ADMIN_PASSWORD", "secret")
+ monkeypatch.setenv("KEYNETRA_JWT_SECRET", "jwt-secret")
+ monkeypatch.setenv("KEYNETRA_JWT_ALGORITHM", "HS256")
+ reset_settings_cache()
+
+ from fastapi.testclient import TestClient
+
+ client = TestClient(create_app())
+ response = client.post("/admin/login", json={"username": "admin", "password": "secret"})
+ assert response.status_code == 200
+ payload = response.json()["data"]
+ token = payload["access_token"]
+ claims = jwt.decode(token, "jwt-secret", algorithms=["HS256"])
+ assert claims["role"] == "admin"
+ assert claims["tenant_roles"]["default"] == "admin"
+
+
+def test_admin_login_rejects_invalid_credentials(monkeypatch) -> None:
+ monkeypatch.setenv("KEYNETRA_ADMIN_USERNAME", "admin")
+ monkeypatch.setenv("KEYNETRA_ADMIN_PASSWORD", "secret")
+ reset_settings_cache()
+
+ from fastapi.testclient import TestClient
+
+ client = TestClient(create_app())
+ response = client.post("/admin/login", json={"username": "admin", "password": "wrong"})
+ assert response.status_code == 401
+
+
+def test_cli_admin_login_command_calls_login_endpoint(monkeypatch) -> None:
+ called: dict[str, object] = {}
+
+ class _Response:
+ text = '{"data":{"access_token":"abc"}}'
+
+ def raise_for_status(self) -> None:
+ return None
+
+ def fake_post(url: str, json: dict[str, str], timeout: float, headers: dict[str, str]):
+ called["url"] = url
+ called["json"] = json
+ called["timeout"] = timeout
+ called["headers"] = headers
+ return _Response()
+
+ monkeypatch.setattr("keynetra.cli.httpx.post", fake_post)
+ runner = CliRunner()
+ result = runner.invoke(
+ app,
+ [
+ "admin-login",
+ "--username",
+ "admin",
+ "--password",
+ "secret",
+ "--url",
+ "http://localhost:8000/admin/login",
+ ],
+ )
+ assert result.exit_code == 0
+ assert called["json"] == {"username": "admin", "password": "secret"}
diff --git a/tests/test_api.py b/tests/test_api.py
new file mode 100644
index 0000000..7556ccf
--- /dev/null
+++ b/tests/test_api.py
@@ -0,0 +1,352 @@
+from __future__ import annotations
+
+from typing import Any
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
+def _normalize_response(payload: dict[str, Any]) -> dict[str, Any]:
+ normalized = dict(payload)
+ meta = dict(normalized.get("meta") or {})
+ if "request_id" in meta and meta["request_id"] is not None:
+ meta["request_id"] = ""
+ normalized["meta"] = meta
+ return normalized
+
+
+def test_health_ok() -> None:
+ client = TestClient(create_app())
+ resp = client.get("/health")
+ assert resp.status_code == 200
+ assert _normalize_response(resp.json()) == {
+ "data": {"status": "ok"},
+ "meta": {"request_id": "", "limit": None, "next_cursor": None, "extra": {}},
+ "error": None,
+ }
+
+
+def test_health_live_and_ready_ok(tmp_path) -> None:
+ import os
+
+ database_url = f"sqlite+pysqlite:///{tmp_path / 'health-ready.db'}"
+ os.environ["KEYNETRA_DATABASE_URL"] = database_url
+ os.environ.pop("KEYNETRA_REDIS_URL", None)
+ reset_settings_cache()
+ initialize_database(database_url)
+ client = TestClient(create_app())
+
+ live = client.get("/health/live")
+ ready = client.get("/health/ready")
+
+ assert live.status_code == 200
+ assert live.json()["data"]["status"] == "ok"
+ assert ready.status_code == 200
+ assert ready.json()["data"]["checks"]["database"]["status"] == "ok"
+ assert ready.json()["data"]["checks"]["redis"]["status"] == "not_configured"
+
+
+def test_check_access_rbac_permissions_allow() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1, "role": "employee", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"amount": 999999},
+ }
+ resp = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+ assert resp.status_code == 200
+ data = resp.json()["data"]
+ assert data["allowed"] is True
+ assert data["decision"] == "allow"
+ assert data["policy_id"] == "rbac:permissions"
+ assert data["revision"] == 1
+ assert isinstance(data["explain_trace"], list)
+ assert resp.json()["error"] is None
+
+
+def test_check_access_abac_policy_allow() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1, "role": "manager", "permissions": []},
+ "action": "approve_payment",
+ "resource": {"amount": 50000, "owner_id": 2},
+ }
+ resp = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+ assert resp.status_code == 200
+ data = resp.json()["data"]
+ assert data["allowed"] is True
+ assert data["decision"] == "allow"
+ assert data["policy_id"]
+ assert data["revision"] == 1
+
+
+def test_check_access_default_deny() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {"user": {"id": 1, "role": "employee"}, "action": "unknown", "resource": {}}
+ resp = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+ assert resp.status_code == 200
+ data = resp.json()["data"]
+ assert data["allowed"] is False
+ assert data["decision"] == "deny"
+ assert data["reason"]
+ assert isinstance(data["explain_trace"], list)
+
+
+def test_check_access_requires_auth() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ resp = client.post("/check-access", json={"user": {}, "action": "x", "resource": {}})
+ assert resp.status_code == 401
+ assert resp.json()["data"] is None
+ assert resp.json()["error"]["code"] == "unauthorized"
+
+
+def test_check_access_rate_limited() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_WINDOW_SECONDS"] = "60"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1},
+ "action": "approve_payment",
+ "resource": {"amount": 1},
+ "context": {},
+ }
+
+ first = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+ second = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+
+ assert first.status_code == 200
+ assert second.status_code == 429
+ assert second.json()["data"] is None
+ assert second.json()["error"]["code"] == "too_many_requests"
+
+
+def test_check_access_burst_is_throttled() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "2"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "2"
+ os.environ["KEYNETRA_RATE_LIMIT_WINDOW_SECONDS"] = "60"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1},
+ "action": "approve_payment",
+ "resource": {"amount": 1},
+ "context": {},
+ }
+ headers = {"X-API-Key": "testkey"}
+
+ first = client.post("/check-access", json=payload, headers=headers)
+ second = client.post("/check-access", json=payload, headers=headers)
+ third = client.post("/check-access", json=payload, headers=headers)
+
+ assert first.status_code == 200
+ assert second.status_code == 200
+ assert third.status_code == 429
+ assert third.json()["error"]["code"] == "too_many_requests"
+
+
+def test_check_access_rate_limit_is_global() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_WINDOW_SECONDS"] = "60"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1},
+ "action": "approve_payment",
+ "resource": {"amount": 1},
+ "context": {},
+ }
+
+ tenant_a = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+ tenant_b = client.post("/check-access", json=payload, headers={"X-API-Key": "testkey"})
+
+ assert tenant_a.status_code == 200
+ assert tenant_b.status_code == 429
+
+
+def test_check_access_rate_limit_uses_redis_backend(monkeypatch) -> None:
+ import os
+
+ class FakeRedis:
+ def __init__(self) -> None:
+ self.calls = 0
+
+ def eval(self, *args, **kwargs):
+ self.calls += 1
+ if self.calls == 1:
+ return [1, 0, 0]
+ return [0, 0, 60]
+
+ fake = FakeRedis()
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_WINDOW_SECONDS"] = "60"
+ reset_settings_cache()
+ monkeypatch.setattr("keynetra.config.rate_limit.get_redis", lambda: fake)
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1},
+ "action": "approve_payment",
+ "resource": {"amount": 1},
+ "context": {},
+ }
+ headers = {"X-API-Key": "testkey"}
+
+ first = client.post("/check-access", json=payload, headers=headers)
+ second = client.post("/check-access", json=payload, headers=headers)
+
+ assert first.status_code == 200
+ assert second.status_code == 429
+ assert fake.calls == 2
+
+
+def test_health_is_not_rate_limited() -> None:
+ import os
+
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1"
+ os.environ["KEYNETRA_RATE_LIMIT_WINDOW_SECONDS"] = "60"
+ reset_settings_cache()
+ client = TestClient(create_app())
+
+ first = client.get("/health")
+ second = client.get("/health")
+
+ assert first.status_code == 200
+ assert second.status_code == 200
+
+
+def test_check_access_batch_supports_mixed_authorization_results() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1, "role": "employee", "permissions": ["approve_payment"]},
+ "items": [
+ {"action": "approve_payment", "resource": {"amount": 100}},
+ {"action": "unknown_action", "resource": {}},
+ ],
+ }
+
+ response = client.post("/check-access-batch", json=payload, headers={"X-API-Key": "testkey"})
+
+ assert response.status_code == 200
+ data = response.json()["data"]
+ assert data["results"] == [
+ {"action": "approve_payment", "allowed": True, "revision": 1},
+ {"action": "unknown_action", "allowed": False, "revision": 1},
+ ]
+ assert data["revision"] == 1
+
+
+def test_simulate_endpoint_returns_trace_and_failed_conditions_shape() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ payload = {
+ "user": {"id": 1, "role": "employee"},
+ "action": "approve_payment",
+ "resource": {"amount": 999999, "owner_id": 2},
+ "context": {},
+ }
+
+ response = client.post("/simulate", json=payload, headers={"X-API-Key": "testkey"})
+
+ assert response.status_code == 200
+ data = response.json()["data"]
+ assert data["decision"] in {"allow", "deny"}
+ assert isinstance(data["matched_policies"], list)
+ assert isinstance(data["explain_trace"], list)
+ assert isinstance(data["failed_conditions"], list)
+ assert isinstance(data["revision"], int)
+ assert data["revision"] >= 1
+
+
+def test_simulate_policy_and_impact_analysis_endpoints_work_for_admin_api_key() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1000"
+ os.environ["KEYNETRA_RATE_LIMIT_WINDOW_SECONDS"] = "60"
+ reset_settings_cache()
+ client = TestClient(create_app())
+ headers = {"X-API-Key": "testkey"}
+
+ simulation = client.post(
+ "/simulate-policy",
+ json={
+ "simulate": {"policy_change": """
+allow:
+ action: share_document
+ priority: 1
+ policy_key: share-admin
+ when:
+ role: admin
+"""},
+ "request": {
+ "user": {"id": 1, "role": "admin", "roles": ["admin"]},
+ "action": "share_document",
+ "resource": {"resource_type": "document", "resource_id": "doc-1"},
+ "context": {},
+ },
+ },
+ headers=headers,
+ )
+ assert simulation.status_code == 200
+ sim_data = simulation.json()["data"]
+ assert sim_data["decision_before"]["decision"] == "deny"
+ assert sim_data["decision_after"]["decision"] == "allow"
+ assert sim_data["decision_after"]["allowed"] is True
+
+ impact = client.post(
+ "/impact-analysis",
+ json={"policy_change": """
+allow:
+ action: share_document
+ priority: 1
+ policy_key: share-admin
+ when:
+ role: admin
+"""},
+ headers=headers,
+ )
+ assert impact.status_code == 200
+ impact_data = impact.json()["data"]
+ assert isinstance(impact_data["gained_access"], list)
+ assert isinstance(impact_data["lost_access"], list)
diff --git a/tests/test_api_contract.py b/tests/test_api_contract.py
new file mode 100644
index 0000000..099ec69
--- /dev/null
+++ b/tests/test_api_contract.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.main import create_app
+
+CONTRACT_PATH = (
+ Path(__file__).resolve().parents[1] / "contracts" / "openapi" / "keynetra-v0.1.0.yaml"
+)
+
+
+def _normalize_request_id(payload: dict[str, Any]) -> dict[str, Any]:
+ normalized = dict(payload)
+ meta = dict(normalized.get("meta") or {})
+ if meta.get("request_id") is not None:
+ meta["request_id"] = ""
+ normalized["meta"] = meta
+ return normalized
+
+
+def test_openapi_contract_includes_stable_paths_and_schemas() -> None:
+ contract = CONTRACT_PATH.read_text(encoding="utf-8")
+
+ assert "openapi: 3.1.0" in contract
+ assert "/health:" in contract
+ assert "/check-access:" in contract
+ assert "/simulate:" in contract
+ assert "/check-access-batch:" in contract
+ assert "/simulate-policy:" in contract
+ assert "/impact-analysis:" in contract
+ assert "SuccessResponse_dict_str__str__" in contract
+ assert "SuccessResponse_AccessDecisionResponse_" in contract
+ assert "APIKeyHeader" in contract
+ assert "HTTPBearer" in contract
+
+
+def test_health_response_matches_snapshot() -> None:
+ client = TestClient(create_app())
+
+ response = client.get("/health")
+
+ assert response.status_code == 200
+ assert _normalize_request_id(response.json()) == {
+ "data": {"status": "ok"},
+ "meta": {"request_id": "", "limit": None, "next_cursor": None, "extra": {}},
+ "error": None,
+ }
+
+
+def test_check_access_response_matches_snapshot() -> None:
+ import os
+
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
+ reset_settings_cache()
+ client = TestClient(create_app())
+
+ response = client.post(
+ "/check-access",
+ json={
+ "user": {"id": 1, "role": "employee", "permissions": ["approve_payment"]},
+ "action": "approve_payment",
+ "resource": {"amount": 5},
+ "context": {},
+ },
+ headers={"X-API-Key": "testkey"},
+ )
+
+ assert response.status_code == 200
+ body = _normalize_request_id(response.json())
+ assert body == {
+ "data": {
+ "allowed": True,
+ "decision": "allow",
+ "matched_policies": ["rbac:permissions"],
+ "reason": "explicit permission grant",
+ "policy_id": "rbac:permissions",
+ "revision": 1,
+ "explain_trace": [
+ {
+ "step": "start",
+ "outcome": "continue",
+ "detail": "evaluate action=approve_payment",
+ "policy_id": None,
+ },
+ {
+ "step": "rbac_permissions",
+ "outcome": "matched",
+ "detail": "explicit permission grant matched input action",
+ "policy_id": "rbac:permissions",
+ },
+ {
+ "step": "final",
+ "outcome": "allow",
+ "detail": "granted by explicit permission",
+ "policy_id": "rbac:permissions",
+ },
+ ],
+ },
+ "meta": {"request_id": "", "limit": None, "next_cursor": None, "extra": {}},
+ "error": None,
+ }
diff --git a/tests/test_auth_model.py b/tests/test_auth_model.py
new file mode 100644
index 0000000..d861786
--- /dev/null
+++ b/tests/test_auth_model.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.engine.keynetra_engine import AuthorizationInput
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+from keynetra.modeling import (
+ compile_authorization_schema,
+ parse_authorization_schema,
+ validate_authorization_schema,
+)
+
+SCHEMA = """
+model schema 1
+type user
+type document
+relations
+owner: [user]
+viewer: [user]
+permissions
+read = viewer or owner
+"""
+
+
+def test_authorization_schema_parsing_and_compilation() -> None:
+ schema = parse_authorization_schema(SCHEMA)
+ validate_authorization_schema(schema)
+ graph = compile_authorization_schema(schema)
+
+ decision = graph.permissions["read"].name
+ assert decision == "read"
+
+ runtime = graph.to_dict()
+ assert runtime["version"] == 1
+ assert runtime["permissions"]["read"]["kind"] == "or"
+
+ from keynetra.engine.model_graph.permission_graph import CompiledPermissionGraph
+
+ compiled = CompiledPermissionGraph(tenant_key="default", model=graph)
+ allowed = compiled.evaluate(
+ AuthorizationInput(
+ user={
+ "id": 1,
+ "relations": [
+ {"relation": "viewer", "object_type": "document", "object_id": "doc-1"}
+ ],
+ },
+ action="read",
+ resource={"resource_type": "document", "resource_id": "doc-1"},
+ )
+ )
+ assert allowed.outcome == "allow"
+
+
+def test_auth_model_route_round_trip(tmp_path) -> None:
+ database_url = f"sqlite+pysqlite:///{tmp_path / 'auth-model.db'}"
+ os.environ["KEYNETRA_DATABASE_URL"] = database_url
+ os.environ["KEYNETRA_API_KEYS"] = "testkey"
+ os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
+ os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1000"
+ reset_settings_cache()
+ initialize_database(database_url)
+ client = TestClient(create_app())
+
+ created = client.post("/auth-model", json={"schema": SCHEMA}, headers={"X-API-Key": "testkey"})
+ assert created.status_code == 201
+ assert created.json()["data"]["schema"].strip().startswith("model schema 1")
+
+ fetched = client.get("/auth-model", headers={"X-API-Key": "testkey"})
+ assert fetched.status_code == 200
+ assert fetched.json()["data"]["compiled"]["version"] == 1
diff --git a/tests/test_bootstrap_and_config_coverage.py b/tests/test_bootstrap_and_config_coverage.py
new file mode 100644
index 0000000..f8e19ec
--- /dev/null
+++ b/tests/test_bootstrap_and_config_coverage.py
@@ -0,0 +1,155 @@
+from __future__ import annotations
+
+import json
+import os
+from pathlib import Path
+
+import pytest
+from fastapi import FastAPI
+
+from keynetra.api.main import (
+ _bootstrap_file_backed_model,
+ _bootstrap_file_backed_policies,
+ _start_policy_subscriber,
+)
+from keynetra.config.config_loader import (
+ KeyNetraFileConfig,
+ apply_config_to_environment,
+ load_config_file,
+)
+from keynetra.headless import KeyNetra, _parse_descriptor
+
+
+def test_config_loader_handles_invalid_shapes_and_unsupported_extension(tmp_path: Path) -> None:
+ invalid_root = tmp_path / "invalid.json"
+ invalid_root.write_text(json.dumps(123), encoding="utf-8")
+ with pytest.raises(ValueError, match="configuration root must be an object"):
+ load_config_file(invalid_root)
+
+ unsupported = tmp_path / "config.ini"
+ unsupported.write_text("x=y", encoding="utf-8")
+ with pytest.raises(ValueError, match="unsupported config file format"):
+ load_config_file(unsupported)
+
+
+def test_apply_config_to_environment_sets_all_fields(monkeypatch) -> None:
+ cfg = KeyNetraFileConfig(
+ database_url="sqlite+pysqlite:///./x.db",
+ redis_url="redis://localhost:6379/0",
+ policy_paths=("p1", "p2"),
+ model_paths=("m1",),
+ seed_data=True,
+ server_host="127.0.0.1",
+ server_port=8089,
+ )
+ monkeypatch.delenv("KEYNETRA_DATABASE_URL", raising=False)
+ apply_config_to_environment(cfg)
+ assert os.environ["KEYNETRA_DATABASE_URL"] == "sqlite+pysqlite:///./x.db"
+ assert os.environ["KEYNETRA_REDIS_URL"] == "redis://localhost:6379/0"
+ assert os.environ["KEYNETRA_POLICY_PATHS"] == "p1,p2"
+ assert os.environ["KEYNETRA_MODEL_PATHS"] == "m1"
+ assert os.environ["KEYNETRA_AUTO_SEED_SAMPLE_DATA"] == "true"
+ assert os.environ["KEYNETRA_SERVER_HOST"] == "127.0.0.1"
+ assert os.environ["KEYNETRA_SERVER_PORT"] == "8089"
+
+
+def test_parse_descriptor_handles_colon_and_non_colon_values() -> None:
+ assert _parse_descriptor("user:123") == ("user", "123")
+ assert _parse_descriptor("resource") == ("resource", "resource")
+ assert _parse_descriptor(":abc") == ("unknown", "abc")
+
+
+def test_keynetra_load_policies_requires_non_empty(tmp_path: Path) -> None:
+ cfg = tmp_path / "keynetra.yaml"
+ cfg.write_text("{}", encoding="utf-8")
+ app = KeyNetra.from_config(cfg)
+ with pytest.raises(ValueError, match="no policies found"):
+ app.load_policies(tmp_path / "empty")
+
+
+def test_keynetra_load_model_and_check_access_string_payloads(tmp_path: Path) -> None:
+ policy_dir = tmp_path / "policies"
+ policy_dir.mkdir()
+ (policy_dir / "allow.yaml").write_text(
+ "allow:\n action: read\n priority: 1\n when:\n role: admin\n",
+ encoding="utf-8",
+ )
+ cfg = tmp_path / "keynetra.yaml"
+ cfg.write_text(f"policies:\n path: {policy_dir}\n", encoding="utf-8")
+ engine = KeyNetra.from_config(cfg)
+
+ model = tmp_path / "model.yaml"
+ model.write_text(
+ "model:\n type: document\n relations:\n owner: user\n permissions:\n read: owner\n",
+ encoding="utf-8",
+ )
+ engine.load_model(model)
+ decision = engine.check_access(
+ subject="user:1",
+ action="read",
+ resource="document:doc-1",
+ context={},
+ )
+ assert decision.decision in {"allow", "deny"}
+
+
+def test_bootstrap_model_and_policy_helpers_handle_success_and_errors(monkeypatch) -> None:
+ class _Settings:
+ def parsed_model_paths(self) -> list[str]:
+ return ["examples/auth-model.yaml"]
+
+ def load_policies(self) -> list[dict[str, object]]:
+ return [{"action": "read", "effect": "allow", "priority": 10, "conditions": {}}]
+
+ monkeypatch.setattr("keynetra.api.main.get_settings", lambda: _Settings())
+ _bootstrap_file_backed_model()
+ _bootstrap_file_backed_policies()
+
+ class _ErrorSettings(_Settings):
+ def load_policies(self) -> list[dict[str, object]]:
+ raise RuntimeError("boom")
+
+ monkeypatch.setattr("keynetra.api.main.get_settings", lambda: _ErrorSettings())
+ _bootstrap_file_backed_policies()
+
+
+def test_start_policy_subscriber_handles_none_and_message(monkeypatch) -> None:
+ class _Settings:
+ policy_events_channel = "policy-events"
+
+ class _FakePubSub:
+ def __init__(self) -> None:
+ self._subscribed = False
+
+ def subscribe(self, channel: str) -> None:
+ self._subscribed = channel == "policy-events"
+
+ def listen(self):
+ if not self._subscribed:
+ return
+ yield {"type": "message", "data": json.dumps({"tenant_key": "acme"})}
+ yield {"type": "done"}
+
+ class _FakeRedis:
+ def pubsub(self) -> _FakePubSub:
+ return _FakePubSub()
+
+ class _Cache:
+ def __init__(self) -> None:
+ self.invalidated: list[str] = []
+
+ def invalidate(self, tenant_key: str) -> None:
+ self.invalidated.append(tenant_key)
+
+ cache = _Cache()
+ monkeypatch.setattr("keynetra.api.main.get_settings", lambda: _Settings())
+ monkeypatch.setattr("keynetra.api.main.build_policy_cache", lambda _redis: cache)
+ monkeypatch.setattr("keynetra.api.main.get_redis", lambda: None)
+ _start_policy_subscriber(FastAPI())
+
+ monkeypatch.setattr("keynetra.api.main.get_redis", lambda: _FakeRedis())
+ app = FastAPI()
+ _start_policy_subscriber(app)
+ assert hasattr(app.state, "policy_subscriber")
+ app.state.policy_subscriber.join(timeout=1)
+ assert "acme" in cache.invalidated
diff --git a/tests/test_cli_benchmark.py b/tests/test_cli_benchmark.py
new file mode 100644
index 0000000..397ff48
--- /dev/null
+++ b/tests/test_cli_benchmark.py
@@ -0,0 +1,41 @@
+from __future__ import annotations
+
+import asyncio
+
+import pytest
+
+pytest.importorskip("typer")
+from typer.testing import CliRunner
+
+from keynetra.cli import app
+
+
+class _FakeResponse:
+ status_code = 200
+
+ def raise_for_status(self) -> None:
+ return None
+
+
+async def _fake_post(self, *args, **kwargs) -> _FakeResponse: # type: ignore[override]
+ await asyncio.sleep(0)
+ return _FakeResponse()
+
+
+def test_benchmark_command(monkeypatch) -> None:
+ monkeypatch.setattr("keynetra.cli.httpx.AsyncClient.post", _fake_post)
+ runner = CliRunner()
+ result = runner.invoke(
+ app,
+ [
+ "benchmark",
+ "--api-key",
+ "testkey",
+ "--requests",
+ "2",
+ "--concurrency",
+ "1",
+ ],
+ )
+ assert result.exit_code == 0
+ assert "p50(ms)" in result.stdout
diff --git a/tests/test_cli_coverage_branches.py b/tests/test_cli_coverage_branches.py
new file mode 100644
index 0000000..71974d0
--- /dev/null
+++ b/tests/test_cli_coverage_branches.py
@@ -0,0 +1,128 @@
+from __future__ import annotations
+
+import json
+import os
+
+from typer.testing import CliRunner
+
+from keynetra.cli import app
+from keynetra.config.settings import get_settings, reset_settings_cache
+
+
def test_compile_policies_reports_missing_paths(monkeypatch) -> None:
    """compile-policies exits with code 2 when no policy paths are configured."""
    # Settings stub whose parsed_policy_paths() yields nothing.
    fake_settings = type("S", (), {"parsed_policy_paths": lambda self: []})()
    monkeypatch.setattr("keynetra.cli.get_settings", lambda: fake_settings)  # type: ignore[misc]
    outcome = CliRunner().invoke(app, ["compile-policies"])
    assert outcome.exit_code == 2
+
+
def test_compile_policies_reports_missing_definitions(monkeypatch, tmp_path) -> None:
    """An existing but empty policy directory is rejected with exit code 2."""
    policy_dir = tmp_path / "empty-policies"
    policy_dir.mkdir()
    outcome = CliRunner().invoke(app, ["compile-policies", "--path", str(policy_dir)])
    assert outcome.exit_code == 2
+
+
def test_doctor_core_failure_returns_exit_1(monkeypatch) -> None:
    """A failing core doctor report maps to exit code 1 with JSON output."""
    failing_report = {"ok": False, "errors": ["x"]}
    monkeypatch.setattr("keynetra.cli.run_core_doctor", lambda settings: failing_report)

    outcome = CliRunner().invoke(app, ["doctor", "--service", "core"])

    assert outcome.exit_code == 1
    assert '"ok": false' in outcome.stdout.lower()
+
+
def test_doctor_invalid_service_is_rejected() -> None:
    """Unknown --service values are rejected by option validation (exit 2)."""
    outcome = CliRunner().invoke(app, ["doctor", "--service", "invalid"])
    assert outcome.exit_code == 2
+
+
def test_benchmark_validation_and_empty_samples(monkeypatch) -> None:
    """benchmark validates its numeric options and fails cleanly with no samples."""
    runner = CliRunner()

    # --requests and --concurrency must both be at least 1.
    assert runner.invoke(app, ["benchmark", "--api-key", "k", "--requests", "0"]).exit_code == 2
    assert (
        runner.invoke(app, ["benchmark", "--api-key", "k", "--concurrency", "0"]).exit_code == 2
    )

    async def _no_samples(*args, **kwargs):
        return []

    # With zero successful samples the command reports an error and exits 1.
    monkeypatch.setattr("keynetra.cli._run_benchmark", _no_samples)
    empty = runner.invoke(
        app, ["benchmark", "--api-key", "k", "--requests", "1", "--concurrency", "1"]
    )
    assert empty.exit_code == 1
    assert "No successful samples collected." in empty.stdout
+
+
def test_acl_add_list_remove_commands(tmp_path) -> None:
    """End-to-end `acl add` / `acl list` / `acl remove` round-trip via the CLI.

    Uses a throwaway SQLite database so the commands hit real storage.
    """
    database_url = f"sqlite+pysqlite:///{tmp_path / 'acl-cli.db'}"
    # NOTE(review): mutates process-wide env without cleanup — later tests in the
    # session inherit this DATABASE_URL; consider monkeypatch.setenv instead.
    os.environ["KEYNETRA_DATABASE_URL"] = database_url
    # Both caches are cleared so the new database URL takes effect.
    reset_settings_cache()
    get_settings.cache_clear()
    runner = CliRunner()

    created = runner.invoke(
        app,
        [
            "acl",
            "add",
            "--subject-type",
            "user",
            "--subject-id",
            "u1",
            "--resource-type",
            "document",
            "--resource-id",
            "doc-1",
            "--action",
            "read",
            "--effect",
            "allow",
        ],
    )
    assert created.exit_code == 0
    # The command prints a JSON payload containing the new entry's id.
    payload = json.loads(created.stdout)
    acl_id = payload["acl_id"]

    listed = runner.invoke(
        app,
        [
            "acl",
            "list",
            "--resource-type",
            "document",
            "--resource-id",
            "doc-1",
        ],
    )
    assert listed.exit_code == 0
    entries = json.loads(listed.stdout)
    assert entries and entries[0]["id"] == acl_id

    removed = runner.invoke(app, ["acl", "remove", "--acl-id", str(acl_id)])
    assert removed.exit_code == 0
    removed_payload = json.loads(removed.stdout)
    assert removed_payload["acl_id"] == acl_id
+
+
def test_main_entrypoint_invokes_typer_app(monkeypatch) -> None:
    """keynetra.cli.main() must delegate to the module-level Typer app."""
    calls: list[bool] = []

    # Replace the Typer app with a recorder; main() looks it up at call time.
    monkeypatch.setattr("keynetra.cli.app", lambda: calls.append(True))

    from keynetra.cli import main

    main()
    assert calls == [True]
diff --git a/tests/test_compiled_policies.py b/tests/test_compiled_policies.py
new file mode 100644
index 0000000..5c20224
--- /dev/null
+++ b/tests/test_compiled_policies.py
@@ -0,0 +1,60 @@
+from __future__ import annotations
+
+from keynetra.engine.compiled.decision_graph import COMPILED_POLICY_STORE
+from keynetra.engine.keynetra_engine import AuthorizationInput, KeyNetraEngine
+
+
def test_compiled_policy_execution_uses_graph() -> None:
    """Decisions flow through the compiled decision graph and record a trace step."""
    engine = KeyNetraEngine(
        [
            {
                "action": "approve_payment",
                "effect": "allow",
                "priority": 10,
                "policy_id": "pay:v1",
                "conditions": {"role": "manager", "max_amount": 1000},
            }
        ]
    )

    decision = engine.decide(
        AuthorizationInput(
            user={"id": 1, "roles": ["manager"]},
            action="approve_payment",
            resource={"amount": 100},
        )
    )

    assert decision.allowed is True
    assert decision.policy_id == "pay:v1"
    # The explain trace must contain the graph-evaluation step by name.
    assert any(step.step == "policy_graph" for step in decision.explain_trace)
    # White-box check: the engine's private compiled graph agrees with decide().
    assert (
        engine._compiled_graph.evaluate(
            AuthorizationInput(
                user={"roles": ["manager"]}, action="approve_payment", resource={"amount": 100}
            )
        ).outcome
        == "allow"
    )
+
+
def test_compiled_graph_store_keeps_tenant_graphs() -> None:
    """The global store caches compiled graphs per (tenant_key, version) pair."""
    # Start from a clean slate for the "default" tenant.
    COMPILED_POLICY_STORE.invalidate("default")
    engine = KeyNetraEngine(
        [
            {
                "action": "read",
                "effect": "allow",
                "priority": 1,
                "conditions": {},
                "policy_id": "read:v1",
            }
        ]
    )
    COMPILED_POLICY_STORE.set("default", 1, engine._compiled_graph)

    # The stored graph round-trips and still evaluates identically.
    stored = COMPILED_POLICY_STORE.get("default", 1)
    assert stored is not None
    assert (
        stored.evaluate(AuthorizationInput(user={}, action="read", resource={})).outcome == "allow"
    )
diff --git a/tests/test_consistency_revisions.py b/tests/test_consistency_revisions.py
new file mode 100644
index 0000000..73f337a
--- /dev/null
+++ b/tests/test_consistency_revisions.py
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
# Minimal authorization-model schema (DSL text) shared by the test below:
# documents have an `owner` relation of users, and `read` requires ownership.
SCHEMA = """
model schema 1
type user
type document
relations
owner: [user]
permissions
read = owner
"""
+
+
def _share_check(client: TestClient, headers: dict[str, str]):
    """Issue the canonical share_document /check-access request used repeatedly below."""
    return client.post(
        "/check-access",
        json={
            "user": {"id": 1, "roles": ["member"]},
            "action": "share_document",
            "resource": {"resource_type": "document", "resource_id": "doc-1"},
            "context": {},
        },
        headers=headers,
    )


def test_revision_token_increments_across_model_and_acl_changes(tmp_path) -> None:
    """The revision token increments on each auth-model or ACL write.

    Revision starts at 1, becomes 2 after creating the auth model, and 3 after
    adding an ACL entry; the check-access payload is identical each time.
    """
    database_url = f"sqlite+pysqlite:///{tmp_path / 'revisions.db'}"
    os.environ["KEYNETRA_DATABASE_URL"] = database_url
    os.environ["KEYNETRA_API_KEYS"] = "testkey"
    os.environ["KEYNETRA_POLICIES_JSON"] = "[]"
    # Generous rate limits so the repeated checks below are never throttled.
    os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
    os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1000"
    reset_settings_cache()
    initialize_database(database_url)
    client = TestClient(create_app())
    headers = {"X-API-Key": "testkey"}

    before = _share_check(client, headers)
    assert before.status_code == 200
    assert before.json()["data"]["revision"] == 1
    assert before.json()["data"]["allowed"] is False

    model_created = client.post("/auth-model", json={"schema": SCHEMA}, headers=headers)
    assert model_created.status_code == 201

    after_model = _share_check(client, headers)
    assert after_model.status_code == 200
    assert after_model.json()["data"]["revision"] == 2
    assert after_model.json()["data"]["allowed"] is False

    acl_created = client.post(
        "/acl",
        json={
            "subject_type": "user",
            "subject_id": "1",
            "resource_type": "document",
            "resource_id": "doc-1",
            "action": "share_document",
            "effect": "allow",
        },
        headers=headers,
    )
    assert acl_created.status_code == 201

    after_acl = _share_check(client, headers)
    assert after_acl.status_code == 200
    assert after_acl.json()["data"]["revision"] == 3
diff --git a/tests/test_doctor.py b/tests/test_doctor.py
new file mode 100644
index 0000000..60227b6
--- /dev/null
+++ b/tests/test_doctor.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import os
+
+import pytest
+from sqlalchemy import create_engine, text
+
+pytest.importorskip("typer")
+from typer.testing import CliRunner
+
+from keynetra.cli import app
+from keynetra.config.settings import Settings, reset_settings_cache
+from keynetra.services.doctor import run_core_doctor
+
+
+class _FakeRedis:
+ def ping(self) -> bool:
+ return True
+
+
def _set_core_env(database_url: str) -> None:
    """Point the core service at *database_url* with a local Redis and API key."""
    os.environ.update(
        {
            "KEYNETRA_DATABASE_URL": database_url,
            "KEYNETRA_REDIS_URL": "redis://localhost:6379/0",
            "KEYNETRA_API_KEYS": "test-key",
        }
    )
    # Settings are cached per-process; reset so the env above takes effect.
    reset_settings_cache()
+
+
def _prepare_alembic_version(database_url: str, revision: str) -> None:
    """Seed the alembic_version table so migration checks see *revision* as current."""
    engine = create_engine(database_url, future=True)
    with engine.begin() as connection:
        ddl = "CREATE TABLE IF NOT EXISTS alembic_version (version_num VARCHAR(32) NOT NULL)"
        connection.execute(text(ddl))
        # Alembic stores exactly one current revision: clear, then insert.
        connection.execute(text("DELETE FROM alembic_version"))
        connection.execute(
            text("INSERT INTO alembic_version (version_num) VALUES (:revision)"),
            {"revision": revision},
        )
+
+
def test_run_core_doctor_reports_all_checks_healthy(
    tmp_path: str, monkeypatch: pytest.MonkeyPatch
) -> None:
    """run_core_doctor reports ok=True when env, DB, Redis and migrations all pass."""
    # NOTE(review): pytest's tmp_path fixture is a pathlib.Path, not str — the
    # annotation is inaccurate (harmless here, since it is only interpolated).
    database_url = f"sqlite+pysqlite:///{tmp_path}/core-doctor.db"
    _set_core_env(database_url)
    # Pin the migration revision the doctor expects to be current.
    _prepare_alembic_version(database_url, "20260405_000008")
    monkeypatch.setattr("keynetra.services.doctor.get_redis", lambda: _FakeRedis())

    result = run_core_doctor(Settings())

    assert result["service"] == "core"
    assert result["ok"] is True
    # Every individual check must also be green.
    assert {check["name"]: check["ok"] for check in result["checks"]} == {
        "env_variables": True,
        "database": True,
        "redis": True,
        "migrations": True,
    }
+
+
def test_cli_doctor_exits_nonzero_when_core_is_not_ready(
    tmp_path: str, monkeypatch: pytest.MonkeyPatch
) -> None:
    """`doctor --service core` exits 1 when Redis and API keys are unconfigured."""
    database_url = f"sqlite+pysqlite:///{tmp_path}/core-not-ready.db"
    os.environ["KEYNETRA_DATABASE_URL"] = database_url
    # Deliberately remove the settings the doctor requires.
    os.environ.pop("KEYNETRA_REDIS_URL", None)
    os.environ.pop("KEYNETRA_API_KEYS", None)
    os.environ["KEYNETRA_JWT_SECRET"] = "change-me"
    reset_settings_cache()
    monkeypatch.setattr("keynetra.services.doctor.get_redis", lambda: None)

    runner = CliRunner()
    result = runner.invoke(app, ["doctor", "--service", "core"])

    assert result.exit_code == 1
    assert '"service": "core"' in result.output
diff --git a/tests/test_engine.py b/tests/test_engine.py
new file mode 100644
index 0000000..4825991
--- /dev/null
+++ b/tests/test_engine.py
@@ -0,0 +1,110 @@
+from __future__ import annotations
+
+from keynetra.engine.keynetra_engine import AuthorizationInput, KeyNetraEngine
+
+
def test_engine_is_deterministic_for_same_structured_input() -> None:
    """The same structured input must always yield an identical decision object."""
    engine = KeyNetraEngine(
        [
            {
                "action": "approve_payment",
                "effect": "allow",
                "priority": 10,
                "policy_id": "approve:v1",
                "conditions": {"role": "manager", "max_amount": 1000},
            }
        ]
    )
    authorization_input = AuthorizationInput(
        user={"id": 7, "role": "manager", "permissions": []},
        action="approve_payment",
        resource={"amount": 100},
        context={"current_time": "09:30"},
    )

    # Two evaluations of the same input: full structural equality required.
    first = engine.decide(authorization_input)
    second = engine.decide(authorization_input)

    assert first == second
    assert first.allowed is True
    assert first.policy_id == "approve:v1"
    assert first.explain_trace[-1].outcome == "allow"
+
+
def test_engine_time_range_requires_explicit_context() -> None:
    """A time_range policy denies with an explicit reason when current_time is absent."""
    engine = KeyNetraEngine(
        [
            {
                "action": "deploy",
                "effect": "allow",
                "priority": 10,
                "policy_id": "deploy:v1",
                "conditions": {"time_range": {"start": "09:00", "end": "17:00"}},
            }
        ]
    )

    # Empty context: the engine must not fall back to wall-clock time.
    decision = engine.decide(
        AuthorizationInput(
            user={"id": 1, "role": "ops"},
            action="deploy",
            resource={},
            context={},
        )
    )

    assert decision.allowed is False
    assert decision.reason == "missing context.current_time"
    assert decision.policy_id is None
+
+
def test_engine_has_relation_uses_explicit_input_only() -> None:
    """has_relation conditions match only relations carried on the input itself."""
    engine = KeyNetraEngine(
        [
            {
                "action": "view_team",
                "effect": "allow",
                "priority": 10,
                "policy_id": "team-member:v1",
                "conditions": {
                    "has_relation": {
                        "relation": "member_of",
                        "object_type": "team",
                        # The object id is resolved from this resource attribute.
                        "object_id_from_resource": "team_id",
                    }
                },
            }
        ]
    )

    decision = engine.decide(
        AuthorizationInput(
            user={
                "id": 4,
                # The matching relation tuple is supplied inline on the user.
                "relations": [
                    {
                        "subject_type": "user",
                        "subject_id": "4",
                        "relation": "member_of",
                        "object_type": "team",
                        "object_id": "red",
                    }
                ],
            },
            action="view_team",
            resource={"team_id": "red"},
        )
    )

    assert decision.allowed is True
    assert decision.policy_id == "team-member:v1"
+
+
def test_engine_trace_alias_remains_available() -> None:
    """Legacy aliases (.decision / .evaluated_rules) still mirror the trace."""
    engine = KeyNetraEngine([{"action": "read", "effect": "deny", "priority": 1, "conditions": {}}])

    outcome = engine.decide(AuthorizationInput(user={}, action="read", resource={}))

    assert outcome.decision == "deny"
    assert isinstance(outcome.evaluated_rules, list)
    assert outcome.evaluated_rules[-1]["outcome"] == "deny"
diff --git a/tests/test_file_loaders_coverage.py b/tests/test_file_loaders_coverage.py
new file mode 100644
index 0000000..76cf273
--- /dev/null
+++ b/tests/test_file_loaders_coverage.py
@@ -0,0 +1,133 @@
+from __future__ import annotations
+
+import json
+from pathlib import Path
+
+import pytest
+
+from keynetra.config.file_loaders import (
+ load_authorization_model_from_file,
+ load_authorization_model_from_paths,
+ load_policies_from_file,
+ load_policies_from_paths,
+)
+
+
def test_load_policies_from_paths_supports_direct_file_path(tmp_path: Path) -> None:
    """A path pointing at a single policy file (not a directory) is accepted."""
    policy_file = tmp_path / "policy.yaml"
    policy_file.write_text(
        "allow:\n action: read\n priority: 10\n when:\n role: admin\n",
        encoding="utf-8",
    )

    loaded = load_policies_from_paths([str(policy_file)])

    assert len(loaded) == 1
    assert loaded[0]["action"] == "read"
+
+
def test_load_policies_from_file_rejects_unsupported_extension(tmp_path: Path) -> None:
    """Policy files with an unknown extension raise a descriptive ValueError."""
    unsupported = tmp_path / "policies.txt"
    unsupported.write_text("not supported", encoding="utf-8")

    with pytest.raises(ValueError, match="unsupported policy format"):
        load_policies_from_file(unsupported)
+
+
def test_load_policies_from_file_rejects_invalid_payload_and_policy_shapes(tmp_path: Path) -> None:
    """Each malformed policy shape fails with its own specific error message."""
    # Payload is a mapping without any recognizable policy block.
    invalid = tmp_path / "invalid.yaml"
    invalid.write_text('value: "missing action and effect"', encoding="utf-8")
    with pytest.raises(ValueError, match="invalid policy payload"):
        load_policies_from_file(invalid)

    # Effect block must be a mapping, not a list.
    bad_block = tmp_path / "bad_block.yaml"
    bad_block.write_text("allow: []", encoding="utf-8")
    with pytest.raises(ValueError, match="policy block must be an object"):
        load_policies_from_file(bad_block)

    # A policy without an action is rejected.
    missing_action = tmp_path / "missing_action.yaml"
    missing_action.write_text("allow:\n priority: 10\n", encoding="utf-8")
    with pytest.raises(ValueError, match="policy action is required"):
        load_policies_from_file(missing_action)

    # `when` (conditions) must be a mapping, not a scalar.
    bad_conditions = tmp_path / "bad_conditions.yaml"
    bad_conditions.write_text("allow:\n action: read\n when: 1\n", encoding="utf-8")
    with pytest.raises(ValueError, match="policy conditions must be an object"):
        load_policies_from_file(bad_conditions)
+
+
def test_load_policies_from_file_rejects_invalid_polar_lines(tmp_path: Path) -> None:
    """Malformed .polar rules fail parsing with rule-, token- and action-level errors."""
    # Effect keyword must be allow/deny.
    bad_effect = tmp_path / "bad_effect.polar"
    bad_effect.write_text("maybe action=read\n", encoding="utf-8")
    with pytest.raises(ValueError, match="invalid .polar rule"):
        load_policies_from_file(bad_effect)

    # Every token after the effect must be key=value shaped.
    bad_token = tmp_path / "bad_token.polar"
    bad_token.write_text("allow action=read role\n", encoding="utf-8")
    with pytest.raises(ValueError, match="invalid .polar token"):
        load_policies_from_file(bad_token)

    # An action token is mandatory.
    missing_action = tmp_path / "missing_action.polar"
    missing_action.write_text("allow role=admin\n", encoding="utf-8")
    with pytest.raises(ValueError, match="missing action in .polar rule"):
        load_policies_from_file(missing_action)
+
+
def test_load_authorization_model_from_paths_supports_txt_and_schema_files(tmp_path: Path) -> None:
    """A .txt schema loads verbatim, and a whitespace-only one yields None."""
    schema_file = tmp_path / "model.txt"
    schema_file.write_text("model schema 1\n", encoding="utf-8")
    assert load_authorization_model_from_paths([str(schema_file)]) == "model schema 1"

    blank_file = tmp_path / "empty.txt"
    blank_file.write_text(" \n", encoding="utf-8")
    assert load_authorization_model_from_paths([str(blank_file)]) is None
+
+
def test_load_authorization_model_from_file_supports_json_string_and_rejects_non_object(
    tmp_path: Path,
) -> None:
    """JSON model files may contain a schema string; blanks and numbers are rejected."""
    as_string = tmp_path / "model.json"
    as_string.write_text(json.dumps("model schema 2"), encoding="utf-8")
    assert load_authorization_model_from_file(as_string) == "model schema 2"

    # A whitespace-only schema string counts as empty.
    blank_string = tmp_path / "blank.json"
    blank_string.write_text(json.dumps(" "), encoding="utf-8")
    with pytest.raises(ValueError, match="authorization model file is empty"):
        load_authorization_model_from_file(blank_string)

    # Non-string, non-object payloads are invalid.
    not_object = tmp_path / "number.json"
    not_object.write_text(json.dumps(42), encoding="utf-8")
    with pytest.raises(ValueError, match="must contain an object"):
        load_authorization_model_from_file(not_object)
+
+
def test_load_authorization_model_from_file_rejects_invalid_model_shapes(tmp_path: Path) -> None:
    """Structurally invalid YAML models fail with shape-specific error messages."""
    # `model` must be a mapping, not a list.
    invalid_model = tmp_path / "invalid_model.yaml"
    invalid_model.write_text("model: []\n", encoding="utf-8")
    with pytest.raises(ValueError, match="model must be an object"):
        load_authorization_model_from_file(invalid_model)

    # relations/permissions must themselves be mappings.
    bad_relations = tmp_path / "bad_relations.yaml"
    bad_relations.write_text(
        "model:\n relations: owner\n permissions:\n read: owner\n",
        encoding="utf-8",
    )
    with pytest.raises(ValueError, match="relations and permissions must be objects"):
        load_authorization_model_from_file(bad_relations)

    # Each relation's subject list must be a valid shape (not a bare number).
    bad_subjects = tmp_path / "bad_subjects.yaml"
    bad_subjects.write_text(
        "model:\n relations:\n owner: 1\n permissions:\n read: owner\n",
        encoding="utf-8",
    )
    with pytest.raises(ValueError, match="invalid relation subjects for owner"):
        load_authorization_model_from_file(bad_subjects)
+
+
def test_load_authorization_model_from_file_rejects_unsupported_extension(tmp_path: Path) -> None:
    """Model files with an unrecognised extension raise a descriptive ValueError."""
    bad_file = tmp_path / "model.schema"
    bad_file.write_text("model schema 1", encoding="utf-8")

    with pytest.raises(ValueError, match="unsupported authorization model format"):
        load_authorization_model_from_file(bad_file)
diff --git a/tests/test_headless_modes.py b/tests/test_headless_modes.py
new file mode 100644
index 0000000..e502742
--- /dev/null
+++ b/tests/test_headless_modes.py
@@ -0,0 +1,253 @@
+from __future__ import annotations
+
+import json
+from pathlib import Path
+
+from typer.testing import CliRunner
+
+from keynetra import KeyNetra
+from keynetra.cli import app
+from keynetra.config.config_loader import load_config_file
+from keynetra.config.file_loaders import (
+ load_authorization_model_from_file,
+ load_policies_from_file,
+ load_policies_from_paths,
+)
+from keynetra.engine import KeyNetraEngine
+
+
def test_config_loader_supports_yaml_json_and_toml(tmp_path: Path) -> None:
    """load_config_file parses equivalent YAML, JSON and TOML config files."""
    yaml_path = tmp_path / "keynetra.yaml"
    yaml_path.write_text(
        "\n".join(
            [
                "database:",
                " url: sqlite+pysqlite:///./headless.db",
                "redis:",
                " url: redis://localhost:6379/0",
                "policies:",
                " path: ./policies",
                "models:",
                " path: ./auth-model.yaml",
                "server:",
                " host: 127.0.0.1",
                " port: 8088",
            ]
        ),
        encoding="utf-8",
    )
    json_path = tmp_path / "keynetra.json"
    json_path.write_text(
        json.dumps(
            {
                "database": {"url": "sqlite+pysqlite:///./headless.db"},
                "policies": {"path": "./policies"},
            }
        ),
        encoding="utf-8",
    )
    toml_path = tmp_path / "keynetra.toml"
    toml_path.write_text(
        "\n".join(
            [
                "[database]",
                "url = 'sqlite+pysqlite:///./headless.db'",
                "[server]",
                "host = '127.0.0.1'",
                "port = 9000",
            ]
        ),
        encoding="utf-8",
    )

    cfg_yaml = load_config_file(yaml_path)
    cfg_json = load_config_file(json_path)
    cfg_toml = load_config_file(toml_path)

    # Each format contributes the fields it defined; policy paths are tuples.
    assert cfg_yaml.database_url == "sqlite+pysqlite:///./headless.db"
    assert cfg_yaml.policy_paths == ("./policies",)
    assert cfg_json.database_url == "sqlite+pysqlite:///./headless.db"
    assert cfg_toml.server_port == 9000
+
+
def test_policy_file_loader_supports_yaml_json_and_polar(tmp_path: Path) -> None:
    """A policy directory may mix .yaml, .json and .polar files; all are loaded."""
    policy_dir = tmp_path / "policies"
    policy_dir.mkdir()
    (policy_dir / "a.yaml").write_text(
        "allow:\n action: read\n priority: 10\n when:\n role: admin\n", encoding="utf-8"
    )
    (policy_dir / "b.json").write_text(
        json.dumps(
            [
                {
                    "action": "write",
                    "effect": "allow",
                    "priority": 20,
                    "conditions": {"owner_only": True},
                }
            ]
        ),
        encoding="utf-8",
    )
    (policy_dir / "c.polar").write_text(
        "allow action=deploy priority=5 role=ops\n",
        encoding="utf-8",
    )

    policies = load_policies_from_paths([str(policy_dir)])

    # One policy per file, including the .polar-derived deploy rule.
    assert len(policies) == 3
    assert any(policy["action"] == "deploy" for policy in policies)
+
+
def test_engine_check_access_headless_api() -> None:
    """check_access works fully embedded, with no HTTP layer involved."""
    engine = KeyNetraEngine(
        [{"action": "read", "effect": "allow", "priority": 10, "conditions": {"role": "admin"}}]
    )

    verdict = engine.check_access(
        subject={"id": "123", "role": "admin"},
        action="read",
        resource="document:abc",
        context={},
    )

    assert verdict.allowed is True
+
+
def test_embedded_keynetra_from_config_and_model_loading(tmp_path: Path) -> None:
    """KeyNetra.from_config wires policies and the auth model from one config file.

    The relationship-based model grant (owner may read) must win even though the
    policy file contains a blanket high-priority deny for `read`.
    """
    policy_dir = tmp_path / "policies"
    policy_dir.mkdir()
    (policy_dir / "document.yaml").write_text(
        json.dumps([{"action": "read", "effect": "deny", "priority": 100, "conditions": {}}]),
        encoding="utf-8",
    )
    model_path = tmp_path / "auth-model.yaml"
    model_path.write_text(
        "\n".join(
            [
                "model:",
                " type: document",
                " relations:",
                " owner: user",
                " permissions:",
                " read: owner",
            ]
        ),
        encoding="utf-8",
    )
    cfg_path = tmp_path / "keynetra.yaml"
    cfg_path.write_text(
        "\n".join(
            [
                "policies:",
                f" path: {policy_dir}",
                "models:",
                f" path: {model_path}",
            ]
        ),
        encoding="utf-8",
    )

    engine = KeyNetra.from_config(cfg_path)
    decision = engine.check_access(
        subject={
            "id": "1",
            # Ownership is supplied inline as a relation tuple on the subject.
            "relations": [{"relation": "owner", "object_type": "document", "object_id": "abc"}],
        },
        action="read",
        resource="document:abc",
        context={},
    )
    assert decision.allowed is True
+
+
def test_cli_serve_with_config_uses_server_settings(tmp_path: Path, monkeypatch) -> None:
    """`serve --config` forwards host/port from the config file to uvicorn."""
    captured: dict[str, object] = {}

    def fake_run(app_path: str, host: str, port: int, reload: bool) -> None:
        # Record the arguments instead of actually starting a server.
        captured.update(app_path=app_path, host=host, port=port, reload=reload)

    monkeypatch.setattr("uvicorn.run", fake_run)

    cfg_path = tmp_path / "keynetra.yaml"
    cfg_path.write_text(
        "\n".join(["server:", " host: 127.0.0.1", " port: 9099"]),
        encoding="utf-8",
    )

    result = CliRunner().invoke(app, ["serve", "--config", str(cfg_path)])

    assert result.exit_code == 0
    assert captured["app_path"] == "keynetra.api.main:app"
    assert captured["host"] == "127.0.0.1"
    assert captured["port"] == 9099
+
+
def test_cli_check_with_config_builds_url_from_server_settings(tmp_path: Path, monkeypatch) -> None:
    """`check --config` targets the host/port declared in the config file."""
    called: dict[str, object] = {}

    class _Response:
        # Minimal httpx.Response stand-in: JSON body plus a no-op status check.
        text = '{"ok": true}'

        def raise_for_status(self) -> None:
            return None

    # NOTE: the `json` parameter intentionally mirrors httpx.post's keyword and
    # shadows the module-level `json` import inside this function.
    def fake_post(url: str, json: dict[str, object], headers: dict[str, str], timeout: float):
        called["url"] = url
        called["json"] = json
        called["headers"] = headers
        called["timeout"] = timeout
        return _Response()

    monkeypatch.setattr("httpx.post", fake_post)
    cfg_path = tmp_path / "keynetra.yaml"
    cfg_path.write_text(
        "\n".join(
            [
                "server:",
                " host: 127.0.0.1",
                " port: 8087",
            ]
        ),
        encoding="utf-8",
    )

    runner = CliRunner()
    result = runner.invoke(
        app,
        [
            "check",
            "--config",
            str(cfg_path),
            "--api-key",
            "devkey",
            "--action",
            "read",
            "--user",
            '{"id":"1"}',
            "--resource",
            '{"resource_type":"document","resource_id":"doc-1"}',
        ],
    )
    assert result.exit_code == 0
    assert called["url"] == "http://127.0.0.1:8087/check-access"
+
+
def test_model_file_loader_supports_yaml() -> None:
    """The bundled example auth model parses into the expected schema text."""
    schema = load_authorization_model_from_file("examples/auth-model.yaml")
    for expected in ("model schema 1", "read = owner or editor"):
        assert expected in schema
+
+
def test_single_file_policy_loader_works() -> None:
    """Loading a single example .polar file yields both rules it defines."""
    loaded = load_policies_from_file("examples/policies/ops_rules.polar")
    assert len(loaded) == 2
diff --git a/tests/test_idempotency.py b/tests/test_idempotency.py
new file mode 100644
index 0000000..04fd854
--- /dev/null
+++ b/tests/test_idempotency.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+from sqlalchemy import create_engine, select
+from sqlalchemy.orm import Session
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.domain.models.idempotency import IdempotencyRecord
+from keynetra.domain.models.policy_versioning import PolicyVersion
+from keynetra.domain.models.relationship import Relationship
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
def _build_client(database_url: str) -> TestClient:
    """Fresh API test client bound to *database_url* with a known API key."""
    os.environ.update(
        {"KEYNETRA_DATABASE_URL": database_url, "KEYNETRA_API_KEYS": "testkey"}
    )
    # Settings are cached per-process; reset so the env above takes effect.
    reset_settings_cache()
    initialize_database(database_url)
    return TestClient(create_app())
+
+
def test_policy_create_replays_same_response_without_extra_write(tmp_path) -> None:
    """Replayed POST /policies with the same Idempotency-Key returns the cached
    response and does not create a second PolicyVersion row."""
    database_url = f"sqlite+pysqlite:///{tmp_path / 'core-idempotency.db'}"
    client = _build_client(database_url)
    headers = {"X-API-Key": "testkey", "Idempotency-Key": "policy-1"}
    payload = {"action": "read", "effect": "allow", "priority": 10, "conditions": {"role": "admin"}}

    first = client.post("/policies", json=payload, headers=headers)
    second = client.post("/policies", json=payload, headers=headers)

    assert first.status_code == 201
    assert second.status_code == 201
    assert first.json() == second.json()
    # Replays are flagged explicitly so clients can distinguish them.
    assert second.headers["X-Idempotent-Replayed"] == "true"

    # Inspect storage directly: exactly one version and one idempotency record.
    session = Session(create_engine(database_url, future=True))
    try:
        assert len(session.execute(select(PolicyVersion)).scalars().all()) == 1
        assert len(session.execute(select(IdempotencyRecord)).scalars().all()) == 1
    finally:
        session.close()
+
+
def test_relationship_create_replays_same_response_without_extra_write(tmp_path) -> None:
    """Replayed POST /relationships is idempotent: same body, one stored row."""
    database_url = f"sqlite+pysqlite:///{tmp_path / 'core-relationships.db'}"
    client = _build_client(database_url)
    headers = {"X-API-Key": "testkey", "Idempotency-Key": "relationship-1"}
    payload = {
        "subject_type": "user",
        "subject_id": "u1",
        "relation": "member",
        "object_type": "team",
        "object_id": "t1",
    }

    first = client.post("/relationships", json=payload, headers=headers)
    second = client.post("/relationships", json=payload, headers=headers)

    assert first.status_code == 201
    assert second.status_code == 201
    assert first.json() == second.json()
    assert second.headers["X-Idempotent-Replayed"] == "true"

    # Only one Relationship row may exist despite two successful responses.
    session = Session(create_engine(database_url, future=True))
    try:
        assert len(session.execute(select(Relationship)).scalars().all()) == 1
    finally:
        session.close()
+
+
def test_idempotency_key_rejects_payload_mismatch(tmp_path) -> None:
    """Reusing an Idempotency-Key with a different payload is a 409 conflict."""
    database_url = f"sqlite+pysqlite:///{tmp_path / 'core-mismatch.db'}"
    client = _build_client(database_url)
    headers = {"X-API-Key": "testkey", "Idempotency-Key": "policy-2"}

    first = client.post(
        "/policies",
        json={"action": "read", "effect": "allow", "priority": 10, "conditions": {"role": "admin"}},
        headers=headers,
    )
    # Same key, but the effect differs — the server must refuse to replay.
    second = client.post(
        "/policies",
        json={"action": "read", "effect": "deny", "priority": 10, "conditions": {"role": "admin"}},
        headers=headers,
    )

    assert first.status_code == 201
    assert second.status_code == 409
    assert second.json()["error"]["code"] == "conflict"
diff --git a/tests/test_impact_analysis.py b/tests/test_impact_analysis.py
new file mode 100644
index 0000000..6ea099c
--- /dev/null
+++ b/tests/test_impact_analysis.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+from typing import Any
+
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.services.impact_analysis import ImpactAnalyzer
+from keynetra.services.interfaces import PolicyRecord, RelationshipRecord, TenantRecord
+
+
class FakeTenantRepository:
    """Single-tenant repository double that only tracks the revision counter."""

    def __init__(self) -> None:
        self._tenant = TenantRecord(id=1, tenant_key="default", policy_version=1, revision=1)

    def get_or_create(self, tenant_key: str) -> TenantRecord:
        """Always return the one seeded tenant regardless of the key."""
        return self._tenant

    def get_by_id(self, tenant_id: int) -> TenantRecord | None:
        """The seeded tenant when ids match, otherwise None."""
        if tenant_id == self._tenant.id:
            return self._tenant
        return None

    def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
        """Policy-version bumps are ignored by this fake."""
        return self._tenant

    def bump_revision(self, tenant: TenantRecord) -> TenantRecord:
        """Replace the stored tenant with one whose revision is incremented."""
        bumped = TenantRecord(
            id=tenant.id,
            tenant_key=tenant.tenant_key,
            policy_version=tenant.policy_version,
            revision=tenant.revision + 1,
        )
        self._tenant = bumped
        return bumped
+
+
class FakePolicyRepository:
    """In-memory PolicyRepository double that serves a fixed policy list."""

    def __init__(self, policies: list[PolicyRecord]) -> None:
        # Copy defensively so callers cannot mutate the fixture afterwards.
        self._policies = list(policies)

    def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]:
        """Return a fresh copy of the seeded policies; tenant_id is ignored."""
        return list(self._policies)

    def list_current_policy_views(self, *, tenant_id: int) -> list[Any]:
        """Policy views are not modelled by this fake."""
        return []

    def create_policy_version(self, **_: Any) -> Any:
        """Creation is unsupported in this read-only fake."""
        raise NotImplementedError

    def rollback_policy(self, *, tenant_id: int, policy_key: str, version: int) -> tuple[str, int]:
        """Pretend the rollback succeeded by echoing the requested key/version."""
        return policy_key, version

    def delete_policy(self, *, tenant_id: int, policy_key: str) -> None:
        """Deletion is a no-op."""
        return None
+
+
class FakeUserRepository:
    """In-memory UserRepository double keyed by integer user id."""

    def __init__(self, user_ids: list[int], contexts: dict[int, dict[str, Any]]) -> None:
        self._user_ids = list(user_ids)
        self._contexts = dict(contexts)

    def list_user_ids(self, *, tenant_id: int) -> list[int]:
        """All seeded user ids; tenant_id is ignored by the fake."""
        return list(self._user_ids)

    def get_user_context(self, user_id: int) -> dict[str, Any] | None:
        """Evaluation context for *user_id*, or None when unknown."""
        return self._contexts.get(user_id)
+
+
class FakeRelationshipRepository:
    """In-memory RelationshipRepository double using simple attribute filters."""

    def __init__(self, relationships: list[RelationshipRecord]) -> None:
        self._relationships = list(relationships)

    def list_for_subject(
        self, *, tenant_id: int, subject_type: str, subject_id: str
    ) -> list[RelationshipRecord]:
        """Relationships whose subject matches; tenant_id is ignored."""
        matches = []
        for record in self._relationships:
            if (record.subject_type, record.subject_id) == (subject_type, subject_id):
                matches.append(record)
        return matches

    def list_for_subject_page(self, **_: Any):
        """Paging is not modelled: an empty page with no continuation token."""
        return [], None

    def list_for_object(
        self, *, tenant_id: int, object_type: str, object_id: str
    ) -> list[RelationshipRecord]:
        """Relationships whose object matches; tenant_id is ignored."""
        matches = []
        for record in self._relationships:
            if (record.object_type, record.object_id) == (object_type, object_id):
                matches.append(record)
        return matches

    def create(self, **_: Any) -> int:
        """Pretend to persist and return a fixed surrogate id."""
        return 1
+
+
def test_policy_change_gains_access_for_matching_users() -> None:
    """A new allow policy grants access only to users whose context matches it."""
    analyzer = ImpactAnalyzer(
        tenants=FakeTenantRepository(),
        policies=FakePolicyRepository([]),
        users=FakeUserRepository(
            user_ids=[1, 2],
            contexts={
                1: {"id": 1, "role": "admin", "roles": ["admin"], "permissions": []},
                2: {"id": 2, "role": "viewer", "roles": ["viewer"], "permissions": []},
            },
        ),
        relationships=FakeRelationshipRepository([]),
    )

    # Proposed change: allow share_document for admins only.
    result = analyzer.analyze_policy_change(
        tenant_key="default",
        policy_change="""
allow:
 action: share_document
 priority: 10
 policy_key: share-admin
 when:
 role: admin
""",
    )

    # Only the admin (user 1) gains access; nobody loses any.
    assert result.gained_access == [1]
    assert result.lost_access == []
+
+
def test_policy_change_can_remove_access() -> None:
    """A higher-precedence deny revokes access previously granted by an allow."""
    analyzer = ImpactAnalyzer(
        tenants=FakeTenantRepository(),
        # Current state: admins are allowed to share documents.
        policies=FakePolicyRepository(
            [
                PolicyRecord(
                    id=1,
                    definition=PolicyDefinition(
                        action="share_document",
                        effect="allow",
                        priority=10,
                        policy_id="share-admin",
                        conditions={"role": "admin"},
                    ),
                )
            ]
        ),
        users=FakeUserRepository(
            user_ids=[1, 2],
            contexts={
                1: {"id": 1, "role": "admin", "roles": ["admin"], "permissions": []},
                2: {"id": 2, "role": "viewer", "roles": ["viewer"], "permissions": []},
            },
        ),
        relationships=FakeRelationshipRepository([]),
    )

    # Proposed change: a priority-1 deny targeting the same admins.
    result = analyzer.analyze_policy_change(
        tenant_key="default",
        policy_change="""
deny:
 action: share_document
 priority: 1
 policy_key: share-admin-deny
 when:
 role: admin
""",
    )

    assert result.gained_access == []
    assert result.lost_access == [1]
diff --git a/tests/test_management_routes_coverage.py b/tests/test_management_routes_coverage.py
new file mode 100644
index 0000000..023472d
--- /dev/null
+++ b/tests/test_management_routes_coverage.py
@@ -0,0 +1,154 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
+def _client(database_url: str) -> TestClient:
+    """Build a TestClient wired to *database_url* with API key ``testkey``.
+
+    NOTE(review): mutates process-wide ``os.environ`` without restoring it,
+    so later test modules inherit these values — consider pytest's
+    ``monkeypatch`` fixture if ordering issues appear.
+    """
+    os.environ["KEYNETRA_DATABASE_URL"] = database_url
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    # Force the in-memory cache path by removing any Redis configuration.
+    os.environ.pop("KEYNETRA_REDIS_URL", None)
+    reset_settings_cache()
+    initialize_database(database_url)
+    return TestClient(create_app())
+
+
+def test_permissions_roles_relationships_and_policies_management_paths(tmp_path) -> None:
+    """End-to-end coverage sweep over the management REST surface: permissions,
+    roles, role-permission links, relationships, and policies, including the
+    422/404/409 validation and error branches of each router."""
+    client = _client(f"sqlite+pysqlite:///{tmp_path / 'mgmt.db'}")
+    headers = {"X-API-Key": "testkey"}
+
+    # Permissions CRUD + validation/error branches.
+    bad_limit = client.get("/permissions?limit=0", headers=headers)
+    assert bad_limit.status_code == 422
+
+    created_permission = client.post("/permissions", json={"action": "deploy"}, headers=headers)
+    assert created_permission.status_code == 201
+    permission_id = created_permission.json()["id"]
+
+    duplicate_permission = client.post("/permissions", json={"action": "deploy"}, headers=headers)
+    assert duplicate_permission.status_code == 409
+
+    missing_permission_update = client.put(
+        "/permissions/9999",
+        json={"action": "deploy_v2"},
+        headers=headers,
+    )
+    assert missing_permission_update.status_code == 404
+
+    updated_permission = client.put(
+        f"/permissions/{permission_id}",
+        json={"action": "deploy_v2"},
+        headers=headers,
+    )
+    assert updated_permission.status_code == 200
+    assert updated_permission.json()["action"] == "deploy_v2"
+
+    # Roles CRUD + permission assignment paths.
+    created_role = client.post("/roles", json={"name": "operators"}, headers=headers)
+    assert created_role.status_code == 201
+    role_id = created_role.json()["id"]
+
+    duplicate_role = client.post("/roles", json={"name": "operators"}, headers=headers)
+    assert duplicate_role.status_code == 409
+
+    missing_role_update = client.put("/roles/9999", json={"name": "ops"}, headers=headers)
+    assert missing_role_update.status_code == 404
+
+    updated_role = client.put(f"/roles/{role_id}", json={"name": "ops"}, headers=headers)
+    assert updated_role.status_code == 200
+    assert updated_role.json()["name"] == "ops"
+
+    add_permission = client.post(f"/roles/{role_id}/permissions/{permission_id}", headers=headers)
+    assert add_permission.status_code == 201
+
+    role_permissions = client.get(f"/roles/{role_id}/permissions", headers=headers)
+    assert role_permissions.status_code == 200
+    assert role_permissions.json()["data"][0]["id"] == permission_id
+
+    permission_roles = client.get(f"/permissions/{permission_id}/roles", headers=headers)
+    assert permission_roles.status_code == 200
+    assert permission_roles.json()["data"][0]["id"] == role_id
+
+    remove_permission = client.delete(
+        f"/roles/{role_id}/permissions/{permission_id}", headers=headers
+    )
+    assert remove_permission.status_code == 200
+
+    deleted_role = client.delete(f"/roles/{role_id}", headers=headers)
+    assert deleted_role.status_code == 200
+
+    delete_missing_role = client.delete("/roles/9999", headers=headers)
+    assert delete_missing_role.status_code == 404
+
+    deleted_permission = client.delete(f"/permissions/{permission_id}", headers=headers)
+    assert deleted_permission.status_code == 200
+
+    delete_missing_permission = client.delete("/permissions/9999", headers=headers)
+    assert delete_missing_permission.status_code == 404
+
+    # Relationships list/create/conflict + validation branch.
+    bad_relationship_limit = client.get(
+        "/relationships?subject_type=user&subject_id=u1&limit=0",
+        headers=headers,
+    )
+    assert bad_relationship_limit.status_code == 422
+
+    relationship_payload = {
+        "subject_type": "user",
+        "subject_id": "u1",
+        "relation": "owner",
+        "object_type": "document",
+        "object_id": "doc-1",
+    }
+    created_relationship = client.post("/relationships", json=relationship_payload, headers=headers)
+    assert created_relationship.status_code == 201
+
+    duplicate_relationship = client.post(
+        "/relationships", json=relationship_payload, headers=headers
+    )
+    assert duplicate_relationship.status_code == 409
+
+    listed_relationships = client.get(
+        "/relationships?subject_type=user&subject_id=u1&limit=1",
+        headers=headers,
+    )
+    assert listed_relationships.status_code == 200
+    assert listed_relationships.json()["data"][0]["object_id"] == "doc-1"
+
+    # Policies CRUD + validation/error branches.
+    bad_policy_limit = client.get("/policies?limit=0", headers=headers)
+    assert bad_policy_limit.status_code == 422
+
+    created_policy = client.post(
+        "/policies",
+        json={
+            "action": "read_document",
+            "effect": "allow",
+            "priority": 10,
+            "conditions": {"role": "admin", "policy_key": "read-admin"},
+        },
+        headers=headers,
+    )
+    assert created_policy.status_code == 201
+
+    updated_policy = client.put(
+        "/policies/read-admin",
+        json={"action": "read_document", "effect": "deny", "priority": 5, "conditions": {}},
+        headers=headers,
+    )
+    assert updated_policy.status_code == 200
+    assert updated_policy.json()["data"]["effect"] == "deny"
+
+    # Malformed DSL and rollback-to-missing-version error paths.
+    bad_dsl = client.post("/policies/dsl?dsl=invalid", headers=headers)
+    assert bad_dsl.status_code == 422
+
+    missing_rollback = client.post("/policies/read-admin/rollback/999", headers=headers)
+    assert missing_rollback.status_code == 404
+
+    deleted_policy = client.delete("/policies/read-admin", headers=headers)
+    assert deleted_policy.status_code == 200
diff --git a/tests/test_metrics_endpoint.py b/tests/test_metrics_endpoint.py
new file mode 100644
index 0000000..91a7646
--- /dev/null
+++ b/tests/test_metrics_endpoint.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+from prometheus_client.parser import text_string_to_metric_families
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
+def _metric_value(text: str, metric_name: str, labels: dict[str, str] | None = None) -> float:
+    """Return the value of the first sample named *metric_name* whose labels
+    contain *labels* as a subset, parsed from Prometheus exposition *text*.
+
+    Returns 0.0 when no matching sample exists — callers use this as the
+    "metric not yet emitted" baseline, so absence and an actual 0 value are
+    deliberately indistinguishable here.
+    """
+    labels = labels or {}
+    for family in text_string_to_metric_families(text):
+        for sample in family.samples:
+            if sample.name != metric_name:
+                continue
+            # Subset match: extra labels on the sample are ignored.
+            if all(sample.labels.get(key) == value for key, value in labels.items()):
+                return float(sample.value)
+    return 0.0
+
+
+def test_metrics_endpoint_exposes_prometheus_text_and_counts_access_checks(tmp_path) -> None:
+    """/metrics serves Prometheus text format and the access-check counter for
+    (tenant=default, decision=allow) increments after a successful /check-access."""
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'metrics.db'}"
+    os.environ["KEYNETRA_DATABASE_URL"] = database_url
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    # Raise rate limits so repeated requests in this test are never throttled.
+    os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
+    os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1000"
+    reset_settings_cache()
+    initialize_database(database_url)
+    client = TestClient(create_app())
+
+    initial_metrics = client.get("/metrics")
+    assert initial_metrics.status_code == 200
+    assert initial_metrics.headers["content-type"].startswith("text/plain; version=0.0.4")
+    assert "keynetra_access_checks_total" in initial_metrics.text
+
+    # Baseline may be non-zero because metrics are process-global.
+    before = _metric_value(
+        initial_metrics.text,
+        "keynetra_access_checks_total",
+        {"tenant": "default", "decision": "allow"},
+    )
+
+    check = client.post(
+        "/check-access",
+        json={
+            "user": {"id": 1, "permissions": ["approve_payment"]},
+            "action": "approve_payment",
+            "resource": {"amount": 5},
+            "context": {},
+        },
+        headers={"X-API-Key": "testkey"},
+    )
+    assert check.status_code == 200
+
+    updated_metrics = client.get("/metrics")
+    after = _metric_value(
+        updated_metrics.text,
+        "keynetra_access_checks_total",
+        {"tenant": "default", "decision": "allow"},
+    )
+
+    # ">=" rather than "==" tolerates other allow decisions in the same process.
+    assert after >= before + 1
diff --git a/tests/test_migration_utils.py b/tests/test_migration_utils.py
new file mode 100644
index 0000000..e0e1389
--- /dev/null
+++ b/tests/test_migration_utils.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from keynetra.migrations import find_destructive_revisions
+
+
+def test_find_destructive_revisions(tmp_path: Path) -> None:
+    """A not-yet-applied Alembic revision containing op.drop_table is flagged
+    as destructive; the applied down_revision is not reported."""
+    revision_file = tmp_path / "20260405_drop.py"
+    revision_file.write_text("""from alembic import op
+
+revision = "20260405_drop"
+down_revision = "20260404_000005"
+
+def upgrade():
+    op.drop_table("old_table")
+""")
+
+    pending = find_destructive_revisions(tmp_path, applied_revisions={"20260404_000005"})
+    assert pending == ["20260405_drop"]
diff --git a/tests/test_pagination_versioning_security.py b/tests/test_pagination_versioning_security.py
new file mode 100644
index 0000000..015c4bc
--- /dev/null
+++ b/tests/test_pagination_versioning_security.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import hashlib
+import logging
+import os
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
+def _client(database_url: str) -> TestClient:
+    """Build a TestClient for *database_url* with rate limits raised high.
+
+    Unlike the management-routes helper, this one does NOT set
+    ``KEYNETRA_API_KEYS`` — each test configures auth env vars before calling.
+    NOTE(review): mutates process-wide ``os.environ`` without cleanup.
+    """
+    os.environ["KEYNETRA_DATABASE_URL"] = database_url
+    os.environ["KEYNETRA_RATE_LIMIT_PER_MINUTE"] = "1000"
+    os.environ["KEYNETRA_RATE_LIMIT_BURST"] = "1000"
+    reset_settings_cache()
+    initialize_database(database_url)
+    return TestClient(create_app())
+
+
+def test_roles_cursor_pagination_and_version_header(tmp_path) -> None:
+    """/roles paginates with an opaque next_cursor and every response carries
+    the X-API-Version: v1 header; the cursor can be echoed back explicitly."""
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'roles.db'}"
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    client = _client(database_url)
+
+    first = client.post("/roles", json={"name": "admin"}, headers={"X-API-Key": "testkey"})
+    second = client.post("/roles", json={"name": "member"}, headers={"X-API-Key": "testkey"})
+
+    assert first.status_code == 201
+    assert second.status_code == 201
+
+    page_one = client.get("/roles?limit=1", headers={"X-API-Key": "testkey"})
+    assert page_one.status_code == 200
+    assert page_one.headers["X-API-Version"] == "v1"
+    assert len(page_one.json()["data"]) == 1
+    assert page_one.json()["meta"]["next_cursor"]
+
+    # Second page is fetched via the cursor and an explicit version header.
+    page_two = client.get(
+        f"/roles?limit=1&cursor={page_one.json()['meta']['next_cursor']}",
+        headers={"X-API-Key": "testkey", "X-API-Version": "v1"},
+    )
+    assert page_two.status_code == 200
+    assert page_two.json()["data"][0]["name"] == "member"
+
+
+def test_policies_cursor_pagination(tmp_path) -> None:
+    """With two stored policies, /policies?limit=1 returns one item plus
+    pagination metadata (limit echoed, non-empty next_cursor)."""
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'policies.db'}"
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    client = _client(database_url)
+    headers = {"X-API-Key": "testkey"}
+
+    assert (
+        client.post(
+            "/policies",
+            json={"action": "read", "effect": "allow", "priority": 10, "conditions": {}},
+            headers=headers,
+        ).status_code
+        == 201
+    )
+    assert (
+        client.post(
+            "/policies",
+            json={"action": "write", "effect": "allow", "priority": 20, "conditions": {}},
+            headers=headers,
+        ).status_code
+        == 201
+    )
+
+    response = client.get("/policies?limit=1", headers=headers)
+    assert response.status_code == 200
+    assert len(response.json()["data"]) == 1
+    assert response.json()["meta"]["limit"] == 1
+    assert response.json()["meta"]["next_cursor"]
+
+
+def test_hashed_api_key_auth_and_failed_attempt_logging(tmp_path, caplog) -> None:
+    """Auth accepts a key whose SHA256 matches KEYNETRA_API_KEY_HASHES (no
+    plaintext keys configured), and a bad key yields 401 plus an
+    ``auth_failed`` log record."""
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'auth.db'}"
+    # Plaintext keys removed so only the hashed-key path is exercised.
+    os.environ.pop("KEYNETRA_API_KEYS", None)
+    os.environ["KEYNETRA_API_KEY_HASHES"] = hashlib.sha256(b"testkey").hexdigest()
+    client = _client(database_url)
+
+    ok = client.get("/health", headers={"X-API-Key": "testkey"})
+    assert ok.status_code == 200
+
+    caplog.set_level(logging.INFO)
+    bad = client.post(
+        "/check-access",
+        json={"user": {}, "action": "read", "resource": {}},
+        headers={"X-API-Key": "badkey"},
+    )
+    assert bad.status_code == 401
+    assert any("auth_failed" in str(record.msg) for record in caplog.records)
+
+
+def test_unsupported_api_version_rejected(tmp_path) -> None:
+    """Requests declaring an unknown X-API-Version are rejected with 400 and
+    a structured error payload."""
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'version.db'}"
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    client = _client(database_url)
+
+    response = client.get("/health", headers={"X-API-Version": "v2"})
+
+    assert response.status_code == 400
+    assert response.json()["error"]["message"] == "unsupported api version"
diff --git a/tests/test_playground.py b/tests/test_playground.py
new file mode 100644
index 0000000..ab933e1
--- /dev/null
+++ b/tests/test_playground.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.main import create_app
+
+
+def test_playground_evaluate_inline_policy() -> None:
+    """/playground/evaluate runs an inline policy set against an inline input
+    and returns the decision with a synthesized policy id (action:priority:effect).
+
+    NOTE(review): unlike sibling tests this does not set KEYNETRA_DATABASE_URL,
+    so it inherits whatever the previous test left in the environment — verify
+    the playground route truly avoids database access.
+    """
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    reset_settings_cache()
+    client = TestClient(create_app())
+    payload = {
+        "policies": [
+            {
+                "action": "play",
+                "effect": "allow",
+                "priority": 10,
+                "conditions": {"role": "tester"},
+            }
+        ],
+        "input": {
+            "user": {"id": 1, "role": "tester"},
+            "resource": {},
+            "action": "play",
+            "context": {},
+        },
+    }
+    response = client.post("/playground/evaluate", json=payload, headers={"X-API-Key": "testkey"})
+    assert response.status_code == 200
+    assert response.json()["data"]["decision"] == "allow"
+    assert response.json()["data"]["policy_id"] == "play:10:allow"
diff --git a/tests/test_policy_lint.py b/tests/test_policy_lint.py
new file mode 100644
index 0000000..57a2e18
--- /dev/null
+++ b/tests/test_policy_lint.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import os
+
+from fastapi.testclient import TestClient
+from sqlalchemy import create_engine
+from sqlalchemy.orm import Session
+
+from keynetra.config.settings import reset_settings_cache
+from keynetra.domain.models.base import Base
+from keynetra.domain.models.rbac import Role
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+
+
+def _setup_database(database_url: str) -> None:
+    """Initialize the schema at *database_url* and insert a Role named
+    'orphan' that no policy references, to trigger the lint warning."""
+    initialize_database(database_url)
+    engine = create_engine(database_url, future=True)
+    Base.metadata.create_all(engine)
+    with Session(engine) as session:
+        session.add(Role(name="orphan"))
+        session.commit()
+
+
+def test_policy_creation_emits_role_warning(tmp_path) -> None:
+    """Creating a policy while an unreferenced role exists surfaces a lint
+    warning mentioning that role in the response ``meta.extra.warnings``."""
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'lint.db'}"
+    os.environ["KEYNETRA_DATABASE_URL"] = database_url
+    _setup_database(database_url)
+    os.environ["KEYNETRA_API_KEYS"] = "testkey"
+    reset_settings_cache()
+    client = TestClient(create_app())
+    headers = {"X-API-Key": "testkey"}
+
+    policy = {
+        "action": "read",
+        "effect": "allow",
+        "priority": 10,
+        "conditions": {},
+    }
+
+    response = client.post("/policies", json=policy, headers=headers)
+    assert response.status_code == 201
+    warnings = response.json()["meta"]["extra"].get("warnings")
+    assert warnings
+    assert any("orphan" in warning for warning in warnings)
diff --git a/tests/test_policy_simulation.py b/tests/test_policy_simulation.py
new file mode 100644
index 0000000..d0ffcc2
--- /dev/null
+++ b/tests/test_policy_simulation.py
@@ -0,0 +1,176 @@
+from __future__ import annotations
+
+from typing import Any
+
+from keynetra.config.settings import Settings
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.infrastructure.cache.access_index_cache import RedisBackedAccessIndexCache
+from keynetra.infrastructure.cache.acl_cache import RedisBackedACLCache
+from keynetra.infrastructure.cache.backends import InMemoryCacheBackend
+from keynetra.infrastructure.cache.decision_cache import RedisBackedDecisionCache
+from keynetra.infrastructure.cache.policy_cache import RedisBackedPolicyCache
+from keynetra.infrastructure.cache.relationship_cache import RedisBackedRelationshipCache
+from keynetra.services.authorization import AuthorizationService
+from keynetra.services.interfaces import ACLRecord, PolicyRecord, RelationshipRecord, TenantRecord
+from keynetra.services.policy_simulator import PolicySimulator
+
+
+class FakeTenantRepository:
+    """In-memory tenant repository holding a single 'default' tenant.
+
+    ``TenantRecord`` appears immutable here — bump methods rebuild the record
+    with the incremented counter rather than mutating in place.
+    """
+
+    def __init__(self) -> None:
+        self._tenant = TenantRecord(id=1, tenant_key="default", policy_version=1, revision=1)
+
+    def get_or_create(self, tenant_key: str) -> TenantRecord:
+        # Always returns the singleton tenant regardless of the requested key.
+        return self._tenant
+
+    def get_by_id(self, tenant_id: int) -> TenantRecord | None:
+        return self._tenant if tenant_id == self._tenant.id else None
+
+    def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
+        self._tenant = TenantRecord(
+            id=tenant.id,
+            tenant_key=tenant.tenant_key,
+            policy_version=tenant.policy_version + 1,
+            revision=tenant.revision,
+        )
+        return self._tenant
+
+    def bump_revision(self, tenant: TenantRecord) -> TenantRecord:
+        self._tenant = TenantRecord(
+            id=tenant.id,
+            tenant_key=tenant.tenant_key,
+            policy_version=tenant.policy_version,
+            revision=tenant.revision + 1,
+        )
+        return self._tenant
+
+
+class FakePolicyRepository:
+    """Read-only in-memory policy repository seeded with a fixed list.
+
+    Write paths are stubbed: creation is unsupported (NotImplementedError),
+    rollback echoes its arguments, and delete is a no-op.
+    """
+
+    def __init__(self, policies: list[PolicyRecord]) -> None:
+        self._policies = list(policies)
+
+    def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]:
+        # Copy so callers cannot mutate the seeded list.
+        return list(self._policies)
+
+    def list_current_policy_views(self, *, tenant_id: int) -> list[Any]:
+        return []
+
+    def create_policy_version(self, **_: Any) -> Any:
+        raise NotImplementedError
+
+    def rollback_policy(self, *, tenant_id: int, policy_key: str, version: int) -> tuple[str, int]:
+        return policy_key, version
+
+    def delete_policy(self, *, tenant_id: int, policy_key: str) -> None:
+        return None
+
+
+class FakeUserRepository:
+    """Stub user repository: every user id resolves to an admin context."""
+
+    def get_user_context(self, user_id: int) -> dict[str, Any] | None:
+        return {"id": user_id, "role": "admin", "roles": ["admin"], "permissions": []}
+
+
+class FakeRelationshipRepository:
+    """Stub relationship repository: all lookups are empty, create returns 1."""
+
+    def list_for_subject(
+        self, *, tenant_id: int, subject_type: str, subject_id: str
+    ) -> list[RelationshipRecord]:
+        return []
+
+    def list_for_subject_page(self, **_: Any):
+        # (items, next_cursor) pair with no results and no further page.
+        return [], None
+
+    def list_for_object(
+        self, *, tenant_id: int, object_type: str, object_id: str
+    ) -> list[RelationshipRecord]:
+        return []
+
+    def create(self, **_: Any) -> int:
+        return 1
+
+
+class FakeACLRepository:
+    """Stub ACL repository: no entries exist; create pretends id 1."""
+
+    def create_acl_entry(self, **_: Any) -> int:
+        return 1
+
+    def list_resource_acl(
+        self, *, tenant_id: int, resource_type: str, resource_id: str
+    ) -> list[ACLRecord]:
+        return []
+
+    def get_acl_entry(self, *, tenant_id: int, acl_id: int) -> ACLRecord | None:
+        return None
+
+    def find_matching_acl(
+        self, *, tenant_id: int, resource_type: str, resource_id: str, action: str
+    ) -> list[ACLRecord]:
+        return []
+
+    def delete_acl_entry(self, *, tenant_id: int, acl_id: int) -> None:
+        return None
+
+
+class FakeAuditRepository:
+    """Stub audit sink: discards every audit write."""
+
+    def write(self, **_: Any) -> None:
+        return None
+
+
+def _authorization_service(
+    tenants: FakeTenantRepository, policies: FakePolicyRepository
+) -> AuthorizationService:
+    """Assemble an AuthorizationService entirely from fakes and one shared
+    in-memory cache backend, so simulations run without Redis or a database."""
+    # All Redis-backed caches share a single in-memory backend.
+    backend = InMemoryCacheBackend()
+    return AuthorizationService(
+        settings=Settings(KEYNETRA_API_KEYS="test", KEYNETRA_POLICIES_JSON="[]"),
+        tenants=tenants,
+        policies=policies,
+        users=FakeUserRepository(),
+        relationships=FakeRelationshipRepository(),
+        audit=FakeAuditRepository(),
+        policy_cache=RedisBackedPolicyCache(backend),
+        relationship_cache=RedisBackedRelationshipCache(backend),
+        decision_cache=RedisBackedDecisionCache(backend),
+        acl_repository=FakeACLRepository(),
+        acl_cache=RedisBackedACLCache(backend),
+        access_index_cache=RedisBackedAccessIndexCache(backend),
+    )
+
+
+def test_policy_simulator_reports_before_and_after() -> None:
+    """Simulating the addition of a priority-1 allow policy flips the decision
+    from the existing priority-10 deny, and the result exposes both the
+    before and after decisions with the winning policy id."""
+    tenants = FakeTenantRepository()
+    policies = FakePolicyRepository(
+        [
+            PolicyRecord(
+                id=1,
+                definition=PolicyDefinition(
+                    action="share_document",
+                    effect="deny",
+                    priority=10,
+                    policy_id="share-admin-deny:v1",
+                    conditions={"role": "admin"},
+                ),
+            )
+        ]
+    )
+    simulator = PolicySimulator(
+        tenants=tenants,
+        policies=policies,
+        authorization_service=_authorization_service(tenants, policies),
+    )
+
+    result = simulator.simulate_policy_change(
+        tenant_key="default",
+        user={"id": 1, "role": "admin", "roles": ["admin"]},
+        action="share_document",
+        resource={"resource_type": "document", "resource_id": "doc-1"},
+        context={},
+        policy_change="""
+allow:
+  action: share_document
+  priority: 1
+  policy_key: share-admin
+  when:
+    role: admin
+""",
+    )
+
+    assert result.decision_before.decision == "deny"
+    assert result.decision_after.decision == "allow"
+    assert result.decision_after.policy_id == "share-admin"
diff --git a/tests/test_policy_testing.py b/tests/test_policy_testing.py
new file mode 100644
index 0000000..459ace6
--- /dev/null
+++ b/tests/test_policy_testing.py
@@ -0,0 +1,119 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+pytest.importorskip("typer")
+from typer.testing import CliRunner
+
+from keynetra.cli import app
+from keynetra.config.settings import get_settings, reset_settings_cache
+from keynetra.services.policy_testing import parse_policy_test_suite, validate_policy_test_suite
+
+
+def test_parse_policy_test_suite_supports_embedded_policy_dsl() -> None:
+    """A suite may embed policies in DSL form (allow/when); parsing maps
+    policy_key to policy_id and builds typed authorization inputs for tests."""
+    suite = parse_policy_test_suite("""
+policies:
+  - allow:
+      action: read
+      priority: 10
+      policy_key: read-admin
+      when:
+        role: admin
+tests:
+  - name: admin_allowed
+    input:
+      user:
+        role: admin
+      action: read
+      resource: {}
+    expect: allow
+""")
+
+    assert len(suite.policies) == 1
+    assert suite.policies[0]["policy_id"] == "read-admin"
+    assert suite.tests[0].authorization_input.action == "read"
+
+
+def test_validate_policy_test_suite_runs_expected_decisions() -> None:
+    """Validation evaluates each test case against the suite's policies and
+    reports a pass when the decision matches ``expect`` (allow or deny)."""
+    results = validate_policy_test_suite("""
+policies:
+  - action: read
+    effect: allow
+    policy_id: read-admin
+    conditions:
+      role: admin
+tests:
+  - name: admin_allowed
+    input:
+      user:
+        role: admin
+      action: read
+      resource: {}
+    expect: allow
+  - name: user_denied
+    input:
+      user:
+        role: user
+      action: read
+      resource: {}
+    expect: deny
+""")
+
+    assert [result.passed for result in results] == [True, True]
+    assert results[0].policy_id == "read-admin"
+
+
+def test_cli_test_policy_succeeds_for_example_file() -> None:
+    """`keynetra test-policy` exits 0 and prints [PASS] for the shipped
+    examples/policy_tests.yaml fixture."""
+    runner = CliRunner()
+    # Repo-root-relative path: tests/.. -> examples/policy_tests.yaml.
+    fixture = Path(__file__).resolve().parents[1] / "examples" / "policy_tests.yaml"
+
+    result = runner.invoke(app, ["test-policy", str(fixture)])
+
+    assert result.exit_code == 0
+    assert "[PASS]" in result.output
+
+
+def test_cli_test_policy_fails_when_expectation_is_wrong(tmp_path: Path) -> None:
+    """A suite whose expectation contradicts its policy (deny policy,
+    expect allow) makes the CLI exit 1 and print a [FAIL] line."""
+    runner = CliRunner()
+    fixture = tmp_path / "bad-policy.yaml"
+    fixture.write_text(
+        """
+policies:
+  - action: read
+    effect: deny
+tests:
+  - name: should_fail
+    input:
+      user: {}
+      action: read
+      resource: {}
+    expect: allow
+""",
+        encoding="utf-8",
+    )
+
+    result = runner.invoke(app, ["test-policy", str(fixture)])
+
+    assert result.exit_code == 1
+    assert "[FAIL] should_fail" in result.output
+
+
+def test_cli_seed_data_is_idempotent(tmp_path: Path) -> None:
+    """Running `keynetra seed-data` twice creates the tenant once: the first
+    run reports created_tenant=true, the second created_tenant=false."""
+    runner = CliRunner()
+    database_url = f"sqlite+pysqlite:///{tmp_path / 'seed.db'}"
+
+    import os
+
+    os.environ["KEYNETRA_DATABASE_URL"] = database_url
+    reset_settings_cache()
+    # NOTE(review): presumably redundant with reset_settings_cache() — confirm
+    # whether both cache invalidations are really needed.
+    get_settings.cache_clear()
+
+    first = runner.invoke(app, ["seed-data"])
+    second = runner.invoke(app, ["seed-data"])
+
+    assert first.exit_code == 0
+    assert '"created_tenant": true' in first.output.lower()
+    assert second.exit_code == 0
+    assert '"created_tenant": false' in second.output.lower()
diff --git a/tests/test_redis_multi_node.py b/tests/test_redis_multi_node.py
new file mode 100644
index 0000000..cd0cca7
--- /dev/null
+++ b/tests/test_redis_multi_node.py
@@ -0,0 +1,175 @@
+from __future__ import annotations
+
+import os
+
+import pytest
+
+pytest.importorskip("redis")
+
+import redis
+
+from keynetra.engine.keynetra_engine import AuthorizationInput, PolicyDefinition
+from keynetra.infrastructure.cache.access_index_cache import RedisBackedAccessIndexCache
+from keynetra.infrastructure.cache.acl_cache import RedisBackedACLCache
+from keynetra.infrastructure.cache.backends import build_cache_backend
+from keynetra.infrastructure.cache.decision_cache import RedisBackedDecisionCache
+from keynetra.infrastructure.cache.policy_cache import RedisBackedPolicyCache
+from keynetra.infrastructure.cache.relationship_cache import RedisBackedRelationshipCache
+from keynetra.services.interfaces import AccessIndexEntry, ACLRecord, CachedDecision, PolicyRecord
+
+
+def _redis_url() -> str:
+    """Redis URL for integration tests; defaults to local DB 15 so flushdb
+    cannot clobber a development database."""
+    return os.environ.get("KEYNETRA_REDIS_URL", "redis://localhost:6379/15")
+
+
+def _redis_client() -> redis.Redis:
+    """Connect to Redis and ping it; skip the calling test when unreachable."""
+    client = redis.Redis.from_url(_redis_url(), decode_responses=True)
+    try:
+        client.ping()
+    except Exception as exc:  # pragma: no cover - skipped in environments without Redis
+        pytest.skip(f"redis integration test requires a reachable Redis server: {exc}")
+    return client
+
+
+def test_redis_multi_node_cache_invalidation_propagates_across_nodes() -> None:
+    """Two independent Redis connections ("nodes" A and B) share one store:
+    a value written through node A is visible via node B, and an invalidation
+    issued on node B removes it for node A — checked for the policy, decision,
+    ACL, access-index, and relationship caches in turn."""
+    client_a = _redis_client()
+    # Second connection reuses the same URL, so it shares the same DB.
+    client_b = redis.Redis.from_url(_redis_url(), decode_responses=True)
+    backend_a = build_cache_backend(client_a)
+    backend_b = build_cache_backend(client_b)
+
+    client_a.flushdb()
+
+    policy_cache_a = RedisBackedPolicyCache(backend_a)
+    policy_cache_b = RedisBackedPolicyCache(backend_b)
+    decision_cache_a = RedisBackedDecisionCache(backend_a)
+    decision_cache_b = RedisBackedDecisionCache(backend_b)
+    acl_cache_a = RedisBackedACLCache(backend_a)
+    acl_cache_b = RedisBackedACLCache(backend_b)
+    access_index_cache_a = RedisBackedAccessIndexCache(backend_a)
+    access_index_cache_b = RedisBackedAccessIndexCache(backend_b)
+    relationship_cache_a = RedisBackedRelationshipCache(backend_a)
+    relationship_cache_b = RedisBackedRelationshipCache(backend_b)
+
+    # Policy cache: write on A, read on B, invalidate on B, gone on A.
+    policy_cache_a.set(
+        "default",
+        1,
+        [
+            PolicyRecord(
+                id=1,
+                definition=PolicyDefinition(
+                    action="read",
+                    effect="allow",
+                    priority=1,
+                    policy_id="policy:read",
+                    conditions={},
+                ),
+            )
+        ],
+    )
+    assert policy_cache_b.get("default", 1) is not None
+    policy_cache_b.invalidate("default")
+    assert policy_cache_a.get("default", 1) is None
+
+    # Decision cache: bumping the namespace changes the derived key, so the
+    # old entry becomes unreachable without explicit deletion.
+    authorization_input = AuthorizationInput(
+        user={"id": 1},
+        resource={"resource_type": "doc", "resource_id": "doc-1"},
+        action="read",
+        tenant_key="default",
+    )
+    decision_key_before = decision_cache_a.make_key(
+        tenant_key="default",
+        policy_version=1,
+        authorization_input=authorization_input,
+        revision=1,
+    )
+    decision_cache_a.set(
+        decision_key_before,
+        CachedDecision(
+            allowed=True,
+            decision="allow",
+            reason="cached",
+            policy_id="policy:read",
+        ),
+        ttl_seconds=30,
+    )
+    assert decision_cache_b.get(decision_key_before) is not None
+    decision_cache_b.bump_namespace("default")
+    decision_key_after = decision_cache_a.make_key(
+        tenant_key="default",
+        policy_version=1,
+        authorization_input=authorization_input,
+        revision=1,
+    )
+    assert decision_key_after != decision_key_before
+    assert decision_cache_a.get(decision_key_after) is None
+
+    # ACL cache round-trip and cross-node invalidation.
+    acl_cache_a.set(
+        tenant_id=1,
+        resource_type="doc",
+        resource_id="doc-1",
+        action="read",
+        acl_entries=[
+            ACLRecord(
+                id=1,
+                tenant_id=1,
+                subject_type="user",
+                subject_id="1",
+                resource_type="doc",
+                resource_id="doc-1",
+                action="read",
+                effect="allow",
+            )
+        ],
+    )
+    assert (
+        acl_cache_b.get(tenant_id=1, resource_type="doc", resource_id="doc-1", action="read")
+        is not None
+    )
+    acl_cache_b.invalidate(tenant_id=1, resource_type="doc", resource_id="doc-1")
+    assert (
+        acl_cache_a.get(tenant_id=1, resource_type="doc", resource_id="doc-1", action="read")
+        is None
+    )
+
+    # Access-index cache round-trip and cross-node invalidation.
+    access_index_cache_a.set(
+        tenant_id=1,
+        resource_type="doc",
+        resource_id="doc-1",
+        action="read",
+        entries=[
+            AccessIndexEntry(
+                resource_type="doc",
+                resource_id="doc-1",
+                action="read",
+                allowed_subjects=("user:1",),
+                source="acl",
+                subject_type="user",
+                subject_id="1",
+                effect="allow",
+                acl_id=1,
+            )
+        ],
+    )
+    assert (
+        access_index_cache_b.get(
+            tenant_id=1, resource_type="doc", resource_id="doc-1", action="read"
+        )
+        is not None
+    )
+    access_index_cache_b.invalidate(tenant_id=1, resource_type="doc", resource_id="doc-1")
+    assert (
+        access_index_cache_a.get(
+            tenant_id=1, resource_type="doc", resource_id="doc-1", action="read"
+        )
+        is None
+    )
+
+    # Relationship cache: even an empty list is a cached value (non-None).
+    relationship_cache_a.set(
+        tenant_id=1,
+        subject_type="user",
+        subject_id="1",
+        relationships=[],
+    )
+    assert relationship_cache_b.get(tenant_id=1, subject_type="user", subject_id="1") is not None
+    relationship_cache_b.invalidate(tenant_id=1, subject_type="user", subject_id="1")
+    assert relationship_cache_a.get(tenant_id=1, subject_type="user", subject_id="1") is None
diff --git a/tests/test_release_hardening.py b/tests/test_release_hardening.py
new file mode 100644
index 0000000..54f9f40
--- /dev/null
+++ b/tests/test_release_hardening.py
@@ -0,0 +1,970 @@
+from __future__ import annotations
+
+import hashlib
+from types import SimpleNamespace
+
+import pytest
+from fastapi import HTTPException
+from fastapi.security import HTTPAuthorizationCredentials
+from fastapi.testclient import TestClient
+from jose import jwt
+from sqlalchemy import create_engine
+from sqlalchemy.orm import Session
+from typer.testing import CliRunner
+
+from keynetra.api.errors import ApiError
+from keynetra.api.routes.access import (
+ AccessRequest,
+ BatchAccessRequest,
+ check_access,
+ check_access_batch,
+)
+from keynetra.api.routes.access import simulate as access_simulate
+from keynetra.api.routes.dev import (
+ _require_local_dev,
+ get_sample_data,
+ seed_sample_data,
+)
+from keynetra.api.routes.simulation import (
+ ImpactAnalysisRequest,
+ PolicySimulationRequest,
+ _normalize_request,
+ impact_analysis,
+ simulate_policy,
+)
+from keynetra.cli import app
+from keynetra.config.admin_auth import AdminAccess, _resolve_tenant_role, require_management_role
+from keynetra.config.security import (
+ _matches_api_key,
+ get_principal,
+)
+from keynetra.config.settings import Settings, reset_settings_cache
+from keynetra.domain.models.base import Base
+from keynetra.domain.models.rbac import Permission, Role
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.infrastructure.cache.backends import (
+ InMemoryCacheBackend,
+ RedisCacheBackend,
+ build_cache_backend,
+)
+from keynetra.infrastructure.storage.session import initialize_database
+from keynetra.main import create_app
+from keynetra.services import resilience
+from keynetra.services.interfaces import (
+ PolicyListItem,
+ PolicyMutationResult,
+ PolicyRecord,
+ RelationshipRecord,
+ TenantRecord,
+)
+from keynetra.services.policies import PolicyService
+from keynetra.services.relationships import RelationshipService
+
+
class DummyRequest:
    """Minimal stand-in for a FastAPI request carrying the attributes routes read."""

    def __init__(self) -> None:
        fixed_attrs = {
            "state": SimpleNamespace(request_id="req-1"),
            "url": SimpleNamespace(path="/check-access"),
            "method": "POST",
            "client": SimpleNamespace(host="127.0.0.1"),
        }
        for name, value in fixed_attrs.items():
            setattr(self, name, value)
+
+
+class FakeRedisClient:
+ def __init__(self) -> None:
+ self.store: dict[str, str] = {}
+
+ def get(self, key: str) -> str | None:
+ return self.store.get(key)
+
+ def set(self, key: str, value: str) -> None:
+ self.store[key] = value
+
+ def setex(self, key: str, ttl: int, value: str) -> None: # noqa: ARG002
+ self.store[key] = value
+
+ def delete(self, key: str) -> None:
+ self.store.pop(key, None)
+
+ def incr(self, key: str) -> int:
+ self.store[key] = str(int(self.store.get(key, "0")) + 1)
+ return int(self.store[key])
+
+
def test_in_memory_cache_backend_supports_ttl_delete_and_incr(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Exercise TTL expiry, delete, and incr on the in-memory cache backend."""
    backend = InMemoryCacheBackend()
    # Freeze the clock the backend reads so TTL arithmetic is deterministic.
    monkeypatch.setattr("keynetra.infrastructure.cache.backends.time.time", lambda: 100.0)

    backend.set("foo", "bar", ttl_seconds=1)
    assert backend.get("foo") == "bar"
    assert backend.incr("counter") == 1
    assert backend.incr("counter") == 2
    backend.delete("foo")
    assert backend.get("foo") is None

    # Set at t=102 with a 1-second TTL, read at t=104: the entry must have expired.
    monkeypatch.setattr("keynetra.infrastructure.cache.backends.time.time", lambda: 102.0)
    backend.set("short", "value", ttl_seconds=1)
    monkeypatch.setattr("keynetra.infrastructure.cache.backends.time.time", lambda: 104.0)
    assert backend.get("short") is None
+
+
def test_redis_cache_backend_survives_client_errors() -> None:
    """Redis client failures must degrade to no-ops instead of propagating."""

    class ExplodingClient:
        def get(self, key: str) -> None:  # noqa: ARG002
            raise RuntimeError("boom")

        def set(self, key: str, value: str) -> None:  # noqa: ARG002
            raise RuntimeError("boom")

        def setex(self, key: str, ttl: int, value: str) -> None:  # noqa: ARG002
            raise RuntimeError("boom")

        def delete(self, key: str) -> None:  # noqa: ARG002
            raise RuntimeError("boom")

        def incr(self, key: str) -> None:  # noqa: ARG002
            raise RuntimeError("boom")

    backend = RedisCacheBackend(ExplodingClient())
    assert backend.get("foo") is None
    backend.set("foo", "bar", ttl_seconds=10)
    backend.delete("foo")
    assert backend.incr("counter") == 0  # failed increments report 0
+
+
def test_build_cache_backend_uses_shared_memory_fallback() -> None:
    """Without a client the factory yields the in-memory backend; with one, Redis."""
    backend = build_cache_backend(None)
    assert isinstance(backend, InMemoryCacheBackend)
    assert build_cache_backend(FakeRedisClient()).__class__ is RedisCacheBackend
+
+
def test_matches_api_key_uses_constant_time_hash_comparison() -> None:
    """_matches_api_key accepts a key whose SHA-256 digest is in the allowed set."""
    secret = "super-secret"
    hashes = {hashlib.sha256(secret.encode("utf-8")).hexdigest()}
    assert _matches_api_key(secret, hashes) is True
    assert _matches_api_key("wrong", hashes) is False
+
+
def test_get_principal_supports_api_key_and_bearer_jwt(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """get_principal accepts both X-API-Key and Bearer-JWT credentials."""
    request = DummyRequest()
    # Force the API-key comparison to succeed regardless of configured keys.
    monkeypatch.setattr("keynetra.config.security._matches_api_key", lambda *_: True)
    api_key_settings = Settings()

    api_key_principal = get_principal(
        request,
        settings=api_key_settings,
        authorization=None,
        x_api_key="test-key",
    )
    assert api_key_principal["type"] == "api_key"
    assert len(api_key_principal["id"]) == 12

    token = jwt.encode(
        {"sub": "alice", "role": "admin"},
        "jwt-secret",
        algorithm="HS256",
    )
    jwt_principal = get_principal(
        request,
        settings=Settings(jwt_secret="jwt-secret", jwt_algorithm="HS256"),
        authorization=HTTPAuthorizationCredentials(scheme="Bearer", credentials=token),
        x_api_key=None,
    )
    assert jwt_principal["type"] == "jwt"
    assert jwt_principal["id"] == "alice"
    assert jwt_principal["claims"]["role"] == "admin"
+
+
def test_get_principal_rejects_invalid_and_missing_credentials(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Wrong or absent API keys are rejected with an HTTPException."""
    request = DummyRequest()
    monkeypatch.setenv("KEYNETRA_API_KEYS", "test-key")
    settings = Settings()

    with pytest.raises(HTTPException):
        get_principal(
            request,
            settings=settings,
            authorization=None,
            x_api_key="wrong",
        )

    with pytest.raises(HTTPException):
        get_principal(request, settings=settings, authorization=None, x_api_key=None)
+
+
def test_get_principal_rejects_invalid_jwt() -> None:
    """A JWT signed with the wrong secret must be rejected."""
    request = DummyRequest()
    token = jwt.encode({"sub": "alice"}, "wrong-secret", algorithm="HS256")
    with pytest.raises(HTTPException):
        get_principal(
            request,
            settings=Settings(jwt_secret="jwt-secret", jwt_algorithm="HS256"),
            authorization=HTTPAuthorizationCredentials(scheme="Bearer", credentials=token),
            x_api_key=None,
        )
+
+
def test_resolve_tenant_role_covers_list_and_dict_claims() -> None:
    """_resolve_tenant_role handles api-key principals plus dict/list/roles claims."""
    assert _resolve_tenant_role({"type": "api_key"}) == "admin"
    assert _resolve_tenant_role({"claims": {"tenant_roles": {"acme": "developer"}}}) == "developer"
    assert _resolve_tenant_role({"claims": {"tenant_roles": [{"role": "viewer"}]}}) == "viewer"
    assert _resolve_tenant_role({"claims": {"roles": ["developer", "viewer"]}}) == "developer"
+
+
def test_require_management_role_resolves_and_enforces_roles() -> None:
    """require_management_role grants, denies, and validates requested role names."""
    request = DummyRequest()
    dependency = require_management_role("developer")

    # API-key principals resolve to the admin role, which satisfies "developer".
    access = dependency(request, principal={"type": "api_key", "id": "test"})
    assert access.role == "admin"
    assert request.state.admin_role == "admin"

    denied = require_management_role("admin")
    with pytest.raises(ApiError):
        denied(request, principal={"type": "jwt", "claims": {"role": "viewer"}})

    # Unknown role names are rejected eagerly when building the dependency.
    with pytest.raises(ValueError):
        require_management_role("owner")
+
+
def test_resilience_helpers_cover_timeout_and_retry(monkeypatch: pytest.MonkeyPatch) -> None:
    """with_timeout propagates TimeoutError; retry retries until success or exhaustion."""
    # Avoid real backoff sleeps between retry attempts.
    monkeypatch.setattr(resilience.time, "sleep", lambda *_: None)

    class FakeFuture:
        def result(self, timeout: float):  # noqa: ARG002
            raise TimeoutError

        def cancel(self) -> None:
            return None

    monkeypatch.setattr(resilience._EXECUTOR, "submit", lambda func: FakeFuture())

    with pytest.raises(TimeoutError):
        resilience.with_timeout(lambda: "ok", timeout_seconds=0.0)

    attempts: list[int] = []

    def flaky() -> str:
        attempts.append(1)
        if len(attempts) < 3:
            raise RuntimeError("try again")
        return "ok"

    assert resilience.retry(flaky, attempts=3, base_delay_seconds=0.0) == "ok"
    assert len(attempts) == 3

    with pytest.raises(RuntimeError):
        resilience.retry(lambda: (_ for _ in ()).throw(RuntimeError("fail")), attempts=1)
+
+
def test_cli_surface_commands_cover_release_paths(
    monkeypatch: pytest.MonkeyPatch, tmp_path
) -> None:
    """Smoke-test the version/start/check/simulate/impact/model CLI commands."""
    runner = CliRunner()

    result = runner.invoke(app, ["version"])
    assert result.exit_code == 0
    # NOTE(review): hard-coded version string; keynetra.version.__version__ would
    # track releases automatically (as test_cli_version_prints_package_version does).
    assert "0.1.0" in result.stdout

    recorded: dict[str, object] = {}

    def fake_run(app_path: str, *, host: str, port: int, reload: bool) -> None:
        recorded["app_path"] = app_path
        recorded["host"] = host
        recorded["port"] = port
        recorded["reload"] = reload

    # Capture uvicorn.run arguments instead of actually starting a server.
    monkeypatch.setattr("uvicorn.run", fake_run)
    result = runner.invoke(app, ["start", "--host", "127.0.0.1", "--port", "9001", "--reload"])
    assert result.exit_code == 0
    assert recorded == {
        "app_path": "keynetra.api.main:app",
        "host": "127.0.0.1",
        "port": 9001,
        "reload": True,
    }

    posted: list[tuple[str, dict[str, object], dict[str, str]]] = []
    got: list[tuple[str, dict[str, str]]] = []

    class FakeResponse:
        def __init__(self, text: str = "ok") -> None:
            self.text = text

        def raise_for_status(self) -> None:
            return None

    def fake_post(url: str, *, json: dict[str, object], headers: dict[str, str], timeout: float):
        posted.append((url, json, headers))
        return FakeResponse(text='{"ok": true}')

    def fake_get(url: str, *, headers: dict[str, str], timeout: float):
        got.append((url, headers))
        return FakeResponse(text='{"status": "ok"}')

    # Intercept the CLI's HTTP calls so no server is required.
    monkeypatch.setattr("keynetra.cli.httpx.post", fake_post)
    monkeypatch.setattr("keynetra.cli.httpx.get", fake_get)

    result = runner.invoke(
        app,
        [
            "check",
            "--api-key",
            "testkey",
            "--action",
            "read",
            "--user",
            '{"id": 1}',
            "--resource",
            '{"id": "doc-1"}',
            "--context",
            '{"scope": "demo"}',
        ],
    )
    assert result.exit_code == 0
    assert posted[-1][0] == "http://localhost:8000/check-access"
    assert posted[-1][2] == {"X-API-Key": "testkey"}

    result = runner.invoke(
        app,
        [
            "simulate",
            "--api-key",
            "testkey",
            "--action",
            "read",
            "--policy-change",
            "allow read",
        ],
    )
    assert result.exit_code == 0
    assert posted[-1][0] == "http://localhost:8000/simulate-policy"

    result = runner.invoke(
        app,
        [
            "impact",
            "--api-key",
            "testkey",
            "--policy-change",
            "allow read",
        ],
    )
    assert result.exit_code == 0
    assert posted[-1][0] == "http://localhost:8000/impact-analysis"

    schema_file = tmp_path / "schema.dsl"
    schema_file.write_text("model schema 1", encoding="utf-8")
    result = runner.invoke(app, ["model", "apply", str(schema_file), "--api-key", "testkey"])
    assert result.exit_code == 0
    assert posted[-1][0] == "http://localhost:8000/auth-model"

    result = runner.invoke(app, ["model", "show", "--api-key", "testkey"])
    assert result.exit_code == 0
    assert got[-1][0] == "http://localhost:8000/auth-model"
+
+
def test_cli_migrate_invokes_alembic_upgrade(monkeypatch: pytest.MonkeyPatch, tmp_path) -> None:
    """`migrate` runs an alembic upgrade to head against the configured database URL."""
    database_url = f"sqlite+pysqlite:///{tmp_path / 'migrate.db'}"
    monkeypatch.setenv("KEYNETRA_DATABASE_URL", database_url)
    reset_settings_cache()

    called: dict[str, object] = {}

    def fake_upgrade(config, revision: str) -> None:  # noqa: ANN001
        called["revision"] = revision
        called["url"] = config.get_main_option("sqlalchemy.url")

    monkeypatch.setattr("alembic.command.upgrade", fake_upgrade)
    # Pretend no destructive revisions exist for this upgrade path.
    monkeypatch.setattr("keynetra.cli.find_destructive_revisions", lambda *args, **kwargs: [])

    runner = CliRunner()
    result = runner.invoke(app, ["migrate", "--confirm-destructive"])

    assert result.exit_code == 0
    assert called["revision"] == "head"
    assert called["url"] == database_url
+
+
def test_access_route_helpers_cover_transport_paths() -> None:
    """Call the check/simulate/batch route handlers directly with a fake service."""

    class FakeAccessService:
        def authorize(self, **_: object) -> SimpleNamespace:
            return SimpleNamespace(
                decision=SimpleNamespace(
                    allowed=True,
                    decision="allow",
                    matched_policies=["p1"],
                    reason="granted",
                    policy_id="p1",
                    explain_trace=[SimpleNamespace(to_dict=lambda: {"step": "done"})],
                ),
                revision=9,
                cached=False,
            )

        def simulate(self, **_: object) -> SimpleNamespace:
            return SimpleNamespace(
                decision="deny",
                matched_policies=[],
                reason="missing",
                policy_id=None,
                explain_trace=[SimpleNamespace(to_dict=lambda: {"step": "deny"})],
                failed_conditions=["role"],
            )

        def authorize_batch(self, **_: object) -> list[SimpleNamespace]:
            return [
                SimpleNamespace(
                    decision=SimpleNamespace(allowed=True),
                    revision=1,
                ),
                SimpleNamespace(
                    decision=SimpleNamespace(allowed=False),
                    revision=2,
                ),
            ]

        def get_revision(self, *, tenant_key: str) -> int:  # noqa: ARG002
            return 9

    request = DummyRequest()
    service = FakeAccessService()

    check = check_access(
        payload=AccessRequest(
            user={"id": 1}, action="read", resource={}, context={}, consistency="eventual"
        ),
        request=request,
        service=service,
        principal={"type": "api_key"},
    )
    assert check["data"]["decision"] == "allow"
    assert check["data"]["revision"] == 9

    simulated = access_simulate(
        payload=AccessRequest(
            user={"id": 1}, action="read", resource={}, context={}, consistency="eventual"
        ),
        request=request,
        service=service,
        principal={"type": "api_key"},
    )
    assert simulated["data"]["decision"] == "deny"

    # Batch results must preserve per-item action, allow/deny, and revision.
    batch = check_access_batch(
        payload=BatchAccessRequest(
            user={"id": 1},
            items=[{"action": "read"}, {"action": "write"}],
            consistency="eventual",
        ),
        request=request,
        service=service,
        principal={"type": "api_key"},
    )
    assert batch["data"]["results"] == [
        {"action": "read", "allowed": True, "revision": 1},
        {"action": "write", "allowed": False, "revision": 2},
    ]
+
+
def test_simulation_and_dev_routes_cover_local_and_normalization_paths(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Cover dev-only guards, sample seeding, request normalization, and simulation."""

    class FakeSimulator:
        def simulate_policy_change(self, **_: object) -> SimpleNamespace:
            return SimpleNamespace(
                decision_before=SimpleNamespace(
                    allowed=False, decision="deny", reason="before", policy_id="p0"
                ),
                decision_after=SimpleNamespace(
                    allowed=True, decision="allow", reason="after", policy_id="p1"
                ),
            )

    class FakeImpact:
        def analyze_policy_change(self, **_: object) -> SimpleNamespace:
            return SimpleNamespace(gained_access=[1, 2], lost_access=[3])

    monkeypatch.setattr(
        "keynetra.api.routes.dev.sample_bootstrap_document",
        lambda: {"sample": True},
    )
    monkeypatch.setattr(
        "keynetra.api.routes.dev.seed_demo_data",
        lambda db, reset=False: SimpleNamespace(
            tenant_key="default",
            created_tenant=True,
            created_user=True,
            created_role=False,
            created_permissions=1,
            created_relationships=2,
            created_policies=3,
        ),
    )

    # Dev endpoints are allowed in development and rejected in production.
    _require_local_dev(Settings(environment="development"))
    with pytest.raises(ApiError):
        _require_local_dev(Settings(environment="production"))

    request = DummyRequest()
    sample = get_sample_data(request=request, settings=Settings(environment="development"))
    assert sample["data"] == {"sample": True}

    seeded = seed_sample_data(
        request=request,
        db=object(),
        settings=Settings(environment="development"),
        reset=True,
    )
    assert seeded["data"]["created_permissions"] == 1

    # Loose payloads are normalized: bare strings become ids, bad types are dropped.
    normalized = _normalize_request(
        {"user": "alice", "resource": "document:42", "action": 123, "context": "bad"}
    )
    assert normalized == {
        "user": {"id": "alice"},
        "resource": {"resource_type": "document", "resource_id": "42"},
        "action": "",
        "context": {},
    }

    simulation = simulate_policy(
        payload=PolicySimulationRequest(
            simulate={"policy_change": "allow read"},
            request=normalized,
        ),
        request=request,
        deps=(SimpleNamespace(), FakeSimulator(), FakeImpact()),
        access=AdminAccess(tenant_key="default", role="viewer", principal={"type": "api_key"}),
    )
    assert simulation["data"]["decision_before"]["decision"] == "deny"
    assert simulation["data"]["decision_after"]["decision"] == "allow"

    impact = impact_analysis(
        payload=ImpactAnalysisRequest(policy_change="allow read"),
        request=request,
        deps=(SimpleNamespace(), FakeSimulator(), FakeImpact()),
        access=AdminAccess(tenant_key="default", role="viewer", principal={"type": "api_key"}),
    )
    assert impact["data"]["gained_access"] == [1, 2]
    assert impact["data"]["lost_access"] == [3]
+
+
def test_policy_service_release_paths() -> None:
    """Drive PolicyService list/page/create/rollback/delete through in-memory fakes."""

    class FakeTenantRepo:
        def __init__(self) -> None:
            self.tenant = TenantRecord(id=1, tenant_key="default", policy_version=1, revision=1)

        def get_or_create(self, tenant_key: str) -> TenantRecord:
            return self.tenant

        def get_by_id(self, tenant_id: int) -> TenantRecord | None:  # noqa: ARG002
            return self.tenant

        def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
            # Records are replaced, not mutated, mirroring an immutable row snapshot.
            self.tenant = TenantRecord(
                id=tenant.id,
                tenant_key=tenant.tenant_key,
                policy_version=tenant.policy_version + 1,
                revision=tenant.revision + 1,
            )
            return self.tenant

        def bump_revision(self, tenant: TenantRecord) -> TenantRecord:
            self.tenant = TenantRecord(
                id=tenant.id,
                tenant_key=tenant.tenant_key,
                policy_version=tenant.policy_version,
                revision=tenant.revision + 1,
            )
            return self.tenant

    class FakePolicyRepo:
        def __init__(self) -> None:
            self.policy = PolicyRecord(
                id=1,
                definition=PolicyDefinition(
                    action="read",
                    effect="allow",
                    priority=10,
                    policy_id="p1",
                    conditions={"role": "admin"},
                ),
            )
            self.deleted: list[str] = []

        def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]:  # noqa: ARG002
            return [self.policy]

        def list_current_policy_views(
            self, *, tenant_id: int
        ) -> list[PolicyListItem]:  # noqa: ARG002
            return [PolicyListItem(id=1, action="read", effect="allow", priority=10, conditions={})]

        def list_current_policy_page(
            self,
            *,
            tenant_id: int,
            limit: int,
            cursor: dict[str, object] | None,
        ) -> tuple[list[PolicyListItem], str | None]:  # noqa: ARG002
            return (
                [PolicyListItem(id=1, action="read", effect="allow", priority=10, conditions={})],
                "cursor-1",
            )

        def create_policy_version(
            self,
            *,
            tenant_id: int,
            policy_key: str,
            action: str,
            effect: str,
            priority: int,
            conditions: dict[str, object],
            created_by: str | None,
        ) -> PolicyMutationResult:  # noqa: ARG002
            return PolicyMutationResult(
                id=2, action=action, effect=effect, priority=priority, conditions=conditions
            )

        def rollback_policy(
            self, *, tenant_id: int, policy_key: str, version: int
        ) -> tuple[str, int]:  # noqa: ARG002
            return policy_key, version

        def delete_policy(self, *, tenant_id: int, policy_key: str) -> None:  # noqa: ARG002
            self.deleted.append(policy_key)

    class FakePolicyCache:
        def __init__(self) -> None:
            self.invalidated: list[str] = []

        def invalidate(self, tenant_key: str) -> None:
            self.invalidated.append(tenant_key)

    class FakeDecisionCache:
        def __init__(self) -> None:
            self.namespaces: list[str] = []

        def bump_namespace(self, tenant_key: str) -> int:
            self.namespaces.append(tenant_key)
            return len(self.namespaces)

    class FakePublisher:
        def __init__(self) -> None:
            self.events: list[tuple[str, int]] = []

        def publish_policy_update(self, *, tenant_key: str, policy_version: int) -> None:
            self.events.append((tenant_key, policy_version))

    tenants = FakeTenantRepo()
    policies = FakePolicyRepo()
    policy_cache = FakePolicyCache()
    decision_cache = FakeDecisionCache()
    publisher = FakePublisher()
    service = PolicyService(
        tenants=tenants,
        policies=policies,
        policy_cache=policy_cache,
        decision_cache=decision_cache,
        publisher=publisher,
    )

    assert service.list_policies(tenant_key="default") == [
        {"id": 1, "action": "read", "effect": "allow", "priority": 10, "conditions": {}}
    ]

    page, cursor = service.list_policies_page(tenant_key="default", limit=10, cursor=None)
    assert page[0]["action"] == "read"
    assert cursor == "cursor-1"

    created = service.create_policy(
        tenant_key="default",
        policy_key="p2",
        action="write",
        effect="allow",
        priority=20,
        conditions={"role": "writer"},
        created_by="tester",
    )
    assert created.action == "write"
    # A mutation must invalidate caches and publish the bumped policy version.
    assert policy_cache.invalidated[-1] == "default"
    assert decision_cache.namespaces[-1] == "default"
    assert publisher.events[-1] == ("default", 2)

    rolled_back = service.rollback_policy(tenant_key="default", policy_key="p1", version=3)
    assert rolled_back == ("p1", 3)

    service.delete_policy(tenant_key="default", policy_key="p1")
    assert policies.deleted == ["p1"]
+
+
def test_relationship_service_release_paths() -> None:
    """Drive RelationshipService listing, paging, caching, and creation via fakes."""

    class FakeTenantRepo:
        def __init__(self) -> None:
            self.tenant = TenantRecord(id=1, tenant_key="default", policy_version=1, revision=1)

        def get_or_create(self, tenant_key: str) -> TenantRecord:
            return self.tenant

        def get_by_id(self, tenant_id: int) -> TenantRecord | None:  # noqa: ARG002
            return self.tenant

        def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
            self.tenant = TenantRecord(
                id=tenant.id,
                tenant_key=tenant.tenant_key,
                policy_version=tenant.policy_version + 1,
                revision=tenant.revision + 1,
            )
            return self.tenant

        def bump_revision(self, tenant: TenantRecord) -> TenantRecord:
            self.tenant = TenantRecord(
                id=tenant.id,
                tenant_key=tenant.tenant_key,
                policy_version=tenant.policy_version,
                revision=tenant.revision + 1,
            )
            return self.tenant

    class FakeRelationshipRepo:
        def __init__(self) -> None:
            # Counts repository hits to prove the cache short-circuits repeats.
            self.calls = 0

        def list_for_subject(
            self, *, tenant_id: int, subject_type: str, subject_id: str
        ) -> list[RelationshipRecord]:  # noqa: ARG002
            self.calls += 1
            return [
                RelationshipRecord(
                    subject_type=subject_type,
                    subject_id=subject_id,
                    relation="member_of",
                    object_type="team",
                    object_id="red",
                )
            ]

        def list_for_subject_page(
            self,
            *,
            tenant_id: int,
            subject_type: str,
            subject_id: str,
            limit: int,
            cursor: dict[str, object] | None,
        ) -> tuple[list[RelationshipRecord], str | None]:  # noqa: ARG002
            return (
                [
                    RelationshipRecord(
                        subject_type=subject_type,
                        subject_id=subject_id,
                        relation="member_of",
                        object_type="team",
                        object_id="red",
                    )
                ],
                "next",
            )

        def create(
            self,
            *,
            tenant_id: int,
            subject_type: str,
            subject_id: str,
            relation: str,
            object_type: str,
            object_id: str,
        ) -> int:  # noqa: ARG002
            return 99

    class FakeRelationshipCache:
        def __init__(self) -> None:
            self.data: dict[tuple[int, str, str], list[RelationshipRecord]] = {}

        def get(
            self, *, tenant_id: int, subject_type: str, subject_id: str
        ) -> list[RelationshipRecord] | None:
            return self.data.get((tenant_id, subject_type, subject_id))

        def set(
            self,
            *,
            tenant_id: int,
            subject_type: str,
            subject_id: str,
            relationships: list[RelationshipRecord],
        ) -> None:
            self.data[(tenant_id, subject_type, subject_id)] = relationships

        def invalidate(self, *, tenant_id: int, subject_type: str, subject_id: str) -> None:
            self.data.pop((tenant_id, subject_type, subject_id), None)

    class FakeDecisionCache:
        def __init__(self) -> None:
            self.namespaces: list[str] = []

        def bump_namespace(self, tenant_key: str) -> int:
            self.namespaces.append(tenant_key)
            return len(self.namespaces)

    class FakeAccessIndexCache:
        def __init__(self) -> None:
            self.invalidated: list[int] = []

        def invalidate_tenant(self, tenant_id: int) -> None:
            self.invalidated.append(tenant_id)

    tenants = FakeTenantRepo()
    relationships = FakeRelationshipRepo()
    relationship_cache = FakeRelationshipCache()
    decision_cache = FakeDecisionCache()
    access_index_cache = FakeAccessIndexCache()
    service = RelationshipService(
        tenants=tenants,
        relationships=relationships,
        relationship_cache=relationship_cache,
        decision_cache=decision_cache,
        access_index_cache=access_index_cache,
    )

    # The second identical listing must be served from the relationship cache.
    first = service.list_relationships(tenant_key="default", subject_type="user", subject_id="7")
    second = service.list_relationships(tenant_key="default", subject_type="user", subject_id="7")
    assert first == second
    assert relationships.calls == 1

    page, cursor = service.list_relationships_page(
        tenant_key="default",
        subject_type="user",
        subject_id="7",
        limit=5,
        cursor=None,
    )
    assert page[0]["relation"] == "member_of"
    assert cursor == "next"

    created = service.create_relationship(
        tenant_key="default",
        subject_type="user",
        subject_id="7",
        relation="member_of",
        object_type="team",
        object_id="blue",
    )
    assert created == 99
    # Creation must bump the decision-cache namespace and clear the access index.
    assert decision_cache.namespaces[-1] == "default"
    assert access_index_cache.invalidated == [1]
+
+
def test_management_routes_cover_permissions_roles_and_acl(
    monkeypatch: pytest.MonkeyPatch,
    tmp_path,
) -> None:
    """End-to-end CRUD over /permissions, /roles, and /acl against a real SQLite DB."""
    database_url = f"sqlite+pysqlite:///{tmp_path / 'management.db'}"
    monkeypatch.setenv("KEYNETRA_DATABASE_URL", database_url)
    monkeypatch.setenv("KEYNETRA_API_KEYS", "testkey")
    # Raise rate limits so the many sequential requests below are never throttled.
    monkeypatch.setenv("KEYNETRA_RATE_LIMIT_PER_MINUTE", "1000")
    monkeypatch.setenv("KEYNETRA_RATE_LIMIT_BURST", "1000")
    reset_settings_cache()
    initialize_database(database_url)
    engine = create_engine(database_url, future=True)
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        # Seed one role/permission pair so read endpoints have data to return.
        role = Role(name="seed-role")
        permission = Permission(action="seed-action")
        role.permissions.append(permission)
        session.add_all([role, permission])
        session.commit()
        permission_id = permission.id

    client = TestClient(create_app())
    headers = {"X-API-Key": "testkey"}

    listed_permissions = client.get("/permissions", headers=headers)
    assert listed_permissions.status_code == 200

    created_permission = client.post(
        "/permissions",
        json={"action": "export_data"},
        headers=headers,
    )
    assert created_permission.status_code == 201
    created_permission_id = created_permission.json()["id"]

    updated_permission = client.put(
        f"/permissions/{created_permission_id}",
        json={"action": "export_data_v2"},
        headers=headers,
    )
    assert updated_permission.status_code == 200

    permission_roles = client.get(
        f"/permissions/{permission_id}/roles",
        headers=headers,
    )
    assert permission_roles.status_code == 200

    created_role = client.post(
        "/roles",
        json={"name": "auditor"},
        headers=headers,
    )
    assert created_role.status_code == 201
    created_role_id = created_role.json()["id"]

    updated_role = client.put(
        f"/roles/{created_role_id}",
        json={"name": "auditor-v2"},
        headers=headers,
    )
    assert updated_role.status_code == 200

    add_permission = client.post(
        f"/roles/{created_role_id}/permissions/{created_permission_id}",
        headers=headers,
    )
    assert add_permission.status_code == 201

    role_permissions = client.get(
        f"/roles/{created_role_id}/permissions",
        headers=headers,
    )
    assert role_permissions.status_code == 200

    remove_permission = client.delete(
        f"/roles/{created_role_id}/permissions/{created_permission_id}",
        headers=headers,
    )
    assert remove_permission.status_code == 200

    delete_role = client.delete(f"/roles/{created_role_id}", headers=headers)
    assert delete_role.status_code == 200

    created_acl = client.post(
        "/acl",
        json={
            "subject_type": "user",
            "subject_id": "u1",
            "resource_type": "document",
            "resource_id": "doc-1",
            "action": "read",
            "effect": "allow",
        },
        headers=headers,
    )
    assert created_acl.status_code == 201
    acl_id = created_acl.json()["data"]["id"]

    listed_acl = client.get("/acl/document/doc-1", headers=headers)
    assert listed_acl.status_code == 200

    deleted_acl = client.delete(f"/acl/{acl_id}", headers=headers)
    assert deleted_acl.status_code == 200
diff --git a/tests/test_resilience_cli.py b/tests/test_resilience_cli.py
new file mode 100644
index 0000000..2f790c9
--- /dev/null
+++ b/tests/test_resilience_cli.py
@@ -0,0 +1,153 @@
+from __future__ import annotations
+
+import json
+import os
+
+from typer.testing import CliRunner
+
+from keynetra.cli import app
+from keynetra.config.settings import Settings
+from keynetra.services.authorization import AuthorizationService
+from keynetra.version import __version__
+
+
+class _BrokenTenantRepo:
+ def get_or_create(self, tenant_key: str):
+ raise RuntimeError("tenant store unavailable")
+
+
+class _NoopUserRepo:
+ def get_user_context(self, user_id: int):
+ return None
+
+
+class _NoopRelationshipRepo:
+ def list_for_subject(self, *, tenant_id: int, subject_type: str, subject_id: str):
+ return []
+
+
+class _NoopAuditRepo:
+ def write(self, **kwargs):
+ return None
+
+
+class _NoopCache:
+ def get(self, *args, **kwargs):
+ return None
+
+ def set(self, *args, **kwargs):
+ return None
+
+ def invalidate(self, *args, **kwargs):
+ return None
+
+ def make_key(self, **kwargs):
+ return "cache-key"
+
+ def bump_namespace(self, tenant_key: str):
+ return 1
+
+
+class _NoopPolicyRepo:
+ def list_current_policies(self, *, tenant_id: int):
+ return []
+
+
def _service(settings: Settings) -> AuthorizationService:
    """Build an AuthorizationService whose tenant repository always fails.

    Every other dependency is a no-op double, so resilience behavior is driven
    solely by the broken tenant lookup and the supplied settings.
    """
    return AuthorizationService(
        settings=settings,
        tenants=_BrokenTenantRepo(),
        policies=_NoopPolicyRepo(),
        users=_NoopUserRepo(),
        relationships=_NoopRelationshipRepo(),
        audit=_NoopAuditRepo(),
        policy_cache=_NoopCache(),
        relationship_cache=_NoopCache(),
        decision_cache=_NoopCache(),
    )
+
+
def test_resilience_fail_closed_denies_on_backend_failure() -> None:
    """In fail_closed mode a backend outage must yield an explicit deny."""
    result = _service(
        Settings(resilience_mode="fail_closed", resilience_fallback_behavior="static")
    ).authorize(
        tenant_key="tenant-a",
        principal={"type": "test", "id": "p1"},
        user={"id": "u1"},
        action="read",
        resource={"id": "r1"},
        context={},
        audit=False,
    )

    assert result.decision.allowed is False
    assert result.decision.decision == "deny"
+
+
def test_resilience_fail_open_allows_on_backend_failure() -> None:
    """In fail_open mode a backend outage must yield an allow."""
    result = _service(
        Settings(resilience_mode="fail_open", resilience_fallback_behavior="static")
    ).authorize(
        tenant_key="tenant-a",
        principal={"type": "test", "id": "p1"},
        user={"id": "u1"},
        action="read",
        resource={"id": "r1"},
        context={},
        audit=False,
    )

    assert result.decision.allowed is True
    assert result.decision.decision == "allow"
+
+
def test_resilience_default_policy_eval_uses_configured_policies() -> None:
    """The default_policy_eval fallback evaluates the statically configured policies."""
    settings = Settings(
        resilience_mode="fail_closed",
        resilience_fallback_behavior="default_policy_eval",
        policies_json=json.dumps(
            [{"action": "read", "effect": "allow", "priority": 1, "conditions": {}}]
        ),
    )
    result = _service(settings).authorize(
        tenant_key="tenant-a",
        principal={"type": "test", "id": "p1"},
        user={"id": "u1"},
        action="read",
        resource={"id": "r1"},
        context={},
        audit=False,
    )

    assert result.decision.allowed is True
    # The explain trace must record that the fallback path was taken.
    assert any(step.step == "resilience_fallback" for step in result.decision.explain_trace)
+
+
def test_cli_explain_prints_decision_and_trace(tmp_path) -> None:
    """`explain` emits a JSON document containing decision and explain_trace.

    Fix: the original assigned ``os.environ["KEYNETRA_DATABASE_URL"]`` without
    restoring it, leaking the override into every later test in the session.
    The override is now reverted in a ``finally`` block (sibling tests use
    ``monkeypatch.setenv`` for the same reason).
    """
    database_url = f"sqlite+pysqlite:///{tmp_path / 'cli.db'}"
    previous = os.environ.get("KEYNETRA_DATABASE_URL")
    os.environ["KEYNETRA_DATABASE_URL"] = database_url
    try:
        runner = CliRunner()

        result = runner.invoke(
            app, ["explain", "--user", "u1", "--resource", "r1", "--action", "read"]
        )

        assert result.exit_code == 0
        payload = json.loads(result.stdout)
        assert "decision" in payload
        assert "explain_trace" in payload
    finally:
        # Restore the environment so subsequent tests see the original value.
        if previous is None:
            os.environ.pop("KEYNETRA_DATABASE_URL", None)
        else:
            os.environ["KEYNETRA_DATABASE_URL"] = previous
+
+
def test_cli_version_prints_package_version() -> None:
    """`version` prints exactly the package version string."""
    runner = CliRunner()

    result = runner.invoke(app, ["version"])

    assert result.exit_code == 0
    assert result.stdout.strip() == __version__
+
+
def test_cli_help_cli_prints_examples() -> None:
    """`help-cli` output includes the documented example invocations."""
    runner = CliRunner()
    result = runner.invoke(app, ["help-cli"])
    assert result.exit_code == 0
    assert "keynetra serve --config examples/keynetra.yaml" in result.stdout
    assert "keynetra compile-policies --config examples/keynetra.yaml" in result.stdout
diff --git a/tests/test_services_caching.py b/tests/test_services_caching.py
new file mode 100644
index 0000000..6c92145
--- /dev/null
+++ b/tests/test_services_caching.py
@@ -0,0 +1,216 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from keynetra.config.settings import Settings
+from keynetra.engine.keynetra_engine import PolicyDefinition
+from keynetra.infrastructure.cache.backends import InMemoryCacheBackend
+from keynetra.infrastructure.cache.decision_cache import RedisBackedDecisionCache
+from keynetra.infrastructure.cache.policy_cache import RedisBackedPolicyCache
+from keynetra.infrastructure.cache.relationship_cache import RedisBackedRelationshipCache
+from keynetra.services.authorization import AuthorizationService
+from keynetra.services.interfaces import (
+ PolicyMutationResult,
+ PolicyRecord,
+ RelationshipRecord,
+ TenantRecord,
+)
+from keynetra.services.policies import PolicyService
+from keynetra.services.relationships import RelationshipService
+
+
+class FakeTenantRepository:
+ def __init__(self) -> None:
+ self._tenant = TenantRecord(id=1, tenant_key="default", policy_version=1)
+
+ def get_or_create(self, tenant_key: str) -> TenantRecord:
+ return self._tenant
+
+ def get_by_id(self, tenant_id: int) -> TenantRecord | None:
+ return self._tenant if self._tenant.id == tenant_id else None
+
+ def bump_policy_version(self, tenant: TenantRecord) -> TenantRecord:
+ self._tenant = TenantRecord(
+ id=tenant.id, tenant_key=tenant.tenant_key, policy_version=tenant.policy_version + 1
+ )
+ return self._tenant
+
+
+class FakePolicyRepository:
+ def __init__(self) -> None:
+ self.list_calls = 0
+ self.policies = [
+ PolicyRecord(
+ id=1,
+ definition=PolicyDefinition(
+ action="read",
+ effect="allow",
+ priority=1,
+ policy_id="read:v1",
+ conditions={"role": "admin"},
+ ),
+ )
+ ]
+
+ def list_current_policies(self, *, tenant_id: int) -> list[PolicyRecord]:
+ self.list_calls += 1
+ return list(self.policies)
+
+ def list_current_policy_views(self, *, tenant_id: int) -> list[Any]:
+ raise NotImplementedError
+
+ def create_policy_version(self, **_: Any) -> PolicyMutationResult:
+ return PolicyMutationResult(id=1, action="read", effect="allow", priority=1, conditions={})
+
+ def rollback_policy(self, *, tenant_id: int, policy_key: str, version: int) -> tuple[str, int]:
+ return policy_key, version
+
+
+class FakeUserRepository:
+ def get_user_context(self, user_id: int) -> dict[str, Any] | None:
+ return {"id": user_id, "role": "admin", "permissions": []}
+
+
+class FakeRelationshipRepository:
+ def __init__(self) -> None:
+ self.list_calls = 0
+
+ def list_for_subject(
+ self, *, tenant_id: int, subject_type: str, subject_id: str
+ ) -> list[RelationshipRecord]:
+ self.list_calls += 1
+ return [
+ RelationshipRecord(
+ subject_type=subject_type,
+ subject_id=subject_id,
+ relation="member_of",
+ object_type="team",
+ object_id="red",
+ )
+ ]
+
+ def create(
+ self,
+ *,
+ tenant_id: int,
+ subject_type: str,
+ subject_id: str,
+ relation: str,
+ object_type: str,
+ object_id: str,
+ ) -> int:
+ return 1
+
+
+class FakeAuditRepository:
+ def write(self, **_: Any) -> None:
+ return
+
+
+@dataclass
+class FakePublisher:
+ events: list[tuple[str, int]]
+
+ def publish_policy_update(self, *, tenant_key: str, policy_version: int) -> None:
+ self.events.append((tenant_key, policy_version))
+
+
+def test_authorization_service_uses_policy_and_relationship_caches() -> None:
+ backend = InMemoryCacheBackend()
+ service = AuthorizationService(
+ settings=Settings(KEYNETRA_API_KEYS="test"),
+ tenants=FakeTenantRepository(),
+ policies=FakePolicyRepository(),
+ users=FakeUserRepository(),
+ relationships=FakeRelationshipRepository(),
+ audit=FakeAuditRepository(),
+ policy_cache=RedisBackedPolicyCache(backend),
+ relationship_cache=RedisBackedRelationshipCache(backend),
+ decision_cache=RedisBackedDecisionCache(backend),
+ )
+
+ first = service.authorize(
+ tenant_key="default",
+ principal={"type": "api_key", "id": "test"},
+ user={"id": 7},
+ action="read",
+ resource={},
+ )
+ second = service.authorize(
+ tenant_key="default",
+ principal={"type": "api_key", "id": "test"},
+ user={"id": 7},
+ action="read",
+ resource={},
+ )
+
+ assert first.decision.allowed is True
+ assert second.cached is True
+ assert service._policies.list_calls == 1 # type: ignore[attr-defined]
+ assert service._relationships.list_calls == 1 # type: ignore[attr-defined]
+
+
+def test_policy_update_bumps_decision_namespace_and_publishes_event() -> None:
+ backend = InMemoryCacheBackend()
+ tenants = FakeTenantRepository()
+ publisher = FakePublisher(events=[])
+ decision_cache = RedisBackedDecisionCache(backend)
+ service = PolicyService(
+ tenants=tenants,
+ policies=FakePolicyRepository(),
+ policy_cache=RedisBackedPolicyCache(backend),
+ decision_cache=decision_cache,
+ publisher=publisher,
+ )
+
+ service.create_policy(
+ tenant_key="default",
+ policy_key="read",
+ action="read",
+ effect="allow",
+ priority=1,
+ conditions={},
+ created_by="tester",
+ )
+
+ assert decision_cache.bump_namespace("default") == 2
+ assert publisher.events == [("default", 2)]
+
+
+def test_relationship_change_invalidates_relationship_cache_and_decisions() -> None:
+ backend = InMemoryCacheBackend()
+ decision_cache = RedisBackedDecisionCache(backend)
+ relationship_cache = RedisBackedRelationshipCache(backend)
+ relationship_cache.set(
+ tenant_id=1,
+ subject_type="user",
+ subject_id="7",
+ relationships=[
+ RelationshipRecord(
+ subject_type="user",
+ subject_id="7",
+ relation="member_of",
+ object_type="team",
+ object_id="red",
+ )
+ ],
+ )
+ service = RelationshipService(
+ tenants=FakeTenantRepository(),
+ relationships=FakeRelationshipRepository(),
+ relationship_cache=relationship_cache,
+ decision_cache=decision_cache,
+ )
+
+ service.create_relationship(
+ tenant_key="default",
+ subject_type="user",
+ subject_id="7",
+ relation="member_of",
+ object_type="team",
+ object_id="blue",
+ )
+
+ assert relationship_cache.get(tenant_id=1, subject_type="user", subject_id="7") is None
+ assert decision_cache.bump_namespace("default") == 2
diff --git a/tests/test_small_coverage_boost.py b/tests/test_small_coverage_boost.py
new file mode 100644
index 0000000..d931dd7
--- /dev/null
+++ b/tests/test_small_coverage_boost.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import base64
+import json
+
+import pytest
+
+from keynetra.api.errors import ApiError
+from keynetra.api.pagination import decode_cursor
+from keynetra.config import redis_client
+from keynetra.config.tenancy import get_tenant_key
+
+
+def test_get_tenant_key_returns_default() -> None:
+ assert get_tenant_key() == "default"
+
+
+def test_decode_cursor_rejects_invalid_base64() -> None:
+ with pytest.raises(ApiError) as exc:
+ decode_cursor("not-a-valid-cursor")
+ assert exc.value.message == "invalid cursor"
+
+
+def test_decode_cursor_rejects_non_object_payload() -> None:
+ raw = json.dumps(["not", "an", "object"]).encode("utf-8")
+ cursor = base64.urlsafe_b64encode(raw).decode("ascii")
+ with pytest.raises(ApiError) as exc:
+ decode_cursor(cursor)
+ assert exc.value.message == "invalid cursor"
+
+
+def test_get_redis_returns_client_when_configured(monkeypatch) -> None:
+ class _Settings:
+ redis_url = "redis://localhost:6379/0"
+
+ class _Redis:
+ class Redis:
+ @staticmethod
+ def from_url(url: str, decode_responses: bool = True):
+ return {"url": url, "decode_responses": decode_responses}
+
+ redis_client.get_redis.cache_clear()
+ monkeypatch.setattr(redis_client, "get_settings", lambda: _Settings())
+ monkeypatch.setattr(redis_client, "redis", _Redis)
+ assert redis_client.get_redis()["url"] == "redis://localhost:6379/0"