Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1,944 changes: 1,944 additions & 0 deletions .backup/mcp_server_v1_fastmcp.py.bak

Large diffs are not rendered by default.

160 changes: 160 additions & 0 deletions .backup/start_mcp_v1.py.bak
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
#!/usr/bin/env python3
"""
MCP server for knowledge graph.

Provides a knowledge-graph query service for AI clients over MCP.
"""

import sys
from pathlib import Path

# Add the project root to sys.path BEFORE importing project-local modules,
# so `config` (and later `mcp_server`) resolve regardless of the CWD the
# script is launched from. The original code imported `config` first, which
# defeated the purpose of this path manipulation.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from loguru import logger
from config import settings, get_current_model_info

def check_dependencies():
    """Verify that every required third-party package can be imported.

    Logs the availability of each package; when any are absent, logs
    pip installation instructions for the missing set.

    Returns:
        bool: True when all required packages import cleanly, False otherwise.
    """
    required = ("fastmcp", "neo4j", "ollama", "loguru")
    missing = []

    for name in required:
        try:
            __import__(name)
        except ImportError:
            missing.append(name)
            logger.error(f"✗ {name} is missing")
        else:
            logger.info(f"✓ {name} is available")

    if not missing:
        return True

    logger.error(f"Missing packages: {', '.join(missing)}")
    logger.error("Please install missing packages:")
    logger.error(f"pip install {' '.join(missing)}")
    return False

def check_services():
    """Validate connectivity to every backing service this server needs.

    Always checks Neo4j; checks Ollama and/or OpenRouter only when the
    current configuration selects them as the LLM or embedding provider.

    Returns:
        bool: True when every required service is reachable, False otherwise.
    """
    from config import (
        settings,
        validate_neo4j_connection,
        validate_ollama_connection,
        validate_openrouter_connection,
    )

    logger.info("Checking service connections...")

    # Neo4j is mandatory regardless of provider configuration.
    if not validate_neo4j_connection():
        logger.error("✗ Neo4j connection failed")
        logger.error("Please ensure Neo4j is running and accessible")
        return False
    logger.info("✓ Neo4j connection successful")

    providers = (settings.llm_provider, settings.embedding_provider)

    if "ollama" in providers:
        if not validate_ollama_connection():
            logger.error("✗ Ollama connection failed")
            logger.error("Please ensure Ollama is running and accessible")
            return False
        logger.info("✓ Ollama connection successful")

    if "openrouter" in providers:
        if not validate_openrouter_connection():
            logger.error("✗ OpenRouter connection failed")
            logger.error("Please ensure OpenRouter API key is configured correctly")
            return False
        logger.info("✓ OpenRouter connection successful")

    return True

def print_mcp_info():
    """Log a banner describing the server configuration and its MCP surface.

    Prints the app identity, connection endpoints, and the lists of MCP
    tools, resources, and prompts this server exposes.
    """
    from config import settings

    separator = "=" * 60

    logger.info(separator)
    logger.info("Knowledge Graph MCP Server")
    logger.info(separator)
    for line in (
        f"App Name: {settings.app_name}",
        f"Version: {settings.app_version}",
        f"Neo4j URI: {settings.neo4j_uri}",
        f"Ollama URL: {settings.ollama_base_url}",
        f"Model: {get_current_model_info()}",
    ):
        logger.info(line)
    logger.info(separator)

    logger.info("Available MCP Tools:")
    for tool in (
        "query_knowledge - Query the knowledge base with RAG",
        "search_documents - Search for documents",
        "search_code - Search for code snippets",
        "search_relations - Search for relationships",
        "add_document - Add a document to knowledge base",
        "add_file - Add a file to knowledge base",
        "add_directory - Add directory contents to knowledge base",
        "get_statistics - Get knowledge base statistics",
    ):
        logger.info(f" • {tool}")

    logger.info("\nAvailable MCP Resources:")
    for resource in (
        "knowledge://config - System configuration",
        "knowledge://status - System status and health",
        "knowledge://recent-documents/{limit} - Recent documents",
    ):
        logger.info(f" • {resource}")

    logger.info("\nAvailable MCP Prompts:")
    for prompt in (
        "suggest_queries - Generate query suggestions for different domains",
    ):
        logger.info(f" • {prompt}")

    logger.info(separator)

def main():
    """Entry point: validate the environment, then launch the MCP server.

    Exits with status 1 when dependency checks fail, service checks fail,
    or the server raises an unexpected error.
    """
    logger.info("Starting Knowledge Graph MCP Server...")

    # Run each pre-flight check in order; bail out on the first failure.
    preflight = (
        (check_dependencies, "Dependency check failed. Exiting."),
        (check_services, "Service check failed. Exiting."),
    )
    for check, failure_message in preflight:
        if not check():
            logger.error(failure_message)
            sys.exit(1)

    print_mcp_info()

    try:
        logger.info("Starting MCP server...")
        logger.info("The server will run in STDIO mode for MCP client connections")
        logger.info("To test the server, run: python test_mcp_client.py")
        logger.info("Press Ctrl+C to stop the server")

        # Imported lazily so pre-flight failures are reported before any
        # server-side import errors could mask them.
        from mcp_server import mcp
        mcp.run()
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
    except Exception as e:
        logger.error(f"Server error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
233 changes: 233 additions & 0 deletions .github/CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,233 @@
# Contributing to Codebase RAG

Thank you for your interest in contributing! This document provides guidelines for contributing to the project.

## Getting Started

### Prerequisites

- Python 3.11, 3.12, or 3.13
- [uv](https://github.com/astral-sh/uv) (recommended) or pip
- Git

### Setup Development Environment

```bash
# Clone the repository
git clone https://github.com/yourusername/codebase-rag.git
cd codebase-rag

# Install dependencies
uv pip install -e .

# Or using pip
pip install -e .
```

## Testing

### Running Tests

We have comprehensive unit tests for all MCP handlers. Tests are required for all new features and bug fixes.

```bash
# Run all unit tests
pytest tests/test_mcp_*.py -v

# Run specific test file
pytest tests/test_mcp_handlers.py -v

# Run with coverage report
pytest tests/test_mcp_*.py --cov=mcp_tools --cov-report=html

# Run only unit tests (no external dependencies)
pytest tests/ -v -m "unit"

# Run integration tests (requires Neo4j)
pytest tests/ -v -m "integration"
```

### Writing Tests

When adding new features, please include tests:

1. **Unit Tests**: Test individual functions in isolation
- Mock all external dependencies
- Test success and failure cases
- Test edge cases and validation

2. **Integration Tests**: Test with real services (optional)
- Mark with `@pytest.mark.integration`
- Require Neo4j or other external services

Example test:

```python
import pytest
from unittest.mock import AsyncMock
from mcp_tools.knowledge_handlers import handle_query_knowledge

@pytest.mark.asyncio
async def test_handle_query_knowledge_success(mock_knowledge_service):
"""Test successful knowledge query"""
# Arrange
mock_knowledge_service.query.return_value = {
"success": True,
"answer": "Test response"
}

# Act
result = await handle_query_knowledge(
args={"question": "test question"},
knowledge_service=mock_knowledge_service
)

# Assert
assert result["success"] is True
assert result["answer"] == "Test response"
mock_knowledge_service.query.assert_called_once()
```

## Code Quality

### Formatting and Linting

We use `black`, `isort`, and `ruff` for code quality:

```bash
# Format code with black
black .

# Sort imports with isort
isort .

# Lint with ruff
ruff check .

# Auto-fix issues
ruff check --fix .
```

### Pre-commit Checks

Before committing:

1. Run tests: `pytest tests/test_mcp_*.py -v`
2. Format code: `black . && isort .`
3. Check linting: `ruff check .`

## Pull Request Process

### Creating a Pull Request

1. **Fork the repository** and create a new branch:
```bash
git checkout -b feature/your-feature-name
```

2. **Make your changes** with clear, descriptive commits:
```bash
git commit -m "feat: add new feature X"
git commit -m "fix: resolve issue with Y"
```

3. **Write tests** for your changes

4. **Run tests locally**:
```bash
pytest tests/test_mcp_*.py -v
```

5. **Push to your fork**:
```bash
git push origin feature/your-feature-name
```

6. **Create a Pull Request** on GitHub

### PR Requirements

For your PR to be merged:

- ✅ All tests must pass
- ✅ Code coverage should not decrease
- ✅ Code must be formatted (black, isort)
- ✅ Linting should pass (ruff)
- ✅ Clear description of changes
- ✅ Tests for new features

### GitHub Actions

When you create a PR, GitHub Actions will automatically:

1. **Run unit tests** on Python 3.11, 3.12, and 3.13
2. **Check code quality** (black, isort, ruff)
3. **Generate coverage report**
4. **Report results** in the PR

**The PR cannot be merged until all checks pass.**

### Commit Message Format

We follow conventional commits:

- `feat:` New feature
- `fix:` Bug fix
- `docs:` Documentation changes
- `test:` Adding or updating tests
- `refactor:` Code refactoring
- `chore:` Maintenance tasks

Examples:
```
feat: add streaming support for MCP tools
fix: resolve memory leak in task queue
docs: update MCP server architecture guide
test: add tests for memory handlers
refactor: extract handlers to modules
```

## Code Organization

### Project Structure

```
codebase-rag/
├── api/ # FastAPI routes
├── core/ # Core application logic
├── services/ # Business logic services
├── mcp_tools/ # MCP handler modules
│ ├── knowledge_handlers.py
│ ├── code_handlers.py
│ ├── memory_handlers.py
│ ├── task_handlers.py
│ └── system_handlers.py
├── tests/ # Test suite
│ ├── test_mcp_handlers.py
│ ├── test_mcp_utils.py
│ └── test_mcp_integration.py
└── docs/ # Documentation
```

### Adding New MCP Tools

1. Add handler function to appropriate `mcp_tools/*.py` file
2. Add tool definition to `mcp_tools/tool_definitions.py`
3. Update routing in `mcp_server.py`
4. Write tests in `tests/test_mcp_handlers.py`
5. Update documentation

## Getting Help

- 📖 Read the documentation in `docs/`
- 🐛 Report bugs via GitHub Issues
- 💬 Ask questions in Discussions
- 📧 Contact maintainers

## License

By contributing, you agree that your contributions will be licensed under the same license as the project.

---

Thank you for contributing! 🎉
Loading
Loading