12 changes: 8 additions & 4 deletions .experiments/code_review/main.py
@@ -87,8 +87,8 @@ def save_review(pr_number, review_desc, comments, issues, folder, combined_diff_

with open(issues_file, "w") as f:
json.dump(issues, f, indent=2)
with open(combined_diff, 'w') as f:

with open(combined_diff, "w") as f:
f.write(combined_diff_data)

logger.info(f"Saved review files for PR {pr_number}")
@@ -111,8 +111,12 @@ def main(pr_urls):
logger.info(f"Starting to process PR {pr_number}")

# Without re-evaluation
review_desc, comments, issues, combined_diff_data = process_pr(pr_url, reeval_response=False)
save_review(pr_number, review_desc, comments, issues, no_eval_folder, combined_diff_data)
review_desc, comments, issues, combined_diff_data = process_pr(
pr_url, reeval_response=False
)
save_review(
pr_number, review_desc, comments, issues, no_eval_folder, combined_diff_data
)

# # With re-evaluation
# review_desc, comments, topics = process_pr(pr_url, reeval_response=True)
37 changes: 30 additions & 7 deletions .kaizen/unit_test/kaizen/llms/test_llmprovider.py
@@ -7,6 +7,7 @@
from litellm import Router
import os


@pytest.fixture
def mock_config_data():
return {
@@ -19,49 +20,71 @@ def mock_config_data():
}
}


@pytest.fixture
def mock_litellm():
with patch('kaizen.llms.provider.litellm') as mock:
with patch("kaizen.llms.provider.litellm") as mock:
mock.token_counter.return_value = 100
mock.get_max_tokens.return_value = 4000
mock.cost_per_token.return_value = (0.01, 0.02)
yield mock


@pytest.fixture
def llm_provider(mock_config_data, mock_litellm):
with patch.object(ConfigData, 'get_config_data', return_value=mock_config_data):
with patch.object(ConfigData, "get_config_data", return_value=mock_config_data):
return LLMProvider()


def test_initialization(llm_provider):
assert llm_provider.system_prompt is not None
assert llm_provider.model_config == {"model": "gpt-4o-mini"}
assert llm_provider.default_temperature == 0.3


def test_validate_config_correct_setup(llm_provider):
assert llm_provider.models[0]["model_name"] == "default"


def test_validate_config_missing_language_model():
with patch.object(ConfigData, 'get_config_data', return_value={}):
with pytest.raises(ValueError, match="Missing 'language_model' in configuration"):
with patch.object(ConfigData, "get_config_data", return_value={}):
with pytest.raises(
ValueError, match="Missing 'language_model' in configuration"
):
LLMProvider()


def test_token_limit_check_with_valid_prompt(llm_provider, mock_litellm):
assert llm_provider.is_inside_token_limit("Test prompt") is True


def test_available_tokens_calculation(llm_provider, mock_litellm):
assert llm_provider.available_tokens("Test message") == 3200


def test_usage_cost_calculation(llm_provider, mock_litellm):
total_usage = {"prompt_tokens": 100, "completion_tokens": 200}
cost = llm_provider.get_usage_cost(total_usage)
assert cost == (0.01, 0.02)


def test_setup_redis_missing_env_vars():
with patch.dict(os.environ, {}, clear=True):
with patch.object(ConfigData, 'get_config_data', return_value={"language_model": {"redis_enabled": True}}):
with pytest.raises(ValueError, match="Redis is enabled but REDIS_HOST or REDIS_PORT environment variables are missing"):
with patch.object(
ConfigData,
"get_config_data",
return_value={"language_model": {"redis_enabled": True}},
):
with pytest.raises(
ValueError,
match="Redis is enabled but REDIS_HOST or REDIS_PORT environment variables are missing",
):
LLMProvider()


def test_token_limit_check_boundary_condition(llm_provider, mock_litellm):
mock_litellm.token_counter.return_value = 3200
assert llm_provider.is_inside_token_limit("Boundary test prompt", percentage=0.8) is True
assert (
llm_provider.is_inside_token_limit("Boundary test prompt", percentage=0.8)
is True
)
29 changes: 16 additions & 13 deletions .kaizen/unit_test/kaizen/llms/test_set_all_loggers_to_error.py
@@ -4,37 +4,40 @@
# Assuming the function is in the module kaizen/llms/provider.py
from kaizen.llms.provider import set_all_loggers_to_ERROR


@pytest.fixture
def setup_loggers():
# Setup: Create some loggers with different levels
loggers = {
'logger1': logging.getLogger('logger1'),
'logger2': logging.getLogger('logger2'),
'logger3': logging.getLogger('logger3')
"logger1": logging.getLogger("logger1"),
"logger2": logging.getLogger("logger2"),
"logger3": logging.getLogger("logger3"),
}
loggers['logger1'].setLevel(logging.DEBUG)
loggers['logger2'].setLevel(logging.INFO)
loggers['logger3'].setLevel(logging.WARNING)
loggers["logger1"].setLevel(logging.DEBUG)
loggers["logger2"].setLevel(logging.INFO)
loggers["logger3"].setLevel(logging.WARNING)

yield loggers

# Teardown: Reset loggers to default level (WARNING)
for logger in loggers.values():
logger.setLevel(logging.WARNING)


def test_set_all_loggers_to_ERROR(setup_loggers):
# Test: Verify all existing loggers are set to ERROR level
set_all_loggers_to_ERROR()

for name, logger in setup_loggers.items():
assert logger.level == logging.ERROR, f"Logger {name} not set to ERROR level"


def test_no_loggers_present(monkeypatch):
# Edge Case: Handle scenario where no loggers are present
# Mock the loggerDict to simulate no loggers
monkeypatch.setattr(logging.Logger.manager, 'loggerDict', {})
monkeypatch.setattr(logging.Logger.manager, "loggerDict", {})

set_all_loggers_to_ERROR()

# Verify no errors occur and loggerDict is still empty
assert logging.Logger.manager.loggerDict == {}, "LoggerDict should be empty"
assert logging.Logger.manager.loggerDict == {}, "LoggerDict should be empty"