Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,5 @@ dist/
*.egg-info/
build/
.DS_Store
docs/.cache/
docs/.cache/
.env
1 change: 1 addition & 0 deletions pr_agent/algo/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,4 +44,5 @@
'groq/llama3-8b-8192': 8192,
'groq/llama3-70b-8192': 8192,
'ollama/llama3': 4096,
'watsonx/meta-llama/llama-3-8b-instruct': 100000
}
16 changes: 16 additions & 0 deletions pr_agent/algo/ai_handlers/litellm_ai_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,22 @@ def __init__(self):
self.azure = False
self.api_base = None
self.repetition_penalty = None
"""
get_settings().set("watsonx.url", watsonx_url)
get_settings().set("watsonx.iam_api_key", iam_api_key)
get_settings().set("watsonx.project_id", project_id)
common_cloud_provider_auth_params: dict = {
"params": ["project", "region_name", "token"],
"providers": ["vertex_ai", "bedrock", "watsonx", "azure"],
}
"""
if get_settings().get("watsonx.url", None) and \
get_settings().get("watsonx.iam_api_key", None) and \
get_settings().get("watsonx.project_id", None):
os.environ["WATSONX_URL"] = get_settings().watsonx.url
os.environ["WATSONX_APIKEY"] = get_settings().watsonx.iam_api_key
os.environ["WATSONX_PROJECT_ID"] = get_settings().watsonx.project_id

if get_settings().get("OPENAI.KEY", None):
openai.api_key = get_settings().openai.key
litellm.openai_key = get_settings().openai.key
Expand Down
5 changes: 3 additions & 2 deletions pr_agent/algo/pr_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -344,9 +344,10 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT

def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]:
if model_type == ModelType.TURBO:
model = get_settings().config.model_turbo
model = get_settings().config.model_watsonx_llama3
else:
model = get_settings().config.model
# FIXME: model is hardcoded to watsonx here; restore the configurable model before merging
model = get_settings().config.model_watsonx_llama3
fallback_models = get_settings().config.fallback_models
if not isinstance(fallback_models, list):
fallback_models = [m.strip() for m in fallback_models.split(",")]
Expand Down
2 changes: 1 addition & 1 deletion pr_agent/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from pr_agent.config_loader import get_settings
from pr_agent.log import setup_logger

log_level = os.environ.get("LOG_LEVEL", "INFO")
log_level = os.environ.get("LOG_LEVEL", "DEBUG")
setup_logger(log_level)


Expand Down
21 changes: 15 additions & 6 deletions pr_agent/cli_pip.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,28 @@
from dotenv import load_dotenv
load_dotenv()
import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from pr_agent import cli
from pr_agent.config_loader import get_settings


def main():
# Fill in the following values
provider = "github" # GitHub provider
user_token = "..." # GitHub user token
openai_key = "..." # OpenAI key
pr_url = "..." # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'
command = "/review" # Command to run (e.g. '/review', '/describe', '/ask="What is the purpose of this PR?"')
pr_url = "https://github.com/multicloudlab/pr-agent-test/pull/3" # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'
command = "/describe" # Command to run (e.g. '/review', '/describe', '/ask="What is the purpose of this PR?"')


# Setting the configurations
get_settings().set("CONFIG.git_provider", provider)
get_settings().set("openai.key", openai_key)
get_settings().set("github.user_token", user_token)
# get_settings().set("openai.key", os.getenv("openai_key"))
get_settings().set("watsonx.url", os.getenv("watsonx_url"))
get_settings().set("watsonx.iam_api_key", os.getenv("iam_api_key"))
get_settings().set("watsonx.project_id", os.getenv("project_id"))
get_settings().set("github.user_token", os.getenv("user_token"))

# Run the command. Feedback will appear in GitHub PR comments
cli.run_command(pr_url, command)
Expand Down
3 changes: 2 additions & 1 deletion pr_agent/settings/configuration.toml
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
[config]
model="gpt-4-turbo-2024-04-09"
model_turbo="gpt-4o"
model_watsonx_llama3="watsonx/meta-llama/llama-3-8b-instruct"
fallback_models=["gpt-4-0125-preview"]
git_provider="github"
publish_output=true
publish_output_progress=true
verbosity_level=0 # 0,1,2
verbosity_level=2 # 0,1,2
use_extra_bad_extensions=false
use_wiki_settings_file=true
use_repo_settings_file=true
Expand Down
1 change: 1 addition & 0 deletions pr_agent/tools/pr_code_suggestions.py
Original file line number Diff line number Diff line change
Expand Up @@ -464,6 +464,7 @@ async def _prepare_prediction_extended(self, model: str) -> dict:
if self.patches_diff_list:
get_logger().info(f"Number of PR chunk calls: {len(self.patches_diff_list)}")
get_logger().debug(f"PR diff:", artifact=self.patches_diff_list)
get_logger().info(self.patches_diff_list)

# parallelize calls to AI:
if get_settings().pr_code_suggestions.parallel_calls:
Expand Down
4 changes: 3 additions & 1 deletion pr_agent/tools/pr_description.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,9 @@ async def run(self):
if get_settings().config.publish_output and not get_settings().config.get('is_auto_command', False):
self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)

await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
# await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
# FIXME: hardcoded to the watsonx model (REGULAR); restore TURBO model selection before merging
await retry_with_fallback_models(self._prepare_prediction, ModelType.REGULAR)

if self.prediction:
self._prepare_data()
Expand Down
4 changes: 3 additions & 1 deletion pr_agent/tools/pr_line_questions.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,9 @@ async def run(self):
line_end=line_end,
side=side)
if self.patch_with_lines:
response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.TURBO)
# response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.TURBO)
# FIXME: temporarily forcing ModelType.REGULAR (watsonx) instead of TURBO; revert before merging
response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.REGULAR)

get_logger().info('Preparing answer...')
if comment_id:
Expand Down
4 changes: 3 additions & 1 deletion pr_agent/tools/pr_questions.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,9 @@ async def run(self):
if img_path:
get_logger().debug(f"Image path identified", artifact=img_path)

await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO)
# await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO)
# FIXME: temporarily forcing ModelType.REGULAR (watsonx) instead of TURBO; revert before merging
await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)

pr_comment = self._prepare_pr_answer()
get_logger().debug(f"PR output", artifact=pr_comment)
Expand Down
4 changes: 3 additions & 1 deletion pr_agent/tools/pr_update_changelog.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,9 @@ async def run(self):
if get_settings().config.publish_output:
self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)

await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO)
# await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO)
# FIXME: temporarily forcing ModelType.REGULAR (watsonx) instead of TURBO; revert before merging
await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)

new_file_content, answer = self._prepare_changelog_update()

Expand Down