diff --git a/.gitignore b/.gitignore index 9fcb919395..64aa503602 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ dist/ *.egg-info/ build/ .DS_Store -docs/.cache/ \ No newline at end of file +docs/.cache/ +.env diff --git a/pr_agent/algo/__init__.py b/pr_agent/algo/__init__.py index b048fc8474..4defc692b5 100644 --- a/pr_agent/algo/__init__.py +++ b/pr_agent/algo/__init__.py @@ -44,4 +44,5 @@ 'groq/llama3-8b-8192': 8192, 'groq/llama3-70b-8192': 8192, 'ollama/llama3': 4096, + 'watsonx/meta-llama/llama-3-8b-instruct': 100000 } diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py index f857e34e26..426d5b7bb6 100644 --- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py +++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py @@ -27,6 +27,22 @@ def __init__(self): self.azure = False self.api_base = None self.repetition_penalty = None + """ + get_settings().set("watsonx.url", watsonx_url) + get_settings().set("watsonx.iam_api_key", iam_api_key) + get_settings().set("watsonx.project_id", project_id) + common_cloud_provider_auth_params: dict = { + "params": ["project", "region_name", "token"], + "providers": ["vertex_ai", "bedrock", "watsonx", "azure"], + } + """ + if get_settings().get("watsonx.url", None) and \ + get_settings().get("watsonx.iam_api_key", None) and \ + get_settings().get("watsonx.project_id", None): + os.environ["WATSONX_URL"] = get_settings().watsonx.url + os.environ["WATSONX_APIKEY"] = get_settings().watsonx.iam_api_key + os.environ["WATSONX_PROJECT_ID"] = get_settings().watsonx.project_id + if get_settings().get("OPENAI.KEY", None): openai.api_key = get_settings().openai.key litellm.openai_key = get_settings().openai.key diff --git a/pr_agent/algo/pr_processing.py b/pr_agent/algo/pr_processing.py index 8065ea43df..c4bfc30744 100644 --- a/pr_agent/algo/pr_processing.py +++ b/pr_agent/algo/pr_processing.py @@ -344,9 +344,10 @@ async def retry_with_fallback_models(f: Callable, model_type: 
ModelType = ModelT def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]: if model_type == ModelType.TURBO: - model = get_settings().config.model_turbo + model = get_settings().config.model_watsonx_llama3 else: - model = get_settings().config.model + # FIXME: Hardcode watsonx here + model = get_settings().config.model_watsonx_llama3 fallback_models = get_settings().config.fallback_models if not isinstance(fallback_models, list): fallback_models = [m.strip() for m in fallback_models.split(",")] diff --git a/pr_agent/cli.py b/pr_agent/cli.py index 7ab78a0e55..a6c360a561 100644 --- a/pr_agent/cli.py +++ b/pr_agent/cli.py @@ -6,7 +6,7 @@ from pr_agent.config_loader import get_settings from pr_agent.log import setup_logger -log_level = os.environ.get("LOG_LEVEL", "INFO") +log_level = os.environ.get("LOG_LEVEL", "DEBUG") setup_logger(log_level) diff --git a/pr_agent/cli_pip.py b/pr_agent/cli_pip.py index caa56f0c9b..1d193b1f15 100644 --- a/pr_agent/cli_pip.py +++ b/pr_agent/cli_pip.py @@ -1,3 +1,10 @@ +from dotenv import load_dotenv +load_dotenv() +import os +import sys + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + from pr_agent import cli from pr_agent.config_loader import get_settings @@ -5,15 +12,17 @@ def main(): # Fill in the following values provider = "github" # GitHub provider - user_token = "..." # GitHub user token - openai_key = "..." # OpenAI key - pr_url = "..." # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809' - command = "/review" # Command to run (e.g. '/review', '/describe', '/ask="What is the purpose of this PR?"') + pr_url = "https://github.com/multicloudlab/pr-agent-test/pull/3" # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809' + command = "/describe" # Command to run (e.g. 
'/review', '/describe', '/ask="What is the purpose of this PR?"') + # Setting the configurations get_settings().set("CONFIG.git_provider", provider) - get_settings().set("openai.key", openai_key) - get_settings().set("github.user_token", user_token) + # get_settings().set("openai.key", os.getenv("openai_key")) + get_settings().set("watsonx.url", os.getenv("watsonx_url")) + get_settings().set("watsonx.iam_api_key", os.getenv("iam_api_key")) + get_settings().set("watsonx.project_id", os.getenv("project_id")) + get_settings().set("github.user_token", os.getenv("user_token")) # Run the command. Feedback will appear in GitHub PR comments cli.run_command(pr_url, command) diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml index 4fc6ae5cfe..bd5808ead1 100644 --- a/pr_agent/settings/configuration.toml +++ b/pr_agent/settings/configuration.toml @@ -1,11 +1,12 @@ [config] model="gpt-4-turbo-2024-04-09" model_turbo="gpt-4o" +model_watsonx_llama3="watsonx/meta-llama/llama-3-8b-instruct" fallback_models=["gpt-4-0125-preview"] git_provider="github" publish_output=true publish_output_progress=true -verbosity_level=0 # 0,1,2 +verbosity_level=2 # 0,1,2 use_extra_bad_extensions=false use_wiki_settings_file=true use_repo_settings_file=true diff --git a/pr_agent/tools/pr_code_suggestions.py b/pr_agent/tools/pr_code_suggestions.py index baaa86f9bb..747c310963 100644 --- a/pr_agent/tools/pr_code_suggestions.py +++ b/pr_agent/tools/pr_code_suggestions.py @@ -464,6 +464,7 @@ async def _prepare_prediction_extended(self, model: str) -> dict: if self.patches_diff_list: get_logger().info(f"Number of PR chunk calls: {len(self.patches_diff_list)}") get_logger().debug(f"PR diff:", artifact=self.patches_diff_list) + get_logger().info(self.patches_diff_list) # parallelize calls to AI: if get_settings().pr_code_suggestions.parallel_calls: diff --git a/pr_agent/tools/pr_description.py b/pr_agent/tools/pr_description.py index b16a3a32fc..c8f38bd0ec 100644 ---
a/pr_agent/tools/pr_description.py +++ b/pr_agent/tools/pr_description.py @@ -87,7 +87,9 @@ async def run(self): if get_settings().config.publish_output and not get_settings().config.get('is_auto_command', False): self.git_provider.publish_comment("Preparing PR description...", is_temporary=True) - await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO) + # await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO) + # FIXME: Hardcode watsonx + await retry_with_fallback_models(self._prepare_prediction, ModelType.REGULAR) if self.prediction: self._prepare_data() diff --git a/pr_agent/tools/pr_line_questions.py b/pr_agent/tools/pr_line_questions.py index e26e06d593..0e2d3e3299 100644 --- a/pr_agent/tools/pr_line_questions.py +++ b/pr_agent/tools/pr_line_questions.py @@ -79,7 +79,9 @@ async def run(self): line_end=line_end, side=side) if self.patch_with_lines: - response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.TURBO) + # response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.TURBO) + # FIXME + response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.REGULAR) get_logger().info('Preparing answer...') if comment_id: diff --git a/pr_agent/tools/pr_questions.py b/pr_agent/tools/pr_questions.py index 38efa13fea..a6284d8220 100644 --- a/pr_agent/tools/pr_questions.py +++ b/pr_agent/tools/pr_questions.py @@ -63,7 +63,9 @@ async def run(self): if img_path: get_logger().debug(f"Image path identified", artifact=img_path) - await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO) + # await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO) + # FIXME + await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR) pr_comment = self._prepare_pr_answer() get_logger().debug(f"PR output", artifact=pr_comment) diff --git a/pr_agent/tools/pr_update_changelog.py 
b/pr_agent/tools/pr_update_changelog.py index 839d18c1d0..2b3ccfc781 100644 --- a/pr_agent/tools/pr_update_changelog.py +++ b/pr_agent/tools/pr_update_changelog.py @@ -71,7 +71,9 @@ async def run(self): if get_settings().config.publish_output: self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True) - await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO) + # await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO) + # FIXME + await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR) new_file_content, answer = self._prepare_changelog_update()