diff --git a/src/madengine/mad.py b/src/madengine/mad.py
index 75bee1ed..0b77934e 100644
--- a/src/madengine/mad.py
+++ b/src/madengine/mad.py
@@ -125,6 +125,10 @@ def main():
     # Run models command
     parser_run = subparsers.add_parser('run', description="Run LLMs and Deep Learning models on container", help='Run models on container')
     parser_run.add_argument('--tags', nargs='+', default=[], help="tags to run (can be multiple).")
+
+    # Deprecated Tag
+    parser_run.add_argument('--ignore-deprecated-flag', action='store_true', help="Force run deprecated models even if marked deprecated.")
+
     parser_run.add_argument('--timeout', type=int, default=-1, help="time out for model run in seconds; Overrides per-model timeout if specified or default timeout of 7200 (2 hrs).\
             Timeout of 0 will never timeout.")
     parser_run.add_argument('--live-output', action='store_true', help="prints output in real-time directly on STDOUT")
diff --git a/src/madengine/tools/run_models.py b/src/madengine/tools/run_models.py
index dd528c4e..4e3df620 100644
--- a/src/madengine/tools/run_models.py
+++ b/src/madengine/tools/run_models.py
@@ -905,6 +905,15 @@ def run_model(self, model_info: typing.Dict) -> bool:
 
         # Environment variable updates for MAD Public CI
         run_details.gpu_architecture = self.context.ctx["docker_env_vars"]["MAD_SYSTEM_GPU_ARCHITECTURE"]
+
+        # Check if model is deprecated
+        if model_info.get("is_deprecated", False):
+            print(f"WARNING: Model {model_info['name']} has been deprecated.")
+            if self.args.ignore_deprecated_flag:
+                print(f"WARNING: Running deprecated model {model_info['name']} due to --ignore-deprecated-flag.")
+            else:
+                print(f"WARNING: Skipping execution. No bypass flags mentioned.")
+                return True # exit early
+
         # check if model is supported on current gpu architecture, if not skip.
         list_skip_gpu_arch = []
         if (