From e89fffdd473dd60ecd1fa522a44a45c9d94cd95e Mon Sep 17 00:00:00 2001
From: Ishika Roy <41401566+Iroy30@users.noreply.github.com>
Date: Wed, 4 Feb 2026 14:05:36 -0600
Subject: [PATCH 1/3] Add regression testing (#556)
## Issue
Authors:
- Ishika Roy (https://github.com/Iroy30)
- Ramakrishnap (https://github.com/rgsl888prabhu)
Approvers:
- Ramakrishnap (https://github.com/rgsl888prabhu)
- James Lamb (https://github.com/jameslamb)
URL: https://github.com/NVIDIA/cuopt/pull/556
---
python/libcuopt/pyproject.toml | 3 +-
regression/benchmark_scripts/benchmark.py | 382 ++++++++
.../benchmark_scripts/configs/README.md | 19 +
.../configs/example_test_creation.py | 76 ++
.../configs/test_name_config.json | 23 +
.../configs/test_name_data.json | 117 +++
.../benchmark_scripts/results/test_name.csv | 2 +
regression/benchmark_scripts/utils.py | 70 ++
regression/config.sh | 60 ++
regression/create-html-reports.sh | 236 +++++
regression/cronjob.sh | 170 ++++
regression/functions.sh | 135 +++
regression/get_datasets.py | 915 ++++++++++++++++++
regression/lp_config.json | 13 +
regression/lp_regression_test.sh | 29 +
regression/mip_config.json | 29 +
regression/mip_regression_test.sh | 28 +
regression/routing_regression_test.sh | 29 +
regression/run_regression.sh | 16 +
regression/save_benchmark_results.py | 52 +
regression/save_benchmarks.sh | 38 +
regression/send-slack-report.sh | 119 +++
regression/slack_msg.json | 68 ++
regression/test-container.sh | 26 +
regression/update_asv_database.py | 157 +++
regression/write-meta-data.sh | 26 +
26 files changed, 2837 insertions(+), 1 deletion(-)
create mode 100644 regression/benchmark_scripts/benchmark.py
create mode 100644 regression/benchmark_scripts/configs/README.md
create mode 100644 regression/benchmark_scripts/configs/example_test_creation.py
create mode 100644 regression/benchmark_scripts/configs/test_name_config.json
create mode 100644 regression/benchmark_scripts/configs/test_name_data.json
create mode 100644 regression/benchmark_scripts/results/test_name.csv
create mode 100644 regression/benchmark_scripts/utils.py
create mode 100644 regression/config.sh
create mode 100755 regression/create-html-reports.sh
create mode 100755 regression/cronjob.sh
create mode 100644 regression/functions.sh
create mode 100644 regression/get_datasets.py
create mode 100644 regression/lp_config.json
create mode 100644 regression/lp_regression_test.sh
create mode 100644 regression/mip_config.json
create mode 100644 regression/mip_regression_test.sh
create mode 100644 regression/routing_regression_test.sh
create mode 100644 regression/run_regression.sh
create mode 100644 regression/save_benchmark_results.py
create mode 100644 regression/save_benchmarks.sh
create mode 100755 regression/send-slack-report.sh
create mode 100644 regression/slack_msg.json
create mode 100644 regression/test-container.sh
create mode 100644 regression/update_asv_database.py
create mode 100755 regression/write-meta-data.sh
diff --git a/python/libcuopt/pyproject.toml b/python/libcuopt/pyproject.toml
index 5286c52529..e2c1142523 100644
--- a/python/libcuopt/pyproject.toml
+++ b/python/libcuopt/pyproject.toml
@@ -53,7 +53,8 @@ libcuopt = "libcuopt"
select = [
"distro-too-large-compressed",
]
-max_allowed_size_compressed = '650M'
+
+max_allowed_size_compressed = '660M'
[project.scripts]
cuopt_cli = "libcuopt._cli_wrapper:main"
diff --git a/regression/benchmark_scripts/benchmark.py b/regression/benchmark_scripts/benchmark.py
new file mode 100644
index 0000000000..ead8fe98a4
--- /dev/null
+++ b/regression/benchmark_scripts/benchmark.py
@@ -0,0 +1,382 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+
+import os
+from multiprocessing import Process
+import rmm
+import time
+import pandas as pd
+import glob
+import logging as log
+from datetime import datetime
+import argparse
+
+log.getLogger().setLevel(log.INFO)
+
+
+def create_regression_markdown(data, regression_path, test_type_string):
+    """Render the regressions DataFrame as a markdown snippet for Slack.
+
+    Writes <regression_path>/<test_type_string>_regressions.md containing
+    either "*No regressions*" or a truncated table of regressed metrics.
+    """
+    regression_md_file = (
+        regression_path + "/" + test_type_string + "_regressions.md"
+    )
+
+    md_data = "*No regressions*"
+    # This to reduce size of slack message
+    limit_no_of_regression_list = 5
+
+    if len(data) > 0:
+        status = "*!! Regressions found !!*"
+        # Append a truncation marker when more rows exist than are shown.
+        end_msg = (
+            "\n*Continues ...*"
+            if len(data) > limit_no_of_regression_list
+            else ""
+        )
+        table = data[:limit_no_of_regression_list].to_string(index=False)
+        md_data = status + f"\n```\n{table}\n```" + end_msg
+
+    with open(regression_md_file, "w") as fp:
+        fp.write(md_data)
+
+
+def record_regressions(
+ test_name, data, req_metrics, regression_path, test_type_string
+):
+ regression_file = (
+ regression_path + "/" + test_type_string + "_regressions.csv"
+ )
+
+ regression_df = pd.DataFrame(
+ {
+ "Test Name": [],
+ "Metric Name": [],
+ "Value": [],
+ "Avg Value": [],
+ "Regression(%)": [],
+ }
+ )
+ for name in req_metrics:
+ if name.startswith("bks_change_"):
+ pchange = data[name].iloc[-1].item()
+ metric_name = name.replace("bks_change_", "")
+ limit = req_metrics[metric_name]["bks"].get("threshold", 5)
+ prev_val_mean = pchange
+ latest_val = pchange
+ else:
+ limit = req_metrics[name].get("threshold", 5)
+ prev_val_mean = (
+ data[name][:-1][-30:].mean().item()
+ if len(data) > 1
+ else data[name].iloc[-1].item()
+ )
+ latest_val = data[name].iloc[-1].item()
+
+ if prev_val_mean == 0:
+ pchange = latest_val
+ else:
+ pchange = ((latest_val - prev_val_mean) / prev_val_mean) * 100
+
+ if abs(pchange) >= limit:
+ regression_df.loc[len(regression_df)] = [
+ test_name,
+ name,
+ latest_val,
+ prev_val_mean,
+ pchange,
+ ]
+
+ regression_df.to_csv(regression_file)
+ create_regression_markdown(
+ regression_df, regression_path, test_type_string
+ )
+
+
+def get_bks_change(metrics, required_metrics):
+    """Compute absolute percent deviation from best-known-solution values.
+
+    For every metric in `required_metrics` that declares a "bks" entry with
+    a non-None "value", returns {"bks_change_<metric>": abs percent change}.
+    Zero denominators are special-cased to avoid division by zero.
+    """
+    bks_metrics = {}
+    for metric, value in required_metrics.items():
+        if "bks" in value.keys():
+            bks = value["bks"]["value"]
+            if bks is None:
+                continue
+            current = metrics[metric]
+            if bks == 0:
+                # BKS is zero: report the raw current value scaled to percent.
+                bks_metrics["bks_change_" + metric] = abs(current) * 100
+            elif current == 0:
+                # Current is zero: report the BKS magnitude scaled to percent.
+                bks_metrics["bks_change_" + metric] = abs(bks) * 100
+            else:
+                bks_metrics["bks_change_" + metric] = abs(
+                    ((current - bks) / bks) * 100
+                )
+
+    return bks_metrics
+
+
+def record_result(
+    test_name, metrics, required_metrics, csv_path, test_type_string
+):
+    """Append this run's metrics to the per-test CSV and check regressions.
+
+    LP/MIP results are prefixed with the test type in the filename; routing
+    results use the bare test name. Regression detection runs on the full
+    (previous + current) history before the CSV is rewritten.
+    """
+    file_path = csv_path + "/"
+    if test_type_string == "lp" or test_type_string == "mip":
+        file_path += test_type_string + "_" + test_name + ".csv"
+    else:
+        file_path += test_name + ".csv"
+    bks_metrics = get_bks_change(metrics, required_metrics)
+
+    # Add default metrics to data
+    # NOTE: this mutates the caller's required_metrics/metrics dicts in place.
+    required_metrics.update(bks_metrics)
+    metrics.update(bks_metrics)
+    req_metrics = list(required_metrics.keys()) + ["date_time", "git_commit"]
+
+    # Columns are sorted so the CSV layout is stable across runs.
+    current_data = pd.DataFrame(
+        {key: [metrics[key]] for key in sorted(req_metrics)}
+    )
+    if os.path.isfile(file_path):
+        previous_data = pd.read_csv(file_path, index_col=0)
+        updated_data = pd.concat(
+            [previous_data, current_data], ignore_index=True
+        )
+    else:
+        updated_data = current_data
+    record_regressions(
+        test_name, updated_data, required_metrics, csv_path, test_type_string
+    )
+    updated_data.to_csv(file_path)
+
+
+def run_benchmark(
+    test_name,
+    data_model,
+    solver_settings,
+    required_metrics,
+    csv_path,
+    git_commit,
+    test_status_file,
+    d_type,
+):
+    """Solve one LP/MIP/routing problem and record metrics to CSV.
+
+    Returns "SUCCESS" or "FAILED" based on the solver's termination status.
+    NOTE(review): test_status_file is accepted but never used here — the
+    caller (worker) writes the status file itself; confirm and drop.
+    """
+    import rmm
+
+    # Current resource is the StatisticsResourceAdaptor installed by
+    # reinitialize_rmm(), used below to read peak allocation.
+    mr = rmm.mr.get_current_device_resource()
+
+    from utils import LPMetrics, RoutingMetrics
+    from cuopt import linear_programming
+    from cuopt import routing
+
+    start_time = time.time()
+    if d_type == "lp" or d_type == "mip":
+        metrics = LPMetrics()._asdict()
+        solver_settings.set_parameter("infeasibility_detection", False)
+        # Hard-coded 60s budget — overrides any limit from the config file.
+        solver_settings.set_parameter("time_limit", 60)
+        solution = linear_programming.Solve(data_model, solver_settings)
+    else:
+        metrics = RoutingMetrics()._asdict()
+        solution = routing.Solve(data_model)
+    end_time = time.time()
+
+    # Peak GPU allocation in MiB for this run.
+    metrics["gpu_memory_usage"] = int(
+        mr.allocation_counts.peak_bytes / (1024 * 1024)
+    )
+    metrics["date_time"] = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
+    metrics["git_commit"] = git_commit
+
+    success_status = False
+
+    if d_type == "lp" or d_type == "mip":
+        ## Optimal solution
+        acceptable_termination = ["Optimal", "TimeLimit", "FeasibleFound"]
+        if solution.get_termination_reason() in acceptable_termination:
+            success_status = True
+            # Solver-reported time (vs. wall clock used for routing below).
+            metrics["solver_time"] = solution.get_solve_time()
+            metrics["primal_objective_value"] = solution.get_primal_objective()
+            if d_type == "lp":
+                lp_stats = solution.get_lp_stats()
+                metrics["nb_iterations"] = lp_stats["nb_iterations"]
+            else:
+                milp_stats = solution.get_milp_stats()
+                metrics["mip_gap"] = milp_stats["mip_gap"]
+                metrics["max_constraint_violation"] = milp_stats[
+                    "max_constraint_violation"
+                ]
+                metrics["max_int_violation"] = milp_stats["max_int_violation"]
+                metrics["max_variable_bound_violation"] = milp_stats[
+                    "max_variable_bound_violation"
+                ]
+            record_result(
+                test_name, metrics, required_metrics, csv_path, d_type
+            )
+    else:
+        # Routing: status 0 indicates a solution was found.
+        if solution.get_status() == 0:
+            success_status = True
+            metrics["solver_time"] = end_time - start_time
+            metrics["total_objective_value"] = solution.get_total_objective()
+            metrics["vehicle_count"] = solution.get_vehicle_count()
+
+            # Only record per-objective values the test config asked for.
+            objectives = solution.get_objective_values()
+            if "prize" in required_metrics:
+                metrics["prize"] = objectives[routing.Objective.PRIZE]
+            if "cost" in required_metrics:
+                metrics["cost"] = objectives[routing.Objective.COST]
+            if "travel_time" in required_metrics:
+                metrics["travel_time"] = objectives[
+                    routing.Objective.TRAVEL_TIME
+                ]
+            record_result(
+                test_name, metrics, required_metrics, csv_path, d_type
+            )
+    return "SUCCESS" if success_status is True else "FAILED"
+
+
+def reinitialize_rmm():
+    """Reset RMM with a 1 GiB pool wrapped in a statistics adaptor.
+
+    The StatisticsResourceAdaptor is installed as the current device
+    resource so run_benchmark() can read peak allocation counts.
+    Returns (base_mr, stats_mr).
+    """
+    pool_size = 2**30
+    rmm.reinitialize(pool_allocator=True, initial_pool_size=pool_size)
+
+    base_mr = rmm.mr.get_current_device_resource()
+    stats_mr = rmm.mr.StatisticsResourceAdaptor(base_mr)
+    rmm.mr.set_current_device_resource(stats_mr)
+
+    return base_mr, stats_mr
+
+
+def worker(
+    gpu_id,
+    dataset_file_path,
+    csv_path,
+    git_commit,
+    log_path,
+    test_status_file,
+    n_gpus,
+    d_type="routing",
+):
+    """Per-GPU worker: runs every n_gpus-th dataset file on one device.
+
+    Pins this process to `gpu_id` via CUDA_VISIBLE_DEVICES, then strides
+    through the dataset list starting at index int(gpu_id), stepping by
+    n_gpus, appending a SUCCESS/FAILED line per test to test_status_file.
+    """
+    import os
+
+    # Must be set before any CUDA context is created in this process.
+    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
+
+    from utils import get_configuration
+
+    data_files = []
+    if d_type == "lp" or d_type == "mip":
+        data_files = glob.glob(dataset_file_path + "/*.mps")
+    else:
+        data_files = glob.glob(dataset_file_path + "/*_config.json")
+    idx = int(gpu_id)
+    # NOTE(review): looks like a debug leftover — this limits every run to
+    # a single dataset file; presumably should be len(data_files). Confirm.
+    n_files = 1  # len(data_files)
+
+    while idx < n_files:
+        # Fresh RMM pool per test so peak-memory stats are per-test.
+        mr, stats_mr = reinitialize_rmm()
+
+        data_file = data_files[idx]
+        test_name = str(data_file)
+        status = "FAILED"
+        try:
+            test_name, data_model, solver_settings, requested_metrics = (
+                get_configuration(data_file, dataset_file_path, d_type)
+            )
+            log.basicConfig(
+                level=log.INFO,
+                filename=log_path + "/" + test_name + "_log.txt",
+                filemode="a+",
+                format="%(asctime)-15s %(levelname)-8s %(message)s",
+            )
+            log.getLogger().setLevel(log.INFO)
+            log.info(
+                f"------------- Test Start : {test_name} gpu id : {gpu_id} -------------------"
+            )
+            status = run_benchmark(
+                test_name,
+                data_model,
+                solver_settings,
+                requested_metrics,
+                csv_path,
+                git_commit,
+                test_status_file,
+                d_type,
+            )
+        except Exception as e:
+            # Any failure (config parse, solve, record) marks the test FAILED
+            # but does not stop the worker loop.
+            log.error(str(e))
+
+        with open(test_status_file, "a") as f:
+            f.write("\n")
+            f.write(test_name + ": " + status)
+
+        # Delete instance of rmm
+        del mr
+        del stats_mr
+
+        log.info(
+            f"------------- Test End : {test_name} gpu id : {gpu_id} -------------------"
+        )
+        idx = idx + n_gpus
+
+
+def run(
+    dataset_file_path,
+    csv_path,
+    git_commit,
+    log_path,
+    test_status_file,
+    n_gpus,
+    d_type,
+):
+    """Spawn one worker process per GPU and wait for all to finish.
+
+    Each worker receives its gpu_id as a string (used both for
+    CUDA_VISIBLE_DEVICES and as the starting stride index).
+    """
+    # Restricting n_gpus to one to avoid resource sharing
+    # n_gpus = 1
+    procs = []
+    for gpu_id in range(int(n_gpus)):
+        p = Process(
+            target=worker,
+            args=(
+                str(gpu_id),
+                dataset_file_path,
+                csv_path,
+                git_commit,
+                log_path,
+                test_status_file,
+                int(n_gpus),
+                d_type,
+            ),
+        )
+        p.start()
+        procs.append(p)
+
+    for p in procs:
+        p.join()
+    print("All processes finished.")
+
+
+if __name__ == "__main__":
+    # CLI entry point: parse paths/options and launch the per-GPU workers.
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "-c", "--config-path", type=str, help="Path to all configuration file"
+    )
+    parser.add_argument(
+        "-r",
+        "--csv-path",
+        type=str,
+        help="Path to store result files, this would be for github where results gets stored",
+    )
+    parser.add_argument(
+        "-g",
+        "--git-commit",
+        type=str,
+        help="git commit sha to keep track of runs",
+    )
+    parser.add_argument("-l", "--log-path", type=str, help="Path to log files")
+    parser.add_argument(
+        "-s",
+        "--test-status-file",
+        type=str,
+        help="All test status will be stored in this file",
+    )
+    parser.add_argument(
+        "-n", "--num-gpus", type=str, help="Number of GPUs available"
+    )
+    # d_type: "lp", "mip", or anything else for routing (see worker()).
+    parser.add_argument(
+        "-t", "--type", type=str, default="", help="Type of benchmark"
+    )
+    args = parser.parse_args()
+    run(
+        args.config_path,
+        args.csv_path,
+        args.git_commit,
+        args.log_path,
+        args.test_status_file,
+        args.num_gpus,
+        args.type,
+    )
diff --git a/regression/benchmark_scripts/configs/README.md b/regression/benchmark_scripts/configs/README.md
new file mode 100644
index 0000000000..fb82836563
--- /dev/null
+++ b/regression/benchmark_scripts/configs/README.md
@@ -0,0 +1,19 @@
+# Creating configuration and data file for routing
+
+- For each test, create a configuration file and a corresponding data file.
+- Refer `test_name_config.json` for the format of the configuration file.
+- Supported metrics can be found in `cuopt/regression/benchmark_scripts/utils.py`
+- File names should start with the test name, followed by `config` or `data` depending on the type of file.
+- Data file should be as per openapi spec of cuopt server
+- These configuration and data files need to be uploaded to `s3://cuopt-datasets/regression_datasets/`
+
+ ```
+ aws s3 cp /path/to/files s3://cuopt-datasets/regression_datasets/
+ ```
+
+# Creating configuration and data file for lp and milp
+
+- For each test, create a mps file
+- Refer `lp_config.json` and `mip_config.json` for the format of the configuration file.
+- Supported metrics can be found in `cuopt/regression/benchmark_scripts/utils.py`
+- These configuration and data files need to be in the LP_DATASETS_PATH set in config.sh
diff --git a/regression/benchmark_scripts/configs/example_test_creation.py b/regression/benchmark_scripts/configs/example_test_creation.py
new file mode 100644
index 0000000000..0b376eb001
--- /dev/null
+++ b/regression/benchmark_scripts/configs/example_test_creation.py
@@ -0,0 +1,76 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION.
+# SPDX-License-Identifier: Apache-2.0
+# CONFIDENTIAL, provided under NDA.
+
+from cuopt.routing import utils
+import json
+
+"""
+This is an example of creating a modified test from Homberger dataset.
+In this test, the RC2_10_5 test is modified so that the vehicle count is reduced to 12 and the order prizes are set.
+The prizes are high enough so that prize always becomes the primary objective.
+One can easily use an existing json file and modify the data as well by loading the json as a dictionary
+"""
+test_name = "prize_collection_vrp"
+# test_name = "LC1_10_9"
+
+# base_file_name = "/home/nfs/rgandham/git-repos/reopt/datasets/pdptw/LC1_10_9.pdptw"
+base_file_name = (
+    "/home/nfs/rgandham/git-repos/reopt/datasets/cvrptw/RC2_10_5.TXT"
+)
+
+# model_dict = utils.create_model_dictionary_from_file(base_file_name, is_pdp=True)
+model_dict = utils.create_model_dictionary_from_file(base_file_name)
+
+
+# Reduce the fleet size to 12
+num_vehicles = 12
+fleet_data = model_dict["fleet_data"]
+
+vehicle_locations = fleet_data["vehicle_locations"]
+vehicle_tw = fleet_data["vehicle_time_windows"]
+capacities = fleet_data["capacities"]
+
+# Keep only the first num_vehicles entries of each per-vehicle field.
+new_locs = [vehicle_locations[i] for i in range(num_vehicles)]
+new_tw = [vehicle_tw[i] for i in range(num_vehicles)]
+# Single capacity dimension, truncated to the reduced fleet.
+new_cap = [[capacities[0][i] for i in range(num_vehicles)]] * 1
+
+fleet_data["vehicle_locations"] = new_locs
+fleet_data["vehicle_time_windows"] = new_tw
+fleet_data["capacities"] = new_cap
+
+# Add prizes
+task_data = model_dict["task_data"]
+
+n_tasks = len(task_data["demand"][0])
+
+# 10000 per task: large enough that prize dominates the objective.
+prizes = [10000.0] * n_tasks
+task_data["prizes"] = prizes
+
+
+# Set 10 min time limit
+solver_config = {}
+solver_config["time_limit"] = 600
+
+model_dict["solver_config"] = solver_config
+
+test_config_file_name = test_name + "_config.json"
+model_data_file_name = test_name + "_data.json"
+
+test_config = {}
+test_config["test_name"] = test_name
+test_config["file_name"] = model_data_file_name
+test_config["metrics"] = [
+    "vehicle_count",
+    "total_cost",
+    "prize",
+    "memory_usage",
+]
+
+with open(test_config_file_name, "w") as fp:
+    json.dump(test_config, fp)
+    fp.close()  # redundant: the with-block already closes fp
+
+with open(model_data_file_name, "w") as fp:
+    json.dump(model_dict, fp)
+    fp.close()  # redundant: the with-block already closes fp
diff --git a/regression/benchmark_scripts/configs/test_name_config.json b/regression/benchmark_scripts/configs/test_name_config.json
new file mode 100644
index 0000000000..d87b7bc842
--- /dev/null
+++ b/regression/benchmark_scripts/configs/test_name_config.json
@@ -0,0 +1,23 @@
+{
+ "test_name": "test_name",
+ "file_name": "test_name_data.json",
+ "metrics": {
+ "total_objective_value": {
+ "threshold": 5,
+ "unit": "total_objective_value"
+ },
+ "vehicle_count": {
+ "threshold": 5,
+ "unit": "vehicle_count"
+ },
+ "solver_time": {
+ "threshold": 5,
+ "unit": "seconds"
+ },
+ "gpu_memory_usage": {
+ "threshold": 20,
+ "unit": "MB"
+ }
+ },
+  "details": "Add details about your test"
+}
diff --git a/regression/benchmark_scripts/configs/test_name_data.json b/regression/benchmark_scripts/configs/test_name_data.json
new file mode 100644
index 0000000000..e6918ad58e
--- /dev/null
+++ b/regression/benchmark_scripts/configs/test_name_data.json
@@ -0,0 +1,117 @@
+{
+ "cost_waypoint_graph_data": {
+ "waypoint_graph": null
+ },
+ "travel_time_waypoint_graph_data": {
+ "waypoint_graph": null
+ },
+ "cost_matrix_data": {
+ "data": {
+ "0": [
+ [
+ 0,
+ 1,
+ 1
+ ],
+ [
+ 1,
+ 0,
+ 1
+ ],
+ [
+ 1,
+ 1,
+ 0
+ ]
+ ]
+ }
+ },
+ "travel_time_matrix_data": {
+ "data": null
+ },
+ "task_data": {
+ "task_locations": [
+ 0,
+ 1,
+ 2
+ ],
+ "demand": [
+ [
+ 0,
+ 1,
+ 1
+ ],
+ [
+ 0,
+ 3,
+ 1
+ ]
+ ],
+ "task_time_windows": [
+ [
+ 0,
+ 10
+ ],
+ [
+ 0,
+ 4
+ ],
+ [
+ 2,
+ 4
+ ]
+ ],
+ "service_times": [
+ 0,
+ 1,
+ 1
+ ]
+ },
+ "fleet_data": {
+ "vehicle_locations": [
+ [
+ 0,
+ 0
+ ],
+ [
+ 0,
+ 0
+ ]
+ ],
+ "capacities": [
+ [
+ 2,
+ 2
+ ],
+ [
+ 4,
+ 1
+ ]
+ ],
+ "vehicle_time_windows": [
+ [
+ 0,
+ 10
+ ],
+ [
+ 0,
+ 10
+ ]
+ ],
+ "skip_first_trips": [
+ false,
+ false
+ ],
+ "drop_return_trips": [
+ false,
+ false
+ ],
+ "vehicle_max_costs": [
+ 20,
+ 20
+ ]
+ },
+ "solver_config": {
+ "time_limit": 10
+ }
+}
diff --git a/regression/benchmark_scripts/results/test_name.csv b/regression/benchmark_scripts/results/test_name.csv
new file mode 100644
index 0000000000..85bf3d9761
--- /dev/null
+++ b/regression/benchmark_scripts/results/test_name.csv
@@ -0,0 +1,2 @@
+,solver_time,total_objective_value,vehicle_count
+0,10.004132270812988,3.0,1
diff --git a/regression/benchmark_scripts/utils.py b/regression/benchmark_scripts/utils.py
new file mode 100644
index 0000000000..04b1cbb9fb
--- /dev/null
+++ b/regression/benchmark_scripts/utils.py
@@ -0,0 +1,70 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+
+from cuopt_server.utils.utils import build_routing_datamodel_from_json
+from cuopt.linear_programming.solver_settings import SolverSettings
+import cuopt_mps_parser
+import os
+import json
+from typing import NamedTuple
+
+
+def build_datamodel_from_mps(data):
+    """
+    data: A file in mps format
+
+    Parses the MPS file into a cuOpt data model and pairs it with default
+    solver settings. Raises ValueError when `data` is not an existing file.
+    """
+
+    if os.path.isfile(data):
+        data_model = cuopt_mps_parser.ParseMps(data)
+    else:
+        raise ValueError(
+            f"Invalid type : {type(data)} has been provided as input, "
+            "requires mps input"
+        )
+    solver_settings = SolverSettings()
+
+    return data_model, solver_settings
+
+
+class RoutingMetrics(NamedTuple):
+    """Default metric set for routing benchmarks; -1 / "" mean unset."""
+
+    total_objective_value: float = -1
+    vehicle_count: int = -1
+    cost: float = -1
+    prize: float = -1
+    travel_time: float = -1
+    solver_time: float = -1
+    gpu_memory_usage: float = -1
+    git_commit: str = ""
+    date_time: str = ""
+
+
+class LPMetrics(NamedTuple):
+    """Default metric set for LP/MIP benchmarks; -1 / "" mean unset."""
+
+    primal_objective_value: float = -1
+    solver_time: float = -1
+    gpu_memory_usage: float = -1
+    git_commit: str = ""
+    date_time: str = ""
+
+
+def get_configuration(data_file, data_file_path, d_type):
+    """Load test config and build the data model for one benchmark.
+
+    LP/MIP: reads a shared <d_type>_config.json for metric definitions and
+    parses the given .mps file; test name comes from the file stem.
+    Routing: the per-test *_config.json names the data file and metrics.
+    Returns (test_name, data_model, solver_settings, requested_metrics).
+    """
+    data = {}
+    test_name = None
+    requested_metrics = {}
+
+    if d_type == "lp" or d_type == "mip":
+        with open(data_file_path + "/" + d_type + "_config.json") as f:
+            data = json.load(f)
+        test_name = data_file.split("/")[-1].split(".")[0]
+        data_model, solver_settings = build_datamodel_from_mps(data_file)
+        requested_metrics = data["metrics"]
+    else:
+        with open(data_file) as f:
+            data = json.load(f)
+        test_name = data["test_name"]
+        data_model, solver_settings = build_routing_datamodel_from_json(
+            data_file_path + "/" + data["file_name"]
+        )
+        requested_metrics = data["metrics"]
+
+    return test_name, data_model, solver_settings, requested_metrics
diff --git a/regression/config.sh b/regression/config.sh
new file mode 100644
index 0000000000..2b20597d07
--- /dev/null
+++ b/regression/config.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# shellcheck disable=all
+# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+
+THIS_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)
+
+CUOPT_SCRIPTS_DIR=$THIS_DIR
+OUTPUT_DIR=$SCRATCH_DIR/benchmark_runs/
+
+ACCOUNT=datascience_rapids_testing
+PARTITION="batch"
+GPUS_PER_NODE=8
+
+# Path to the squashfs file containing the container image
+IMAGE="nvidia/cuopt:26.2.0a-cuda12.9-py3.13"
+
+ALL_CONFIGS_PATH=$SCRATCH_DIR/configs/
+ROUTING_CONFIGS_PATH=$SCRATCH_DIR/routing_configs/
+ROUTING_DATASETS_PATH=$SCRATCH_DIR/routing_datasets/
+LP_DATASETS_PATH=$SCRATCH_DIR/lp_datasets/
+MIP_DATASETS_PATH=$SCRATCH_DIR/mip_datasets/
+
+STATUS_FILE=$OUTPUT_DIR/status.txt
+WORKER_RMM_POOL_SIZE=${WORKER_RMM_POOL_SIZE:-24G}
+
+DATASETS_DIR=$SCRATCH_DIR/datasets
+
+# Assume CUOPT_SLACK_APP_ID is defined!
+CUOPT_SLACK_APP_ID="MY_SLACK_APP_ID"
+WEBHOOK_URL=${WEBHOOK_URL:-https://hooks.slack.com/services/${CUOPT_SLACK_APP_ID}}
+S3_FILE_PREFIX="MY_S3_FILE_PREFIX"
+S3_URL_PREFIX="MY_S3_URL_PREFIX"
+
+# Most are defined using the bash := or :- syntax, which means they
+# will be set only if they were previously unset. The project config
+# is loaded first, which gives it the opportunity to override anything
+# in this file that uses that syntax. If there are variables in this
+# file that should not be overridden by a project, then they will
+# simply not use that syntax and override, since these variables are
+# read last.
+RESULTS_ARCHIVE_DIR=$OUTPUT_DIR/results
+RESULTS_DIR=$RESULTS_ARCHIVE_DIR/latest
+METADATA_FILE=$RESULTS_DIR/metadata.sh
+WORKSPACE=$OUTPUT_DIR/workspace
+TESTING_DIR=$WORKSPACE/testing
+BENCHMARK_DIR=$WORKSPACE/benchmark
+SCRIPTS_DIR=$THIS_DIR
+
+BUILD_LOG_FILE=$RESULTS_DIR/build_log.txt
+DATE=${DATE:-$(date --utc "+%Y-%m-%d_%H:%M:%S")_UTC}
+
+# vars that are not overridden by the project.
+
+# These must remain relative to $RESULTS_DIR since some scripts assume
+# that, and also assume the names "tests" and "benchmarks", and
+# therefore cannot be overridden by a project.
+TESTING_RESULTS_DIR=${RESULTS_DIR}/tests
+BENCHMARK_RESULTS_DIR=${RESULTS_DIR}/benchmarks
diff --git a/regression/create-html-reports.sh b/regression/create-html-reports.sh
new file mode 100755
index 0000000000..5b0883dda8
--- /dev/null
+++ b/regression/create-html-reports.sh
@@ -0,0 +1,236 @@
+#!/bin/bash
+# shellcheck disable=SC1090
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION.
+# SPDX-License-Identifier: Apache-2.0
+
+# Must ensure PROJECT_DIR is exported first then load env
+export PROJECT_DIR=${PROJECT_DIR:-$(cd "$(dirname ${BASH_SOURCE[0]})" && pwd)}
+source ${PROJECT_DIR}/config.sh
+source ${PROJECT_DIR}/functions.sh
+
+# FIXME: this assumes all reports are from running pytests
+ALL_REPORTS=$(find ${RESULTS_DIR}/benchmarks/results/ -name "*status.txt")
+
+# Create the html describing the build and test run
+REPORT_METADATA_HTML=""
+PROJECT_VERSION="unknown"
+PROJECT_BUILD=""
+PROJECT_CHANNEL="unknown"
+PROJECT_REPO_URL="unknown"
+PROJECT_REPO_BRANCH="unknown"
+if [ -f $METADATA_FILE ]; then
+ source $METADATA_FILE
+fi
+# Assume if PROJECT_BUILD is set then a conda version string should be
+# created, else a git version string.
+if [[ "$PROJECT_BUILD" != "" ]]; then
+ REPORT_METADATA_HTML="
+ | conda version | $PROJECT_VERSION |
+ | build | $PROJECT_BUILD |
+ | channel | $PROJECT_CHANNEL |
+
+
"
+else
+ REPORT_METADATA_HTML="
+ | commit hash | $PROJECT_VERSION |
+ | repo | $PROJECT_REPO_URL |
+ | branch | $PROJECT_REPO_BRANCH |
+
+
"
+fi
+
+################################################################################
+# create the html reports for each individual run (each
+# pytest-results*.txt file)
+if [ "$ALL_REPORTS" != "" ]; then
+ for report in $ALL_REPORTS; do
+ # Get the individual report name, and use the .txt file path
+ # to form the html report being generated (same location as
+ # the .txt file). This will be an abs path since it is a file
+ # on disk being written.
+ report_name=$(basename -s .txt $report)
+ html_report_abs_path=$(dirname $report)/${report_name}.html
+ echo "
+
+
+ ${report_name}
+
+
+${report_name}
" > $html_report_abs_path
+ echo "$REPORT_METADATA_HTML" >> $html_report_abs_path
+ echo "
+
+ | test file | status | logs |
+
+" >> $html_report_abs_path
+ awk '{ if($2 == "FAILED") {
+ color = "red"
+ } else {
+ color = "green"
+ }
+ printf "| %s | %s | %s |
\n", $1, color, $2, $3, $3
+ }' $report >> $html_report_abs_path
+ echo "
+
+
+ " >> $html_report_abs_path
+ done
+fi
+
+################################################################################
+# Create a .html file for each *_log.txt file, which is just the contents
+# of the log with a line number and anchor id for each line that can
+# be used for sharing links to lines.
+ALL_LOGS=$(find -L ${BENCHMARK_RESULTS_DIR} -type f -name "*_log.txt" -print)
+
+for f in $ALL_LOGS; do
+ base_no_extension=$(basename ${f: 0:-4})
+ html=${f: 0:-4}.html
+ echo "
+
+
+ $base_no_extension
+
+
+
+${base_no_extension}
+" > $html
+ awk '{ print ""NR": "$0"
"}' $f >> $html
+ echo "
+
+" >> $html
+done
+
+################################################################################
+# create the top-level report
+STATUS='FAILED'
+STATUS_IMG='https://img.icons8.com/cotton/80/000000/cancel--v1.png'
+if [ "$ALL_REPORTS" != "" ]; then
+ if ! (grep -w FAILED $ALL_REPORTS > /dev/null); then
+ STATUS='PASSED'
+ STATUS_IMG='https://img.icons8.com/bubbles/100/000000/approval.png'
+ fi
+fi
+BUILD_LOG_HTML="(build log not available or build not run)"
+BUILD_STATUS=""
+if [ -f $BUILD_LOG_FILE ]; then
+ if [ -f ${BUILD_LOG_FILE: 0:-4}.html ]; then
+ BUILD_LOG_HTML="log (plain text)"
+ else
+ BUILD_LOG_HTML="log"
+ fi
+ tail -3 $BUILD_LOG_FILE | grep -w "done."
+ if (tail -3 $BUILD_LOG_FILE | grep -qw "done."); then
+ BUILD_STATUS="PASSED"
+ else
+ BUILD_STATUS="FAILED"
+ fi
+fi
+
+report=${RESULTS_DIR}/report.html
+echo "
+
+
+ test report
+
+
+" > $report
+echo "$REPORT_METADATA_HTML" >> $report
+echo "
Overall status: $STATUS
" >> $report
+echo "Build: ${BUILD_STATUS} ${BUILD_LOG_HTML}
" >> $report
+if [ "$ALL_REPORTS" != "" ]; then
+ echo "
Test Status
" >>$report
+ echo "
+
+ | Test | Status |
+
+ " >> $report
+ for f in $ALL_REPORTS; do
+ report_name=$(basename -s .txt $f)
+ # report_path should be of the form "tests/foo.html"
+ prefix_to_remove="$RESULTS_DIR/"
+ report_rel_path=${f/$prefix_to_remove}
+ report_path=$(dirname $report_rel_path)/${report_name}.html
+
+ if (grep -w FAILED $f > /dev/null); then
+ status="FAILED"
+ color="red"
+ else
+ status="PASSED"
+ color="green"
+ fi
+ echo "| ${report_name} | ${status} |
" >> $report
+ done
+ echo "
" >> $report
+else
+ echo "Tests were not run." >> $report
+fi
+prefix_to_remove="$RESULTS_DIR/"
+plot_rel_path=${f/$prefix_to_remove}
+plot_path=$(dirname $plot_rel_path)/asv/html/index.html
+prefix_to_remove="$RESULTS_DIR/benchmarks/results/"
+log_rel_path=${f/$prefix_to_remove}
+log_path=$(dirname $log_rel_path)/index.html
+echo "

Plots : Regression test results
" >>$report
+echo "

Logs and Details : All the data for this run
" >>$report
+echo "
+
+" >> $report
+
+################################################################################
+# (optional) generate the ASV html
+if hasArg --run-asv; then
+ asv_config_file=$(find ${BENCHMARK_RESULTS_DIR}/results/asv -name "asv.conf.json")
+ if [ "$asv_config_file" != "" ]; then
+ asv update --config $asv_config_file
+ asv publish --config $asv_config_file
+ fi
+fi
+
+################################################################################
+# Create an index.html for each dir (ALL_DIRS plus ".", but EXCLUDE
+# the asv html) This is needed since S3 (and probably others) will not
+# show the contents of a hosted directory by default, but will instead
+# return the index.html if present.
+# The index.html will just contain links to the individual files and
+# subdirs present in each dir, just as if browsing in a file explorer.
+ALL_DIRS=$(find -L ${RESULTS_DIR} -path ${BENCHMARK_RESULTS_DIR}/results/asv/html -prune -o -type d -printf "%P\n")
+
+for d in "." $ALL_DIRS; do
+ index=${RESULTS_DIR}/${d}/index.html
+ echo "
+
+
+ $d
+
+
+${d}
+" > $index
+ for f in ${RESULTS_DIR}/$d/*; do
+ b=$(basename $f)
+ # Do not include index.html in index.html (it's a link to itself)
+ if [[ "$b" == "index.html" ]]; then
+ continue
+ fi
+ if [ -d "$f" ]; then
+ echo "$b
" >> $index
+ # special case: if the file is a *_log.txt and has a corresponding .html
+ elif [[ "${f: -8}" == "_log.txt" ]] && [[ -f "${f: 0:-4}.html" ]]; then
+ markup="${b: 0:-4}.html"
+ plaintext=$b
+ echo "$markup (plain text)
" >> $index
+ elif [[ "${f: -9}" == "_log.html" ]] && [[ -f "${f: 0:-5}.txt" ]]; then
+ continue
+ else
+ echo "$b
" >> $index
+ fi
+ done
+ echo "
+
+" >> $index
+done
diff --git a/regression/cronjob.sh b/regression/cronjob.sh
new file mode 100755
index 0000000000..de05b22bc0
--- /dev/null
+++ b/regression/cronjob.sh
@@ -0,0 +1,170 @@
#!/bin/bash
# shellcheck disable=SC1090
# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

# Nightly regression driver. Smoke-tests the cuOpt container, then launches
# the routing/LP/MIP benchmark suites as parallel slurm jobs, and finally
# generates the HTML/ASV reports and (optionally) uploads them / posts to
# Slack.
# Recognized flags (via hasArg): --test --benchmark --skip-asv
# --skip-sending-report

# Abort script on first error to ensure script-env.sh is sourced.
set -e

# Guard: this script *submits* slurm jobs, so refuse to run inside one.
if [[ -v SLURM_NODEID ]]; then
    echo "Detected the env var SLURM_NODEID is set. Is this script running on a compute node?"
    echo "This script must be run *outside* of a slurm job (this script starts slurm jobs, but is not a job itself)."
    exit 1
fi

# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env
export PROJECT_DIR=${PROJECT_DIR:-$(cd "$(dirname ${BASH_SOURCE[0]})" && pwd)}

source ${PROJECT_DIR}/config.sh
source ${PROJECT_DIR}/functions.sh

# Benchmarks run when --benchmark is given, and also by default when
# neither --test nor --benchmark is given.
RUN_BENCHMARKS=0

if hasArg --benchmark; then
    RUN_BENCHMARKS=1
fi

if (! hasArg --test) && (! hasArg --benchmark); then
    RUN_BENCHMARKS=1
fi

################################################################################

# Create a results dir unique for this run
setupResultsDir

# Switch to allowing errors from commands, since test failures will
# result in non-zero return codes and this script should attempt to
# run all tests.
set +e

################################################################################
logger "Testing cuOpt in container..."
srun \
    --account $ACCOUNT \
    --partition $PARTITION \
    --job-name=test-container.testing \
    --nodes 1 \
    --gpus-per-node 1 \
    --time=120 \
    --export=ALL \
    --container-mounts=${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \
    --container-image=$IMAGE \
    --output=$BUILD_LOG_FILE \
    bash ${PROJECT_DIR}/test-container.sh
TESTING_FAILED=$?
logger "done testing container, return code was $TESTING_FAILED"


if [[ $TESTING_FAILED == 0 ]]; then

    ############################################################################
    # Setup and run tests
    if [[ $RUN_BENCHMARKS == 1 ]]; then
        logger "Running benchmarks..."
        logger "GPUs per node : $GPUS_PER_NODE"
        # Benchmark jobs (routing, LP, MIP) - launched in parallel, each as
        # its own exclusive slurm job; -K kills the step if any task fails.
        srun \
            --account $ACCOUNT \
            --partition $PARTITION \
            --job-name=run-nightly-benchmarks \
            --nodes 1 \
            --gpus-per-node $GPUS_PER_NODE \
            --time=4:00:00 \
            --export=ALL \
            --exclusive \
            -K \
            --container-mounts ${ROUTING_CONFIGS_PATH}:${ROUTING_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \
            --container-image=$IMAGE \
            --output=${BENCHMARK_RESULTS_DIR}/benchmark_routing_log.txt \
            bash ${CUOPT_SCRIPTS_DIR}/routing_regression_test.sh &
        PID_1=$!
        logger "Process ID $PID_1 in background"

        srun \
            --account $ACCOUNT \
            --partition $PARTITION \
            --job-name=run-nightly-benchmarks \
            --nodes 1 \
            --gpus-per-node $GPUS_PER_NODE \
            --time=4:00:00 \
            --export=ALL \
            --exclusive \
            -K \
            --container-mounts ${LP_DATASETS_PATH}:${LP_DATASETS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \
            --container-image=$IMAGE \
            --output=${BENCHMARK_RESULTS_DIR}/benchmark_lp_log.txt \
            bash ${CUOPT_SCRIPTS_DIR}/lp_regression_test.sh &
        PID_2=$!

        srun \
            --account $ACCOUNT \
            --partition $PARTITION \
            --job-name=run-nightly-benchmarks \
            --nodes 1 \
            --gpus-per-node $GPUS_PER_NODE \
            --time=4:00:00 \
            --export=ALL \
            --exclusive \
            -K \
            --container-mounts ${MIP_DATASETS_PATH}:${MIP_DATASETS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \
            --container-image=$IMAGE \
            --output=${BENCHMARK_RESULTS_DIR}/benchmark_mip_log.txt \
            bash ${CUOPT_SCRIPTS_DIR}/mip_regression_test.sh &
        PID_3=$!

        # Block until all three benchmark jobs finish.
        wait $PID_1 $PID_2 $PID_3
    fi

else # if [[ $TESTING_FAILED == 0 ]]
    logger "Container testing Failed!"
fi

################################################################################
# Send report based on contents of $RESULTS_DIR
# These steps do not require a worker node.

# When running both testing and benchmark and if some benchmarks fail,
# the entire nightly will fail. The benchmark logs reported on Slack
# contain information about the failures.
logger "Generating report"

if [ -f $METADATA_FILE ]; then
    source $METADATA_FILE
fi

# Copy all config files to one folder
cp $ROUTING_CONFIGS_PATH/*config.json $LP_DATASETS_PATH/*config.json $MIP_DATASETS_PATH/*config.json $ALL_CONFIGS_PATH/

RUN_ASV_OPTION=""
if hasArg --skip-asv; then
    logger "Skipping running ASV"
else
    # Only create/update the asv database if there is both a commit hash and
    # a branch, otherwise asv will return an error. If $PROJECT_BUILD is set,
    # that implies there is neither the git commit hash nor the branch, which
    # are required to create/update the asv db
    if [[ "$PROJECT_BUILD" == "" ]]; then
        # Update/create the ASV database
        logger "Updating ASV database"
        python $PROJECT_DIR/update_asv_database.py --commitHash=$PROJECT_VERSION --repo-url=$PROJECT_REPO_URL --branch=$PROJECT_REPO_BRANCH --commitTime=$PROJECT_REPO_TIME --results-dir=$RESULTS_DIR --machine-name=$MACHINE --gpu-type=$GPU_TYPE --configs=$ALL_CONFIGS_PATH
        RUN_ASV_OPTION=--run-asv
        logger "Updated ASV database"
    else
        logger "Detected a conda install, cannot run ASV since a commit hash/time is needed."
    fi
fi

# The cuopt checkout is missing its .git folder, which breaks subsequent
# runs; delete it and pull fresh every time.
rm -rf $RESULTS_DIR/benchmarks/results/asv/cuopt/
rm -rf $RESULTS_DIR/tests

# NOTE(review): uses $SCRIPTS_DIR here but $PROJECT_DIR for
# send-slack-report.sh below — confirm both point at the regression
# scripts dir (both presumably set in config.sh).
${SCRIPTS_DIR}/create-html-reports.sh $RUN_ASV_OPTION

if hasArg --skip-sending-report; then
    logger "Skipping sending report."
else
    logger "Uploading to S3, posting to Slack"
    ${PROJECT_DIR}/send-slack-report.sh
fi

logger "cronjob.sh done."
diff --git a/regression/functions.sh b/regression/functions.sh
new file mode 100644
index 0000000000..9d8147e821
--- /dev/null
+++ b/regression/functions.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION.
+# SPDX-License-Identifier: Apache-2.0
+
+# This file is source'd from other scripts (e.g. cronjob.sh) to add
+# functions to the calling environment; the shebang above exists only
+# for editors and shellcheck. This file assumes the variables it uses
+# have been defined elsewhere.
+
# Capture the argument list this file was source'd with, so hasArg can be
# queried later from any function without re-passing "$@".
NUMARGS=$#
ARGS=$*
# Succeeds (returns 0) iff "$1" appears as a whole space-delimited word among
# the captured arguments.
# NOTE(review): "$1" is used as a grep regex, so flags containing regex
# metacharacters could match more broadly than intended — confirm callers
# only pass plain --flag style strings.
function hasArg {
    (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ")
}
+
# Print a log line prefixed with ">>>>". Backslash escape sequences in the
# message are expanded (printf %b behaves like `echo -e`).
function logger {
    printf ">>>> %b\n" "$*"
}
+
+# Calling "setTee outfile" will cause all stdout and stderr of the
+# current script to be output to "tee", which outputs to stdout and
+# "outfile" simultaneously. This is useful by allowing a script to
+# "tee" itself at any point without being called with tee.
+_origFileDescriptorsSaved=0
+function setTee {
+ if [[ $_origFileDescriptorsSaved == 0 ]]; then
+ # Save off the original file descr 1 and 2 as 3 and 4
+ exec 3>&1 4>&2
+ _origFileDescriptorsSaved=1
+ fi
+ teeFile=$1
+ # Create a named pipe.
+ pipeName=$(mktemp -u)
+ mkfifo $pipeName
+ # Close the currnet 1 and 2 and restore to original (3, 4) in the
+ # event this function is called repeatedly.
+ exec 1>&- 2>&-
+ exec 1>&3 2>&4
+ # Start a tee process reading from the named pipe. Redirect stdout
+ # and stderr to the named pipe which goes to the tee process. The
+ # named pipe "file" can be removed and the tee process stays alive
+ # until the fd is closed.
+ tee -a < $pipeName $teeFile &
+ exec > $pipeName 2>&1
+ rm $pipeName
+}
+
+# Call this to stop script output from going to "tee" after a prior
+# call to setTee.
+function unsetTee {
+ if [[ $_origFileDescriptorsSaved == 1 ]]; then
+ # Close the current fd 1 and 2 which should stop the tee
+ # process, then restore 1 and 2 to original (saved as 3, 4).
+ exec 1>&- 2>&-
+ exec 1>&3 2>&4
+ fi
+}
+
# Creates a unique results dir based on date, then links the common
# results dir name to it.
# Relies on RESULTS_ARCHIVE_DIR, DATE, RESULTS_DIR, TESTING_RESULTS_DIR and
# BENCHMARK_RESULTS_DIR being defined elsewhere (presumably config.sh —
# confirm). Any ASV database from the previous run is copied forward so the
# new run appends to the existing benchmark history.
function setupResultsDir {
    mkdir -p ${RESULTS_ARCHIVE_DIR}/${DATE}
    # Store the target of $RESULTS_DIR before $RESULTS_DIR get linked to
    # a different dir
    previous_results=$(readlink -f $RESULTS_DIR)

    rm -rf $RESULTS_DIR
    ln -s ${RESULTS_ARCHIVE_DIR}/${DATE} $RESULTS_DIR
    mkdir -p $TESTING_RESULTS_DIR
    mkdir -p $BENCHMARK_RESULTS_DIR/results/

    # Carry the previous run's ASV database into the new results dir.
    old_asv_dir=$previous_results/benchmarks/results/asv
    if [ -d $old_asv_dir ]; then
        cp -r $old_asv_dir $BENCHMARK_RESULTS_DIR/results
    fi
}
+
+
# Echo the path that $1 ultimately points to (via readlink -f), or $1
# itself when it cannot be resolved. Useful for getting the actual path of
# the results dir since that is often sym-linked to a unique
# (timestamp-based) results dir name.
function getNonLinkedFileName {
    resolved=$(readlink -f "$1")
    if [[ -n "$resolved" ]]; then
        echo $resolved
    else
        echo "$1"
    fi
}
+
# Block until none of the given slurm job IDs (passed as separate
# arguments) remain in the queue, polling squeue every 2 seconds.
function waitForSlurmJobsToComplete {
    ids=$*
    # Build the comma-separated list squeue expects with pure bash
    # parameter expansion ("$*" joins args with single spaces, so a plain
    # space->comma substitution is sufficient) — no need to spawn python.
    jobs=${ids// /,}
    jobsInQueue=$(squeue --noheader --jobs=$jobs)
    while [[ $jobsInQueue != "" ]]; do
        sleep 2
        jobsInQueue=$(squeue --noheader --jobs=$jobs)
    done
}
+
# Clones repo from URL specified by $1 as name $2 in to directory
# $3. For example:
#   "cloneRepo https://github.com/rapidsai/cugraph.git cg /my/repos"
# results in cugraph being cloned to /my/repos/cg.
# (The example previously listed the dest dir before the name, which
# contradicted the actual argument order.)
# NOTE: This removes any existing cloned repos that match the
# destination.
function cloneRepo {
    repo_url=$1
    repo_name=$2
    dest_dir=$3
    mkdir -p $dest_dir
    pushd $dest_dir > /dev/null || return 1
    logger "Clone $repo_url in $dest_dir..."
    if [ -d $repo_name ]; then
        rm -rf $repo_name
        if [ -d $repo_name ]; then
            echo "ERROR: ${dest_dir}/$repo_name was not completely removed."
            # "error 1" was not a defined command; restore the directory
            # stack and report failure instead.
            popd > /dev/null
            return 1
        fi
    fi
    # Clone explicitly into $repo_name so the result matches the directory
    # we just removed (a bare "git clone $repo_url" would use the repo's
    # default name, which may differ from $repo_name).
    git clone $repo_url $repo_name
    popd > /dev/null || return 1
}
+
# Only define this function if it has not already been defined in the
# current environment, which allows the project to override it from
# its functions.sh file that was previously source'd.
if [[ $(type -t activateCondaEnv) == "" ]]; then
    # Activate the conda environment named by $CONDA_ENV in the current
    # shell (the shell hook makes `conda activate` usable from scripts).
    function activateCondaEnv {
        logger "Activating conda env ${CONDA_ENV}..."
        eval "$(conda shell.bash hook)"
        conda activate $CONDA_ENV
    }
fi
diff --git a/regression/get_datasets.py b/regression/get_datasets.py
new file mode 100644
index 0000000000..bb2a9f23d3
--- /dev/null
+++ b/regression/get_datasets.py
@@ -0,0 +1,915 @@
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # noqa
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import sys
+import urllib.request
+import urllib.parse
+import ssl
+import subprocess
+
+
+# From: https://plato.asu.edu/bench.html
+# Folders containing instances:
+# - https://miplib2010.zib.de/miplib2010.php
+# - https://www.netlib.org/lp/data/
+# - https://old.sztaki.hu/~meszaros/public_ftp/lptestset/ (and its subfolders)
+# - https://plato.asu.edu/ftp/lptestset/ (and its subfolders)
+# - https://miplib.zib.de/tag_benchmark.html
+# - https://miplib.zib.de/tag_collection.html
+
# Names of the LP instances (drawn from the Mittelmann benchmark sets) that
# are downloaded when this script is invoked with dataset type "lp". Each
# name must be a key of MittelmannInstances["problems"] below.
LPFeasibleMittelmannSet = [
    "L1_sixm250obs",
    "Linf_520c",
    "a2864",
    "bdry2",
    "cont1",
    "cont11",
    "datt256_lp",
    "dlr1",
    "ex10",
    "fhnw-binschedule1",
    "fome13",
    "graph40-40",
    "irish-electricity",
    "neos",
    "neos3",
    "neos-3025225",
    "neos-5052403-cygnet",
    "neos-5251015",
    "ns1687037",
    "ns1688926",
    "nug08-3rd",
    "pds-100",
    "physiciansched3-3",
    "qap15",
    "rail02",
    "rail4284",
    "rmine15",
    "s82",
    "s100",
    "s250r10",
    "savsched1",
    "scpm1",
    "shs1023",
    "square41",
    "stat96v2",
    "stormG2_1000",
    "stp3d",
    "supportcase10",
    "tpl-tub-ws1617",
    "woodlands09",
    "Dual2_5000",
    "Primal2_1000",
    "thk_48",
    "thk_63",
    "L1_sixm1000obs",
    "L2CTA3D",
    "degme",
    "dlr2",
    "set-cover-model",
]
+
# MIPLIB instance file names (each already includes the ".mps" suffix);
# downloaded from miplib.zib.de when this script is invoked with dataset
# type "mip".
# Fix: removed a duplicated "neos-5104907-jarama.mps" entry that caused a
# redundant download attempt for the same instance.
MiplibInstances = [
    "30n20b8.mps",
    "cryptanalysiskb128n5obj14.mps",
    "graph20-20-1rand.mps",
    "n2seq36q.mps",
    "neos-4338804-snowy.mps",
    "neos-957323.mps",
    "rail01.mps",
    "splice1k1.mps",
    "50v-10.mps",
    "cryptanalysiskb128n5obj16.mps",
    "graphdraw-domain.mps",
    "n3div36.mps",
    "neos-4387871-tavua.mps",
    "neos-960392.mps",
    "rail02.mps",
    "square41.mps",
    "academictimetablesmall.mps",
    "csched007.mps",
    "h80x6320d.mps",
    "n5-3.mps",
    "neos-4413714-turia.mps",
    "net12.mps",
    "rail507.mps",
    "square47.mps",
    "air05.mps",
    "csched008.mps",
    "highschool1-aigio.mps",
    "neos-1122047.mps",
    "neos-4532248-waihi.mps",
    "netdiversion.mps",
    "ran14x18-disj-8.mps",
    "supportcase10.mps",
    "app1-1.mps",
    "cvs16r128-89.mps",
    "hypothyroid-k1.mps",
    "neos-1171448.mps",
    "neos-4647030-tutaki.mps",
    "nexp-150-20-8-5.mps",
    "rd-rplusc-21.mps",
    "supportcase12.mps",
    "app1-2.mps",
    "dano3_3.mps",
    "ic97_potential.mps",
    "neos-1171737.mps",
    "neos-4722843-widden.mps",
    "ns1116954.mps",
    "reblock115.mps",
    "supportcase18.mps",
    "assign1-5-8.mps",
    "dano3_5.mps",
    "icir97_tension.mps",
    "neos-1354092.mps",
    "neos-4738912-atrato.mps",
    "ns1208400.mps",
    "rmatr100-p10.mps",
    "supportcase19.mps",
    "atlanta-ip.mps",
    "decomp2.mps",
    "irish-electricity.mps",
    "neos-1445765.mps",
    "neos-4763324-toguru.mps",
    "ns1644855.mps",
    "rmatr200-p5.mps",
    "supportcase22.mps",
    "b1c1s1.mps",
    "drayage-100-23.mps",
    "irp.mps",
    "neos-1456979.mps",
    "neos-4954672-berkel.mps",
    "ns1760995.mps",
    "rocI-4-11.mps",
    "supportcase26.mps",
    "bab2.mps",
    "drayage-25-23.mps",
    "istanbul-no-cutoff.mps",
    "neos-1582420.mps",
    "neos-5049753-cuanza.mps",
    "ns1830653.mps",
    "rocII-5-11.mps",
    "supportcase33.mps",
    "bab6.mps",
    "dws008-01.mps",
    "k1mushroom.mps",
    "neos17.mps",
    "neos-5052403-cygnet.mps",
    "ns1952667.mps",
    "rococoB10-011000.mps",
    "supportcase40.mps",
    "beasleyC3.mps",
    "eil33-2.mps",
    "lectsched-5-obj.mps",
    "neos-2075418-temuka.mps",
    "neos-5093327-huahum.mps",
    "nu25-pr12.mps",
    "rococoC10-001000.mps",
    "supportcase42.mps",
    "binkar10_1.mps",
    "eilA101-2.mps",
    "leo1.mps",
    "neos-2657525-crna.mps",
    "neos-5104907-jarama.mps",
    "nursesched-medium-hint03.mps",
    "roi2alpha3n4.mps",
    "supportcase6.mps",
    "blp-ar98.mps",
    "enlight_hard.mps",
    "leo2.mps",
    "neos-2746589-doon.mps",
    "neos-5107597-kakapo.mps",
    "nursesched-sprint02.mps",
    "roi5alpha10n8.mps",
    "supportcase7.mps",
    "blp-ic98.mps",
    "ex10.mps",
    "lotsize.mps",
    "neos-2978193-inde.mps",
    "neos-5114902-kasavu.mps",
    "nw04.mps",
    "roll3000.mps",
    "swath1.mps",
    "bnatt400.mps",
    "ex9.mps",
    "mad.mps",
    "neos-2987310-joes.mps",
    "neos-5188808-nattai.mps",
    "opm2-z10-s4.mps",
    "s100.mps",
    "swath3.mps",
    "bnatt500.mps",
    "exp-1-500-5-5.mps",
    "map10.mps",
    "neos-3004026-krka.mps",
    "neos-5195221-niemur.mps",
    "p200x1188c.mps",
    "s250r10.mps",
    "tbfp-network.mps",
    "bppc4-08.mps",
    "fast0507.mps",
    "map16715-04.mps",
    "neos-3024952-loue.mps",
    "neos5.mps",
    "peg-solitaire-a3.mps",
    "satellites2-40.mps",
    "thor50dday.mps",
    "brazil3.mps",
    "fastxgemm-n2r6s0t2.mps",
    "markshare2.mps",
    "neos-3046615-murg.mps",
    "neos-631710.mps",
    "pg5_34.mps",
    "satellites2-60-fs.mps",
    "timtab1.mps",
    "buildingenergy.mps",
    "fhnw-binpack4-48.mps",
    "markshare_4_0.mps",
    "neos-3083819-nubu.mps",
    "neos-662469.mps",
    "pg.mps",
    "savsched1.mps",
    "tr12-30.mps",
    "cbs-cta.mps",
    "fhnw-binpack4-4.mps",
    "mas74.mps",
    "neos-3216931-puriri.mps",
    "neos-787933.mps",
    "physiciansched3-3.mps",
    "sct2.mps",
    "traininstance2.mps",
    "chromaticindex1024-7.mps",
    "fiball.mps",
    "mas76.mps",
    "neos-3381206-awhea.mps",
    "neos-827175.mps",
    "physiciansched6-2.mps",
    "seymour1.mps",
    "traininstance6.mps",
    "chromaticindex512-7.mps",
    "gen-ip002.mps",
    "mc11.mps",
    "neos-3402294-bobin.mps",
    "neos-848589.mps",
    "piperout-08.mps",
    "seymour.mps",
    "trento1.mps",
    "cmflsp50-24-8-8.mps",
    "gen-ip054.mps",
    "mcsched.mps",
    "neos-3402454-bohle.mps",
    "neos859080.mps",
    "piperout-27.mps",
    "sing326.mps",
    "triptim1.mps",
    "CMS750_4.mps",
    "germanrr.mps",
    "mik-250-20-75-4.mps",
    "neos-3555904-turama.mps",
    "neos-860300.mps",
    "pk1.mps",
    "sing44.mps",
    "uccase12.mps",
    "co-100.mps",
    "gfd-schedulen180f7d50m30k18.mps",
    "milo-v12-6-r2-40-1.mps",
    "neos-3627168-kasai.mps",
    "neos-873061.mps",
    "proteindesign121hz512p9.mps",
    "snp-02-004-104.mps",
    "uccase9.mps",
    "cod105.mps",
    "glass4.mps",
    "momentum1.mps",
    "neos-3656078-kumeu.mps",
    "neos8.mps",
    "proteindesign122trx11p8.mps",
    "sorrell3.mps",
    "uct-subprob.mps",
    "comp07-2idx.mps",
    "glass-sc.mps",
    "mushroom-best.mps",
    "neos-3754480-nidda.mps",
    "neos-911970.mps",
    "qap10.mps",
    "sp150x300d.mps",
    "unitcal_7.mps",
    "comp21-2idx.mps",
    "gmu-35-40.mps",
    "mzzv11.mps",
    "neos-3988577-wolgan.mps",
    "neos-933966.mps",
    "radiationm18-12-05.mps",
    "sp97ar.mps",
    "var-smallemery-m6j6.mps",
    "cost266-UUE.mps",
    "gmu-35-50.mps",
    "mzzv42z.mps",
    "neos-4300652-rahue.mps",
    "neos-950242.mps",
    "radiationm40-10-02.mps",
    "sp98ar.mps",
    "wachplan.mps",
]
+
# Catalog of the Mittelmann LP benchmark universe:
#   "emps":       URL of the emps.c source used to expand netlib
#                 compressed-format files (see extract()).
#   "problems":   instance name -> [download URL ("" when the instance is
#                 not publicly available), format], where format is "mps"
#                 (plain MPS) or "netlib" (compressed MPS, needs emps).
#   "benchmarks": benchmark-suite name -> list of instance names.
MittelmannInstances = {
    "emps": "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/emps.c",
    "problems": {
        "irish-electricity": [
            "https://plato.asu.edu/ftp/lptestset/irish-electricity.mps.bz2",
            "mps",
        ],
        "physiciansched3-3": [
            "https://plato.asu.edu/ftp/lptestset/physiciansched3-3.mps.bz2",
            "mps",
        ],
        "16_n14": [
            "https://plato.asu.edu/ftp/lptestset/network/16_n14.mps.bz2",
            "mps",
        ],
        "Dual2_5000": [
            "https://plato.asu.edu/ftp/lptestset/Dual2_5000.mps.bz2",
            "mps",
        ],
        "L1_six1000": [
            "https://plato.asu.edu/ftp/lptestset/L1_sixm1000obs.bz2",
            "netlib",
        ],
        "L1_sixm": ["", "mps"],
        "L1_sixm1000obs": [
            "https://plato.asu.edu/ftp/lptestset/L1_sixm1000obs.bz2",
            "netlib",
        ],
        "L1_sixm250": ["", "netlib"],
        "L1_sixm250obs": [
            "https://plato.asu.edu/ftp/lptestset/L1_sixm250obs.bz2",
            "netlib",
        ],
        "L2CTA3D": [
            "https://plato.asu.edu/ftp/lptestset/L2CTA3D.mps.bz2",
            "mps",
        ],
        "Linf_520c": [
            "https://plato.asu.edu/ftp/lptestset/Linf_520c.bz2",
            "netlib",
        ],
        "Primal2_1000": [
            "https://plato.asu.edu/ftp/lptestset/Primal2_1000.mps.bz2",
            "mps",
        ],
        "a2864": ["https://plato.asu.edu/ftp/lptestset/a2864.mps.bz2", "mps"],
        "bdry2": ["https://plato.asu.edu/ftp/lptestset/bdry2.bz2", "netlib"],
        "braun": ["", "mps"],
        "cont1": [
            "https://plato.asu.edu/ftp/lptestset/misc/cont1.bz2",
            "netlib",
        ],
        "cont11": [
            "https://plato.asu.edu/ftp/lptestset/misc/cont11.bz2",
            "netlib",
        ],
        "datt256": [
            "https://plato.asu.edu/ftp/lptestset/datt256_lp.mps.bz2",
            "mps",
        ],
        "datt256_lp": [
            "https://plato.asu.edu/ftp/lptestset/datt256_lp.mps.bz2",
            "mps",
        ],
        "degme": [
            "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/New/degme.gz",
            "netlib",
        ],
        "dlr1": ["https://plato.asu.edu/ftp/lptestset/dlr1.mps.bz2", "mps"],
        "dlr2": ["https://plato.asu.edu/ftp/lptestset/dlr2.mps.bz2", "mps"],
        "energy1": ["", "mps"],  # Kept secret by Mittelmann
        "energy2": ["", "mps"],
        "ex10": ["https://plato.asu.edu/ftp/lptestset/ex10.mps.bz2", "mps"],
        "fhnw-binschedule1": [
            "https://plato.asu.edu/ftp/lptestset/fhnw-binschedule1.mps.bz2",
            "mps",
        ],
        "fome13": [
            "https://plato.asu.edu/ftp/lptestset/fome/fome13.bz2",
            "netlib",
        ],
        "gamora": ["", "mps"],  # Kept secret by Mittelmann
        "goto14_256_1": ["", "mps"],
        "goto14_256_2": ["", "mps"],
        "goto14_256_3": ["", "mps"],
        "goto14_256_4": ["", "mps"],
        "goto14_256_5": ["", "mps"],
        "goto16_64_1": ["", "mps"],
        "goto16_64_2": ["", "mps"],
        "goto16_64_3": ["", "mps"],
        "goto16_64_4": ["", "mps"],
        "goto16_64_5": ["", "mps"],
        "goto32_512_1": ["", "mps"],
        "goto32_512_2": ["", "mps"],
        "goto32_512_3": ["", "mps"],
        "goto32_512_4": ["", "mps"],
        "goto32_512_5": ["", "mps"],
        "graph40-40": [
            "https://plato.asu.edu/ftp/lptestset/graph40-40.mps.bz2",
            "mps",
        ],
        "graph40-40_lp": [
            "https://plato.asu.edu/ftp/lptestset/graph40-40.mps.bz2",
            "mps",
        ],
        "groot": ["", "mps"],  # Kept secret by Mittelmann
        "heimdall": ["", "mps"],  # Kept secret by Mittelmann
        "hulk": ["", "mps"],  # Kept secret by Mittelmann
        "i_n13": [
            "https://plato.asu.edu/ftp/lptestset/network/i_n13.mps.bz2",
            "mps",
        ],
        "irish-e": ["", "mps"],
        "karted": [
            "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/New/karted.gz",
            "netlib",
        ],
        "lo10": [
            "https://plato.asu.edu/ftp/lptestset/network/lo10.mps.bz2",
            "mps",
        ],
        "loki": ["", "mps"],  # Kept secret by Mittelmann
        "long15": [
            "https://plato.asu.edu/ftp/lptestset/network/long15.mps.bz2",
            "mps",
        ],
        "nebula": ["", "mps"],  # Kept secret by Mittelmann
        "neos": [
            "https://plato.asu.edu/ftp/lptestset/misc/neos.bz2",
            "netlib",
        ],
        "neos-3025225": [
            "https://plato.asu.edu/ftp/lptestset/neos-3025225.mps.bz2",
            "mps",
        ],
        "neos-3025225_lp": [
            "https://plato.asu.edu/ftp/lptestset/neos-3025225.mps.bz2",
            "mps",
        ],
        "neos-5251015": [
            "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2",
            "mps",
        ],
        "neos-5251015_lp": [
            "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2",
            "mps",
        ],
        "neos3": [
            "https://plato.asu.edu/ftp/lptestset/misc/neos3.bz2",
            "netlib",
        ],
        "neos-5052403-cygnet": [
            "https://plato.asu.edu/ftp/lptestset/neos-5052403-cygnet.mps.bz2",
            "mps",
        ],
        "neos5251015_lp": [
            "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2",
            "mps",
        ],
        # NOTE(review): "neos5251915" looks like a typo'd alias of
        # neos-5251015 (it points at the same URL) — kept as-is because the
        # benchmark lists below reference it by this exact name.
        "neos5251915": [
            "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2",
            "mps",
        ],
        "netlarge1": [
            "https://plato.asu.edu/ftp/lptestset/network/netlarge1.mps.bz2",
            "mps",
        ],
        "netlarge2": [
            "https://plato.asu.edu/ftp/lptestset/network/netlarge2.mps.bz2",
            "mps",
        ],
        "netlarge3": [
            "https://plato.asu.edu/ftp/lptestset/network/netlarge3.mps.bz2",
            "mps",
        ],
        "netlarge6": [
            "https://plato.asu.edu/ftp/lptestset/network/netlarge6.mps.bz2",
            "mps",
        ],
        "ns1687037": [
            "https://plato.asu.edu/ftp/lptestset/misc/ns1687037.bz2",
            "netlib",
        ],
        "ns1688926": [
            "https://plato.asu.edu/ftp/lptestset/misc/ns1688926.bz2",
            "netlib",
        ],
        "nug08-3rd": [
            "https://plato.asu.edu/ftp/lptestset/nug/nug08-3rd.bz2",
            "netlib",
        ],
        "pds-100": [
            "https://plato.asu.edu/ftp/lptestset/pds/pds-100.bz2",
            "netlib",
        ],
        "psched3-3": ["", "mps"],
        "qap15": ["https://plato.asu.edu/ftp/lptestset/qap15.mps.bz2", "mps"],
        "rail02": ["https://miplib2010.zib.de/download/rail02.mps.gz", "mps"],
        "rail4284": [
            "https://plato.asu.edu/ftp/lptestset/rail/rail4284.bz2",
            "netlib",
        ],
        "rmine15": [
            "https://plato.asu.edu/ftp/lptestset/rmine15.mps.bz2",
            "mps",
        ],
        "s100": ["https://plato.asu.edu/ftp/lptestset/s100.mps.bz2", "mps"],
        "s250r10": [
            "https://plato.asu.edu/ftp/lptestset/s250r10.mps.bz2",
            "mps",
        ],
        "s82": ["https://plato.asu.edu/ftp/lptestset/s82.mps.bz2", "mps"],
        "savsched1": [
            "https://plato.asu.edu/ftp/lptestset/savsched1.mps.bz2",
            "mps",
        ],
        "scpm1": ["https://plato.asu.edu/ftp/lptestset/scpm1.mps.bz2", "mps"],
        "set-cover-model": [
            "https://plato.asu.edu/ftp/lptestset/set-cover-model.mps.bz2",
            "mps",
        ],
        "shs1023": [
            "https://miplib2010.zib.de/download/shs1023.mps.gz",
            "mps",
        ],
        "square15": [
            "https://plato.asu.edu/ftp/lptestset/network/square15.mps.bz2",
            "mps",
        ],
        "square41": [
            "https://plato.asu.edu/ftp/lptestset/square41.mps.bz2",
            "mps",
        ],
        "stat96v2": [
            "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/misc/stat96v2.gz",
            "netlib",
        ],
        "stormG2_1000": [
            "https://plato.asu.edu/ftp/lptestset/misc/stormG2_1000.bz2",
            "netlib",
        ],
        "storm_1000": ["", "mps"],
        "stp3d": [
            "https://miplib.zib.de/WebData/instances/stp3d.mps.gz",
            "mps",
        ],
        "supportcase10": [
            "https://plato.asu.edu/ftp/lptestset/supportcase10.mps.bz2",
            "mps",
        ],
        "support19": [
            "https://plato.asu.edu/ftp/lptestset/supportcase19.mps.bz2",
            "mps",
        ],
        "supportcase19": [
            "https://plato.asu.edu/ftp/lptestset/supportcase19.mps.bz2",
            "mps",
        ],
        "test03": ["", "mps"],  # Kept secret by Mittelmann
        "test13": ["", "mps"],  # Kept secret by Mittelmann
        "test23": ["", "mps"],  # Kept secret by Mittelmann
        "test33": ["", "mps"],  # Kept secret by Mittelmann
        "test43": ["", "mps"],  # Kept secret by Mittelmann
        "test53": ["", "mps"],  # Kept secret by Mittelmann
        "test63": ["", "mps"],  # Kept secret by Mittelmann
        "test83": ["", "mps"],  # Kept secret by Mittelmann
        "test93": ["", "mps"],  # Kept secret by Mittelmann
        "mars": ["", "mps"],  # Kept secret by Mittelmann
        "thk_48": [
            "https://plato.asu.edu/ftp/lptestset/thk_48.mps.bz2",
            "mps",
        ],
        "thk_63": [
            "https://plato.asu.edu/ftp/lptestset/thk_63.mps.bz2",
            "mps",
        ],
        "thor": ["", "mps"],  # Kept secret by Mittelmann
        "tpl-tub-ws": ["", "mps"],
        "tpl-tub-ws1617": [
            "https://plato.asu.edu/ftp/lptestset/tpl-tub-ws1617.mps.bz2",
            "mps",
        ],
        "wide15": [
            "https://plato.asu.edu/ftp/lptestset/network/wide15.mps.bz2",
            "mps",
        ],
        "woodlands09": [
            "https://plato.asu.edu/ftp/lptestset/woodlands09.mps.bz2",
            "mps",
        ],
    },
    "benchmarks": {
        "simplex": [
            "L1_sixm",
            "L1_sixm250obs",
            "Linf_520c",
            "a2864",
            "bdry2",
            "braun",
            "cont1",
            "cont11",
            "datt256",
            "dlr1",
            "energy1",
            "energy2",
            "ex10",
            "fhnw-binschedule1",
            "fome13",
            "gamora",
            "graph40-40",
            "groot",
            "heimdall",
            "hulk",
            "irish-e",
            "loki",
            "nebula",
            "neos",
            "neos-3025225_lp",
            "neos-5251015_lp",
            "neos3",
            "neos3025225",
            "neos5052403",
            "neos5251015_lp",
            "ns1687037",
            "ns1688926",
            "nug08-3rd",
            "pds-100",
            "psched3-3",
            "qap15",
            "rail02",
            "rail4284",
            "rmine15",
            "s100",
            "s250r10",
            "s82",
            "savsched1",
            "scpm1",
            "shs1023",
            "square41",
            "stat96v2",
            "stormG2_1000",
            "storm_1000",
            "stp3d",
            "support10",
            "test03",
            "test13",
            "test23",
            "test33",
            "test43",
            "test53",
            "thor",
            "tpl-tub-ws",
            "tpl-tub-ws16",
            "woodlands09",
        ],
        "barrier": [
            "Dual2_5000",
            "L1_six1000",
            "L1_sixm1000obs",
            "L1_sixm250",
            "L1_sixm250obs",
            "L2CTA3D",
            "Linf_520c",
            "Primal2_1000",
            "a2864",
            "bdry2",
            "cont1",
            "cont11",
            "datt256",
            "degme",
            "dlr1",
            "dlr2",
            "ex10",
            "fhnw-binschedule1",
            "fome13",
            "graph40-40",
            "irish-e",
            "karted",
            "neos",
            "neos-3025225_lp",
            "neos-5251015_lp",
            "neos3",
            "neos3025225",
            "neos5052403",
            "neos5251915",
            "ns1687037",
            "ns1688926",
            "nug08-3rd",
            "pds-100",
            "psched3-3",
            "qap15",
            "rail02",
            "rail4284",
            "rmine15",
            "s100",
            "s250r10",
            "s82",
            "savsched1",
            "scpm1",
            "set-cover-model",
            "shs1023",
            "square41",
            "stat96v2",
            "stormG2_1000",
            "storm_1000",
            "stp3d",
            "support10",
            "support19",
            "supportcase19",
            "thk_63",
            "tpl-tub-ws",
            "tpl-tub-ws16",
            "woodlands09",
        ],
        "large": [
            "16_n14",
            "goto14_256_1",
            "goto14_256_2",
            "goto14_256_3",
            "goto14_256_4",
            "goto14_256_5",
            "goto16_64_1",
            "goto16_64_2",
            "goto16_64_3",
            "goto16_64_4",
            "goto16_64_5",
            "goto32_512_1",
            "goto32_512_2",
            "goto32_512_3",
            "goto32_512_4",
            "goto32_512_5",
            "i_n13",
            "lo10",
            "long15",
            "netlarge1",
            "netlarge2",
            "netlarge3",
            "netlarge6",
            "square15",
            "wide15",
        ],
        # <=100s in bench: https://plato.asu.edu/ftp/lpbar.html
        "L0": [
            "ex10",
            "datt256",
            "graph40-40",
            "neos5251915",
            "nug08-3rd",
            "qap15",
            "savsched1",
            "scpm1",
            "a2864",
            "support10",
            "rmine15",
            "fome13",
            "L2CTA3D",
            "neos5052403",
            "karted",
            "stp3d",
            "woodlands09",
            "rail4284",
            "L1_sixm250",
            "tpl-tub-ws",
        ],
        # >100 <1000
        "L1": [
            "s250r10",
            "pds-100",
            "set-cover-model",
            "neos3025225",
            "rail02",
            "square41",
            "degme",
            "Linf_520c",
            "cont1",
            "neos",
            "stat96v2",
            "support19",
            "shs1023",
            "storm_1000",
        ],
        # >1000
        "L2": [
            "thk_63",
            "Primal2_1000",
            "L1_six1000",
            "Dual2_5000",
            "s100",
            "fhnw-binschedule1",
            "cont11",
            "psched3-3",
        ],
        # t -> >15000
        "L3": [
            "dlr2",
            "bdry2",
            "dlr1",
            "irish-e",
            "ns1687037",
            "ns1688926",
            "s82",
        ],
    },
}
+
+
def download(url, dst):
    """Download `url` to the local path `dst`; no-op if `dst` already exists.

    SECURITY NOTE(review): SSL certificate verification is deliberately
    disabled for plato.asu.edu downloads (presumably because of certificate
    issues on that host — confirm), so those payloads are not authenticated.
    """
    if os.path.exists(dst):
        return
    print(f"Downloading {url} into {dst}...")
    # Bypass SSL verification for plato.asu.edu URLs
    if "plato.asu.edu" in url:
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        response = urllib.request.urlopen(url, context=context)
    else:
        response = urllib.request.urlopen(url)
    # Read the whole payload into memory, then write it out in one shot.
    data = response.read()
    with open(dst, "wb") as fp:
        fp.write(data)
+
+
def extract(file, dir, type):
    """Decompress `file` (a .bz2 or .gz archive located in `dir`), yielding
    an uncompressed file next to it.

    When ``type == "netlib"`` the decompressed payload is in netlib
    compressed-MPS format, so the `emps` expander is downloaded, compiled
    with gcc and run to produce the final ``.mps`` file; the intermediate
    file and the emps binary/source are cleaned up afterwards.

    NOTE(review): subprocess return codes are not checked, so a failed
    decompression/compile is silently ignored — confirm this is intentional.
    """
    basefile = os.path.basename(file)
    outfile = ""
    unzippedfile = ""
    if basefile.endswith(".bz2"):
        outfile = basefile.replace(".bz2", ".mps")
        unzippedfile = basefile.replace(".bz2", "")
        subprocess.run(f"cd {dir} && bzip2 -d {basefile}", shell=True)
    elif basefile.endswith(".gz"):
        outfile = basefile.replace(".gz", ".mps")
        unzippedfile = basefile.replace(".gz", "")
        subprocess.run(
            f"cd {dir} && gunzip -c {basefile} > {unzippedfile}", shell=True
        )
        subprocess.run(f"cd {dir} && rm -rf {basefile}", shell=True)
    else:
        raise Exception(f"Unknown file extension found for extraction {file}")
    # For netlib-format archives: download emps.c, compile it, and expand
    # the decompressed file into a regular .mps file.
    if type == "netlib":
        url = MittelmannInstances["emps"]
        file = os.path.join(dir, "emps.c")
        download(url, file)
        subprocess.run(
            f"cd {dir} && gcc -Wno-implicit-int emps.c -o emps", shell=True
        )
        # determine output file and run emps
        subprocess.run(
            f"cd {dir} && ./emps {unzippedfile} > {outfile}", shell=True
        )
        subprocess.run(f"cd {dir} && rm -rf {unzippedfile}", shell=True)
        # cleanup emps and emps.c
        subprocess.run(f"rm -rf {dir}/emps*", shell=True)
+
+
def download_lp_dataset(name, dir):
    """Download and extract a single LP instance named `name` into `dir`.

    `name` must be a key of ``MittelmannInstances["problems"]``; instances
    with an empty URL (not publicly available) are skipped, as are
    instances whose extracted ``<name>.mps`` already exists.

    Raises:
        Exception: if `name` is not a known problem.
    """
    if name not in MittelmannInstances["problems"]:
        raise Exception(f"Unknown dataset {name} passed")
    if os.path.exists(os.path.join(dir, f"{name}.mps")):
        print(
            f"Dir for dataset {name} exists and contains {name}.mps. Skipping..."
        )
        return
    url, kind = MittelmannInstances["problems"][name]
    if url == "":
        print(f"Dataset {name} doesn't have a URL. Skipping...")
        return
    # Fix: ensure the destination directory exists before downloading into
    # it (previously the download failed outright when `dir` was absent).
    os.makedirs(dir, exist_ok=True)
    file = os.path.join(dir, os.path.basename(url))
    download(url, file)
    extract(file, dir, kind)
+
+
def download_mip_dataset(name, dir):
    """Download and extract a single MIPLIB instance into `dir`.

    `name` already includes the ``.mps`` suffix (see MiplibInstances); the
    instance is fetched as ``<name>.gz`` from miplib.zib.de and gunzipped
    in place. Skips the work if the extracted file is already present.
    """
    base_url = "https://miplib.zib.de/WebData/instances"
    url = f"{base_url}/{name}.gz"
    outfile = f"{dir}/{name}.gz"
    if os.path.exists(os.path.join(dir, f"{name}")):
        # Fix: message previously claimed "<name>.mps" although `name`
        # already carries the .mps suffix.
        print(
            f"Dir for dataset {name} exists and contains {name}. Skipping..."
        )
        return
    # Fix: ensure the destination directory exists before downloading into
    # it (previously the download failed outright when `dir` was absent).
    os.makedirs(dir, exist_ok=True)
    download(url, outfile)
    extract(outfile, dir, "")
+
# Entry point: get_datasets.py <datasets_path> <lp|mip>
# Fix: validate argv and fail loudly on an unknown dataset type (previously
# a bad/missing argument crashed with IndexError or silently did nothing).
if len(sys.argv) < 3:
    sys.exit(f"Usage: {sys.argv[0]} <datasets_path> <lp|mip>")

datasets_path = sys.argv[1]
dataset_type = sys.argv[2]

if dataset_type == "lp":
    for name in LPFeasibleMittelmannSet:
        download_lp_dataset(name, datasets_path)
elif dataset_type == "mip":
    for name in MiplibInstances:
        download_mip_dataset(name, datasets_path)
else:
    sys.exit(f"Unknown dataset type {dataset_type!r}; expected 'lp' or 'mip'")
diff --git a/regression/lp_config.json b/regression/lp_config.json
new file mode 100644
index 0000000000..e2f8a9e936
--- /dev/null
+++ b/regression/lp_config.json
@@ -0,0 +1,13 @@
+{
+ "details": "LP test",
+ "metrics": {
+ "primal_objective_value": {
+ "threshold": 1,
+ "unit": "primal_objective_value"
+ },
+ "solver_time": {
+ "threshold": 1,
+ "unit": "seconds"
+ }
+ }
+}
diff --git a/regression/lp_regression_test.sh b/regression/lp_regression_test.sh
new file mode 100644
index 0000000000..23b28cd1f5
--- /dev/null
+++ b/regression/lp_regression_test.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Abort script on first error
+set -e
+
+# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env
+export PROJECT_DIR=${PROJECT_DIR:-$(cd "$(dirname ${BASH_SOURCE[0]})" && pwd)}
+source ${PROJECT_DIR}/config.sh
+source ${PROJECT_DIR}/functions.sh
+
+################################################################################
+
+# Extract the build meta-data from either the conda environment or the
+# cuOpt source dir and write out a file which can be read by other
+# scripts. If the cuOpt conda packages are present, those take
+# precedence, otherwise meta-data will be extracted from the sources.
+
+GIT_COMMIT=$( /dev/null); then
+ STATUS='PASSED'
+ STATUS_IMG='https://img.icons8.com/bubbles/100/000000/approval.png'
+ fi
+
+fi
+
+# Generate a one-line summary based on existence of certain reports, etc.
+if [[ "$ALL_REPORTS" == "" ]]; then
+ ONE_LINE_SUMMARY="*Build failed*"
+elif [[ "$STATUS" == "FAILED" ]]; then
+ if (grep -w FAILED $BENCHMARK_REPORT > /dev/null); then
+ ONE_LINE_SUMMARY="*One or more benchmarks failed*"
+ fi
+ if (grep -w FAILED $TEST_REPORT > /dev/null); then
+ ONE_LINE_SUMMARY="*One or more tests failed*"
+ fi
+ if (grep -w FAILED $TEST_REPORT > /dev/null) && (grep -w FAILED $BENCHMARK_REPORT > /dev/null); then
+ ONE_LINE_SUMMARY="*One or more tests and benchmarks failed*"
+ fi
+else
+ ONE_LINE_SUMMARY="Build succeeded, all tests and benchmarks passed"
+fi
+
+RESULTS_DIR_NAME=$(basename "$(getNonLinkedFileName $RESULTS_DIR)")
+
+# Upload everything
+logger "Uploading all files in $RESULTS_DIR ..."
+logger "Uploading all files in $RESULTS_DIR_NAME ..."
+aws s3 cp --follow-symlinks --acl public-read --recursive ${RESULTS_DIR} ${S3_FILE_PREFIX}/${RESULTS_DIR_NAME}
+logger "done uploading all files in $RESULTS_DIR"
+
+# Set vars used in the report
+PROJECT_VERSION_STRING=""
+PROJECT_VERSION=""
+PROJECT_BUILD=""
+PROJECT_CHANNEL=""
+PROJECT_REPO_URL=""
+PROJECT_REPO_BRANCH=""
+if [ -f $METADATA_FILE ]; then
+ source $METADATA_FILE
+fi
+# Assume if PROJECT_BUILD is set then a conda version string should be
+# created, else a git version string.
+if [[ "$PROJECT_BUILD" != "" ]]; then
+ PROJECT_VERSION_STRING=" cuOpt ver.: $PROJECT_VERSION
+ build: $PROJECT_BUILD
+ channel: $PROJECT_CHANNEL"
+else
+ PROJECT_VERSION_STRING=" cuOpt ver.: $PROJECT_VERSION
+ repo: $PROJECT_REPO_URL
+ branch: $PROJECT_REPO_BRANCH"
+fi
+
+export STATUS
+export STATUS_IMG
+export PROJECT_VERSION_STRING
+HUMAN_READABLE_DATE="$(date '+`%D`, `%H:%M` (PT)')"
+export HUMAN_READABLE_DATE
+# These files should be created by create-html-reports.sh
+export REPORT_URL="${S3_URL_PREFIX}/${RESULTS_DIR_NAME}/report.html"
+export ASV_URL="${S3_URL_PREFIX}/${RESULTS_DIR_NAME}/benchmarks/asv/html/index.html"
+export LOGS_URL="${S3_URL_PREFIX}/${RESULTS_DIR_NAME}/index.html"
+# export SPREADSHEET_URL=$SPREADSHEET_URL
+export ONE_LINE_SUMMARY
+
+echo
+echo "REPORT_URL: ${REPORT_URL}"
+# echo "SPREADSHEET_URL: ${SPREADSHEET_URL}"
+
+if hasArg --skip-sending-report; then
+ logger "Skipping sending Slack report."
+else
+ echo "$(envsubst < ${PROJECT_DIR}/slack_msg.json)"
+ curl -X POST \
+ -H 'Content-type: application/json' \
+ --data "$(envsubst < ${PROJECT_DIR}/slack_msg.json)" \
+ ${WEBHOOK_URL}
+fi
diff --git a/regression/slack_msg.json b/regression/slack_msg.json
new file mode 100644
index 0000000000..a73e659b2b
--- /dev/null
+++ b/regression/slack_msg.json
@@ -0,0 +1,68 @@
+{
+ "channel": "cuopt-regression-testing",
+ "username": "cuOpt Messaging",
+ "icon_emoji": ":robot_face:",
+ "blocks": [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "${ONE_LINE_SUMMARY}"
+ }
+ },
+ {
+ "type": "divider"
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "<${REPORT_URL}|*Results Report*>\nBuild status and test results."
+ },
+ "accessory": {
+ "type": "button",
+ "url": "${REPORT_URL}",
+ "text": {
+ "type": "plain_text",
+ "emoji": true,
+ "text": "View"
+ },
+ "value": "click_me_123"
+ }
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "<${ASV_URL}|*ASV Dashboard*>\nBenchmark results."
+ },
+ "accessory": {
+ "type": "button",
+ "url": "${ASV_URL}",
+ "text": {
+ "type": "plain_text",
+ "emoji": true,
+ "text": "View"
+ },
+ "value": "click_me_123"
+ }
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "<${LOGS_URL}|*Logs*>\nAll available logs."
+ },
+ "accessory": {
+ "type": "button",
+ "url": "${LOGS_URL}",
+ "text": {
+ "type": "plain_text",
+ "emoji": true,
+ "text": "View"
+ },
+ "value": "click_me_123"
+ }
+ }
+ ]
+}
diff --git a/regression/test-container.sh b/regression/test-container.sh
new file mode 100644
index 0000000000..4aeacb5678
--- /dev/null
+++ b/regression/test-container.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Creates a conda environment to be used for cuopt benchmarking.
+
+# Abort script on first error
+set -e
+
+# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env
+export PROJECT_DIR=${PROJECT_DIR:-$(cd "$(dirname ${BASH_SOURCE[0]})" && pwd)}
+source ${PROJECT_DIR}/config.sh
+
+################################################################################
+
+# Test
+logger "Testing container image $IMAGE"
+python -c "import cuopt; print(cuopt)"
+
+trap '${SCRIPTS_DIR}/write-meta-data.sh' EXIT
+
+# Other scripts look for this to be the last line to determine if this
+# script completed successfully. This is only possible because of the
+# "set -e" above.
+echo "done."
+logger "done."
diff --git a/regression/update_asv_database.py b/regression/update_asv_database.py
new file mode 100644
index 0000000000..bef64bd313
--- /dev/null
+++ b/regression/update_asv_database.py
@@ -0,0 +1,157 @@
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+from pathlib import Path
+import platform
+import psutil
+from asvdb import BenchmarkInfo, BenchmarkResult, ASVDb
+import json
+import pandas as pd
+
+
+def update_asv_db(
+ commitHash=None,
+ commitTime=None,
+ branch=None,
+ repo_url=None,
+ results_dir=None,
+ machine_name=None,
+ gpu_type=None,
+ configs=None,
+):
+ """
+ Read the benchmark_result* files in results_dir/benchmarks and
+ update an existing asv benchmark database or create one if one
+ does not exist in results_dir/benchmarks/asv. If no
+ benchmark_result* files are present, return without updating or
+ creating.
+ """
+
+ # commitHash = commitHash + str(int(time.time()))
+ benchmark_dir_path = Path(results_dir) / "benchmarks" / "results" / "csvs"
+ asv_dir_path = Path(results_dir) / "benchmarks" / "results" / "asv"
+
+ # List all benchmark_result files
+ benchmark_result_list = benchmark_dir_path.glob("*.csv")
+
+ bResultList = []
+ # Skip these columns from benchmarking
+ skip_columns = ["date_time", "git_commit"]
+
+ # Create result objects for each benchmark result and store it in a list
+ for file_name in benchmark_result_list:
+ # skip if it's regression file
+ if "regressions.csv" in str(file_name):
+ continue
+ with open(file_name, "r") as openfile:
+ data = pd.read_csv(openfile, index_col=0).iloc[-1]
+ test_name = str(file_name).split("/")[-1].split(".")[-2]
+ config_file = None
+ if test_name.startswith("lp"):
+ config_file = configs + "/" + "lp_config.json"
+ elif test_name.startswith("mip"):
+ config_file = configs + "/" + "mip_config.json"
+ else:
+ config_file = configs + "/" + test_name + "_config.json"
+ metrics = {}
+ with open(config_file, "r") as fp:
+ metrics = json.load(fp)["metrics"]
+ for col_name in data.index:
+ if col_name not in skip_columns:
+ bResult = BenchmarkResult(
+ funcName=test_name + "." + col_name,
+ result=data[col_name].item(),
+ unit="percentage"
+ if "bks" in col_name
+ else metrics[col_name]["unit"],
+ )
+ bResultList.append(bResult)
+
+ if len(bResultList) == 0:
+ print(
+ "Could not find files matching 'csv' in "
+ f"{benchmark_dir_path}, not creating/updating ASV database "
+ f"in {asv_dir_path}."
+ )
+ return
+
+ uname = platform.uname()
+ # Maybe also write those metadata to metadata.sh ?
+ osType = "%s %s" % (uname.system, uname.release)
+ # Remove unnecessary osType detail
+ osType = ".".join(osType.split("-")[0].split(".", 2)[:2])
+ pythonVer = platform.python_version()
+ # Remove unnecessary python version detail
+ pythonVer = ".".join(pythonVer.split(".", 2)[:2])
+ bInfo_dict = {
+ "machineName": machine_name,
+ # cudaVer : "10.0",
+ "osType": osType,
+ "pythonVer": pythonVer,
+ "commitHash": commitHash,
+ "branch": branch,
+ # commit time needs to be in milliseconds
+ "commitTime": commitTime * 1000,
+ "gpuType": gpu_type,
+ "cpuType": uname.processor,
+ "arch": uname.machine,
+ "ram": "%d" % psutil.virtual_memory().total,
+ }
+ bInfo = BenchmarkInfo(**bInfo_dict)
+
+ # extract the branch name
+ branch = bInfo_dict["branch"]
+
+ db = ASVDb(dbDir=str(asv_dir_path), repo=repo_url, branches=[branch])
+
+ for res in bResultList:
+ db.addResult(bInfo, res)
+
+
+if __name__ == "__main__":
+ import argparse
+
+ ap = argparse.ArgumentParser()
+ ap.add_argument(
+ "--commitHash", type=str, required=True, help="project version"
+ )
+ ap.add_argument(
+ "--commitTime", type=str, required=True, help="project version date"
+ )
+ ap.add_argument(
+ "--repo-url", type=str, required=True, help="project repo url"
+ )
+ ap.add_argument("--branch", type=str, required=True, help="project branch")
+ ap.add_argument(
+ "--results-dir",
+ type=str,
+ required=True,
+ help="directory to store the results in json files",
+ )
+ ap.add_argument(
+ "--machine-name", type=str, required=True, help="Slurm cluster name"
+ )
+ ap.add_argument(
+ "--gpu-type",
+ type=str,
+ required=True,
+ help="the official product name of the GPU",
+ )
+ ap.add_argument(
+ "--configs",
+ type=str,
+ required=True,
+ help="the config file for all the tests",
+ )
+ args = ap.parse_args()
+
+ update_asv_db(
+ commitHash=args.commitHash,
+ commitTime=int(args.commitTime),
+ branch=args.branch,
+ repo_url=args.repo_url,
+ results_dir=args.results_dir,
+ machine_name=args.machine_name,
+ gpu_type=args.gpu_type,
+ configs=args.configs,
+ )
diff --git a/regression/write-meta-data.sh b/regression/write-meta-data.sh
new file mode 100755
index 0000000000..020631469d
--- /dev/null
+++ b/regression/write-meta-data.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Abort script on first error
+set -e
+
+# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env
+export PROJECT_DIR=${PROJECT_DIR:-$(cd "$(dirname ${BASH_SOURCE[0]})" && pwd)}
+source ${PROJECT_DIR}/config.sh
+source ${PROJECT_DIR}/functions.sh
+
+PROJECT_VERSION=$(> $METADATA_FILE
+echo "PROJECT_VERSION=\"$PROJECT_VERSION\"" >> $METADATA_FILE
+echo "PROJECT_BUILD=\"$PROJECT_BUILD\"" >> $METADATA_FILE
+echo "PROJECT_CHANNEL=\"$PROJECT_CHANNEL\"" >> $METADATA_FILE
+echo "PROJECT_REPO_URL=\"$PROJECT_REPO_URL\"" >> $METADATA_FILE
+echo "PROJECT_REPO_BRANCH=\"$PROJECT_REPO_BRANCH\"" >> $METADATA_FILE
+echo "PROJECT_REPO_TIME=\"$PROJECT_REPO_TIME\"" >> $METADATA_FILE
From 701db7ed1890cace3173d816ab2f16bc6bc245b3 Mon Sep 17 00:00:00 2001
From: Chris Maes
Date: Wed, 4 Feb 2026 20:12:13 -0800
Subject: [PATCH 2/3] Add cuts to the MIP solver (#814)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR adds cuts to the MIP solver. This includes the following:
1. Add constraints in the form C*x <= d to an LP that has already been solved to optimality (and has basis information).
- The constraints must be violated at the current relaxation solution x^star. That is, C*x^star > d.
- The constraint matrix, rhs, basis, and basis factorization, are all updated to include the additional constraints.
- Dual simplex is started in phase 2 from a dual feasible solution.
2. Remove constraints from an LP that has already been solved to optimality.
- The constraints must have slacks in the basis
- The basis is refactored from scratch
3. Add cut pass loop after solving the root relaxation
4. Add a cut pool to store cuts and select cuts
- We currently score cuts based on distance and orthogonality.
6. Add Mixed Integer Gomory Cuts
- These are computed via a MIR cut on a row of the simplex tableau
7. Add Mixed Integer Rounding (MIR) Cuts
- These are constructed by aggregating rows of the constraint matrix.
8. Add Strong Chvatal-Gomory Cuts
- These are constructed from a row of the tableau matrix and from rows of the constraint matrix.
9. Fixes to Handling of Steepest Edge Norms in Dual Simplex
- Ensure that all basic variables have a positive steepest edge norm
10. Reduced Costs Fixing at Root Node
- These are applied after each cut pass and after strong branching, if a heuristic solution is available.
12. Fix issues in Crossover when solving the dual problem
- We were not correctly populating slack variables when solving the dual. This issue appeared on graph20-80-1rand
14. Fix issue in Crossover when basis became rank-deficient in dual push
15. Fix issues across the code with handling and propagating concurrent halt.
16. New solver options: mip-cut-passes, mip-mixed-integer-gomory-cuts, mip-mir-cuts, mip-strong-chvatal-gomory-cuts, mip-knapsack-cuts, mip-cut-change-threshold, mip-cut-min-orthogonality.
Closes #698, #205
Results from a GH200 A/B test with 64 threads and a 300 second time limit. Further runs needed with larger time limit. Further work is needed to get the full benefit of cuts.
A: cuts PR with --mip-cut-passes=10
B: cuts PR with --mip-cut-passes=0
Geomean MIP GAP A / (B = baseline): 0.97
Geomean Time to Optimal A/B: 0.96
A optimal 45
B optimal 37
A problems with feasible solutions 225
B problems with feasible solutions 224
A wins
drayage-100-23 1.14 8.61
gfd-schedulen180f7d50m30k18 87.85 300.0
neos-827175 12.28 25.86
neos-1171448 28.85 98.2
n5-3 180.62 300.0
neos859080 0.78 1.38
seymour1 38.78 300.0
neos-860300 71.98 90.91
neos-3083819-nubu 30.48 300.0
neos-933966 248.26 300.0
neos-957323 46.36 300.0
neos-960392 50.12 115.68
netdiversion 121.59 300.0
ns1208400 162.15 300.0
nw04 39.89 45.98
piperout-27 8.22 14.09
supportcase7 30.48 300.0
supportcase6 70.22 300.0
uccase12 26.95 41.87
A wins 19
A losses
app1-1 2.3 0.74
cbs-cta 2.21 1.83
irp 17.59 12.94
istanbul-no-cutoff 18.26 13.56
mas76 300.0 20.98
neos-1122047 8.3 6.46
neos-1445765 1.86 1.52
neos-1582420 134.3 93.44
neos-3004026-krka 6.56 1.31
neos8 0.88 0.68
ns1952667 99.84 64.92
piperout-08 10.75 8.23
pk1 224.32 91.84
qap10 134.76 62.82
swath1 60.04 50.3
trento1 300.0 172.28
triptim1 246.65 130.92
A losses 17
Authors:
- Chris Maes (https://github.com/chris-maes)
Approvers:
- Ramakrishnap (https://github.com/rgsl888prabhu)
- Akif ÇÖRDÜK (https://github.com/akifcorduk)
- Alice Boucher (https://github.com/aliceb-nv)
URL: https://github.com/NVIDIA/cuopt/pull/814
---
.../cuopt/linear_programming/constants.h | 10 +-
.../mip/solver_settings.hpp | 18 +-
cpp/src/dual_simplex/CMakeLists.txt | 1 +
cpp/src/dual_simplex/barrier.cu | 10 +-
cpp/src/dual_simplex/basis_solves.cpp | 4 +-
cpp/src/dual_simplex/basis_updates.cpp | 255 +-
cpp/src/dual_simplex/basis_updates.hpp | 2 +
.../bound_flipping_ratio_test.cpp | 17 +-
.../bound_flipping_ratio_test.hpp | 8 +-
cpp/src/dual_simplex/bounds_strengthening.cpp | 36 +-
cpp/src/dual_simplex/bounds_strengthening.hpp | 13 +-
cpp/src/dual_simplex/branch_and_bound.cpp | 804 ++++-
cpp/src/dual_simplex/branch_and_bound.hpp | 34 +-
cpp/src/dual_simplex/crossover.cpp | 94 +-
cpp/src/dual_simplex/cuts.cpp | 2822 +++++++++++++++++
cpp/src/dual_simplex/cuts.hpp | 479 +++
cpp/src/dual_simplex/dense_matrix.hpp | 4 +-
cpp/src/dual_simplex/folding.cpp | 71 +-
cpp/src/dual_simplex/mip_node.hpp | 9 +-
cpp/src/dual_simplex/phase2.cpp | 211 +-
cpp/src/dual_simplex/presolve.cpp | 9 +-
cpp/src/dual_simplex/primal.cpp | 10 +-
cpp/src/dual_simplex/primal.hpp | 6 +-
cpp/src/dual_simplex/pseudo_costs.cpp | 25 +-
cpp/src/dual_simplex/right_looking_lu.cpp | 4 +-
.../dual_simplex/simplex_solver_settings.hpp | 23 +
cpp/src/dual_simplex/solution.hpp | 4 +-
cpp/src/dual_simplex/solve.cpp | 73 +-
cpp/src/dual_simplex/sparse_cholesky.cuh | 36 +-
cpp/src/dual_simplex/sparse_matrix.cpp | 83 +-
cpp/src/dual_simplex/sparse_matrix.hpp | 10 +-
cpp/src/dual_simplex/sparse_vector.cpp | 50 +-
cpp/src/dual_simplex/sparse_vector.hpp | 8 +-
cpp/src/dual_simplex/types.hpp | 5 +-
cpp/src/math_optimization/solver_settings.cu | 10 +-
cpp/src/mip/diversity/diversity_manager.cu | 3 +-
cpp/src/mip/diversity/lns/rins.cu | 8 +-
cpp/src/mip/diversity/recombiners/sub_mip.cuh | 7 +-
cpp/src/mip/presolve/third_party_presolve.cpp | 4 +-
cpp/src/mip/solve.cu | 2 +-
cpp/src/mip/solver.cu | 27 +-
cpp/src/utilities/timer.hpp | 31 +-
cpp/tests/mip/CMakeLists.txt | 5 +-
cpp/tests/mip/cuts_test.cu | 168 +
cpp/tests/mip/termination_test.cu | 4 +-
.../linear_programming/test_python_API.py | 7 +-
python/libcuopt/pyproject.toml | 1 -
47 files changed, 5178 insertions(+), 347 deletions(-)
create mode 100644 cpp/src/dual_simplex/cuts.cpp
create mode 100644 cpp/src/dual_simplex/cuts.hpp
create mode 100644 cpp/tests/mip/cuts_test.cu
diff --git a/cpp/include/cuopt/linear_programming/constants.h b/cpp/include/cuopt/linear_programming/constants.h
index 4ee0224845..7caf7aeeb2 100644
--- a/cpp/include/cuopt/linear_programming/constants.h
+++ b/cpp/include/cuopt/linear_programming/constants.h
@@ -52,12 +52,20 @@
#define CUOPT_MIP_ABSOLUTE_TOLERANCE "mip_absolute_tolerance"
#define CUOPT_MIP_RELATIVE_TOLERANCE "mip_relative_tolerance"
#define CUOPT_MIP_INTEGRALITY_TOLERANCE "mip_integrality_tolerance"
-#define CUOPT_MIP_BATCH_PDLP_STRONG_BRANCHING "mip_batch_pdlp_strong_branching"
#define CUOPT_MIP_ABSOLUTE_GAP "mip_absolute_gap"
#define CUOPT_MIP_RELATIVE_GAP "mip_relative_gap"
#define CUOPT_MIP_HEURISTICS_ONLY "mip_heuristics_only"
#define CUOPT_MIP_SCALING "mip_scaling"
#define CUOPT_MIP_PRESOLVE "mip_presolve"
+#define CUOPT_MIP_CUT_PASSES "mip_cut_passes"
+#define CUOPT_MIP_MIXED_INTEGER_ROUNDING_CUTS "mip_mixed_integer_rounding_cuts"
+#define CUOPT_MIP_MIXED_INTEGER_GOMORY_CUTS "mip_mixed_integer_gomory_cuts"
+#define CUOPT_MIP_KNAPSACK_CUTS "mip_knapsack_cuts"
+#define CUOPT_MIP_STRONG_CHVATAL_GOMORY_CUTS "mip_strong_chvatal_gomory_cuts"
+#define CUOPT_MIP_REDUCED_COST_STRENGTHENING "mip_reduced_cost_strengthening"
+#define CUOPT_MIP_CUT_CHANGE_THRESHOLD "mip_cut_change_threshold"
+#define CUOPT_MIP_CUT_MIN_ORTHOGONALITY "mip_cut_min_orthogonality"
+#define CUOPT_MIP_BATCH_PDLP_STRONG_BRANCHING "mip_batch_pdlp_strong_branching"
#define CUOPT_SOLUTION_FILE "solution_file"
#define CUOPT_NUM_CPU_THREADS "num_cpu_threads"
#define CUOPT_NUM_GPUS "num_gpus"
diff --git a/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp b/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
index 326d7f76ad..863e5d66d6 100644
--- a/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
+++ b/cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
@@ -83,12 +83,22 @@ class mip_solver_settings_t {
friend class problem_checking_t;
tolerances_t tolerances;
- f_t time_limit = std::numeric_limits::infinity();
- bool heuristics_only = false;
- i_t num_cpu_threads = -1; // -1 means use default number of threads in branch and bound
- i_t num_gpus = 1;
+ f_t time_limit = std::numeric_limits::infinity();
+ i_t node_limit = std::numeric_limits::max();
+ bool heuristics_only = false;
+ i_t num_cpu_threads = -1; // -1 means use default number of threads in branch and bound
+ i_t max_cut_passes = 10; // number of cut passes to make
+ i_t mir_cuts = -1;
+ i_t mixed_integer_gomory_cuts = -1;
+ i_t knapsack_cuts = -1;
+ i_t strong_chvatal_gomory_cuts = -1;
+ i_t reduced_cost_strengthening = -1;
+ f_t cut_change_threshold = 1e-3;
+ f_t cut_min_orthogonality = 0.5;
i_t mip_batch_pdlp_strong_branching = 0;
+ i_t num_gpus = 1;
bool log_to_console = true;
+
std::string log_file;
std::string sol_file;
std::string user_problem_file;
diff --git a/cpp/src/dual_simplex/CMakeLists.txt b/cpp/src/dual_simplex/CMakeLists.txt
index af1415fa9c..5405735043 100644
--- a/cpp/src/dual_simplex/CMakeLists.txt
+++ b/cpp/src/dual_simplex/CMakeLists.txt
@@ -10,6 +10,7 @@ set(DUAL_SIMPLEX_SRC_FILES
${CMAKE_CURRENT_SOURCE_DIR}/basis_updates.cpp
${CMAKE_CURRENT_SOURCE_DIR}/bound_flipping_ratio_test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/branch_and_bound.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/cuts.cpp
${CMAKE_CURRENT_SOURCE_DIR}/crossover.cpp
${CMAKE_CURRENT_SOURCE_DIR}/folding.cpp
${CMAKE_CURRENT_SOURCE_DIR}/initial_basis.cpp
diff --git a/cpp/src/dual_simplex/barrier.cu b/cpp/src/dual_simplex/barrier.cu
index a883232959..5eef97bb8a 100644
--- a/cpp/src/dual_simplex/barrier.cu
+++ b/cpp/src/dual_simplex/barrier.cu
@@ -681,7 +681,7 @@ class iteration_data_t {
solve_status = chol->solve(U_col, M_col);
if (solve_status != 0) { return solve_status; }
if (settings_.concurrent_halt != nullptr && *settings_.concurrent_halt == 1) {
- return -2;
+ return CONCURRENT_HALT_RETURN;
}
M.set_column(k, M_col);
@@ -700,7 +700,7 @@ class iteration_data_t {
AD_dense.transpose_multiply(
1.0, M.values.data() + k * M.m, 0.0, H.values.data() + k * H.m);
if (settings_.concurrent_halt != nullptr && *settings_.concurrent_halt == 1) {
- return -2;
+ return CONCURRENT_HALT_RETURN;
}
}
@@ -1745,7 +1745,7 @@ int barrier_solver_t::initial_point(iteration_data_t& data)
} else {
status = data.chol->factorize(data.device_ADAT);
}
- if (status == -2) { return -2; }
+ if (status == CONCURRENT_HALT_RETURN) { return CONCURRENT_HALT_RETURN; }
if (status != 0) {
settings.log.printf("Initial factorization failed\n");
return -1;
@@ -2309,7 +2309,7 @@ i_t barrier_solver_t::gpu_compute_search_direction(iteration_data_t::gpu_compute_search_direction(iteration_data_t& A,
S_perm_inv);
if (settings.concurrent_halt != nullptr && *settings.concurrent_halt == 1) {
settings.log.printf("Concurrent halt\n");
- return -1;
+ return CONCURRENT_HALT_RETURN;
}
if (Srank != Sdim) {
// Get the rank deficient columns
@@ -582,7 +582,7 @@ i_t factorize_basis(const csc_matrix_t& A,
}
if (settings.concurrent_halt != nullptr && *settings.concurrent_halt == 1) {
settings.log.printf("Concurrent halt\n");
- return -1;
+ return CONCURRENT_HALT_RETURN;
}
if (verbose) {
printf("Right Lnz+Unz %d t %.3f\n", L.col_start[m] + U.col_start[m], toc(fact_start));
diff --git a/cpp/src/dual_simplex/basis_updates.cpp b/cpp/src/dual_simplex/basis_updates.cpp
index 2c781a5156..1d94f41c7f 100644
--- a/cpp/src/dual_simplex/basis_updates.cpp
+++ b/cpp/src/dual_simplex/basis_updates.cpp
@@ -1108,6 +1108,217 @@ i_t basis_update_t::lower_triangular_multiply(const csc_matrix_t
+i_t basis_update_mpf_t::append_cuts(const csr_matrix_t& cuts_basic)
+{
+ const i_t m = L0_.m;
+
+ // Solve for U^T W^T = C_B^T
+ // We do this one row at a time of C_B
+ csc_matrix_t WT(m, cuts_basic.m, 0);
+
+ i_t WT_nz = 0;
+ for (i_t k = 0; k < cuts_basic.m; k++) {
+ sparse_vector_t rhs(cuts_basic, k);
+ u_transpose_solve(rhs);
+ WT.col_start[k] = WT_nz;
+ for (i_t q = 0; q < rhs.i.size(); q++) {
+ WT.i.push_back(rhs.i[q]);
+ WT.x.push_back(rhs.x[q]);
+ WT_nz++;
+ }
+ }
+ WT.col_start[cuts_basic.m] = WT_nz;
+
+#ifdef CHECK_W
+ {
+ for (i_t k = 0; k < cuts_basic.m; k++) {
+ std::vector WT_col(m, 0.0);
+ WT.load_a_column(k, WT_col);
+ std::vector CBT_col(m, 0.0);
+ matrix_transpose_vector_multiply(U0_, 1.0, WT_col, 0.0, CBT_col);
+ sparse_vector_t CBT_col_sparse(cuts_basic, k);
+ std::vector CBT_col_dense(m);
+ CBT_col_sparse.to_dense(CBT_col_dense);
+ for (i_t h = 0; h < m; h++) {
+ if (std::abs(CBT_col_dense[h] - CBT_col[h]) > 1e-6) {
+ printf("W: col %d CBT_col_dense[%d] = %e CBT_col[%d] = %e\n",
+ k,
+ h,
+ CBT_col_dense[h],
+ h,
+ CBT_col[h]);
+ exit(1);
+ }
+ }
+ }
+ }
+#endif
+
+ csc_matrix_t V(cuts_basic.m, m, 0);
+ if (num_updates_ > 0) {
+ // W = V T_0 ... T_{num_updates_ - 1}
+ // or V = W T_{num_updates_ - 1}^{-1} ... T_0^{-1}
+ // or V^T = T_0^{-T} ... T_{num_updates_ - 1}^{-T} W^T
+ // We can compute V^T column by column so that we have
+ // V^T(:, h) = T_0^{-T} ... T_{num_updates_ - 1}^{-T} W^T(:, h)
+ // or
+ // V(h, :) = T_0^{-T} ... T_{num_updates_ - 1}^{-T} W^T(:, h)
+ // So we can form V row by row in CSR and then convert it to CSC
+ // for appending to L0
+
+ csr_matrix_t V_row(cuts_basic.m, m, 0);
+ i_t V_nz = 0;
+ const f_t zero_tol = 1e-13;
+ for (i_t h = 0; h < cuts_basic.m; h++) {
+ sparse_vector_t rhs(WT, h);
+ scatter_into_workspace(rhs);
+ i_t nz = rhs.i.size();
+ for (i_t k = num_updates_ - 1; k >= 0; --k) {
+ // T_k^{-T} = ( I - v u^T/(1 + u^T v))
+ // T_k^{-T} * b = b - v * (u^T * b) / (1 + u^T * v) = b - theta * v, theta = u^T b / mu
+
+ const i_t u_col = 2 * k;
+ const i_t v_col = 2 * k + 1;
+ const f_t mu = mu_values_[k];
+
+ // dot = u^T * b
+ f_t dot = dot_product(u_col, xi_workspace_, x_workspace_);
+ const f_t theta = dot / mu;
+ if (std::abs(theta) > zero_tol) {
+ add_sparse_column(S_, v_col, -theta, xi_workspace_, nz, x_workspace_);
+ }
+ }
+ gather_into_sparse_vector(nz, rhs);
+ V_row.row_start[h] = V_nz;
+ for (i_t q = 0; q < rhs.i.size(); q++) {
+ V_row.j.push_back(rhs.i[q]);
+ V_row.x.push_back(rhs.x[q]);
+ V_nz++;
+ }
+ }
+ V_row.row_start[cuts_basic.m] = V_nz;
+
+ V_row.to_compressed_col(V);
+
+#ifdef CHECK_V
+ csc_matrix_t CB_col(cuts_basic.m, m, 0);
+ cuts_basic.to_compressed_col(CB_col);
+ for (i_t k = 0; k < m; k++) {
+ std::vector U_col(m, 0.0);
+ U0_.load_a_column(k, U_col);
+ for (i_t h = num_updates_ - 1; h >= 0; --h) {
+ // T_h = ( I + u_h v_h^T)
+ // T_h * x = x + u_h * v_h^T * x = x + theta * u_h
+ const i_t u_col = 2 * h;
+ const i_t v_col = 2 * h + 1;
+ f_t theta = dot_product(v_col, U_col);
+ const i_t col_start = S_.col_start[u_col];
+ const i_t col_end = S_.col_start[u_col + 1];
+ for (i_t p = col_start; p < col_end; ++p) {
+ const i_t i = S_.i[p];
+ U_col[i] += theta * S_.x[p];
+ }
+ }
+ std::vector CB_column(cuts_basic.m, 0.0);
+ matrix_vector_multiply(V, 1.0, U_col, 0.0, CB_column);
+ std::vector CB_col_dense(cuts_basic.m);
+ CB_col.load_a_column(k, CB_col_dense);
+ for (i_t l = 0; l < cuts_basic.m; l++) {
+ if (std::abs(CB_col_dense[l] - CB_column[l]) > 1e-6) {
+ printf("V: col %d CB_col_dense[%d] = %e CB_column[%d] = %e\n",
+ k,
+ l,
+ CB_col_dense[l],
+ l,
+ CB_column[l]);
+ exit(1);
+ }
+ }
+ }
+#endif
+ } else {
+ // W = V
+ WT.transpose(V);
+ }
+
+ // Extend u_i, v_i for i = 0, ..., num_updates_ - 1
+ S_.m += cuts_basic.m;
+
+ // Adjust L and U
+ // L = [ L0 0 ]
+ // [ V I ]
+
+ i_t V_nz = V.col_start[m];
+ i_t L_nz = L0_.col_start[m];
+ csc_matrix_t new_L(m + cuts_basic.m, m + cuts_basic.m, L_nz + V_nz + cuts_basic.m);
+ i_t predicted_nz = L_nz + V_nz + cuts_basic.m;
+ L_nz = 0;
+ for (i_t j = 0; j < m; ++j) {
+ new_L.col_start[j] = L_nz;
+ const i_t col_start = L0_.col_start[j];
+ const i_t col_end = L0_.col_start[j + 1];
+ for (i_t p = col_start; p < col_end; ++p) {
+ new_L.i[L_nz] = L0_.i[p];
+ new_L.x[L_nz] = L0_.x[p];
+ L_nz++;
+ }
+ const i_t V_col_start = V.col_start[j];
+ const i_t V_col_end = V.col_start[j + 1];
+ for (i_t p = V_col_start; p < V_col_end; ++p) {
+ new_L.i[L_nz] = V.i[p] + m;
+ new_L.x[L_nz] = V.x[p];
+ L_nz++;
+ }
+ }
+ for (i_t j = m; j < m + cuts_basic.m; ++j) {
+ new_L.col_start[j] = L_nz;
+ new_L.i[L_nz] = j;
+ new_L.x[L_nz] = 1.0;
+ L_nz++;
+ }
+ new_L.col_start[m + cuts_basic.m] = L_nz;
+ assert(L_nz == predicted_nz);
+
+ L0_ = new_L;
+
+ // Adjust U
+ // U = [ U0 0 ]
+ // [ 0 I ]
+
+ i_t U_nz = U0_.col_start[m];
+ U0_.col_start.resize(m + cuts_basic.m + 1);
+ U0_.i.resize(U_nz + cuts_basic.m);
+ U0_.x.resize(U_nz + cuts_basic.m);
+ for (i_t k = m; k < m + cuts_basic.m; ++k) {
+ U0_.col_start[k] = U_nz;
+ U0_.i[U_nz] = k;
+ U0_.x[U_nz] = 1.0;
+ U_nz++;
+ }
+ U0_.col_start[m + cuts_basic.m] = U_nz;
+ U0_.n = m + cuts_basic.m;
+ U0_.m = m + cuts_basic.m;
+
+ compute_transposes();
+
+ // Adjust row_permutation_ and inverse_row_permutation_
+ row_permutation_.resize(m + cuts_basic.m);
+ inverse_row_permutation_.resize(m + cuts_basic.m);
+ for (i_t k = m; k < m + cuts_basic.m; ++k) {
+ row_permutation_[k] = k;
+ }
+ inverse_permutation(row_permutation_, inverse_row_permutation_);
+
+ // Adjust workspace sizes
+ xi_workspace_.resize(2 * (m + cuts_basic.m), 0);
+ x_workspace_.resize(m + cuts_basic.m, 0.0);
+
+ return 0;
+}
+
template
void basis_update_mpf_t::gather_into_sparse_vector(i_t nz,
sparse_vector_t& out) const
@@ -2057,16 +2268,18 @@ int basis_update_mpf_t::refactor_basis(
if (L0_.m != A.m) { resize(A.m); }
std::vector q;
- if (factorize_basis(A,
- settings,
- basic_list,
- L0_,
- U0_,
- row_permutation_,
- inverse_row_permutation_,
- q,
- deficient,
- slacks_needed) == -1) {
+ i_t status = factorize_basis(A,
+ settings,
+ basic_list,
+ L0_,
+ U0_,
+ row_permutation_,
+ inverse_row_permutation_,
+ q,
+ deficient,
+ slacks_needed);
+ if (status == CONCURRENT_HALT_RETURN) { return CONCURRENT_HALT_RETURN; }
+ if (status == -1) {
settings.log.debug("Initial factorization failed\n");
basis_repair(
A, settings, lower, upper, deficient, slacks_needed, basic_list, nonbasic_list, vstatus);
@@ -2088,16 +2301,18 @@ int basis_update_mpf_t::refactor_basis(
}
#endif
- if (factorize_basis(A,
- settings,
- basic_list,
- L0_,
- U0_,
- row_permutation_,
- inverse_row_permutation_,
- q,
- deficient,
- slacks_needed) == -1) {
+ status = factorize_basis(A,
+ settings,
+ basic_list,
+ L0_,
+ U0_,
+ row_permutation_,
+ inverse_row_permutation_,
+ q,
+ deficient,
+ slacks_needed);
+ if (status == CONCURRENT_HALT_RETURN) { return CONCURRENT_HALT_RETURN; }
+ if (status == -1) {
#ifdef CHECK_L_FACTOR
if (L0_.check_matrix() == -1) { settings.log.printf("Bad L after basis repair\n"); }
#endif
diff --git a/cpp/src/dual_simplex/basis_updates.hpp b/cpp/src/dual_simplex/basis_updates.hpp
index afd4f4c9ab..8eca3ba8a9 100644
--- a/cpp/src/dual_simplex/basis_updates.hpp
+++ b/cpp/src/dual_simplex/basis_updates.hpp
@@ -291,6 +291,8 @@ class basis_update_mpf_t {
reset_stats();
}
+ i_t append_cuts(const csr_matrix_t& cuts_basic);
+
f_t estimate_solution_density(f_t rhs_nz, f_t sum, i_t& num_calls, bool& use_hypersparse) const
{
num_calls++;
diff --git a/cpp/src/dual_simplex/bound_flipping_ratio_test.cpp b/cpp/src/dual_simplex/bound_flipping_ratio_test.cpp
index 8e58c24f01..fac65b8140 100644
--- a/cpp/src/dual_simplex/bound_flipping_ratio_test.cpp
+++ b/cpp/src/dual_simplex/bound_flipping_ratio_test.cpp
@@ -1,6 +1,6 @@
/* clang-format off */
/*
- * SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
/* clang-format on */
@@ -84,10 +84,7 @@ i_t bound_flipping_ratio_test_t::single_pass(i_t start,
step_length = min_val;
nonbasic_entering = candidate;
// this should be temporary, find root causes where the candidate is not filled
- if (nonbasic_entering == -1) {
- // -1,-2 and -3 are reserved for other things
- return -4;
- }
+ if (nonbasic_entering == -1) { return RATIO_TEST_NUMERICAL_ISSUES; }
const i_t j = entering_index = nonbasic_list_[nonbasic_entering];
constexpr bool verbose = false;
@@ -123,16 +120,16 @@ i_t bound_flipping_ratio_test_t::compute_step_length(f_t& step_length,
if constexpr (verbose) { settings_.log.printf("Initial breakpoints %d\n", num_breakpoints); }
if (num_breakpoints == 0) {
nonbasic_entering = -1;
- return -1;
+ return RATIO_TEST_NO_ENTERING_VARIABLE;
}
f_t slope = slope_;
nonbasic_entering = -1;
- i_t entering_index = -1;
+ i_t entering_index = RATIO_TEST_NO_ENTERING_VARIABLE;
i_t k_idx = single_pass(
0, num_breakpoints, indicies, ratios, slope, step_length, nonbasic_entering, entering_index);
- if (k_idx == -4) { return -4; }
+ if (k_idx == RATIO_TEST_NUMERICAL_ISSUES) { return RATIO_TEST_NUMERICAL_ISSUES; }
bool continue_search = k_idx >= 0 && num_breakpoints > 1 && slope > 0.0;
if (!continue_search) {
if constexpr (0) {
@@ -255,11 +252,11 @@ void bound_flipping_ratio_test_t::heap_passes(const std::vector&
}
if (toc(start_time_) > settings_.time_limit) {
- entering_index = -2;
+ entering_index = RATIO_TEST_TIME_LIMIT;
return;
}
if (settings_.concurrent_halt != nullptr && *settings_.concurrent_halt == 1) {
- entering_index = -3;
+ entering_index = CONCURRENT_HALT_RETURN;
return;
}
}
diff --git a/cpp/src/dual_simplex/bound_flipping_ratio_test.hpp b/cpp/src/dual_simplex/bound_flipping_ratio_test.hpp
index d3164c623d..51b00b1097 100644
--- a/cpp/src/dual_simplex/bound_flipping_ratio_test.hpp
+++ b/cpp/src/dual_simplex/bound_flipping_ratio_test.hpp
@@ -1,6 +1,6 @@
/* clang-format off */
/*
- * SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
/* clang-format on */
@@ -8,11 +8,17 @@
#include
#include
+#include
#include
namespace cuopt::linear_programming::dual_simplex {
+#define RATIO_TEST_NO_ENTERING_VARIABLE -1
+#define RATIO_TEST_CONCURRENT_LIMIT CONCURRENT_HALT_RETURN // -2
+#define RATIO_TEST_TIME_LIMIT -3
+#define RATIO_TEST_NUMERICAL_ISSUES -4
+
template
class bound_flipping_ratio_test_t {
public:
diff --git a/cpp/src/dual_simplex/bounds_strengthening.cpp b/cpp/src/dual_simplex/bounds_strengthening.cpp
index 4114e7e097..2b20940d29 100644
--- a/cpp/src/dual_simplex/bounds_strengthening.cpp
+++ b/cpp/src/dual_simplex/bounds_strengthening.cpp
@@ -59,8 +59,7 @@ bounds_strengthening_t::bounds_strengthening_t(
const csr_matrix_t& Arow,
const std::vector& row_sense,
const std::vector& var_types)
- : bounds_changed(problem.num_cols, false),
- A(problem.A),
+ : A(problem.A),
Arow(Arow),
var_types(var_types),
delta_min_activity(problem.num_rows),
@@ -91,9 +90,10 @@ bounds_strengthening_t::bounds_strengthening_t(
template
bool bounds_strengthening_t::bounds_strengthening(
+ const simplex_solver_settings_t& settings,
+ const std::vector& bounds_changed,
std::vector& lower_bounds,
- std::vector& upper_bounds,
- const simplex_solver_settings_t& settings)
+ std::vector& upper_bounds)
{
const i_t m = A.m;
const i_t n = A.n;
@@ -104,13 +104,13 @@ bool bounds_strengthening_t::bounds_strengthening(
if (!bounds_changed.empty()) {
std::fill(constraint_changed.begin(), constraint_changed.end(), false);
- for (i_t i = 0; i < n; ++i) {
- if (bounds_changed[i]) {
- const i_t row_start = A.col_start[i];
- const i_t row_end = A.col_start[i + 1];
- for (i_t p = row_start; p < row_end; ++p) {
- const i_t j = A.i[p];
- constraint_changed[j] = true;
+ for (i_t j = 0; j < n; ++j) {
+ if (bounds_changed[j]) {
+ const i_t col_start = A.col_start[j];
+ const i_t col_end = A.col_start[j + 1];
+ for (i_t p = col_start; p < col_end; ++p) {
+ const i_t i = A.i[p];
+ constraint_changed[i] = true;
}
}
}
@@ -179,9 +179,9 @@ bool bounds_strengthening_t::bounds_strengthening(
f_t new_lb = old_lb;
f_t new_ub = old_ub;
- const i_t row_start = A.col_start[k];
- const i_t row_end = A.col_start[k + 1];
- for (i_t p = row_start; p < row_end; ++p) {
+ const i_t col_start = A.col_start[k];
+ const i_t col_end = A.col_start[k + 1];
+ for (i_t p = col_start; p < col_end; ++p) {
const i_t i = A.i[p];
if (!constraint_changed[i]) { continue; }
@@ -210,13 +210,13 @@ bool bounds_strengthening_t::bounds_strengthening(
new_lb = std::max(new_lb, lower_bounds[k]);
new_ub = std::min(new_ub, upper_bounds[k]);
- if (new_lb > new_ub + 1e-6) {
+ if (new_lb > new_ub + settings.primal_tol) {
settings.log.debug(
"Iter:: %d, Infeasible variable after update %d, %e > %e\n", iter, k, new_lb, new_ub);
return false;
}
if (new_lb != old_lb || new_ub != old_ub) {
- for (i_t p = row_start; p < row_end; ++p) {
+ for (i_t p = col_start; p < col_end; ++p) {
const i_t i = A.i[p];
constraint_changed_next[i] = true;
}
@@ -225,8 +225,8 @@ bool bounds_strengthening_t::bounds_strengthening(
lower[k] = std::min(new_lb, new_ub);
upper[k] = std::max(new_lb, new_ub);
- bool bounds_changed = lb_updated || ub_updated;
- if (bounds_changed) { num_bounds_changed++; }
+ bool bounds_updated = lb_updated || ub_updated;
+ if (bounds_updated) { num_bounds_changed++; }
}
if (num_bounds_changed == 0) { break; }
diff --git a/cpp/src/dual_simplex/bounds_strengthening.hpp b/cpp/src/dual_simplex/bounds_strengthening.hpp
index e7e218b824..b811fb1c18 100644
--- a/cpp/src/dual_simplex/bounds_strengthening.hpp
+++ b/cpp/src/dual_simplex/bounds_strengthening.hpp
@@ -1,6 +1,6 @@
/* clang-format off */
/*
- * SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
/* clang-format on */
@@ -20,11 +20,12 @@ class bounds_strengthening_t {
const std::vector& row_sense,
const std::vector& var_types);
- bool bounds_strengthening(std::vector& lower_bounds,
- std::vector& upper_bounds,
- const simplex_solver_settings_t& settings);
-
- std::vector bounds_changed;
+ // If bounds_changed is empty, all constraints are scanned for changes.
+ // Otherwise, bounds_changed must be a vector of length n, where n is the number of variables.
+ bool bounds_strengthening(const simplex_solver_settings_t& settings,
+ const std::vector& bounds_changed,
+ std::vector& lower_bounds,
+ std::vector& upper_bounds);
private:
const csc_matrix_t& A;
diff --git a/cpp/src/dual_simplex/branch_and_bound.cpp b/cpp/src/dual_simplex/branch_and_bound.cpp
index acdc9888a9..7f6e2c1921 100644
--- a/cpp/src/dual_simplex/branch_and_bound.cpp
+++ b/cpp/src/dual_simplex/branch_and_bound.cpp
@@ -7,8 +7,10 @@
#include
+#include
#include
#include
+#include
#include
#include
#include
@@ -122,9 +124,13 @@ bool check_guess(const lp_problem_t& original_lp,
}
template
-void set_uninitialized_steepest_edge_norms(std::vector& edge_norms)
+void set_uninitialized_steepest_edge_norms(const lp_problem_t& lp,
+ const std::vector& basic_list,
+ std::vector& edge_norms)
{
- for (i_t j = 0; j < edge_norms.size(); ++j) {
+ if (edge_norms.size() != lp.num_cols) { edge_norms.resize(lp.num_cols, -1.0); }
+ for (i_t k = 0; k < lp.num_rows; k++) {
+ const i_t j = basic_list[k];
if (edge_norms[j] <= 0.0) { edge_norms[j] = 1e-4; }
}
}
@@ -225,7 +231,8 @@ inline char feasible_solution_symbol(bnb_worker_type_t type)
template
branch_and_bound_t::branch_and_bound_t(
const user_problem_t& user_problem,
- const simplex_solver_settings_t& solver_settings)
+ const simplex_solver_settings_t& solver_settings,
+ f_t start_time)
: original_problem_(user_problem),
settings_(solver_settings),
original_lp_(user_problem.handle_ptr, 1, 1, 1),
@@ -236,11 +243,36 @@ branch_and_bound_t::branch_and_bound_t(
pc_(1),
solver_status_(mip_status_t::UNSET)
{
- exploration_stats_.start_time = tic();
+ exploration_stats_.start_time = start_time;
+#ifdef PRINT_CONSTRAINT_MATRIX
+ settings_.log.printf("A");
+ original_problem_.A.print_matrix();
+#endif
+
dualize_info_t dualize_info;
convert_user_problem(original_problem_, settings_, original_lp_, new_slacks_, dualize_info);
full_variable_types(original_problem_, original_lp_, var_types_);
+ // Check slack
+#ifdef CHECK_SLACKS
+ assert(new_slacks_.size() == original_lp_.num_rows);
+ for (i_t slack : new_slacks_) {
+ const i_t col_start = original_lp_.A.col_start[slack];
+ const i_t col_end = original_lp_.A.col_start[slack + 1];
+ const i_t col_len = col_end - col_start;
+ if (col_len != 1) {
+ settings_.log.printf("Slack %d has %d nzs\n", slack, col_len);
+ assert(col_len == 1);
+ }
+ const i_t i = original_lp_.A.i[col_start];
+ const f_t x = original_lp_.A.x[col_start];
+ if (std::abs(x) != 1.0) {
+ settings_.log.printf("Slack %d row %d has non-unit coefficient %e\n", slack, i, x);
+ assert(std::abs(x) == 1.0);
+ }
+ }
+#endif
+
upper_bound_ = inf;
}
@@ -267,7 +299,7 @@ void branch_and_bound_t::report_heuristic(f_t obj)
std::string user_gap = user_mip_gap(user_obj, user_lower);
settings_.log.printf(
- "H %+13.6e %+10.6e %s %9.2f\n",
+ "H %+13.6e %+10.6e %s %9.2f\n",
user_obj,
user_lower,
user_gap.c_str(),
@@ -280,27 +312,90 @@ void branch_and_bound_t::report_heuristic(f_t obj)
}
template
-void branch_and_bound_t::report(char symbol, f_t obj, f_t lower_bound, i_t node_depth)
+void branch_and_bound_t::report(
+ char symbol, f_t obj, f_t lower_bound, i_t node_depth, i_t node_int_infeas)
{
update_user_bound(lower_bound);
- i_t nodes_explored = exploration_stats_.nodes_explored;
- i_t nodes_unexplored = exploration_stats_.nodes_unexplored;
- f_t user_obj = compute_user_objective(original_lp_, obj);
- f_t user_lower = compute_user_objective(original_lp_, lower_bound);
- f_t iter_node = exploration_stats_.total_lp_iters / nodes_explored;
- std::string user_gap = user_mip_gap(user_obj, user_lower);
- settings_.log.printf("%c %10d %10lu %+13.6e %+10.6e %6d %7.1e %s %9.2f\n",
+ const i_t nodes_explored = exploration_stats_.nodes_explored;
+ const i_t nodes_unexplored = exploration_stats_.nodes_unexplored;
+ const f_t user_obj = compute_user_objective(original_lp_, obj);
+ const f_t user_lower = compute_user_objective(original_lp_, lower_bound);
+ const f_t iters = static_cast(exploration_stats_.total_lp_iters);
+ const f_t iter_node = nodes_explored > 0 ? iters / nodes_explored : iters;
+ const std::string user_gap = user_mip_gap(user_obj, user_lower);
+ settings_.log.printf("%c %10d %10lu %+13.6e %+10.6e %6d %6d %7.1e %s %9.2f\n",
symbol,
nodes_explored,
nodes_unexplored,
user_obj,
user_lower,
+ node_int_infeas,
node_depth,
iter_node,
user_gap.c_str(),
toc(exploration_stats_.start_time));
}
+template
+i_t branch_and_bound_t::find_reduced_cost_fixings(f_t upper_bound,
+ std::vector& lower_bounds,
+ std::vector& upper_bounds)
+{
+ std::vector reduced_costs = root_relax_soln_.z;
+ lower_bounds = original_lp_.lower;
+ upper_bounds = original_lp_.upper;
+ std::vector bounds_changed(original_lp_.num_cols, false);
+ const f_t root_obj = compute_objective(original_lp_, root_relax_soln_.x);
+ const f_t threshold = 100.0 * settings_.integer_tol;
+ const f_t weaken = settings_.integer_tol;
+ const f_t fixed_tol = settings_.fixed_tol;
+ i_t num_improved = 0;
+ i_t num_fixed = 0;
+ i_t num_cols_to_check = reduced_costs.size(); // Reduced costs will be smaller than the original
+ // problem because we have added slacks for cuts
+ for (i_t j = 0; j < num_cols_to_check; j++) {
+ if (std::isfinite(reduced_costs[j]) && std::abs(reduced_costs[j]) > threshold) {
+ const f_t lower_j = original_lp_.lower[j];
+ const f_t upper_j = original_lp_.upper[j];
+ const f_t abs_gap = upper_bound - root_obj;
+ f_t reduced_cost_upper_bound = upper_j;
+ f_t reduced_cost_lower_bound = lower_j;
+ if (lower_j > -inf && reduced_costs[j] > 0) {
+ const f_t new_upper_bound = lower_j + abs_gap / reduced_costs[j];
+ reduced_cost_upper_bound = var_types_[j] == variable_type_t::INTEGER
+ ? std::floor(new_upper_bound + weaken)
+ : new_upper_bound;
+ if (reduced_cost_upper_bound < upper_j && var_types_[j] == variable_type_t::INTEGER) {
+ num_improved++;
+ upper_bounds[j] = reduced_cost_upper_bound;
+ bounds_changed[j] = true;
+ }
+ }
+ if (upper_j < inf && reduced_costs[j] < 0) {
+ const f_t new_lower_bound = upper_j + abs_gap / reduced_costs[j];
+ reduced_cost_lower_bound = var_types_[j] == variable_type_t::INTEGER
+ ? std::ceil(new_lower_bound - weaken)
+ : new_lower_bound;
+ if (reduced_cost_lower_bound > lower_j && var_types_[j] == variable_type_t::INTEGER) {
+ num_improved++;
+ lower_bounds[j] = reduced_cost_lower_bound;
+ bounds_changed[j] = true;
+ }
+ }
+ if (var_types_[j] == variable_type_t::INTEGER &&
+ reduced_cost_upper_bound <= reduced_cost_lower_bound + fixed_tol) {
+ num_fixed++;
+ }
+ }
+ }
+
+ if (num_fixed > 0 || num_improved > 0) {
+ settings_.log.printf(
+ "Reduced costs: Found %d improved bounds and %d fixed variables\n", num_improved, num_fixed);
+ }
+ return num_fixed;
+}
+
template
void branch_and_bound_t::update_user_bound(f_t lower_bound)
{
@@ -312,6 +407,7 @@ void branch_and_bound_t::update_user_bound(f_t lower_bound)
template
void branch_and_bound_t::set_new_solution(const std::vector& solution)
{
+ mutex_original_lp_.lock();
if (solution.size() != original_problem_.num_cols) {
settings_.log.printf(
"Solution size mismatch %ld %d\n", solution.size(), original_problem_.num_cols);
@@ -319,17 +415,29 @@ void branch_and_bound_t::set_new_solution(const std::vector& solu
std::vector crushed_solution;
crush_primal_solution(
original_problem_, original_lp_, solution, new_slacks_, crushed_solution);
- f_t obj = compute_objective(original_lp_, crushed_solution);
+ f_t obj = compute_objective(original_lp_, crushed_solution);
+ mutex_original_lp_.unlock();
bool is_feasible = false;
bool attempt_repair = false;
mutex_upper_.lock();
- if (obj < upper_bound_) {
+ f_t current_upper_bound = upper_bound_;
+ mutex_upper_.unlock();
+ if (obj < current_upper_bound) {
f_t primal_err;
f_t bound_err;
i_t num_fractional;
+ mutex_original_lp_.lock();
+ if (crushed_solution.size() != original_lp_.num_cols) {
+ // original problem has been modified since the solution was crushed
+ // we need to re-crush the solution
+ crush_primal_solution(
+ original_problem_, original_lp_, solution, new_slacks_, crushed_solution);
+ }
is_feasible = check_guess(
original_lp_, settings_, var_types_, crushed_solution, primal_err, bound_err, num_fractional);
- if (is_feasible) {
+ mutex_original_lp_.unlock();
+ mutex_upper_.lock();
+ if (is_feasible && obj < upper_bound_) {
upper_bound_ = obj;
incumbent_.set_incumbent_solution(obj, crushed_solution);
} else {
@@ -344,10 +452,11 @@ void branch_and_bound_t::set_new_solution(const std::vector& solu
num_fractional);
}
}
+ mutex_upper_.unlock();
} else {
settings_.log.debug("Solution objective not better than current upper_bound_. Not accepted.\n");
}
- mutex_upper_.unlock();
+
if (is_feasible) { report_heuristic(obj); }
if (attempt_repair) {
mutex_repair_.lock();
@@ -459,6 +568,35 @@ void branch_and_bound_t::repair_heuristic_solutions()
}
}
+template
+void branch_and_bound_t::set_solution_at_root(mip_solution_t& solution,
+ const cut_info_t& cut_info)
+{
+ mutex_upper_.lock();
+ incumbent_.set_incumbent_solution(root_objective_, root_relax_soln_.x);
+ upper_bound_ = root_objective_;
+ mutex_upper_.unlock();
+
+ print_cut_info(settings_, cut_info);
+
+ // We should be done here
+ uncrush_primal_solution(original_problem_, original_lp_, incumbent_.x, solution.x);
+ solution.objective = incumbent_.objective;
+ solution.lower_bound = root_objective_;
+ solution.nodes_explored = 0;
+ solution.simplex_iterations = root_relax_soln_.iterations;
+ settings_.log.printf("Optimal solution found at root node. Objective %.16e. Time %.2f.\n",
+ compute_user_objective(original_lp_, root_objective_),
+ toc(exploration_stats_.start_time));
+
+ if (settings_.solution_callback != nullptr) {
+ settings_.solution_callback(solution.x, solution.objective);
+ }
+ if (settings_.heuristic_preemption_callback != nullptr) {
+ settings_.heuristic_preemption_callback();
+ }
+}
+
template
void branch_and_bound_t::set_final_solution(mip_solution_t& solution,
f_t lower_bound)
@@ -491,6 +629,9 @@ void branch_and_bound_t::set_final_solution(mip_solution_t&
if (gap <= settings_.absolute_mip_gap_tol || gap_rel <= settings_.relative_mip_gap_tol) {
solver_status_ = mip_status_t::OPTIMAL;
+#ifdef CHECK_CUTS_AGAINST_SAVED_SOLUTION
+ if (settings_.sub_mip == 0) { write_solution_for_cut_verification(original_lp_, incumbent_.x); }
+#endif
if (gap > 0 && gap <= settings_.absolute_mip_gap_tol) {
settings_.log.printf("Optimal solution found within absolute MIP gap tolerance (%.1e)\n",
settings_.absolute_mip_gap_tol);
@@ -542,7 +683,7 @@ void branch_and_bound_t::add_feasible_solution(f_t leaf_objective,
if (leaf_objective < upper_bound_) {
incumbent_.set_incumbent_solution(leaf_objective, leaf_solution);
upper_bound_ = leaf_objective;
- report(feasible_solution_symbol(thread_type), leaf_objective, get_lower_bound(), leaf_depth);
+ report(feasible_solution_symbol(thread_type), leaf_objective, get_lower_bound(), leaf_depth, 0);
send_solution = true;
}
@@ -621,11 +762,40 @@ branch_variable_t branch_and_bound_t::variable_selection(
}
}
+template
+void branch_and_bound_t::initialize_diving_heuristics_settings(
+ std::vector& diving_strategies)
+{
+ diving_strategies.reserve(4);
+
+ if (settings_.diving_settings.pseudocost_diving != 0) {
+ diving_strategies.push_back(bnb_worker_type_t::PSEUDOCOST_DIVING);
+ }
+
+ if (settings_.diving_settings.line_search_diving != 0) {
+ diving_strategies.push_back(bnb_worker_type_t::LINE_SEARCH_DIVING);
+ }
+
+ if (settings_.diving_settings.guided_diving != 0) {
+ diving_strategies.push_back(bnb_worker_type_t::GUIDED_DIVING);
+ }
+
+ if (settings_.diving_settings.coefficient_diving != 0) {
+ diving_strategies.push_back(bnb_worker_type_t::COEFFICIENT_DIVING);
+ calculate_variable_locks(original_lp_, var_up_locks_, var_down_locks_);
+ }
+
+ if (diving_strategies.empty()) {
+ settings_.log.printf("Warning: All diving heuristics are disabled!\n");
+ }
+}
+
template
dual::status_t branch_and_bound_t::solve_node_lp(
mip_node_t* node_ptr,
lp_problem_t& leaf_problem,
lp_solution_t& leaf_solution,
+ std::vector& leaf_edge_norms,
basis_update_mpf_t& basis_factors,
std::vector& basic_list,
std::vector& nonbasic_list,
@@ -637,6 +807,41 @@ dual::status_t branch_and_bound_t::solve_node_lp(
bnb_stats_t& stats,
logger_t& log)
{
+#ifdef DEBUG_BRANCHING
+ i_t num_integer_variables = 0;
+ for (i_t j = 0; j < original_lp_.num_cols; j++) {
+ if (var_types_[j] == variable_type_t::INTEGER) { num_integer_variables++; }
+ }
+ if (node_ptr->depth > num_integer_variables) {
+ std::vector branched_variables(original_lp_.num_cols, 0);
+ std::vector branched_lower(original_lp_.num_cols, std::numeric_limits::quiet_NaN());
+ std::vector branched_upper(original_lp_.num_cols, std::numeric_limits::quiet_NaN());
+ mip_node_t* parent = node_ptr->parent;
+ while (parent != nullptr) {
+ if (original_lp_.lower[parent->branch_var] != 0.0 ||
+ original_lp_.upper[parent->branch_var] != 1.0) {
+ break;
+ }
+ if (branched_variables[parent->branch_var] == 1) {
+ printf(
+ "Variable %d already branched. Previous lower %e upper %e. Current lower %e upper %e.\n",
+ parent->branch_var,
+ branched_lower[parent->branch_var],
+ branched_upper[parent->branch_var],
+ parent->branch_var_lower,
+ parent->branch_var_upper);
+ }
+ branched_variables[parent->branch_var] = 1;
+ branched_lower[parent->branch_var] = parent->branch_var_lower;
+ branched_upper[parent->branch_var] = parent->branch_var_upper;
+ parent = parent->parent;
+ }
+ if (parent == nullptr) {
+ printf("Depth %d > num_integer_variables %d\n", node_ptr->depth, num_integer_variables);
+ }
+ }
+#endif
+
std::vector& leaf_vstatus = node_ptr->vstatus;
assert(leaf_vstatus.size() == leaf_problem.num_cols);
@@ -678,29 +883,27 @@ dual::status_t branch_and_bound_t::solve_node_lp(
#endif
// Reset the bound_changed markers
- std::fill(node_presolver.bounds_changed.begin(), node_presolver.bounds_changed.end(), false);
+ std::vector bounds_changed(original_lp_.num_cols, false);
// Set the correct bounds for the leaf problem
if (recompute_bounds_and_basis) {
leaf_problem.lower = root_lower;
leaf_problem.upper = root_upper;
- node_ptr->get_variable_bounds(
- leaf_problem.lower, leaf_problem.upper, node_presolver.bounds_changed);
+ node_ptr->get_variable_bounds(leaf_problem.lower, leaf_problem.upper, bounds_changed);
} else {
node_ptr->update_branched_variable_bounds(
- leaf_problem.lower, leaf_problem.upper, node_presolver.bounds_changed);
+ leaf_problem.lower, leaf_problem.upper, bounds_changed);
}
- bool feasible =
- node_presolver.bounds_strengthening(leaf_problem.lower, leaf_problem.upper, lp_settings);
+ bool feasible = node_presolver.bounds_strengthening(
+ lp_settings, bounds_changed, leaf_problem.lower, leaf_problem.upper);
dual::status_t lp_status = dual::status_t::DUAL_UNBOUNDED;
if (feasible) {
- i_t node_iter = 0;
- f_t lp_start_time = tic();
- std::vector leaf_edge_norms = edge_norms_; // = node.steepest_edge_norms;
+ i_t node_iter = 0;
+ f_t lp_start_time = tic();
lp_status = dual_phase2_with_advanced_basis(2,
0,
@@ -717,7 +920,7 @@ dual::status_t branch_and_bound_t::solve_node_lp(
leaf_edge_norms);
if (lp_status == dual::status_t::NUMERICAL) {
- log.printf("Numerical issue node %d. Resolving from scratch.\n", node_ptr->node_id);
+ log.debug("Numerical issue node %d. Resolving from scratch.\n", node_ptr->node_id);
lp_status_t second_status = solve_linear_program_with_advanced_basis(leaf_problem,
lp_start_time,
lp_settings,
@@ -748,6 +951,7 @@ std::pair branch_and_bound_t::upd
search_tree_t& search_tree,
lp_problem_t& leaf_problem,
lp_solution_t& leaf_solution,
+ std::vector& leaf_edge_norms,
bnb_worker_type_t thread_type,
dual::status_t lp_status,
logger_t& log)
@@ -776,6 +980,23 @@ std::pair branch_and_bound_t::upd
i_t leaf_num_fractional =
fractional_variables(settings_, leaf_solution.x, var_types_, leaf_fractional);
+#ifdef DEBUG_FRACTIONAL_FIXED
+ // Check if any of the fractional variables were fixed to their bounds
+ for (i_t j : leaf_fractional) {
+ if (leaf_problem.lower[j] == leaf_problem.upper[j]) {
+ printf(
+ "Node %d: Fixed variable %d has a fractional value %e. Lower %e upper %e. Variable "
+ "status %d\n",
+ node_ptr->node_id,
+ j,
+ leaf_solution.x[j],
+ leaf_problem.lower[j],
+ leaf_problem.upper[j],
+ leaf_vstatus[j]);
+ }
+ }
+#endif
+
f_t leaf_objective = compute_objective(leaf_problem, leaf_solution.x);
node_ptr->lower_bound = leaf_objective;
search_tree.graphviz_node(log, node_ptr, "lower bound", leaf_objective);
@@ -815,8 +1036,13 @@ std::pair branch_and_bound_t::upd
pc_.obj_estimate(leaf_fractional, leaf_solution.x, node_ptr->lower_bound, pc_log);
}
- search_tree.branch(
- node_ptr, branch_var, leaf_solution.x[branch_var], leaf_vstatus, leaf_problem, log);
+ search_tree.branch(node_ptr,
+ branch_var,
+ leaf_solution.x[branch_var],
+ leaf_num_fractional,
+ leaf_vstatus,
+ leaf_problem,
+ log);
search_tree.update(node_ptr, node_status_t::HAS_CHILDREN);
return {node_status_t::HAS_CHILDREN, round_dir};
@@ -877,7 +1103,7 @@ void branch_and_bound_t::exploration_ramp_up(mip_node_t* nod
bool should_report = should_report_.exchange(false);
if (should_report) {
- report(' ', upper_bound, root_objective_, node->depth);
+ report(' ', upper_bound, root_objective_, node->depth, node->integer_infeasible);
exploration_stats_.nodes_since_last_log = 0;
exploration_stats_.last_log = tic();
should_report_ = true;
@@ -900,9 +1126,11 @@ void branch_and_bound_t::exploration_ramp_up(mip_node_t* nod
std::vector nonbasic_list;
lp_solution_t leaf_solution(leaf_problem.num_rows, leaf_problem.num_cols);
- dual::status_t lp_status = solve_node_lp(node,
+ std::vector leaf_edge_norms = edge_norms_; // = node.steepest_edge_norms;
+ dual::status_t lp_status = solve_node_lp(node,
leaf_problem,
leaf_solution,
+ leaf_edge_norms,
basis_factors,
basic_list,
nonbasic_list,
@@ -926,6 +1154,7 @@ void branch_and_bound_t::exploration_ramp_up(mip_node_t* nod
search_tree_,
leaf_problem,
leaf_solution,
+ leaf_edge_norms,
bnb_worker_type_t::BEST_FIRST,
lp_status,
settings_.log);
@@ -1000,7 +1229,7 @@ void branch_and_bound_t::plunge_from(i_t task_id,
abs_gap < 10 * settings_.absolute_mip_gap_tol) &&
time_since_last_log >= 1) ||
(time_since_last_log > 30) || now > settings_.time_limit) {
- report(' ', upper_bound, get_lower_bound(), node_ptr->depth);
+ report(' ', upper_bound, get_lower_bound(), node_ptr->depth, node_ptr->integer_infeasible);
exploration_stats_.last_log = tic();
exploration_stats_.nodes_since_last_log = 0;
}
@@ -1016,9 +1245,11 @@ void branch_and_bound_t::plunge_from(i_t task_id,
}
lp_solution_t leaf_solution(leaf_problem.num_rows, leaf_problem.num_cols);
- dual::status_t lp_status = solve_node_lp(node_ptr,
+ std::vector leaf_edge_norms = edge_norms_; // = node.steepest_edge_norms;
+ dual::status_t lp_status = solve_node_lp(node_ptr,
leaf_problem,
leaf_solution,
+ leaf_edge_norms,
basis_factors,
basic_list,
nonbasic_list,
@@ -1045,6 +1276,7 @@ void branch_and_bound_t::plunge_from(i_t task_id,
search_tree_,
leaf_problem,
leaf_solution,
+ leaf_edge_norms,
bnb_worker_type_t::BEST_FIRST,
lp_status,
settings_.log);
@@ -1187,9 +1419,11 @@ void branch_and_bound_t::dive_from(mip_node_t& start_node,
if (dive_stats.nodes_explored > diving_node_limit) { break; }
lp_solution_t leaf_solution(leaf_problem.num_rows, leaf_problem.num_cols);
- dual::status_t lp_status = solve_node_lp(node_ptr,
+ std::vector leaf_edge_norms = edge_norms_; // = node.steepest_edge_norms;
+ dual::status_t lp_status = solve_node_lp(node_ptr,
leaf_problem,
leaf_solution,
+ leaf_edge_norms,
basis_factors,
basic_list,
nonbasic_list,
@@ -1210,9 +1444,15 @@ void branch_and_bound_t::dive_from(mip_node_t& start_node,
++dive_stats.nodes_explored;
- auto [node_status, round_dir] =
- update_tree(node_ptr, dive_tree, leaf_problem, leaf_solution, diving_type, lp_status, log);
- recompute_bounds_and_basis = node_status != node_status_t::HAS_CHILDREN;
+ auto [node_status, round_dir] = update_tree(node_ptr,
+ dive_tree,
+ leaf_problem,
+ leaf_solution,
+ leaf_edge_norms,
+ diving_type,
+ lp_status,
+ log);
+ recompute_bounds_and_basis = node_status != node_status_t::HAS_CHILDREN;
if (node_status == node_status_t::HAS_CHILDREN) {
if (round_dir == rounding_direction_t::UP) {
@@ -1239,6 +1479,7 @@ void branch_and_bound_t::diving_thread(bnb_worker_type_t diving_type)
lp_problem_t leaf_problem = original_lp_;
std::vector row_sense;
bounds_strengthening_t node_presolver(leaf_problem, Arow_, row_sense, var_types_);
+ std::vector bounds_changed(original_lp_.num_cols, false);
const i_t m = leaf_problem.num_rows;
basis_update_mpf_t basis_factors(m, settings_.refactor_frequency);
@@ -1254,7 +1495,7 @@ void branch_and_bound_t::diving_thread(bnb_worker_type_t diving_type)
if (reset_starting_bounds) {
start_lower = original_lp_.lower;
start_upper = original_lp_.upper;
- std::fill(node_presolver.bounds_changed.begin(), node_presolver.bounds_changed.end(), false);
+ std::fill(bounds_changed.begin(), bounds_changed.end(), false);
reset_starting_bounds = false;
}
@@ -1268,8 +1509,7 @@ void branch_and_bound_t::diving_thread(bnb_worker_type_t diving_type)
std::optional> start_node = std::nullopt;
if (node_ptr.has_value()) {
- node_ptr.value()->get_variable_bounds(
- start_lower, start_upper, node_presolver.bounds_changed);
+ node_ptr.value()->get_variable_bounds(start_lower, start_upper, bounds_changed);
start_node = node_ptr.value()->detach_copy();
}
node_queue_.unlock();
@@ -1278,7 +1518,8 @@ void branch_and_bound_t::diving_thread(bnb_worker_type_t diving_type)
reset_starting_bounds = true;
if (upper_bound_ < start_node->lower_bound) { continue; }
- bool is_feasible = node_presolver.bounds_strengthening(start_lower, start_upper, settings_);
+ bool is_feasible =
+ node_presolver.bounds_strengthening(settings_, bounds_changed, start_lower, start_upper);
if (!is_feasible) { continue; }
dive_from(start_node.value(),
@@ -1296,7 +1537,13 @@ void branch_and_bound_t::diving_thread(bnb_worker_type_t diving_type)
template
lp_status_t branch_and_bound_t::solve_root_relaxation(
- simplex_solver_settings_t const& lp_settings)
+ simplex_solver_settings_t const& lp_settings,
+ lp_solution_t& root_relax_soln,
+ std::vector& root_vstatus,
+ basis_update_mpf_t& basis_update,
+ std::vector& basic_list,
+ std::vector& nonbasic_list,
+ std::vector& edge_norms)
{
f_t start_time = tic();
f_t user_objective = 0;
@@ -1307,13 +1554,16 @@ lp_status_t branch_and_bound_t::solve_root_relaxation(
lp_status_t root_status;
std::future root_status_future;
root_status_future = std::async(std::launch::async,
- &solve_linear_program_advanced,
+ &solve_linear_program_with_advanced_basis,
std::ref(original_lp_),
exploration_stats_.start_time,
std::ref(lp_settings),
- std::ref(root_relax_soln_),
- std::ref(root_vstatus_),
- std::ref(edge_norms_));
+ std::ref(root_relax_soln),
+ std::ref(basis_update),
+ std::ref(basic_list),
+ std::ref(nonbasic_list),
+ std::ref(root_vstatus),
+ std::ref(edge_norms));
// Wait for the root relaxation solution to be sent by the diversity manager or dual simplex
// to finish
while (!root_crossover_solution_set_.load(std::memory_order_acquire) &&
@@ -1355,17 +1605,55 @@ lp_status_t branch_and_bound_t::solve_root_relaxation(
// Check if crossover was stopped by dual simplex
if (crossover_status == crossover_status_t::OPTIMAL) {
- set_root_concurrent_halt(1); // Stop dual simplex
- root_status = root_status_future.get();
-
+ set_root_concurrent_halt(1); // Stop dual simplex
+ root_status = root_status_future.get(); // Wait for dual simplex to finish
+ set_root_concurrent_halt(0); // Clear the concurrent halt flag
// Override the root relaxation solution with the crossover solution
- root_relax_soln_ = root_crossover_soln_;
- root_vstatus_ = crossover_vstatus_;
- root_status = lp_status_t::OPTIMAL;
- user_objective = root_crossover_soln_.user_objective;
- iter = root_crossover_soln_.iterations;
- solver_name = "Barrier/PDLP and Crossover";
+ root_relax_soln = root_crossover_soln_;
+ root_vstatus = crossover_vstatus_;
+ root_status = lp_status_t::OPTIMAL;
+ basic_list.clear();
+ nonbasic_list.reserve(original_lp_.num_cols - original_lp_.num_rows);
+ nonbasic_list.clear();
+ // Get the basic list and nonbasic list from the vstatus
+ for (i_t j = 0; j < original_lp_.num_cols; j++) {
+ if (crossover_vstatus_[j] == variable_status_t::BASIC) {
+ basic_list.push_back(j);
+ } else {
+ nonbasic_list.push_back(j);
+ }
+ }
+ if (basic_list.size() != original_lp_.num_rows) {
+ settings_.log.printf(
+ "basic_list size %d != m %d\n", basic_list.size(), original_lp_.num_rows);
+ assert(basic_list.size() == original_lp_.num_rows);
+ }
+ if (nonbasic_list.size() != original_lp_.num_cols - original_lp_.num_rows) {
+ settings_.log.printf("nonbasic_list size %d != n - m %d\n",
+ nonbasic_list.size(),
+ original_lp_.num_cols - original_lp_.num_rows);
+ assert(nonbasic_list.size() == original_lp_.num_cols - original_lp_.num_rows);
+ }
+ // Populate the basis_update from the crossover vstatus
+ i_t refactor_status = basis_update.refactor_basis(original_lp_.A,
+ root_crossover_settings,
+ original_lp_.lower,
+ original_lp_.upper,
+ basic_list,
+ nonbasic_list,
+ crossover_vstatus_);
+ if (refactor_status != 0) {
+ settings_.log.printf("Failed to refactor basis. %d deficient columns.\n", refactor_status);
+ assert(refactor_status == 0);
+ root_status = lp_status_t::NUMERICAL_ISSUES;
+ }
+ // Set the edge norms to a default value
+ edge_norms.resize(original_lp_.num_cols, -1.0);
+ set_uninitialized_steepest_edge_norms(original_lp_, basic_list, edge_norms);
+ user_objective = root_crossover_soln_.user_objective;
+ iter = root_crossover_soln_.iterations;
+ solver_name = "Barrier/PDLP and Crossover";
} else {
root_status = root_status_future.get();
user_objective = root_relax_soln_.user_objective;
@@ -1410,28 +1698,7 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solut
original_lp_.A.to_compressed_row(Arow_);
std::vector diving_strategies;
- diving_strategies.reserve(4);
-
- if (settings_.diving_settings.pseudocost_diving != 0) {
- diving_strategies.push_back(bnb_worker_type_t::PSEUDOCOST_DIVING);
- }
-
- if (settings_.diving_settings.line_search_diving != 0) {
- diving_strategies.push_back(bnb_worker_type_t::LINE_SEARCH_DIVING);
- }
-
- if (settings_.diving_settings.guided_diving != 0) {
- diving_strategies.push_back(bnb_worker_type_t::GUIDED_DIVING);
- }
-
- if (settings_.diving_settings.coefficient_diving != 0) {
- diving_strategies.push_back(bnb_worker_type_t::COEFFICIENT_DIVING);
- calculate_variable_locks(original_lp_, var_up_locks_, var_down_locks_);
- }
-
- if (diving_strategies.empty()) {
- settings_.log.printf("Warning: All diving heuristics are disabled!\n");
- }
+ initialize_diving_heuristics_settings(diving_strategies);
if (guess_.size() != 0) {
std::vector crushed_guess;
@@ -1452,25 +1719,37 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solut
root_relax_soln_.resize(original_lp_.num_rows, original_lp_.num_cols);
- lp_status_t root_status;
+ i_t original_rows = original_lp_.num_rows;
simplex_solver_settings_t lp_settings = settings_;
lp_settings.inside_mip = 1;
+ lp_settings.scale_columns = false;
lp_settings.concurrent_halt = get_root_concurrent_halt();
- // RINS/SUBMIP path
+ std::vector basic_list(original_lp_.num_rows);
+ std::vector nonbasic_list;
+ basis_update_mpf_t basis_update(original_lp_.num_rows, settings_.refactor_frequency);
+ lp_status_t root_status;
if (!enable_concurrent_lp_root_solve()) {
+ // RINS/SUBMIP path
settings_.log.printf("\nSolving LP root relaxation with dual simplex\n");
- root_status = solve_linear_program_advanced(original_lp_,
- exploration_stats_.start_time,
- lp_settings,
- root_relax_soln_,
- root_vstatus_,
- edge_norms_);
-
+ root_status = solve_linear_program_with_advanced_basis(original_lp_,
+ exploration_stats_.start_time,
+ lp_settings,
+ root_relax_soln_,
+ basis_update,
+ basic_list,
+ nonbasic_list,
+ root_vstatus_,
+ edge_norms_);
} else {
settings_.log.printf("\nSolving LP root relaxation in concurrent mode\n");
- root_status = solve_root_relaxation(lp_settings);
+ root_status = solve_root_relaxation(lp_settings,
+ root_relax_soln_,
+ root_vstatus_,
+ basis_update,
+ basic_list,
+ nonbasic_list,
+ edge_norms_);
}
-
exploration_stats_.total_lp_iters = root_relax_soln_.iterations;
exploration_stats_.total_lp_solve_time = toc(exploration_stats_.start_time);
@@ -1491,15 +1770,19 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solut
}
return mip_status_t::UNBOUNDED;
}
-
if (root_status == lp_status_t::TIME_LIMIT) {
solver_status_ = mip_status_t::TIME_LIMIT;
set_final_solution(solution, -inf);
return solver_status_;
}
+ if (root_status == lp_status_t::NUMERICAL_ISSUES) {
+ solver_status_ = mip_status_t::NUMERICAL;
+ set_final_solution(solution, -inf);
+ return solver_status_;
+ }
assert(root_vstatus_.size() == original_lp_.num_cols);
- set_uninitialized_steepest_edge_norms(edge_norms_);
+ set_uninitialized_steepest_edge_norms(original_lp_, basic_list, edge_norms_);
root_objective_ = compute_objective(original_lp_, root_relax_soln_.x);
local_lower_bounds_.assign(settings_.num_bfs_workers, root_objective_);
@@ -1520,33 +1803,280 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solut
}
std::vector fractional;
- const i_t num_fractional =
- fractional_variables(settings_, root_relax_soln_.x, var_types_, fractional);
+ i_t num_fractional = fractional_variables(settings_, root_relax_soln_.x, var_types_, fractional);
+ cut_info_t cut_info;
if (num_fractional == 0) {
- mutex_upper_.lock();
- incumbent_.set_incumbent_solution(root_objective_, root_relax_soln_.x);
- upper_bound_ = root_objective_;
- mutex_upper_.unlock();
- // We should be done here
- uncrush_primal_solution(original_problem_, original_lp_, incumbent_.x, solution.x);
- solution.objective = incumbent_.objective;
- solution.lower_bound = root_objective_;
- solution.nodes_explored = 0;
- solution.simplex_iterations = root_relax_soln_.iterations;
- settings_.log.printf("Optimal solution found at root node. Objective %.16e. Time %.2f.\n",
- compute_user_objective(original_lp_, root_objective_),
- toc(exploration_stats_.start_time));
+ set_solution_at_root(solution, cut_info);
+ return mip_status_t::OPTIMAL;
+ }
- if (settings_.solution_callback != nullptr) {
- settings_.solution_callback(solution.x, solution.objective);
- }
- if (settings_.heuristic_preemption_callback != nullptr) {
- settings_.heuristic_preemption_callback();
+ is_running = true;
+ lower_bound_ceiling_ = inf;
+
+ if (num_fractional != 0 && settings_.max_cut_passes > 0) {
+ settings_.log.printf(
+ " | Explored | Unexplored | Objective | Bound | IntInf | Depth | Iter/Node | "
+ "Gap "
+ "| Time |\n");
+ }
+
+ cut_pool_t cut_pool(original_lp_.num_cols, settings_);
+ cut_generation_t cut_generation(
+ cut_pool, original_lp_, settings_, Arow_, new_slacks_, var_types_);
+
+ std::vector saved_solution;
+#ifdef CHECK_CUTS_AGAINST_SAVED_SOLUTION
+ read_saved_solution_for_cut_verification(original_lp_, settings_, saved_solution);
+#endif
+
+ f_t last_upper_bound = std::numeric_limits::infinity();
+ f_t last_objective = root_objective_;
+ f_t root_relax_objective = root_objective_;
+
+ i_t cut_pool_size = 0;
+ for (i_t cut_pass = 0; cut_pass < settings_.max_cut_passes; cut_pass++) {
+ if (num_fractional == 0) {
+ set_solution_at_root(solution, cut_info);
+ return mip_status_t::OPTIMAL;
+ } else {
+#ifdef PRINT_FRACTIONAL_INFO
+ settings_.log.printf(
+ "Found %d fractional variables on cut pass %d\n", num_fractional, cut_pass);
+ for (i_t j : fractional) {
+ settings_.log.printf("Fractional variable %d lower %e value %e upper %e\n",
+ j,
+ original_lp_.lower[j],
+ root_relax_soln_.x[j],
+ original_lp_.upper[j]);
+ }
+#endif
+
+ // Generate cuts and add them to the cut pool
+ f_t cut_start_time = tic();
+ cut_generation.generate_cuts(original_lp_,
+ settings_,
+ Arow_,
+ new_slacks_,
+ var_types_,
+ basis_update,
+ root_relax_soln_.x,
+ basic_list,
+ nonbasic_list);
+ f_t cut_generation_time = toc(cut_start_time);
+ if (cut_generation_time > 1.0) {
+ settings_.log.debug("Cut generation time %.2f seconds\n", cut_generation_time);
+ }
+ // Score the cuts
+ f_t score_start_time = tic();
+ cut_pool.score_cuts(root_relax_soln_.x);
+ f_t score_time = toc(score_start_time);
+ if (score_time > 1.0) { settings_.log.debug("Cut scoring time %.2f seconds\n", score_time); }
+ // Get the best cuts from the cut pool
+ csr_matrix_t cuts_to_add(0, original_lp_.num_cols, 0);
+ std::vector cut_rhs;
+ std::vector cut_types;
+ i_t num_cuts = cut_pool.get_best_cuts(cuts_to_add, cut_rhs, cut_types);
+ if (num_cuts == 0) { break; }
+ cut_info.record_cut_types(cut_types);
+#ifdef PRINT_CUT_POOL_TYPES
+ cut_pool.print_cutpool_types();
+ print_cut_types("In LP ", cut_types, settings_);
+ printf("Cut pool size: %d\n", cut_pool.pool_size());
+#endif
+
+#ifdef CHECK_CUT_MATRIX
+ if (cuts_to_add.check_matrix() != 0) {
+ settings_.log.printf("Bad cuts matrix\n");
+ for (i_t i = 0; i < static_cast(cut_types.size()); ++i) {
+ settings_.log.printf("row %d cut type %d\n", i, cut_types[i]);
+ }
+ return mip_status_t::NUMERICAL;
+ }
+#endif
+ // Check against saved solution
+#ifdef CHECK_CUTS_AGAINST_SAVED_SOLUTION
+ verify_cuts_against_saved_solution(cuts_to_add, cut_rhs, saved_solution);
+#endif
+ cut_pool_size = cut_pool.pool_size();
+
+ // Resolve the LP with the new cuts
+ settings_.log.debug(
+ "Solving LP with %d cuts (%d cut nonzeros). Cuts in pool %d. Total constraints %d\n",
+ num_cuts,
+ cuts_to_add.row_start[cuts_to_add.m],
+ cut_pool.pool_size(),
+ cuts_to_add.m + original_lp_.num_rows);
+ lp_settings.log.log = false;
+
+ f_t add_cuts_start_time = tic();
+ mutex_original_lp_.lock();
+ i_t add_cuts_status = add_cuts(settings_,
+ cuts_to_add,
+ cut_rhs,
+ original_lp_,
+ new_slacks_,
+ root_relax_soln_,
+ basis_update,
+ basic_list,
+ nonbasic_list,
+ root_vstatus_,
+ edge_norms_);
+ var_types_.resize(original_lp_.num_cols, variable_type_t::CONTINUOUS);
+ mutex_original_lp_.unlock();
+ f_t add_cuts_time = toc(add_cuts_start_time);
+ if (add_cuts_time > 1.0) {
+ settings_.log.debug("Add cuts time %.2f seconds\n", add_cuts_time);
+ }
+ if (add_cuts_status != 0) {
+ settings_.log.printf("Failed to add cuts\n");
+ return mip_status_t::NUMERICAL;
+ }
+
+ if (settings_.reduced_cost_strengthening >= 1 && upper_bound_.load() < last_upper_bound) {
+ mutex_upper_.lock();
+ last_upper_bound = upper_bound_.load();
+ std::vector lower_bounds;
+ std::vector upper_bounds;
+ find_reduced_cost_fixings(upper_bound_.load(), lower_bounds, upper_bounds);
+ mutex_upper_.unlock();
+ mutex_original_lp_.lock();
+ original_lp_.lower = lower_bounds;
+ original_lp_.upper = upper_bounds;
+ mutex_original_lp_.unlock();
+ }
+
+ // Try to do bound strengthening
+ std::vector bounds_changed(original_lp_.num_cols, true);
+ std::vector row_sense;
+#ifdef CHECK_MATRICES
+ settings_.log.printf("Before A check\n");
+ original_lp_.A.check_matrix();
+#endif
+ original_lp_.A.to_compressed_row(Arow_);
+
+ f_t node_presolve_start_time = tic();
+ bounds_strengthening_t node_presolve(original_lp_, Arow_, row_sense, var_types_);
+ std::vector new_lower = original_lp_.lower;
+ std::vector new_upper = original_lp_.upper;
+ bool feasible =
+ node_presolve.bounds_strengthening(settings_, bounds_changed, new_lower, new_upper);
+ mutex_original_lp_.lock();
+ original_lp_.lower = new_lower;
+ original_lp_.upper = new_upper;
+ mutex_original_lp_.unlock();
+ f_t node_presolve_time = toc(node_presolve_start_time);
+ if (node_presolve_time > 1.0) {
+ settings_.log.debug("Node presolve time %.2f seconds\n", node_presolve_time);
+ }
+ if (!feasible) {
+ settings_.log.printf("Bound strengthening detected infeasibility\n");
+ return mip_status_t::INFEASIBLE;
+ }
+
+ i_t iter = 0;
+ bool initialize_basis = false;
+ lp_settings.concurrent_halt = NULL;
+ f_t dual_phase2_start_time = tic();
+ dual::status_t cut_status = dual_phase2_with_advanced_basis(2,
+ 0,
+ initialize_basis,
+ exploration_stats_.start_time,
+ original_lp_,
+ lp_settings,
+ root_vstatus_,
+ basis_update,
+ basic_list,
+ nonbasic_list,
+ root_relax_soln_,
+ iter,
+ edge_norms_);
+ f_t dual_phase2_time = toc(dual_phase2_start_time);
+ if (dual_phase2_time > 1.0) {
+ settings_.log.debug("Dual phase2 time %.2f seconds\n", dual_phase2_time);
+ }
+ if (cut_status == dual::status_t::TIME_LIMIT) {
+ solver_status_ = mip_status_t::TIME_LIMIT;
+ set_final_solution(solution, root_objective_);
+ return solver_status_;
+ }
+
+ if (cut_status != dual::status_t::OPTIMAL) {
+ settings_.log.printf("Cut status %s\n", dual::status_to_string(cut_status).c_str());
+ return mip_status_t::NUMERICAL;
+ }
+ exploration_stats_.total_lp_iters += root_relax_soln_.iterations;
+ root_objective_ = compute_objective(original_lp_, root_relax_soln_.x);
+
+ local_lower_bounds_.assign(settings_.num_bfs_workers, root_objective_);
+
+ f_t remove_cuts_start_time = tic();
+ mutex_original_lp_.lock();
+ remove_cuts(original_lp_,
+ settings_,
+ Arow_,
+ new_slacks_,
+ original_rows,
+ var_types_,
+ root_vstatus_,
+ edge_norms_,
+ root_relax_soln_.x,
+ root_relax_soln_.y,
+ root_relax_soln_.z,
+ basic_list,
+ nonbasic_list,
+ basis_update);
+ mutex_original_lp_.unlock();
+ f_t remove_cuts_time = toc(remove_cuts_start_time);
+ if (remove_cuts_time > 1.0) {
+ settings_.log.debug("Remove cuts time %.2f seconds\n", remove_cuts_time);
+ }
+ fractional.clear();
+ num_fractional = fractional_variables(settings_, root_relax_soln_.x, var_types_, fractional);
+
+ if (num_fractional == 0) {
+ upper_bound_ = root_objective_;
+ mutex_upper_.lock();
+ incumbent_.set_incumbent_solution(root_objective_, root_relax_soln_.x);
+ mutex_upper_.unlock();
+ }
+ f_t obj = upper_bound_.load();
+ report(' ', obj, root_objective_, 0, num_fractional);
+
+ f_t rel_gap = user_relative_gap(original_lp_, upper_bound_.load(), root_objective_);
+ f_t abs_gap = upper_bound_.load() - root_objective_;
+ if (rel_gap < settings_.relative_mip_gap_tol || abs_gap < settings_.absolute_mip_gap_tol) {
+ set_solution_at_root(solution, cut_info);
+ set_final_solution(solution, root_objective_);
+ return mip_status_t::OPTIMAL;
+ }
+
+ f_t change_in_objective = root_objective_ - last_objective;
+ const f_t factor = settings_.cut_change_threshold;
+ const f_t min_objective = 1e-3;
+ if (change_in_objective <= factor * std::max(min_objective, std::abs(root_relax_objective))) {
+ settings_.log.debug(
+ "Change in objective %.16e is less than 1e-3 of root relax objective %.16e\n",
+ change_in_objective,
+ root_relax_objective);
+ break;
+ }
+ last_objective = root_objective_;
}
- return mip_status_t::OPTIMAL;
}
+ print_cut_info(settings_, cut_info);
+
+ if (cut_info.has_cuts()) {
+ settings_.log.printf("Cut pool size : %d\n", cut_pool_size);
+ settings_.log.printf("Size with cuts : %d constraints, %d variables, %d nonzeros\n",
+ original_lp_.num_rows,
+ original_lp_.num_cols,
+ original_lp_.A.col_start[original_lp_.A.n]);
+ }
+
+ set_uninitialized_steepest_edge_norms(original_lp_, basic_list, edge_norms_);
+
pc_.resize(original_lp_.num_cols);
strong_branching(original_problem_,
original_lp_,
@@ -1566,6 +2096,50 @@ mip_status_t branch_and_bound_t::solve(mip_solution_t& solut
return solver_status_;
}
+ if (settings_.reduced_cost_strengthening >= 2 && upper_bound_.load() < last_upper_bound) {
+ std::vector lower_bounds;
+ std::vector upper_bounds;
+ i_t num_fixed = find_reduced_cost_fixings(upper_bound_.load(), lower_bounds, upper_bounds);
+ if (num_fixed > 0) {
+ std::vector