diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index f76fe01699..734a84ff2f 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -15,7 +15,7 @@ jobs:
     runs-on: [self-hosted, linux, x64, common]
     strategy:
       matrix:
-        pytorch-version: [1.6.0, 1.7.1, 1.8.1, 1.9.1, latest]
+        pytorch-version: [1.7.1, 1.8.1, 1.9.1, 1.10.2, latest]
     steps:
     - uses: actions/checkout@v2
     - name: Install the dependencies
@@ -24,15 +24,15 @@ jobs:
         python -m pip install --upgrade pip wheel
         python -m pip uninstall -y torch torchvision
         if [ ${{ matrix.pytorch-version }} == "latest" ]; then
-          python -m pip install torch torchvision
-        elif [ ${{ matrix.pytorch-version }} == "1.6.0" ]; then
-          python -m pip install torch==1.6.0 torchvision==0.7.0
+          python -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
         elif [ ${{ matrix.pytorch-version }} == "1.7.1" ]; then
-          python -m pip install torch==1.7.1 torchvision==0.8.2
+          python -m pip install torch==1.7.1 torchvision==0.8.2 --extra-index-url https://download.pytorch.org/whl/cu113
         elif [ ${{ matrix.pytorch-version }} == "1.8.1" ]; then
-          python -m pip install torch==1.8.1 torchvision==0.9.1
+          python -m pip install torch==1.8.1 torchvision==0.9.1 --extra-index-url https://download.pytorch.org/whl/cu113
         elif [ ${{ matrix.pytorch-version }} == "1.9.1" ]; then
-          python -m pip install torch==1.9.1 torchvision==0.10.1
+          python -m pip install torch==1.9.1 torchvision==0.10.1 --extra-index-url https://download.pytorch.org/whl/cu113
+        elif [ ${{ matrix.pytorch-version }} == "1.10.2" ]; then
+          python -m pip install torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu113
         fi
         python -m pip install -r requirements-dev.txt
         python -m pip list
diff --git a/.github/workflows/pythonapp-gpu.yml b/.github/workflows/pythonapp-gpu.yml
index d19bbe437f..90b31e99ab 100644
--- a/.github/workflows/pythonapp-gpu.yml
+++ b/.github/workflows/pythonapp-gpu.yml
@@ -50,7 +50,7 @@ jobs:
             pytorch: "-h"
             base: "nvcr.io/nvidia/pytorch:22.03-py3"
           - environment: PT110+CUDA102
-            pytorch: "torch==1.10.1 torchvision==0.11.2"
+            pytorch: "torch==1.10.2 torchvision==0.11.3"
             base: "nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04"
           - environment: PT111+CUDA102
             pytorch: "torch==1.11.0 torchvision==0.12.0"
diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml
index c3294c2b2a..e50f74d816 100644
--- a/.github/workflows/pythonapp-min.yml
+++ b/.github/workflows/pythonapp-min.yml
@@ -119,7 +119,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        pytorch-version: [1.6.0, 1.7.1, 1.8.1, 1.9.1, 1.10.1, latest]
+        pytorch-version: [1.7.1, 1.8.1, 1.9.1, 1.10.2, latest]
     timeout-minutes: 40
     steps:
     - uses: actions/checkout@v2
@@ -148,16 +148,14 @@ jobs:
        # min. requirements
        if [ ${{ matrix.pytorch-version }} == "latest" ]; then
          python -m pip install torch
-        elif [ ${{ matrix.pytorch-version }} == "1.6.0" ]; then
-          python -m pip install torch==1.6.0
        elif [ ${{ matrix.pytorch-version }} == "1.7.1" ]; then
          python -m pip install torch==1.7.1
        elif [ ${{ matrix.pytorch-version }} == "1.8.1" ]; then
          python -m pip install torch==1.8.1
        elif [ ${{ matrix.pytorch-version }} == "1.9.1" ]; then
          python -m pip install torch==1.9.1
-        elif [ ${{ matrix.pytorch-version }} == "1.10.1" ]; then
-          python -m pip install torch==1.10.1
+        elif [ ${{ matrix.pytorch-version }} == "1.10.2" ]; then
+          python -m pip install torch==1.10.2
        fi
        python -m pip install -r requirements-min.txt
        python -m pip list
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index cf251c2293..38c96b3b0d 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -137,7 +137,7 @@ jobs:
         # install the latest pytorch for testing
         # however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated
         # fresh torch installation according to pyproject.toml
-        python -m pip install torch>=1.6 torchvision
+        python -m pip install torch>=1.7 torchvision
     - name: Check packages
       run: |
         pip uninstall monai
diff --git a/monai/data/torchscript_utils.py b/monai/data/torchscript_utils.py
index 61477e8ca9..ca46dd9dc4 100644
--- a/monai/data/torchscript_utils.py
+++ b/monai/data/torchscript_utils.py
@@ -18,7 +18,6 @@
 
 from monai.config import get_config_values
 from monai.utils import JITMetadataKeys
-from monai.utils.module import pytorch_after
 
 METADATA_FILENAME = "metadata.json"
 
@@ -80,19 +79,10 @@ def save_net_with_metadata(
 
     json_data = json.dumps(metadict)
 
-    # Pytorch>1.6 can use dictionaries directly, otherwise need to use special map object
-    if pytorch_after(1, 7):
-        extra_files = {METADATA_FILENAME: json_data.encode()}
+    extra_files = {METADATA_FILENAME: json_data.encode()}
 
-        if more_extra_files is not None:
-            extra_files.update(more_extra_files)
-    else:
-        extra_files = torch._C.ExtraFilesMap()  # type:ignore[attr-defined]
-        extra_files[METADATA_FILENAME] = json_data.encode()
-
-        if more_extra_files is not None:
-            for k, v in more_extra_files.items():
-                extra_files[k] = v
+    if more_extra_files is not None:
+        extra_files.update(more_extra_files)
 
     if isinstance(filename_prefix_or_stream, str):
         filename_no_ext, ext = os.path.splitext(filename_prefix_or_stream)
@@ -123,16 +113,8 @@ def load_net_with_metadata(
     Returns:
         Triple containing loaded object, metadata dict, and extra files dict containing other file data if present
     """
-    # Pytorch>1.6 can use dictionaries directly, otherwise need to use special map object
-    if pytorch_after(1, 7):
-        extra_files = {f: "" for f in more_extra_files}
-        extra_files[METADATA_FILENAME] = ""
-    else:
-        extra_files = torch._C.ExtraFilesMap()  # type:ignore[attr-defined]
-        extra_files[METADATA_FILENAME] = ""
-
-        for f in more_extra_files:
-            extra_files[f] = ""
+    extra_files = {f: "" for f in more_extra_files}
+    extra_files[METADATA_FILENAME] = ""
 
     jit_obj = torch.jit.load(filename_prefix_or_stream, map_location, extra_files)
diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py
index 774e535e7f..a58387a5ef 100644
--- a/monai/engines/trainer.py
+++ b/monai/engines/trainer.py
@@ -26,7 +26,7 @@
 from monai.engines.workflow import Workflow
 from monai.inferers import Inferer, SimpleInferer
 from monai.transforms import Transform
-from monai.utils import min_version, optional_import, pytorch_after
+from monai.utils import min_version, optional_import
 from monai.utils.enums import CommonKeys as Keys
 
 if TYPE_CHECKING:
@@ -193,11 +193,7 @@ def _compute_pred_loss():
             engine.fire_event(IterationEvents.LOSS_COMPLETED)
 
         self.network.train()
-        # `set_to_none` only work from PyTorch 1.7.0
-        if not pytorch_after(1, 7):
-            self.optimizer.zero_grad()
-        else:
-            self.optimizer.zero_grad(set_to_none=self.optim_set_to_none)
+        self.optimizer.zero_grad(set_to_none=self.optim_set_to_none)
 
         if self.amp and self.scaler is not None:
             with torch.cuda.amp.autocast():
@@ -366,11 +362,7 @@ def _iteration(
         # Train Discriminator
         d_total_loss = torch.zeros(1)
         for _ in range(self.d_train_steps):
-            # `set_to_none` only work from PyTorch 1.7.0
-            if not pytorch_after(1, 7):
-                self.d_optimizer.zero_grad()
-            else:
-                self.d_optimizer.zero_grad(set_to_none=self.optim_set_to_none)
+            self.d_optimizer.zero_grad(set_to_none=self.optim_set_to_none)
             dloss = self.d_loss_function(g_output, d_input)
             dloss.backward()
             self.d_optimizer.step()
@@ -385,10 +377,7 @@ def _iteration(
             non_blocking=engine.non_blocking,  # type: ignore
         )
         g_output = self.g_inferer(g_input, self.g_network)
-        if not pytorch_after(1, 7):
-            self.g_optimizer.zero_grad()
-        else:
-            self.g_optimizer.zero_grad(set_to_none=self.optim_set_to_none)
+        self.g_optimizer.zero_grad(set_to_none=self.optim_set_to_none)
         g_loss = self.g_loss_function(g_output)
         g_loss.backward()
         self.g_optimizer.step()
diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py
index 7a0a45cb64..3de4e75766 100644
--- a/monai/networks/layers/simplelayers.py
+++ b/monai/networks/layers/simplelayers.py
@@ -20,19 +20,11 @@
 
 from monai.networks.layers.convutils import gaussian_1d
 from monai.networks.layers.factories import Conv
-from monai.utils import (
-    ChannelMatching,
-    InvalidPyTorchVersionError,
-    SkipMode,
-    look_up_option,
-    optional_import,
-    pytorch_after,
-)
+from monai.utils import ChannelMatching, SkipMode, look_up_option, optional_import, pytorch_after
 from monai.utils.misc import issequenceiterable
 
 _C, _ = optional_import("monai._C")
-if pytorch_after(1, 7):
-    fft, _ = optional_import("torch.fft")
+fft, _ = optional_import("torch.fft")
 
 __all__ = [
     "ChannelPad",
@@ -377,7 +369,6 @@ def _make_coeffs(window_length, order):
 class HilbertTransform(nn.Module):
     """
     Determine the analytical signal of a Tensor along a particular axis.
-    Requires PyTorch 1.7.0+ and the PyTorch FFT module (which is not included in NVIDIA PyTorch Release 20.10).
 
     Args:
         axis: Axis along which to apply Hilbert transform. Default 2 (first spatial dimension).
@@ -386,9 +377,6 @@ class HilbertTransform(nn.Module):
 
     def __init__(self, axis: int = 2, n: Union[int, None] = None) -> None:
 
-        if not pytorch_after(1, 7):
-            raise InvalidPyTorchVersionError("1.7.0", self.__class__.__name__)
-
         super().__init__()
         self.axis = axis
         self.n = n
diff --git a/monai/networks/utils.py b/monai/networks/utils.py
index a6b0699107..b8c986268a 100644
--- a/monai/networks/utils.py
+++ b/monai/networks/utils.py
@@ -502,7 +502,6 @@ def convert_to_torchscript(
         filename_or_obj: if not None, specify a file-like object (has to implement write and flush)
             or a string containing a file path name to save the TorchScript model.
         extra_files: map from filename to contents which will be stored as part of the save model file.
-            works for PyTorch 1.7 or later.
             for more details: https://pytorch.org/docs/stable/generated/torch.jit.save.html.
         verify: whether to verify the input and output of TorchScript model.
             if `filename_or_obj` is not None, load the saved TorchScript model and verify.
@@ -519,10 +518,7 @@ def convert_to_torchscript(
     with torch.no_grad():
         script_module = torch.jit.script(model, **kwargs)
         if filename_or_obj is not None:
-            if not pytorch_after(1, 7):
-                torch.jit.save(m=script_module, f=filename_or_obj)
-            else:
-                torch.jit.save(m=script_module, f=filename_or_obj, _extra_files=extra_files)
+            torch.jit.save(m=script_module, f=filename_or_obj, _extra_files=extra_files)
 
     if verify:
         if device is None:
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index da46b105e1..06b8cfa108 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -30,14 +30,12 @@
 from monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array
 from monai.transforms.utils_pytorch_numpy_unification import clip, percentile, where
 from monai.utils import (
-    InvalidPyTorchVersionError,
     convert_data_type,
     convert_to_dst_type,
     ensure_tuple,
     ensure_tuple_rep,
     ensure_tuple_size,
     fall_back_tuple,
-    pytorch_after,
 )
 from monai.utils.deprecate_utils import deprecated_arg
 from monai.utils.enums import TransformBackends
@@ -1085,7 +1083,6 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
 class DetectEnvelope(Transform):
     """
     Find the envelope of the input data along the requested axis using a Hilbert transform.
-    Requires PyTorch 1.7.0+ and the PyTorch FFT module (which is not included in NVIDIA PyTorch Release 20.10).
 
     Args:
         axis: Axis along which to detect the envelope. Default 1, i.e. the first spatial dimension.
@@ -1098,9 +1095,6 @@ class DetectEnvelope(Transform):
 
     def __init__(self, axis: int = 1, n: Union[int, None] = None) -> None:
 
-        if not pytorch_after(1, 7):
-            raise InvalidPyTorchVersionError("1.7.0", self.__class__.__name__)
-
         if axis < 0:
             raise ValueError("axis must be zero or positive.")
diff --git a/monai/transforms/utils_pytorch_numpy_unification.py b/monai/transforms/utils_pytorch_numpy_unification.py
index 2103ccff58..2aedc77dd7 100644
--- a/monai/transforms/utils_pytorch_numpy_unification.py
+++ b/monai/transforms/utils_pytorch_numpy_unification.py
@@ -15,7 +15,7 @@
 import torch
 
 from monai.config.type_definitions import NdarrayOrTensor, NdarrayTensor
-from monai.utils.misc import ensure_tuple, is_module_ver_at_least
+from monai.utils.misc import is_module_ver_at_least
 from monai.utils.type_conversion import convert_data_type, convert_to_dst_type
 
 __all__ = [
@@ -54,31 +54,12 @@ def allclose(a: NdarrayTensor, b: NdarrayOrTensor, rtol=1e-5, atol=1e-8, equal_n
 
 
 def moveaxis(x: NdarrayOrTensor, src: Union[int, Sequence[int]], dst: Union[int, Sequence[int]]) -> NdarrayOrTensor:
-    """`moveaxis` for pytorch and numpy, using `permute` for pytorch version < 1.7"""
+    """`moveaxis` for pytorch and numpy"""
     if isinstance(x, torch.Tensor):
-        if hasattr(torch, "movedim"):  # `movedim` is new in torch 1.7.0
-            # torch.moveaxis is a recent alias since torch 1.8.0
-            return torch.movedim(x, src, dst)  # type: ignore
-        return _moveaxis_with_permute(x, src, dst)
+        return torch.movedim(x, src, dst)  # type: ignore
     return np.moveaxis(x, src, dst)
 
 
-def _moveaxis_with_permute(
-    x: torch.Tensor, src: Union[int, Sequence[int]], dst: Union[int, Sequence[int]]
-) -> torch.Tensor:
-    # get original indices
-    indices = list(range(x.ndim))
-    len_indices = len(indices)
-    for s, d in zip(ensure_tuple(src), ensure_tuple(dst)):
-        # make src and dst positive
-        # remove desired index and insert it in new position
-        pos_s = len_indices + s if s < 0 else s
-        pos_d = len_indices + d if d < 0 else d
-        indices.pop(pos_s)
-        indices.insert(pos_d, pos_s)
-    return x.permute(indices)
-
-
 def in1d(x, y):
     """`np.in1d` with equivalent implementation for torch."""
     if isinstance(x, np.ndarray):
@@ -101,10 +82,7 @@ def percentile(
 ) -> Union[NdarrayOrTensor, float, int]:
     """`np.percentile` with equivalent implementation for torch.
 
-    Pytorch uses `quantile`, but this functionality is only available from v1.7.
-    For earlier methods, we calculate it ourselves. This doesn't do interpolation,
-    so is the equivalent of ``numpy.percentile(..., interpolation="nearest")``.
-    For more details, please refer to:
+    Pytorch uses `quantile`. For more details please refer to:
     https://pytorch.org/docs/stable/generated/torch.quantile.html.
     https://numpy.org/doc/stable/reference/generated/numpy.percentile.html.
 
@@ -112,7 +90,7 @@
     Args:
         x: input data
         q: percentile to compute (should in range 0 <= q <= 100)
         dim: the dim along which the percentiles are computed. default is to compute the percentile
-            along a flattened version of the array. only work for numpy array or Tensor with PyTorch >= 1.7.0.
+            along a flattened version of the array.
         keepdim: whether the output data has dim retained or not.
         kwargs: if `x` is numpy array, additional args for `np.percentile`, more details:
             https://numpy.org/doc/stable/reference/generated/numpy.percentile.html.
@@ -130,18 +108,7 @@
         result = np.percentile(x, q, axis=dim, keepdims=keepdim, **kwargs)
     else:
         q = torch.tensor(q, device=x.device)
-        if hasattr(torch, "quantile"):  # `quantile` is new in torch 1.7.0
-            result = torch.quantile(x, q / 100.0, dim=dim, keepdim=keepdim)
-        else:
-            # Note that ``kthvalue()`` works one-based, i.e., the first sorted value
-            # corresponds to k=1, not k=0. Thus, we need the `1 +`.
-            k = 1 + (0.01 * q * (x.numel() - 1)).round().int()
-            if k.numel() > 1:
-                r = [x.view(-1).kthvalue(int(_k)).values.item() for _k in k]
-                result = torch.tensor(r, device=x.device)
-            else:
-                result = x.view(-1).kthvalue(int(k)).values.item()
-
+        result = torch.quantile(x, q / 100.0, dim=dim, keepdim=keepdim)
     return result
 
 
@@ -277,8 +244,6 @@ def any_np_pt(x: NdarrayOrTensor, axis: Union[int, Sequence[int]]) -> NdarrayOrT
 def maximum(a: NdarrayOrTensor, b: NdarrayOrTensor) -> NdarrayOrTensor:
     """`np.maximum` with equivalent implementation for torch.
 
-    `torch.maximum` only available from pt>1.6, else use `torch.stack` and `torch.max`.
-
     Args:
         a: first array/tensor
         b: second array/tensor
@@ -287,10 +252,7 @@ def maximum(a: NdarrayOrTensor, b: NdarrayOrTensor) -> NdarrayOrTensor:
 
     Returns:
         Element-wise maximum between two arrays/tensors.
 
""" if isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor): - # is torch and has torch.maximum (pt>1.6) - if hasattr(torch, "maximum"): # `maximum` is new in torch 1.7.0 - return torch.maximum(a, b) - return torch.stack((a, b)).max(dim=0)[0] + return torch.maximum(a, b) return np.maximum(a, b) diff --git a/pyproject.toml b/pyproject.toml index 03e9f49ab5..eea4ebf9b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "wheel", "setuptools", - "torch>=1.6", + "torch>=1.7", "ninja", ] diff --git a/requirements.txt b/requirements.txt index e4ea34b5d4..14eb2b30e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -torch>=1.6 +torch>=1.7 numpy>=1.17 diff --git a/setup.cfg b/setup.cfg index a7d597d6bd..12f974ca6d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,7 @@ setup_requires = torch ninja install_requires = - torch>=1.6 + torch>=1.7 numpy>=1.17 [options.extras_require] diff --git a/tests/test_torchscript_utils.py b/tests/test_torchscript_utils.py index d6bea09ed6..cdf2f19eb3 100644 --- a/tests/test_torchscript_utils.py +++ b/tests/test_torchscript_utils.py @@ -18,7 +18,6 @@ from monai.config import get_config_values from monai.data import load_net_with_metadata, save_net_with_metadata from monai.utils import JITMetadataKeys -from monai.utils.module import pytorch_after class TestModule(torch.nn.Module): @@ -102,10 +101,7 @@ def test_save_load_more_extra_files(self): _, _, loaded_extra_files = load_net_with_metadata(f"{tempdir}/test.ts", more_extra_files=("test.txt",)) - if pytorch_after(1, 7): - self.assertEqual(more_extra_files["test.txt"], loaded_extra_files["test.txt"]) - else: - self.assertEqual(more_extra_files["test.txt"].decode(), loaded_extra_files["test.txt"]) + self.assertEqual(more_extra_files["test.txt"], loaded_extra_files["test.txt"]) if __name__ == "__main__": diff --git a/tests/test_utils_pytorch_numpy_unification.py b/tests/test_utils_pytorch_numpy_unification.py index b13378debe..6adf7093bf 100644 --- a/tests/test_utils_pytorch_numpy_unification.py +++ b/tests/test_utils_pytorch_numpy_unification.py @@ -12,7 +12,6 @@ import unittest import numpy as np -import torch from parameterized import parameterized from monai.transforms.utils_pytorch_numpy_unification import mode, percentile @@ -37,10 +36,7 @@ def test_percentile(self): for p in TEST_NDARRAYS: arr = p(np.arange(100 * 101).reshape(1, 100, 101).astype(np.float32)) results.append(percentile(arr, q)) - # pre torch 1.7, no `quantile`. Our own method doesn't interpolate, - # so we can only be accurate to 0.5 - atol = 0.5 if not hasattr(torch, "quantile") else 1e-4 - assert_allclose(results[0], results[-1], type_test=False, atol=atol) + assert_allclose(results[0], results[-1], type_test=False, atol=1e-4) def test_fails(self): for p in TEST_NDARRAYS: @@ -56,10 +52,7 @@ def test_dim(self): for p in TEST_NDARRAYS: arr = p(np.arange(6).reshape(1, 2, 3).astype(np.float32)) results.append(percentile(arr, q, dim=1)) - # pre torch 1.7, no `quantile`. Our own method doesn't interpolate, - # so we can only be accurate to 0.5 - atol = 0.5 if not hasattr(torch, "quantile") else 1e-4 - assert_allclose(results[0], results[-1], type_test=False, atol=atol) + assert_allclose(results[0], results[-1], type_test=False, atol=1e-4) @parameterized.expand(TEST_MODE) def test_mode(self, array, expected, to_long):