diff --git a/.pyup.yml b/.pyup.yml
index d844b78b..4c53f50a 100644
--- a/.pyup.yml
+++ b/.pyup.yml
@@ -7,5 +7,8 @@ label_prs: update
assignees: sfarrens
requirements:
- requirements.txt
+ pin: False
- develop.txt
+ pin: False
- docs/requirements.txt
+ pin: True
diff --git a/MANIFEST.in b/MANIFEST.in
index 74db0634..9a2f374e 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,4 +3,3 @@ include develop.txt
include docs/requirements.txt
include README.rst
include LICENSE.txt
-include docs/source/modopt_logo.png
diff --git a/README.md b/README.md
index 0f7501f0..acb316ad 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# ModOpt
-
+
| Usage | Development | Release |
| ----- | ----------- | ------- |
diff --git a/develop.txt b/develop.txt
index b88396e7..3f809fc2 100644
--- a/develop.txt
+++ b/develop.txt
@@ -1,8 +1,9 @@
-coverage==5.5
-nose==1.3.7
-pytest==6.2.2
-pytest-cov==2.11.1
-pytest-pep8==1.0.6
-pytest-emoji==0.2.0
-pytest-flake8==1.0.7
-wemake-python-styleguide==0.15.2
+coverage>=5.5
+flake8<4
+nose>=1.3.7
+pytest>=6.2.2
+pytest-cov>=2.11.1
+pytest-pep8>=1.0.6
+pytest-emoji>=0.2.0
+pytest-flake8>=1.0.7
+wemake-python-styleguide>=0.15.2
diff --git a/docs/requirements.txt b/docs/requirements.txt
index e1c873a2..9b196b1f 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,7 +1,8 @@
jupyter==1.0.0
-nbsphinx==0.8.2
+myst-parser==0.16.0
+nbsphinx==0.8.7
nbsphinx-link==1.3.0
numpydoc==1.1.0
-sphinx==3.5.2
-sphinxcontrib-bibtex==2.2.0
-sphinxawesome-theme==1.19.2
+sphinx==4.3.1
+sphinxcontrib-bibtex==2.4.1
+sphinxawesome-theme==3.2.1
diff --git a/docs/source/about.rst b/docs/source/about.rst
index 0d412b5d..00b2dbe2 100644
--- a/docs/source/about.rst
+++ b/docs/source/about.rst
@@ -15,6 +15,8 @@ Contributors
You can find a |link-to-contributors|.
+|CS_LOGO| |NS_LOGO|
+
.. |link-to-cosmic| raw:: html
COSMIC
@@ -38,11 +40,12 @@ You can find a |link-to-contributors|.
list of ModOpt contributors here
-
-.. image:: cosmostat_logo.jpg
- :width: 300
+.. |CS_LOGO| image:: cosmostat_logo.jpg
+ :width: 45%
:alt: CosmoStat Logo
+ :target: http://www.cosmostat.org/
-.. image:: neurospin_logo.png
- :width: 300
+.. |NS_LOGO| image:: neurospin_logo.png
+ :width: 45%
:alt: NeuroSpin Logo
+ :target: https://joliot.cea.fr/drf/joliot/en/Pages/research_entities/NeuroSpin.aspx
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 95aaba20..fb954f6d 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -41,6 +41,7 @@
'sphinx.ext.viewcode',
'sphinxawesome_theme',
'sphinxcontrib.bibtex',
+ 'myst_parser',
'nbsphinx',
'nbsphinx_link',
'numpydoc',
@@ -95,12 +96,25 @@
html_theme_options = {
"nav_include_hidden": True,
"show_nav": True,
- "show_breadcrumbs": False,
- "breadcrumbs_separator": "/"
+ "show_breadcrumbs": True,
+ "breadcrumbs_separator": "/",
+ "show_prev_next": True,
+ "show_scrolltop": True,
+
}
html_collapsible_definitions = True
-
-
+html_awesome_headerlinks = True
+html_logo = 'modopt_logo.jpg'
+html_permalinks_icon = (
+ ''
+)
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
html_title = '{0} v{1}'.format(project, version)
@@ -216,12 +230,25 @@ def add_notebooks(nb_path='../../notebooks'):
'python': ('http://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
+ 'progressbar': ('https://progressbar-2.readthedocs.io/en/latest/', None),
'matplotlib': ('https://matplotlib.org', None),
'astropy': ('http://docs.astropy.org/en/latest/', None),
'cupy': ('https://docs-cupy.chainer.org/en/stable/', None),
'torch': ('https://pytorch.org/docs/stable/', None),
+ 'sklearn': (
+ 'http://scikit-learn.org/stable',
+ (None, './_intersphinx/sklearn-objects.inv')
+ ),
+ 'tensorflow': (
+ 'https://www.tensorflow.org/api_docs/python',
+ (
+ 'https://github.com/GPflow/tensorflow-intersphinx/'
+ + 'raw/master/tf2_py_objects.inv')
+ )
+
}
# -- BibTeX Setting ----------------------------------------------
bibtex_bibfiles = ['refs.bib', 'my_ref.bib']
+bibtex_default_style = 'alpha'
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
index f983d030..d2bac8e2 100644
--- a/docs/source/contributing.rst
+++ b/docs/source/contributing.rst
@@ -1,9 +1,16 @@
Contributing
============
-Read our `Contribution Guidelines `_
+Read our |link-to-contrib|.
for details on how to contribute to the development of this package.
-All contributors are kindly asked to adhere to the
-`Code of Conduct `_
+All contributors are kindly asked to adhere to the |link-to-conduct|
at all times to ensure a safe and inclusive environment for everyone.
+
+.. |link-to-contrib| raw:: html
+
+ Contribution Guidelines
+
+.. |link-to-conduct| raw:: html
+
+ Code of Conduct
diff --git a/docs/source/cosmostat_logo.jpg b/docs/source/cosmostat_logo.jpg
index 4945c468..8bf52ad5 100644
Binary files a/docs/source/cosmostat_logo.jpg and b/docs/source/cosmostat_logo.jpg differ
diff --git a/docs/source/dependencies.rst b/docs/source/dependencies.rst
index 2a513158..024b1fe5 100644
--- a/docs/source/dependencies.rst
+++ b/docs/source/dependencies.rst
@@ -11,11 +11,11 @@ Required Packages
In order to use ModOpt the following packages must be installed:
-* |link-to-python| ``[> 3.6]``
-* |link-to-metadata| ``[==3.7.0]``
-* |link-to-numpy| ``[==1.19.5]``
-* |link-to-scipy| ``[==1.5.4]``
-* |link-to-progressbar| ``[==3.53.1]``
+* |link-to-python| ``[>= 3.6]``
+* |link-to-metadata| ``[>=3.7.0]``
+* |link-to-numpy| ``[>=1.19.5]``
+* |link-to-scipy| ``[>=1.5.4]``
+* |link-to-progressbar| ``[>=3.53.1]``
.. |link-to-python| raw:: html
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 238aa5b6..0eb6878f 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -7,13 +7,15 @@ ModOpt Documentation
======================
.. image:: modopt_logo.png
+ :width: 100%
+ :alt: ModOpt logo
.. Include table of contents
.. include:: toc.rst
:Author: Samuel Farrens `(samuel.farrens@cea.fr) `_
-:Version: 1.5.1
-:Release Date: 22/04/2021
+:Version: 1.6.0
+:Release Date: 17/12/2021
:Repository: |link-to-repo|
.. |link-to-repo| raw:: html
diff --git a/docs/source/neurospin_logo.png b/docs/source/neurospin_logo.png
index 669deb0d..4efb9cab 100644
Binary files a/docs/source/neurospin_logo.png and b/docs/source/neurospin_logo.png differ
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index cac8159e..d8365e71 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -65,6 +65,22 @@ @article{condat2013
doi = {10.1007/s10957-012-0245-9}
}
+@ARTICLE{defazio2014,
+ author = {{Defazio}, Aaron and {Bach}, Francis and {Lacoste-Julien}, Simon},
+ title = "{SAGA: A Fast Incremental Gradient Method With Support for Non-Strongly Convex Composite Objectives}",
+ journal = {arXiv e-prints},
+ keywords = {Computer Science - Machine Learning, Mathematics - Optimization and Control, Statistics - Machine Learning},
+ year = 2014,
+ month = jul,
+ eid = {arXiv:1407.0202},
+ pages = {arXiv:1407.0202},
+archivePrefix = {arXiv},
+ eprint = {1407.0202},
+ primaryClass = {cs.LG},
+ adsurl = {https://ui.adsabs.harvard.edu/abs/2014arXiv1407.0202D},
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
@ARTICLE{figueiredo2014,
author = {Figueiredo, Mario A.~T. and Nowak, Robert D.},
title = {Sparse Estimation with Strongly Correlated Variables using Ordered Weighted L1 Regularization},
@@ -145,6 +161,22 @@ @ARTICLE{raguet2011
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
+@ARTICLE{ruder2017,
+ author = {{Ruder}, Sebastian},
+ title = "{An overview of gradient descent optimization algorithms}",
+ journal = {arXiv e-prints},
+ keywords = {Computer Science - Machine Learning},
+ year = 2016,
+ month = sep,
+ eid = {arXiv:1609.04747},
+ pages = {arXiv:1609.04747},
+archivePrefix = {arXiv},
+ eprint = {1609.04747},
+ primaryClass = {cs.LG},
+ adsurl = {https://ui.adsabs.harvard.edu/abs/2016arXiv160904747R},
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
@book{starck2010,
place={Cambridge},
title={Sparse Image and Signal Processing: Wavelets, Curvelets, Morphological Diversity},
diff --git a/docs/source/z_ref.rst b/docs/source/z_ref.rst
index e7e816e6..f2153d73 100644
--- a/docs/source/z_ref.rst
+++ b/docs/source/z_ref.rst
@@ -2,4 +2,3 @@ References
==========
.. bibliography:: refs.bib
- :style: alpha
diff --git a/modopt/base/backend.py b/modopt/base/backend.py
index 5fbe912f..1f4e9a72 100644
--- a/modopt/base/backend.py
+++ b/modopt/base/backend.py
@@ -49,26 +49,26 @@
def get_backend(backend):
"""Get backend.
- Returns the backend module for input specified by string
+ Returns the backend module for input specified by string.
Parameters
----------
backend: str
- String holding the backend name. One of `tensorflow`,
- `numpy` or `cupy`.
+ String holding the backend name. One of ``'tensorflow'``,
+ ``'numpy'`` or ``'cupy'``.
Returns
-------
tuple
Returns the module for carrying out calculations and the actual backend
that was reverted towards. If the right libraries are not installed,
- the function warns and reverts to `numpy` backend
+ the function warns and reverts to the ``'numpy'`` backend.
"""
if backend not in LIBRARIES.keys() or LIBRARIES[backend] is None:
msg = (
'{0} backend not possible, please ensure that '
+ 'the optional libraries are installed.\n'
- + 'Reverting to numpy'
+ + 'Reverting to numpy.'
)
warn(msg.format(backend))
backend = 'numpy'
@@ -83,7 +83,7 @@ def get_array_module(input_data):
Parameters
----------
- input_data : numpy.ndarray or cupy.ndarray
+ input_data : numpy.ndarray, cupy.ndarray or tf.experimental.numpy.ndarray
Input data array
Returns
@@ -104,16 +104,16 @@ def get_array_module(input_data):
def change_backend(input_data, backend='cupy'):
"""Move data to device.
- This method changes the backend of an array
- This can be used to copy data to GPU or to CPU
+ This method changes the backend of an array. This can be used to copy data
+ to GPU or to CPU.
Parameters
----------
- input_data : numpy.ndarray or cupy.ndarray
+ input_data : numpy.ndarray, cupy.ndarray or tf.experimental.numpy.ndarray
Input data array to be moved
backend: str, optional
- The backend to use, one among `tensorflow`, `cupy` and
- `numpy`. Default is `cupy`.
+ The backend to use, one among ``'tensorflow'``, ``'cupy'`` and
+ ``'numpy'``. Default is ``'cupy'``.
Returns
-------
@@ -131,12 +131,12 @@ def change_backend(input_data, backend='cupy'):
def move_to_cpu(input_data):
"""Move data to CPU.
- This method moves data from GPU to CPU.
- It returns the same data if it is already on CPU.
+ This method moves data from GPU to CPU. It returns the same data if it is
+ already on CPU.
Parameters
----------
- input_data : cupy.ndarray
+ input_data : cupy.ndarray or tf.experimental.numpy.ndarray
Input data array to be moved
Returns
diff --git a/modopt/base/np_adjust.py b/modopt/base/np_adjust.py
index 3df8b411..6d290e43 100644
--- a/modopt/base/np_adjust.py
+++ b/modopt/base/np_adjust.py
@@ -144,6 +144,7 @@ def pad2d(input_data, padding):
See Also
--------
numpy.pad : base function
+
"""
input_data = np.array(input_data)
@@ -169,7 +170,7 @@ def pad2d(input_data, padding):
def ftr(input_data):
"""Fancy transpose right.
- Apply fancy_transpose() to data with roll=1.
+ Apply ``fancy_transpose`` to data with ``roll=1``.
Parameters
----------
@@ -192,7 +193,7 @@ def ftr(input_data):
def ftl(input_data):
"""Fancy transpose left.
- Apply fancy_transpose() to data with roll=-1.
+ Apply ``fancy_transpose`` to data with ``roll=-1``.
Parameters
----------
diff --git a/modopt/base/observable.py b/modopt/base/observable.py
index 581d3f7c..6471ba58 100644
--- a/modopt/base/observable.py
+++ b/modopt/base/observable.py
@@ -85,7 +85,7 @@ def notify_observers(self, signal, **kwargs):
----------
signal : str
A valid signal
- kwargs : dict
+ **kwargs : dict
The parameters that will be sent to the observers
Returns
@@ -191,9 +191,9 @@ class MetricObserver(object):
Metric function with this precise signature func(test, ref)
mapping : dict
Define the mapping between the iterate variable and the metric
- keyword: {'x_new':'name_var_1', 'y_new':'name_var_2'}. To cancel
+ keyword: ``{'x_new':'name_var_1', 'y_new':'name_var_2'}``. To cancel
the need of a variable, the dict value should be None:
- 'y_new':None.
+ ``'y_new': None``.
cst_kwargs : dict
Keywords arguments of constant argument for the metric computation
early_stopping : bool
diff --git a/modopt/base/transform.py b/modopt/base/transform.py
index 44bcfa58..07ce846f 100644
--- a/modopt/base/transform.py
+++ b/modopt/base/transform.py
@@ -15,7 +15,7 @@ def cube2map(data_cube, layout):
"""Cube to Map.
This method transforms the input data from a 3D cube to a 2D map with a
- specified layout
+ specified layout.
Parameters
----------
@@ -73,7 +73,7 @@ def map2cube(data_map, layout):
"""Map to cube.
This method transforms the input data from a 2D map with given layout to
- a 3D cube
+ a 3D cube.
Parameters
----------
@@ -137,7 +137,7 @@ def map2cube(data_map, layout):
def map2matrix(data_map, layout):
"""Map to Matrix.
- This method transforms a 2D map to a 2D matrix
+ This method transforms a 2D map to a 2D matrix.
Parameters
----------
@@ -197,7 +197,7 @@ def map2matrix(data_map, layout):
def matrix2map(data_matrix, map_shape):
"""Matrix to Map.
- This method transforms a 2D matrix to a 2D map
+ This method transforms a 2D matrix to a 2D map.
Parameters
----------
@@ -256,7 +256,7 @@ def matrix2map(data_matrix, map_shape):
def cube2matrix(data_cube):
"""Cube to Matrix.
- This method transforms a 3D cube to a 2D matrix
+ This method transforms a 3D cube to a 2D matrix.
Parameters
----------
@@ -292,7 +292,7 @@ def cube2matrix(data_cube):
def matrix2cube(data_matrix, im_shape):
"""Matrix to Cube.
- This method transforms a 2D matrix to a 3D cube
+ This method transforms a 2D matrix to a 3D cube.
Parameters
----------
diff --git a/modopt/base/types.py b/modopt/base/types.py
index f0eff33f..88051675 100644
--- a/modopt/base/types.py
+++ b/modopt/base/types.py
@@ -19,7 +19,7 @@ def check_callable(input_obj, add_agrs=True):
This method checks if the input operator is a callable funciton and
optionally adds support for arguments and keyword arguments if not already
- provided
+ provided.
Parameters
----------
@@ -31,7 +31,7 @@ def check_callable(input_obj, add_agrs=True):
Returns
-------
function
- Function wrapped by `add_args_kwargs`
+ Function wrapped by ``add_args_kwargs``
Raises
------
diff --git a/modopt/base/wrappers.py b/modopt/base/wrappers.py
index c6d4dbf4..baedb891 100644
--- a/modopt/base/wrappers.py
+++ b/modopt/base/wrappers.py
@@ -2,7 +2,7 @@
"""WRAPPERS.
-This module contains wrappers for adding additional features to functions
+This module contains wrappers for adding additional features to functions.
:Author: Samuel Farrens
@@ -13,7 +13,7 @@
def add_args_kwargs(func):
- """Add Args and Kwargs.
+ """Add args and kwargs.
This wrapper adds support for additional arguments and keyword arguments to
any callable function.
@@ -25,7 +25,7 @@ def add_args_kwargs(func):
Returns
-------
- function
+ callable
wrapper
"""
diff --git a/modopt/math/convolve.py b/modopt/math/convolve.py
index 7074e44a..a4322ff2 100644
--- a/modopt/math/convolve.py
+++ b/modopt/math/convolve.py
@@ -34,7 +34,7 @@ def convolve(input_data, kernel, method='scipy'):
"""Convolve data with kernel.
This method convolves the input data with a given kernel using FFT and
- is the default convolution used for all routines
+ is the default convolution used for all routines.
Parameters
----------
@@ -43,7 +43,7 @@ def convolve(input_data, kernel, method='scipy'):
kernel : numpy.ndarray
Input kernel array, normally a 2D kernel
method : {'scipy', 'astropy'}, optional
- Convolution method (default is 'scipy')
+ Convolution method (default is ``'scipy'``)
Returns
-------
@@ -106,7 +106,7 @@ def convolve_stack(input_data, kernel, rot_kernel=False, method='scipy'):
"""Convolve stack of data with stack of kernels.
This method convolves the input data with a given kernel using FFT and
- is the default convolution used for all routines
+ is the default convolution used for all routines.
Parameters
----------
@@ -117,7 +117,7 @@ def convolve_stack(input_data, kernel, rot_kernel=False, method='scipy'):
rot_kernel : bool
Option to rotate kernels by 180 degrees (default is ``False``)
method : {'astropy', 'scipy'}, optional
- Convolution method (default is 'scipy')
+ Convolution method (default is ``'scipy'``)
Returns
-------
diff --git a/modopt/math/matrix.py b/modopt/math/matrix.py
index be737f52..939cf41f 100644
--- a/modopt/math/matrix.py
+++ b/modopt/math/matrix.py
@@ -16,7 +16,7 @@
def gram_schmidt(matrix, return_opt='orthonormal'):
- """Gram-Schmit.
+ r"""Gram-Schmidt.
This method orthonormalizes the row vectors of the input matrix.
@@ -25,12 +25,14 @@ def gram_schmidt(matrix, return_opt='orthonormal'):
matrix : numpy.ndarray
Input matrix array
return_opt : {'orthonormal', 'orthogonal', 'both'}
- Option to return u, e or both, (default is 'orthonormal')
+ Option to return :math:`\mathbf{u}`, :math:`\mathbf{e}` or both
+ (default is ``'orthonormal'``)
Returns
-------
tuple or numpy.ndarray
- Orthogonal vectors, u, and/or orthonormal vectors, e
+ Orthogonal vectors, :math:`\mathbf{u}`, and/or orthonormal vectors,
+ :math:`\mathbf{e}`
Raises
------
@@ -124,7 +126,8 @@ def nuclear_norm(input_data):
def project(u_vec, v_vec):
r"""Project vector.
- This method projects vector v onto vector u.
+ This method projects vector :math:`\mathbf{v}` onto vector
+ :math:`\mathbf{u}`.
Parameters
----------
@@ -259,11 +262,11 @@ class PowerMethod(object):
"""Power method class.
This method performs implements power method to calculate the spectral
- radius of the input data
+ radius of the input data.
Parameters
----------
- operator : function
+ operator : callable
Operator function
data_shape : tuple
Shape of the data array
@@ -313,9 +316,10 @@ def __init__(
self.get_spec_rad()
def _set_initial_x(self):
- """Set initial value of x.
+ """Set initial value of :math:`x`.
- This method sets the initial value of x to an arrray of random values
+ This method sets the initial value of :math:`x` to an array of random
+ values.
Returns
-------
diff --git a/modopt/math/stats.py b/modopt/math/stats.py
index 858e0ade..3ac818a7 100644
--- a/modopt/math/stats.py
+++ b/modopt/math/stats.py
@@ -21,7 +21,7 @@
def gaussian_kernel(data_shape, sigma, norm='max'):
"""Gaussian kernel.
- This method produces a Gaussian kerenal of a specified size and dispersion
+ This method produces a Gaussian kernel of a specified size and dispersion.
Parameters
----------
@@ -30,8 +30,8 @@ def gaussian_kernel(data_shape, sigma, norm='max'):
sigma : float
Standard deviation of the kernel
norm : {'max', 'sum', 'none'}, optional
- Normalisation of the kerenl (options are 'max', 'sum' or 'none',
- default is 'max')
+ Normalisation of the kernel (options are ``'max'``, ``'sum'`` or
+ ``'none'``, default is ``'max'``)
Returns
-------
@@ -150,8 +150,8 @@ def mse(data1, data2):
def psnr(data1, data2, method='starck', max_pix=255):
r"""Peak Signal-to-Noise Ratio.
- This method calculates the Peak Signal-to-Noise Ratio between an two data
- sets
+ This method calculates the Peak Signal-to-Noise Ratio between two data
+ sets.
Parameters
----------
@@ -160,7 +160,7 @@ def psnr(data1, data2, method='starck', max_pix=255):
data2 : numpy.ndarray
Second data set
method : {'starck', 'wiki'}, optional
- PSNR implementation (default is 'starck')
+ PSNR implementation (default is ``'starck'``)
max_pix : int, optional
Maximum number of pixels (default is ``255``)
@@ -187,11 +187,11 @@ def psnr(data1, data2, method='starck', max_pix=255):
Notes
-----
- 'starck':
+ ``'starck'``:
Implements eq.3.7 from :cite:`starck2010`
- 'wiki':
+ ``'wiki'``:
Implements PSNR equation on
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
@@ -235,7 +235,7 @@ def psnr_stack(data1, data2, metric=np.mean, method='starck'):
The desired metric to be applied to the PSNR values (default is
``numpy.mean``)
method : {'starck', 'wiki'}, optional
- PSNR implementation (default is 'starck')
+ PSNR implementation (default is ``'starck'``)
Returns
-------
diff --git a/modopt/opt/algorithms/__init__.py b/modopt/opt/algorithms/__init__.py
new file mode 100644
index 00000000..e0ac2572
--- /dev/null
+++ b/modopt/opt/algorithms/__init__.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+r"""OPTIMISATION ALGORITHMS.
+
+This module contains class implementations of various optimisation algorithms.
+
+:Authors:
+
+* Samuel Farrens ,
+* Zaccharie Ramzi ,
+* Pierre-Antoine Comby
+
+:Notes:
+
+Input classes must have the following properties:
+
+* **Gradient Operators**
+
+Must have the following methods:
+
+ * ``get_grad()`` - calculate the gradient
+
+Must have the following variables:
+
+ * ``grad`` - the gradient
+
+* **Linear Operators**
+
+Must have the following methods:
+
+ * ``op()`` - operator
+ * ``adj_op()`` - adjoint operator
+
+* **Proximity Operators**
+
+Must have the following methods:
+
+ * ``op()`` - operator
+
+The following notation is used to implement the algorithms:
+
+ * ``x_old`` is used in place of :math:`x_{n}`.
+ * ``x_new`` is used in place of :math:`x_{n+1}`.
+ * ``x_prox`` is used in place of :math:`\tilde{x}_{n+1}`.
+ * ``x_temp`` is used for intermediate operations.
+
+"""
+
+from modopt.opt.algorithms.base import SetUp
+from modopt.opt.algorithms.forward_backward import (FISTA, POGM,
+ ForwardBackward,
+ GenForwardBackward)
+from modopt.opt.algorithms.gradient_descent import (AdaGenericGradOpt,
+ ADAMGradOpt,
+ GenericGradOpt,
+ MomentumGradOpt,
+ RMSpropGradOpt,
+ SAGAOptGradOpt,
+ VanillaGenericGradOpt)
+from modopt.opt.algorithms.primal_dual import Condat
diff --git a/modopt/opt/algorithms/base.py b/modopt/opt/algorithms/base.py
new file mode 100644
index 00000000..85c36306
--- /dev/null
+++ b/modopt/opt/algorithms/base.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+"""Base SetUp for optimisation algorithms."""
+
+from inspect import getmro
+
+import numpy as np
+from progressbar import ProgressBar
+
+from modopt.base import backend
+from modopt.base.observable import MetricObserver, Observable
+from modopt.interface.errors import warn
+
+
+class SetUp(Observable):
+ r"""Algorithm Set-Up.
+
+ This class contains methods for checking the set-up of an optimisation
+ algorithm and produces warnings if they do not comply.
+
+ Parameters
+ ----------
+ metric_call_period : int, optional
+ Metric call period (default is ``5``)
+ metrics : dict, optional
+ Metrics to be used (default is ``\{\}``)
+ verbose : bool, optional
+ Option for verbose output (default is ``False``)
+ progress : bool, optional
+ Option to display progress bar (default is ``True``)
+ step_size : int, optional
+ Generic step size parameter to override default algorithm
+ parameter name (`e.g.` `step_size` will override the value set for
+ `beta_param` in `ForwardBackward`)
+ use_gpu : bool, optional
+ Option to use available GPU
+
+ See Also
+ --------
+ modopt.base.observable.Observable : parent class
+ modopt.base.observable.MetricObserver : definition of metrics
+
+ """
+
+ def __init__(
+ self,
+ metric_call_period=5,
+ metrics=None,
+ verbose=False,
+ progress=True,
+ step_size=None,
+ compute_backend='numpy',
+ **dummy_kwargs,
+ ):
+ self.idx = 0
+ self.converge = False
+ self.verbose = verbose
+ self.progress = progress
+ self.metrics = metrics
+ self.step_size = step_size
+ self._op_parents = (
+ 'GradParent',
+ 'ProximityParent',
+ 'LinearParent',
+ 'costObj',
+ )
+
+ self.metric_call_period = metric_call_period
+
+ # Declaration of observers for metrics
+ super().__init__(['cv_metrics'])
+
+ for name, dic in self.metrics.items():
+ observer = MetricObserver(
+ name,
+ dic['metric'],
+ dic['mapping'],
+ dic['cst_kwargs'],
+ dic['early_stopping'],
+ )
+ self.add_observer('cv_metrics', observer)
+
+ xp, compute_backend = backend.get_backend(compute_backend)
+ self.xp = xp
+ self.compute_backend = compute_backend
+
+ @property
+ def metrics(self):
+ """Set metrics dictionary."""
+ return self._metrics
+
+ @metrics.setter
+ def metrics(self, metrics):
+
+ if isinstance(metrics, type(None)):
+ self._metrics = {}
+ elif isinstance(metrics, dict):
+ self._metrics = metrics
+ else:
+ raise TypeError(
+ 'Metrics must be a dictionary, not {0}.'.format(type(metrics)),
+ )
+
+ def any_convergence_flag(self):
+ """Check convergence flag.
+
+ Return True if any metric values matched the convergence criteria.
+
+ Returns
+ -------
+ bool
+ True if any convergence criteria met
+
+ """
+ return any(
+ obs.converge_flag for obs in self._observers['cv_metrics']
+ )
+
+ def copy_data(self, input_data):
+ """Copy Data.
+
+ Set directive for copying data.
+
+ Parameters
+ ----------
+ input_data : numpy.ndarray
+ Input data
+
+ Returns
+ -------
+ numpy.ndarray
+ Copy of input data
+
+ """
+ return self.xp.copy(backend.change_backend(
+ input_data,
+ self.compute_backend,
+ ))
+
+ def _check_input_data(self, input_data):
+ """Check input data type.
+
+ This method checks if the input data is a numpy array
+
+ Parameters
+ ----------
+ input_data : numpy.ndarray
+ Input data array
+
+ Raises
+ ------
+ TypeError
+ For invalid input type
+
+ """
+ if not (isinstance(input_data, (self.xp.ndarray, np.ndarray))):
+ raise TypeError(
+ 'Input data must be a numpy array or backend array',
+ )
+
+ def _check_param(self, param_val):
+ """Check algorithm parameters.
+
+ This method checks if the specified algorithm parameters are floats
+
+ Parameters
+ ----------
+ param_val : float
+ Parameter value
+
+ Raises
+ ------
+ TypeError
+ For invalid input type
+
+ """
+ if not isinstance(param_val, float):
+ raise TypeError('Algorithm parameter must be a float value.')
+
+ def _check_param_update(self, param_update):
+ """Check algorithm parameter update methods.
+
+ This method checks if the specified algorithm parameters are floats
+
+ Parameters
+ ----------
+ param_update : callable
+ Callable function
+
+ Raises
+ ------
+ TypeError
+ For invalid input type
+
+ """
+ param_conditions = (
+ not isinstance(param_update, type(None))
+ and not callable(param_update)
+ )
+
+ if param_conditions:
+ raise TypeError(
+ 'Algorithm parameter update must be a callable function.',
+ )
+
+ def _check_operator(self, operator):
+ """Check set-up.
+
+ This method checks algorithm operator against the expected parent
+ classes
+
+ Parameters
+ ----------
+ operator : str
+ Algorithm operator to check
+
+ """
+ if not isinstance(operator, type(None)):
+ tree = [op_obj.__name__ for op_obj in getmro(operator.__class__)]
+
+ if not any(parent in tree for parent in self._op_parents):
+ message = '{0} does not inherit an operator parent.'
+ warn(message.format(str(operator.__class__)))
+
+ def _compute_metrics(self):
+ """Compute metrics during iteration.
+
+ This method creates the args necessary for metrics computation, then
+ calls the observers to compute metrics
+
+ """
+ kwargs = self.get_notify_observers_kwargs()
+ self.notify_observers('cv_metrics', **kwargs)
+
+ def _iterations(self, max_iter, progbar=None):
+ """Iterate method.
+
+ Iterate the update step of the given algorithm.
+
+ Parameters
+ ----------
+ max_iter : int
+ Maximum number of iterations
+ progbar : progressbar.bar.ProgressBar
+ Progress bar (default is ``None``)
+
+ """
+ for idx in range(max_iter):
+ self.idx = idx
+ self._update()
+
+ # Calling metrics every metric_call_period cycle
+ # Also calculate at the end (max_iter or at convergence)
+ # We do not call metrics if metrics is empty or metric call
+ # period is None
+ if self.metrics and self.metric_call_period is not None:
+
+ metric_conditions = (
+ self.idx % self.metric_call_period == 0
+ or self.idx == (max_iter - 1)
+ or self.converge
+ )
+
+ if metric_conditions:
+ self._compute_metrics()
+
+ if self.converge:
+ if self.verbose:
+ print(' - Converged!')
+ break
+
+ if not isinstance(progbar, type(None)):
+ progbar.update(idx)
+
+ def _run_alg(self, max_iter):
+ """Run algorithm.
+
+ Run the update step of a given algorithm up to the maximum number of
+ iterations.
+
+ Parameters
+ ----------
+ max_iter : int
+ Maximum number of iterations
+
+ See Also
+ --------
+ progressbar.bar.ProgressBar
+
+ """
+ if self.progress:
+ with ProgressBar(
+ redirect_stdout=True,
+ max_value=max_iter,
+ ) as progbar:
+ self._iterations(max_iter, progbar=progbar)
+ else:
+ self._iterations(max_iter)
diff --git a/modopt/opt/algorithms.py b/modopt/opt/algorithms/forward_backward.py
similarity index 52%
rename from modopt/opt/algorithms.py
rename to modopt/opt/algorithms/forward_backward.py
index 125ac84c..e18f66c3 100644
--- a/modopt/opt/algorithms.py
+++ b/modopt/opt/algorithms/forward_backward.py
@@ -1,376 +1,53 @@
# -*- coding: utf-8 -*-
-
-r"""OPTIMISATION ALGOTITHMS.
-
-This module contains class implementations of various optimisation algoritms.
-
-:Authors: Samuel Farrens ,
- Zaccharie Ramzi
-
-:Notes:
-
-Input classes must have the following properties:
-
- * **Gradient Operators**
-
- Must have the following methods:
-
- * ``get_grad()`` - calculate the gradient
-
- Must have the following variables:
-
- * ``grad`` - the gradient
-
- * **Linear Operators**
-
- Must have the following methods:
-
- * ``op()`` - operator
- * ``adj_op()`` - adjoint operator
-
- * **Proximity Operators**
-
- Must have the following methods:
-
- * ``op()`` - operator
-
-The following notation is used to implement the algorithms:
-
- * x_old is used in place of :math:`x_{n}`.
- * x_new is used in place of :math:`x_{n+1}`.
- * x_prox is used in place of :math:`\tilde{x}_{n+1}`.
- * x_temp is used for intermediate operations.
-
-"""
-
-from inspect import getmro
+"""Forward-Backward Algorithms."""
import numpy as np
-from progressbar import ProgressBar
from modopt.base import backend
-from modopt.base.observable import MetricObserver, Observable
-from modopt.interface.errors import warn
+from modopt.opt.algorithms.base import SetUp
from modopt.opt.cost import costObj
from modopt.opt.linear import Identity
-class SetUp(Observable):
- r"""Algorithm Set-Up.
-
- This class contains methods for checking the set-up of an optimisation
- algotithm and produces warnings if they do not comply.
-
- Parameters
- ----------
- metric_call_period : int, optional
- Metric call period (default is ``5``)
- metrics : dict, optional
- Metrics to be used (default is ``\{\}``)
- verbose : bool, optional
- Option for verbose output (default is ``False``)
- progress : bool, optional
- Option to display progress bar (default is ``True``)
- step_size : int, optional
- Generic step size parameter to override default algorithm
- parameter name (`e.g.` `step_size` will override the value set for
- `beta_param` in `ForwardBackward`)
- use_gpu : bool, optional
- Option to use available GPU
-
- """
-
- def __init__(
- self,
- metric_call_period=5,
- metrics=None,
- verbose=False,
- progress=True,
- step_size=None,
- compute_backend='numpy',
- **dummy_kwargs,
- ):
-
- self.converge = False
- self.verbose = verbose
- self.progress = progress
- self.metrics = metrics
- self.step_size = step_size
- self._op_parents = (
- 'GradParent',
- 'ProximityParent',
- 'LinearParent',
- 'costObj',
- )
-
- self.metric_call_period = metric_call_period
-
- # Declaration of observers for metrics
- super().__init__(['cv_metrics'])
-
- for name, dic in self.metrics.items():
- observer = MetricObserver(
- name,
- dic['metric'],
- dic['mapping'],
- dic['cst_kwargs'],
- dic['early_stopping'],
- )
- self.add_observer('cv_metrics', observer)
-
- xp, compute_backend = backend.get_backend(compute_backend)
- self.xp = xp
- self.compute_backend = compute_backend
-
- @property
- def metrics(self):
- """Metrics."""
- return self._metrics
-
- @metrics.setter
- def metrics(self, metrics):
-
- if isinstance(metrics, type(None)):
- self._metrics = {}
- elif isinstance(metrics, dict):
- self._metrics = metrics
- else:
- raise TypeError(
- 'Metrics must be a dictionary, not {0}.'.format(type(metrics)),
- )
-
- def any_convergence_flag(self):
- """Check convergence flag.
-
- Return if any matrices values matched the convergence criteria.
-
- Returns
- -------
- bool
- True if any convergence criteria met
-
- """
- return any(
- obs.converge_flag for obs in self._observers['cv_metrics']
- )
-
- def copy_data(self, input_data):
- """Copy Data.
-
- Set directive for copying data.
-
- Parameters
- ----------
- input_data : numpy.ndarray
- Input data
-
- Returns
- -------
- numpy.ndarray
- Copy of input data
-
- """
- return self.xp.copy(backend.change_backend(
- input_data,
- self.compute_backend,
- ))
-
- def _check_input_data(self, input_data):
- """Check input data type.
-
- This method checks if the input data is a numpy array
-
- Parameters
- ----------
- input_data : numpy.ndarray
- Input data array
-
- Raises
- ------
- TypeError
- For invalid input type
-
- """
- if not (isinstance(input_data, (self.xp.ndarray, np.ndarray))):
- raise TypeError(
- 'Input data must be a numpy array or backend array',
- )
-
- def _check_param(self, param_val):
- """Check algorithm parameters.
-
- This method checks if the specified algorithm parameters are floats
-
- Parameters
- ----------
- param_val : float
- Parameter value
-
- Raises
- ------
- TypeError
- For invalid input type
-
- """
- if not isinstance(param_val, float):
- raise TypeError('Algorithm parameter must be a float value.')
-
- def _check_param_update(self, param_update):
- """Check algorithm parameter update methods.
-
- This method checks if the specified algorithm parameters are floats
-
- Parameters
- ----------
- param_update : function
- Callable function
-
- Raises
- ------
- TypeError
- For invalid input type
-
- """
- param_conditions = (
- not isinstance(param_update, type(None))
- and not callable(param_update)
- )
-
- if param_conditions:
- raise TypeError(
- 'Algorithm parameter update must be a callabale function.',
- )
-
- def _check_operator(self, operator):
- """Check set-Up.
-
- This method checks algorithm operator against the expected parent
- classes
-
- Parameters
- ----------
- operator : str
- Algorithm operator to check
-
- """
- if not isinstance(operator, type(None)):
- tree = [op_obj.__name__ for op_obj in getmro(operator.__class__)]
-
- if not any(parent in tree for parent in self._op_parents):
- message = '{0} does not inherit an operator parent.'
- warn(message.format(str(operator.__class__)))
-
- def _compute_metrics(self):
- """Compute metrics during iteration.
-
- This method create the args necessary for metrics computation, then
- call the observers to compute metrics
-
- """
- kwargs = self.get_notify_observers_kwargs()
- self.notify_observers('cv_metrics', **kwargs)
-
- def _iterations(self, max_iter, progbar=None):
- """Iterate method.
-
- Iterate the update step of the given algorithm.
-
- Parameters
- ----------
- max_iter : int
- Maximum number of iterations
- progbar : progressbar.ProgressBar
- Progress bar (default is ``None``)
-
- """
- for idx in range(max_iter):
- self.idx = idx
- self._update()
-
- # Calling metrics every metric_call_period cycle
- # Also calculate at the end (max_iter or at convergence)
- # We do not call metrics if metrics is empty or metric call
- # period is None
- if self.metrics and self.metric_call_period is not None:
-
- metric_conditions = (
- self.idx % self.metric_call_period == 0
- or self.idx == (max_iter - 1)
- or self.converge,
- )
-
- if metric_conditions:
- self._compute_metrics()
-
- if self.converge:
- if self.verbose:
- print(' - Converged!')
- break
-
- if not isinstance(progbar, type(None)):
- progbar.update(idx)
-
- def _run_alg(self, max_iter):
- """Run algorithm.
-
- Run the update step of a given algorithm up to the maximum number of
- iterations.
-
- Parameters
- ----------
- max_iter : int
- Maximum number of iterations
-
- """
- if self.progress:
- with ProgressBar(
- redirect_stdout=True,
- max_value=max_iter,
- ) as progbar:
- self._iterations(max_iter, progbar=progbar)
- else:
- self._iterations(max_iter)
-
-
class FISTA(object):
- """FISTA.
+ r"""FISTA.
This class is inherited by optimisation classes to speed up convergence
The parameters for the modified FISTA are as described in :cite:`liang2018`
- (p, q, r)_lazy or in :cite:`chambolle2015` (a_cd).
+ :math:`(p, q, r)`-lazy or in :cite:`chambolle2015` (a_cd).
The restarting strategies are those described in :cite:`liang2018`,
algorithms 4-5.
Parameters
----------
restart_strategy: str or None
- name of the restarting strategy. If None, there is no restarting.
- (Default is ``None``)
+ Name of the restarting strategy, if ``None``, there is no restarting
+ (default is ``None``)
min_beta: float or None
- the minimum beta when using the greedy restarting strategy.
- (Default is ``None``)
+ The minimum :math:`\beta` value when using the greedy restarting
+ strategy (default is ``None``)
s_greedy: float or None
- parameter for the safeguard comparison in the greedy restarting
- strategy. It has to be > 1.
- (Default is ``None``)
+ Parameter for the safeguard comparison in the greedy restarting
+ strategy, it must be > 1
+ (default is ``None``)
xi_restart: float or None
- mutlitplicative parameter for the update of beta in the greedy
+        Multiplicative parameter for the update of beta in the greedy
restarting strategy and for the update of r_lazy in the adaptive
- restarting strategies. It has to be > 1.
- (Default is None)
+ restarting strategies, it must be > 1
+ (default is ``None``)
a_cd: float or None
- parameter for the update of lambda in Chambolle-Dossal mode. If None
- the mode of the algorithm is the regular FISTA, else the mode is
- Chambolle-Dossal. It has to be > 2.
+ Parameter for the update of lambda in Chambolle-Dossal mode, if
+ ``None`` the mode of the algorithm is the regular FISTA, else the mode
+ is Chambolle-Dossal, it must be > 2
p_lazy: float
- parameter for the update of lambda in Fista-Mod. It has to be in
- ]0, 1].
+ Parameter for the update of lambda in Fista-Mod, it must satisfy
+ :math:`p \in ]0, 1]`
q_lazy: float
- parameter for the update of lambda in Fista-Mod. It has to be in
- ]0, (2-p)**2].
+ Parameter for the update of lambda in Fista-Mod, it must satisfy
+ :math:`q \in ]0, (2-p)^2]`
r_lazy: float
- parameter for the update of lambda in Fista-Mod. It has to be in
- ]0, 4].
+ Parameter for the update of lambda in Fista-Mod, it must satisfy
+ :math:`r \in ]0, 4]`
"""
@@ -441,7 +118,7 @@ def _check_restart_params(
s_greedy,
xi_restart,
):
- """Check restarting parameters.
+ r"""Check restarting parameters.
This method checks that the restarting parameters are set and satisfy
the correct assumptions. It also checks that the current mode is
@@ -450,25 +127,23 @@ def _check_restart_params(
Parameters
----------
restart_strategy: str or None
- name of the restarting strategy. If None, there is no restarting.
- (Default is ``None``)
+ Name of the restarting strategy, if ``None``, there is no
+ restarting (default is ``None``)
min_beta: float or None
- the minimum beta when using the greedy restarting strategy.
- (Default is ``None``)
+ The minimum :math:`\beta` value when using the greedy restarting
+ strategy (default is ``None``)
s_greedy: float or None
- parameter for the safeguard comparison in the greedy restarting
- strategy. It has to be > 1.
- (Default is ``None``)
+ Parameter for the safeguard comparison in the greedy restarting
+ strategy, it must be > 1 (default is ``None``)
xi_restart: float or None
- mutlitplicative parameter for the update of beta in the greedy
+            Multiplicative parameter for the update of beta in the greedy
restarting strategy and for the update of r_lazy in the adaptive
- restarting strategies. It has to be > 1.
- (Default is ``None``)
+ restarting strategies, it must be > 1 (default is ``None``)
Returns
-------
bool
- True
+ ``True``
Raises
------
@@ -500,21 +175,21 @@ def _check_restart_params(
return True
def is_restart(self, z_old, x_new, x_old):
- """Check whether the algorithm needs to restart.
+ r"""Check whether the algorithm needs to restart.
This method implements the checks necessary to tell whether the
algorithm needs to restart depending on the restarting strategy.
It also updates the FISTA parameters according to the restarting
- strategy (namely beta and r).
+ strategy (namely :math:`\beta` and :math:`r`).
Parameters
----------
z_old: numpy.ndarray
- Corresponds to y_n in :cite:`liang2018`.
+ Corresponds to :math:`y_n` in :cite:`liang2018`.
x_new: numpy.ndarray
- Corresponds to x_{n+1} in :cite:`liang2018`.
+            Corresponds to :math:`x_{n+1}` in :cite:`liang2018`.
x_old: numpy.ndarray
- Corresponds to x_n in :cite:`liang2018`.
+ Corresponds to :math:`x_n` in :cite:`liang2018`.
Returns
-------
@@ -523,8 +198,8 @@ def is_restart(self, z_old, x_new, x_old):
Notes
-----
- Implements restarting and safeguarding steps in alg 4-5 o
- :cite:`liang2018`
+ Implements restarting and safeguarding steps in algorithms 4-5 of
+ :cite:`liang2018`.
"""
xp = backend.get_array_module(x_new)
@@ -550,20 +225,20 @@ def is_restart(self, z_old, x_new, x_old):
return criterion
def update_beta(self, beta):
- """Update beta.
+ r"""Update :math:`\beta`.
- This method updates beta only in the case of safeguarding (should only
- be done in the greedy restarting strategy).
+ This method updates :math:`\beta` only in the case of safeguarding
+ (should only be done in the greedy restarting strategy).
Parameters
----------
beta: float
- The beta parameter
+ The :math:`\beta` parameter
Returns
-------
float
- The new value for the beta parameter
+ The new value for the :math:`\beta` parameter
"""
if self._safeguard:
@@ -573,25 +248,25 @@ def update_beta(self, beta):
return beta
def update_lambda(self, *args, **kwargs):
- """Update lambda.
+ r"""Update :math:`\lambda`.
- This method updates the value of lambda
+ This method updates the value of :math:`\lambda`.
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
-------
float
- Current lambda value
+ Current :math:`\lambda` value
Notes
-----
- Implements steps 3 and 4 from algoritm 10.7 in :cite:`bauschke2009`
+        Implements steps 3 and 4 from algorithm 10.7 in :cite:`bauschke2009`.
"""
if self.restart_strategy == 'greedy':
@@ -612,29 +287,30 @@ def update_lambda(self, *args, **kwargs):
class ForwardBackward(SetUp):
- """Forward-Backward optimisation.
+ r"""Forward-Backward optimisation.
This class implements standard forward-backward optimisation with an the
- option to use the FISTA speed-up
+ option to use the FISTA speed-up.
Parameters
----------
x : numpy.ndarray
Initial guess for the primal variable
- grad : class
- Gradient operator class
- prox : class
- Proximity operator class
- cost : class or str, optional
- Cost function class (default is 'auto'); Use 'auto' to automatically
- generate a costObj instance
+ grad
+ Gradient operator class instance
+ prox
+ Proximity operator class instance
+ cost : class instance or str, optional
+ Cost function class instance (default is ``'auto'``); Use ``'auto'`` to
+ automatically generate a ``costObj`` instance
beta_param : float, optional
- Initial value of the beta parameter (default is ``1.0``)
+ Initial value of the beta parameter, :math:`\beta` (default is ``1.0``)
lambda_param : float, optional
- Initial value of the lambda parameter (default is ```1.0``)
- beta_update : function, optional
+ Initial value of the lambda parameter, :math:`\lambda`
+        (default is ``1.0``)
+ beta_update : callable, optional
Beta parameter update method (default is ``None``)
- lambda_update : function or str, optional
+ lambda_update : callable or str, optional
Lambda parameter update method (default is 'fista')
auto_iterate : bool, optional
Option to automatically begin iterations upon initialisation (default
@@ -642,13 +318,24 @@ class ForwardBackward(SetUp):
Notes
-----
- The `beta_param` can also be set using the keyword `step_size`, which will
- override the value of `beta_param`.
+ The ``beta_param`` can also be set using the keyword ``step_size``, which
+ will override the value of ``beta_param``.
+
+    The following state variables are available for metrics measurements at
+    each iteration:
+
+ * ``'x_new'`` : new estimate of :math:`x`
+ * ``'z_new'`` : new estimate of :math:`z` (adjoint representation of
+ :math:`x`).
+ * ``'idx'`` : index of the iteration.
See Also
--------
FISTA : complementary class
- SetUp : parent class
+ modopt.opt.algorithms.base.SetUp : parent class
+ modopt.opt.cost.costObj : cost object class
+ modopt.opt.gradient : gradient operator classes
+ modopt.opt.proximity : proximity operator classes
"""
@@ -731,7 +418,7 @@ def _update_param(self):
"""Update parameters.
This method updates the values of the algorthm parameters with the
- methods provided
+ methods provided.
"""
# Update the gamma parameter.
@@ -745,11 +432,11 @@ def _update_param(self):
def _update(self):
"""Update.
- This method updates the current reconstruction
+ This method updates the current reconstruction.
Notes
-----
- Implements algorithm 10.7 (or 10.5) from :cite:`bauschke2009`
+ Implements algorithm 10.7 (or 10.5) from :cite:`bauschke2009`.
"""
# Step 1 from alg.10.7.
@@ -783,8 +470,8 @@ def _update(self):
def iterate(self, max_iter=150):
"""Iterate.
- This method calls update until either convergence criteria is met or
- the maximum number of iterations is reached
+ This method calls update until either the convergence criteria is met
+ or the maximum number of iterations is reached.
Parameters
----------
@@ -820,8 +507,8 @@ def get_notify_observers_kwargs(self):
def retrieve_outputs(self):
"""Retireve outputs.
- Declare the outputs of the algorithms as attributes: x_final,
- y_final, metrics.
+ Declare the outputs of the algorithms as attributes: ``x_final``,
+ ``y_final``, ``metrics``.
"""
metrics = {}
@@ -831,28 +518,30 @@ def retrieve_outputs(self):
class GenForwardBackward(SetUp):
- """Generalized Forward-Backward Algorithm.
+ r"""Generalized Forward-Backward Algorithm.
- This class implements algorithm 1 from :cite:`raguet2011`
+ This class implements algorithm 1 from :cite:`raguet2011`.
Parameters
----------
x : list, tuple or numpy.ndarray
Initial guess for the primal variable
- grad : class instance
+ grad
Gradient operator class
prox_list : list
List of proximity operator class instances
- cost : class or str, optional
- Cost function class (default is 'auto'); Use 'auto' to automatically
- generate a costObj instance
+ cost : class instance or str, optional
+ Cost function class instance (default is ``'auto'``); Use ``'auto'`` to
+ automatically generate a ``costObj`` instance
gamma_param : float, optional
- Initial value of the gamma parameter (default is ``1.0``)
+ Initial value of the gamma parameter, :math:`\gamma`
+ (default is ``1.0``)
lambda_param : float, optional
- Initial value of the lambda parameter (default is ``1.0``)
- gamma_update : function, optional
+ Initial value of the lambda parameter, :math:`\lambda`
+ (default is ``1.0``)
+ gamma_update : callable, optional
Gamma parameter update method (default is ``None``)
- lambda_update : function, optional
+ lambda_update : callable, optional
Lambda parameter parameter update method (default is ``None``)
weights : list, tuple or numpy.ndarray, optional
Proximity operator weights (default is ``None``)
@@ -862,12 +551,23 @@ class GenForwardBackward(SetUp):
Notes
-----
- The `gamma_param` can also be set using the keyword `step_size`, which will
- override the value of `gamma_param`.
+ The ``gamma_param`` can also be set using the keyword ``step_size``, which
+ will override the value of ``gamma_param``.
+
+    The following state variables are available for metrics measurements at
+    each iteration:
+
+ * ``'x_new'`` : new estimate of :math:`x`
+ * ``'z_new'`` : new estimate of :math:`z` (adjoint representation of
+ :math:`x`).
+ * ``'idx'`` : index of the iteration.
See Also
--------
- SetUp : parent class
+ modopt.opt.algorithms.base.SetUp : parent class
+ modopt.opt.cost.costObj : cost object class
+ modopt.opt.gradient : gradient operator classes
+ modopt.opt.proximity : proximity operator classes
"""
@@ -951,7 +651,7 @@ def __init__(
def _set_weights(self, weights):
"""Set weights.
- This method sets weights on each of the proximty operators provided
+        This method sets weights on each of the proximity operators provided.
Parameters
----------
@@ -999,7 +699,7 @@ def _update_param(self):
"""Update parameters.
This method updates the values of the algorthm parameters with the
- methods provided
+ methods provided.
"""
# Update the gamma parameter.
@@ -1013,11 +713,11 @@ def _update_param(self):
def _update(self):
"""Update.
- This method updates the current reconstruction
+ This method updates the current reconstruction.
Notes
-----
- Implements algorithm 1 from :cite:`raguet2011`
+ Implements algorithm 1 from :cite:`raguet2011`.
"""
# Calculate gradient for current iteration.
@@ -1090,269 +790,8 @@ def get_notify_observers_kwargs(self):
def retrieve_outputs(self):
"""Retrieve outputs.
- Declare the outputs of the algorithms as attributes: x_final,
- y_final, metrics.
-
- """
- metrics = {}
- for obs in self._observers['cv_metrics']:
- metrics[obs.name] = obs.retrieve_metrics()
- self.metrics = metrics
-
-
-class Condat(SetUp):
- """Condat optimisation.
-
- This class implements algorithm 3.1 from :cite:`condat2013`
-
- Parameters
- ----------
- x : numpy.ndarray
- Initial guess for the primal variable
- y : numpy.ndarray
- Initial guess for the dual variable
- grad : class instance
- Gradient operator class
- prox : class instance
- Proximity primal operator class
- prox_dual : class instance
- Proximity dual operator class
- linear : class instance, optional
- Linear operator class (default is ``None``)
- cost : class or str, optional
- Cost function class (default is 'auto'); Use 'auto' to automatically
- generate a costObj instance
- reweight : class instance, optional
- Reweighting class
- rho : float, optional
- Relaxation parameter (default is ``0.5``)
- sigma : float, optional
- Proximal dual parameter (default is ``1.0``)
- tau : float, optional
- Proximal primal paramater (default is ``1.0``)
- rho_update : function, optional
- Relaxation parameter update method (default is ``None``)
- sigma_update : function, optional
- Proximal dual parameter update method (default is ``None``)
- tau_update : function, optional
- Proximal primal parameter update method (default is ``None``)
- auto_iterate : bool, optional
- Option to automatically begin iterations upon initialisation (default
- is ``True``)
- max_iter : int, optional
- Maximum number of iterations (default is ``150``)
- n_rewightings : int, optional
- Number of reweightings to perform (default is ``1``)
-
- Notes
- -----
- The `tau_param` can also be set using the keyword `step_size`, which will
- override the value of `tau_param`.
-
- See Also
- --------
- SetUp : parent class
-
- """
-
- def __init__(
- self,
- x,
- y,
- grad,
- prox,
- prox_dual,
- linear=None,
- cost='auto',
- reweight=None,
- rho=0.5,
- sigma=1.0,
- tau=1.0,
- rho_update=None,
- sigma_update=None,
- tau_update=None,
- auto_iterate=True,
- max_iter=150,
- n_rewightings=1,
- metric_call_period=5,
- metrics=None,
- **kwargs,
- ):
-
- # Set default algorithm properties
- super().__init__(
- metric_call_period=metric_call_period,
- metrics=metrics,
- **kwargs,
- )
-
- # Set the initial variable values
- for input_data in (x, y):
- self._check_input_data(input_data)
-
- self._x_old = self.xp.copy(x)
- self._y_old = self.xp.copy(y)
-
- # Set the algorithm operators
- for operator in (grad, prox, prox_dual, linear, cost):
- self._check_operator(operator)
-
- self._grad = grad
- self._prox = prox
- self._prox_dual = prox_dual
- self._reweight = reweight
- if isinstance(linear, type(None)):
- self._linear = Identity()
- else:
- self._linear = linear
- if cost == 'auto':
- self._cost_func = costObj([
- self._grad,
- self._prox,
- self._prox_dual,
- ])
- else:
- self._cost_func = cost
-
- # Set the algorithm parameters
- for param_val in (rho, sigma, tau):
- self._check_param(param_val)
-
- self._rho = rho
- self._sigma = sigma
- self._tau = self.step_size or tau
-
- # Set the algorithm parameter update methods
- for param_update in (rho_update, sigma_update, tau_update):
- self._check_param_update(param_update)
-
- self._rho_update = rho_update
- self._sigma_update = sigma_update
- self._tau_update = tau_update
-
- # Automatically run the algorithm
- if auto_iterate:
- self.iterate(max_iter=max_iter, n_rewightings=n_rewightings)
-
- def _update_param(self):
- """Update parameters.
-
- This method updates the values of the algorthm parameters with the
- methods provided
-
- """
- # Update relaxation parameter.
- if not isinstance(self._rho_update, type(None)):
- self._rho = self._rho_update(self._rho)
-
- # Update proximal dual parameter.
- if not isinstance(self._sigma_update, type(None)):
- self._sigma = self._sigma_update(self._sigma)
-
- # Update proximal primal parameter.
- if not isinstance(self._tau_update, type(None)):
- self._tau = self._tau_update(self._tau)
-
- def _update(self):
- """Update.
-
- This method updates the current reconstruction
-
- Notes
- -----
- Implements equation 9 (algorithm 3.1) from :cite:`condat2013`
-
- - primal proximity operator set up for positivity constraint
-
- """
- # Step 1 from eq.9.
- self._grad.get_grad(self._x_old)
-
- x_prox = self._prox.op(
- self._x_old - self._tau * self._grad.grad - self._tau
- * self._linear.adj_op(self._y_old),
- )
-
- # Step 2 from eq.9.
- y_temp = (
- self._y_old + self._sigma
- * self._linear.op(2 * x_prox - self._x_old)
- )
-
- y_prox = (
- y_temp - self._sigma
- * self._prox_dual.op(
- y_temp / self._sigma,
- extra_factor=(1.0 / self._sigma),
- )
- )
-
- # Step 3 from eq.9.
- self._x_new = self._rho * x_prox + (1 - self._rho) * self._x_old
- self._y_new = self._rho * y_prox + (1 - self._rho) * self._y_old
-
- del x_prox, y_prox, y_temp
-
- # Update old values for next iteration.
- self.xp.copyto(self._x_old, self._x_new)
- self.xp.copyto(self._y_old, self._y_new)
-
- # Update parameter values for next iteration.
- self._update_param()
-
- # Test cost function for convergence.
- if self._cost_func:
- self.converge = (
- self.any_convergence_flag()
- or self._cost_func.get_cost(self._x_new, self._y_new)
- )
-
- def iterate(self, max_iter=150, n_rewightings=1):
- """Iterate.
-
- This method calls update until either convergence criteria is met or
- the maximum number of iterations is reached
-
- Parameters
- ----------
- max_iter : int, optional
- Maximum number of iterations (default is ``150``)
- n_rewightings : int, optional
- Number of reweightings to perform (default is ``1``)
-
- """
- self._run_alg(max_iter)
-
- if not isinstance(self._reweight, type(None)):
- for _ in range(n_rewightings):
- self._reweight.reweight(self._linear.op(self._x_new))
- self._run_alg(max_iter)
-
- # retrieve metrics results
- self.retrieve_outputs()
- # rename outputs as attributes
- self.x_final = self._x_new
- self.y_final = self._y_new
-
- def get_notify_observers_kwargs(self):
- """Notify observers.
-
- Return the mapping between the metrics call and the iterated
- variables.
-
- Returns
- -------
- notify_observers_kwargs : dict,
- The mapping between the iterated variables
-
- """
- return {'x_new': self._x_new, 'y_new': self._y_new, 'idx': self.idx}
-
- def retrieve_outputs(self):
- """Retrieve outputs.
-
- Declare the outputs of the algorithms as attributes: x_final,
- y_final, metrics.
+ Declare the outputs of the algorithms as attributes: ``x_final``,
+ ``y_final``, ``metrics``.
"""
metrics = {}
@@ -1362,46 +801,63 @@ def retrieve_outputs(self):
class POGM(SetUp):
- """Proximal Optimised Gradient Method.
+ r"""Proximal Optimised Gradient Method.
- This class implements algorithm 3 from :cite:`kim2017`
+ This class implements algorithm 3 from :cite:`kim2017`.
Parameters
----------
u : numpy.ndarray
- Initial guess for the u variable
+ Initial guess for the :math:`u` variable
x : numpy.ndarray
- Initial guess for the x variable (primal)
+ Initial guess for the :math:`x` variable (primal)
y : numpy.ndarray
- Initial guess for the y variable
+ Initial guess for the :math:`y` variable
z : numpy.ndarray
- Initial guess for the z variable
- grad : class
+ Initial guess for the :math:`z` variable
+ grad
Gradient operator class
- prox : class
+ prox
Proximity operator class
- cost : class or str, optional
- Cost function class (default is 'auto'); Use 'auto' to automatically
- generate a costObj instance
+ cost : class instance or str, optional
+ Cost function class instance (default is ``'auto'``); Use ``'auto'`` to
+ automatically generate a ``costObj`` instance
linear : class instance, optional
- Linear operator class (default is ``None``)
+ Linear operator class instance (default is ``None``)
beta_param : float, optional
- Initial value of the beta parameter (default is ``1.0``).
+ Initial value of the beta parameter, :math:`\beta` (default is ``1.0``)
This corresponds to (1 / L) in :cite:`kim2017`
sigma_bar : float, optional
- Value of the shrinking parameter sigma bar (default is ``1.0``)
+ Value of the shrinking parameter, :math:`\bar{\sigma}`
+ (default is ``1.0``)
auto_iterate : bool, optional
Option to automatically begin iterations upon initialisation (default
is ``True``)
Notes
-----
- The `beta_param` can also be set using the keyword `step_size`, which will
- override the value of `beta_param`.
+ The ``beta_param`` can also be set using the keyword ``step_size``, which
+ will override the value of ``beta_param``.
+
+    The following state variables are available for metrics measurements at
+    each iteration:
+
+ * ``'u_new'`` : new estimate of :math:`u`
+ * ``'x_new'`` : new estimate of :math:`x`
+ * ``'y_new'`` : new estimate of :math:`y`
+ * ``'z_new'`` : new estimate of :math:`z`
+ * ``'xi'``: :math:`\xi` variable
+ * ``'t'`` : new estimate of :math:`t`
+ * ``'sigma'``: :math:`\sigma` variable
+ * ``'idx'`` : index of the iteration.
See Also
--------
- SetUp : parent class
+ modopt.opt.algorithms.base.SetUp : parent class
+ modopt.opt.cost.costObj : cost object class
+ modopt.opt.gradient : gradient operator classes
+ modopt.opt.proximity : proximity operator classes
+ modopt.opt.linear : linear operator classes
"""
@@ -1477,11 +933,11 @@ def __init__(
def _update(self):
"""Update.
- This method updates the current reconstruction
+ This method updates the current reconstruction.
Notes
-----
- Implements algorithm 3 from :cite:`kim2017`
+ Implements algorithm 3 from :cite:`kim2017`.
"""
# Step 4 from alg. 3
@@ -1584,8 +1040,8 @@ def get_notify_observers_kwargs(self):
def retrieve_outputs(self):
"""Retrieve outputs.
- Declare the outputs of the algorithms as attributes: x_final,
- y_final, metrics.
+ Declare the outputs of the algorithms as attributes: ``x_final``,
+ ``y_final``, ``metrics``.
"""
metrics = {}
diff --git a/modopt/opt/algorithms/gradient_descent.py b/modopt/opt/algorithms/gradient_descent.py
new file mode 100644
index 00000000..f3fe4b10
--- /dev/null
+++ b/modopt/opt/algorithms/gradient_descent.py
@@ -0,0 +1,466 @@
+# -*- coding: utf-8 -*-
+"""Gradient Descent Algorithms."""
+
+import numpy as np
+
+from modopt.opt.algorithms.base import SetUp
+from modopt.opt.cost import costObj
+
+
+class GenericGradOpt(SetUp):
+ r"""Generic Gradient descent operator.
+
+ Performs the descent algorithm in the direction :math:`m_k` at speed
+ :math:`s_k`.
+
+
+ Parameters
+ ----------
+ x: numpy.ndarray
+ Initial value
+ grad
+ Gradient operator class instance
+ prox
+ Proximity operator class instance
+ cost : class instance or str, optional
+ Cost function class instance (default is ``'auto'``); Use ``'auto'`` to
+ automatically generate a ``costObj`` instance
+ eta: float
+ Descent step, :math:`\eta` (default is ``1.0``)
+ eta_update: callable
+ If not ``None``, used to update :math:`\eta` at each step
+ (default is ``None``)
+ epsilon: float
+ Numerical stability constant for the gradient, :math:`\epsilon`
+ (default is ``1e-6``)
+ epoch_size: int
+ Size of epoch for the descent (default is ``1``)
+ metric_call_period: int
+ The period of iteration on which metrics will be computed
+ (default is ``5``)
+ metrics: dict
+ If not ``None``, specify which metrics to use (default is ``None``)
+
+ Notes
+ -----
+ The Gradient descent step is defined as:
+
+ .. math:: x_{k+1} = x_k - \frac{\eta}{\sqrt{s_k + \epsilon}} m_k
+
+ where:
+
+ * :math:`m_k` is the gradient direction
+ * :math:`\eta` is the gradient descent step
+ * :math:`s_k` is the gradient "speed"
+
+ At each Epoch, an optional Proximal step can be performed.
+
+ The following state variables are available for metrics measurements:
+
+ * ``'x_new'`` : new estimate of the iterations
+ * ``'dir_grad'`` : direction of the gradient descent step
+ * ``'speed_grad'`` : speed for the gradient descent step
+ * ``'idx'`` : index of the iteration being reconstructed.
+
+ See Also
+ --------
+ modopt.opt.algorithms.base.SetUp : parent class
+ modopt.opt.cost.costObj : cost object class
+
+ """
+
+ def __init__(
+ self,
+ x,
+ grad,
+ prox,
+ cost,
+ eta=1.0,
+ eta_update=None,
+ epsilon=1e-6,
+ epoch_size=1,
+ metric_call_period=5,
+ metrics=None,
+ **kwargs,
+ ):
+ # Set the initial variable values
+ if metrics is None:
+ metrics = {}
+ # Set default algorithm properties
+ super().__init__(
+ metric_call_period=metric_call_period,
+ metrics=metrics,
+ **kwargs,
+ )
+ self.iter = 0
+ self._check_input_data(x)
+ self._x_old = np.copy(x)
+ self._x_new = np.copy(x)
+ self._speed_grad = np.zeros(x.shape, dtype=float)
+ self._dir_grad = np.zeros_like(x)
+ # Set the algorithm operators
+ for operator in (grad, prox, cost):
+ self._check_operator(operator)
+ self._grad = grad
+ self._prox = prox
+ if cost == 'auto':
+ self._cost_func = costObj([self._grad, self._prox])
+ else:
+ self._cost_func = cost
+ # Set the algorithm parameters
+ for param_val in (eta, epsilon):
+ self._check_param(param_val)
+ self._eta = eta
+ self._eps = epsilon
+
+ # Set the algorithm parameter update methods
+ self._check_param_update(eta_update)
+ self._eta_update = eta_update
+ self.idx = 0
+ self.epoch_size = epoch_size
+
+ def iterate(self, max_iter=150):
+ """Iterate.
+
+ This method calls update until either convergence criteria is met or
+ the maximum number of iterations is reached.
+
+ Parameters
+ ----------
+ max_iter : int, optional
+ Maximum number of iterations (default is ``150``)
+
+ """
+ self._run_alg(max_iter)
+
+ # retrieve metrics results
+ self.retrieve_outputs()
+
+ self.x_final = self._x_new
+
+ def _update(self):
+ """Update.
+
+ This method updates the current reconstruction.
+
+ """
+ self._grad.get_grad(self._x_old)
+ self._update_grad_dir(self._grad.grad)
+ self._update_grad_speed(self._grad.grad)
+ step = self._eta / (np.sqrt(self._speed_grad) + self._eps)
+ self._x_new = self._x_old - step * self._dir_grad
+ if self.idx % self.epoch_size == 0:
+ self.reset()
+ self._update_reg(step)
+ self._x_old = self._x_new.copy()
+ if self._eta_update is not None:
+ self._eta = self._eta_update(self._eta, self.idx)
+ # Test cost function for convergence.
+ if self._cost_func:
+ self.converge = (
+ self.any_convergence_flag()
+ or self._cost_func.get_cost(self._x_new)
+ )
+
+ def _update_grad_dir(self, grad):
+ """Update the gradient descent direction.
+
+ Parameters
+ ----------
+ grad: numpy.ndarray
+ The gradient direction
+
+ """
+ self._dir_grad = grad
+
+ def _update_grad_speed(self, grad):
+ """Update the gradient descent speed.
+
+ Parameters
+ ----------
+ grad: numpy.ndarray
+ The gradient direction
+
+ """
+ pass
+
+ def _update_reg(self, factor):
+ """Regularisation step.
+
+ Parameters
+ ----------
+ factor: float or numpy.ndarray
+ Extra factor for the proximal step
+
+ """
+ self._x_new = self._prox.op(self._x_new, extra_factor=factor)
+
+ def get_notify_observers_kwargs(self):
+ """Notify observers.
+
+ Return the mapping between the metrics call and the iterated
+ variables.
+
+ Returns
+ -------
+ dict
+ The mapping between the iterated variables
+
+ """
+ return {
+ 'x_new': self._x_new,
+ 'dir_grad': self._dir_grad,
+ 'speed_grad': self._speed_grad,
+ 'idx': self.idx,
+ }
+
+ def retrieve_outputs(self):
+ """Retrieve outputs.
+
+ Declare the outputs of the algorithms as attributes: x_final,
+ y_final, metrics.
+
+ """
+ metrics = {}
+ for obs in self._observers['cv_metrics']:
+ metrics[obs.name] = obs.retrieve_metrics()
+ self.metrics = metrics
+
+ def reset(self):
+ """Reset internal state of the algorithm."""
+ pass
+
+
+class VanillaGenericGradOpt(GenericGradOpt):
+ """Vanilla Descent Algorithm.
+
+ Fixed step size and no numerical precision threshold.
+
+ See Also
+ --------
+ GenericGradOpt : parent class
+
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # no scale factor
+ self._speed_grad = 1.0
+ self._eps = 0
+
+
+class AdaGenericGradOpt(GenericGradOpt):
+ r"""Generic Grad descent Algorithm with ADA acceleration scheme.
+
+ Notes
+ -----
+ For AdaGrad (Section 4.2 of :cite:`ruder2017`) the gradient is
+ preconditioned using a speed update:
+
+ .. math:: s_k = \sum_{i=0}^k g_k * g_k
+
+
+ See Also
+ --------
+ GenericGradOpt : parent class
+
+ """
+
+ def _update_grad_speed(self, grad):
+ """Ada Acceleration Scheme.
+
+ Parameters
+ ----------
+ grad: numpy.ndarray
+ The new gradient for updating the speed
+
+ """
+ self._speed_grad += abs(grad) ** 2
+
+
+class RMSpropGradOpt(GenericGradOpt):
+ r"""RMSprop Gradient descent algorithm.
+
+ Parameters
+ ----------
+ gamma: float
+ Update weight for the speed of descent, :math:`\gamma`
+ (default is ``0.5``)
+
+ Raises
+ ------
+ ValueError
+ If :math:`\gamma` is outside :math:`]0,1[`
+
+ Notes
+ -----
+ The gradient speed of RMSProp (Section 4.5 of :cite:`ruder2017`) is
+ defined as:
+
+ .. math:: s_k = \gamma s_{k-1} + (1-\gamma) * |\nabla f|^2
+
+ See Also
+ --------
+ GenericGradOpt : parent class
+
+ """
+
+ def __init__(self, *args, gamma=0.5, **kwargs):
+ super().__init__(*args, **kwargs)
+ if gamma < 0 or gamma > 1:
+ raise ValueError('gamma is outside of range [0,1]')
+ self._check_param(gamma)
+ self._gamma = gamma
+
+ def _update_grad_speed(self, grad):
+ """Rmsprop update speed.
+
+ Parameters
+ ----------
+ grad: numpy.ndarray
+ The new gradient for updating the speed
+
+ """
+ self._speed_grad = (
+ self._gamma * self._speed_grad + (1 - self._gamma) * abs(grad) ** 2
+ )
+
+
+class MomentumGradOpt(GenericGradOpt):
+ r"""Momentum (Heavy-ball) descent algorithm.
+
+ Parameters
+ ----------
+ beta: float
+ update weight for the momentum, :math:`\beta` (default is ``0.9``)
+
+ Notes
+ -----
+ The Momentum (Section 4.1 of :cite:`ruder2017`) update is defined as:
+
+ .. math:: m_k = \beta * m_{k-1} + \nabla f(x_k)
+
+ See Also
+ --------
+ GenericGradOpt : parent class
+
+ """
+
+ def __init__(self, *args, beta=0.9, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._check_param(beta)
+ self._beta = beta
+ # no scale factor
+ self._speed_grad = 1.0
+ self._eps = 0
+
+ def _update_grad_dir(self, grad):
+ """Momentum gradient direction update.
+
+ Parameters
+ ----------
+ grad: numpy.ndarray
+ The new gradient for updating the speed
+
+ """
+ self._dir_grad = self._beta * self._dir_grad + grad
+
+ def reset(self):
+ """Reset the gradient direction."""
+ self._dir_grad = np.zeros_like(self._x_new)
+
+
+class ADAMGradOpt(GenericGradOpt):
+ r"""ADAM optimizer.
+
+ Parameters
+ ----------
+ gamma: float
+ Update weight, :math:`\gamma`, for the direction in :math:`]0,1[`
+ beta: float
+ Update weight, :math:`\beta`, for the speed in :math:`]0,1[`
+
+ Raises
+ ------
+ ValueError
+ If gamma or beta is outside :math:`]0,1[`
+
+ Notes
+ -----
+ The ADAM optimizer (Section 4.6 of :cite:`ruder2017`) is defined as:
+
+ .. math::
+ m_{k+1} = \frac{1}{1-\beta^k}(\beta*m_{k}+(1-\beta)*|\nabla f_k|^2)
+ .. math::
+ s_{k+1} = \frac{1}{1-\gamma^k}(\gamma*s_k+(1-\gamma)*\nabla f_k)
+
+ See Also
+ --------
+ GenericGradOpt : parent class
+
+ """
+
+ def __init__(self, *args, gamma=0.9, beta=0.9, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._check_param(gamma)
+ self._check_param(beta)
+ if gamma < 0 or gamma >= 1:
+ raise ValueError('gamma is outside of range [0,1]')
+ if beta < 0 or beta >= 1:
+ raise ValueError('beta is outside of range [0,1]')
+ self._gamma = gamma
+ self._beta = beta
+ self._beta_pow = 1
+ self._gamma_pow = 1
+
+ def _update_grad_dir(self, grad):
+ """ADAM Update of gradient direction."""
+ self._beta_pow *= self._beta
+
+ self._dir_grad = (1.0 / (1.0 - self._beta_pow)) * (
+ self._beta * self._dir_grad + (1 - self._beta) * grad
+ )
+
+ def _update_grad_speed(self, grad):
+ """ADAM Update of gradient speed."""
+ self._gamma_pow *= self._gamma
+ self._speed_grad = (1.0 / (1.0 - self._gamma_pow)) * (
+ self._gamma * self._speed_grad + (1 - self._gamma) * abs(grad) ** 2
+ )
+
+
+class SAGAOptGradOpt(GenericGradOpt):
+ """SAGA optimizer.
+
+ Implements equation 7 of :cite:`defazio2014`.
+
+ Notes
+ -----
+ The stochastic part is not handled here, and should be implemented by
+ changing the ``obs_data`` between each call to the ``_update`` function.
+
+ See Also
+ --------
+ GenericGradOpt : parent class
+
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._grad_memory = np.zeros(
+ (self.epoch_size, *self._x_old.shape),
+ dtype=self._x_old.dtype,
+ )
+
+ def _update_grad_dir(self, grad):
+ """SAGA Update gradient direction.
+
+ Parameters
+ ----------
+ grad: numpy.ndarray
+ The new gradient for updating the speed
+
+ """
+ cycle = self.idx % self.epoch_size
+ self._dir_grad = self._dir_grad - self._grad_memory[cycle] + grad
+ self._grad_memory[cycle] = grad
diff --git a/modopt/opt/algorithms/primal_dual.py b/modopt/opt/algorithms/primal_dual.py
new file mode 100644
index 00000000..c8566969
--- /dev/null
+++ b/modopt/opt/algorithms/primal_dual.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+"""Primal-Dual Algorithms."""
+
+from modopt.opt.algorithms.base import SetUp
+from modopt.opt.cost import costObj
+from modopt.opt.linear import Identity
+
+
+class Condat(SetUp):
+ r"""Condat optimisation.
+
+ This class implements algorithm 3.1 from :cite:`condat2013`.
+
+ Parameters
+ ----------
+ x : numpy.ndarray
+ Initial guess for the primal variable
+ y : numpy.ndarray
+ Initial guess for the dual variable
+ grad
+ Gradient operator class instance
+ prox
+ Proximity primal operator class instance
+ prox_dual
+ Proximity dual operator class instance
+ linear : class instance, optional
+ Linear operator class instance (default is ``None``)
+ cost : class instance or str, optional
+ Cost function class instance (default is ``'auto'``); Use ``'auto'`` to
+ automatically generate a ``costObj`` instance
+ reweight : class instance, optional
+ Reweighting class instance
+ rho : float, optional
+ Relaxation parameter, :math:`\rho` (default is ``0.5``)
+ sigma : float, optional
+ Proximal dual parameter, :math:`\sigma` (default is ``1.0``)
+ tau : float, optional
+ Proximal primal paramater, :math:`\tau` (default is ``1.0``)
+ rho_update : callable, optional
+ Relaxation parameter update method (default is ``None``)
+ sigma_update : callable, optional
+ Proximal dual parameter update method (default is ``None``)
+ tau_update : callable, optional
+ Proximal primal parameter update method (default is ``None``)
+ auto_iterate : bool, optional
+ Option to automatically begin iterations upon initialisation (default
+ is ``True``)
+ max_iter : int, optional
+ Maximum number of iterations (default is ``150``)
+ n_rewightings : int, optional
+ Number of reweightings to perform (default is ``1``)
+
+ Notes
+ -----
+ The ``tau_param`` can also be set using the keyword ``step_size``, which
+ will override the value of ``tau_param``.
+
+
+ The following state variables are available for metrics measurements at
+ each iteration:
+
+ * ``'x_new'`` : new estimate of :math:`x` (primal variable)
+ * ``'y_new'`` : new estimate of :math:`y` (dual variable)
+ * ``'idx'`` : index of the iteration.
+
+ See Also
+ --------
+ modopt.opt.algorithms.base.SetUp : parent class
+ modopt.opt.cost.costObj : cost object class
+ modopt.opt.gradient : gradient operator classes
+ modopt.opt.proximity : proximity operator classes
+ modopt.opt.linear : linear operator classes
+ modopt.opt.reweight : reweighting classes
+
+ """
+
+ def __init__(
+ self,
+ x,
+ y,
+ grad,
+ prox,
+ prox_dual,
+ linear=None,
+ cost='auto',
+ reweight=None,
+ rho=0.5,
+ sigma=1.0,
+ tau=1.0,
+ rho_update=None,
+ sigma_update=None,
+ tau_update=None,
+ auto_iterate=True,
+ max_iter=150,
+ n_rewightings=1,
+ metric_call_period=5,
+ metrics=None,
+ **kwargs,
+ ):
+
+ # Set default algorithm properties
+ super().__init__(
+ metric_call_period=metric_call_period,
+ metrics=metrics,
+ **kwargs,
+ )
+
+ # Set the initial variable values
+ for input_data in (x, y):
+ self._check_input_data(input_data)
+
+ self._x_old = self.xp.copy(x)
+ self._y_old = self.xp.copy(y)
+
+ # Set the algorithm operators
+ for operator in (grad, prox, prox_dual, linear, cost):
+ self._check_operator(operator)
+
+ self._grad = grad
+ self._prox = prox
+ self._prox_dual = prox_dual
+ self._reweight = reweight
+ if isinstance(linear, type(None)):
+ self._linear = Identity()
+ else:
+ self._linear = linear
+ if cost == 'auto':
+ self._cost_func = costObj([
+ self._grad,
+ self._prox,
+ self._prox_dual,
+ ])
+ else:
+ self._cost_func = cost
+
+ # Set the algorithm parameters
+ for param_val in (rho, sigma, tau):
+ self._check_param(param_val)
+
+ self._rho = rho
+ self._sigma = sigma
+ self._tau = self.step_size or tau
+
+ # Set the algorithm parameter update methods
+ for param_update in (rho_update, sigma_update, tau_update):
+ self._check_param_update(param_update)
+
+ self._rho_update = rho_update
+ self._sigma_update = sigma_update
+ self._tau_update = tau_update
+
+ # Automatically run the algorithm
+ if auto_iterate:
+ self.iterate(max_iter=max_iter, n_rewightings=n_rewightings)
+
+ def _update_param(self):
+ """Update parameters.
+
+ This method updates the values of the algorithm parameters with the
+ methods provided.
+
+ """
+ # Update relaxation parameter.
+ if not isinstance(self._rho_update, type(None)):
+ self._rho = self._rho_update(self._rho)
+
+ # Update proximal dual parameter.
+ if not isinstance(self._sigma_update, type(None)):
+ self._sigma = self._sigma_update(self._sigma)
+
+ # Update proximal primal parameter.
+ if not isinstance(self._tau_update, type(None)):
+ self._tau = self._tau_update(self._tau)
+
+ def _update(self):
+ """Update.
+
+ This method updates the current reconstruction.
+
+ Notes
+ -----
+ Implements equation 9 (algorithm 3.1) from :cite:`condat2013`.
+
+ - Primal proximity operator set up for positivity constraint.
+
+ """
+ # Step 1 from eq.9.
+ self._grad.get_grad(self._x_old)
+
+ x_prox = self._prox.op(
+ self._x_old - self._tau * self._grad.grad - self._tau
+ * self._linear.adj_op(self._y_old),
+ )
+
+ # Step 2 from eq.9.
+ y_temp = (
+ self._y_old + self._sigma
+ * self._linear.op(2 * x_prox - self._x_old)
+ )
+
+ y_prox = (
+ y_temp - self._sigma
+ * self._prox_dual.op(
+ y_temp / self._sigma,
+ extra_factor=(1.0 / self._sigma),
+ )
+ )
+
+ # Step 3 from eq.9.
+ self._x_new = self._rho * x_prox + (1 - self._rho) * self._x_old
+ self._y_new = self._rho * y_prox + (1 - self._rho) * self._y_old
+
+ del x_prox, y_prox, y_temp
+
+ # Update old values for next iteration.
+ self.xp.copyto(self._x_old, self._x_new)
+ self.xp.copyto(self._y_old, self._y_new)
+
+ # Update parameter values for next iteration.
+ self._update_param()
+
+ # Test cost function for convergence.
+ if self._cost_func:
+ self.converge = (
+ self.any_convergence_flag()
+ or self._cost_func.get_cost(self._x_new, self._y_new)
+ )
+
+ def iterate(self, max_iter=150, n_rewightings=1):
+ """Iterate.
+
+ This method calls update until either convergence criteria is met or
+ the maximum number of iterations is reached.
+
+ Parameters
+ ----------
+ max_iter : int, optional
+ Maximum number of iterations (default is ``150``)
+ n_rewightings : int, optional
+ Number of reweightings to perform (default is ``1``)
+
+ """
+ self._run_alg(max_iter)
+
+ if not isinstance(self._reweight, type(None)):
+ for _ in range(n_rewightings):
+ self._reweight.reweight(self._linear.op(self._x_new))
+ self._run_alg(max_iter)
+
+ # retrieve metrics results
+ self.retrieve_outputs()
+ # rename outputs as attributes
+ self.x_final = self._x_new
+ self.y_final = self._y_new
+
+ def get_notify_observers_kwargs(self):
+ """Notify observers.
+
+ Return the mapping between the metrics call and the iterated
+ variables.
+
+ Returns
+ -------
+ notify_observers_kwargs : dict,
+ The mapping between the iterated variables
+
+ """
+ return {'x_new': self._x_new, 'y_new': self._y_new, 'idx': self.idx}
+
+ def retrieve_outputs(self):
+ """Retrieve outputs.
+
+ Declare the outputs of the algorithms as attributes: ``x_final``,
+ ``y_final``, ``metrics``.
+
+ """
+ metrics = {}
+ for obs in self._observers['cv_metrics']:
+ metrics[obs.name] = obs.retrieve_metrics()
+ self.metrics = metrics
diff --git a/modopt/opt/cost.py b/modopt/opt/cost.py
index e5690831..3cdfcc50 100644
--- a/modopt/opt/cost.py
+++ b/modopt/opt/cost.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
"""COST FUNCTIONS.
This module contains classes of different cost functions for optimization.
@@ -18,20 +16,20 @@
class costObj(object):
"""Generic cost function object.
- This class updates the cost according to the input cost functio class and
- tests for convergence
+ This class updates the cost according to the input operator classes and
+ tests for convergence.
Parameters
----------
- costFunc : class
- Class for calculating the cost
+ operators : list, tuple or numpy.ndarray
+ List of operators classes containing ``cost`` method
initial_cost : float, optional
Initial value of the cost (default is ``1e6``)
tolerance : float, optional
Tolerance threshold for convergence (default is ``1e-4``)
cost_interval : int, optional
Iteration interval to calculate cost (default is ``1``).
- If `cost_interval` is ``None`` the cost is never calculated,
+ If ``cost_interval`` is ``None`` the cost is never calculated,
thereby saving on computation time.
test_range : int, optional
Number of cost values to be used in test (default is ``4``)
@@ -42,7 +40,7 @@ class costObj(object):
Notes
-----
- The costFunc class must contain a method called `calc_cost()`.
+ The costFunc class must contain a method called ``cost``.
Examples
--------
@@ -96,16 +94,16 @@ def __init__(
self._verbose = verbose
def _check_operators(self):
- """Check Operators.
+ """Check operators.
- This method checks if the input operators have a `cost` method
+ This method checks if the input operators have a ``cost`` method.
Raises
------
TypeError
For invalid operators type
ValueError
- For operators without `cost` method
+ For operators without ``cost`` method
"""
if not isinstance(self._operators, (list, tuple, np.ndarray)):
@@ -123,7 +121,8 @@ def _check_cost(self):
"""Check cost function.
This method tests the cost function for convergence in the specified
- interval of iterations using the last n (test_range) cost values
+ interval of iterations using the last :math:`n` (``test_range``) cost
+ values.
Returns
-------
@@ -175,15 +174,15 @@ def _calc_cost(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
-------
float
- Cost
+ Cost value
"""
return np.sum([op.cost(*args, **kwargs) for op in self._operators])
@@ -195,9 +194,9 @@ def get_cost(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -238,7 +237,7 @@ def get_cost(self, *args, **kwargs):
def plot_cost(self): # pragma: no cover
"""Plot the cost function.
- This method plots the cost function as function of iteration number
+ This method plots the cost function as function of iteration number.
"""
plotCost(self._cost_list, self._plot_output)
diff --git a/modopt/opt/gradient.py b/modopt/opt/gradient.py
index b949a7eb..56004838 100644
--- a/modopt/opt/gradient.py
+++ b/modopt/opt/gradient.py
@@ -18,19 +18,19 @@ class GradParent(object):
"""Gradient Parent Class.
This class defines the basic methods that will be inherited by specific
- gradient classes
+ gradient classes.
Parameters
----------
input_data : numpy.ndarray
The observed data
- op : function
+ op : callable
The operator
- trans_op : function
+ trans_op : callable
The transpose operator
- get_grad : function, optional
+ get_grad : callable, optional
Method for calculating the gradient (default is ``None``)
- cost: function, optional
+ cost: callable, optional
Method for calculating the cost (default is ``None``)
data_type : type, optional
Expected data type of the input data (default is ``None``)
@@ -82,7 +82,16 @@ def __init__(
@property
def obs_data(self):
- """Observed Data."""
+ r"""Observed Data.
+
+ The observed data :math:`\mathbf{y}`.
+
+ Returns
+ -------
+ numpy.ndarray
+ The observed data
+
+ """
return self._obs_data
@obs_data.setter
@@ -101,7 +110,16 @@ def obs_data(self, input_data):
@property
def op(self):
- """Operator."""
+ r"""Operator.
+
+ The operator :math:`\mathbf{H}`.
+
+ Returns
+ -------
+ callable
+ The operator function
+
+ """
return self._op
@op.setter
@@ -111,7 +129,16 @@ def op(self, operator):
@property
def trans_op(self):
- """Transpose operator."""
+ r"""Transpose operator.
+
+ The transpose operator :math:`\mathbf{H}^T`.
+
+ Returns
+ -------
+ callable
+ The transpose operator function
+
+ """
return self._trans_op
@trans_op.setter
@@ -155,7 +182,7 @@ def trans_op_op(self, input_data):
r"""Transpose Operation of the Operator.
This method calculates the action of the transpose operator on
- the action of the operator on the data
+ the action of the operator on the data.
Parameters
----------
@@ -174,6 +201,8 @@ def trans_op_op(self, input_data):
.. math::
\mathbf{H}^T(\mathbf{H}\mathbf{x})
+ where :math:`\mathbf{x}` is the ``input_data``.
+
"""
return self.trans_op(self.op(input_data))
@@ -186,9 +215,9 @@ class GradBasic(GradParent):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Examples
@@ -218,7 +247,7 @@ def __init__(self, *args, **kwargs):
def _get_grad_method(self, input_data):
r"""Get the gradient.
- This method calculates the gradient step from the input data
+ This method calculates the gradient step from the input data.
Parameters
----------
@@ -239,13 +268,13 @@ def _cost_method(self, *args, **kwargs):
"""Calculate gradient component of the cost.
This method returns the l2 norm error of the difference between the
- original data and the data obtained after optimisation
+ original data and the data obtained after optimisation.
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
diff --git a/modopt/opt/linear.py b/modopt/opt/linear.py
index e21ffac9..d8679998 100644
--- a/modopt/opt/linear.py
+++ b/modopt/opt/linear.py
@@ -21,9 +21,9 @@ class LinearParent(object):
Parameters
----------
- op : function
+ op : callable
Callable function that implements the linear operation
- adj_op : function
+ adj_op : callable
Callable function that implements the linear adjoint operation
Examples
@@ -91,11 +91,12 @@ class WaveletConvolve(LinearParent):
filters: numpy.ndarray
Array of wavelet filter coefficients
method : str, optional
- Convolution method (default is 'scipy')
+ Convolution method (default is ``'scipy'``)
See Also
--------
LinearParent : parent class
+ modopt.signal.wavelet.filter_convolve_stack : wavelet filter convolution
"""
@@ -168,8 +169,8 @@ def _check_type(self, input_val):
Parameters
----------
- input_val : list, tuple or numpy.ndarray
- Any input type
+ input_val : any
+ Any input object
Returns
-------
@@ -234,7 +235,7 @@ def _check_inputs(self, operators, weights):
raise ValueError('Operators must contain "adj_op" method.')
operator.op = check_callable(operator.op)
- operator.cost = check_callable(operator.adj_op)
+ operator.adj_op = check_callable(operator.adj_op)
if not isinstance(weights, type(None)):
weights = self._check_type(weights)
diff --git a/modopt/opt/proximity.py b/modopt/opt/proximity.py
index 474d7395..e0f28e96 100644
--- a/modopt/opt/proximity.py
+++ b/modopt/opt/proximity.py
@@ -2,9 +2,12 @@
"""PROXIMITY OPERATORS.
-This module contains classes of proximity operators for optimisation
+This module contains classes of proximity operators for optimisation.
-:Author: Samuel Farrens
+:Authors:
+
+* Samuel Farrens ,
+* Loubna El Gueddari
"""
@@ -35,9 +38,9 @@ class ProximityParent(object):
Parameters
----------
- op : function
+ op : callable
Callable function that implements the proximity operation
- cost : function
+ cost : callable
Callable function that implements the proximity contribution to the
cost
@@ -68,7 +71,7 @@ def cost(self):
Returns
-------
float
- Cost
+ Cost contribution value
"""
return self._cost
@@ -108,6 +111,7 @@ class Positivity(ProximityParent):
See Also
--------
ProximityParent : parent class
+ modopt.signal.positivity.positive : positivity operator
"""
@@ -119,14 +123,14 @@ def __init__(self):
def _cost_method(self, *args, **kwargs):
"""Calculate positivity component of the cost.
- This method returns 0 as the posivituty does not contribute to the
+ This method returns ``0`` as the positivity does not contribute to the
cost.
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -142,9 +146,9 @@ def _cost_method(self, *args, **kwargs):
class SparseThreshold(ProximityParent):
- """Threshold Proximity Operator.
+ """Sparse Threshold Proximity Operator.
- This class defines the threshold proximity operator.
+ This class defines the sparse thresholding proximity operator.
Parameters
----------
@@ -158,6 +162,7 @@ class SparseThreshold(ProximityParent):
See Also
--------
ProximityParent : parent class
+ modopt.signal.noise.thresh : thresholding function
"""
@@ -199,9 +204,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -252,6 +257,9 @@ class LowRankMatrix(ProximityParent):
See Also
--------
ProximityParent : parent class
+ modopt.signal.svd.svd_thresh : SVD thresholding function
+ modopt.signal.svd.svd_thresh_coef : SVD coefficient thresholding function
+ modopt.math.matrix.nuclear_norm : nuclear norm implementation
"""
@@ -318,9 +326,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -394,9 +402,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -528,9 +536,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -576,6 +584,7 @@ class OrderedWeightedL1Norm(ProximityParent):
See Also
--------
ProximityParent : parent class
+ sklearn.isotonic.isotonic_regression : isotonic regression implementation
"""
@@ -652,9 +661,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -736,9 +745,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -782,6 +791,7 @@ class ElasticNet(ProximityParent):
See Also
--------
ProximityParent : parent class
+ modopt.signal.noise.thresh : thresholding function
"""
@@ -823,9 +833,9 @@ def _cost_method(self, *args, **kwargs):
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
@@ -848,7 +858,7 @@ def _cost_method(self, *args, **kwargs):
class KSupportNorm(ProximityParent):
"""K-support Norm Proximity Operator.
- This class defines the squarred K-support norm proximity operator
+ This class defines the squared :math:`k`-support norm proximity operator
described in :cite:`mcdonald2014`.
Parameters
@@ -856,18 +866,20 @@ class KSupportNorm(ProximityParent):
thresh : float
Threshold value
k_value : int
- Hyper-parameter of the k-support norm, equivalent to the cardinality
- value for the overlapping group lasso. k should included in
- {1, ..., dim(input_vector)}
+ Hyper-parameter of the :math:`k`-support norm, equivalent to the
+ cardinality value for the overlapping group lasso. :math:`k` should be
+ included in {1, ..., dim(input_vector)}.
Notes
-----
- The k-support norm can be seen as an extension to the group-LASSO with
- overlaps with groups of cardianlity at most equal to k.
- When k = 1 the norm is equivalent to the L1-norm.
- When k = dimension of the input vector than the norm is equivalent to the
- L2-norm.
- The dual of this norm correspond to the sum of the k biggest input entries.
+ The :math:`k`-support norm can be seen as an extension to the group-LASSO
+ with overlaps with groups of cardinality at most equal to :math:`k`.
+ When :math:`k = 1` the norm is equivalent to the L1-norm.
+ When :math:`k` = dimension of the input vector then the norm is equivalent
+ to the L2-norm.
+
+ The dual of this norm corresponds to the sum of the k biggest input
+ entries.
Examples
--------
@@ -897,7 +909,7 @@ def __init__(self, beta, k_value):
@property
def k_value(self):
- """K value."""
+ """Get the :math:`k` value."""
return self._k_value
@k_value.setter
@@ -929,7 +941,7 @@ def _compute_theta(self, input_data, alpha, extra_factor=1.0):
input_data: numpy.ndarray
Input data
alpha: float
- Parameter choosen such that sum(theta_i) = k
+ Parameter chosen such that :math:`\sum\theta_i = k`
extra_factor: float
Potential extra factor comming from the optimization process
(default is ``1.0``)
@@ -937,7 +949,8 @@ def _compute_theta(self, input_data, alpha, extra_factor=1.0):
Returns
-------
theta: numpy.ndarray
- Same size as w and each component is equal to theta_i
+ Same size as :math:`w` and each component is equal to
+ :math:`\theta_i`
"""
alpha_input = np.dot(
@@ -952,26 +965,26 @@ def _compute_theta(self, input_data, alpha, extra_factor=1.0):
return theta
def _interpolate(self, alpha0, alpha1, sum0, sum1):
- """Linear interpolation of alpha.
+ r"""Linear interpolation of alpha (:math:`\alpha`).
- This method estimats alpha* such that sum(theta(alpha*))=k via a linear
- interpolation.
+ This method estimates :math:`\alpha^*` such that
+ :math:`\sum\theta(\alpha^*)=k` via a linear interpolation.
Parameters
-----------
alpha0: float
- A value for wich sum(theta(alpha0)) <= k
+ A value for which :math:`\sum\theta(\alpha^0) \leq k`
alpha1: float
- A value for which sum(theta(alpha1)) <= k
+ A value for which :math:`\sum\theta(\alpha^1) \geq k`
sum0: float
- Value of sum(theta(alpha0))
+ Value of :math:`\sum\theta(\alpha^0)`
sum1: float
- Value of sum(theta(alpha0))
+ Value of :math:`\sum\theta(\alpha^1)`
Returns
-------
float
- An interpolation for which sum(theta(alpha_star)) = k
+ An interpolation for which :math:`\sum\theta(\alpha^*) = k`
"""
if sum0 == self._k_value:
@@ -986,16 +999,16 @@ def _interpolate(self, alpha0, alpha1, sum0, sum1):
return (self._k_value - b_val) / slope
def _binary_search(self, input_data, alpha, extra_factor=1.0):
- """Binary search method.
+ r"""Binary search method.
- This method finds the coordinate of alpha (i) such that
- sum(theta(alpha[i])) =< k and sum(theta(alpha[i+1])) >= k via binary
- search method
+ This method finds the coordinate of :math:`\alpha^i` such that
+ :math:`\sum\theta(\alpha^i) \leq k` and
+ :math:`\sum\theta(\alpha^{i+1}) \geq k` via a binary search method.
Parameters
----------
input_data: numpy.ndarray
- absolute value of the input data
+ Absolute value of the input data
alpha: numpy.ndarray
Array same size as the input data
extra_factor: float
@@ -1010,11 +1023,12 @@ def _binary_search(self, input_data, alpha, extra_factor=1.0):
Returns
-------
tuple
- The index where: sum(theta(alpha[index])) <= k and
- sum(theta(alpha[index+1])) >= k, The alpha value for which
- sum(theta(alpha[index])) <= k, The alpha value for which
- sum(theta(alpha[index+1])) >= k, Value of sum(theta(alpha[index])),
- Value of sum(theta(alpha[index + 1]))
+ The index where: :math:`\sum\theta(\alpha^i) \leq k` and
+ :math:`\sum\theta(\alpha^{i+1}) \geq k`, The alpha value for which
+ :math:`\sum\theta(\alpha^i) \leq k`, The alpha value for which
+ :math:`\sum\theta(\alpha^{i+1}) \geq k`, Value of
+ :math:`\sum\theta(\alpha^i)`,
+ Value of :math:`\sum\theta(\alpha^{i + 1})`
"""
first_idx = 0
@@ -1199,11 +1213,9 @@ def _op_method(self, input_data, extra_factor=1.0):
return rslt.reshape(data_shape)
def _find_q(self, sorted_data):
- """Find q index value.
+ r"""Find :math:`q`.
- This method finds the value of q such that:
-
- sorted_data[q] >= sum(sorted_data[q+1:]) / (k - q)>= sorted_data[q+1]
+ Find the :math:`q` index value.
Parameters
----------
@@ -1213,8 +1225,17 @@ def _find_q(self, sorted_data):
Returns
-------
int
- index such that sorted_data[q] >= sum(sorted_data[q+1:]) /
- (k - q)>= sorted_data[q+1]
+ The :math:`q` index value
+
+ Notes
+ -----
+ This method finds the value of :math:`q` such that:
+
+ .. math::
+
+ |w_q| \geq \frac{\sum_{j=q+1}^d |w_j|}{k - q} \geq |w_{q+1}|
+
+ where :math:`w` is the input ``sorted_data`` and :math:`k \leq d`.
"""
first_idx = 0
@@ -1258,21 +1279,23 @@ def _find_q(self, sorted_data):
return q_val
def _cost_method(self, *args, **kwargs):
- """Calculate OWL component of the cost.
+ """Calculate :math:`k`-support component of the cost.
- This method returns the ordered weighted l1 norm of the data.
+ This method returns the :math:`k`-support contribution to the total
+ cost.
Parameters
----------
- args : interable
+ *args : tuple
Positional arguments
- kwargs : dict
+ **kwargs : dict
Keyword arguments
Returns
-------
float
- OWL cost component
+ The :math:`k`-support cost component
+
"""
data_abs = np.abs(args[0].flatten())
ix = np.argsort(data_abs)[::-1]
@@ -1280,7 +1303,7 @@ def _cost_method(self, *args, **kwargs):
q_val = self._find_q(data_abs)
cost_val = (
(
- np.sum(data_abs[:q_val]**2) * 0.5
+ np.sum(data_abs[:q_val] ** 2) * 0.5
+ np.sum(data_abs[q_val:]) ** 2
/ (self._k_value - q_val)
) * self.beta
@@ -1359,7 +1382,7 @@ def _op_method(self, input_data, extra_factor=1.0):
)
def _cost_method(self, input_data):
- """Cost function.
+ """Calculate the group LASSO component of the cost.
This method calculate the cost function of the proximable part.
diff --git a/modopt/opt/reweight.py b/modopt/opt/reweight.py
index 444b65d5..8c4f2449 100644
--- a/modopt/opt/reweight.py
+++ b/modopt/opt/reweight.py
@@ -2,7 +2,7 @@
"""REWEIGHTING CLASSES.
-This module contains classes for reweighting optimisation implementations
+This module contains classes for reweighting optimisation implementations.
:Author: Samuel Farrens
@@ -17,7 +17,7 @@ class cwbReweight(object):
"""Candes, Wakin and Boyd reweighting class.
This class implements the reweighting scheme described in
- :cite:`candes2007`
+ :cite:`candes2007`.
Parameters
----------
@@ -56,7 +56,7 @@ def reweight(self, input_data):
r"""Reweight.
This method implements the reweighting from section 4 in
- :cite:`candes2007`
+ :cite:`candes2007`.
Parameters
----------
@@ -76,6 +76,9 @@ def reweight(self, input_data):
w = w \left( \frac{1}{1 + \frac{|x^w|}{n \sigma}} \right)
+ where :math:`w` are the weights, :math:`x` is the ``input_data`` and
+ :math:`n` is the ``thresh_factor``.
+
"""
if self.verbose:
print(' - Reweighting: {0}'.format(self._rw_num))
diff --git a/modopt/signal/noise.py b/modopt/signal/noise.py
index f67cc066..a59d5553 100644
--- a/modopt/signal/noise.py
+++ b/modopt/signal/noise.py
@@ -18,17 +18,17 @@
def add_noise(input_data, sigma=1.0, noise_type='gauss'):
"""Add noise to data.
- This method adds Gaussian or Poisson noise to the input data
+ This method adds Gaussian or Poisson noise to the input data.
Parameters
----------
input_data : numpy.ndarray, list or tuple
Input data array
sigma : float or list, optional
- Standard deviation of the noise to be added ('gauss' only, default is
- ``1.0``)
+ Standard deviation of the noise to be added (``'gauss'`` only,
+ default is ``1.0``)
noise_type : {'gauss', 'poisson'}
- Type of noise to be added (default is 'gauss')
+ Type of noise to be added (default is ``'gauss'``)
Returns
-------
@@ -38,9 +38,9 @@ def add_noise(input_data, sigma=1.0, noise_type='gauss'):
Raises
------
ValueError
- If `noise_type` is not 'gauss' or 'poisson'
+ If ``noise_type`` is not ``'gauss'`` or ``'poisson'``
ValueError
- If number of `sigma` values does not match the first dimension of the
+ If number of ``sigma`` values does not match the first dimension of the
input data
Examples
@@ -99,7 +99,7 @@ def add_noise(input_data, sigma=1.0, noise_type='gauss'):
def thresh(input_data, threshold, threshold_type='hard'):
r"""Threshold data.
- This method perfoms hard or soft thresholding on the input data
+ This method performs hard or soft thresholding on the input data.
Parameters
----------
@@ -108,7 +108,7 @@ def thresh(input_data, threshold, threshold_type='hard'):
threshold : float or numpy.ndarray
Threshold level(s)
threshold_type : {'hard', 'soft'}
- Type of noise to be added (default is 'hard')
+ Type of thresholding to be performed (default is ``'hard'``)
Returns
-------
@@ -118,7 +118,7 @@ def thresh(input_data, threshold, threshold_type='hard'):
Raises
------
ValueError
- If `threshold_type` is not 'hard' or 'soft'
+ If ``threshold_type`` is not ``'hard'`` or ``'soft'``
Notes
-----
diff --git a/modopt/signal/positivity.py b/modopt/signal/positivity.py
index 7b79d0ee..e4ec098d 100644
--- a/modopt/signal/positivity.py
+++ b/modopt/signal/positivity.py
@@ -3,7 +3,7 @@
"""POSITIVITY.
This module contains a function that retains only positive coefficients in
-an array
+an array.
:Author: Samuel Farrens
@@ -60,7 +60,7 @@ def positive(input_data, ragged=False):
"""Positivity operator.
This method preserves only the positive coefficients of the input data, all
- negative coefficients are set to zero
+ negative coefficients are set to zero.
Parameters
----------
diff --git a/modopt/signal/svd.py b/modopt/signal/svd.py
index de3fa453..41241b33 100644
--- a/modopt/signal/svd.py
+++ b/modopt/signal/svd.py
@@ -122,10 +122,10 @@ def svd_thresh(input_data, threshold=None, n_pc=None, thresh_type='hard'):
threshold : float or numpy.ndarray, optional
Threshold value(s) (default is ``None``)
n_pc : int or str, optional
- Number of principal components, specify an integer value or 'all'
+ Number of principal components, specify an integer value or ``'all'``
(default is ``None``)
thresh_type : {'hard', 'soft'}, optional
- Type of thresholding (default is 'hard')
+ Type of thresholding (default is ``'hard'``)
Returns
-------
@@ -203,7 +203,7 @@ def svd_thresh(input_data, threshold=None, n_pc=None, thresh_type='hard'):
def svd_thresh_coef(input_data, operator, threshold, thresh_type='hard'):
"""Threshold the singular values coefficients.
- This method thresholds the input data using singular value decomposition
+ This method thresholds the input data using singular value decomposition.
Parameters
----------
@@ -214,7 +214,7 @@ def svd_thresh_coef(input_data, operator, threshold, thresh_type='hard'):
threshold : float or numpy.ndarray
Threshold value(s)
thresh_type : {'hard', 'soft'}
- Type of noise to be added (default is 'hard')
+ Type of thresholding to be performed (default is ``'hard'``)
Returns
-------
diff --git a/modopt/signal/validation.py b/modopt/signal/validation.py
index f39a2eb3..422a987b 100644
--- a/modopt/signal/validation.py
+++ b/modopt/signal/validation.py
@@ -24,9 +24,9 @@ def transpose_test(
Parameters
----------
- operator : function
+ operator : callable
Operator function
- operator_t : function
+ operator_t : callable
Transpose operator function
x_shape : tuple
Shape of operator input data
diff --git a/modopt/signal/wavelet.py b/modopt/signal/wavelet.py
index 0e509cbd..bc4ffc70 100644
--- a/modopt/signal/wavelet.py
+++ b/modopt/signal/wavelet.py
@@ -2,14 +2,15 @@
"""WAVELET MODULE.
-This module contains methods for performing wavelet transformations using iSAP
+This module contains methods for performing wavelet transformations using
+Sparse2D.
:Author: Samuel Farrens
Notes
-----
This module serves as a wrapper for the wavelet transformation code
-`mr_transform`, which is part of the Sparse2D package. This executable
+``mr_transform``, which is part of the Sparse2D package. This executable
should be installed and built before using these methods.
Sparse2D Repository: https://github.com/CosmoStat/Sparse2D
@@ -73,18 +74,18 @@ def call_mr_transform(
path='./',
remove_files=True,
): # pragma: no cover
- """Call mr_transform.
+ """Call ``mr_transform``.
- This method calls the iSAP module mr_transform
+ This method calls the Sparse2D module ``mr_transform``.
Parameters
----------
input_data : numpy.ndarray
Input data, 2D array
opt : list or str, optional
- Options to be passed to mr_transform (default is '')
+ Options to be passed to ``mr_transform`` (default is ``''``)
path : str, optional
- Path for output files (default is './')
+ Path for output files (default is ``'./'``)
remove_files : bool, optional
Option to remove output files (default is ``True``)
@@ -98,9 +99,9 @@ def call_mr_transform(
ImportError
If the Astropy package is not found
ValueError
- If the input data is not a 2D numpy array
+ If the input data is not a 2D NumPy array
RuntimeError
- For exception encountered in call to mr_transform
+ For exception encountered in call to ``mr_transform``
Examples
--------
@@ -206,7 +207,7 @@ def get_mr_filters(
coarse=False,
trim=False,
): # pragma: no cover
- """Get mr_transform filters.
+ """Get ``mr_transform`` filters.
This method obtains wavelet filters by calling mr_transform.
@@ -215,7 +216,7 @@ def get_mr_filters(
data_shape : tuple
2D data shape
opt : list, optional
- List of additonal mr_transform options (default is '')
+ List of additional ``mr_transform`` options (default is ``''``)
coarse : bool, optional
Option to keep coarse scale (default is ``False``)
trim: bool, optional
@@ -267,9 +268,9 @@ def filter_convolve(input_data, filters, filter_rot=False, method='scipy'):
filters : numpy.ndarray
Wavelet filters, 3D array
filter_rot : bool, optional
- Option to rotate wavelet filters (default is `False`)
+ Option to rotate wavelet filters (default is ``False``)
method : {'astropy', 'scipy'}, optional
- Convolution method (default is 'scipy')
+ Convolution method (default is ``'scipy'``)
Returns
-------
@@ -327,7 +328,7 @@ def filter_convolve_stack(
):
"""Filter convolve.
- This method convolves the a stack of input images with the wavelet filters
+ This method convolves a stack of input images with the wavelet filters.
Parameters
----------
@@ -338,7 +339,7 @@ def filter_convolve_stack(
filter_rot : bool, optional
Option to rotate wavelet filters (default is ``False``)
method : {'astropy', 'scipy'}, optional
- Convolution method (default is 'scipy')
+ Convolution method (default is ``'scipy'``)
Returns
-------
diff --git a/modopt/tests/test_algorithms.py b/modopt/tests/test_algorithms.py
new file mode 100644
index 00000000..7ff96a8b
--- /dev/null
+++ b/modopt/tests/test_algorithms.py
@@ -0,0 +1,470 @@
+# -*- coding: utf-8 -*-
+
+"""UNIT TESTS FOR OPT.ALGORITHMS.
+
+This module contains unit tests for the modopt.opt.algorithms module.
+
+:Author: Samuel Farrens
+
+"""
+
+from unittest import TestCase
+
+import numpy as np
+import numpy.testing as npt
+
+from modopt.opt import algorithms, cost, gradient, linear, proximity, reweight
+
+# Basic functions to be used as operators or as dummy functions
+func_identity = lambda x_val: x_val
+func_double = lambda x_val: x_val * 2
+func_sq = lambda x_val: x_val ** 2
+func_cube = lambda x_val: x_val ** 3
+
+
+class Dummy(object):
+ """Dummy class for tests."""
+
+ pass
+
+
+class AlgorithmTestCase(TestCase):
+ """Test case for algorithms module."""
+
+ def setUp(self):
+ """Set test parameter values."""
+ self.data1 = np.arange(9).reshape(3, 3).astype(float)
+ self.data2 = self.data1 + np.random.randn(*self.data1.shape) * 1e-6
+ self.data3 = np.arange(9).reshape(3, 3).astype(float) + 1
+
+ grad_inst = gradient.GradBasic(
+ self.data1,
+ func_identity,
+ func_identity,
+ )
+
+ prox_inst = proximity.Positivity()
+ prox_dual_inst = proximity.IdentityProx()
+ linear_inst = linear.Identity()
+ reweight_inst = reweight.cwbReweight(self.data3)
+ cost_inst = cost.costObj([grad_inst, prox_inst, prox_dual_inst])
+ self.setup = algorithms.SetUp()
+ self.max_iter = 20
+
+ self.fb_all_iter = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=None,
+ auto_iterate=False,
+ beta_update=func_identity,
+ )
+ self.fb_all_iter.iterate(self.max_iter)
+
+ self.fb1 = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ beta_update=func_identity,
+ )
+
+ self.fb2 = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ lambda_update=None,
+ )
+
+ self.fb3 = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ beta_update=func_identity,
+ a_cd=3,
+ )
+
+ self.fb4 = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ beta_update=func_identity,
+ r_lazy=3,
+ p_lazy=0.7,
+ q_lazy=0.7,
+ )
+
+ self.fb5 = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ restart_strategy='adaptive',
+ xi_restart=0.9,
+ )
+
+ self.fb6 = algorithms.ForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ restart_strategy='greedy',
+ xi_restart=0.9,
+ min_beta=1.0,
+ s_greedy=1.1,
+ )
+
+ self.gfb_all_iter = algorithms.GenForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox_list=[prox_inst, prox_dual_inst],
+ cost=None,
+ auto_iterate=False,
+ gamma_update=func_identity,
+ beta_update=func_identity,
+ )
+ self.gfb_all_iter.iterate(self.max_iter)
+
+ self.gfb1 = algorithms.GenForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox_list=[prox_inst, prox_dual_inst],
+ gamma_update=func_identity,
+ lambda_update=func_identity,
+ )
+
+ self.gfb2 = algorithms.GenForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox_list=[prox_inst, prox_dual_inst],
+ cost=cost_inst,
+ )
+
+ self.gfb3 = algorithms.GenForwardBackward(
+ self.data1,
+ grad=grad_inst,
+ prox_list=[prox_inst, prox_dual_inst],
+ cost=cost_inst,
+ step_size=2,
+ )
+
+ self.condat_all_iter = algorithms.Condat(
+ self.data1,
+ self.data2,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=None,
+ prox_dual=prox_dual_inst,
+ sigma_update=func_identity,
+ tau_update=func_identity,
+ rho_update=func_identity,
+ auto_iterate=False,
+ )
+ self.condat_all_iter.iterate(self.max_iter)
+
+ self.condat1 = algorithms.Condat(
+ self.data1,
+ self.data2,
+ grad=grad_inst,
+ prox=prox_inst,
+ prox_dual=prox_dual_inst,
+ sigma_update=func_identity,
+ tau_update=func_identity,
+ rho_update=func_identity,
+ )
+
+ self.condat2 = algorithms.Condat(
+ self.data1,
+ self.data2,
+ grad=grad_inst,
+ prox=prox_inst,
+ prox_dual=prox_dual_inst,
+ linear=linear_inst,
+ cost=cost_inst,
+ reweight=reweight_inst,
+ )
+
+ self.condat3 = algorithms.Condat(
+ self.data1,
+ self.data2,
+ grad=grad_inst,
+ prox=prox_inst,
+ prox_dual=prox_dual_inst,
+ linear=Dummy(),
+ cost=cost_inst,
+ auto_iterate=False,
+ )
+
+ self.pogm_all_iter = algorithms.POGM(
+ u=self.data1,
+ x=self.data1,
+ y=self.data1,
+ z=self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ auto_iterate=False,
+ cost=None,
+ )
+ self.pogm_all_iter.iterate(self.max_iter)
+
+ self.pogm1 = algorithms.POGM(
+ u=self.data1,
+ x=self.data1,
+ y=self.data1,
+ z=self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ )
+
+ self.vanilla_grad = algorithms.VanillaGenericGradOpt(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ )
+ self.ada_grad = algorithms.AdaGenericGradOpt(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ )
+ self.adam_grad = algorithms.ADAMGradOpt(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ )
+ self.momentum_grad = algorithms.MomentumGradOpt(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ )
+ self.rms_grad = algorithms.RMSpropGradOpt(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ )
+ self.saga_grad = algorithms.SAGAOptGradOpt(
+ self.data1,
+ grad=grad_inst,
+ prox=prox_inst,
+ cost=cost_inst,
+ )
+
+ self.dummy = Dummy()
+ self.dummy.cost = func_identity
+ self.setup._check_operator(self.dummy.cost)
+
+ def tearDown(self):
+ """Unset test parameter values."""
+ self.data1 = None
+ self.data2 = None
+ self.setup = None
+ self.fb_all_iter = None
+ self.fb1 = None
+ self.fb2 = None
+ self.gfb_all_iter = None
+ self.gfb1 = None
+ self.gfb2 = None
+ self.condat_all_iter = None
+ self.condat1 = None
+ self.condat2 = None
+ self.condat3 = None
+ self.pogm1 = None
+ self.pogm_all_iter = None
+ self.dummy = None
+
+ def test_set_up(self):
+ """Test set_up."""
+ npt.assert_raises(TypeError, self.setup._check_input_data, 1)
+
+ npt.assert_raises(TypeError, self.setup._check_param, 1)
+
+ npt.assert_raises(TypeError, self.setup._check_param_update, 1)
+
+ def test_all_iter(self):
+ """Test if all opt run for all iterations."""
+ opts = [
+ self.fb_all_iter,
+ self.gfb_all_iter,
+ self.condat_all_iter,
+ self.pogm_all_iter,
+ ]
+ for opt in opts:
+ npt.assert_equal(opt.idx, self.max_iter - 1)
+
+ def test_forward_backward(self):
+ """Test forward_backward."""
+ npt.assert_array_equal(
+ self.fb1.x_final,
+ self.data1,
+ err_msg='Incorrect ForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.fb2.x_final,
+ self.data1,
+ err_msg='Incorrect ForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.fb3.x_final,
+ self.data1,
+ err_msg='Incorrect ForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.fb4.x_final,
+ self.data1,
+ err_msg='Incorrect ForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.fb5.x_final,
+ self.data1,
+ err_msg='Incorrect ForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.fb6.x_final,
+ self.data1,
+ err_msg='Incorrect ForwardBackward result.',
+ )
+
+ def test_gen_forward_backward(self):
+ """Test gen_forward_backward."""
+ npt.assert_array_equal(
+ self.gfb1.x_final,
+ self.data1,
+ err_msg='Incorrect GenForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.gfb2.x_final,
+ self.data1,
+ err_msg='Incorrect GenForwardBackward result.',
+ )
+
+ npt.assert_array_equal(
+ self.gfb3.x_final,
+ self.data1,
+ err_msg='Incorrect GenForwardBackward result.',
+ )
+
+ npt.assert_equal(
+ self.gfb3.step_size,
+ 2,
+ err_msg='Incorrect step size.',
+ )
+
+ npt.assert_raises(
+ TypeError,
+ algorithms.GenForwardBackward,
+ self.data1,
+ self.dummy,
+ [self.dummy],
+ weights=1,
+ )
+
+ npt.assert_raises(
+ ValueError,
+ algorithms.GenForwardBackward,
+ self.data1,
+ self.dummy,
+ [self.dummy],
+ weights=[1],
+ )
+
+ npt.assert_raises(
+ ValueError,
+ algorithms.GenForwardBackward,
+ self.data1,
+ self.dummy,
+ [self.dummy],
+ weights=[0.5, 0.5],
+ )
+
+ npt.assert_raises(
+ ValueError,
+ algorithms.GenForwardBackward,
+ self.data1,
+ self.dummy,
+ [self.dummy],
+ weights=[0.5],
+ )
+
+ def test_condat(self):
+ """Test gen_condat."""
+ npt.assert_almost_equal(
+ self.condat1.x_final,
+ self.data1,
+ err_msg='Incorrect Condat result.',
+ )
+
+ npt.assert_almost_equal(
+ self.condat2.x_final,
+ self.data1,
+ err_msg='Incorrect Condat result.',
+ )
+
+ def test_pogm(self):
+ """Test pogm."""
+ npt.assert_almost_equal(
+ self.pogm1.x_final,
+ self.data1,
+ err_msg='Incorrect POGM result.',
+ )
+
+ def test_ada_grad(self):
+ """Test ADA Gradient Descent."""
+ self.ada_grad.iterate()
+ npt.assert_almost_equal(
+ self.ada_grad.x_final,
+ self.data1,
+ err_msg='Incorrect ADAGrad results.',
+ )
+
+ def test_adam_grad(self):
+ """Test ADAM Gradient Descent."""
+ self.adam_grad.iterate()
+ npt.assert_almost_equal(
+ self.adam_grad.x_final,
+ self.data1,
+ err_msg='Incorrect ADAMGrad results.',
+ )
+
+ def test_momentum_grad(self):
+ """Test Momentum Gradient Descent."""
+ self.momentum_grad.iterate()
+ npt.assert_almost_equal(
+ self.momentum_grad.x_final,
+ self.data1,
+ err_msg='Incorrect MomentumGrad results.',
+ )
+
+ def test_rmsprop_grad(self):
+ """Test RMSProp Gradient Descent."""
+ self.rms_grad.iterate()
+ npt.assert_almost_equal(
+ self.rms_grad.x_final,
+ self.data1,
+ err_msg='Incorrect RMSPropGrad results.',
+ )
+
+ def test_saga_grad(self):
+ """Test SAGA Descent."""
+ self.saga_grad.iterate()
+ npt.assert_almost_equal(
+ self.saga_grad.x_final,
+ self.data1,
+ err_msg='Incorrect SAGA Grad results.',
+ )
+
+ def test_vanilla_grad(self):
+ """Test Vanilla Gradient Descent."""
+ self.vanilla_grad.iterate()
+ npt.assert_almost_equal(
+ self.vanilla_grad.x_final,
+ self.data1,
+ err_msg='Incorrect VanillaGrad results.',
+ )
diff --git a/requirements.txt b/requirements.txt
index 3c0e6d4f..63a404ba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-importlib_metadata==3.7.0
-numpy==1.19.5
-scipy==1.5.4
-progressbar2==3.53.1
+importlib_metadata>=3.7.0
+numpy>=1.19.5
+scipy>=1.5.4
+progressbar2>=3.53.1
diff --git a/setup.cfg b/setup.cfg
index d2f544f0..eada1b8c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,7 +2,7 @@
test=pytest
[metadata]
-description-file = README.rst
+description_file = README.rst
[darglint]
docstring_style = numpy
@@ -12,6 +12,8 @@ strictness = short
ignore =
D107, #Justification: Don't need docstring for __init__ in numpydoc style
RST304, #Justification: Need to use :cite: role for citations
+ RST210, #Justification: RST210, RST213 Inconsistent with numpydoc
+ RST213, # documentation for handling *args and **kwargs
W503, #Justification: Have to choose one multiline operator format
WPS202, #Todo: Rethink module size, possibly split large modules
WPS337, #Todo: Consider simplifying multiline conditions.
@@ -36,10 +38,16 @@ per-file-ignores =
modopt/math/convolve.py: WPS301,WPS420
#Todo: Rethink conditional imports
modopt/math/matrix.py: WPS420
- #Todo: Check need for del statement,
- modopt/opt/algorithms.py: WPS111,WPS420
+ #Todo: import has bad parenthesis
+ modopt/opt/algorithms/__init__.py: F401,F403,WPS318, WPS319, WPS412, WPS410
+ #Todo: x is a too short name.
+ modopt/opt/algorithms/forward_backward.py: WPS111
+ #Todo: Check need for del statement
+ modopt/opt/algorithms/primal_dual.py: WPS111, WPS420
+ #multiline parameters bug with tuples
+ modopt/opt/algorithms/gradient_descent.py: WPS111, WPS420, WPS317
#Todo: Consider changing costObj name
- modopt/opt/cost.py: N801
+ modopt/opt/cost.py: N801,
#Todo:
# - Rethink subscript slice assignment
# - Reduce complexity of KSupportNorm
diff --git a/setup.py b/setup.py
index 841ca1b1..59b03d2c 100644
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,8 @@
# Set the package release version
major = 1
-minor = 5
-patch = 1
+minor = 6
+patch = 0
# Set the package details
name = 'modopt'