From 29e946a0615cffed79620a8bb781aab69fb9e83f Mon Sep 17 00:00:00 2001
From: barry-jin
Date: Thu, 8 Jul 2021 10:35:58 -0700
Subject: [PATCH 1/2] add warning

---
 python/mxnet/optimizer/adagrad.py | 5 ++++-
 python/mxnet/optimizer/rmsprop.py | 8 +++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/python/mxnet/optimizer/adagrad.py b/python/mxnet/optimizer/adagrad.py
index aa31abf0030b..fd1c199feb83 100644
--- a/python/mxnet/optimizer/adagrad.py
+++ b/python/mxnet/optimizer/adagrad.py
@@ -60,10 +60,13 @@ class AdaGrad(Optimizer):
         otherwise, fused_step is called.
     """
 
-    def __init__(self, learning_rate=0.01, epsilon=1e-6, use_fused_step=True, **kwargs):
+    def __init__(self, learning_rate=0.01, epsilon=1e-6, use_fused_step=True, eps=None, **kwargs):
         super(AdaGrad, self).__init__(learning_rate=learning_rate,
                                       use_fused_step=use_fused_step,
                                       **kwargs)
+        if eps is not None:
+            raise DeprecationWarning(
+                'parameter \'eps\' is deprecated. Please use \'epsilon\' instead...')
         self.epsilon = epsilon
 
     def create_state(self, index, weight):
diff --git a/python/mxnet/optimizer/rmsprop.py b/python/mxnet/optimizer/rmsprop.py
index 2d4b2d618d64..fda1e209d800 100644
--- a/python/mxnet/optimizer/rmsprop.py
+++ b/python/mxnet/optimizer/rmsprop.py
@@ -69,10 +69,16 @@ class RMSProp(Optimizer):
     """
     def __init__(self, learning_rate=0.001, rho=0.9,
                  momentum=0.9, epsilon=1e-8, centered=False, clip_weights=None,
-                 use_fused_step=True, **kwargs):
+                 use_fused_step=True, gamma1=None, gamma2=None, **kwargs):
         super(RMSProp, self).__init__(learning_rate=learning_rate,
                                       use_fused_step=use_fused_step,
                                       **kwargs)
+        if gamma1 is not None:
+            raise DeprecationWarning(
+                'parameter \'gamma1\' is deprecated. Please use \'rho\' instead...')
+        if gamma2 is not None:
+            raise DeprecationWarning(
+                'parameter \'gamma2\' is deprecated. Please use \'momentum\' instead...')
         self.rho = rho
         self.momentum = momentum
         self.centered = centered

From 6a0ac710f26f31656ed29b4ec22fff4365028017 Mon Sep 17 00:00:00 2001
From: barry-jin
Date: Mon, 12 Jul 2021 14:45:24 -0700
Subject: [PATCH 2/2] update

---
 python/mxnet/optimizer/adagrad.py |  8 ++++----
 python/mxnet/optimizer/rmsprop.py | 12 ++++++------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/python/mxnet/optimizer/adagrad.py b/python/mxnet/optimizer/adagrad.py
index fd1c199feb83..2b42edc29de8 100644
--- a/python/mxnet/optimizer/adagrad.py
+++ b/python/mxnet/optimizer/adagrad.py
@@ -60,13 +60,13 @@ class AdaGrad(Optimizer):
         otherwise, fused_step is called.
     """
 
-    def __init__(self, learning_rate=0.01, epsilon=1e-6, use_fused_step=True, eps=None, **kwargs):
+    def __init__(self, learning_rate=0.01, epsilon=1e-6, use_fused_step=True, **kwargs):
+        if kwargs.get("eps") is not None:
+            raise DeprecationWarning(
+                'parameter \'eps\' is deprecated. Please use \'epsilon\' instead...')
         super(AdaGrad, self).__init__(learning_rate=learning_rate,
                                       use_fused_step=use_fused_step,
                                       **kwargs)
-        if eps is not None:
-            raise DeprecationWarning(
-                'parameter \'eps\' is deprecated. Please use \'epsilon\' instead...')
         self.epsilon = epsilon
 
     def create_state(self, index, weight):
diff --git a/python/mxnet/optimizer/rmsprop.py b/python/mxnet/optimizer/rmsprop.py
index fda1e209d800..c08aa9397d2a 100644
--- a/python/mxnet/optimizer/rmsprop.py
+++ b/python/mxnet/optimizer/rmsprop.py
@@ -69,16 +69,16 @@ class RMSProp(Optimizer):
     """
     def __init__(self, learning_rate=0.001, rho=0.9,
                  momentum=0.9, epsilon=1e-8, centered=False, clip_weights=None,
-                 use_fused_step=True, gamma1=None, gamma2=None, **kwargs):
-        super(RMSProp, self).__init__(learning_rate=learning_rate,
-                                      use_fused_step=use_fused_step,
-                                      **kwargs)
-        if gamma1 is not None:
+                 use_fused_step=True, **kwargs):
+        if kwargs.get("gamma1") is not None:
             raise DeprecationWarning(
                 'parameter \'gamma1\' is deprecated. Please use \'rho\' instead...')
-        if gamma2 is not None:
+        if kwargs.get("gamma2") is not None:
             raise DeprecationWarning(
                 'parameter \'gamma2\' is deprecated. Please use \'momentum\' instead...')
+        super(RMSProp, self).__init__(learning_rate=learning_rate,
+                                      use_fused_step=use_fused_step,
+                                      **kwargs)
         self.rho = rho
         self.momentum = momentum
         self.centered = centered