diff --git a/python/mxnet/optimizer/adagrad.py b/python/mxnet/optimizer/adagrad.py
index aa31abf0030b..2b42edc29de8 100644
--- a/python/mxnet/optimizer/adagrad.py
+++ b/python/mxnet/optimizer/adagrad.py
@@ -61,6 +61,9 @@ class AdaGrad(Optimizer):
     """

     def __init__(self, learning_rate=0.01, epsilon=1e-6, use_fused_step=True, **kwargs):
+        if kwargs.get("eps") is not None:
+            raise DeprecationWarning(
+                'parameter \'eps\' is deprecated. Please use \'epsilon\' instead...')
         super(AdaGrad, self).__init__(learning_rate=learning_rate,
                                       use_fused_step=use_fused_step,
                                       **kwargs)
diff --git a/python/mxnet/optimizer/rmsprop.py b/python/mxnet/optimizer/rmsprop.py
index 2d4b2d618d64..c08aa9397d2a 100644
--- a/python/mxnet/optimizer/rmsprop.py
+++ b/python/mxnet/optimizer/rmsprop.py
@@ -70,6 +70,12 @@ class RMSProp(Optimizer):
     def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.9,
                  epsilon=1e-8, centered=False, clip_weights=None,
                  use_fused_step=True, **kwargs):
+        if kwargs.get("gamma1") is not None:
+            raise DeprecationWarning(
+                'parameter \'gamma1\' is deprecated. Please use \'rho\' instead...')
+        if kwargs.get("gamma2") is not None:
+            raise DeprecationWarning(
+                'parameter \'gamma2\' is deprecated. Please use \'momentum\' instead...')
         super(RMSProp, self).__init__(learning_rate=learning_rate,
                                       use_fused_step=use_fused_step,
                                       **kwargs)
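
For context, a minimal sketch of how these guards surface to callers, assuming an MXNet build that includes this change. Note that the guards raise DeprecationWarning as an exception (DeprecationWarning subclasses Exception) rather than emitting it via warnings.warn, so passing an old keyword fails hard at construction time:

    from mxnet.optimizer import AdaGrad, RMSProp

    # The deprecated 'eps' keyword lands in **kwargs and is rejected
    # before the optimizer is constructed.
    try:
        AdaGrad(learning_rate=0.01, eps=1e-6)
    except DeprecationWarning as err:
        print(err)  # parameter 'eps' is deprecated. Please use 'epsilon' instead...

    # Same for the renamed RMSProp coefficients.
    try:
        RMSProp(learning_rate=0.001, gamma1=0.9)
    except DeprecationWarning as err:
        print(err)  # parameter 'gamma1' is deprecated. Please use 'rho' instead...

    # The renamed keywords work as documented.
    opt = AdaGrad(learning_rate=0.01, epsilon=1e-6)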