diff --git a/mllib/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregator.scala b/mllib/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregator.scala
index 13f64d2d50424..fc4c423a60b2a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregator.scala
@@ -81,6 +81,8 @@ private[ml] class HuberAggregator(
   } else {
     0.0
   }
+  // @transient + lazy: the derived coefficients array is not serialized between
+  // aggregation stages; it is recomputed on demand from the broadcast value instead.
+  @transient private lazy val coefficients = bcParameters.value.toArray.slice(0, numFeatures)
 
   /**
    * Add a new training instance to this HuberAggregator, and update the loss and gradient
@@ -97,7 +99,7 @@
     if (weight == 0.0) return this
 
     val localFeaturesStd = bcFeaturesStd.value
-    val localCoefficients = bcParameters.value.toArray.slice(0, numFeatures)
+    val localCoefficients = coefficients
     val localGradientSumArray = gradientSumArray
 
     val margin = {