From 66d5580bfa8de1e84f36d457482e931c264d05a5 Mon Sep 17 00:00:00 2001
From: Kang Kim
Date: Sat, 7 Nov 2015 12:49:15 +0900
Subject: [PATCH 1/2] Update computation of variance and global stats in
 BatchNormLayer

---
 src/caffe/layers/batch_norm_layer.cpp | 55 +++++++++++++-------------
 src/caffe/layers/batch_norm_layer.cu  | 56 ++++++++++++++-------------
 2 files changed, 57 insertions(+), 54 deletions(-)

diff --git a/src/caffe/layers/batch_norm_layer.cpp b/src/caffe/layers/batch_norm_layer.cpp
index 94c2b96b9cd..5eba25e9024 100644
--- a/src/caffe/layers/batch_norm_layer.cpp
+++ b/src/caffe/layers/batch_norm_layer.cpp
@@ -2,7 +2,6 @@
 #include <vector>
 
 #include "caffe/common_layers.hpp"
-#include "caffe/layer.hpp"
 #include "caffe/util/math_functions.hpp"
 
 namespace caffe {
@@ -80,20 +79,21 @@ void BatchNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
   int num = bottom[0]->shape(0);
   int spatial_dim = bottom[0]->count()/(bottom[0]->shape(0)*channels_);
 
-  // elementwise square
-  caffe_powx(bottom[0]->count(), bottom_data, Dtype(2),
-      temp_.mutable_cpu_data());
+  if (bottom[0] != top[0]) {
+    caffe_copy(bottom[0]->count(), bottom_data, top_data);
+  }
+
   if (use_global_stats_) {
     // use the stored mean/variance estimates. TODO(cdoersch): allow an option
     // to use an unbiased variance estimate, like the paper does.
-    const Dtype scale_factor = 1 / this->blobs_[2]->cpu_data()[0];
+    const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
+        0 : 1 / this->blobs_[2]->cpu_data()[0];
     caffe_cpu_scale(variance_.count(), scale_factor,
         this->blobs_[0]->cpu_data(), mean_.mutable_cpu_data());
     caffe_cpu_scale(variance_.count(), scale_factor,
         this->blobs_[1]->cpu_data(), variance_.mutable_cpu_data());
   } else {
-    // computes variance using var(X) = E(X^2) - (EX)^2
+    // compute mean
     caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
         1. / (num * spatial_dim), bottom_data,
         spatial_sum_multiplier_.cpu_data(), 0.,
@@ -101,44 +101,45 @@ void BatchNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
         num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
         mean_.mutable_cpu_data());
+  }
+
+  // subtract mean
+  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
+      batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
+      num_by_chans_.mutable_cpu_data());
+  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
+      spatial_dim, 1, -1, num_by_chans_.cpu_data(),
+      spatial_sum_multiplier_.cpu_data(), 1., top_data);
+
+  if (!use_global_stats_) {
+    // compute variance using var(X) = E((X-EX)^2)
+    caffe_powx(top[0]->count(), top_data, Dtype(2),
+        temp_.mutable_cpu_data());  // (X-EX)^2
     caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
         1. / (num * spatial_dim), temp_.cpu_data(),
         spatial_sum_multiplier_.cpu_data(), 0.,
         num_by_chans_.mutable_cpu_data());
     caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
         num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
-        variance_.mutable_cpu_data());
+        variance_.mutable_cpu_data());  // E((X-EX)^2)
+
+    // compute and save moving average
     this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
     this->blobs_[2]->mutable_cpu_data()[0] += 1;
     caffe_cpu_axpby(mean_.count(), Dtype(1), mean_.cpu_data(),
         moving_average_fraction_, this->blobs_[0]->mutable_cpu_data());
-    Dtype m = Dtype(bottom[0]->count()/channels_);
-    caffe_cpu_axpby(variance_.count(), m/(m-1), variance_.cpu_data(),
-        moving_average_fraction_, this->blobs_[1]->mutable_cpu_data());
+    int m = bottom[0]->count()/channels_;
+    Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
+    caffe_cpu_axpby(variance_.count(), bias_correction_factor,
+        variance_.cpu_data(), moving_average_fraction_,
+        this->blobs_[1]->mutable_cpu_data());
   }
 
-  // elementwise square of mean
-  caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2),
-      temp_.mutable_cpu_data());
-
-  caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(),
-      variance_.mutable_cpu_data());  // variance
 
   // normalize variance
   caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data());
   caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5),
       variance_.mutable_cpu_data());
 
-  // do mean and variance normalization
-  if (bottom[0] != top[0]) {
-    caffe_copy(bottom[0]->count(), bottom_data, top_data);
-  }
-  // subtract mean
-  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
-      batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
-      num_by_chans_.mutable_cpu_data());
-  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
-      spatial_dim, 1, -1, num_by_chans_.cpu_data(),
-      spatial_sum_multiplier_.cpu_data(), 1., top_data);
 
   // replicate variance to input size
   caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
       batch_sum_multiplier_.cpu_data(), variance_.cpu_data(), 0.,
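The batch_norm_layer.cu diff below mirrors the same reordering for the GPU path. As a rough standalone sketch of what the reworked forward pass now computes per channel (plain C++ with illustrative names x, y, and eps; not Caffe's API, and not part of the patch):

#include <cmath>
#include <cstddef>
#include <vector>

// Computes y = (x - mean(x)) / sqrt(var(x) + eps) the way the patched
// forward pass does: copy the input, subtract the mean, then take the
// variance of the already-centered values.
std::vector<float> batch_norm_one_channel(const std::vector<float>& x,
                                          float eps) {
  const std::size_t m = x.size();
  std::vector<float> y(x);  // analogous to caffe_copy into top_data
  float mean = 0;
  for (std::size_t i = 0; i < m; ++i) mean += x[i];
  mean /= m;
  for (std::size_t i = 0; i < m; ++i) y[i] -= mean;  // subtract mean first
  float var = 0;
  for (std::size_t i = 0; i < m; ++i) var += y[i] * y[i];
  var /= m;  // var(X) = E((X-EX)^2), the biased batch estimate
  const float inv_std = 1.0f / std::sqrt(var + eps);
  for (std::size_t i = 0; i < m; ++i) y[i] *= inv_std;
  return y;
}

Centering before squaring avoids the catastrophic cancellation that the old var(X) = E(X^2) - (EX)^2 formula suffers in single precision when the mean is large relative to the standard deviation; subtracting two nearly equal quantities there could even yield a small negative variance.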
diff --git a/src/caffe/layers/batch_norm_layer.cu b/src/caffe/layers/batch_norm_layer.cu
index cd8924a451d..921a58f07a9 100644
--- a/src/caffe/layers/batch_norm_layer.cu
+++ b/src/caffe/layers/batch_norm_layer.cu
@@ -2,7 +2,6 @@
 #include <vector>
 
 #include "caffe/common_layers.hpp"
-#include "caffe/layer.hpp"
 #include "caffe/util/math_functions.hpp"
 
 namespace caffe {
@@ -15,20 +14,22 @@ void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   int num = bottom[0]->shape(0);
   int spatial_dim = bottom[0]->count()/(channels_*bottom[0]->shape(0));
 
-  // elementwise square
-  caffe_gpu_powx(bottom[0]->count(), bottom_data, Dtype(2),
-      temp_.mutable_gpu_data());
+  if (bottom[0] != top[0]) {
+    caffe_copy(bottom[0]->count(), bottom_data, top_data);
+  }
+
   if (use_global_stats_) {
     // use the stored mean/variance estimates. TODO(cdoersch): allow an option
     // to use an unbiased variance estimate, like the paper does.
-    const Dtype scale_factor = 1 / this->blobs_[2]->cpu_data()[0];
+    const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
+        0 : 1 / this->blobs_[2]->cpu_data()[0];
     caffe_gpu_scale(variance_.count(), scale_factor,
         this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
     caffe_gpu_scale(variance_.count(), scale_factor,
         this->blobs_[1]->gpu_data(), variance_.mutable_gpu_data());
   } else {
-    // computes variance using var(X) = E(X^2) - (EX)^2
+    // compute mean
     caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
         1. / (num * spatial_dim), bottom_data,
         spatial_sum_multiplier_.gpu_data(), 0.,
@@ -36,44 +37,45 @@ void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
         num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
         mean_.mutable_gpu_data());
+  }
+
+  // subtract mean
+  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
+      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
+      num_by_chans_.mutable_gpu_data());
+  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
+      spatial_dim, 1, -1, num_by_chans_.gpu_data(),
+      spatial_sum_multiplier_.gpu_data(), 1., top_data);
+
+  if (!use_global_stats_) {
+    // compute variance using var(X) = E((X-EX)^2)
+    caffe_gpu_powx(top[0]->count(), top_data, Dtype(2),
+        temp_.mutable_gpu_data());  // (X-EX)^2
     caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
         1. / (num * spatial_dim), temp_.gpu_data(),
         spatial_sum_multiplier_.gpu_data(), 0.,
         num_by_chans_.mutable_gpu_data());
     caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
         num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
-        variance_.mutable_gpu_data());
+        variance_.mutable_gpu_data());  // E((X-EX)^2)
+
+    // compute and save moving average
     this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
     this->blobs_[2]->mutable_cpu_data()[0] += 1;
     caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
         moving_average_fraction_, this->blobs_[0]->mutable_gpu_data());
-    Dtype m = Dtype(bottom[0]->count()/channels_);
-    caffe_gpu_axpby(variance_.count(), m/(m-1), variance_.gpu_data(),
-        moving_average_fraction_, this->blobs_[1]->mutable_gpu_data());
+    int m = bottom[0]->count()/channels_;
+    Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
+    caffe_gpu_axpby(variance_.count(), bias_correction_factor,
+        variance_.gpu_data(), moving_average_fraction_,
+        this->blobs_[1]->mutable_gpu_data());
   }
 
-  // elementwise square of mean
-  caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2),
-      temp_.mutable_gpu_data());
-
-  caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(),
-      variance_.mutable_gpu_data());  // variance
 
   // normalize variance
   caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data());
   caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
       variance_.mutable_gpu_data());
 
-  // do mean and variance normalization
-  if (bottom[0] != top[0]) {
-    caffe_copy(bottom[0]->count(), bottom_data, top_data);
-  }
-  // subtract mean
-  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
-      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
-      num_by_chans_.mutable_gpu_data());
-  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
-      spatial_dim, 1, -1, num_by_chans_.gpu_data(),
-      spatial_sum_multiplier_.gpu_data(), 1., top_data);
 
   // replicate variance to input size
   caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
       batch_sum_multiplier_.gpu_data(), variance_.gpu_data(), 0.,
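Patch 1 also changes the running-statistics bookkeeping: blobs_[0] and blobs_[1] hold exponentially weighted sums of the batch mean and the bias-corrected batch variance, and blobs_[2] holds the matching normalization factor that the test-time scale_factor divides out. A minimal sketch of that update rule (RunningStats and update_running_stats are illustrative names, not Caffe code):

#include <cstddef>
#include <vector>

struct RunningStats {
  std::vector<float> mean_sum;  // plays the role of blobs_[0]
  std::vector<float> var_sum;   // plays the role of blobs_[1]
  float scale = 0;              // plays the role of blobs_[2][0]
};

void update_running_stats(RunningStats* s,
                          const std::vector<float>& batch_mean,
                          const std::vector<float>& batch_var,
                          int m,  // values averaged per channel
                          float moving_average_fraction) {
  s->scale = s->scale * moving_average_fraction + 1;
  // Unbiased correction m/(m-1); guard m == 1 so we never divide by zero.
  const float bias_correction = m > 1 ? static_cast<float>(m) / (m - 1) : 1.0f;
  for (std::size_t c = 0; c < s->mean_sum.size(); ++c) {
    s->mean_sum[c] = batch_mean[c]
        + moving_average_fraction * s->mean_sum[c];
    s->var_sum[c] = bias_correction * batch_var[c]
        + moving_average_fraction * s->var_sum[c];
  }
}

Because blobs_[2] starts at zero, the new zero check in the forward pass maps a never-trained model to zero statistics rather than dividing by zero, and the m > 1 guard covers the degenerate case of a single value per channel.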
From f31002d9ef791a75166f91eccd919aec838866c8 Mon Sep 17 00:00:00 2001
From: Kang Kim
Date: Fri, 13 Nov 2015 02:20:02 +0900
Subject: [PATCH 2/2] Make backward pass work when global stats are active in
 BatchNormLayer, including minor code cleanup

---
 src/caffe/layers/batch_norm_layer.cpp | 10 ++++++----
 src/caffe/layers/batch_norm_layer.cu  | 10 ++++++----
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/src/caffe/layers/batch_norm_layer.cpp b/src/caffe/layers/batch_norm_layer.cpp
index 5eba25e9024..b5c91b5e1b3 100644
--- a/src/caffe/layers/batch_norm_layer.cpp
+++ b/src/caffe/layers/batch_norm_layer.cpp
@@ -84,8 +84,7 @@ void BatchNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
   }
 
   if (use_global_stats_) {
-    // use the stored mean/variance estimates. TODO(cdoersch): allow an option
-    // to use an unbiased variance estimate, like the paper does.
+    // use the stored mean/variance estimates.
     const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
         0 : 1 / this->blobs_[2]->cpu_data()[0];
     caffe_cpu_scale(variance_.count(), scale_factor,
@@ -158,7 +157,6 @@ template <typename Dtype>
 void BatchNormLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down,
     const vector<Blob<Dtype>*>& bottom) {
-  CHECK(!use_global_stats_);
   const Dtype* top_diff;
   if (bottom[0] != top[0]) {
     top_diff = top[0]->cpu_diff();
@@ -166,8 +164,12 @@ void BatchNormLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     caffe_copy(x_norm_.count(), top[0]->cpu_diff(), x_norm_.mutable_cpu_diff());
     top_diff = x_norm_.cpu_diff();
   }
-  const Dtype* top_data = x_norm_.cpu_data();
   Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+  if (use_global_stats_) {
+    caffe_div(temp_.count(), top_diff, temp_.cpu_data(), bottom_diff);
+    return;
+  }
+  const Dtype* top_data = x_norm_.cpu_data();
   int num = bottom[0]->shape()[0];
   int spatial_dim = bottom[0]->count()/(bottom[0]->shape(0)*channels_);
   // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
diff --git a/src/caffe/layers/batch_norm_layer.cu b/src/caffe/layers/batch_norm_layer.cu
index 921a58f07a9..2a6cac54168 100644
--- a/src/caffe/layers/batch_norm_layer.cu
+++ b/src/caffe/layers/batch_norm_layer.cu
@@ -20,8 +20,7 @@ void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   }
 
   if (use_global_stats_) {
-    // use the stored mean/variance estimates. TODO(cdoersch): allow an option
-    // to use an unbiased variance estimate, like the paper does.
+    // use the stored mean/variance estimates.
     const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
         0 : 1 / this->blobs_[2]->cpu_data()[0];
     caffe_gpu_scale(variance_.count(), scale_factor,
@@ -94,7 +93,6 @@ template <typename Dtype>
 void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down,
     const vector<Blob<Dtype>*>& bottom) {
-  CHECK(!use_global_stats_);
   const Dtype* top_diff;
   if (bottom[0] != top[0]) {
     top_diff = top[0]->gpu_diff();
@@ -102,8 +100,12 @@ void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     caffe_copy(x_norm_.count(), top[0]->gpu_diff(), x_norm_.mutable_gpu_diff());
     top_diff = x_norm_.gpu_diff();
   }
-  const Dtype* top_data = x_norm_.gpu_data();
   Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+  if (use_global_stats_) {
+    caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
+    return;
+  }
+  const Dtype* top_data = x_norm_.gpu_data();
   int num = bottom[0]->shape()[0];
   int spatial_dim = bottom[0]->count()/(channels_*bottom[0]->shape(0));
   // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
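The global-stats backward path added here follows directly from the comment above: with mean and variance held constant, Y = (X - mean(X)) / sqrt(var(X) + eps) is an affine map of X, so dE/dX = dE/dY / sqrt(var(X) + eps). In the layer, temp_ still holds sqrt(var + eps) replicated to the input shape from the forward pass, which is why a single caffe_div / caffe_gpu_div suffices. A minimal sketch with a scalar variance for one channel (illustrative names, not Caffe code):

#include <cmath>
#include <cstddef>
#include <vector>

// Gradient of y = (x - mu) / sqrt(var + eps) w.r.t. x when mu and var are
// fixed global statistics: divide the incoming gradient by the std-dev.
std::vector<float> backward_global_stats(const std::vector<float>& top_diff,
                                         float var, float eps) {
  const float std_dev = std::sqrt(var + eps);  // what temp_ stores per element
  std::vector<float> bottom_diff(top_diff.size());
  for (std::size_t i = 0; i < top_diff.size(); ++i) {
    bottom_diff[i] = top_diff[i] / std_dev;
  }
  return bottom_diff;
}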