
Commit 7d45526

Check grad accum with Jeff's idea.
Parent commit: 4d3fbd5

File tree

1 file changed (+9 −1 lines)


include/caffe/test/test_gradient_check_util.hpp

Lines changed: 9 additions & 1 deletion
@@ -86,7 +86,10 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>* layer,
   vector<bool> propagate_down(bottom.size(), check_bottom < 0);
   for (int i = 0; i < layer->blobs().size(); ++i) {
     Blob<Dtype>* blob = layer->blobs()[i].get();
-    caffe_set(blob->count(), static_cast<Dtype>(0), blob->mutable_cpu_diff());
+    // To check that layers are implemented for accumulation, let the diff
+    // start with a bias of 1.0. However, this is really a quick hack: there
+    // are no well-explained messages even if the CHECK fails.
+    caffe_set(blob->count(), static_cast<Dtype>(1.0), blob->mutable_cpu_diff());
     blobs_to_check.push_back(blob);
   }
   if (check_bottom < 0) {
@@ -118,6 +121,11 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>* layer,
     Dtype* computed_gradients =
         computed_gradient_blobs[blob_id]->mutable_cpu_data();
     caffe_copy(count, diff, computed_gradients);
+    // Remove the bias for layer blobs.
+    const bool is_layer_blob = blob_id < layer->blobs().size();
+    if (is_layer_blob) {
+      caffe_add_scalar(count, static_cast<Dtype>(-1.0), computed_gradients);
+    }
   }
   // Compute derivative of top w.r.t. each bottom and parameter input using
   // finite differencing.
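
Note on the idea being tested (editor's sketch, not part of the commit): instead of zeroing each parameter diff before the gradient check, the checker seeds it with a bias of 1.0 and later subtracts that bias from the computed gradients. A Backward pass that accumulates into the diff (diff += gradient) still matches the finite-difference estimate after the bias is removed, while one that overwrites the diff ends up off by a constant 1.0. Below is a minimal standalone C++ sketch of that logic; BackwardAccumulate, BackwardOverwrite, and PassesAccumulationCheck are hypothetical names used only for illustration, and the real checker compares against finite-difference estimates rather than a known analytic gradient.

#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical backward passes: `grad` is the gradient of the loss w.r.t.
// each parameter, `diff` is the parameter's diff buffer.
void BackwardAccumulate(const std::vector<double>& grad,
                        std::vector<double>* diff) {
  for (size_t i = 0; i < grad.size(); ++i) {
    (*diff)[i] += grad[i];  // correct: accumulate into the existing diff
  }
}

void BackwardOverwrite(const std::vector<double>& grad,
                       std::vector<double>* diff) {
  for (size_t i = 0; i < grad.size(); ++i) {
    (*diff)[i] = grad[i];  // buggy: overwrites whatever was in the diff
  }
}

// Seed the diff with a bias of 1.0, run backward, remove the bias, and
// compare against the expected gradient.
bool PassesAccumulationCheck(
    void (*backward)(const std::vector<double>&, std::vector<double>*),
    const std::vector<double>& expected_grad) {
  const double kBias = 1.0;
  // Was 0 before this commit; seeding with the bias exposes overwriting.
  std::vector<double> diff(expected_grad.size(), kBias);
  backward(expected_grad, &diff);
  for (size_t i = 0; i < diff.size(); ++i) {
    const double computed = diff[i] - kBias;  // mirrors caffe_add_scalar(-1.0)
    if (std::fabs(computed - expected_grad[i]) > 1e-9) {
      return false;
    }
  }
  return true;
}

int main() {
  const std::vector<double> grad = {0.5, -2.0, 3.25};
  std::printf("accumulating backward passes: %d\n",
              PassesAccumulationCheck(BackwardAccumulate, grad));
  std::printf("overwriting backward passes:  %d\n",
              PassesAccumulationCheck(BackwardOverwrite, grad));
  return 0;
}

With the accumulating backward the check passes; with the overwriting one the result is off by exactly the seeded bias, which is the failure this commit is meant to surface (though, as the added comment admits, the CHECK failure message does not explain this).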
