@@ -83,9 +83,9 @@ class AdagradOp final : public Operator<Context> {
         decay_(this->template GetSingleArgument<float>("decay", 1.0f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "AdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "AdagradOp"
+            << " weight_decay_=" << weight_decay_;
   }
 
   bool RunOnDevice() override {
@@ -173,9 +173,9 @@ class SparseAdagradOp final : public Operator<CPUContext> {
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "SparseAdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "SparseAdagradOp"
+            << " weight_decay_=" << weight_decay_;
     const float decay = this->template GetSingleArgument<float>("decay", 1.0);
     CAFFE_ENFORCE_EQ(
         decay, 1.0, "Decay is not supported for SparseSimdAdagradOp");
@@ -226,8 +226,7 @@ class SparseAdagradOp final : public Operator<CPUContext> {
         n);
 
 #if defined(USE_FBGEMM) && !defined(__NVCC__)
-    C10_LOG_FIRST_N(INFO, 1)
-        << "using fbgemm::GenerateSparseAdaGrad in SparseAdagradOp";
+    VLOG(1) << "using fbgemm::GenerateSparseAdaGrad in SparseAdagradOp";
 
     if (block_size != last_block_size_) {
       last_block_size_ = block_size;
@@ -282,7 +281,7 @@ class SparseAdagradOp final : public Operator<CPUContext> {
     }
 #endif
 
-    C10_LOG_FIRST_N(INFO, 1)
+    VLOG(1)
         << "using internal::adagrad_update_prefetch_inlined in SparseAdagradOp";
 
     const auto* paramIn = Input(PARAM).template data<float>();
@@ -361,9 +360,9 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "RowWiseSparseAdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "RowWiseSparseAdagradOp"
+            << " weight_decay_=" << weight_decay_;
   }
 
   bool RunOnDevice() override {
@@ -416,8 +415,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
         n);
 
 #if defined(USE_FBGEMM) && !defined(__NVCC__)
-    C10_LOG_FIRST_N(INFO, 1)
-        << "using fbgemm::GenerateSparseAdaGrad in RowWiseSparseAdagradOp";
+    VLOG(1) << "using fbgemm::GenerateSparseAdaGrad in RowWiseSparseAdagradOp";
 
     if (block_size != last_block_size_) {
       last_block_size_ = block_size;
@@ -474,8 +472,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
       return true;
     }
 #else
-    C10_LOG_FIRST_N(INFO, 1)
-        << "using plain adagrad updates in RowWiseSparseAdagradOp";
+    VLOG(1) << "using plain adagrad updates in RowWiseSparseAdagradOp";
 
     for (auto i = 0; i < n; ++i) {
       auto idx = indices[i];
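Note on the behavior change: LOG(INFO) always prints, and C10_LOG_FIRST_N(INFO, 1) prints the first occurrence; VLOG(1) is compiled in but stays silent unless verbosity is raised at runtime, so these "operator in use" messages no longer appear in default runs. A minimal standalone sketch of the glog semantics the new macro relies on (file name and messages are illustrative, not from this patch; Caffe2 builds without glog route these macros through c10's fallback logger instead):

    // vlog_demo.cc: VLOG(n) only emits when the verbosity level is >= n.
    #include <glog/logging.h>

    int main(int argc, char* argv[]) {
      google::InitGoogleLogging(argv[0]);
      FLAGS_logtostderr = true; // write to stderr instead of log files
      VLOG(1) << "hidden at the default verbosity (FLAGS_v == 0)";
      FLAGS_v = 1;              // same effect as running with GLOG_v=1 or --v=1
      VLOG(1) << "now visible, like the operator-in-use messages above";
      return 0;
    }

In practice this means the diagnostic output above can still be recovered when debugging by raising the verbosity at launch (e.g. GLOG_v=1 in a glog-enabled build), without spamming production logs on every operator construction.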