
Commit 2a54533

jianyuh authored and facebook-github-bot committed
Fix the flooding log issues (#38356)
Summary: Pull Request resolved: #38356
Reduce the log size
ghstack-source-id: 103997991
Test Plan: CI
Reviewed By: jspark1105
Differential Revision: D21532296
fbshipit-source-id: d5ab5a8acc18a2b4210131d0d6b932e293c303a9
1 parent f64d24c commit 2a54533

6 files changed: 38 additions & 42 deletions
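The fix is mechanical: operator constructors and a couple of hot code paths previously announced themselves with LOG(INFO) or C10_LOG_FIRST_N, which log unconditionally at INFO severity and flood the output when many operators are instantiated; they now use VLOG(1), which is silent unless verbose logging is requested. A minimal sketch of the difference, assuming the glog-style macros exposed through c10's logging header (the include path and the operator name are illustrative):

#include <c10/util/Logging.h>

void ReportOperator() {
  // Before: always emitted at INFO severity, once per operator construction.
  LOG(INFO) << "gradient optimization operator in use: AdagradOp";

  // After: emitted only when verbose logging is enabled at level >= 1
  // (for example with glog's --v=1 flag); silent in a default run.
  VLOG(1) << "gradient optimization operator in use: AdagradOp";
}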


caffe2/sgd/adagrad_fused.h

Lines changed: 7 additions & 8 deletions
@@ -22,9 +22,9 @@ class SparseAdagradFusedWithSparseLengthsSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "SparseAdagradFusedWithSparseLengthsSumGradientOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "SparseAdagradFusedWithSparseLengthsSumGradientOp"
+            << " weight_decay_=" << weight_decay_;
     const T decay = this->template GetSingleArgument<T>("decay", 1.0);
     CAFFE_ENFORCE_EQ(
         decay, 1.0, "Decay is not supported for SparseSimdAdagradOp");
@@ -157,8 +157,8 @@ class SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp";
+    VLOG(1) << "gradient optimization operator in use: "
+            << "SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp";
     const T decay = this->template GetSingleArgument<T>("decay", 1.0);
     CAFFE_ENFORCE_EQ(
         decay, 1.0, "Decay is not supported for SparseSimdAdagradOp");
@@ -330,9 +330,8 @@ class SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO)
-        << "gradient optimization operator in use: "
-        << "SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp";
+    VLOG(1) << "gradient optimization operator in use: "
+            << "SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp";
     const T decay = this->template GetSingleArgument<T>("decay", 1.0);
     CAFFE_ENFORCE_EQ(
         decay, 1.0, "Decay is not supported for SparseSimdAdagradOp");

caffe2/sgd/adagrad_fused_op_gpu.cu

Lines changed: 10 additions & 10 deletions
@@ -433,9 +433,9 @@ class CUDASparseAdagradFusedWithSparseLengthsSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "CUDASparseAdagradFusedWithSparseLengthSumGradientOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "CUDASparseAdagradFusedWithSparseLengthSumGradientOp"
+            << " weight_decay_=" << weight_decay_;
 
     const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
     CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
@@ -585,9 +585,9 @@ class CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "CUDASparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "CUDASparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
+            << " weight_decay_=" << weight_decay_;
 
     const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
     CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
@@ -782,9 +782,9 @@ class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp"
+            << " weight_decay_=" << weight_decay_;
 
     const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
     CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
@@ -937,7 +937,7 @@ class CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO)
+    VLOG(1)
         << "gradient optimization operator in use: "
         << "CUDARowWiseSparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
         << " weight_decay_=" << weight_decay_;

caffe2/sgd/adagrad_op.h

Lines changed: 13 additions & 16 deletions
@@ -83,9 +83,9 @@ class AdagradOp final : public Operator<Context> {
         decay_(this->template GetSingleArgument<float>("decay", 1.0f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "AdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "AdagradOp"
+            << " weight_decay_=" << weight_decay_;
   }
 
   bool RunOnDevice() override {
@@ -173,9 +173,9 @@ class SparseAdagradOp final : public Operator<CPUContext> {
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "SparseAdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "SparseAdagradOp"
+            << " weight_decay_=" << weight_decay_;
     const float decay = this->template GetSingleArgument<float>("decay", 1.0);
     CAFFE_ENFORCE_EQ(
         decay, 1.0, "Decay is not supported for SparseSimdAdagradOp");
@@ -226,8 +226,7 @@ class SparseAdagradOp final : public Operator<CPUContext> {
         n);
 
 #if defined(USE_FBGEMM) && !defined(__NVCC__)
-    C10_LOG_FIRST_N(INFO, 1)
-        << "using fbgemm::GenerateSparseAdaGrad in SparseAdagradOp";
+    VLOG(1) << "using fbgemm::GenerateSparseAdaGrad in SparseAdagradOp";
 
     if (block_size != last_block_size_) {
       last_block_size_ = block_size;
@@ -282,7 +281,7 @@ class SparseAdagradOp final : public Operator<CPUContext> {
     }
 #endif
 
-    C10_LOG_FIRST_N(INFO, 1)
+    VLOG(1)
         << "using internal::adagrad_update_prefetch_inlined in SparseAdagradOp";
 
     const auto* paramIn = Input(PARAM).template data<float>();
@@ -361,9 +360,9 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "RowWiseSparseAdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "RowWiseSparseAdagradOp"
+            << " weight_decay_=" << weight_decay_;
   }
 
   bool RunOnDevice() override {
@@ -416,8 +415,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
         n);
 
 #if defined(USE_FBGEMM) && !defined(__NVCC__)
-    C10_LOG_FIRST_N(INFO, 1)
-        << "using fbgemm::GenerateSparseAdaGrad in RowWiseSparseAdagradOp";
+    VLOG(1) << "using fbgemm::GenerateSparseAdaGrad in RowWiseSparseAdagradOp";
 
     if (block_size != last_block_size_) {
       last_block_size_ = block_size;
@@ -474,8 +472,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
       return true;
     }
 #else
-    C10_LOG_FIRST_N(INFO, 1)
-        << "using plain adagrad updates in RowWiseSparseAdagradOp";
+    VLOG(1) << "using plain adagrad updates in RowWiseSparseAdagradOp";
 
     for (auto i = 0; i < n; ++i) {
       auto idx = indices[i];
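Note that in adagrad_op.h the run-time messages change semantics slightly as well: C10_LOG_FIRST_N(INFO, 1) printed the message only on the first execution of the statement, but always at INFO severity, while VLOG(1) suppresses it entirely unless verbose logging is enabled. A rough standalone C++ sketch of the two behaviors (illustrative only, not the real macro expansions):

#include <atomic>
#include <iostream>

int verbosity = 0;  // stands in for the process-wide verbose-logging level

void LogFirstNStyle(const char* msg) {
  static std::atomic<int> occurrences{0};
  if (occurrences.fetch_add(1) < 1) {      // LOG_FIRST_N(INFO, 1): first hit only,
    std::cout << "INFO: " << msg << '\n';  // but always printed on that first hit
  }
}

void VlogStyle(const char* msg) {
  if (verbosity >= 1) {                    // VLOG(1): gated on verbosity,
    std::cout << "V1: " << msg << '\n';    // silent by default, every hit
  }
}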

caffe2/sgd/adagrad_op_gpu.cu

Lines changed: 3 additions & 3 deletions
@@ -129,9 +129,9 @@ class CUDASparseAdagradOp final : public Operator<Context> {
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "CUDASparseAdagradOp"
-              << " weight_decay_=" << weight_decay_;
+    VLOG(1) << "gradient optimization operator in use: "
+            << "CUDASparseAdagradOp"
+            << " weight_decay_=" << weight_decay_;
     const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
     CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
   }

caffe2/sgd/iter_op.h

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ class IterOp final : public Operator<Context> {
 
   bool RunOnDevice() override {
     if (InputSize() == 0) {
-      LOG(INFO) << "[Input size is zero]";
+      VLOG(1) << "[Input size is zero]";
       if (!OperatorBase::OutputIsTensorType(0, CPU)) {
         // This is the first run; set the iter to start with 0.
         LOG(ERROR) << "You are using an old definition of IterOp that will "

caffe2/sgd/rowwise_adagrad_fused.h

Lines changed: 4 additions & 4 deletions
@@ -140,8 +140,8 @@ class RowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO) << "gradient optimization operator in use: "
-              << "RowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp";
+    VLOG(1) << "gradient optimization operator in use: "
+            << "RowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp";
     const T decay = this->template GetSingleArgument<T>("decay", 1.0);
     CAFFE_ENFORCE_EQ(
         decay, 1.0, "Decay is not supported for SparseSimdAdagradOp");
@@ -386,7 +386,7 @@ class RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO)
+    VLOG(1)
         << "gradient optimization operator in use: "
         << "RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp";
   }
@@ -690,7 +690,7 @@ class RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp
         epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
         weight_decay_(
             this->template GetSingleArgument<float>("weight_decay", 0.f)) {
-    LOG(INFO)
+    VLOG(1)
         << "gradient optimization operator in use: "
         << "RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp";
     const T decay = this->template GetSingleArgument<T>("decay", 1.0);
