@@ -206,16 +206,6 @@ type pickedTableCompaction struct {
206206 // L0-specific compaction info. Set to a non-nil value for all compactions
207207 // where startLevel == 0 that were generated by L0Sublevels.
208208 lcf * manifest.L0CompactionFiles
209- // maxOutputFileSize is the maximum size of an individual table created
210- // during compaction.
211- maxOutputFileSize uint64
212- // maxOverlapBytes is the maximum number of bytes of overlap allowed for a
213- // single output table with the tables in the grandparent level.
214- maxOverlapBytes uint64
215- // maxReadCompactionBytes is the maximum bytes a read compaction is allowed to
216- // overlap in its output level with. If the overlap is greater than
217- // maxReadCompaction bytes, then we don't proceed with the compaction.
218- maxReadCompactionBytes uint64
219209
220210 // The boundaries of the input data.
221211 bounds base.UserKeyBounds
@@ -286,23 +276,19 @@ func newPickedTableCompaction(
286276 l0Organizer * manifest.L0Organizer ,
287277 startLevel , outputLevel , baseLevel int ,
288278) * pickedTableCompaction {
289- if outputLevel > 0 && baseLevel == 0 {
279+ if baseLevel == 0 {
290280 panic ("base level cannot be 0" )
291281 }
292282 if startLevel > 0 && startLevel < baseLevel {
293283 panic (fmt .Sprintf ("invalid compaction: start level %d should not be empty (base level %d)" ,
294284 startLevel , baseLevel ))
295285 }
296286
297- targetFileSize := opts .TargetFileSize (outputLevel , baseLevel )
298287 pc := & pickedTableCompaction {
299- version : cur ,
300- l0Organizer : l0Organizer ,
301- baseLevel : baseLevel ,
302- inputs : []compactionLevel {{level : startLevel }, {level : outputLevel }},
303- maxOutputFileSize : uint64 (targetFileSize ),
304- maxOverlapBytes : maxGrandparentOverlapBytes (targetFileSize ),
305- maxReadCompactionBytes : maxReadCompactionBytes (targetFileSize ),
288+ version : cur ,
289+ l0Organizer : l0Organizer ,
290+ baseLevel : baseLevel ,
291+ inputs : []compactionLevel {{level : startLevel }, {level : outputLevel }},
306292 }
307293 pc .startLevel = & pc .inputs [0 ]
308294 pc .outputLevel = & pc .inputs [1 ]
@@ -355,8 +341,6 @@ func (pc *pickedTableCompaction) String() string {
355341 builder .WriteString (fmt .Sprintf (`Score=%f, ` , pc .score ))
356342 builder .WriteString (fmt .Sprintf (`Kind=%s, ` , pc .kind ))
357343 builder .WriteString (fmt .Sprintf (`AdjustedOutputLevel=%d, ` , adjustedOutputLevel (pc .outputLevel .level , pc .baseLevel )))
358- builder .WriteString (fmt .Sprintf (`maxOutputFileSize=%d, ` , pc .maxOutputFileSize ))
359- builder .WriteString (fmt .Sprintf (`maxReadCompactionBytes=%d, ` , pc .maxReadCompactionBytes ))
360344 builder .WriteString (fmt .Sprintf (`bounds=%s, ` , pc .bounds ))
361345 builder .WriteString (fmt .Sprintf (`version=%s, ` , pc .version ))
362346 builder .WriteString (fmt .Sprintf (`inputs=%s, ` , pc .inputs ))
@@ -373,13 +357,10 @@ func (pc *pickedTableCompaction) clone() *pickedTableCompaction {
373357 // Quickly copy over fields that do not require special deep copy care, and
374358 // set all fields that will require a deep copy to nil.
375359 newPC := & pickedTableCompaction {
376- score : pc .score ,
377- kind : pc .kind ,
378- baseLevel : pc .baseLevel ,
379- maxOutputFileSize : pc .maxOutputFileSize ,
380- maxOverlapBytes : pc .maxOverlapBytes ,
381- maxReadCompactionBytes : pc .maxReadCompactionBytes ,
382- bounds : pc .bounds .Clone (),
360+ score : pc .score ,
361+ kind : pc .kind ,
362+ baseLevel : pc .baseLevel ,
363+ bounds : pc .bounds .Clone (),
383364
384365 // TODO(msbutler): properly clone picker metrics
385366 pickerMetrics : pc .pickerMetrics ,
@@ -452,10 +433,9 @@ func (pc *pickedTableCompaction) setupInputs(
452433 // Grow the sstables in inputLevel.level as long as it doesn't affect the number
453434 // of sstables included from pc.outputLevel.level.
454435 if pc .lcf != nil && inputLevel .level == 0 {
455- pc .growL0ForBase (cmp , maxExpandedBytes )
456- } else if pc .grow (cmp , pc .bounds , maxExpandedBytes , inputLevel , problemSpans ) {
457- // inputLevel was expanded, adjust key range if necessary.
458- pc .bounds = manifest .ExtendKeyRange (cmp , pc .bounds , inputLevel .files .All ())
436+ pc .maybeGrowL0ForBase (cmp , maxExpandedBytes )
437+ } else {
438+ pc .maybeGrow (cmp , pc .bounds , maxExpandedBytes , inputLevel , problemSpans )
459439 }
460440 }
461441
@@ -464,13 +444,18 @@ func (pc *pickedTableCompaction) setupInputs(
464444 pc .startLevel .l0SublevelInfo = generateSublevelInfo (cmp , pc .startLevel .files )
465445 }
466446
467- return ! outputKeyRangeAlreadyCompacting (cmp , inProgressCompactions , pc )
447+ if outputKeyRangeAlreadyCompacting (cmp , inProgressCompactions , pc ) {
448+ return false
449+ }
450+
451+ return true
468452}
469453
470- // grow grows the number of inputs at startLevel without changing the number of
471- // pc.outputLevel files in the compaction, and returns whether the inputs grew. sm
472- // and la are the smallest and largest InternalKeys in all of the inputs.
473- func (pc * pickedTableCompaction ) grow (
454+ // maybeGrow grows the number of inputs at startLevel without changing the
455+ // number of pc.outputLevel files in the compaction, and returns whether the
456+ // inputs grew. sm and la are the smallest and largest InternalKeys in all
457+ // inputs.
458+ func (pc * pickedTableCompaction ) maybeGrow (
474459 cmp base.Compare ,
475460 bounds base.UserKeyBounds ,
476461 maxExpandedBytes uint64 ,
@@ -503,13 +488,15 @@ func (pc *pickedTableCompaction) grow(
503488 return false
504489 }
505490 inputLevel .files = expandedInputLevel
491+ // inputLevel was expanded, adjust key range if necessary.
492+ pc .bounds = manifest .ExtendKeyRange (cmp , pc .bounds , inputLevel .files .All ())
506493 return true
507494}
508495
509- // Similar logic as pc.grow . Additional L0 files are optionally added to the
510- // compaction at this step. Note that the bounds passed in are not the bounds
511- // of the compaction, but rather the smallest and largest internal keys that
512- // the compaction cannot include from L0 without pulling in more Lbase
496+ // Similar logic as pc.maybeGrow . Additional L0 files are optionally added to
497+ // the compaction at this step. Note that the bounds passed in are not the
498+ // bounds of the compaction, but rather the smallest and largest internal keys
499+ // that the compaction cannot include from L0 without pulling in more Lbase
513500// files. Consider this example:
514501//
515502// L0: c-d e+f g-h
@@ -523,7 +510,7 @@ func (pc *pickedTableCompaction) grow(
523510// will expand the compaction to include c-d and g-h from L0. The
524511// bounds passed in are exclusive; the compaction cannot be expanded
525512// to include files that "touch" it.
526- func (pc * pickedTableCompaction ) growL0ForBase (cmp base.Compare , maxExpandedBytes uint64 ) bool {
513+ func (pc * pickedTableCompaction ) maybeGrowL0ForBase (cmp base.Compare , maxExpandedBytes uint64 ) {
527514 if invariants .Enabled {
528515 if pc .startLevel .level != 0 {
529516 panic (fmt .Sprintf ("pc.startLevel.level is %d, expected 0" , pc .startLevel .level ))
@@ -533,7 +520,7 @@ func (pc *pickedTableCompaction) growL0ForBase(cmp base.Compare, maxExpandedByte
533520 if pc .outputLevel .files .Empty () {
534521 // If there are no overlapping fields in the output level, we do not
535522 // attempt to expand the compaction to encourage move compactions.
536- return false
523+ return
537524 }
538525
539526 smallestBaseKey := base .InvalidInternalKey
@@ -551,7 +538,7 @@ func (pc *pickedTableCompaction) growL0ForBase(cmp base.Compare, maxExpandedByte
551538 })
552539 oldLcf := pc .lcf .Clone ()
553540 if ! pc .l0Organizer .ExtendL0ForBaseCompactionTo (smallestBaseKey , largestBaseKey , pc .lcf ) {
554- return false
541+ return
555542 }
556543
557544 var newStartLevelFiles []* manifest.TableMetadata
@@ -566,13 +553,12 @@ func (pc *pickedTableCompaction) growL0ForBase(cmp base.Compare, maxExpandedByte
566553
567554 if sizeSum + pc .outputLevel .files .AggregateSizeSum () >= maxExpandedBytes {
568555 * pc .lcf = * oldLcf
569- return false
556+ return
570557 }
571558
572559 pc .startLevel .files = manifest .NewLevelSliceSeqSorted (newStartLevelFiles )
573560 pc .bounds = manifest .ExtendKeyRange (cmp , pc .bounds ,
574561 pc .startLevel .files .All (), pc .outputLevel .files .All ())
575- return true
576562}
577563
578564// estimatedInputSize returns an estimate of the size of the compaction's
@@ -2244,17 +2230,20 @@ func pickReadTriggeredCompactionHelper(
22442230 }
22452231 pc .kind = compactionKindRead
22462232
2247- // Prevent read compactions which are too wide.
2248- outputOverlaps := pc .version .Overlaps (pc .outputLevel .level , pc .bounds )
2249- if outputOverlaps .AggregateSizeSum () > pc .maxReadCompactionBytes {
2250- return nil
2251- }
2233+ if outputOverlaps := pc .version .Overlaps (pc .outputLevel .level , pc .bounds ); ! outputOverlaps .Empty () {
2234+ // Prevent read compactions which are too wide.
2235+ targetFileSize := p .opts .TargetFileSize (pc .outputLevel .level , pc .baseLevel )
2236+ if outputOverlaps .AggregateSizeSum () > maxReadCompactionBytes (targetFileSize ) {
2237+ return nil
2238+ }
22522239
2253- // Prevent compactions which start with a small seed file X, but overlap
2254- // with over allowedCompactionWidth * X file sizes in the output layer.
2255- const allowedCompactionWidth = 35
2256- if outputOverlaps .AggregateSizeSum () > overlapSlice .AggregateSizeSum ()* allowedCompactionWidth {
2257- return nil
2240+ // Prevent compactions which start with a small seed file of size X, but
2241+ // overlap with files in the output layer that have total size larger than
2242+ // allowedCompactionWidth * X.
2243+ const allowedCompactionWidth = 35
2244+ if outputOverlaps .AggregateSizeSum () > overlapSlice .AggregateSizeSum ()* allowedCompactionWidth {
2245+ return nil
2246+ }
22582247 }
22592248
22602249 return pc