Commit 349854e

Author: Ibrahim Jarif
Message: Merge branch 'master' into pr/disable-cache
Parents: bfe482d, 82381ac

13 files changed: +730 -61 lines

README.md

Lines changed: 1 addition & 0 deletions
@@ -748,6 +748,7 @@ Below is a list of known projects that use Badger:
 
 * [0-stor](https://github.com/zero-os/0-stor) - Single device object store.
 * [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database.
+* [Jaeger](https://github.com/jaegertracing/jaeger) - Distributed tracing platform.
 * [TalariaDB](https://github.com/grab/talaria) - Distributed, low latency time-series database.
 * [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics.
 * [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue.

db_test.go

Lines changed: 1 addition & 7 deletions
@@ -26,7 +26,6 @@ import (
 	"log"
 	"math"
 	"math/rand"
-	"net/http"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -1964,12 +1963,7 @@ func TestVerifyChecksum(t *testing.T) {
 }
 
 func TestMain(m *testing.M) {
-	// call flag.Parse() here if TestMain uses flags
-	go func() {
-		if err := http.ListenAndServe("localhost:8080", nil); err != nil {
-			panic("Unable to open http port at 8080")
-		}
-	}()
+	flag.Parse()
 	os.Exit(m.Run())
 }
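The pprof HTTP listener in TestMain is gone; what remains is the standard pattern of parsing test flags before running the suite. A small illustrative sketch in a hypothetical test package (the -slow flag is made up, not part of badger's tests) showing why flag.Parse() has to run before m.Run() when tests read flag values:

```go
package badger_test

import (
	"flag"
	"os"
	"testing"
)

// slow is a hypothetical flag, shown only to illustrate why TestMain should
// call flag.Parse() before m.Run() when the package's tests read flag values.
var slow = flag.Bool("slow", false, "also run slow tests")

func TestMain(m *testing.M) {
	flag.Parse() // make -slow (and the registered -test.* flags) visible to tests
	os.Exit(m.Run())
}

func TestExample(t *testing.T) {
	if !*slow {
		t.Skip("skipping: pass -slow to run")
	}
}
```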

go.mod

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@ go 1.12
 require (
 	github.com/DataDog/zstd v1.4.1
 	github.com/cespare/xxhash v1.1.0
-	github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e
+	github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3
 	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2
 	github.com/dustin/go-humanize v1.0.0
 	github.com/golang/protobuf v1.3.1

go.sum

Lines changed: 2 additions & 2 deletions
@@ -13,8 +13,8 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e h1:aeUNgwup7PnDOBAD1BOKAqzb/W/NksOj6r3dwKKuqfg=
-github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs=
+github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU=
+github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=

level_handler.go

Lines changed: 3 additions & 1 deletion
@@ -188,7 +188,9 @@ func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool {
 	// Need lock as we may be deleting the first table during a level 0 compaction.
 	s.Lock()
 	defer s.Unlock()
-	if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall {
+	// Return false only if L0 is kept in memory and the number of tables exceeds
+	// NumLevelZeroTablesStall. For on-disk L0, we should just add the tables to the level.
+	if s.db.opt.KeepL0InMemory && len(s.tables) >= s.db.opt.NumLevelZeroTablesStall {
 		return false
 	}
 
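The KeepL0InMemory check above changes when writes stall. A minimal usage sketch, assuming badger's v2 module path and the WithKeepL0InMemory / WithNumLevelZeroTablesStall option builders that correspond to the fields referenced in this diff: with L0 kept on disk, tryAddLevel0Table keeps accepting tables instead of returning false at the stall threshold.

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	// Keep L0 tables on disk; with the change above, reaching
	// NumLevelZeroTablesStall no longer makes tryAddLevel0Table refuse new
	// tables, so writers are not stalled on that limit.
	opts := badger.DefaultOptions("/tmp/badger").
		WithKeepL0InMemory(false).
		WithNumLevelZeroTablesStall(15)

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```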

levels.go

Lines changed: 43 additions & 32 deletions
@@ -424,34 +424,42 @@ func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
 			prios = append(prios, pri)
 		}
 	}
-	sort.Slice(prios, func(i, j int) bool {
-		return prios[i].score > prios[j].score
-	})
+	// We used to sort compaction priorities by score, but we now compact by
+	// level instead: upper levels (level 0, level 1, etc.) always get compacted
+	// first, before the lower levels -- this allows us to avoid stalls.
 	return prios
 }
 
-// compactBuildTables merge topTables and botTables to form a list of new tables.
+// checkOverlap checks if the given tables overlap with any level from the given "lev" onwards.
+func (s *levelsController) checkOverlap(tables []*table.Table, lev int) bool {
+	kr := getKeyRange(tables...)
+	for i, lh := range s.levels {
+		if i < lev { // Skip upper levels.
+			continue
+		}
+		lh.RLock()
+		left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
+		lh.RUnlock()
+		if right-left > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// compactBuildTables merges topTables and botTables to form a list of new tables.
 func (s *levelsController) compactBuildTables(
 	lev int, cd compactDef) ([]*table.Table, func() error, error) {
 	topTables := cd.top
 	botTables := cd.bot
 
-	var hasOverlap bool
-	{
-		kr := getKeyRange(cd.top...)
-		for i, lh := range s.levels {
-			if i <= lev { // Skip upper levels.
-				continue
-			}
-			lh.RLock()
-			left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
-			lh.RUnlock()
-			if right-left > 0 {
-				hasOverlap = true
-				break
-			}
-		}
-	}
+	// Check overlap of the top level with the levels which are not being
+	// compacted in this compaction. We don't need to check overlap of the
+	// bottom tables with other levels, because if the top tables overlap with
+	// any of the lower levels, the bottom level overlaps too (the top and
+	// bottom tables overlap with each other).
+	hasOverlap := s.checkOverlap(cd.top, cd.nextLevel.level+1)
 
 	// Try to collect stats so that we can inform value log about GC. That would help us find which
 	// value log file should be GCed.
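The overlap test above reduces to: given a key range and a level whose tables are sorted and non-overlapping, is the window of intersecting tables non-empty? A self-contained sketch of that idea with simplified string keys (the keyRange type and overlapsLevel helper here are hypothetical stand-ins; badger's real check works on *table.Table via getKeyRange and overlappingTables):

```go
package main

import (
	"fmt"
	"sort"
)

// keyRange is a simplified stand-in for badger's internal key range type.
type keyRange struct{ left, right string }

// overlapsLevel reports whether kr overlaps any table in a level whose tables
// are sorted by key and non-overlapping: find the candidate window
// [left, right) with binary search and check that it is non-empty.
func overlapsLevel(tables []keyRange, kr keyRange) bool {
	left := sort.Search(len(tables), func(i int) bool {
		return kr.left <= tables[i].right // first table whose right edge reaches kr.left
	})
	right := sort.Search(len(tables), func(i int) bool {
		return kr.right < tables[i].left // first table entirely to the right of kr
	})
	return right-left > 0
}

func main() {
	level := []keyRange{{"a", "c"}, {"d", "f"}, {"g", "k"}}
	fmt.Println(overlapsLevel(level, keyRange{"e", "h"})) // true: intersects "d-f" and "g-k"
	fmt.Println(overlapsLevel(level, keyRange{"x", "z"})) // false: beyond the last table
}
```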
@@ -561,10 +569,15 @@ func (s *levelsController) compactBuildTables(
 			// versions which are below the minReadTs, otherwise, we might end up discarding the
 			// only valid version for a running transaction.
 			numVersions++
-			lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
-			if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
-				numVersions > s.kv.opt.NumVersionsToKeep ||
-				lastValidVersion {
+
+			// Keep the current version and discard all the next versions if
+			// - The `discardEarlierVersions` bit is set OR
+			// - We've already processed `NumVersionsToKeep` number of versions
+			//   (including the current item being processed)
+			lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 ||
+				numVersions == s.kv.opt.NumVersionsToKeep
+
+			if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) || lastValidVersion {
 				// If this version of the key is deleted or expired, skip all the rest of the
 				// versions. Ensure that we're only removing versions below readTs.
 				skipKey = y.SafeCopy(skipKey, it.Key())
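For downstream users, the retention rule above is driven by NumVersionsToKeep: once that many versions of a key have been seen, older ones become candidates for discard at compaction time. A rough usage sketch, assuming the badger v2 module path and its standard Options, transaction, and iterator API (the path and key names are made up for illustration):

```go
package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	// Keep at most two versions per key; older versions may be dropped
	// during compaction, per the lastValidVersion logic above.
	opts := badger.DefaultOptions("/tmp/badger-versions").WithNumVersionsToKeep(2)
	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write three versions of the same key.
	for i := 1; i <= 3; i++ {
		err := db.Update(func(txn *badger.Txn) error {
			return txn.Set([]byte("k"), []byte(fmt.Sprintf("v%d", i)))
		})
		if err != nil {
			log.Fatal(err)
		}
	}

	// Iterate over all currently visible versions; after compaction no more
	// than NumVersionsToKeep of them are retained.
	err = db.View(func(txn *badger.Txn) error {
		itOpts := badger.DefaultIteratorOptions
		itOpts.AllVersions = true
		it := txn.NewIterator(itOpts)
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			item := it.Item()
			val, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			fmt.Printf("key=%s version=%d value=%s\n", item.Key(), item.Version(), val)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```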
@@ -925,15 +938,13 @@ func (s *levelsController) addLevel0Table(t *table.Table) error {
 		s.cstatus.RUnlock()
 		timeStart = time.Now()
 	}
-	// Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
-	// will very quickly fill up level 0 again and if the compaction strategy favors level 0,
-	// then level 1 is going to super full.
+	// Before we unstall, we need to make sure that level 0 is healthy. Otherwise, we
+	// will very quickly fill up level 0 again.
 	for i := 0; ; i++ {
-		// Passing 0 for delSize to compactable means we're treating incomplete compactions as
-		// not having finished -- we wait for them to finish. Also, it's crucial this behavior
-		// replicates pickCompactLevels' behavior in computing compactability in order to
-		// guarantee progress.
-		if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
+		// It's crucial that this behavior replicates pickCompactLevels' behavior in
+		// computing compactability in order to guarantee progress.
+		// Break the loop once L0 has enough space to accommodate new tables.
+		if !s.isLevel0Compactable() {
 			break
 		}
 		time.Sleep(10 * time.Millisecond)
