@@ -21,7 +21,6 @@ import (
 
 	"github.com/dgraph-io/badger/v2/options"
 	"github.com/dgraph-io/badger/v2/table"
-	"github.com/dgraph-io/badger/v2/y"
 )
 
 // Note: If you add a new option X make sure you also add a WithX method on Options.
@@ -102,11 +101,6 @@ type Options struct {
 // DefaultOptions sets a list of recommended options for good performance.
 // Feel free to modify these to suit your needs with the WithX methods.
 func DefaultOptions(path string) Options {
-	defaultCompression := options.ZSTD
-	// Use snappy as default compression algorithm if badger is built without CGO.
-	if !y.CgoEnabled {
-		defaultCompression = options.Snappy
-	}
 	return Options{
 		Dir:      path,
 		ValueDir: path,
@@ -129,16 +123,19 @@ func DefaultOptions(path string) Options {
 		CompactL0OnClose:     true,
 		KeepL0InMemory:       true,
 		VerifyValueChecksum:  false,
-		Compression:          defaultCompression,
+		Compression:          options.None,
 		MaxCacheSize:         1 << 30, // 1 GB
-		// Benchmarking compression level against performance showed that level 15 gives
-		// the best speed vs ratio tradeoff.
-		// For a data size of 4KB we get
-		// Level: 3 Ratio: 2.72 Time: 24112 n/s
-		// Level: 10 Ratio: 2.95 Time: 75655 n/s
-		// Level: 15 Ratio: 4.38 Time: 239042 n/s
-		// See https://github.com/dgraph-io/badger/pull/1111#issue-338120757
-		ZSTDCompressionLevel: 15,
+		// The following benchmarks were done on a 4 KB block size (the default block size).
+		// The compression ratio is supposed to increase with increasing compression level,
+		// but since the input to the compression algorithm is small (4 KB), we don't get a
+		// significant benefit at level 3.
+		// no_compression-16              10    502848865 ns/op   165.46 MB/s   -
+		// zstd_compression/level_1-16     7    739037966 ns/op   112.58 MB/s   2.93
+		// zstd_compression/level_3-16     7    756950250 ns/op   109.91 MB/s   2.72
+		// zstd_compression/level_15-16    1  11135686219 ns/op     7.47 MB/s   4.38
+		// Benchmark code can be found in the table/builder_test.go file.
+		ZSTDCompressionLevel: 1,
+
 		// Nothing to read/write value log using standard File I/O
 		// MemoryMap to mmap() the value log files
 		// (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32.
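With this change compression is disabled by default, so callers have to opt in explicitly. Below is a minimal usage sketch, assuming the badger v2 options API; the database path and the choice of ZSTD at level 1 are illustrative, and ZSTD support in badger v2 may require a CGO build (Snappy can be selected instead).

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
)

func main() {
	// Compression now defaults to options.None, so it must be requested explicitly.
	// Level 1 is the recommended ZSTD level per the benchmarks in the comment above;
	// options.Snappy is an alternative for builds without CGO.
	opts := badger.DefaultOptions("/tmp/badger").
		WithCompression(options.ZSTD).
		WithZSTDCompressionLevel(1)

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```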
@@ -561,7 +558,18 @@ func (opt Options) WithInMemory(b bool) Options {
 // The ZSTD compression algorithm supports 20 compression levels. The higher the compression
 // level, the better is the compression ratio but lower is the performance. Lower levels
 // have better performance and higher levels have better compression ratios.
-// The default value of ZSTDCompressionLevel is 15.
+// We recommend using ZSTD compression level 1. Any level higher than 1 seems to deteriorate
+// badger's performance.
+// The following benchmarks were done on a 4 KB block size (the default block size). The compression
+// ratio is supposed to increase with increasing compression level, but since the input to the
+// compression algorithm is small (4 KB), we don't get a significant benefit at level 3. It is
+// advised to write your own benchmarks before choosing a compression algorithm or level.
+//
+// no_compression-16              10    502848865 ns/op   165.46 MB/s   -
+// zstd_compression/level_1-16     7    739037966 ns/op   112.58 MB/s   2.93
+// zstd_compression/level_3-16     7    756950250 ns/op   109.91 MB/s   2.72
+// zstd_compression/level_15-16    1  11135686219 ns/op     7.47 MB/s   4.38
+// Benchmark code can be found in the table/builder_test.go file.
 func (opt Options) WithZSTDCompressionLevel(cLevel int) Options {
 	opt.ZSTDCompressionLevel = cLevel
 	return opt
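Since the new comment advises writing your own benchmarks before picking a level, here is a rough, self-contained sketch of such a measurement on 4 KB blocks. It is not the table/builder_test.go benchmark referenced above; it assumes the github.com/klauspost/compress/zstd package (whose EncoderLevelFromZstd mapping to numeric zstd levels is approximate) rather than whatever compressor your badger build actually links against, and the synthetic data is only a stand-in for representative key/value payloads.

```go
package zstdbench

import (
	"fmt"
	"testing"

	"github.com/klauspost/compress/zstd"
)

// benchmarkLevel compresses a single 4 KB block repeatedly at the given zstd
// level, reporting throughput and an approximate compression ratio.
func benchmarkLevel(b *testing.B, level int) {
	// Mildly repetitive data so the ratio is not trivially 1.0; substitute
	// representative key/value payloads for meaningful numbers.
	block := make([]byte, 4<<10) // 4 KB, badger's default block size
	for i := range block {
		block[i] = byte('a' + i%16)
	}

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
	if err != nil {
		b.Fatal(err)
	}
	defer enc.Close()

	b.SetBytes(int64(len(block)))
	b.ResetTimer()

	var compressed []byte
	for i := 0; i < b.N; i++ {
		compressed = enc.EncodeAll(block, compressed[:0])
	}
	b.ReportMetric(float64(len(block))/float64(len(compressed)), "ratio")
}

func BenchmarkZSTDCompression(b *testing.B) {
	for _, level := range []int{1, 3, 15} {
		b.Run(fmt.Sprintf("level_%d", level), func(b *testing.B) {
			benchmarkLevel(b, level)
		})
	}
}
```

Run it with `go test -bench=ZSTDCompression` and compare the ns/op, MB/s, and ratio columns, as in the table above.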