You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

684 lines
18 KiB

  1. // Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
  2. // All rights reserved.
  3. //
  4. // Use of this source code is governed by a BSD-style license that can be
  5. // found in the LICENSE file.

  6. // Package opt provides sets of options used by LevelDB.
  7. package opt
  8. import (
  9. "math"
  10. "github.com/syndtr/goleveldb/leveldb/cache"
  11. "github.com/syndtr/goleveldb/leveldb/comparer"
  12. "github.com/syndtr/goleveldb/leveldb/filter"
  13. )
// Binary size units used by the option defaults below.
const (
	KiB = 1024
	MiB = KiB * 1024
	GiB = MiB * 1024
)
// Defaults used when the corresponding Options field is unset (zero).
// See the matching Options field for the meaning of each value.
var (
	DefaultBlockCacher                   = LRUCacher
	DefaultBlockCacheCapacity            = 8 * MiB
	DefaultBlockRestartInterval          = 16
	DefaultBlockSize                     = 4 * KiB
	DefaultCompactionExpandLimitFactor   = 25
	DefaultCompactionGPOverlapsFactor    = 10
	DefaultCompactionL0Trigger           = 4
	DefaultCompactionSourceLimitFactor   = 1
	DefaultCompactionTableSize           = 2 * MiB
	DefaultCompactionTableSizeMultiplier = 1.0
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = SnappyCompression
	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultOpenFilesCacher               = LRUCacher
	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
)
// Cacher is a caching algorithm: a factory that creates cache.Cacher
// instances of a given capacity.
type Cacher interface {
	// New creates a new cache.Cacher with the given capacity.
	New(capacity int) cache.Cacher
}
// CacherFunc adapts a plain constructor function to the Cacher
// interface.
type CacherFunc struct {
	// NewFunc creates a cache.Cacher with the given capacity. It may be
	// nil, in which case New returns nil (caching disabled).
	NewFunc func(capacity int) cache.Cacher
}
  47. func (f *CacherFunc) New(capacity int) cache.Cacher {
  48. if f.NewFunc != nil {
  49. return f.NewFunc(capacity)
  50. }
  51. return nil
  52. }
// noCacher is a cache.Cacher constructor that always returns nil,
// i.e. no caching. NOTE(review): appears unreferenced within this view
// of the file — confirm usage before removing.
func noCacher(int) cache.Cacher { return nil }
var (
	// LRUCacher is the LRU-cache algorithm.
	LRUCacher = &CacherFunc{cache.NewLRU}

	// NoCacher is the value to disable the caching algorithm.
	NoCacher = &CacherFunc{}
)
// Compression is the 'sorted table' block compression algorithm to use.
type Compression uint
  62. func (c Compression) String() string {
  63. switch c {
  64. case DefaultCompression:
  65. return "default"
  66. case NoCompression:
  67. return "none"
  68. case SnappyCompression:
  69. return "snappy"
  70. }
  71. return "invalid"
  72. }
const (
	// DefaultCompression resolves to the package default (snappy).
	DefaultCompression Compression = iota
	// NoCompression disables block compression.
	NoCompression
	// SnappyCompression selects the snappy algorithm.
	SnappyCompression
	// nCompression is the number of Compression values; used as an
	// exclusive upper bound for validity checks.
	nCompression
)
// Strict is the DB 'strict level': a bit set controlling how strictly
// corruption is treated during reads, compaction and recovery.
type Strict uint

const (
	// StrictManifest: if present then a corrupted or invalid chunk or block
	// in the manifest journal will cause an error instead of being dropped.
	// This will prevent a database with a corrupted manifest from being opened.
	StrictManifest Strict = 1 << iota

	// StrictJournalChecksum: if present then journal chunk checksums will
	// be verified.
	StrictJournalChecksum

	// StrictJournal: if present then a corrupted or invalid chunk or block
	// in the journal will cause an error instead of being dropped.
	// This will prevent a database with a corrupted journal from being opened.
	StrictJournal

	// StrictBlockChecksum: if present then 'sorted table' block checksums
	// will be verified. This has effect on both 'read operation' and
	// compaction.
	StrictBlockChecksum

	// StrictCompaction: if present then a corrupted 'sorted table' will
	// fail compaction. The database will enter read-only mode.
	StrictCompaction

	// StrictReader: if present then a corrupted 'sorted table' will halt
	// the 'read operation'.
	StrictReader

	// StrictRecovery: if present then leveldb.Recover will drop corrupted
	// 'sorted table' files.
	StrictRecovery

	// StrictOverride is only applicable to ReadOptions: if present then
	// that ReadOptions 'strict level' will override the global one.
	StrictOverride

	// StrictAll enables all strict flags (StrictOverride is excluded, as
	// it is a ReadOptions-only modifier rather than a strictness flag).
	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery

	// DefaultStrict is the default strict flags. Specifying any strict
	// flags will override the default strict flags as a whole (i.e. not
	// OR'ed).
	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader

	// NoStrict disables all strict flags. Overrides the default strict
	// flags.
	NoStrict = ^StrictAll
)
// Options holds the optional parameters for the DB at large.
type Options struct {
	// AltFilters defines one or more 'alternative filters'.
	// 'alternative filters' will be used during reads if a filter block
	// does not match with the 'effective filter'.
	//
	// The default value is nil.
	AltFilters []filter.Filter

	// BlockCacher provides the cache algorithm for LevelDB 'sorted table'
	// block caching. Specify NoCacher to disable the caching algorithm.
	//
	// The default value is LRUCacher.
	BlockCacher Cacher

	// BlockCacheCapacity defines the capacity of the 'sorted table' block
	// caching. Use -1 for zero; this has the same effect as specifying
	// NoCacher to BlockCacher.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// BlockRestartInterval is the number of keys between restart points
	// for delta encoding of keys.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockSize is the minimum uncompressed size in bytes of each 'sorted
	// table' block.
	//
	// The default value is 4KiB.
	BlockSize int

	// CompactionExpandLimitFactor limits compaction size after expanded.
	// This will be multiplied by the table size limit at the compaction
	// target level.
	//
	// The default value is 25.
	CompactionExpandLimitFactor int

	// CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2)
	// that a single 'sorted table' generates. This will be multiplied by
	// the table size limit at the grandparent level.
	//
	// The default value is 10.
	CompactionGPOverlapsFactor int

	// CompactionL0Trigger defines the number of 'sorted table' files at
	// level-0 that will trigger compaction.
	//
	// The default value is 4.
	CompactionL0Trigger int

	// CompactionSourceLimitFactor limits compaction source size. This
	// doesn't apply to level-0. This will be multiplied by the table size
	// limit at the compaction target level.
	//
	// The default value is 1.
	CompactionSourceLimitFactor int

	// CompactionTableSize limits the size of 'sorted table' that
	// compaction generates. The limits for each level will be calculated
	// as:
	//	CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
	// The multiplier for each level can also be fine-tuned using
	// CompactionTableSizeMultiplierPerLevel.
	//
	// The default value is 2MiB.
	CompactionTableSize int

	// CompactionTableSizeMultiplier defines the multiplier for
	// CompactionTableSize.
	//
	// The default value is 1.
	CompactionTableSizeMultiplier float64

	// CompactionTableSizeMultiplierPerLevel defines a per-level multiplier
	// for CompactionTableSize. Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTableSizeMultiplierPerLevel []float64

	// CompactionTotalSize limits the total size of 'sorted table' for each
	// level. The limits for each level will be calculated as:
	//	CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
	// The multiplier for each level can also be fine-tuned using
	// CompactionTotalSizeMultiplierPerLevel.
	//
	// The default value is 10MiB.
	CompactionTotalSize int

	// CompactionTotalSizeMultiplier defines the multiplier for
	// CompactionTotalSize.
	//
	// The default value is 10.
	CompactionTotalSizeMultiplier float64

	// CompactionTotalSizeMultiplierPerLevel defines a per-level multiplier
	// for CompactionTotalSize. Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTotalSizeMultiplierPerLevel []float64

	// Comparer defines a total ordering over the space of []byte keys: a
	// 'less than' relationship. The same comparison algorithm must be used
	// for reads and writes over the lifetime of the DB.
	//
	// The default value uses the same ordering as bytes.Compare.
	Comparer comparer.Comparer

	// Compression defines the 'sorted table' block compression to use.
	//
	// The default value (DefaultCompression) uses snappy compression.
	Compression Compression

	// DisableBufferPool allows disabling use of util.BufferPool
	// functionality.
	//
	// The default value is false.
	DisableBufferPool bool

	// DisableBlockCache allows disabling use of cache.Cache functionality
	// on 'sorted table' blocks.
	//
	// The default value is false.
	DisableBlockCache bool

	// DisableCompactionBackoff allows disabling compaction retry backoff.
	//
	// The default value is false.
	DisableCompactionBackoff bool

	// DisableLargeBatchTransaction allows disabling switch-to-transaction
	// mode on large batch writes. If enabled, batch writes larger than
	// WriteBuffer will use a transaction.
	//
	// The default is false.
	DisableLargeBatchTransaction bool

	// ErrorIfExist defines whether an error should be returned if the DB
	// already exists.
	//
	// The default value is false.
	ErrorIfExist bool

	// ErrorIfMissing defines whether an error should be returned if the DB
	// is missing. If false then the database will be created if missing,
	// otherwise an error will be returned.
	//
	// The default value is false.
	ErrorIfMissing bool

	// Filter defines an 'effective filter' to use. An 'effective filter'
	// if defined will be used to generate per-table filter blocks.
	// The filter name will be stored on disk.
	// During reads LevelDB will try to find a matching filter from the
	// 'effective filter' and 'alternative filters'.
	//
	// Filter can be changed after a DB has been created. It is recommended
	// to put the old filter into the 'alternative filters' to mitigate the
	// lack of a filter during the transition period.
	//
	// A filter is used to reduce disk reads when looking for a specific key.
	//
	// The default value is nil.
	Filter filter.Filter

	// IteratorSamplingRate defines the approximate gap (in bytes) between
	// read sampling of an iterator. The samples will be used to determine
	// when compaction should be triggered.
	//
	// The default is 1MiB.
	IteratorSamplingRate int

	// NoSync allows completely disabling fsync.
	//
	// The default is false.
	NoSync bool

	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// OpenFilesCacher provides the cache algorithm for open-files caching.
	// Specify NoCacher to disable the caching algorithm.
	//
	// The default value is LRUCacher.
	OpenFilesCacher Cacher

	// OpenFilesCacheCapacity defines the capacity of the open-files
	// caching. Use -1 for zero; this has the same effect as specifying
	// NoCacher to OpenFilesCacher.
	//
	// The default value is 500.
	OpenFilesCacheCapacity int

	// ReadOnly, if true, opens the DB in read-only mode.
	//
	// The default value is false.
	ReadOnly bool

	// Strict defines the DB strict level.
	Strict Strict

	// WriteBuffer defines the maximum size of a 'memdb' before it is
	// flushed to a 'sorted table'. 'memdb' is an in-memory DB backed by an
	// on-disk unsorted journal.
	//
	// LevelDB may hold up to two 'memdb' at the same time.
	//
	// The default value is 4MiB.
	WriteBuffer int

	// WriteL0PauseTrigger defines the number of 'sorted table' files at
	// level-0 that will pause writes.
	//
	// The default value is 12.
	WriteL0PauseTrigger int

	// WriteL0SlowdownTrigger defines the number of 'sorted table' files at
	// level-0 that will trigger write slowdown.
	//
	// The default value is 8.
	WriteL0SlowdownTrigger int
}
  301. func (o *Options) GetAltFilters() []filter.Filter {
  302. if o == nil {
  303. return nil
  304. }
  305. return o.AltFilters
  306. }
  307. func (o *Options) GetBlockCacher() Cacher {
  308. if o == nil || o.BlockCacher == nil {
  309. return DefaultBlockCacher
  310. } else if o.BlockCacher == NoCacher {
  311. return nil
  312. }
  313. return o.BlockCacher
  314. }
  315. func (o *Options) GetBlockCacheCapacity() int {
  316. if o == nil || o.BlockCacheCapacity == 0 {
  317. return DefaultBlockCacheCapacity
  318. } else if o.BlockCacheCapacity < 0 {
  319. return 0
  320. }
  321. return o.BlockCacheCapacity
  322. }
  323. func (o *Options) GetBlockRestartInterval() int {
  324. if o == nil || o.BlockRestartInterval <= 0 {
  325. return DefaultBlockRestartInterval
  326. }
  327. return o.BlockRestartInterval
  328. }
  329. func (o *Options) GetBlockSize() int {
  330. if o == nil || o.BlockSize <= 0 {
  331. return DefaultBlockSize
  332. }
  333. return o.BlockSize
  334. }
  335. func (o *Options) GetCompactionExpandLimit(level int) int {
  336. factor := DefaultCompactionExpandLimitFactor
  337. if o != nil && o.CompactionExpandLimitFactor > 0 {
  338. factor = o.CompactionExpandLimitFactor
  339. }
  340. return o.GetCompactionTableSize(level+1) * factor
  341. }
  342. func (o *Options) GetCompactionGPOverlaps(level int) int {
  343. factor := DefaultCompactionGPOverlapsFactor
  344. if o != nil && o.CompactionGPOverlapsFactor > 0 {
  345. factor = o.CompactionGPOverlapsFactor
  346. }
  347. return o.GetCompactionTableSize(level+2) * factor
  348. }
  349. func (o *Options) GetCompactionL0Trigger() int {
  350. if o == nil || o.CompactionL0Trigger == 0 {
  351. return DefaultCompactionL0Trigger
  352. }
  353. return o.CompactionL0Trigger
  354. }
  355. func (o *Options) GetCompactionSourceLimit(level int) int {
  356. factor := DefaultCompactionSourceLimitFactor
  357. if o != nil && o.CompactionSourceLimitFactor > 0 {
  358. factor = o.CompactionSourceLimitFactor
  359. }
  360. return o.GetCompactionTableSize(level+1) * factor
  361. }
  362. func (o *Options) GetCompactionTableSize(level int) int {
  363. var (
  364. base = DefaultCompactionTableSize
  365. mult float64
  366. )
  367. if o != nil {
  368. if o.CompactionTableSize > 0 {
  369. base = o.CompactionTableSize
  370. }
  371. if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
  372. mult = o.CompactionTableSizeMultiplierPerLevel[level]
  373. } else if o.CompactionTableSizeMultiplier > 0 {
  374. mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
  375. }
  376. }
  377. if mult == 0 {
  378. mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
  379. }
  380. return int(float64(base) * mult)
  381. }
  382. func (o *Options) GetCompactionTotalSize(level int) int64 {
  383. var (
  384. base = DefaultCompactionTotalSize
  385. mult float64
  386. )
  387. if o != nil {
  388. if o.CompactionTotalSize > 0 {
  389. base = o.CompactionTotalSize
  390. }
  391. if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
  392. mult = o.CompactionTotalSizeMultiplierPerLevel[level]
  393. } else if o.CompactionTotalSizeMultiplier > 0 {
  394. mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
  395. }
  396. }
  397. if mult == 0 {
  398. mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
  399. }
  400. return int64(float64(base) * mult)
  401. }
  402. func (o *Options) GetComparer() comparer.Comparer {
  403. if o == nil || o.Comparer == nil {
  404. return comparer.DefaultComparer
  405. }
  406. return o.Comparer
  407. }
  408. func (o *Options) GetCompression() Compression {
  409. if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
  410. return DefaultCompressionType
  411. }
  412. return o.Compression
  413. }
  414. func (o *Options) GetDisableBufferPool() bool {
  415. if o == nil {
  416. return false
  417. }
  418. return o.DisableBufferPool
  419. }
  420. func (o *Options) GetDisableBlockCache() bool {
  421. if o == nil {
  422. return false
  423. }
  424. return o.DisableBlockCache
  425. }
  426. func (o *Options) GetDisableCompactionBackoff() bool {
  427. if o == nil {
  428. return false
  429. }
  430. return o.DisableCompactionBackoff
  431. }
  432. func (o *Options) GetDisableLargeBatchTransaction() bool {
  433. if o == nil {
  434. return false
  435. }
  436. return o.DisableLargeBatchTransaction
  437. }
  438. func (o *Options) GetErrorIfExist() bool {
  439. if o == nil {
  440. return false
  441. }
  442. return o.ErrorIfExist
  443. }
  444. func (o *Options) GetErrorIfMissing() bool {
  445. if o == nil {
  446. return false
  447. }
  448. return o.ErrorIfMissing
  449. }
  450. func (o *Options) GetFilter() filter.Filter {
  451. if o == nil {
  452. return nil
  453. }
  454. return o.Filter
  455. }
  456. func (o *Options) GetIteratorSamplingRate() int {
  457. if o == nil || o.IteratorSamplingRate <= 0 {
  458. return DefaultIteratorSamplingRate
  459. }
  460. return o.IteratorSamplingRate
  461. }
  462. func (o *Options) GetNoSync() bool {
  463. if o == nil {
  464. return false
  465. }
  466. return o.NoSync
  467. }
  468. func (o *Options) GetNoWriteMerge() bool {
  469. if o == nil {
  470. return false
  471. }
  472. return o.NoWriteMerge
  473. }
  474. func (o *Options) GetOpenFilesCacher() Cacher {
  475. if o == nil || o.OpenFilesCacher == nil {
  476. return DefaultOpenFilesCacher
  477. }
  478. if o.OpenFilesCacher == NoCacher {
  479. return nil
  480. }
  481. return o.OpenFilesCacher
  482. }
  483. func (o *Options) GetOpenFilesCacheCapacity() int {
  484. if o == nil || o.OpenFilesCacheCapacity == 0 {
  485. return DefaultOpenFilesCacheCapacity
  486. } else if o.OpenFilesCacheCapacity < 0 {
  487. return 0
  488. }
  489. return o.OpenFilesCacheCapacity
  490. }
  491. func (o *Options) GetReadOnly() bool {
  492. if o == nil {
  493. return false
  494. }
  495. return o.ReadOnly
  496. }
  497. func (o *Options) GetStrict(strict Strict) bool {
  498. if o == nil || o.Strict == 0 {
  499. return DefaultStrict&strict != 0
  500. }
  501. return o.Strict&strict != 0
  502. }
  503. func (o *Options) GetWriteBuffer() int {
  504. if o == nil || o.WriteBuffer <= 0 {
  505. return DefaultWriteBuffer
  506. }
  507. return o.WriteBuffer
  508. }
  509. func (o *Options) GetWriteL0PauseTrigger() int {
  510. if o == nil || o.WriteL0PauseTrigger == 0 {
  511. return DefaultWriteL0PauseTrigger
  512. }
  513. return o.WriteL0PauseTrigger
  514. }
  515. func (o *Options) GetWriteL0SlowdownTrigger() int {
  516. if o == nil || o.WriteL0SlowdownTrigger == 0 {
  517. return DefaultWriteL0SlowdownTrigger
  518. }
  519. return o.WriteL0SlowdownTrigger
  520. }
// ReadOptions holds the optional parameters for 'read operation'. The
// 'read operation' includes Get, Find and NewIterator.
type ReadOptions struct {
	// DontFillCache defines whether block reads for this 'read operation'
	// should be cached. If false then the block will be cached. This does
	// not affect already cached blocks.
	//
	// The default value is false.
	DontFillCache bool

	// Strict will be OR'ed with the global DB 'strict level' unless
	// StrictOverride is present. Currently only StrictReader has effect
	// here.
	Strict Strict
}
  534. func (ro *ReadOptions) GetDontFillCache() bool {
  535. if ro == nil {
  536. return false
  537. }
  538. return ro.DontFillCache
  539. }
  540. func (ro *ReadOptions) GetStrict(strict Strict) bool {
  541. if ro == nil {
  542. return false
  543. }
  544. return ro.Strict&strict != 0
  545. }
// WriteOptions holds the optional parameters for 'write operation'. The
// 'write operation' includes Write, Put and Delete.
type WriteOptions struct {
	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// Sync is whether to sync underlying writes from the OS buffer cache
	// through to actual disk, if applicable. Setting Sync can result in
	// slower writes.
	//
	// If false, and the machine crashes, then some recent writes may be
	// lost. Note that if it is just the process that crashes (and the
	// machine does not) then no writes will be lost.
	//
	// In other words, Sync being false has the same semantics as a write
	// system call. Sync being true means write followed by fsync.
	//
	// The default value is false.
	Sync bool
}
  567. func (wo *WriteOptions) GetNoWriteMerge() bool {
  568. if wo == nil {
  569. return false
  570. }
  571. return wo.NoWriteMerge
  572. }
  573. func (wo *WriteOptions) GetSync() bool {
  574. if wo == nil {
  575. return false
  576. }
  577. return wo.Sync
  578. }
  579. func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
  580. if ro.GetStrict(StrictOverride) {
  581. return ro.GetStrict(strict)
  582. } else {
  583. return o.GetStrict(strict) || ro.GetStrict(strict)
  584. }
  585. }