/**
 * @file
 * @copyright defined in aergo/LICENSE.txt
 */

package trie

import (
	"bytes"
	"fmt"
	"sync"

	"github.com/p4u/asmt/db"
)

// Trie is a modified sparse Merkle tree.
// Instead of storing values at the leaves of the tree,
// the values are stored at the highest subtree root that contains only that value.
// If the tree is sparse, this requires fewer hashing operations.
type Trie struct {
	db *CacheDB
	// Root is the current root of the smt.
	Root []byte
	// prevRoot is the root before the last update
	prevRoot []byte
	// lock is for the whole struct
	lock sync.RWMutex
	// hash is the hash function used in the trie
	hash func(data ...[]byte) []byte
	// TrieHeight is the number of bits in a key
	TrieHeight int
	// LoadDbCounter counts the number of db reads in one update
	LoadDbCounter int
	// loadDbMux is a lock for LoadDbCounter
	loadDbMux sync.RWMutex
	// LoadCacheCounter counts the number of cache reads in one update
	LoadCacheCounter int
	// liveCountMux is a lock for LoadCacheCounter
	liveCountMux sync.RWMutex
	// counterOn is used to enable/disable the counters for efficiency
	counterOn bool
	// CacheHeightLimit is the number of tree levels we want to store in cache
	CacheHeightLimit int
	// pastTries stores the past maxPastTries trie roots to revert
	pastTries [][]byte
	// atomicUpdate, commit all the changes made by intermediate update calls
	atomicUpdate bool
}

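// Note on batches (inferred from the code below, added for clarity): nodes are
// grouped and stored in "batches" of 31 entries, i.e. 4 tree levels laid out as
// a binary heap where slot 0 holds a flag byte ([]byte{1} for a shortcut batch)
// and the children of slot i live at slots 2*i+1 and 2*i+2. Each stored entry
// is a hash (or a shortcut key/value) followed by a 1-byte flag.
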
// NewTrie creates a new trie given a root (nil for an empty trie), a hash function and a db store.
func NewTrie(root []byte, hash func(data ...[]byte) []byte, store db.DB) *Trie {
	s := &Trie{
		hash:       hash,
		TrieHeight: len(hash([]byte("height"))) * 8, // hash any string to get output length
		counterOn:  false,
	}
	s.db = &CacheDB{
		liveCache:    make(map[Hash][][]byte),
		updatedNodes: make(map[Hash][][]byte),
		Store:        store,
	}
	// don't store any cache by default (contracts state doesn't use cache)
	s.CacheHeightLimit = s.TrieHeight + 1
	s.Root = root
	return s
}

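// A minimal usage sketch (illustration only, not part of the original file).
// The hash function and the nil store below are assumptions: any hash with a
// fixed output size works, and the store may be nil as long as every node
// stays in the live cache or in updatedNodes.
//
//	hasher := func(data ...[]byte) []byte {
//		h := sha256.New()
//		for _, d := range data {
//			h.Write(d)
//		}
//		return h.Sum(nil)
//	}
//	smt := NewTrie(nil, hasher, nil)
//	key := hasher([]byte("account")) // keys must be hash-sized and sorted
//	val := hasher([]byte("balance"))
//	root, err := smt.Update([][]byte{key}, [][]byte{val})
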
// Update adds and deletes a sorted list of keys and their values to the trie.
// Adding and deleting can be simultaneous.
// To delete, set the value to DefaultLeaf.
// If Update is called multiple times, only the state after the last update
// is committed.
func (s *Trie) Update(keys, values [][]byte) ([]byte, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.atomicUpdate = false
	s.LoadDbCounter = 0
	s.LoadCacheCounter = 0
	ch := make(chan mresult, 1)
	s.update(s.Root, keys, values, nil, 0, s.TrieHeight, ch)
	result := <-ch
	if result.err != nil {
		return nil, result.err
	}
	if len(result.update) != 0 {
		s.Root = result.update[:HashLength]
	} else {
		s.Root = nil
	}
	return s.Root, nil
}

// AtomicUpdate can be called multiple times and all the updated nodes will be committed
// and roots will be stored in past tries.
// Can be used for updating several blocks before committing to DB.
func (s *Trie) AtomicUpdate(keys, values [][]byte) ([]byte, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.atomicUpdate = true
	s.LoadDbCounter = 0
	s.LoadCacheCounter = 0
	ch := make(chan mresult, 1)
	s.update(s.Root, keys, values, nil, 0, s.TrieHeight, ch)
	result := <-ch
	if result.err != nil {
		return nil, result.err
	}
	if len(result.update) != 0 {
		s.Root = result.update[:HashLength]
	} else {
		s.Root = nil
	}
	s.updatePastTries()
	return s.Root, nil
}

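// A hedged sketch of the intended AtomicUpdate flow (the commit step is
// defined elsewhere in this package and is only assumed here for illustration):
//
//	root1, _ := smt.AtomicUpdate(keysBlock1, valuesBlock1)
//	root2, _ := smt.AtomicUpdate(keysBlock2, valuesBlock2)
//	// every intermediate root is kept in pastTries, so earlier states can
//	// still be reverted to before the accumulated nodes are written to db.
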
// mresult is used to contain the result of goroutines and is sent through a channel.
type mresult struct {
	update []byte
	// flag if a node was deleted and a shortcut node maybe has to move up the tree
	deleted bool
	err     error
}

// update adds and deletes a sorted list of keys and their values to the trie.
// Adding and deleting can be simultaneous.
// To delete, set the value to DefaultLeaf.
// It returns the root of the updated tree.
func (s *Trie) update(root []byte, keys, values, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	if height == 0 {
		if bytes.Equal(DefaultLeaf, values[0]) {
			// Delete the key-value from the trie if it is being set to DefaultLeaf.
			// The value will be set to [] in batch by maybeMoveUpShortcut or interiorHash
			s.deleteOldNode(root, height, false)
			ch <- mresult{nil, true, nil}
		} else {
			// Create a new shortcut batch.
			// Simply storing the value would make it hard to move up the
			// shortcut in case of sibling deletion.
			batch = make([][]byte, 31, 31)
			node := s.leafHash(keys[0], values[0], root, batch, 0, height)
			ch <- mresult{node, false, nil}
		}
		return
	}
	// Load the node to update
	batch, iBatch, lnode, rnode, isShortcut, err := s.loadChildren(root, height, iBatch, batch)
	if err != nil {
		ch <- mresult{nil, false, err}
		return
	}
	// Check if the keys are updating the shortcut node
	if isShortcut {
		keys, values = s.maybeAddShortcutToKV(keys, values, lnode[:HashLength], rnode[:HashLength])
		if iBatch == 0 {
			// shortcut is moving so its root will change
			s.deleteOldNode(root, height, false)
		}
		// The shortcut node was added to keys and values so consider this subtree default.
		lnode, rnode = nil, nil
		// update in the batch (set key, value to default so the next loadChildren is correct)
		batch[2*iBatch+1] = nil
		batch[2*iBatch+2] = nil
		if len(keys) == 0 {
			// Set true so that a potential sibling shortcut may move up.
			ch <- mresult{nil, true, nil}
			return
		}
	}
	// Store shortcut node
	if (len(lnode) == 0) && (len(rnode) == 0) && (len(keys) == 1) {
		// We are adding 1 key to an empty subtree so store it as a shortcut
		if bytes.Equal(DefaultLeaf, values[0]) {
			ch <- mresult{nil, true, nil}
		} else {
			node := s.leafHash(keys[0], values[0], root, batch, iBatch, height)
			ch <- mresult{node, false, nil}
		}
		return
	}
	// Split the keys array so each branch can be updated in parallel
	lkeys, rkeys := s.splitKeys(keys, s.TrieHeight-height)
	splitIndex := len(lkeys)
	lvalues, rvalues := values[:splitIndex], values[splitIndex:]
	switch {
	case len(lkeys) == 0 && len(rkeys) > 0:
		s.updateRight(lnode, rnode, root, keys, values, batch, iBatch, height, ch)
	case len(lkeys) > 0 && len(rkeys) == 0:
		s.updateLeft(lnode, rnode, root, keys, values, batch, iBatch, height, ch)
	default:
		s.updateParallel(lnode, rnode, root, lkeys, rkeys, lvalues, rvalues, batch, iBatch, height, ch)
	}
}

// updateRight updates the right side of the tree
func (s *Trie) updateRight(lnode, rnode, root []byte, keys, values, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	// all the keys go in the right subtree
	newch := make(chan mresult, 1)
	s.update(rnode, keys, values, batch, 2*iBatch+2, height-1, newch)
	result := <-newch
	if result.err != nil {
		ch <- mresult{nil, false, result.err}
		return
	}
	// Move up a shortcut node if necessary.
	if result.deleted {
		if s.maybeMoveUpShortcut(lnode, result.update, root, batch, iBatch, height, ch) {
			return
		}
	}
	node := s.interiorHash(lnode, result.update, root, batch, iBatch, height)
	ch <- mresult{node, false, nil}
}

// updateLeft updates the left side of the tree
func (s *Trie) updateLeft(lnode, rnode, root []byte, keys, values, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	// all the keys go in the left subtree
	newch := make(chan mresult, 1)
	s.update(lnode, keys, values, batch, 2*iBatch+1, height-1, newch)
	result := <-newch
	if result.err != nil {
		ch <- mresult{nil, false, result.err}
		return
	}
	// Move up a shortcut node if necessary.
	if result.deleted {
		if s.maybeMoveUpShortcut(result.update, rnode, root, batch, iBatch, height, ch) {
			return
		}
	}
	node := s.interiorHash(result.update, rnode, root, batch, iBatch, height)
	ch <- mresult{node, false, nil}
}

// updateParallel updates both sides of the trie simultaneously
func (s *Trie) updateParallel(lnode, rnode, root []byte, lkeys, rkeys, lvalues, rvalues, batch [][]byte, iBatch, height int, ch chan<- (mresult)) {
	lch := make(chan mresult, 1)
	rch := make(chan mresult, 1)
	go s.update(lnode, lkeys, lvalues, batch, 2*iBatch+1, height-1, lch)
	go s.update(rnode, rkeys, rvalues, batch, 2*iBatch+2, height-1, rch)
	lresult := <-lch
	rresult := <-rch
	if lresult.err != nil {
		ch <- mresult{nil, false, lresult.err}
		return
	}
	if rresult.err != nil {
		ch <- mresult{nil, false, rresult.err}
		return
	}
	// Move up a shortcut node if its sibling is default
	if lresult.deleted || rresult.deleted {
		if s.maybeMoveUpShortcut(lresult.update, rresult.update, root, batch, iBatch, height, ch) {
			return
		}
	}
	node := s.interiorHash(lresult.update, rresult.update, root, batch, iBatch, height)
	ch <- mresult{node, false, nil}
}

// deleteOldNode deletes an old node that has been updated
func (s *Trie) deleteOldNode(root []byte, height int, movingUp bool) {
	var node Hash
	copy(node[:], root)
	if !s.atomicUpdate || movingUp {
		// Don't delete old nodes on atomic updates except when moving up
		// a shortcut: we don't record every single move.
		s.db.updatedMux.Lock()
		delete(s.db.updatedNodes, node)
		s.db.updatedMux.Unlock()
	}
	if height >= s.CacheHeightLimit {
		s.db.liveMux.Lock()
		delete(s.db.liveCache, node)
		s.db.liveMux.Unlock()
	}
}

// splitKeys divides the array of keys into 2 so the left and right branches can be updated in parallel
func (s *Trie) splitKeys(keys [][]byte, height int) ([][]byte, [][]byte) {
	for i, key := range keys {
		if bitIsSet(key, height) {
			return keys[:i], keys[i:]
		}
	}
	return keys, nil
}

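// A hedged illustration of splitKeys (1-byte keys for readability, real keys
// are hash-sized): the bit at the given index separates keys whose bit is 0
// (left) from keys whose bit is 1 (right); the input must already be sorted.
//
//	keys := [][]byte{{0x10}, {0x40}, {0x80}, {0xf0}}
//	left, right := s.splitKeys(keys, 0)
//	// left  == [][]byte{{0x10}, {0x40}} (first bit is 0)
//	// right == [][]byte{{0x80}, {0xf0}} (first bit is 1)
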
// maybeMoveUpShortcut moves up a shortcut if its sibling node is default
func (s *Trie) maybeMoveUpShortcut(left, right, root []byte, batch [][]byte, iBatch, height int, ch chan<- (mresult)) bool {
	if len(left) == 0 && len(right) == 0 {
		// Both update and sibling are deleted subtrees
		if iBatch == 0 {
			// If the deleted subtrees are at the root, then delete it.
			s.deleteOldNode(root, height, true)
		} else {
			batch[2*iBatch+1] = nil
			batch[2*iBatch+2] = nil
		}
		ch <- mresult{nil, true, nil}
		return true
	} else if len(left) == 0 {
		// If right is a shortcut, move it up
		if right[HashLength] == 1 {
			s.moveUpShortcut(right, root, batch, iBatch, 2*iBatch+2, height, ch)
			return true
		}
	} else if len(right) == 0 {
		// If left is a shortcut, move it up
		if left[HashLength] == 1 {
			s.moveUpShortcut(left, root, batch, iBatch, 2*iBatch+1, height, ch)
			return true
		}
	}
	return false
}

func (s *Trie) moveUpShortcut(shortcut, root []byte, batch [][]byte, iBatch, iShortcut, height int, ch chan<- (mresult)) {
	// It doesn't matter if atomic update is true or false since the batch is not modified here.
	_, _, shortcutKey, shortcutVal, _, err := s.loadChildren(shortcut, height-1, iShortcut, batch)
	if err != nil {
		ch <- mresult{nil, false, err}
		return
	}
	// When moving up the shortcut, its hash will change because the height is +1
	newShortcut := s.hash(shortcutKey[:HashLength], shortcutVal[:HashLength], []byte{byte(height)})
	newShortcut = append(newShortcut, byte(1))
	if iBatch == 0 {
		// Modify batch to a shortcut batch
		batch[0] = []byte{1}
		batch[2*iBatch+1] = shortcutKey
		batch[2*iBatch+2] = shortcutVal
		batch[2*iShortcut+1] = nil
		batch[2*iShortcut+2] = nil
		// cache and updatedNodes deleted by storeNode
		s.storeNode(batch, newShortcut, root, height)
	} else if (height-1)%4 == 0 {
		// move up shortcut and delete old batch
		batch[2*iBatch+1] = shortcutKey
		batch[2*iBatch+2] = shortcutVal
		// set movingUp to true so that AtomicUpdate can also delete a node moving up,
		// otherwise every node moved up is recorded
		s.deleteOldNode(shortcut, height, true)
	} else {
		// move up shortcut
		batch[2*iBatch+1] = shortcutKey
		batch[2*iBatch+2] = shortcutVal
		batch[2*iShortcut+1] = nil
		batch[2*iShortcut+2] = nil
	}
	// Return the shortcut node so that it can be moved up
	ch <- mresult{newShortcut, true, nil}
}

// maybeAddShortcutToKV adds a shortcut key to the keys array to be updated.
// This is used when a subtree containing a shortcut node is being updated.
func (s *Trie) maybeAddShortcutToKV(keys, values [][]byte, shortcutKey, shortcutVal []byte) ([][]byte, [][]byte) {
	newKeys := make([][]byte, 0, len(keys)+1)
	newVals := make([][]byte, 0, len(keys)+1)
	if bytes.Compare(shortcutKey, keys[0]) < 0 {
		newKeys = append(newKeys, shortcutKey)
		newKeys = append(newKeys, keys...)
		newVals = append(newVals, shortcutVal)
		newVals = append(newVals, values...)
	} else if bytes.Compare(shortcutKey, keys[len(keys)-1]) > 0 {
		newKeys = append(newKeys, keys...)
		newKeys = append(newKeys, shortcutKey)
		newVals = append(newVals, values...)
		newVals = append(newVals, shortcutVal)
	} else {
		higher := false
		for i, key := range keys {
			if bytes.Equal(shortcutKey, key) {
				if !bytes.Equal(DefaultLeaf, values[i]) {
					// Do nothing if the shortcut is simply updated
					return keys, values
				}
				// Delete shortcut if it is updated to DefaultLeaf
				newKeys = append(newKeys, keys[:i]...)
				newKeys = append(newKeys, keys[i+1:]...)
				newVals = append(newVals, values[:i]...)
				newVals = append(newVals, values[i+1:]...)
			}
			if !higher && bytes.Compare(shortcutKey, key) > 0 {
				higher = true
				continue
			}
			if higher && bytes.Compare(shortcutKey, key) < 0 {
				// insert shortcut in slices
				newKeys = append(newKeys, keys[:i]...)
				newKeys = append(newKeys, shortcutKey)
				newKeys = append(newKeys, keys[i:]...)
				newVals = append(newVals, values[:i]...)
				newVals = append(newVals, shortcutVal)
				newVals = append(newVals, values[i:]...)
				break
			}
		}
	}
	return newKeys, newVals
}

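// A hedged illustration of maybeAddShortcutToKV (keys shortened for
// readability, real keys and values are hash-sized):
//
//	keys := [][]byte{{0x10}, {0x30}}
//	values := [][]byte{v1, v2}
//	// with shortcutKey {0x20} and shortcutVal sv, the merged, still sorted
//	// result is keys = {{0x10}, {0x20}, {0x30}}, values = {v1, sv, v2}.
//	// If keys already contain {0x20} with a non-default value, the input
//	// slices are returned unchanged and the shortcut is simply overwritten.
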
// loadChildren looks for the children of a node.
// If the node is not stored in cache, it will be loaded from db.
func (s *Trie) loadChildren(root []byte, height, iBatch int, batch [][]byte) ([][]byte, int, []byte, []byte, bool, error) {
	isShortcut := false
	if height%4 == 0 {
		if len(root) == 0 {
			// create a new default batch
			batch = make([][]byte, 31, 31)
			batch[0] = []byte{0}
		} else {
			var err error
			batch, err = s.loadBatch(root)
			if err != nil {
				return nil, 0, nil, nil, false, err
			}
		}
		iBatch = 0
		if batch[0][0] == 1 {
			isShortcut = true
		}
	} else {
		if len(batch[iBatch]) != 0 && batch[iBatch][HashLength] == 1 {
			isShortcut = true
		}
	}
	return batch, iBatch, batch[2*iBatch+1], batch[2*iBatch+2], isShortcut, nil
}

// loadBatch fetches a batch of nodes from cache or db
func (s *Trie) loadBatch(root []byte) ([][]byte, error) {
	var node Hash
	copy(node[:], root)
	s.db.liveMux.RLock()
	val, exists := s.db.liveCache[node]
	s.db.liveMux.RUnlock()
	if exists {
		if s.counterOn {
			s.liveCountMux.Lock()
			s.LoadCacheCounter++
			s.liveCountMux.Unlock()
		}
		if s.atomicUpdate {
			// Return a copy so that Commit() doesn't have to be called at
			// each block and still commit every state transition.
			// Before Commit, the same batch is in liveCache and in updatedNodes
			newVal := make([][]byte, 31, 31)
			copy(newVal, val)
			return newVal, nil
		}
		return val, nil
	}
	// checking updated nodes is useful if get() or update() is called twice in a row without db commit
	s.db.updatedMux.RLock()
	val, exists = s.db.updatedNodes[node]
	s.db.updatedMux.RUnlock()
	if exists {
		if s.atomicUpdate {
			// Return a copy so that Commit() doesn't have to be called at
			// each block and still commit every state transition.
			newVal := make([][]byte, 31, 31)
			copy(newVal, val)
			return newVal, nil
		}
		return val, nil
	}
	// Fetch the node from the disk database
	if s.db.Store == nil {
		return nil, fmt.Errorf("DB not connected to trie")
	}
	if s.counterOn {
		s.loadDbMux.Lock()
		s.LoadDbCounter++
		s.loadDbMux.Unlock()
	}
	s.db.lock.Lock()
	dbval := s.db.Store.Get(root[:HashLength])
	s.db.lock.Unlock()
	nodeSize := len(dbval)
	if nodeSize != 0 {
		return s.parseBatch(dbval), nil
	}
	return nil, fmt.Errorf("the trie node %x is unavailable in the disk db, db may be corrupted", root)
}

// parseBatch decodes the byte data into a slice of nodes and bitmap
func (s *Trie) parseBatch(val []byte) [][]byte {
	batch := make([][]byte, 31, 31)
	bitmap := val[:4]
	// check if the batch root is a shortcut
	if bitIsSet(val, 31) {
		batch[0] = []byte{1}
		batch[1] = val[4 : 4+33]
		batch[2] = val[4+33 : 4+33*2]
	} else {
		batch[0] = []byte{0}
		j := 0
		for i := 1; i <= 30; i++ {
			if bitIsSet(bitmap, i-1) {
				batch[i] = val[4+33*j : 4+33*(j+1)]
				j++
			}
		}
	}
	return batch
}

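// A hedged sketch of the serialized batch layout parsed above (inferred from
// parseBatch rather than from an authoritative spec, assuming 32-byte hashes):
//
//	[ 4-byte bitmap | node_1 (33 bytes) | node_2 (33 bytes) | ... ]
//
// Bit i-1 of the bitmap says whether slot i of the 31-entry batch is present;
// each present node is 32 bytes of hash (or shortcut key/value) plus a 1-byte
// flag. Bit 31 marks a shortcut batch whose two entries are the key and value.
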
// leafHash returns the hash of key_value_byte(height) concatenated, stores it in the updatedNodes and maybe in liveCache.
// leafHash is never called for a default value. Default value should not be stored.
func (s *Trie) leafHash(key, value, oldRoot []byte, batch [][]byte, iBatch, height int) []byte {
	// byte(height) is here for 2 reasons:
	// 1- to prevent potential problems with merkle proofs where, if an account
	// has the same address as a node, it would be possible to prove a
	// different value for the account.
	// 2- when accounts are added to the trie, accounts on their path get pushed down the tree
	// with them. If an old account changes position from a shortcut batch to another
	// shortcut batch of a different height, it would be deleted when reverting.
	h := s.hash(key, value, []byte{byte(height)})
	h = append(h, byte(1)) // byte(1) is a flag for the shortcut
	batch[2*iBatch+2] = append(value, byte(2))
	batch[2*iBatch+1] = append(key, byte(2))
	if height%4 == 0 {
		batch[0] = []byte{1} // byte(1) is a flag for the shortcut batch
		s.storeNode(batch, h, oldRoot, height)
	}
	return h
}

// storeNode stores a batch and deletes the old node from cache
func (s *Trie) storeNode(batch [][]byte, h, oldRoot []byte, height int) {
	if !bytes.Equal(h, oldRoot) {
		var node Hash
		copy(node[:], h)
		// record new node
		s.db.updatedMux.Lock()
		s.db.updatedNodes[node] = batch
		s.db.updatedMux.Unlock()
		// Cache the shortcut node if its height is over CacheHeightLimit
		if height >= s.CacheHeightLimit {
			s.db.liveMux.Lock()
			s.db.liveCache[node] = batch
			s.db.liveMux.Unlock()
		}
		s.deleteOldNode(oldRoot, height, false)
	}
}

// interiorHash hashes 2 children to get the parent hash and stores it in the updatedNodes and maybe in liveCache.
func (s *Trie) interiorHash(left, right, oldRoot []byte, batch [][]byte, iBatch, height int) []byte {
	var h []byte
	// left and right cannot both be default. That case is handled by maybeMoveUpShortcut()
	if len(left) == 0 {
		h = s.hash(DefaultLeaf, right[:HashLength])
	} else if len(right) == 0 {
		h = s.hash(left[:HashLength], DefaultLeaf)
	} else {
		h = s.hash(left[:HashLength], right[:HashLength])
	}
	h = append(h, byte(0))
	batch[2*iBatch+2] = right
	batch[2*iBatch+1] = left
	if height%4 == 0 {
		batch[0] = []byte{0}
		s.storeNode(batch, h, oldRoot, height)
	}
	return h
}

// updatePastTries appends the current Root to the list of past tries
func (s *Trie) updatePastTries() {
	if len(s.pastTries) >= maxPastTries {
		copy(s.pastTries, s.pastTries[1:])
		s.pastTries[len(s.pastTries)-1] = s.Root
	} else {
		s.pastTries = append(s.pastTries, s.Root)
	}
}