  1. // Copyright 2017-2018 DERO Project. All rights reserved.
  2. // Use of this source code in any form is governed by RESEARCH license.
  3. // license can be found in the LICENSE file.
  4. // GPG: 0F39 E425 8C65 3947 702A 8234 08B2 0360 A03A 9DE8
  5. //
  6. //
  7. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
  8. // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  9. // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
  10. // THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  11. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  12. // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  13. // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  14. // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
  15. // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  16. package blockchain
  17. // This file runs the core consensus protocol
  18. // please think about the after-effects before randomly editing it
  19. // We must not call any packages that can call panic
  20. // NO Panics or FATALs please
  21. //import "os"
  22. import "fmt"
  23. import "sort"
  24. import "sync"
  25. import "time"
  26. import "bytes"
  27. import "sync/atomic"
  28. import "runtime/debug"
  29. import log "github.com/sirupsen/logrus"
  30. import "github.com/romana/rlog"
  31. import "github.com/arnaucode/derosuite/config"
  32. import "github.com/arnaucode/derosuite/crypto"
  33. import "github.com/arnaucode/derosuite/globals"
  34. import "github.com/arnaucode/derosuite/storage"
  35. import "github.com/arnaucode/derosuite/difficulty"
  36. import "github.com/arnaucode/derosuite/crypto/ringct"
  37. import "github.com/arnaucode/derosuite/block"
  38. import "github.com/arnaucode/derosuite/transaction"
  39. import "github.com/arnaucode/derosuite/checkpoints"
  40. import "github.com/arnaucode/derosuite/blockchain/mempool"
  41. import "github.com/arnaucode/derosuite/blockchain/inputmaturity"
  42. // all components requiring access to the blockchain must use this struct to communicate
  43. // this structure must only be updated while holding the mutex
  44. type Blockchain struct {
  45. store storage.Store // interface to storage layer
  46. Height uint64 // chain height is always 1 more than the height of the top block
  47. height_seen uint64 // height seen on peers
  48. Top_ID crypto.Hash // id of the top block
  49. Difficulty uint64 // current cumulative difficulty
  50. Mempool *mempool.Mempool
  51. Exit_Event chan bool // blockchain is shutting down and we must quit ASAP
  52. Top_Block_Median_Size uint64 // median block size of current top block
  53. Top_Block_Base_Reward uint64 // top block base reward
  54. checkpints_disabled bool // are checkpoints disabled
  55. sync.RWMutex
  56. }
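// Illustrative sketch only (an assumption, not part of the original file): the comment above
// requires holding the mutex while touching this struct; a hypothetical reader that follows
// that convention with the embedded sync.RWMutex could look like this.
func (chain *Blockchain) snapshot_top_sketch() (top crypto.Hash, height uint64) {
	chain.RLock()         // take the read lock so a concurrent writer cannot change state mid-read
	defer chain.RUnlock() // release it when we return
	return chain.Top_ID, chain.Height
}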
  57. var logger *log.Entry
  58. //var Exit_Event = make(chan bool) // causes all threads to exit
  59. // All blockchain activity is stored in a single store
  60. /* do initialisation , setup storage, put genesis block and chain in store
  61. This is the first component to get up
  62. Global parameters are picked up from the config package
  63. */
  64. func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
  65. var err error
  66. var chain Blockchain
  67. logger = globals.Logger.WithFields(log.Fields{"com": "BLKCHAIN"})
  68. logger.Infof("Initialising blockchain")
  69. init_static_checkpoints() // init some hard coded checkpoints
  70. chain.store = storage.Bolt_backend // setup backend
  71. chain.store.Init(params) // init backend
  72. chain.checkpints_disabled = params["--disable-checkpoints"].(bool)
  73. chain.Exit_Event = make(chan bool) // init exit channel
  74. // init mempool before chain starts
  75. chain.Mempool, err = mempool.Init_Mempool(params)
  76. // we need to check mainnet/testnet, i.e. whether the genesis block in the DB matches the testnet/mainnet selection
  77. // meaning whether the user is trying to use a mainnet db with the testnet option or vice-versa
  78. if chain.Block_Exists(config.Mainnet.Genesis_Block_Hash) || chain.Block_Exists(config.Testnet.Genesis_Block_Hash) {
  79. if globals.IsMainnet() && !chain.Block_Exists(config.Mainnet.Genesis_Block_Hash) {
  80. logger.Fatalf("Tryng to use a testnet database with mainnet, please add --testnet option")
  81. }
  82. if !globals.IsMainnet() && !chain.Block_Exists(config.Testnet.Genesis_Block_Hash) {
  83. logger.Fatalf("Trying to use a mainnet database with testnet, please remove --testnet option")
  84. }
  85. }
  86. // genesis block not in chain, add it to chain, together with its miner tx
  87. // make sure genesis is in the store
  88. if !chain.Block_Exists(globals.Config.Genesis_Block_Hash) {
  89. logger.Debugf("Genesis block not in store, add it now")
  90. var complete_block block.Complete_Block
  91. bl := Generate_Genesis_Block()
  92. complete_block.Bl = &bl
  93. if !chain.Add_Complete_Block(&complete_block) {
  94. logger.Fatalf("Failed to add genesis block, we can no longer continue")
  95. }
  96. }
  97. // load the chain from the disk
  98. chain.Initialise_Chain_From_DB()
  99. if chain.checkpints_disabled {
  100. logger.Infof("Internal Checkpoints are disabled")
  101. } else {
  102. logger.Debugf("Internal Checkpoints are enabled")
  103. }
  104. _ = err
  105. atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem
  106. //go chain.Handle_Block_Event_Loop()
  107. //go chain.Handle_Transaction_Event_Loop()
  108. /*for i := uint64(0); i < 100;i++{
  109. block_id,_ := chain.Load_BL_ID_at_Height(i)
  110. chain.write_output_index(block_id)
  111. }*/
  112. // chain.Inject_Alt_Chain()
  113. return &chain, nil
  114. }
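// Minimal usage sketch (an assumption, not part of the original file): the params map normally
// comes from the CLI parser and the storage backend may need additional keys; only the
// "--disable-checkpoints" key is shown here because it is read above.
func start_and_stop_sketch() {
	params := map[string]interface{}{"--disable-checkpoints": false}
	chain, err := Blockchain_Start(params) // sets up storage, the genesis block and the mempool
	if err != nil {
		return
	}
	_ = chain.Get_Height() // chain state can now be queried
	chain.Shutdown()       // stops the mempool and storage, decrements the subsystem counter
}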
  115. // this function is called to read blockchain state from DB
  116. // It is callable at any point in time
  117. func (chain *Blockchain) Initialise_Chain_From_DB() {
  118. chain.Lock()
  119. defer chain.Unlock()
  120. // locate top block
  121. chain.Top_ID = chain.Load_TOP_ID()
  122. chain.Height = (chain.Load_Height_for_BL_ID(chain.Top_ID) + 1)
  123. chain.Difficulty = chain.Get_Difficulty()
  124. chain.Top_Block_Median_Size = chain.Get_Median_BlockSize_At_Block(chain.Top_ID)
  125. chain.Top_Block_Base_Reward = chain.Load_Block_Reward(chain.Top_ID)
  126. logger.Infof("Chain Top Block %s Height %d", chain.Top_ID, chain.Height)
  127. }
  128. // before shutdown, make sure p2p has confirmed it is stopped
  129. func (chain *Blockchain) Shutdown() {
  130. chain.Lock() // take the lock as chain is no longer in unsafe mode
  131. close(chain.Exit_Event) // send signal to everyone we are shutting down
  132. chain.Mempool.Shutdown() // shutdown mempool first
  133. logger.Infof("Stopping Blockchain")
  134. chain.store.Shutdown()
  135. atomic.AddUint32(&globals.Subsystem_Active, ^uint32(0)) // this decrements the subsystem count by 1
  136. }
  137. func (chain *Blockchain) Get_Height() uint64 {
  138. return chain.Height
  139. }
  140. func (chain *Blockchain) Get_Top_ID() crypto.Hash {
  141. return chain.Top_ID
  142. }
  143. func (chain *Blockchain) Get_Difficulty() uint64 {
  144. return chain.Get_Difficulty_At_Block(chain.Top_ID)
  145. }
  146. func (chain *Blockchain) Get_Network_HashRate() uint64 {
  147. return chain.Get_Difficulty_At_Block(chain.Top_ID) / config.BLOCK_TIME
  148. }
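// Worked example for the estimate above (illustrative numbers only, not DERO's actual
// parameters): with a difficulty of 12,000,000 and a config.BLOCK_TIME of 120 seconds,
// the estimated network hash rate is 12,000,000 / 120 = 100,000 H/s (100 kH/s).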
  149. // confirm whether the block exists in the store
  150. // this only confirms whether the block has been downloaded
  151. // a separate check is required to confirm whether the block is valid (satisfies PoW and other conditions)
  152. // we will not add a block to the store until it satisfies PoW
  153. func (chain *Blockchain) Block_Exists(h crypto.Hash) bool {
  154. _, err := chain.Load_BL_FROM_ID(h)
  155. if err == nil {
  156. return true
  157. }
  158. return false
  159. }
  160. // this is the only entry point for new txs into the chain
  161. // add a transaction to the MEMPOOL,
  162. // verifying everything here means everything that can possibly be verified
  163. // TODO: currently we are not verifying fees, it's on the TODO list
  164. // this only changes the mempool, no DB changes
  165. func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) (result bool) {
  166. // Coinbase TX cannot come through this path
  167. if tx.IsCoinbase() {
  168. logger.WithFields(log.Fields{"txid": tx.GetHash()}).Warnf("TX rejected coinbase tx cannot appear in mempool")
  169. return false
  170. }
  171. // check whether enough fee is provided in the transaction
  172. // calculate dynamic_fees_per_kb
  173. dynamic_fees_per_kb := uint64(0)
  174. previous_height := chain.Load_Height_for_BL_ID(chain.Get_Top_ID())
  175. if previous_height >= 2 {
  176. dynamic_fees_per_kb = chain.Get_Dynamic_Fee_Rate(previous_height)
  177. }
  178. // check whether the fee provided is enough
  179. calculated_fee := chain.Calculate_TX_fee(dynamic_fees_per_kb, uint64(len(tx.Serialize())))
  180. provided_fee := tx.RctSignature.Get_TX_Fee() // get fee from tx
  181. if ((calculated_fee * 98) / 100) > provided_fee { // 2 % margin see blockchain.cpp L 2913
  182. logger.WithFields(log.Fields{"txid": tx.GetHash()}).Warnf("TX rejected due to low fees provided fee %d calculated fee %d", provided_fee, calculated_fee)
  183. return false
  184. }
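// Worked example of the 2% margin above (illustrative numbers only): if calculated_fee
// is 1000, the tx is accepted only if provided_fee >= (1000 * 98) / 100 = 980;
// a provided fee of 975 would be rejected as too low.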
  185. if chain.Verify_Transaction_NonCoinbase(tx) {
  186. if chain.Mempool.Mempool_Add_TX(tx, 0) {
  187. logger.Debugf("successfully added tx to pool")
  188. return true
  189. } else {
  190. logger.Debugf("TX rejected by pool")
  191. return false
  192. }
  193. }
  194. logger.Warnf("Incoming TX could not be verified")
  195. return false
  196. }
  197. // this is the only entrypoint for new / old blocks even for genesis block
  198. // this will add the entire block atomically to the chain
  199. // this is the only function which can add blocks to the chain
  200. // this is exported, so it can be fed new blocks by the p2p layer
  201. // genesis block is no different
  202. // TODO: we should stop mining while adding the new block
  203. func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (result bool) {
  204. var block_hash crypto.Hash
  205. chain.Lock()
  206. defer chain.Unlock()
  207. defer func() {
  208. // safety so if anything wrong happens, verification fails
  209. if r := recover(); r != nil {
  210. logger.Warnf("Recovered while adding new block, Stack trace below block_hash %s", block_hash)
  211. logger.Warnf("Stack trace \n%s", debug.Stack())
  212. result = false
  213. }
  214. if result == true { // block was successfully added, commit it atomically
  215. chain.store.Commit()
  216. chain.store.Sync() // sync the DB to disk after every execution of this function
  217. } else {
  218. chain.store.Rollback() // if block could not be added, rollback all changes to previous block
  219. }
  220. }()
  221. bl := cbl.Bl // small pointer to block
  222. // first of all lets do some quick checks
  223. // before doing extensive checks
  224. result = false
  225. block_hash = bl.GetHash()
  226. block_logger := logger.WithFields(log.Fields{"blid": block_hash})
  227. // check if the block already exists, if so skip it
  228. if chain.Block_Exists(block_hash) {
  229. block_logger.Debugf("block already in chain skipping it ")
  230. return
  231. }
  232. // make sure prev_hash refers to some point in our chain
  233. // there is an edge case, where we know the child but still do not know the parent
  234. // this might be some corrupted miner or initial sync
  235. if block_hash != globals.Config.Genesis_Block_Hash && !chain.Block_Exists(bl.Prev_Hash) {
  236. // TODO we must queue this block for say 60 minutes; if the parent does not appear by then, discard it
  237. block_logger.Warnf("Prev_Hash is nowhere in the chain, skipping it till we get a parent")
  238. return
  239. }
  240. // make sure the time is NOT too far into the future
  241. // if the clock difference is more than 2 hrs, reject the block
  242. if bl.Timestamp > (uint64(time.Now().Unix()) + config.CRYPTONOTE_BLOCK_FUTURE_TIME_LIMIT) {
  243. block_logger.Warnf("Block timestamp is too far into the future, make sure that the system clock is correct")
  244. return
  245. }
  246. // verify that the clock is not being run in reverse
  247. median_timestamp := chain.Get_Median_Timestamp_At_Block(bl.Prev_Hash)
  248. if bl.Timestamp < median_timestamp {
  249. block_logger.Warnf("Block timestamp %d is less than median timestamp (%d) of %d blocks", bl.Timestamp, median_timestamp, config.BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)
  250. return
  251. }
  252. // check a small list of ~100 hashes to see whether a static checkpoint has been reached
  253. if IsCheckPointKnown_Static(block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1) {
  254. rlog.Tracef(1, "Static Checkpoint reached at height %d", chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
  255. }
  256. rlog.Tracef(1, "Checking Known checkpoint %s at height %d", block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
  257. // disable security checks if checkpoint is already known
  258. if chain.checkpints_disabled || !checkpoints.IsCheckPointKnown(block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1) {
  259. rlog.Tracef(1, "Unknown checkpoint %s at height %d, verifying throughly", block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
  260. // Verify Blocks Proof-Of-Work
  261. PoW := bl.GetPoWHash()
  262. current_difficulty := chain.Get_Difficulty_At_Block(bl.Prev_Hash)
  263. if block_hash != globals.Config.Genesis_Block_Hash {
  264. logger.Debugf("Difficulty at height %d is %d", chain.Load_Height_for_BL_ID(bl.Prev_Hash), current_difficulty)
  265. }
  266. // check if the PoW is satisfied
  267. if !difficulty.CheckPowHash(PoW, current_difficulty) { // if invalid PoW, reject the block
  268. block_logger.Warnf("Block has invalid PoW, rejecting it")
  269. return false
  270. }
  271. // TODO we need to verify whether the block size crosses the limits
  272. // we need to verify each and every tx contained in the block, sanity check everything
  273. // first of all check, whether all the tx contained in the block, match their hashes
  274. {
  275. if len(bl.Tx_hashes) != len(cbl.Txs) {
  276. block_logger.Warnf("Block says it has %d txs , however complete block contained %d txs", len(bl.Tx_hashes), len(cbl.Txs))
  277. return false
  278. }
  279. // first check whether the complete block contains any duplicate hashes
  280. tx_checklist := map[crypto.Hash]bool{}
  281. for i := 0; i < len(bl.Tx_hashes); i++ {
  282. tx_checklist[bl.Tx_hashes[i]] = true
  283. }
  284. if len(tx_checklist) != len(bl.Tx_hashes) { // block has duplicate txs, reject the block
  285. block_logger.Warnf("Block has %d duplicate txs, rejecting it", len(bl.Tx_hashes)-len(tx_checklist)); return false
  286. }
  287. // now lets loop through complete block, matching each tx
  288. // detecting any duplicates using txid hash
  289. for i := 0; i < len(cbl.Txs); i++ {
  290. tx_hash := cbl.Txs[i].GetHash()
  291. if _, ok := tx_checklist[tx_hash]; !ok {
  292. // tx is NOT found in map, RED alert reject the block
  293. block_logger.Warnf("Block says it has tx %s, but complete block does not have it", tx_hash)
  294. return false
  295. }
  296. }
  297. }
  298. // another check, whether the tx contains any duplicate key images within the block
  299. // block wide duplicate input detector
  300. {
  301. block_pool, _ := mempool.Init_Block_Mempool(nil)
  302. for i := 0; i < len(cbl.Txs); i++ {
  303. if !block_pool.Mempool_Add_TX(cbl.Txs[i], 0) { // block pool will reject any tx which are duplicates or double spend attacks
  304. block_logger.Warnf("Double spend attack %s, rejecting ", cbl.Txs[i].GetHash())
  305. return false
  306. }
  307. }
  308. }
  309. // now we need to verify each and every tx in detail
  310. // verify coinbase tx
  311. if chain.Get_Height() > 5 { // skip checks for first 5 blocks
  312. if !chain.Verify_Transaction_Coinbase(cbl, &bl.Miner_tx) {
  313. block_logger.Warnf("Miner tx failed verification rejecting ")
  314. return false
  315. }
  316. }
  317. /*
  318. // verify all non coinbase tx, single threaded, we have a multithreaded version below
  319. for i := 0 ; i < len(cbl.Txs); i++ {
  320. if !chain.Verify_Transaction_NonCoinbase(cbl.Txs[i]){
  321. logger.Warnf("Non Coinbase tx failed verification rejecting " )
  322. return false
  323. }
  324. }
  325. */
  326. fail_count := uint64(0)
  327. wg := sync.WaitGroup{}
  328. wg.Add(len(cbl.Txs)) // add total number of tx as work
  329. for i := 0; i < len(cbl.Txs); i++ {
  330. go func(j int) {
  331. if !chain.Verify_Transaction_NonCoinbase(cbl.Txs[j]) { // transaction verification failed
  332. atomic.AddUint64(&fail_count, 1) // increase fail count by 1
  333. }
  334. wg.Done()
  335. }(i)
  336. }
  337. wg.Wait() // wait for verifications to finish
  338. if fail_count > 0 { // check the result
  339. block_logger.Warnf("Block verification failed rejecting ")
  340. return false
  341. }
  342. } // checkpoint based validation completed here
  343. // if checkpoint is found, we land here
  344. // being here means everything looks good, proceed and save to the chain
  345. // discard the transactions from mempool if they are present there
  346. chain.Mempool.Monitor()
  347. for i := 0; i < len(cbl.Txs); i++ {
  348. txid := cbl.Txs[i].GetHash()
  349. if chain.Mempool.Mempool_TX_Exist(txid) {
  350. rlog.Tracef(1, "Deleting TX from pool txid=%s", txid)
  351. chain.Mempool.Mempool_Delete_TX(txid)
  352. }
  353. }
  354. // save all the txs
  355. // and then save the block
  356. { // first lets save all the txs, together with their link to this block as height
  357. height := uint64(0)
  358. if block_hash != globals.Config.Genesis_Block_Hash {
  359. // get height from parent block
  360. height = chain.Load_Height_for_BL_ID(bl.Prev_Hash)
  361. height++
  362. }
  363. for i := 0; i < len(cbl.Txs); i++ {
  364. chain.Store_TX(cbl.Txs[i], height)
  365. }
  366. }
  367. // check whether we need to extend the chain or do a soft fork
  368. // this condition is automatically satisfied by the genesis block ( since golang gives everything a zero value)
  369. if bl.Prev_Hash == chain.Top_ID /* block_hash == globals.Config.Genesis_Block_Hash */ {
  370. // we need to extend the chain
  371. //log.Debugf("Extendin chain using block %x", block_hash )
  372. chain.Store_BL(bl)
  373. chain.consume_keyimages(block_hash) // consume all keyimages as spent
  374. chain.write_output_index(block_hash) // extract and store keys
  375. chain.Store_TOP_ID(block_hash) // make new block top block
  376. //chain.Add_Child(bl.Prev_Hash, block_hash) // add the new block as chil
  377. chain.Store_Block_Child(bl.Prev_Hash, block_hash)
  378. chain.Store_BL_ID_at_Height(chain.Height, block_hash) // store height to block id mapping
  379. // lower the window, where top_id and chain height are different
  380. chain.Height = chain.Height + 1 // increment height
  381. chain.Top_ID = block_hash // set new top block id
  382. block_logger.Debugf("Chain extended new height %d", chain.Height)
  383. // every 20 block print a line
  384. if chain.Height%20 == 0 {
  385. block_logger.Infof("Chain Height %d", chain.Height)
  386. }
  387. } else { // a soft fork is in progress
  388. block_logger.Debugf("Soft Fork is in progress")
  389. chain.Chain_Add_And_Reorganise(bl)
  390. }
  391. result = true
  392. return // the deferred handler above commits or rolls back atomically
  393. }
  394. /* the block we have is NOT at the top, it either belongs to an altchain or is an alternative */
  395. func (chain *Blockchain) Chain_Add_And_Reorganise(bl *block.Block) (result bool) {
  396. block_hash := bl.GetHash()
  397. // check whether the parent already has a child
  398. parent_has_child := chain.Does_Block_Have_Child(bl.Prev_Hash)
  399. // first lets add ourselves to the chain
  400. chain.Store_BL(bl)
  401. chain.consume_keyimages(block_hash)
  402. if !parent_has_child {
  403. chain.Store_Block_Child(bl.Prev_Hash, block_hash)
  404. logger.Infof("Adding alternative block %s to alt chain top", block_hash)
  405. } else {
  406. logger.Infof("Adding alternative block %s", block_hash)
  407. // load existing children, there can be more than 1 in extremely rare cases or under unknown attacks
  408. children_list := chain.Load_Block_Children(bl.Prev_Hash)
  409. children_list = append(children_list, block_hash) // add ourselves to children list
  410. // store children excluding main child of prev block
  411. chain.Store_Block_Children(bl.Prev_Hash, children_list, chain.Load_Block_Child(bl.Prev_Hash))
  412. }
  413. // now we must trigger the recursive reorganise process from the parent block,
  414. // the recursion should always end at the genesis block
  415. // adding a block causes at most 1 chain reorganisation in 99.99% of cases
  416. // but we are prepared for the rare case of multiple reorganisations, which might occur due to alt-alt-chains
  417. chain.reorganise(block_hash)
  418. return true
  419. }
  420. type chain_data struct {
  421. hash crypto.Hash
  422. cdifficulty uint64
  423. foundat uint64 // when block was found
  424. }
  425. // NOTE: the algorithm below is the core and is used to achieve network consensus
  426. // the best chain is found using the following algorithm
  427. // the cryptonote protocol algorithm is below
  428. // compare cdiff, the chain with higher diff wins; if the diff is the same, no reorg, which causes frequent splits
  429. // the new algorithm is this
  430. // compare cdiff, the chain with higher diff wins; if the diff is the same, go below
  431. // compare timestamps, the block with the lower timestamp wins (since it has probably spread more than other blocks)
  432. // if timestamps are the same, the block with the lower block hash (no PoW involved) wins
  433. // block hashes cannot be the same
  434. type bestChain []chain_data
  435. func (s bestChain) Len() int { return len(s) }
  436. func (s bestChain) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
  437. func (s bestChain) Less(i, j int) bool {
  438. if s[i].cdifficulty > s[j].cdifficulty {
  439. return true
  440. }
  441. if s[i].cdifficulty < s[j].cdifficulty {
  442. return false
  443. }
  444. // we are here only if the difficulties are the same
  445. if s[i].foundat < s[j].foundat { // check if timestamps are diff
  446. return true
  447. }
  448. if s[i].foundat > s[j].foundat { // check if timestamps are diff
  449. return false
  450. }
  451. if bytes.Compare(s[i].hash[:], s[j].hash[:]) < 0 {
  452. return true
  453. }
  454. return false
  455. }
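// Illustrative ordering example for the rules above (hypothetical values): given candidates
// A{cdifficulty: 900, foundat: 100}, B{cdifficulty: 900, foundat: 90} and C{cdifficulty: 1000},
// sort.Sort(bestChain{A, B, C}) orders them C, B, A: C wins on cumulative difficulty, B beats A
// on the earlier timestamp, and equal timestamps would fall through to the byte-wise hash compare.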
  456. // this function will recursively reorganise the chain, till the genesis block if required
  457. // we are doing it this way so we can do away with too much book-keeping
  458. // this is processor and IO intensive in normal cases
  459. func (chain *Blockchain) reorganise(block_hash crypto.Hash) {
  460. var children_data bestChain
  461. if block_hash == globals.Config.Genesis_Block_Hash {
  462. logger.Infof("Reorganisation completed successfully, we reached genesis block")
  463. return
  464. }
  465. // check if the block mentioned has more than 1 child
  466. block_hash, found := chain.find_parent_with_children(block_hash)
  467. logger.Debugf("block with children (in reverse) %s found %d", block_hash, found)
  468. if found {
  469. // reorganise chain at this block
  470. children := chain.Load_Block_Children(block_hash)
  471. if len(children) < 2 {
  472. panic(fmt.Sprintf("Children disappeared for block %s", block_hash))
  473. }
  474. main_chain := chain.Load_Block_Child(block_hash)
  475. // choose the best chain and make it parent
  476. for i := range children {
  477. top_hash := chain.Get_Top_Block(children[i])
  478. top_cdiff := chain.Load_Block_Cumulative_Difficulty(top_hash)
  479. timestamp := chain.Load_Block_Timestamp(children[i])
  480. children_data = append(children_data, chain_data{hash: children[i], cdifficulty: top_cdiff, foundat: timestamp})
  481. }
  482. sort.Sort(children_data)
  483. logger.Infof("Choosing best chain")
  484. for i := range children {
  485. logger.Infof("%d %+v\n", i, children_data[i])
  486. }
  487. best_chain := children_data[0].hash
  488. if main_chain == best_chain {
  489. logger.Infof("Main chain is already best, nothing to do")
  490. return
  491. } else {
  492. logger.Infof("Making alt chain -> main chain and vice-versa")
  493. // first lets fix up the connection
  494. chain.Store_Block_Child(block_hash, best_chain) // store main connection
  495. chain.Store_Block_Children(block_hash, children, best_chain) // store remaining child
  496. // setup new height
  497. new_height := chain.Load_Height_for_BL_ID(chain.Get_Top_Block(best_chain)) + 1
  498. //also walk through all the new main chain till the top, setting output keys, first of all
  499. chain.write_output_index(block_hash) // extract and store keys
  500. loop_block_hash := block_hash
  501. for {
  502. chain.write_output_index(loop_block_hash) // extract and store keys
  503. chain.consume_keyimages(loop_block_hash) // consume all keyimages
  504. // fix up height to block id mapping, which is used to find orphans later on
  505. height := chain.Load_Height_for_BL_ID(loop_block_hash)
  506. chain.Store_BL_ID_at_Height(height, loop_block_hash)
  507. // check if the block has child, if not , we are the top
  508. if !chain.Does_Block_Have_Child(loop_block_hash) {
  509. break
  510. }
  511. loop_block_hash = chain.Load_Block_Child(loop_block_hash) // continue searching the new top
  512. }
  513. // invalidate all transactions contained within the old main chain
  514. // validate all transactions in new main chain
  515. logger.Debugf("Invalidating all transactions with old main chain")
  516. logger.Debugf("Validating all transactions with old alt chain")
  517. // pushing alt_chain txs to mempool after verification
  518. loop_block_hash = main_chain // main chain at this point is the old chain
  519. for {
  520. // load the block
  521. bl, err := chain.Load_BL_FROM_ID(loop_block_hash)
  522. if err == nil {
  523. chain.revoke_keyimages(bl.GetHash()) // revoke all keyimages
  524. for i := 0; i < len(bl.Tx_hashes); i++ {
  525. tx, err := chain.Load_TX_FROM_ID(bl.Tx_hashes[i])
  526. if err == nil {
  527. if !chain.Verify_Transaction_NonCoinbase(tx) {
  528. logger.Warnf("Non Coinbase tx failed verification rejecting ")
  529. } else { // tx passed verification add to mempool
  530. // TODO check whether the addition was successful
  531. chain.Mempool.Mempool_Add_TX(tx, 0)
  532. }
  533. }
  534. }
  535. } else {
  536. logger.Debugf("error during chain reorganisation, failed to push alt chain TX to pool")
  537. }
  538. // check if the block has child, if not , we are the top
  539. if !chain.Does_Block_Have_Child(loop_block_hash) {
  540. break
  541. }
  542. loop_block_hash = chain.Load_Block_Child(loop_block_hash) // continue searching the new top
  543. }
  544. logger.Infof("Reorganise old height %d, new height %d", chain.Get_Height(), new_height)
  545. chain.Top_ID = chain.Get_Top_Block(best_chain)
  546. chain.Height = new_height
  547. chain.Store_TOP_ID(chain.Top_ID) // make new block top block
  548. logger.Infof("Reorganise success")
  549. }
  550. // TODO if we need to support alt-alt chains, uncomment the code below
  551. //chain.reorganise(chain.Load_Block_Parent_ID(block_hash))
  552. }
  553. }
  554. /*
  555. func (chain *Blockchain)find_best_chain(list []crypto.Hash) best_child crypto.Hash {
  556. if len(list) < 2 {
  557. panic("Cannot find best child, when child_count = 1")
  558. }
  559. }
  560. */
  561. // find a block with 2 or more children,
  562. // returns false, if we reach genesis block
  563. func (chain *Blockchain) find_parent_with_children(block_hash crypto.Hash) (hash crypto.Hash, found bool) {
  564. // TODO we can also stop at the highest checkpointed state, to save computing resources and time
  565. if block_hash == globals.Config.Genesis_Block_Hash {
  566. return hash, false // we do not have parent of genesis block
  567. }
  568. for {
  569. // load children
  570. children := chain.Load_Block_Children(block_hash)
  571. if len(children) >= 2 {
  572. return block_hash, true
  573. }
  574. block_hash = chain.Load_Block_Parent_ID(block_hash)
  575. if block_hash == globals.Config.Genesis_Block_Hash {
  576. return hash, false // we do not have parent of genesis block
  577. }
  578. }
  579. }
  580. // Finds whether a block is an orphan
  581. // since we do not store any fields, we need to determine whether the block is an orphan
  582. // using an algorithm
  583. // find the block height and then look up the block at that height again
  584. // if both hashes are the same, the block is good, otherwise we treat it as an orphan
  585. func (chain *Blockchain) Is_Block_Orphan(hash crypto.Hash) bool {
  586. height := chain.Load_Height_for_BL_ID(hash)
  587. block_hash_at_height, _ := chain.Load_BL_ID_at_Height(height)
  588. if hash == block_hash_at_height {
  589. return false
  590. }
  591. return true
  592. }
  593. // this function will mark all the key images present in the block as requested
  594. // this is done so that they cannot be re-spent
  595. // mark is a bool
  596. func (chain *Blockchain) mark_keyimages(block_hash crypto.Hash, mark bool) bool {
  597. bl, err := chain.Load_BL_FROM_ID(block_hash)
  598. if err == nil {
  599. for i := 0; i < len(bl.Tx_hashes); i++ {
  600. tx, err := chain.Load_TX_FROM_ID(bl.Tx_hashes[i])
  601. if err != nil {
  602. logger.Debugf("TX loading error while marking keyimages as spent blid %s txid %s", block_hash, bl.Tx_hashes[i])
  603. return false
  604. } else {
  605. // mark keyimage as spent
  606. for i := 0; i < len(tx.Vin); i++ {
  607. k_image := tx.Vin[i].(transaction.Txin_to_key).K_image
  608. chain.Store_KeyImage(crypto.Hash(k_image), mark)
  609. }
  610. }
  611. }
  612. } else {
  613. logger.Debugf("BL loading error while marking keyimages as spent blid %s err %s", block_hash, err)
  614. return false
  615. }
  616. return true
  617. }
  618. // this will mark all the keyimages present in this block as spent
  619. // this is done so that an input cannot be spent twice
  620. func (chain *Blockchain) consume_keyimages(block_hash crypto.Hash) bool {
  621. return chain.mark_keyimages(block_hash, true)
  622. }
  623. // this will mark all the keyimages present in this block as unspent
  624. // this is required during chain reorganisation
  625. // when an altchain becomes the mainchain or vice-versa,
  626. // one of the chains needs to be marked unconsumed, so its inputs can be consumed again
  627. func (chain *Blockchain) revoke_keyimages(block_hash crypto.Hash) bool {
  628. return chain.mark_keyimages(block_hash, false)
  629. }
  630. /* this will only give you access to transactions which have been mined
  631. */
  632. func (chain *Blockchain) Get_TX(hash crypto.Hash) (*transaction.Transaction, error) {
  633. tx, err := chain.Load_TX_FROM_ID(hash)
  634. return tx, err
  635. }
  636. // get difficulty at a specific height, but the height must be <= the current blockchain height
  637. func (chain *Blockchain) Get_Difficulty_At_Height(Height uint64) uint64 {
  638. if Height > chain.Get_Height() {
  639. logger.Warnf("Difficulty Requested for invalid Height Chain Height %d requested Height %d", chain.Get_Height(), Height)
  640. panic("Difficulty Requested for invalid Height")
  641. }
  642. // get block id at that height
  643. block_id, err := chain.Load_BL_ID_at_Height(Height)
  644. if err != nil {
  645. logger.Warnf("No Block at Height %d , chain height %d", Height, chain.Get_Height())
  646. panic("No Block at Height")
  647. }
  648. // we have a block id, now Lets get the difficulty
  649. return chain.Get_Difficulty_At_Block(block_id)
  650. }
  651. // get difficulty at specific block_id, only condition is block must exist and must be connected
  652. func (chain *Blockchain) Get_Difficulty_At_Block(block_id crypto.Hash) uint64 {
  653. var cumulative_difficulties []uint64
  654. var timestamps []uint64
  655. var zero_block crypto.Hash
  656. current_block_id := block_id
  657. // traverse the chain from the block referenced, up to config.DIFFICULTY_BLOCKS_COUNT_V2 blocks or till the genesis block is reached
  658. for i := 0; i < config.DIFFICULTY_BLOCKS_COUNT_V2; i++ {
  659. if current_block_id == globals.Config.Genesis_Block_Hash || current_block_id == zero_block {
  660. rlog.Tracef(2, "Reached genesis block for difficulty calculation %s", block_id)
  661. break // break we have reached genesis block
  662. }
  663. // read timestamp of block and cumulative difficulty at that block
  664. timestamp := chain.Load_Block_Timestamp(current_block_id)
  665. cdifficulty := chain.Load_Block_Cumulative_Difficulty(current_block_id)
  666. timestamps = append([]uint64{timestamp}, timestamps...) // prepend timestamp
  667. cumulative_difficulties = append([]uint64{cdifficulty}, cumulative_difficulties...) // prepend cumulative difficulty
  668. current_block_id = chain.Load_Block_Parent_ID(current_block_id)
  669. }
  670. return difficulty.Next_Difficulty(timestamps, cumulative_difficulties, config.BLOCK_TIME)
  671. }
  672. // get median time stamp at specific block_id, only condition is block must exist and must be connected
  673. func (chain *Blockchain) Get_Median_Timestamp_At_Block(block_id crypto.Hash) uint64 {
  674. var timestamps []uint64
  675. var zero_block crypto.Hash
  676. current_block_id := block_id
  677. // traverse the chain from the block referenced, up to config.BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW blocks or till the genesis block is reached
  678. for i := 0; i < config.BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW; i++ {
  679. if current_block_id == globals.Config.Genesis_Block_Hash || current_block_id == zero_block {
  680. rlog.Tracef(4, "Reached genesis block for median calculation %s", block_id)
  681. break // break we have reached genesis block
  682. }
  683. // read the timestamp of the block
  684. timestamp := chain.Load_Block_Timestamp(current_block_id)
  685. timestamps = append(timestamps, timestamp) // append timestamp
  686. current_block_id = chain.Load_Block_Parent_ID(current_block_id)
  687. }
  688. return Median(timestamps)
  689. }
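// Median is defined elsewhere in this package; the sketch below only illustrates the behaviour
// the callers above appear to rely on (assumption: middle element for odd counts, mean of the
// two middle elements for even counts, 0 for an empty slice). It is not the original helper.
func median_sketch(v []uint64) uint64 {
	if len(v) == 0 {
		return 0
	}
	tmp := make([]uint64, len(v))
	copy(tmp, v) // sort a copy so the caller's slice is untouched
	sort.Slice(tmp, func(i, j int) bool { return tmp[i] < tmp[j] })
	n := len(tmp)
	if n%2 == 1 {
		return tmp[n/2]
	}
	return (tmp[n/2-1] + tmp[n/2]) / 2
}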
  690. // get median blocksize at specific block_id, only condition is block must exist and must be connected
  691. func (chain *Blockchain) Get_Median_BlockSize_At_Block(block_id crypto.Hash) uint64 {
  692. var block_sizes []uint64
  693. var zero_block crypto.Hash
  694. current_block_id := block_id
  695. // traverse the chain from the block referenced, up to config.CRYPTONOTE_REWARD_BLOCKS_WINDOW blocks or till the genesis block is reached
  696. for i := uint64(0); i < config.CRYPTONOTE_REWARD_BLOCKS_WINDOW; i++ {
  697. if current_block_id == globals.Config.Genesis_Block_Hash || current_block_id == zero_block {
  698. rlog.Tracef(4, "Reached genesis block for median calculation %s", block_id)
  699. break // break we have reached genesis block
  700. }
  701. // read the size of the block
  702. block_size := chain.Load_Block_Size(current_block_id)
  703. block_sizes = append(block_sizes, block_size) // append size
  704. current_block_id = chain.Load_Block_Parent_ID(current_block_id)
  705. }
  706. return Median(block_sizes)
  707. }
  708. // this function returns the current top block, if we start at a specific block
  709. // this works for any block which was added
  710. func (chain *Blockchain) Get_Top_Block(block_id crypto.Hash) crypto.Hash {
  711. for {
  712. // check if the block has child, if not , we are the top
  713. if !chain.Does_Block_Have_Child(block_id) {
  714. return block_id
  715. }
  716. block_id = chain.Load_Block_Child(block_id) // continue searching the new top
  717. }
  718. // panic("We can never reach this point")
  719. // return block_id // we will never reach here
  720. }
  721. // verifies whether we are lagging
  722. // returns true if we need a resync
  723. // returns false if we are good and a resync is not required
  724. func (chain *Blockchain) IsLagging(peer_cdifficulty, peer_height uint64, peer_top_id crypto.Hash) bool {
  725. top_id := chain.Get_Top_ID()
  726. cdifficulty := chain.Load_Block_Cumulative_Difficulty(top_id)
  727. height := chain.Load_Height_for_BL_ID(top_id) + 1
  728. rlog.Tracef(3, "P_cdiff %d cdiff %d , P_BH %d BH %d, p_top %s top %s",
  729. peer_cdifficulty, cdifficulty,
  730. peer_height, height,
  731. peer_top_id, top_id)
  732. if peer_cdifficulty > cdifficulty {
  733. return true // peer's cumulative difficulty is more than ours , active resync
  734. }
  735. if peer_cdifficulty == cdifficulty && peer_top_id != top_id {
  736. return true // cumulative difficulty is same but tops are different , active resync
  737. }
  738. return false
  739. }
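// Illustrative example (hypothetical values): if a peer reports cumulative difficulty 1000
// while ours is 900, IsLagging returns true and we resync; if both report 1000 but the top
// block hashes differ, it also returns true so both sides converge on a single tip.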
  740. // This function will expand a transaction with all the missing info being reconstituted from the blockchain
  741. // this also increases security since data is coming from the chain or being calculated
  742. // basically this places data for ring signature verification
  743. // REMEMBER to expand key images from the blockchain
  744. // TODO we must enforce that the keyimages used are valid and specific outputs are unlocked
  745. func (chain *Blockchain) Expand_Transaction_v2(tx *transaction.Transaction) (result bool) {
  746. result = false
  747. if tx.Version != 2 {
  748. panic("TX not version 2")
  749. }
  750. //if rctsignature is null
  751. // fill up the message hash first
  752. tx.RctSignature.Message = ringct.Key(tx.GetPrefixHash())
  753. // fill up the key images from the blockchain
  754. for i := 0; i < len(tx.Vin); i++ {
  755. tx.RctSignature.MlsagSigs[i].II = tx.RctSignature.MlsagSigs[i].II[:0] // zero it out
  756. tx.RctSignature.MlsagSigs[i].II = make([]ringct.Key, 1, 1)
  757. tx.RctSignature.MlsagSigs[i].II[0] = ringct.Key(tx.Vin[i].(transaction.Txin_to_key).K_image)
  758. }
  759. // now we need to fill up the mixring ctkey
  760. // one part is the destination address, second is the commitment mask from the outpk
  761. // mixring is stored in different ways for rctfull and simple
  762. switch tx.RctSignature.Get_Sig_Type() {
  763. case ringct.RCTTypeFull:
  764. // TODO, we need to make sure all rings are of the same size
  765. if len(tx.Vin) > 1 {
  766. panic("unsipported rcctt full case please investigate")
  767. }
  768. // make a matrix of mixin x 1 elements
  769. mixin := len(tx.Vin[0].(transaction.Txin_to_key).Key_offsets)
  770. tx.RctSignature.MixRing = make([][]ringct.CtKey, mixin, mixin)
  771. for n := 0; n < len(tx.Vin); n++ {
  772. offset := uint64(0)
  773. for m := 0; m < len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets); m++ {
  774. tx.RctSignature.MixRing[m] = make([]ringct.CtKey, len(tx.Vin), len(tx.Vin))
  775. offset += tx.Vin[n].(transaction.Txin_to_key).Key_offsets[m]
  776. // extract the keys from specific offset
  777. offset_data := chain.load_output_index(offset)
  778. // check maturity of inputs
  779. if !inputmaturity.Is_Input_Mature(chain.Get_Height(), offset_data.Height, offset_data.Unlock_Height, 1) {
  780. logger.Warnf("transaction using immature inputs from block %d chain height %d", offset_data.Height, chain.Get_Height())
  781. return false
  782. }
  783. tx.RctSignature.MixRing[m][n].Destination = offset_data.InKey.Destination
  784. tx.RctSignature.MixRing[m][n].Mask = offset_data.InKey.Mask
  785. // fmt.Printf("%d %d dest %s\n",n,m, offset_data.InKey.Destination)
  786. // fmt.Printf("%d %d mask %s\n",n,m, offset_data.InKey.Mask)
  787. }
  788. }
  789. case ringct.RCTTypeSimple:
  790. mixin := len(tx.Vin[0].(transaction.Txin_to_key).Key_offsets)
  791. _ = mixin
  792. tx.RctSignature.MixRing = make([][]ringct.CtKey, len(tx.Vin), len(tx.Vin))
  793. for n := 0; n < len(tx.Vin); n++ {
  794. tx.RctSignature.MixRing[n] = make([]ringct.CtKey, len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets),
  795. len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets))
  796. offset := uint64(0)
  797. for m := 0; m < len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets); m++ {
  798. offset += tx.Vin[n].(transaction.Txin_to_key).Key_offsets[m]
  799. // extract the keys from specific offset
  800. offset_data := chain.load_output_index(offset)
  801. // check maturity of inputs
  802. if !inputmaturity.Is_Input_Mature(chain.Get_Height(), offset_data.Height, offset_data.Unlock_Height, 1) {
  803. logger.Warnf("transaction using immature inputs from block %d chain height %d", offset_data.Height, chain.Get_Height())
  804. return false
  805. }
  806. tx.RctSignature.MixRing[n][m].Destination = offset_data.InKey.Destination
  807. tx.RctSignature.MixRing[n][m].Mask = offset_data.InKey.Mask
  808. // fmt.Printf("%d %d dest %s\n",n,m, offset_data.InKey.Destination)
  809. // fmt.Printf("%d %d mask %s\n",n,m, offset_data.InKey.Mask)
  810. }
  811. }
  812. default:
  813. logger.Warnf("unknown ringct transaction")
  814. return false
  815. }
  816. return true
  817. }
  818. // this function counts all the vouts of the block,
  819. // this function exists here because only the chain knows the txs
  820. //
  821. func (chain *Blockchain) Block_Count_Vout(block_hash crypto.Hash) (count uint64) {
  822. count = 1 // miner tx is always present
  823. bl, err := chain.Load_BL_FROM_ID(block_hash)
  824. if err != nil {
  825. panic(fmt.Errorf("Cannot load block for %s err %s", block_hash, err))
  826. }
  827. for i := 0; i < len(bl.Tx_hashes); i++ { // load all tx one by one
  828. tx, err := chain.Load_TX_FROM_ID(bl.Tx_hashes[i])
  829. if err != nil {
  830. panic(fmt.Errorf("Cannot load tx for %s err %s", bl.Tx_hashes[i], err))
  831. }
  832. // tx has been loaded, now lets get the vout
  833. vout_count := uint64(len(tx.Vout))
  834. count += vout_count
  835. }
  836. return count
  837. }