You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

628 lines
16 KiB

package main

import (
	"crypto/rand"
	"encoding/binary"
	"flag"
	"fmt"
	"io"
	"log"
	mrand "math/rand"
	"net/http"
	_ "net/http/pprof"
	"os"
	"os/signal"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/table"
	"github.com/syndtr/goleveldb/leveldb/util"
)
// Package-level configuration (defaults, overridable via the flags
// registered in init()) and state shared across the test goroutines.
var (
	dbPath                 = path.Join(os.TempDir(), "goleveldb-testdb") // on-disk location of the test database
	openFilesCacheCapacity = 500
	keyLen                 = 63
	valueLen               = 256
	numKeys                = arrayInt{100000, 1332, 531, 1234, 9553, 1024, 35743} // one namespace (writer+scanner) per entry
	httpProf               = "127.0.0.1:5454"                                     // pprof HTTP listen address ("" disables)
	transactionProb        = 0.5                                                  // probability a batch is written via OpenTransaction
	enableBlockCache       = false
	enableCompression      = false
	enableBufferPool       = false

	wg = new(sync.WaitGroup)
	// Accessed atomically: done signals all loops to stop, fail records
	// that a corruption/fatal error was observed.
	done, fail uint32
	// Shared buffer pool; set in main() when enableBufferPool is true.
	bpool *util.BufferPool
)
  42. type arrayInt []int
  43. func (a arrayInt) String() string {
  44. var str string
  45. for i, n := range a {
  46. if i > 0 {
  47. str += ","
  48. }
  49. str += strconv.Itoa(n)
  50. }
  51. return str
  52. }
  53. func (a *arrayInt) Set(str string) error {
  54. var na arrayInt
  55. for _, s := range strings.Split(str, ",") {
  56. s = strings.TrimSpace(s)
  57. if s != "" {
  58. n, err := strconv.Atoi(s)
  59. if err != nil {
  60. return err
  61. }
  62. na = append(na, n)
  63. }
  64. }
  65. *a = na
  66. return nil
  67. }
  68. func init() {
  69. flag.StringVar(&dbPath, "db", dbPath, "testdb path")
  70. flag.IntVar(&openFilesCacheCapacity, "openfilescachecap", openFilesCacheCapacity, "open files cache capacity")
  71. flag.IntVar(&keyLen, "keylen", keyLen, "key length")
  72. flag.IntVar(&valueLen, "valuelen", valueLen, "value length")
  73. flag.Var(&numKeys, "numkeys", "num keys")
  74. flag.StringVar(&httpProf, "httpprof", httpProf, "http pprof listen addr")
  75. flag.Float64Var(&transactionProb, "transactionprob", transactionProb, "probablity of writes using transaction")
  76. flag.BoolVar(&enableBufferPool, "enablebufferpool", enableBufferPool, "enable buffer pool")
  77. flag.BoolVar(&enableBlockCache, "enableblockcache", enableBlockCache, "enable block cache")
  78. flag.BoolVar(&enableCompression, "enablecompression", enableCompression, "enable block compression")
  79. }
  80. func randomData(dst []byte, ns, prefix byte, i uint32, dataLen int) []byte {
  81. if dataLen < (2+4+4)*2+4 {
  82. panic("dataLen is too small")
  83. }
  84. if cap(dst) < dataLen {
  85. dst = make([]byte, dataLen)
  86. } else {
  87. dst = dst[:dataLen]
  88. }
  89. half := (dataLen - 4) / 2
  90. if _, err := rand.Reader.Read(dst[2 : half-8]); err != nil {
  91. panic(err)
  92. }
  93. dst[0] = ns
  94. dst[1] = prefix
  95. binary.LittleEndian.PutUint32(dst[half-8:], i)
  96. binary.LittleEndian.PutUint32(dst[half-8:], i)
  97. binary.LittleEndian.PutUint32(dst[half-4:], util.NewCRC(dst[:half-4]).Value())
  98. full := half * 2
  99. copy(dst[half:full], dst[:half])
  100. if full < dataLen-4 {
  101. if _, err := rand.Reader.Read(dst[full : dataLen-4]); err != nil {
  102. panic(err)
  103. }
  104. }
  105. binary.LittleEndian.PutUint32(dst[dataLen-4:], util.NewCRC(dst[:dataLen-4]).Value())
  106. return dst
  107. }
  108. func dataSplit(data []byte) (data0, data1 []byte) {
  109. n := (len(data) - 4) / 2
  110. return data[:n], data[n : n+n]
  111. }
  112. func dataNS(data []byte) byte {
  113. return data[0]
  114. }
  115. func dataPrefix(data []byte) byte {
  116. return data[1]
  117. }
  118. func dataI(data []byte) uint32 {
  119. return binary.LittleEndian.Uint32(data[(len(data)-4)/2-8:])
  120. }
  121. func dataChecksum(data []byte) (uint32, uint32) {
  122. checksum0 := binary.LittleEndian.Uint32(data[len(data)-4:])
  123. checksum1 := util.NewCRC(data[:len(data)-4]).Value()
  124. return checksum0, checksum1
  125. }
  126. func dataPrefixSlice(ns, prefix byte) *util.Range {
  127. return util.BytesPrefix([]byte{ns, prefix})
  128. }
  129. func dataNsSlice(ns byte) *util.Range {
  130. return util.BytesPrefix([]byte{ns})
  131. }
// testingStorage wraps a storage.Storage so that table files are scanned
// for corruption before deletion and preserved when a failure has been
// detected (see Remove and scanTable).
type testingStorage struct {
	storage.Storage
}
  135. func (ts *testingStorage) scanTable(fd storage.FileDesc, checksum bool) (corrupted bool) {
  136. r, err := ts.Open(fd)
  137. if err != nil {
  138. log.Fatal(err)
  139. }
  140. defer r.Close()
  141. size, err := r.Seek(0, os.SEEK_END)
  142. if err != nil {
  143. log.Fatal(err)
  144. }
  145. o := &opt.Options{
  146. DisableLargeBatchTransaction: true,
  147. Strict: opt.NoStrict,
  148. }
  149. if checksum {
  150. o.Strict = opt.StrictBlockChecksum | opt.StrictReader
  151. }
  152. tr, err := table.NewReader(r, size, fd, nil, bpool, o)
  153. if err != nil {
  154. log.Fatal(err)
  155. }
  156. defer tr.Release()
  157. checkData := func(i int, t string, data []byte) bool {
  158. if len(data) == 0 {
  159. panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fd, i, t))
  160. }
  161. checksum0, checksum1 := dataChecksum(data)
  162. if checksum0 != checksum1 {
  163. atomic.StoreUint32(&fail, 1)
  164. atomic.StoreUint32(&done, 1)
  165. corrupted = true
  166. data0, data1 := dataSplit(data)
  167. data0c0, data0c1 := dataChecksum(data0)
  168. data1c0, data1c1 := dataChecksum(data1)
  169. log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)",
  170. fd, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1)
  171. return true
  172. }
  173. return false
  174. }
  175. iter := tr.NewIterator(nil, nil)
  176. defer iter.Release()
  177. for i := 0; iter.Next(); i++ {
  178. ukey, _, kt, kerr := parseIkey(iter.Key())
  179. if kerr != nil {
  180. atomic.StoreUint32(&fail, 1)
  181. atomic.StoreUint32(&done, 1)
  182. corrupted = true
  183. log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fd, i, kerr)
  184. return
  185. }
  186. if checkData(i, "key", ukey) {
  187. return
  188. }
  189. if kt == ktVal && checkData(i, "value", iter.Value()) {
  190. return
  191. }
  192. }
  193. if err := iter.Error(); err != nil {
  194. if errors.IsCorrupted(err) {
  195. atomic.StoreUint32(&fail, 1)
  196. atomic.StoreUint32(&done, 1)
  197. corrupted = true
  198. log.Printf("FATAL: [%v] Corruption detected: %v", fd, err)
  199. } else {
  200. log.Fatal(err)
  201. }
  202. }
  203. return
  204. }
  205. func (ts *testingStorage) Remove(fd storage.FileDesc) error {
  206. if atomic.LoadUint32(&fail) == 1 {
  207. return nil
  208. }
  209. if fd.Type == storage.TypeTable {
  210. if ts.scanTable(fd, true) {
  211. return nil
  212. }
  213. }
  214. return ts.Storage.Remove(fd)
  215. }
  216. type latencyStats struct {
  217. mark time.Time
  218. dur, min, max time.Duration
  219. num int
  220. }
  221. func (s *latencyStats) start() {
  222. s.mark = time.Now()
  223. }
  224. func (s *latencyStats) record(n int) {
  225. if s.mark.IsZero() {
  226. panic("not started")
  227. }
  228. dur := time.Now().Sub(s.mark)
  229. dur1 := dur / time.Duration(n)
  230. if dur1 < s.min || s.min == 0 {
  231. s.min = dur1
  232. }
  233. if dur1 > s.max {
  234. s.max = dur1
  235. }
  236. s.dur += dur
  237. s.num += n
  238. s.mark = time.Time{}
  239. }
  240. func (s *latencyStats) ratePerSec() int {
  241. durSec := s.dur / time.Second
  242. if durSec > 0 {
  243. return s.num / int(durSec)
  244. }
  245. return s.num
  246. }
  247. func (s *latencyStats) avg() time.Duration {
  248. if s.num > 0 {
  249. return s.dur / time.Duration(s.num)
  250. }
  251. return 0
  252. }
  253. func (s *latencyStats) add(x *latencyStats) {
  254. if x.min < s.min || s.min == 0 {
  255. s.min = x.min
  256. }
  257. if x.max > s.max {
  258. s.max = x.max
  259. }
  260. s.dur += x.dur
  261. s.num += x.num
  262. }
// main drives a goleveldb stress test. For each namespace in numKeys it
// starts a writer goroutine (batches of fresh keys/values, sometimes via
// transactions), per-batch reader goroutines that validate a snapshot,
// and a scanner goroutine that verifies and prunes the whole namespace,
// while a stats goroutine logs latency and DB metrics every 3 seconds.
// The test stops on SIGINT/SIGKILL or on the first detected
// corruption/fatal error (done/fail flags).
func main() {
	flag.Parse()

	if enableBufferPool {
		bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
	}

	log.Printf("Test DB stored at %q", dbPath)
	if httpProf != "" {
		log.Printf("HTTP pprof listening at %q", httpProf)
		runtime.SetBlockProfileRate(1)
		go func() {
			if err := http.ListenAndServe(httpProf, nil); err != nil {
				log.Fatalf("HTTPPROF: %v", err)
			}
		}()
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	// Start from a clean slate; the DB is recreated on every run.
	os.RemoveAll(dbPath)
	stor, err := storage.OpenFile(dbPath, false)
	if err != nil {
		log.Fatal(err)
	}
	tstor := &testingStorage{stor}
	defer tstor.Close()

	// fatalf flags global failure, logs, and — for table corruption —
	// rescans the offending table before killing the calling goroutine
	// via runtime.Goexit (so deferred cleanups still run).
	fatalf := func(err error, format string, v ...interface{}) {
		atomic.StoreUint32(&fail, 1)
		atomic.StoreUint32(&done, 1)
		log.Printf("FATAL: "+format, v...)
		if err != nil && errors.IsCorrupted(err) {
			cerr := err.(*errors.ErrCorrupted)
			if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable {
				log.Print("FATAL: corruption detected, scanning...")
				if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
					log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
				}
			}
		}
		runtime.Goexit()
	}

	// 0 means "use the library default" is not wanted here; -1 disables
	// the open-files cache entirely.
	if openFilesCacheCapacity == 0 {
		openFilesCacheCapacity = -1
	}
	o := &opt.Options{
		OpenFilesCacheCapacity: openFilesCacheCapacity,
		DisableBufferPool:      !enableBufferPool,
		DisableBlockCache:      !enableBlockCache,
		ErrorIfExist:           true,
		Compression:            opt.NoCompression,
	}
	if enableCompression {
		o.Compression = opt.DefaultCompression
	}

	db, err := leveldb.Open(tstor, o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var (
		// mu guards the four global latencyStats instances below.
		mu              = &sync.Mutex{}
		gGetStat        = &latencyStats{}
		gIterStat       = &latencyStats{}
		gWriteStat      = &latencyStats{}
		gTrasactionStat = &latencyStats{} // (sic) name kept for consistency with usages below
		startTime       = time.Now()

		// All writes are funneled through a single goroutine:
		// writeReq carries the batch, writeAck returns the write error,
		// and writeAckAck lets the requester finish its post-write work
		// (e.g. taking a snapshot) before the next batch is accepted.
		writeReq    = make(chan *leveldb.Batch)
		writeAck    = make(chan error)
		writeAckAck = make(chan struct{})
	)

	// Dedicated writer goroutine: applies each batch either inside a
	// transaction (with probability transactionProb) or as a plain write.
	go func() {
		for b := range writeReq {
			var err error
			if mrand.Float64() < transactionProb {
				log.Print("> Write using transaction")
				gTrasactionStat.start()
				var tr *leveldb.Transaction
				if tr, err = db.OpenTransaction(); err == nil {
					if err = tr.Write(b, nil); err == nil {
						if err = tr.Commit(); err == nil {
							gTrasactionStat.record(b.Len())
						}
					} else {
						tr.Discard()
					}
				}
			} else {
				gWriteStat.start()
				if err = db.Write(b, nil); err == nil {
					gWriteStat.record(b.Len())
				}
			}
			writeAck <- err
			// Wait until the requester has finished using the batch.
			<-writeAckAck
		}
	}()

	// Periodic stats reporter: latency aggregates plus DB-internal
	// properties, every 3 seconds.
	go func() {
		for {
			time.Sleep(3 * time.Second)

			log.Print("------------------------")

			log.Printf("> Elapsed=%v", time.Now().Sub(startTime))
			mu.Lock()
			log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d",
				gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
			log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d",
				gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
			log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d",
				gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
			log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d",
				gTrasactionStat.min, gTrasactionStat.max, gTrasactionStat.avg(), gTrasactionStat.ratePerSec())
			mu.Unlock()

			cachedblock, _ := db.GetProperty("leveldb.cachedblock")
			openedtables, _ := db.GetProperty("leveldb.openedtables")
			alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
			aliveiters, _ := db.GetProperty("leveldb.aliveiters")
			blockpool, _ := db.GetProperty("leveldb.blockpool")
			writeDelay, _ := db.GetProperty("leveldb.writedelay")
			log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q WriteDelay=%q",
				cachedblock, openedtables, alivesnaps, aliveiters, blockpool, writeDelay)

			log.Print("------------------------")
		}
	}()

	// One writer + one scanner per namespace; the closure pins ns/numKey
	// per iteration for the goroutines below.
	for ns, numKey := range numKeys {
		func(ns, numKey int) {
			log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)

			// Stable per-namespace keys (prefix 1); their values point at
			// the per-iteration keys written below.
			keys := make([][]byte, numKey)
			for i := range keys {
				keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen)
			}

			wg.Add(1)
			go func() {
				// wi is the writer-iteration counter. NOTE(review): it is
				// incremented with atomic.AddUint32 and read atomically by
				// the reader goroutines, but read plainly within this owning
				// goroutine — confirm this mixed access is intentional.
				var wi uint32
				defer func() {
					log.Printf("[%02d] WRITER DONE #%d", ns, wi)
					wg.Done()
				}()

				var (
					b       = new(leveldb.Batch)
					k2, v2  []byte
					nReader int32
				)
				for atomic.LoadUint32(&done) == 0 {
					log.Printf("[%02d] WRITER #%d", ns, wi)

					// Each iteration writes: a fresh (k2, v2) pair tagged with
					// wi, and repoints every stable key k1 at k2.
					b.Reset()
					for _, k1 := range keys {
						k2 = randomData(k2, byte(ns), 2, wi, keyLen)
						v2 = randomData(v2, byte(ns), 3, wi, valueLen)
						b.Put(k2, v2)
						b.Put(k1, k2)
					}
					writeReq <- b
					if err := <-writeAck; err != nil {
						// Release the writer goroutine before dying.
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
					}

					// Snapshot taken before acking so it captures exactly this
					// batch's state.
					snap, err := db.GetSnapshot()
					if err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
					}
					writeAckAck <- struct{}{}

					wg.Add(1)
					atomic.AddInt32(&nReader, 1)
					// Reader goroutine: validates the snapshot's view of this
					// namespace until the writer is 3 iterations ahead (and at
					// least 3 passes have run).
					go func(snapwi uint32, snap *leveldb.Snapshot) {
						var (
							ri       int
							iterStat = &latencyStats{}
							getStat  = &latencyStats{}
						)
						defer func() {
							mu.Lock()
							gGetStat.add(getStat)
							gIterStat.add(iterStat)
							mu.Unlock()

							atomic.AddInt32(&nReader, -1)
							log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg())
							snap.Release()
							wg.Done()
						}()

						stopi := snapwi + 3
						for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 {
							var n int
							// Walk the stable keys (prefix 1) of this namespace.
							iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil)
							iterStat.start()
							for iter.Next() {
								k1 := iter.Key()
								k2 := iter.Value()
								iterStat.record(1)

								// k1's value must be a key in the same namespace...
								if dataNS(k2) != byte(ns) {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2))
								}

								// ...written in exactly the snapshot's iteration.
								kwritei := dataI(k2)
								if kwritei != snapwi {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei)
								}

								getStat.start()
								v2, err := snap.Get(k2, nil)
								if err != nil {
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}
								getStat.record(1)

								if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 {
									err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)}
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}

								n++
								iterStat.start()
							}
							iter.Release()
							if err := iter.Error(); err != nil {
								fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err)
							}

							// The snapshot must contain every stable key.
							if n != numKey {
								fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n)
							}

							ri++
						}
					}(wi, snap)

					atomic.AddUint32(&wi, 1)
				}
			}()

			delB := new(leveldb.Batch)
			wg.Add(1)
			// Scanner goroutine: sweeps the whole namespace, verifies every
			// key/value checksum, and deletes stale per-iteration keys
			// (prefix 2) plus a ~1/999 random sample of others.
			go func() {
				var (
					i        int
					iterStat = &latencyStats{}
				)
				defer func() {
					log.Printf("[%02d] SCANNER DONE #%d", ns, i)
					wg.Done()
				}()

				// Give the writer a head start before the first sweep.
				time.Sleep(2 * time.Second)

				for atomic.LoadUint32(&done) == 0 {
					var n int
					delB.Reset()
					iter := db.NewIterator(dataNsSlice(byte(ns)), nil)
					iterStat.start()
					for iter.Next() && atomic.LoadUint32(&done) == 0 {
						k := iter.Key()
						v := iter.Value()
						iterStat.record(1)

						// Check both the key and the value payloads.
						for ci, x := range [...][]byte{k, v} {
							checksum0, checksum1 := dataChecksum(x)
							if checksum0 != checksum1 {
								if ci == 0 {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								} else {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								}
							}
						}

						if dataPrefix(k) == 2 || mrand.Int()%999 == 0 {
							delB.Delete(k)
						}

						n++
						iterStat.start()
					}
					iter.Release()
					if err := iter.Error(); err != nil {
						fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err)
					}

					if n > 0 {
						log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg())
					}

					// Apply the collected deletions through the central writer.
					if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 {
						t := time.Now()
						writeReq <- delB
						if err := <-writeAck; err != nil {
							writeAckAck <- struct{}{}
							fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err)
						} else {
							writeAckAck <- struct{}{}
						}
						log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Now().Sub(t))
					}

					i++
				}
			}()
		}(ns, numKey)
	}

	// Signal handler: flips the done flag so all loops drain and exit.
	// NOTE(review): the channel is unbuffered (signal.Notify recommends a
	// buffer of at least 1) and os.Kill cannot actually be caught —
	// confirm whether this is intentional.
	go func() {
		sig := make(chan os.Signal)
		signal.Notify(sig, os.Interrupt, os.Kill)
		log.Printf("Got signal: %v, exiting...", <-sig)
		atomic.StoreUint32(&done, 1)
	}()

	wg.Wait()
}