diff --git a/cmd/tbcd/tbcd.go b/cmd/tbcd/tbcd.go index b519c233..32edd382 100644 --- a/cmd/tbcd/tbcd.go +++ b/cmd/tbcd/tbcd.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -25,7 +25,8 @@ const ( defaultLogLevel = daemonName + "=INFO;tbc=INFO;level=INFO" defaultNetwork = "testnet3" // XXX make this mainnet defaultHome = "~/." + daemonName - bhsDefault = int(1e6) // enough for mainnet + bDefaultSize = "1gb" // ~640 blocks on mainnet + bhsDefaultSize = "128mb" // enough for mainnet ) var ( @@ -46,16 +47,16 @@ var ( Help: "enable auto utxo and tx indexes", Print: config.PrintAll, }, - "TBC_BLOCK_CACHE": config.Config{ - Value: &cfg.BlockCache, - DefaultValue: 250, - Help: "number of cached blocks", + "TBC_BLOCK_CACHE_SIZE": config.Config{ + Value: &cfg.BlockCacheSize, + DefaultValue: bDefaultSize, + Help: "size of block cache", Print: config.PrintAll, }, - "TBC_BLOCKHEADER_CACHE": config.Config{ - Value: &cfg.BlockheaderCache, - DefaultValue: bhsDefault, - Help: "number of cached blockheaders", + "TBC_BLOCKHEADER_CACHE_SIZE": config.Config{ + Value: &cfg.BlockheaderCacheSize, + DefaultValue: bhsDefaultSize, + Help: "size of blockheader cache", Print: config.PrintAll, }, "TBC_BLOCK_SANITY": config.Config{ diff --git a/database/tbcd/database.go b/database/tbcd/database.go index f5847e5c..b7480b7c 100644 --- a/database/tbcd/database.go +++ b/database/tbcd/database.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. 
@@ -91,6 +91,7 @@ type Database interface { BlockHeaderBest(ctx context.Context) (*BlockHeader, error) // return canonical BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*BlockHeader, error) BlockHeaderGenesisInsert(ctx context.Context, wbh *wire.BlockHeader, height uint64, diff *big.Int) error + BlockHeaderCacheStats() CacheStats // Block headers BlockHeadersByHeight(ctx context.Context, height uint64) ([]BlockHeader, error) @@ -103,6 +104,7 @@ type Database interface { BlockInsert(ctx context.Context, b *btcutil.Block) (int64, error) // BlocksInsert(ctx context.Context, bs []*btcutil.Block) (int64, error) BlockByHash(ctx context.Context, hash *chainhash.Hash) (*btcutil.Block, error) + BlockCacheStats() CacheStats // Transactions BlockUtxoUpdate(ctx context.Context, direction int, utxos map[Outpoint]CacheOutput) error @@ -413,3 +415,12 @@ func TxIdBlockHashFromTxKey(txKey TxKey) (*chainhash.Hash, *chainhash.Hash, erro } return txId, blockHash, nil } + +// Cache +type CacheStats struct { + Hits int + Misses int + Purges int + Size int + Items int +} diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go new file mode 100644 index 00000000..bab11792 --- /dev/null +++ b/database/tbcd/level/blockcache.go @@ -0,0 +1,117 @@ +// Copyright (c) 2025 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package level + +import ( + "container/list" + "fmt" + "sync" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + + "github.com/hemilabs/heminetwork/database/tbcd" +) + +var blockSize = 1677721 // ~1.6MB rough size of a mainnet block as of Jan 2025 + +type blockElement struct { + element *list.Element + block []byte +} + +type lowIQLRU struct { + mtx sync.Mutex + + size int // this is the approximate max size + + m map[chainhash.Hash]blockElement + totalSize int + + // lru list, when used move to back of the list + l *list.List + + // stats + c tbcd.CacheStats +} + +func (l *lowIQLRU) Put(v *btcutil.Block) { + l.mtx.Lock() + defer l.mtx.Unlock() + + hash := v.Hash() + if _, ok := l.m[*hash]; ok { + return + } + + block, err := v.Bytes() + if err != nil { + // data corruption, panic + panic(err) + } + + // evict first element in list + if l.totalSize+len(block) > l.size { + // LET THEM EAT PANIC + re := l.l.Front() + rha := l.l.Remove(re) + rh := rha.(*chainhash.Hash) + l.totalSize -= len(l.m[*rh].block) + delete(l.m, *rh) + l.c.Purges++ + } + + // block lookup and lru append + l.m[*hash] = blockElement{element: l.l.PushBack(hash), block: block} + l.totalSize += len(block) + + l.c.Size = l.totalSize +} + +func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { + l.mtx.Lock() + defer l.mtx.Unlock() + + be, ok := l.m[*k] + if !ok { + l.c.Misses++ + return nil, false + } + b, err := btcutil.NewBlockFromBytes(be.block) + if err != nil { + // panic for diagnostics at this time + panic(err) + } + + // update access + l.l.MoveToBack(be.element) + + l.c.Hits++ + + return b, true +} + +func (l *lowIQLRU) Stats() tbcd.CacheStats { + l.mtx.Lock() + defer l.mtx.Unlock() + l.c.Items = len(l.m) + return l.c +} + +func lowIQLRUSizeNew(size int) (*lowIQLRU, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size: %v", size) + } + // approximate number of blocks + count := size / blockSize + if count <= 0 { + return nil, 
fmt.Errorf("invalid count: %v", count) + } + return &lowIQLRU{ + size: size, + m: make(map[chainhash.Hash]blockElement, count), + l: list.New(), + }, nil +} diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go new file mode 100644 index 00000000..0da5e0ef --- /dev/null +++ b/database/tbcd/level/cache_test.go @@ -0,0 +1,228 @@ +package level + +import ( + "math/big" + "testing" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + + "github.com/hemilabs/heminetwork/database/tbcd" +) + +func newBlock(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *btcutil.Block) { + bh := wire.NewBlockHeader(0, prevHash, &chainhash.Hash{}, 0, uint32(nonce)) + b := wire.NewMsgBlock(bh) + return bh.BlockHash(), btcutil.NewBlock(b) +} + +func TestLRUCache(t *testing.T) { + maxCache := 10 + blockSize = 81 // we'll use empty blocks + l, err := lowIQLRUSizeNew(blockSize * maxCache) + if err != nil { + t.Fatal(err) + } + + prevHash := chainhash.Hash{} // genesis + blocks := make([]chainhash.Hash, 0, maxCache*2) + for i := 0; i < maxCache; i++ { + h, b := newBlock(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + blocks = append(blocks, h) + l.Put(b) + prevHash = h + } + + // verify stats are 0 + s := l.Stats() + if !(s.Hits == 0 && s.Misses == 0 && s.Purges == 0) { + t.Fatal(spew.Sdump(s)) + } + + // retrieve all blocks + for k := range blocks { + if _, ok := l.Get(&blocks[k]); !ok { + t.Fatalf("block not found: %v", blocks[k]) + } + } + + // verify hits are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 0) { + t.Fatal(spew.Sdump(s)) + } + + // purge oldest cache entries + for i := maxCache; i < maxCache*2; i++ { + h, b := newBlock(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + blocks = append(blocks, h) + l.Put(b) + prevHash = h + } + + // verify purges are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses 
== 0 && s.Purges == 10) { + t.Fatal(spew.Sdump(s)) + } + + // retrieve purged blocks + for k := range blocks { + if k >= maxCache { + break + } + if _, ok := l.Get(&blocks[k]); ok { + t.Fatalf("block found: %v", blocks[k]) + } + } + + // verify misses are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 10 && s.Purges == 10) { + t.Fatal(spew.Sdump(s)) + } + + t.Logf(spew.Sdump(s)) +} + +func newHeader(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *tbcd.BlockHeader) { + bh := wire.NewBlockHeader(0, prevHash, &chainhash.Hash{}, 0, uint32(nonce)) + return bh.BlockHash(), &tbcd.BlockHeader{ + Hash: bh.BlockHash(), + Height: uint64(nonce), + Header: h2b(bh), + Difficulty: big.Int{}, + } +} + +func TestMapCache(t *testing.T) { + maxCacheCount := 10 + l, err := lowIQMapCountNew(maxCacheCount) + if err != nil { + t.Fatal(err) + } + + prevHash := chainhash.Hash{} // genesis + headers := make([]chainhash.Hash, 0, maxCacheCount*2) + for i := 0; i < maxCacheCount; i++ { + h, bh := newHeader(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + headers = append(headers, h) + l.Put(bh) + prevHash = h + } + + // verify stats are 0 + s := l.Stats() + if !(s.Hits == 0 && s.Misses == 0 && s.Purges == 0) { + t.Fatal(spew.Sdump(s)) + } + + // retrieve all headers + for k := range headers { + if _, ok := l.Get(&headers[k]); !ok { + t.Fatalf("header not found: %v", headers[k]) + } + } + + // verify hits are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 0) { + t.Fatal(spew.Sdump(s)) + } + + // purge random cache entries + for i := maxCacheCount; i < maxCacheCount*2; i++ { + h, bh := newHeader(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + headers = append(headers, h) + l.Put(bh) + prevHash = h + } + + // verify purges are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 10) { + t.Fatal(spew.Sdump(s)) + } + + // Force a random miss + hm, _ := newHeader(&chainhash.Hash{}, 0xdeadbeef) + _, ok := l.Get(&hm) + if 
ok { + t.Fatal("non cached header found") + } + + // verify misses + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 1 && s.Purges == 10) { + t.Fatal(spew.Sdump(s)) + } + + t.Logf(spew.Sdump(s)) +} + +func intHash(b int) chainhash.Hash { + return chainhash.Hash{byte(b)} +} + +func TestHC(t *testing.T) { + _, err := lowIQMapSizeNew(0) + if err == nil { + t.Fatalf("expected invalid size error for size <= 0") + } + _, err = lowIQMapSizeNew(1) + if err == nil { + t.Fatalf("expected invalid count error for count <= 0") + } + size := 1024 + l, err := lowIQMapSizeNew(size) + if err != nil { + t.Fatal(err) + } + hs := intHash(0) + for range 2 { + l.Put(&tbcd.BlockHeader{ + Hash: hs, + }) + } + if len(l.m) > 1 { + t.Fatalf("duplicate headers not excluded by hash") + } + if _, ok := l.Get(&hs); !ok { + t.Fatalf("failed to retrieve header present in map") + } + hs = intHash(1) + if _, ok := l.Get(&hs); ok { + t.Fatalf("invalid header retrieved from Map") + } + for k := range l.count + 5 { + l.Put(&tbcd.BlockHeader{ + Hash: intHash(k), + }) + } + if len(l.m) > l.count { + t.Fatalf("map size exceeded bounds. expected %v, got %v", l.count, len(l.m)) + } + storedHashes := make([]*chainhash.Hash, 0, len(l.m)-1) + var lastHash *chainhash.Hash + for key := range l.m { + keyc := key + if len(storedHashes) >= len(l.m)-1 { + lastHash = &keyc + } else { + storedHashes = append(storedHashes, &keyc) + } + } + l.PurgeBatch(storedHashes) + if len(l.m) != 1 { + t.Fatalf("expected %d elements to be purged, purged %d", len(storedHashes), l.count-len(l.m)) + } + if _, ok := l.Get(lastHash); !ok { + t.Fatalf("incorrect element purged") + } +} diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index bd0a1676..557edbc7 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -1,10 +1,11 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. 
// Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. package level import ( + "fmt" "sync" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -12,12 +13,17 @@ import ( "github.com/hemilabs/heminetwork/database/tbcd" ) +const blockHeaderSize = 8 + 32 + 80 + 8 // rough size of tbcd.BlockHeader + type lowIQMap struct { - mtx sync.RWMutex + mtx sync.Mutex count int - m map[chainhash.Hash]*tbcd.BlockHeader + m map[chainhash.Hash]*tbcd.BlockHeader // 32+8+80+len([]Word ~ 8) + + // stats + c tbcd.CacheStats } func (l *lowIQMap) Put(v *tbcd.BlockHeader) { @@ -29,9 +35,10 @@ func (l *lowIQMap) Put(v *tbcd.BlockHeader) { } if len(l.m) >= l.count { - // evict entry + // evict random entry for k := range l.m { delete(l.m, k) + l.c.Purges++ break } } @@ -40,16 +47,54 @@ func (l *lowIQMap) Put(v *tbcd.BlockHeader) { } func (l *lowIQMap) Get(k *chainhash.Hash) (*tbcd.BlockHeader, bool) { - l.mtx.RLock() - defer l.mtx.RUnlock() + l.mtx.Lock() + defer l.mtx.Unlock() bh, ok := l.m[*k] + if ok { + l.c.Hits++ + } else { + l.c.Misses++ + } return bh, ok } -func lowIQMapNew(count int) *lowIQMap { +func (l *lowIQMap) PurgeBatch(ks []*chainhash.Hash) { + l.mtx.Lock() + defer l.mtx.Unlock() + + for v := range ks { + delete(l.m, *ks[v]) + l.c.Purges++ + } +} + +func (l *lowIQMap) Stats() tbcd.CacheStats { + l.mtx.Lock() + defer l.mtx.Unlock() + l.c.Items = len(l.m) + l.c.Size = len(l.m) * blockHeaderSize // rough size + return l.c +} + +func lowIQMapCountNew(count int) (*lowIQMap, error) { + if count <= 0 { + return nil, fmt.Errorf("invalid count: %v", count) + } return &lowIQMap{ count: count, m: make(map[chainhash.Hash]*tbcd.BlockHeader, count), + }, nil +} + +// lowIQMapSizeNew does a bit of math to estimate the number of cache items. +// Since it is an estimate it will overflow if Difficulty becomes bigger than +// 64 bits. This is not an issue since 100MB caches all of mainnet in Jan 2025 +// (~819200 items). 
+func lowIQMapSizeNew(size int) (*lowIQMap, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size: %v", size) } + // approximate number of headers + return lowIQMapCountNew(size / blockHeaderSize) } diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index a0c388a9..0e19e093 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -10,6 +10,7 @@ import ( "encoding/binary" "errors" "fmt" + "math" "math/big" "github.com/btcsuite/btcd/blockchain" @@ -17,7 +18,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - lru "github.com/hashicorp/golang-lru/v2" + "github.com/dustin/go-humanize" "github.com/juju/loggo" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/util" @@ -67,7 +68,7 @@ type ldb struct { pool level.Pool rawPool level.RawPool - blockCache *lru.Cache[chainhash.Hash, *btcutil.Block] // block cache + blockCache *lowIQLRU // Block Header cache. Note that it is only primed during reads. Doing // this during writes would be relatively expensive at nearly no gain. 
@@ -108,16 +109,38 @@ func headerHash(header []byte) *chainhash.Hash { } type Config struct { - Home string // home directory - BlockCache int // number of blocks to cache - BlockheaderCache int // number of blocks headers to cache + Home string // home directory + BlockCacheSize string // size of block cache + BlockheaderCacheSize string // size of block header cache + blockCacheSize int // parsed size of block cache + blockheaderCacheSize int // parsed size of block header cache } func NewConfig(home string) *Config { + blockheaderCacheSizeS := "128mb" // Cache all blockheaders on mainnet + blockheaderCacheSize, err := humanize.ParseBytes(blockheaderCacheSizeS) + if err != nil { + panic(err) + } + if blockheaderCacheSize > math.MaxInt64 { + panic("invalid blockheaderCacheSize") + } + + blockCacheSizeS := "1gb" // ~640 blocks on mainnet + blockCacheSize, err := humanize.ParseBytes(blockCacheSizeS) + if err != nil { + panic(err) + } + if blockCacheSize > math.MaxInt64 { + panic("invalid blockCacheSize") + } + return &Config{ - Home: home, // require user to set home. - BlockCache: 250, // max 4GB on mainnet - BlockheaderCache: 1e6, // Cache all blockheaders on mainnet + Home: home, // require user to set home. 
+ BlockCacheSize: blockCacheSizeS, + blockCacheSize: int(blockCacheSize), + BlockheaderCacheSize: blockheaderCacheSizeS, + blockheaderCacheSize: int(blockheaderCacheSize), } } @@ -137,19 +160,22 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { cfg: cfg, } - if cfg.BlockCache > 0 { - l.blockCache, err = lru.New[chainhash.Hash, *btcutil.Block](cfg.BlockCache) + if cfg.blockCacheSize > 0 { + l.blockCache, err = lowIQLRUSizeNew(cfg.blockCacheSize) if err != nil { return nil, fmt.Errorf("couldn't setup block cache: %w", err) } - log.Infof("block cache: %v", cfg.BlockCache) + log.Infof("block cache: %v", humanize.Bytes(uint64(cfg.blockCacheSize))) } else { log.Infof("block cache: DISABLED") } - if cfg.BlockheaderCache > 0 { - l.headerCache = lowIQMapNew(cfg.BlockheaderCache) - - log.Infof("blockheader cache: %v", cfg.BlockheaderCache) + if cfg.blockheaderCacheSize > 0 { + l.headerCache, err = lowIQMapSizeNew(cfg.blockheaderCacheSize) + if err != nil { + return nil, fmt.Errorf("couldn't setup block header cache: %w", err) + } + log.Infof("blockheader cache: %v", + humanize.Bytes(uint64(cfg.blockheaderCacheSize))) } else { log.Infof("blockheader cache: DISABLED") } @@ -316,7 +342,7 @@ func (l *ldb) BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*tbc log.Tracef("BlockHeaderByHash") defer log.Tracef("BlockHeaderByHash exit") - if l.cfg.BlockheaderCache > 0 { + if l.cfg.blockheaderCacheSize > 0 { // Try cache first if b, ok := l.headerCache.Get(hash); ok { return b, nil @@ -338,7 +364,7 @@ func (l *ldb) BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*tbc bh := decodeBlockHeader(ebh) // Insert into cache, roughly 150 byte cost. 
- if l.cfg.BlockheaderCache > 0 { + if l.cfg.blockheaderCacheSize > 0 { l.headerCache.Put(bh) } @@ -822,18 +848,34 @@ func (l *ldb) BlockHeadersRemove(ctx context.Context, bhs *wire.MsgHeaders, tipA bhsBatch := new(leveldb.Batch) hhBatch := new(leveldb.Batch) + var bhCacheBatch []*chainhash.Hash + if l.cfg.blockheaderCacheSize > 0 { + // cache batch to delete blockheaders + bhCacheBatch = make([]*chainhash.Hash, 0, len(headersParsed)) + } + // Insert each block header deletion into the batch (for header itself and // height-header association) for i := 0; i < len(headersParsed); i++ { // Delete header i bhash := headersParsed[i].BlockHash() fh := fullHeadersFromDb[i] + // Make db delete batch bhsBatch.Delete(bhash[:]) + // Remove from header cache as well in a batch + if l.cfg.blockheaderCacheSize > 0 { + bhCacheBatch = append(bhCacheBatch, &bhash) + } + // Delete height mapping for header i hhKey := heightHashToKey(fh.Height, bhash[:]) hhBatch.Delete(hhKey) } + if l.cfg.blockheaderCacheSize > 0 { + // Delete right away. Cache can always be rehydrated. 
+ l.headerCache.PurgeBatch(bhCacheBatch) + } // Insert updated canonical tip after removal of the provided block headers tipAfterRemovalEncoded := h2b(tipAfterRemoval) @@ -1242,8 +1284,8 @@ func (l *ldb) BlockInsert(ctx context.Context, b *btcutil.Block) (int64, error) if err = bDB.Insert(b.Hash()[:], raw); err != nil { return -1, fmt.Errorf("blocks insert put: %w", err) } - if l.cfg.BlockCache > 0 { - l.blockCache.Add(*b.Hash(), b) + if l.cfg.blockCacheSize > 0 { + l.blockCache.Put(b) } } @@ -1279,9 +1321,9 @@ func (l *ldb) BlockByHash(ctx context.Context, hash *chainhash.Hash) (*btcutil.B log.Tracef("BlockByHash") defer log.Tracef("BlockByHash exit") - if l.cfg.BlockCache > 0 { + if l.cfg.blockCacheSize > 0 { // Try cache first - if cb, ok := l.blockCache.Get(*hash); ok { + if cb, ok := l.blockCache.Get(hash); ok { return cb, nil } } @@ -1298,8 +1340,8 @@ func (l *ldb) BlockByHash(ctx context.Context, hash *chainhash.Hash) (*btcutil.B if err != nil { panic(fmt.Errorf("block decode data corruption: %v %w", hash, err)) } - if l.cfg.BlockCache > 0 { - l.blockCache.Add(*hash, b) + if l.cfg.blockCacheSize > 0 { + l.blockCache.Put(b) } return b, nil } @@ -1613,3 +1655,11 @@ func (l *ldb) BlockTxUpdate(ctx context.Context, direction int, txs map[tbcd.TxK return nil } + +func (l *ldb) BlockHeaderCacheStats() tbcd.CacheStats { + return l.headerCache.Stats() +} + +func (l *ldb) BlockCacheStats() tbcd.CacheStats { + return l.blockCache.Stats() +} diff --git a/go.mod b/go.mod index 944445c6..8dde509b 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/ethereum/go-ethereum v1.14.8 github.com/go-test/deep v1.1.1 - github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/juju/loggo v1.0.0 github.com/lib/pq v1.10.9 github.com/mitchellh/go-homedir v1.1.0 diff --git a/go.sum b/go.sum index ad8051a2..5fe5ab3e 100644 --- a/go.sum +++ b/go.sum @@ -118,8 +118,6 @@ github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hemilabs/websocket v0.0.0-20240813101919-bf33653e9aa5 h1:rNJaauce7ZAC5tv7NaiwDtF6afSVeTvWejhFuK5gY6I= github.com/hemilabs/websocket v0.0.0-20240813101919-bf33653e9aa5/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index a2723ed1..a20a2c13 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -81,8 +81,8 @@ func init() { type Config struct { AutoIndex bool - BlockCache int - BlockheaderCache int + BlockCacheSize string + BlockheaderCacheSize string BlockSanity bool LevelDBHome string ListenAddress string @@ -107,15 +107,15 @@ type Config struct { func NewDefaultConfig() *Config { return &Config{ - ListenAddress: tbcapi.DefaultListen, - BlockCache: 250, - BlockheaderCache: 1e6, - LogLevel: logLevel, - MaxCachedTxs: defaultMaxCachedTxs, - MempoolEnabled: false, // XXX default to false until it is fixed - PeersWanted: defaultPeersWanted, - PrometheusNamespace: appName, - ExternalHeaderMode: false, // Default anyway, but for readability + ListenAddress: tbcapi.DefaultListen, + BlockCacheSize: "1gb", + BlockheaderCacheSize: "128mb", + LogLevel: logLevel, + MaxCachedTxs: defaultMaxCachedTxs, + MempoolEnabled: false, // XXX default to false until it is fixed + PeersWanted: defaultPeersWanted, + PrometheusNamespace: appName, + ExternalHeaderMode: false, // Default anyway, but for readability } } @@ 
-161,6 +161,8 @@ type Server struct { syncInfo SyncInfo connected, good, bad int mempoolCount, mempoolSize int + blockCache tbcd.CacheStats + headerCache tbcd.CacheStats } // periodically updated by promPoll isRunning bool cmdsProcessed prometheus.Counter @@ -671,6 +673,66 @@ func (s *Server) promMempoolSize() float64 { return deucalion.IntToFloat(s.prom.mempoolSize) } +func (s *Server) promBlockCacheHits() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Hits) +} + +func (s *Server) promBlockCacheMisses() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Misses) +} + +func (s *Server) promBlockCachePurges() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Purges) +} + +func (s *Server) promBlockCacheSize() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Size) +} + +func (s *Server) promBlockCacheItems() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Items) +} + +func (s *Server) promHeaderCacheHits() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Hits) +} + +func (s *Server) promHeaderCacheMisses() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Misses) +} + +func (s *Server) promHeaderCachePurges() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Purges) +} + +func (s *Server) promHeaderCacheSize() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Size) +} + +func (s *Server) promHeaderCacheItems() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Items) +} + func (s *Server) promPoll(ctx context.Context) error { for { select { @@ -681,6 +743,8 @@ func (s *Server) promPoll(ctx context.Context) 
error { s.prom.syncInfo = s.Synced(ctx) s.prom.connected, s.prom.good, s.prom.bad = s.pm.Stats() + s.prom.blockCache = s.db.BlockCacheStats() + s.prom.headerCache = s.db.BlockHeaderCacheStats() //if s.cfg.MempoolEnabled { // s.prom.mempoolCount, s.prom.mempoolSize = s.mempool.stats(ctx) //} @@ -688,10 +752,16 @@ func (s *Server) promPoll(ctx context.Context) error { if s.promPollVerbose { s.mtx.RLock() log.Infof("Pending blocks %v/%v connected peers %v "+ - "good peers %v bad peers %v mempool %v %v", + "good peers %v bad peers %v mempool %v %v "+ + "block cache hits: %v misses: %v purges: %v size: %v "+ + "blocks: %v", s.blocks.Len(), defaultPendingBlocks, s.prom.connected, s.prom.good, s.prom.bad, s.prom.mempoolCount, - humanize.Bytes(uint64(s.prom.mempoolSize))) + humanize.Bytes(uint64(s.prom.mempoolSize)), + s.prom.blockCache.Hits, s.prom.blockCache.Misses, + s.prom.blockCache.Purges, + humanize.Bytes(uint64(s.prom.blockCache.Size)), + s.prom.blockCache.Items) s.mtx.RUnlock() } @@ -2142,8 +2212,8 @@ func (s *Server) DBOpen(ctx context.Context) error { // Open db. 
var err error cfg := level.NewConfig(filepath.Join(s.cfg.LevelDBHome, s.cfg.Network)) - cfg.BlockCache = s.cfg.BlockCache - cfg.BlockheaderCache = s.cfg.BlockheaderCache + cfg.BlockCacheSize = s.cfg.BlockCacheSize + cfg.BlockheaderCacheSize = s.cfg.BlockheaderCacheSize s.db, err = level.New(ctx, cfg) if err != nil { return fmt.Errorf("open level database: %w", err) @@ -2224,6 +2294,56 @@ func (s *Server) Collectors() []prometheus.Collector { Name: "mempool_size_bytes", Help: "Size of mempool in bytes", }, s.promMempoolSize), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_hits", + Help: "Block cache hits", + }, s.promBlockCacheHits), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_misses", + Help: "Block cache misses", + }, s.promBlockCacheMisses), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_purges", + Help: "Block cache purges", + }, s.promBlockCachePurges), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_size", + Help: "Block cache size", + }, s.promBlockCacheSize), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_items", + Help: "Number of cached headers", + }, s.promHeaderCacheItems), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_hits", + Help: "Header cache hits", + }, s.promHeaderCacheHits), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_misses", + Help: "Header cache misses", + }, s.promHeaderCacheMisses), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_purges", + Help: "Header cache purges", + }, s.promHeaderCachePurges), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ 
+ Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_size", + Help: "Header cache size", + }, s.promHeaderCacheSize), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_items", + Help: "Number of cached blocks", + }, s.promBlockCacheItems), } } return s.promCollectors diff --git a/service/tbc/tbc_test.go b/service/tbc/tbc_test.go index e9b173a9..c3a84a4f 100644 --- a/service/tbc/tbc_test.go +++ b/service/tbc/tbc_test.go @@ -1053,8 +1053,8 @@ func createTbcServerExternalHeaderMode(ctx context.Context, t *testing.T) *Serve cfg.LevelDBHome = home cfg.ExternalHeaderMode = true cfg.Network = networkLocalnet - cfg.BlockCache = 0 - cfg.BlockheaderCache = 0 + cfg.BlockCacheSize = "" + cfg.BlockheaderCacheSize = "" cfg.MempoolEnabled = false tbcServer, err := NewServer(cfg) diff --git a/service/tbc/tbcfork_test.go b/service/tbc/tbcfork_test.go index 51d2d69e..24637042 100644 --- a/service/tbc/tbcfork_test.go +++ b/service/tbc/tbcfork_test.go @@ -893,11 +893,11 @@ func TestFork(t *testing.T) { // Connect tbc service cfg := &Config{ - AutoIndex: false, - BlockCache: 1000, - BlockheaderCache: 1000, - BlockSanity: false, - LevelDBHome: t.TempDir(), + AutoIndex: false, + BlockCacheSize: "10mb", + BlockheaderCacheSize: "1mb", + BlockSanity: false, + LevelDBHome: t.TempDir(), // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX Network: networkLocalnet, @@ -1130,12 +1130,12 @@ func TestIndexNoFork(t *testing.T) { // Connect tbc service cfg := &Config{ - AutoIndex: false, - BlockCache: 1000, - BlockheaderCache: 1000, - BlockSanity: false, - LevelDBHome: t.TempDir(), - ListenAddress: "localhost:8882", + AutoIndex: false, + BlockCacheSize: "10mb", + BlockheaderCacheSize: "1mb", + BlockSanity: false, + LevelDBHome: t.TempDir(), + ListenAddress: "localhost:8882", // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX Network: networkLocalnet, @@ -1301,12 +1301,12 @@ func 
TestIndexFork(t *testing.T) { // Connect tbc service cfg := &Config{ - AutoIndex: false, - BlockCache: 1000, - BlockheaderCache: 1000, - BlockSanity: false, - LevelDBHome: t.TempDir(), - ListenAddress: "localhost:8883", + AutoIndex: false, + BlockCacheSize: "10mb", + BlockheaderCacheSize: "1mb", + BlockSanity: false, + LevelDBHome: t.TempDir(), + ListenAddress: "localhost:8883", // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX Network: networkLocalnet,