From 4043b674eeda047da08eae4668388e0ef6dff25a Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Tue, 7 Jan 2025 18:13:56 +0000 Subject: [PATCH 01/18] back this up --- cmd/tbcd/tbcd.go | 12 ++++----- database/tbcd/level/headercache.go | 24 +++++++++++++++--- database/tbcd/level/level.go | 40 +++++++++++++++++++++--------- service/tbc/tbc.go | 29 ++++++++++++++-------- service/tbc/tbc_test.go | 2 +- service/tbc/tbcfork_test.go | 34 ++++++++++++------------- 6 files changed, 91 insertions(+), 50 deletions(-) diff --git a/cmd/tbcd/tbcd.go b/cmd/tbcd/tbcd.go index b519c233b..126b957c6 100644 --- a/cmd/tbcd/tbcd.go +++ b/cmd/tbcd/tbcd.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -25,7 +25,7 @@ const ( defaultLogLevel = daemonName + "=INFO;tbc=INFO;level=INFO" defaultNetwork = "testnet3" // XXX make this mainnet defaultHome = "~/." + daemonName - bhsDefault = int(1e6) // enough for mainnet + bhsDefaultSize = "128mb" // enough for mainnet ) var ( @@ -52,10 +52,10 @@ var ( Help: "number of cached blocks", Print: config.PrintAll, }, - "TBC_BLOCKHEADER_CACHE": config.Config{ - Value: &cfg.BlockheaderCache, - DefaultValue: bhsDefault, - Help: "number of cached blockheaders", + "TBC_BLOCKHEADER_CACHE_SIZE": config.Config{ + Value: &cfg.BlockheaderCacheSize, + DefaultValue: bhsDefaultSize, + Help: "size of blockheader cache", Print: config.PrintAll, }, "TBC_BLOCK_SANITY": config.Config{ diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index bd0a16761..b2de81907 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -1,10 +1,11 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. package level import ( + "fmt" "sync" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -12,12 +13,14 @@ import ( "github.com/hemilabs/heminetwork/database/tbcd" ) +const blockHeaderSize = 8 + 32 + 80 + 8 + type lowIQMap struct { mtx sync.RWMutex count int - m map[chainhash.Hash]*tbcd.BlockHeader + m map[chainhash.Hash]*tbcd.BlockHeader // 32+8+80+len([]Word ~ 8) } func (l *lowIQMap) Put(v *tbcd.BlockHeader) { @@ -47,9 +50,24 @@ func (l *lowIQMap) Get(k *chainhash.Hash) (*tbcd.BlockHeader, bool) { return bh, ok } -func lowIQMapNew(count int) *lowIQMap { +func lowIQMapNewCount(count int) (*lowIQMap, error) { + if count <= 0 { + return nil, fmt.Errorf("invalid size %v", count) + } return &lowIQMap{ count: count, m: make(map[chainhash.Hash]*tbcd.BlockHeader, count), + }, nil +} + +// lowIQMapNewSize does a bit of math to estimate the number of cache items. +// Since it is an estimate it will overflow if Difficulty becomes bigger than +// 64 bits. This is not an issue since 100MB caches all of mainnet in Jan 2025 +// (~819200 items). +func lowIQMapNewSize(size int) (*lowIQMap, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size %v", size) } + // approximate number of headers + return lowIQMapNewCount(blockHeaderSize / size) } diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index a0c388a9e..fb6547489 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. 
// Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -10,6 +10,7 @@ import ( "encoding/binary" "errors" "fmt" + "math" "math/big" "github.com/btcsuite/btcd/blockchain" @@ -17,6 +18,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/dustin/go-humanize" lru "github.com/hashicorp/golang-lru/v2" "github.com/juju/loggo" "github.com/syndtr/goleveldb/leveldb" @@ -108,16 +110,26 @@ func headerHash(header []byte) *chainhash.Hash { } type Config struct { - Home string // home directory - BlockCache int // number of blocks to cache - BlockheaderCache int // number of blocks headers to cache + Home string // home directory + BlockCache int // number of blocks to cache + BlockheaderCacheSize string // size of block header cache + blockheaderCacheSize int // paser size of block header cache, } func NewConfig(home string) *Config { + sizeS := "128mb" // Cache all blockheaders on mainnet + size, err := humanize.ParseBytes(sizeS) + if err != nil { + panic(err) + } + if size > math.MaxInt64 { + panic("invalid size") + } return &Config{ - Home: home, // require user to set home. - BlockCache: 250, // max 4GB on mainnet - BlockheaderCache: 1e6, // Cache all blockheaders on mainnet + Home: home, // require user to set home. + BlockCache: 250, + BlockheaderCacheSize: sizeS, + blockheaderCacheSize: int(size), } } @@ -146,10 +158,14 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { } else { log.Infof("block cache: DISABLED") } - if cfg.BlockheaderCache > 0 { - l.headerCache = lowIQMapNew(cfg.BlockheaderCache) + if cfg.blockheaderCacheSize > 0 { + l.headerCache, err = lowIQMapNewSize(cfg.blockheaderCacheSize) + if err != nil { + return nil, fmt.Errorf("couldn't setup block header cache: %w", err) + } - log.Infof("blockheader cache: %v", cfg.BlockheaderCache) + log.Infof("blockheader cache: %v", + humanize.Bytes(uint64(cfg.blockheaderCacheSize))) } else { log.Infof("blockheader cache: DISABLED") } @@ -316,7 +332,7 @@ func (l *ldb) BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*tbc log.Tracef("BlockHeaderByHash") defer log.Tracef("BlockHeaderByHash exit") - if l.cfg.BlockheaderCache > 0 { + if l.cfg.blockheaderCacheSize > 0 { // Try cache first if b, ok := l.headerCache.Get(hash); ok { return b, nil @@ -338,7 +354,7 @@ func (l *ldb) BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*tbc bh := decodeBlockHeader(ebh) // Insert into cache, roughly 150 byte cost. 
- if l.cfg.BlockheaderCache > 0 { + if l.cfg.blockheaderCacheSize > 0 { l.headerCache.Put(bh) } diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index a2723ed15..37f9017b5 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -82,7 +82,7 @@ func init() { type Config struct { AutoIndex bool BlockCache int - BlockheaderCache int + BlockheaderCacheSize string BlockSanity bool LevelDBHome string ListenAddress string @@ -107,15 +107,15 @@ type Config struct { func NewDefaultConfig() *Config { return &Config{ - ListenAddress: tbcapi.DefaultListen, - BlockCache: 250, - BlockheaderCache: 1e6, - LogLevel: logLevel, - MaxCachedTxs: defaultMaxCachedTxs, - MempoolEnabled: false, // XXX default to false until it is fixed - PeersWanted: defaultPeersWanted, - PrometheusNamespace: appName, - ExternalHeaderMode: false, // Default anyway, but for readability + ListenAddress: tbcapi.DefaultListen, + BlockCache: 250, + BlockheaderCacheSize: "128mb", + LogLevel: logLevel, + MaxCachedTxs: defaultMaxCachedTxs, + MempoolEnabled: false, // XXX default to false until it is fixed + PeersWanted: defaultPeersWanted, + PrometheusNamespace: appName, + ExternalHeaderMode: false, // Default anyway, but for readability } } @@ -174,6 +174,13 @@ func NewServer(cfg *Config) (*Server, error) { if cfg == nil { cfg = NewDefaultConfig() } + if cfg.BlockheaderCacheSize != "" { + var err error + cfg.blockheaderCacheSize, err = humanize.ParseBytes(cfg.BlockheaderCacheSize) + if err != nil { + return nil, fmt.Errorf("invalid blockheader cache size: %w", err) + } + } if cfg.MempoolEnabled { log.Infof("mempool forced disabled") @@ -2143,7 +2150,7 @@ func (s *Server) DBOpen(ctx context.Context) error { var err error cfg := level.NewConfig(filepath.Join(s.cfg.LevelDBHome, s.cfg.Network)) cfg.BlockCache = s.cfg.BlockCache - cfg.BlockheaderCache = s.cfg.BlockheaderCache + cfg.BlockheaderCacheSize = s.cfg.BlockheaderCacheSize s.db, err = level.New(ctx, cfg) if err != nil { return fmt.Errorf("open level database: %w", err) diff --git a/service/tbc/tbc_test.go b/service/tbc/tbc_test.go index e9b173a95..5b525a039 100644 --- a/service/tbc/tbc_test.go +++ b/service/tbc/tbc_test.go @@ -1054,7 +1054,7 @@ func createTbcServerExternalHeaderMode(ctx context.Context, t *testing.T) *Serve cfg.ExternalHeaderMode = true cfg.Network = networkLocalnet cfg.BlockCache = 0 - cfg.BlockheaderCache = 0 + cfg.BlockheaderCacheSize = "1mb" cfg.MempoolEnabled = false tbcServer, err := NewServer(cfg) diff --git a/service/tbc/tbcfork_test.go b/service/tbc/tbcfork_test.go index 51d2d69ed..f38d9a39e 100644 --- a/service/tbc/tbcfork_test.go +++ b/service/tbc/tbcfork_test.go @@ -893,11 +893,11 @@ func TestFork(t *testing.T) { // Connect tbc service cfg := &Config{ - AutoIndex: false, - BlockCache: 1000, - BlockheaderCache: 1000, - BlockSanity: false, - LevelDBHome: t.TempDir(), + AutoIndex: false, + BlockCache: 1000, + BlockheaderCacheSize: "1mb", + BlockSanity: false, + LevelDBHome: t.TempDir(), // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX Network: networkLocalnet, @@ -1130,12 +1130,12 @@ func TestIndexNoFork(t *testing.T) { // Connect tbc service cfg := &Config{ - AutoIndex: false, - BlockCache: 1000, - BlockheaderCache: 1000, - BlockSanity: false, - LevelDBHome: t.TempDir(), - ListenAddress: "localhost:8882", + AutoIndex: false, + BlockCache: 1000, + BlockheaderCacheSize: "1mb", + BlockSanity: false, + LevelDBHome: t.TempDir(), + ListenAddress: "localhost:8882", // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 
1000, // XXX Network: networkLocalnet, @@ -1301,12 +1301,12 @@ func TestIndexFork(t *testing.T) { // Connect tbc service cfg := &Config{ - AutoIndex: false, - BlockCache: 1000, - BlockheaderCache: 1000, - BlockSanity: false, - LevelDBHome: t.TempDir(), - ListenAddress: "localhost:8883", + AutoIndex: false, + BlockCache: 1000, + BlockheaderCacheSize: "1mb", + BlockSanity: false, + LevelDBHome: t.TempDir(), + ListenAddress: "localhost:8883", // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX Network: networkLocalnet, From ca32e2476ad3a5da0d036ab15746d682a2bdc49e Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Wed, 8 Jan 2025 10:48:13 +0000 Subject: [PATCH 02/18] Backup cache purge --- database/tbcd/level/headercache.go | 11 +++++++++-- database/tbcd/level/level.go | 6 +++++- service/tbc/tbc.go | 7 ------- service/tbc/tbc_test.go | 2 +- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index b2de81907..35e384107 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -50,9 +50,16 @@ func (l *lowIQMap) Get(k *chainhash.Hash) (*tbcd.BlockHeader, bool) { return bh, ok } +func (l *lowIQMap) Purge(k *chainhash.Hash) { + l.mtx.RLock() + defer l.mtx.RUnlock() + + delete(l.m, *k) +} + func lowIQMapNewCount(count int) (*lowIQMap, error) { if count <= 0 { - return nil, fmt.Errorf("invalid size %v", count) + return nil, fmt.Errorf("invalid count %v", count) } return &lowIQMap{ count: count, @@ -69,5 +76,5 @@ func lowIQMapNewSize(size int) (*lowIQMap, error) { return nil, fmt.Errorf("invalid size %v", size) } // approximate number of headers - return lowIQMapNewCount(blockHeaderSize / size) + return lowIQMapNewCount(size / blockHeaderSize) } diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index fb6547489..f84f9326b 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -161,9 +161,9 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { if cfg.blockheaderCacheSize > 0 { l.headerCache, err = lowIQMapNewSize(cfg.blockheaderCacheSize) if err != nil { + panic(spew.Sdump(cfg)) return nil, fmt.Errorf("couldn't setup block header cache: %w", err) } - log.Infof("blockheader cache: %v", humanize.Bytes(uint64(cfg.blockheaderCacheSize))) } else { @@ -845,6 +845,10 @@ func (l *ldb) BlockHeadersRemove(ctx context.Context, bhs *wire.MsgHeaders, tipA bhash := headersParsed[i].BlockHash() fh := fullHeadersFromDb[i] bhsBatch.Delete(bhash[:]) + // Make batch + if l.cfg.blockheaderCacheSize > 0 { + l.headerCache.Purge(&bhash) + } // Delete height mapping for header i hhKey := heightHashToKey(fh.Height, bhash[:]) diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index 37f9017b5..ad5484fe3 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -174,13 +174,6 @@ func NewServer(cfg *Config) (*Server, error) { if cfg == nil { cfg = NewDefaultConfig() } - if cfg.BlockheaderCacheSize != "" { - var err error - cfg.blockheaderCacheSize, err = humanize.ParseBytes(cfg.BlockheaderCacheSize) - if err != nil { - return nil, fmt.Errorf("invalid blockheader cache size: %w", err) - } - } if cfg.MempoolEnabled { log.Infof("mempool forced disabled") diff --git a/service/tbc/tbc_test.go b/service/tbc/tbc_test.go index 5b525a039..db3c6e325 100644 --- a/service/tbc/tbc_test.go +++ b/service/tbc/tbc_test.go @@ -1054,7 +1054,7 @@ func createTbcServerExternalHeaderMode(ctx context.Context, t *testing.T) *Serve cfg.ExternalHeaderMode = 
true cfg.Network = networkLocalnet cfg.BlockCache = 0 - cfg.BlockheaderCacheSize = "1mb" + cfg.BlockheaderCacheSize = "" cfg.MempoolEnabled = false tbcServer, err := NewServer(cfg) From 9f05309d2bab1970098eda9996c4f4a97938b12d Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Wed, 8 Jan 2025 12:48:47 +0000 Subject: [PATCH 03/18] Joshua comments --- database/tbcd/level/headercache.go | 17 +++++++++++++---- database/tbcd/level/level.go | 1 - 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index 35e384107..5edaf2afc 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -51,15 +51,24 @@ func (l *lowIQMap) Get(k *chainhash.Hash) (*tbcd.BlockHeader, bool) { } func (l *lowIQMap) Purge(k *chainhash.Hash) { - l.mtx.RLock() - defer l.mtx.RUnlock() + l.mtx.Lock() + defer l.mtx.Unlock() delete(l.m, *k) } +func (l *lowIQMap) PurgeBatch(ks []*chainhash.Hash) { + l.mtx.Lock() + defer l.mtx.Unlock() + + for v := range ks { + delete(l.m, *ks[v]) + } +} + func lowIQMapNewCount(count int) (*lowIQMap, error) { if count <= 0 { - return nil, fmt.Errorf("invalid count %v", count) + return nil, fmt.Errorf("invalid count: %v", count) } return &lowIQMap{ count: count, @@ -73,7 +82,7 @@ func lowIQMapNewCount(count int) (*lowIQMap, error) { // (~819200 items). func lowIQMapNewSize(size int) (*lowIQMap, error) { if size <= 0 { - return nil, fmt.Errorf("invalid size %v", size) + return nil, fmt.Errorf("invalid size: %v", size) } // approximate number of headers return lowIQMapNewCount(size / blockHeaderSize) diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index f84f9326b..795126a1a 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -161,7 +161,6 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { if cfg.blockheaderCacheSize > 0 { l.headerCache, err = lowIQMapNewSize(cfg.blockheaderCacheSize) if err != nil { - panic(spew.Sdump(cfg)) return nil, fmt.Errorf("couldn't setup block header cache: %w", err) } log.Infof("blockheader cache: %v", From e1350a25ac76f0fd43228b1da1faf306a4db164a Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Wed, 8 Jan 2025 13:02:28 +0000 Subject: [PATCH 04/18] Purge cache in batches instead of taking the mutex all the time --- database/tbcd/level/level.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index 795126a1a..5e4397404 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -837,22 +837,34 @@ func (l *ldb) BlockHeadersRemove(ctx context.Context, bhs *wire.MsgHeaders, tipA bhsBatch := new(leveldb.Batch) hhBatch := new(leveldb.Batch) + var bhCacheBatch []*chainhash.Hash + if l.cfg.blockheaderCacheSize > 0 { + // cache batch to delete blockheaders + bhCacheBatch = make([]*chainhash.Hash, 0, len(headersParsed)) + } + // Insert each block header deletion into the batch (for header itself and // height-header association) for i := 0; i < len(headersParsed); i++ { // Delete header i bhash := headersParsed[i].BlockHash() fh := fullHeadersFromDb[i] + // Make db delete batch bhsBatch.Delete(bhash[:]) - // Make batch + + // Remove from header cache as well in a batch if l.cfg.blockheaderCacheSize > 0 { - l.headerCache.Purge(&bhash) + bhCacheBatch = append(bhCacheBatch, &bhash) } // Delete height mapping for header i hhKey := heightHashToKey(fh.Height, bhash[:]) hhBatch.Delete(hhKey) } + 
if l.cfg.blockheaderCacheSize > 0 { + // Delete right away. Cache can always be rehydrated. + l.headerCache.PurgeBatch(bhCacheBatch) + } // Insert updated canonical tip after removal of the provided block headers tipAfterRemovalEncoded := h2b(tipAfterRemoval) From d30087877625ae9acd9c5771fdb1f5a055d7bb5c Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Wed, 8 Jan 2025 13:15:37 +0000 Subject: [PATCH 05/18] One more Joshua comment --- database/tbcd/level/headercache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index 5edaf2afc..b9fb6a007 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -13,7 +13,7 @@ import ( "github.com/hemilabs/heminetwork/database/tbcd" ) -const blockHeaderSize = 8 + 32 + 80 + 8 +const blockHeaderSize = 8 + 32 + 80 + 8 // rough size of tbcd.BlockHeader type lowIQMap struct { mtx sync.RWMutex From e5e75fe7e9e06d58126a6a570191802088681e63 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Wed, 8 Jan 2025 15:51:54 +0000 Subject: [PATCH 06/18] nominally working of block cache minus eviction --- cmd/tbcd/tbcd.go | 9 ++-- database/tbcd/level/blockcache.go | 78 +++++++++++++++++++++++++++++++ database/tbcd/level/level.go | 51 ++++++++++++-------- go.mod | 1 - go.sum | 2 - service/tbc/tbc.go | 6 +-- service/tbc/tbc_test.go | 2 +- service/tbc/tbcfork_test.go | 6 +-- 8 files changed, 121 insertions(+), 34 deletions(-) create mode 100644 database/tbcd/level/blockcache.go diff --git a/cmd/tbcd/tbcd.go b/cmd/tbcd/tbcd.go index 126b957c6..6d8234d67 100644 --- a/cmd/tbcd/tbcd.go +++ b/cmd/tbcd/tbcd.go @@ -25,6 +25,7 @@ const ( defaultLogLevel = daemonName + "=INFO;tbc=INFO;level=INFO" defaultNetwork = "testnet3" // XXX make this mainnet defaultHome = "~/." + daemonName + bDefaultSize = "2gb" // ~1280 blocks on mainnet bhsDefaultSize = "128mb" // enough for mainnet ) @@ -46,10 +47,10 @@ var ( Help: "enable auto utxo and tx indexes", Print: config.PrintAll, }, - "TBC_BLOCK_CACHE": config.Config{ - Value: &cfg.BlockCache, - DefaultValue: 250, - Help: "number of cached blocks", + "TBC_BLOCK_CACHE_SIZE": config.Config{ + Value: &cfg.BlockCacheSize, + DefaultValue: bDefaultSize, + Help: "size of block cache", Print: config.PrintAll, }, "TBC_BLOCKHEADER_CACHE_SIZE": config.Config{ diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go new file mode 100644 index 000000000..084a7a507 --- /dev/null +++ b/database/tbcd/level/blockcache.go @@ -0,0 +1,78 @@ +// Copyright (c) 2025 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package level + +import ( + "fmt" + "sync" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +const blockSize = 1677721 // ~1.6MB rough size of a mainnet block as of Jan 2025 + +type lowIQLRU struct { + mtx sync.RWMutex + + size int // this is the approximate max size + + m map[chainhash.Hash][]byte + totalSize int +} + +func (l *lowIQLRU) Put(v *btcutil.Block) { + l.mtx.Lock() + defer l.mtx.Unlock() + + hash := v.Hash() + if _, ok := l.m[*hash]; ok { + return + } + + block, err := v.Bytes() + if err != nil { + panic(err) + // XXX don't cache but panic for now for diagnostic + } + + // XXX add eviction here + if l.totalSize+len(block) >= l.size { + panic("evict") + } + + l.m[*hash] = block + l.totalSize += len(block) +} + +func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { + l.mtx.RLock() + defer l.mtx.RUnlock() + + be, ok := l.m[*k] + if !ok { + return nil, false + } + b, err := btcutil.NewBlockFromBytes(be) + if err != nil { + panic(err) // XXX delete from cache and return nil, false but panic for diagnostics at this time + } + return b, true +} + +func lowIQLRUNewSize(size int) (*lowIQLRU, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size: %v", size) + } + // approximate number of blocks + count := size / blockSize + if count <= 0 { + return nil, fmt.Errorf("invalid count: %v", count) + } + return &lowIQLRU{ + size: size, + m: make(map[chainhash.Hash][]byte, count), + }, nil +} diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index 5e4397404..4eeb6d061 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -19,7 +19,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/dustin/go-humanize" - lru "github.com/hashicorp/golang-lru/v2" "github.com/juju/loggo" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/util" @@ -69,7 +68,7 @@ type ldb struct { pool level.Pool rawPool level.RawPool - blockCache *lru.Cache[chainhash.Hash, *btcutil.Block] // block cache + blockCache *lowIQLRU // Block Header cache. Note that it is only primed during reads. Doing // this during writes would be relatively expensive at nearly no gain. @@ -111,25 +110,37 @@ func headerHash(header []byte) *chainhash.Hash { type Config struct { Home string // home directory - BlockCache int // number of blocks to cache + BlockCacheSize string // size of block cache BlockheaderCacheSize string // size of block header cache - blockheaderCacheSize int // paser size of block header cache, + blockCacheSize int // parsed size of block cache + blockheaderCacheSize int // parsed size of block header cache } func NewConfig(home string) *Config { - sizeS := "128mb" // Cache all blockheaders on mainnet - size, err := humanize.ParseBytes(sizeS) + blockheaderCacheSizeS := "128mb" // Cache all blockheaders on mainnet + blockheaderCacheSize, err := humanize.ParseBytes(blockheaderCacheSizeS) if err != nil { panic(err) } - if size > math.MaxInt64 { - panic("invalid size") + if blockheaderCacheSize > math.MaxInt64 { + panic("invalid blockheaderCacheSize") } + + blockCacheSizeS := "2gb" // ~512 blocks on mainnet + blockCacheSize, err := humanize.ParseBytes(blockCacheSizeS) + if err != nil { + panic(err) + } + if blockCacheSize > math.MaxInt64 { + panic("invalid blockCacheSize") + } + return &Config{ Home: home, // require user to set home. 
- BlockCache: 250, - BlockheaderCacheSize: sizeS, - blockheaderCacheSize: int(size), + BlockCacheSize: blockCacheSizeS, + blockCacheSize: int(blockCacheSize), + BlockheaderCacheSize: blockheaderCacheSizeS, + blockheaderCacheSize: int(blockheaderCacheSize), } } @@ -149,12 +160,12 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { cfg: cfg, } - if cfg.BlockCache > 0 { - l.blockCache, err = lru.New[chainhash.Hash, *btcutil.Block](cfg.BlockCache) + if cfg.blockCacheSize > 0 { + l.blockCache, err = lowIQLRUNewSize(cfg.blockCacheSize) if err != nil { return nil, fmt.Errorf("couldn't setup block cache: %w", err) } - log.Infof("block cache: %v", cfg.BlockCache) + log.Infof("block cache: %v", humanize.Bytes(uint64(cfg.blockCacheSize))) } else { log.Infof("block cache: DISABLED") } @@ -1273,8 +1284,8 @@ func (l *ldb) BlockInsert(ctx context.Context, b *btcutil.Block) (int64, error) if err = bDB.Insert(b.Hash()[:], raw); err != nil { return -1, fmt.Errorf("blocks insert put: %w", err) } - if l.cfg.BlockCache > 0 { - l.blockCache.Add(*b.Hash(), b) + if l.cfg.blockCacheSize > 0 { + l.blockCache.Put(b) } } @@ -1310,9 +1321,9 @@ func (l *ldb) BlockByHash(ctx context.Context, hash *chainhash.Hash) (*btcutil.B log.Tracef("BlockByHash") defer log.Tracef("BlockByHash exit") - if l.cfg.BlockCache > 0 { + if l.cfg.blockCacheSize > 0 { // Try cache first - if cb, ok := l.blockCache.Get(*hash); ok { + if cb, ok := l.blockCache.Get(hash); ok { return cb, nil } } @@ -1329,8 +1340,8 @@ func (l *ldb) BlockByHash(ctx context.Context, hash *chainhash.Hash) (*btcutil.B if err != nil { panic(fmt.Errorf("block decode data corruption: %v %w", hash, err)) } - if l.cfg.BlockCache > 0 { - l.blockCache.Add(*hash, b) + if l.cfg.blockCacheSize > 0 { + l.blockCache.Put(b) } return b, nil } diff --git a/go.mod b/go.mod index 944445c6c..8dde509b6 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/ethereum/go-ethereum v1.14.8 github.com/go-test/deep v1.1.1 - github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/juju/loggo v1.0.0 github.com/lib/pq v1.10.9 github.com/mitchellh/go-homedir v1.1.0 diff --git a/go.sum b/go.sum index ad8051a25..5fe5ab3e1 100644 --- a/go.sum +++ b/go.sum @@ -118,8 +118,6 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hemilabs/websocket v0.0.0-20240813101919-bf33653e9aa5 h1:rNJaauce7ZAC5tv7NaiwDtF6afSVeTvWejhFuK5gY6I= github.com/hemilabs/websocket v0.0.0-20240813101919-bf33653e9aa5/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index ad5484fe3..a1f8e9a06 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -81,7 +81,7 @@ func init() { type Config struct { AutoIndex bool - BlockCache int + BlockCacheSize string BlockheaderCacheSize string BlockSanity bool LevelDBHome string @@ -108,7 +108,7 @@ type Config struct { func NewDefaultConfig() *Config { return &Config{ 
ListenAddress: tbcapi.DefaultListen, - BlockCache: 250, + BlockCacheSize: "2gb", BlockheaderCacheSize: "128mb", LogLevel: logLevel, MaxCachedTxs: defaultMaxCachedTxs, @@ -2142,7 +2142,7 @@ func (s *Server) DBOpen(ctx context.Context) error { // Open db. var err error cfg := level.NewConfig(filepath.Join(s.cfg.LevelDBHome, s.cfg.Network)) - cfg.BlockCache = s.cfg.BlockCache + cfg.BlockCacheSize = s.cfg.BlockCacheSize cfg.BlockheaderCacheSize = s.cfg.BlockheaderCacheSize s.db, err = level.New(ctx, cfg) if err != nil { diff --git a/service/tbc/tbc_test.go b/service/tbc/tbc_test.go index db3c6e325..c3a84a4f6 100644 --- a/service/tbc/tbc_test.go +++ b/service/tbc/tbc_test.go @@ -1053,7 +1053,7 @@ func createTbcServerExternalHeaderMode(ctx context.Context, t *testing.T) *Serve cfg.LevelDBHome = home cfg.ExternalHeaderMode = true cfg.Network = networkLocalnet - cfg.BlockCache = 0 + cfg.BlockCacheSize = "" cfg.BlockheaderCacheSize = "" cfg.MempoolEnabled = false diff --git a/service/tbc/tbcfork_test.go b/service/tbc/tbcfork_test.go index f38d9a39e..246370420 100644 --- a/service/tbc/tbcfork_test.go +++ b/service/tbc/tbcfork_test.go @@ -894,7 +894,7 @@ func TestFork(t *testing.T) { // Connect tbc service cfg := &Config{ AutoIndex: false, - BlockCache: 1000, + BlockCacheSize: "10mb", BlockheaderCacheSize: "1mb", BlockSanity: false, LevelDBHome: t.TempDir(), @@ -1131,7 +1131,7 @@ func TestIndexNoFork(t *testing.T) { // Connect tbc service cfg := &Config{ AutoIndex: false, - BlockCache: 1000, + BlockCacheSize: "10mb", BlockheaderCacheSize: "1mb", BlockSanity: false, LevelDBHome: t.TempDir(), @@ -1302,7 +1302,7 @@ func TestIndexFork(t *testing.T) { // Connect tbc service cfg := &Config{ AutoIndex: false, - BlockCache: 1000, + BlockCacheSize: "10mb", BlockheaderCacheSize: "1mb", BlockSanity: false, LevelDBHome: t.TempDir(), From 2d2ce019cee36d35a8790ea74e614428dbc5f287 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Wed, 8 Jan 2025 16:15:52 +0000 Subject: [PATCH 07/18] Remove unused Purge --- database/tbcd/level/headercache.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index b9fb6a007..803a69985 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -50,13 +50,6 @@ func (l *lowIQMap) Get(k *chainhash.Hash) (*tbcd.BlockHeader, bool) { return bh, ok } -func (l *lowIQMap) Purge(k *chainhash.Hash) { - l.mtx.Lock() - defer l.mtx.Unlock() - - delete(l.m, *k) -} - func (l *lowIQMap) PurgeBatch(ks []*chainhash.Hash) { l.mtx.Lock() defer l.mtx.Unlock() From ee0ffd29cdc0344e72c5d0f85c4840a477aafb71 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 09:19:58 +0000 Subject: [PATCH 08/18] Add LRU mechanics --- database/tbcd/level/blockcache.go | 36 +++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go index 084a7a507..b59dabab3 100644 --- a/database/tbcd/level/blockcache.go +++ b/database/tbcd/level/blockcache.go @@ -5,6 +5,7 @@ package level import ( + "container/list" "fmt" "sync" @@ -14,13 +15,21 @@ import ( const blockSize = 1677721 // ~1.6MB rough size of a mainnet block as of Jan 2025 +type timeBlock struct { + element *list.Element + block []byte +} + type lowIQLRU struct { mtx sync.RWMutex size int // this is the approximate max size - m map[chainhash.Hash][]byte + m map[chainhash.Hash]timeBlock totalSize int + + // lru list + l *list.List } func (l 
*lowIQLRU) Put(v *btcutil.Block) { @@ -38,12 +47,22 @@ func (l *lowIQLRU) Put(v *btcutil.Block) { // XXX don't cache but panic for now for diagnostic } - // XXX add eviction here + // evict first element in list if l.totalSize+len(block) >= l.size { - panic("evict") + // LET THEM EAT PANIC + re := l.l.Front() + rha := l.l.Remove(re) + rh := rha.(chainhash.Hash) + l.totalSize -= len(l.m[rh].block) + delete(l.m, rh) } - l.m[*hash] = block + // lru list + element := &list.Element{Value: hash} + l.l.PushBack(element) + + // block lookup + l.m[*hash] = timeBlock{element: element, block: block} l.totalSize += len(block) } @@ -55,10 +74,14 @@ func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { if !ok { return nil, false } - b, err := btcutil.NewBlockFromBytes(be) + b, err := btcutil.NewBlockFromBytes(be.block) if err != nil { panic(err) // XXX delete from cache and return nil, false but panic for diagnostics at this time } + + // update access + l.l.MoveToBack(be.element) + return b, true } @@ -73,6 +96,7 @@ func lowIQLRUNewSize(size int) (*lowIQLRU, error) { } return &lowIQLRU{ size: size, - m: make(map[chainhash.Hash][]byte, count), + m: make(map[chainhash.Hash]timeBlock, count), + l: list.New(), }, nil } From 5fac4b02e3160469632f9def2d3ac249a39df1cc Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 12:38:58 +0000 Subject: [PATCH 09/18] Add tests --- database/tbcd/level/blockcache.go | 46 ++++++++++++++++------ database/tbcd/level/cache_test.go | 65 +++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 12 deletions(-) create mode 100644 database/tbcd/level/cache_test.go diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go index b59dabab3..9b858c97c 100644 --- a/database/tbcd/level/blockcache.go +++ b/database/tbcd/level/blockcache.go @@ -13,23 +13,32 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" ) -const blockSize = 1677721 // ~1.6MB rough size of a mainnet block as of Jan 2025 +var blockSize = 1677721 // ~1.6MB rough size of a mainnet block as of Jan 2025 -type timeBlock struct { +type blockElement struct { element *list.Element block []byte } +type CacheStats struct { + Hits int + Misses int + Purges int +} + type lowIQLRU struct { mtx sync.RWMutex size int // this is the approximate max size - m map[chainhash.Hash]timeBlock + m map[chainhash.Hash]blockElement totalSize int - // lru list + // lru list, when used move to back of the list l *list.List + + // stats + c CacheStats } func (l *lowIQLRU) Put(v *btcutil.Block) { @@ -43,18 +52,21 @@ func (l *lowIQLRU) Put(v *btcutil.Block) { block, err := v.Bytes() if err != nil { + // data corruption, panic panic(err) - // XXX don't cache but panic for now for diagnostic } // evict first element in list - if l.totalSize+len(block) >= l.size { + if l.totalSize+len(block) > l.size { // LET THEM EAT PANIC re := l.l.Front() rha := l.l.Remove(re) - rh := rha.(chainhash.Hash) - l.totalSize -= len(l.m[rh].block) - delete(l.m, rh) + // fmt.Printf("rha %v\n", spew.Sdump(rha)) + // fmt.Printf("==== re %T rha %T\n", re, rha) + rh := rha.(*list.Element).Value.(*chainhash.Hash) + l.totalSize -= len(l.m[*rh].block) + delete(l.m, *rh) + l.c.Purges++ } // lru list @@ -62,7 +74,7 @@ func (l *lowIQLRU) Put(v *btcutil.Block) { l.l.PushBack(element) // block lookup - l.m[*hash] = timeBlock{element: element, block: block} + l.m[*hash] = blockElement{element: element, block: block} l.totalSize += len(block) } @@ -72,19 +84,29 @@ func (l *lowIQLRU) Get(k *chainhash.Hash) 
(*btcutil.Block, bool) { be, ok := l.m[*k] if !ok { + l.c.Misses++ return nil, false } b, err := btcutil.NewBlockFromBytes(be.block) if err != nil { - panic(err) // XXX delete from cache and return nil, false but panic for diagnostics at this time + // panic for diagnostics at this time + panic(err) } // update access l.l.MoveToBack(be.element) + l.c.Hits++ + return b, true } +func (l *lowIQLRU) Stats() CacheStats { + l.mtx.RLock() + defer l.mtx.RUnlock() + return l.c +} + func lowIQLRUNewSize(size int) (*lowIQLRU, error) { if size <= 0 { return nil, fmt.Errorf("invalid size: %v", size) @@ -96,7 +118,7 @@ func lowIQLRUNewSize(size int) (*lowIQLRU, error) { } return &lowIQLRU{ size: size, - m: make(map[chainhash.Hash]timeBlock, count), + m: make(map[chainhash.Hash]blockElement, count), l: list.New(), }, nil } diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go new file mode 100644 index 000000000..b625d015b --- /dev/null +++ b/database/tbcd/level/cache_test.go @@ -0,0 +1,65 @@ +package level + +import ( + "testing" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" +) + +func newBlock(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *btcutil.Block) { + bh := wire.NewBlockHeader(0, prevHash, &chainhash.Hash{}, 0, uint32(nonce)) + b := wire.NewMsgBlock(bh) + return bh.BlockHash(), btcutil.NewBlock(b) +} + +func TestLRUCache(t *testing.T) { + maxCache := 10 + blockSize = 81 // we'll use empty blocks + l, err := lowIQLRUNewSize(blockSize * maxCache) + if err != nil { + t.Fatal(err) + } + + prevHash := chainhash.Hash{} // genesis + blocks := make([]chainhash.Hash, 0, maxCache*2) + for i := 0; i < maxCache; i++ { + h, b := newBlock(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + blocks = append(blocks, h) + l.Put(b) + prevHash = h + } + + // verify stats are 0 + s := l.Stats() + if s.Hits != 0 && s.Misses != 0 && s.Purges != 0 { + t.Fatal(spew.Sdump(s)) + } + + // retrieve all blocks + for k := range blocks { + if _, ok := l.Get(&blocks[k]); !ok { + t.Fatalf("block not found: %v", blocks[k]) + } + } + + // verify hits are maxBlocks + s = l.Stats() + if s.Hits != 10 && s.Misses != 0 && s.Purges != 0 { + t.Fatal(spew.Sdump(s)) + } + + // purge oldest cache entries + for i := maxCache; i < maxCache*2; i++ { + h, b := newBlock(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + blocks = append(blocks, h) + l.Put(b) + prevHash = h + } + + t.Logf("%v", spew.Sdump(l.Stats())) +} From 2bf9cfd42ebd2ea0859d37badc62b1b90e992567 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 13:15:50 +0000 Subject: [PATCH 10/18] Fix insert of element --- database/tbcd/level/blockcache.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go index 9b858c97c..54a84d075 100644 --- a/database/tbcd/level/blockcache.go +++ b/database/tbcd/level/blockcache.go @@ -61,20 +61,14 @@ func (l *lowIQLRU) Put(v *btcutil.Block) { // LET THEM EAT PANIC re := l.l.Front() rha := l.l.Remove(re) - // fmt.Printf("rha %v\n", spew.Sdump(rha)) - // fmt.Printf("==== re %T rha %T\n", re, rha) - rh := rha.(*list.Element).Value.(*chainhash.Hash) + rh := rha.(*chainhash.Hash) l.totalSize -= len(l.m[*rh].block) delete(l.m, *rh) l.c.Purges++ } - // lru list - element := &list.Element{Value: hash} - l.l.PushBack(element) - - // block lookup - l.m[*hash] = blockElement{element: element, block: block} + 
// block lookup and lru append + l.m[*hash] = blockElement{element: l.l.PushBack(hash), block: block} l.totalSize += len(block) } From 78ea7c9c2af4654afc97aa309e88f105a6598aa8 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 13:59:53 +0000 Subject: [PATCH 11/18] Test misses and purges as well --- database/tbcd/level/cache_test.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go index b625d015b..3479e2ff0 100644 --- a/database/tbcd/level/cache_test.go +++ b/database/tbcd/level/cache_test.go @@ -61,5 +61,25 @@ func TestLRUCache(t *testing.T) { prevHash = h } - t.Logf("%v", spew.Sdump(l.Stats())) + // verify purges are maxBlocks + s = l.Stats() + if s.Hits != 10 && s.Misses != 0 && s.Purges != 10 { + t.Fatal(spew.Sdump(s)) + } + + // retrieve purged blocks + for k := range blocks { + if k >= maxCache { + break + } + if _, ok := l.Get(&blocks[k]); ok { + t.Fatalf("block found: %v", blocks[k]) + } + } + + // verify misses are maxBlocks + s = l.Stats() + if s.Hits != 10 && s.Misses != 10 && s.Purges != 10 { + t.Fatal(spew.Sdump(s)) + } } From e0a9bf8a326db9ad01c892030b07409ca8a90dd6 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 15:20:39 +0000 Subject: [PATCH 12/18] Limit cache to 1gb by default and rig up prometheus --- cmd/tbcd/tbcd.go | 2 +- database/tbcd/database.go | 12 +++++- database/tbcd/level/blockcache.go | 15 ++++--- database/tbcd/level/cache_test.go | 2 + database/tbcd/level/level.go | 6 ++- service/tbc/tbc.go | 70 +++++++++++++++++++++++++++++-- 6 files changed, 93 insertions(+), 14 deletions(-) diff --git a/cmd/tbcd/tbcd.go b/cmd/tbcd/tbcd.go index 6d8234d67..32edd3825 100644 --- a/cmd/tbcd/tbcd.go +++ b/cmd/tbcd/tbcd.go @@ -25,7 +25,7 @@ const ( defaultLogLevel = daemonName + "=INFO;tbc=INFO;level=INFO" defaultNetwork = "testnet3" // XXX make this mainnet defaultHome = "~/." + daemonName - bDefaultSize = "2gb" // ~1280 blocks on mainnet + bDefaultSize = "1gb" // ~640 blocks on mainnet bhsDefaultSize = "128mb" // enough for mainnet ) diff --git a/database/tbcd/database.go b/database/tbcd/database.go index f5847e5c3..a48df6540 100644 --- a/database/tbcd/database.go +++ b/database/tbcd/database.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. 
@@ -103,6 +103,7 @@ type Database interface { BlockInsert(ctx context.Context, b *btcutil.Block) (int64, error) // BlocksInsert(ctx context.Context, bs []*btcutil.Block) (int64, error) BlockByHash(ctx context.Context, hash *chainhash.Hash) (*btcutil.Block, error) + BlockCacheStats() CacheStats // Transactions BlockUtxoUpdate(ctx context.Context, direction int, utxos map[Outpoint]CacheOutput) error @@ -413,3 +414,12 @@ func TxIdBlockHashFromTxKey(txKey TxKey) (*chainhash.Hash, *chainhash.Hash, erro } return txId, blockHash, nil } + +// Cache +type CacheStats struct { + Hits int + Misses int + Purges int + Size int + Items int +} diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go index 54a84d075..d3375450f 100644 --- a/database/tbcd/level/blockcache.go +++ b/database/tbcd/level/blockcache.go @@ -11,6 +11,8 @@ import ( "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" + + "github.com/hemilabs/heminetwork/database/tbcd" ) var blockSize = 1677721 // ~1.6MB rough size of a mainnet block as of Jan 2025 @@ -20,12 +22,6 @@ type blockElement struct { block []byte } -type CacheStats struct { - Hits int - Misses int - Purges int -} - type lowIQLRU struct { mtx sync.RWMutex @@ -38,7 +34,7 @@ type lowIQLRU struct { l *list.List // stats - c CacheStats + c tbcd.CacheStats } func (l *lowIQLRU) Put(v *btcutil.Block) { @@ -70,6 +66,8 @@ func (l *lowIQLRU) Put(v *btcutil.Block) { // block lookup and lru append l.m[*hash] = blockElement{element: l.l.PushBack(hash), block: block} l.totalSize += len(block) + + l.c.Size = l.totalSize } func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { @@ -95,9 +93,10 @@ func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { return b, true } -func (l *lowIQLRU) Stats() CacheStats { +func (l *lowIQLRU) Stats() tbcd.CacheStats { l.mtx.RLock() defer l.mtx.RUnlock() + l.c.Items = len(l.m) return l.c } diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go index 3479e2ff0..3580d2e16 100644 --- a/database/tbcd/level/cache_test.go +++ b/database/tbcd/level/cache_test.go @@ -82,4 +82,6 @@ func TestLRUCache(t *testing.T) { if s.Hits != 10 && s.Misses != 10 && s.Purges != 10 { t.Fatal(spew.Sdump(s)) } + + t.Logf(spew.Sdump(s)) } diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index 4eeb6d061..8ffaa795a 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -126,7 +126,7 @@ func NewConfig(home string) *Config { panic("invalid blockheaderCacheSize") } - blockCacheSizeS := "2gb" // ~512 blocks on mainnet + blockCacheSizeS := "1gb" // ~640 blocks on mainnet blockCacheSize, err := humanize.ParseBytes(blockCacheSizeS) if err != nil { panic(err) @@ -1655,3 +1655,7 @@ func (l *ldb) BlockTxUpdate(ctx context.Context, direction int, txs map[tbcd.TxK return nil } + +func (l *ldb) BlockCacheStats() tbcd.CacheStats { + return l.blockCache.Stats() +} diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index a1f8e9a06..53e7e147c 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -108,7 +108,7 @@ type Config struct { func NewDefaultConfig() *Config { return &Config{ ListenAddress: tbcapi.DefaultListen, - BlockCacheSize: "2gb", + BlockCacheSize: "1gb", BlockheaderCacheSize: "128mb", LogLevel: logLevel, MaxCachedTxs: defaultMaxCachedTxs, @@ -161,6 +161,7 @@ type Server struct { syncInfo SyncInfo connected, good, bad int mempoolCount, mempoolSize int + blockCache tbcd.CacheStats } // periodically updated by promPoll isRunning bool 
cmdsProcessed prometheus.Counter @@ -671,6 +672,36 @@ func (s *Server) promMempoolSize() float64 { return deucalion.IntToFloat(s.prom.mempoolSize) } +func (s *Server) promBlockCacheHits() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Hits) +} + +func (s *Server) promBlockCacheMisses() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Misses) +} + +func (s *Server) promBlockCachePurges() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Purges) +} + +func (s *Server) promBlockCacheSize() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Size) +} + +func (s *Server) promBlockCacheItems() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.blockCache.Items) +} + func (s *Server) promPoll(ctx context.Context) error { for { select { @@ -681,17 +712,25 @@ func (s *Server) promPoll(ctx context.Context) error { s.prom.syncInfo = s.Synced(ctx) s.prom.connected, s.prom.good, s.prom.bad = s.pm.Stats() + s.prom.blockCache = s.db.BlockCacheStats() //if s.cfg.MempoolEnabled { // s.prom.mempoolCount, s.prom.mempoolSize = s.mempool.stats(ctx) //} + s.promPollVerbose = true if s.promPollVerbose { s.mtx.RLock() log.Infof("Pending blocks %v/%v connected peers %v "+ - "good peers %v bad peers %v mempool %v %v", + "good peers %v bad peers %v mempool %v %v "+ + "block cache hits: %v misses: %v purges: %v size: %v "+ + "blocks: %v", s.blocks.Len(), defaultPendingBlocks, s.prom.connected, s.prom.good, s.prom.bad, s.prom.mempoolCount, - humanize.Bytes(uint64(s.prom.mempoolSize))) + humanize.Bytes(uint64(s.prom.mempoolSize)), + s.prom.blockCache.Hits, s.prom.blockCache.Misses, + s.prom.blockCache.Purges, + humanize.Bytes(uint64(s.prom.blockCache.Size)), + s.prom.blockCache.Items) s.mtx.RUnlock() } @@ -2224,6 +2263,31 @@ func (s *Server) Collectors() []prometheus.Collector { Name: "mempool_size_bytes", Help: "Size of mempool in bytes", }, s.promMempoolSize), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_hits", + Help: "Block cache hits", + }, s.promBlockCacheHits), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_misses", + Help: "Block cache misses", + }, s.promBlockCacheMisses), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_purges", + Help: "Block cache purges", + }, s.promBlockCachePurges), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_size", + Help: "Block cache size", + }, s.promBlockCacheSize), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "block_cache_items", + Help: "Number of cached blocks", + }, s.promBlockCacheItems), } } return s.promCollectors From 0a9f59d1ca19351721f9e2801228e117c838f058 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 15:41:53 +0000 Subject: [PATCH 13/18] Derp disable promPollVerbose --- service/tbc/tbc.go | 1 - 1 file changed, 1 deletion(-) diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index 53e7e147c..7811c4bfa 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -717,7 +717,6 @@ func (s *Server) promPoll(ctx context.Context) error { // s.prom.mempoolCount, s.prom.mempoolSize = s.mempool.stats(ctx) //} - s.promPollVerbose = true if 
s.promPollVerbose { s.mtx.RLock() log.Infof("Pending blocks %v/%v connected peers %v "+ From 49b82ec38b3094d405e113fd1e31438e7c5a67f7 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 16:07:52 +0000 Subject: [PATCH 14/18] Fix stats races (thanks joshua) and add stats to header cache as well --- database/tbcd/database.go | 1 + database/tbcd/level/blockcache.go | 10 +++--- database/tbcd/level/headercache.go | 26 +++++++++++--- database/tbcd/level/level.go | 4 +++ service/tbc/tbc.go | 57 ++++++++++++++++++++++++++++++ 5 files changed, 89 insertions(+), 9 deletions(-) diff --git a/database/tbcd/database.go b/database/tbcd/database.go index a48df6540..b7480b7c7 100644 --- a/database/tbcd/database.go +++ b/database/tbcd/database.go @@ -91,6 +91,7 @@ type Database interface { BlockHeaderBest(ctx context.Context) (*BlockHeader, error) // return canonical BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*BlockHeader, error) BlockHeaderGenesisInsert(ctx context.Context, wbh *wire.BlockHeader, height uint64, diff *big.Int) error + BlockHeaderCacheStats() CacheStats // Block headers BlockHeadersByHeight(ctx context.Context, height uint64) ([]BlockHeader, error) diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go index d3375450f..cac88c0ef 100644 --- a/database/tbcd/level/blockcache.go +++ b/database/tbcd/level/blockcache.go @@ -23,7 +23,7 @@ type blockElement struct { } type lowIQLRU struct { - mtx sync.RWMutex + mtx sync.Mutex size int // this is the approximate max size @@ -71,8 +71,8 @@ func (l *lowIQLRU) Put(v *btcutil.Block) { } func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { - l.mtx.RLock() - defer l.mtx.RUnlock() + l.mtx.Lock() + defer l.mtx.Unlock() be, ok := l.m[*k] if !ok { @@ -94,8 +94,8 @@ func (l *lowIQLRU) Get(k *chainhash.Hash) (*btcutil.Block, bool) { } func (l *lowIQLRU) Stats() tbcd.CacheStats { - l.mtx.RLock() - defer l.mtx.RUnlock() + l.mtx.Lock() + defer l.mtx.Unlock() l.c.Items = len(l.m) return l.c } diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index 803a69985..eb8612cf4 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -16,11 +16,14 @@ import ( const blockHeaderSize = 8 + 32 + 80 + 8 // rough size of tbcd.BlockHeader type lowIQMap struct { - mtx sync.RWMutex + mtx sync.Mutex count int m map[chainhash.Hash]*tbcd.BlockHeader // 32+8+80+len([]Word ~ 8) + + // stats + c tbcd.CacheStats } func (l *lowIQMap) Put(v *tbcd.BlockHeader) { @@ -32,9 +35,10 @@ func (l *lowIQMap) Put(v *tbcd.BlockHeader) { } if len(l.m) >= l.count { - // evict entry + // evict random entry for k := range l.m { delete(l.m, k) + l.c.Purges++ break } } @@ -43,10 +47,15 @@ func (l *lowIQMap) Put(v *tbcd.BlockHeader) { } func (l *lowIQMap) Get(k *chainhash.Hash) (*tbcd.BlockHeader, bool) { - l.mtx.RLock() - defer l.mtx.RUnlock() + l.mtx.Lock() + defer l.mtx.Unlock() bh, ok := l.m[*k] + if ok { + l.c.Hits++ + } else { + l.c.Misses++ + } return bh, ok } @@ -56,9 +65,18 @@ func (l *lowIQMap) PurgeBatch(ks []*chainhash.Hash) { for v := range ks { delete(l.m, *ks[v]) + l.c.Purges++ } } +func (l *lowIQMap) Stats() tbcd.CacheStats { + l.mtx.Lock() + defer l.mtx.Unlock() + l.c.Items = len(l.m) + l.c.Size = len(l.m) * blockHeaderSize // rough size + return l.c +} + func lowIQMapNewCount(count int) (*lowIQMap, error) { if count <= 0 { return nil, fmt.Errorf("invalid count: %v", count) diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index 
8ffaa795a..180c89aa4 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -1656,6 +1656,10 @@ func (l *ldb) BlockTxUpdate(ctx context.Context, direction int, txs map[tbcd.TxK return nil } +func (l *ldb) BlockHeaderCacheStats() tbcd.CacheStats { + return l.headerCache.Stats() +} + func (l *ldb) BlockCacheStats() tbcd.CacheStats { return l.blockCache.Stats() } diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index 7811c4bfa..a20a2c135 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -162,6 +162,7 @@ type Server struct { connected, good, bad int mempoolCount, mempoolSize int blockCache tbcd.CacheStats + headerCache tbcd.CacheStats } // periodically updated by promPoll isRunning bool cmdsProcessed prometheus.Counter @@ -702,6 +703,36 @@ func (s *Server) promBlockCacheItems() float64 { return deucalion.IntToFloat(s.prom.blockCache.Items) } +func (s *Server) promHeaderCacheHits() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Hits) +} + +func (s *Server) promHeaderCacheMisses() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Misses) +} + +func (s *Server) promHeaderCachePurges() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Purges) +} + +func (s *Server) promHeaderCacheSize() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Size) +} + +func (s *Server) promHeaderCacheItems() float64 { + s.mtx.Lock() + defer s.mtx.Unlock() + return deucalion.IntToFloat(s.prom.headerCache.Items) +} + func (s *Server) promPoll(ctx context.Context) error { for { select { @@ -713,6 +744,7 @@ func (s *Server) promPoll(ctx context.Context) error { s.prom.syncInfo = s.Synced(ctx) s.prom.connected, s.prom.good, s.prom.bad = s.pm.Stats() s.prom.blockCache = s.db.BlockCacheStats() + s.prom.headerCache = s.db.BlockHeaderCacheStats() //if s.cfg.MempoolEnabled { // s.prom.mempoolCount, s.prom.mempoolSize = s.mempool.stats(ctx) //} @@ -2282,6 +2314,31 @@ func (s *Server) Collectors() []prometheus.Collector { Name: "block_cache_size", Help: "Block cache size", }, s.promBlockCacheSize), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_items", + Help: "Number of cached blocks", + }, s.promHeaderCacheItems), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_hits", + Help: "Header cache hits", + }, s.promHeaderCacheHits), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_misses", + Help: "Header cache misses", + }, s.promHeaderCacheMisses), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_purges", + Help: "Header cache purges", + }, s.promHeaderCachePurges), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: s.cfg.PrometheusNamespace, + Name: "header_cache_size", + Help: "Header cache size", + }, s.promHeaderCacheSize), prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: s.cfg.PrometheusNamespace, Name: "block_cache_items", From 949e7176e7c30d6fab67f34e0e8c522d2387f388 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 16:34:09 +0000 Subject: [PATCH 15/18] Test header cache as well and make checks more readable --- database/tbcd/level/cache_test.go | 88 +++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 4 
deletions(-) diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go index 3580d2e16..3ba560ee1 100644 --- a/database/tbcd/level/cache_test.go +++ b/database/tbcd/level/cache_test.go @@ -1,12 +1,15 @@ package level import ( + "math/big" "testing" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/hemilabs/heminetwork/database/tbcd" ) func newBlock(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *btcutil.Block) { @@ -35,7 +38,7 @@ func TestLRUCache(t *testing.T) { // verify stats are 0 s := l.Stats() - if s.Hits != 0 && s.Misses != 0 && s.Purges != 0 { + if !(s.Hits == 0 && s.Misses == 0 && s.Purges == 0) { t.Fatal(spew.Sdump(s)) } @@ -48,7 +51,7 @@ func TestLRUCache(t *testing.T) { // verify hits are maxBlocks s = l.Stats() - if s.Hits != 10 && s.Misses != 0 && s.Purges != 0 { + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 0) { t.Fatal(spew.Sdump(s)) } @@ -63,7 +66,7 @@ func TestLRUCache(t *testing.T) { // verify purges are maxBlocks s = l.Stats() - if s.Hits != 10 && s.Misses != 0 && s.Purges != 10 { + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 10) { t.Fatal(spew.Sdump(s)) } @@ -79,7 +82,84 @@ func TestLRUCache(t *testing.T) { // verify misses are maxBlocks s = l.Stats() - if s.Hits != 10 && s.Misses != 10 && s.Purges != 10 { + if !(s.Hits == 10 && s.Misses == 10 && s.Purges == 10) { + t.Fatal(spew.Sdump(s)) + } + + t.Logf(spew.Sdump(s)) +} + +func newHeader(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *tbcd.BlockHeader) { + bh := wire.NewBlockHeader(0, prevHash, &chainhash.Hash{}, 0, uint32(nonce)) + return bh.BlockHash(), &tbcd.BlockHeader{ + Hash: bh.BlockHash(), + Height: uint64(nonce), + Header: h2b(bh), + Difficulty: big.Int{}, + } +} + +func TestMapCache(t *testing.T) { + maxCacheCount := 10 + l, err := lowIQMapNewCount(maxCacheCount) + if err != nil { + t.Fatal(err) + } + + prevHash := chainhash.Hash{} // genesis + headers := make([]chainhash.Hash, 0, maxCacheCount*2) + for i := 0; i < maxCacheCount; i++ { + h, bh := newHeader(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + headers = append(headers, h) + l.Put(bh) + prevHash = h + } + + // verify stats are 0 + s := l.Stats() + if !(s.Hits == 0 && s.Misses == 0 && s.Purges == 0) { + t.Fatal(spew.Sdump(s)) + } + + // retrieve all headers + for k := range headers { + if _, ok := l.Get(&headers[k]); !ok { + t.Fatalf("header not found: %v", headers[k]) + } + } + + // verify hits are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 0) { + t.Fatal(spew.Sdump(s)) + } + + // purge random cache entries + for i := maxCacheCount; i < maxCacheCount*2; i++ { + h, bh := newHeader(&prevHash, uint32(i)) + t.Logf("%v: %v", i, h) + headers = append(headers, h) + l.Put(bh) + prevHash = h + } + + // verify purges are maxBlocks + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 0 && s.Purges == 10) { + t.Fatal(spew.Sdump(s)) + } + + // Force a random miss + hm, _ := newHeader(&chainhash.Hash{}, 0xdeadbeef) + _, ok := l.Get(&hm) + if ok { + t.Fatal("non cached header found") + } + + // verify misses + s = l.Stats() + if !(s.Hits == 10 && s.Misses == 1 && s.Purges == 10) { t.Fatal(spew.Sdump(s)) } From 9d4e98ef9c2927f83ecd4c1b0227fdc987fb3b4c Mon Sep 17 00:00:00 2001 From: AL-CT Date: Thu, 9 Jan 2025 16:45:37 +0000 Subject: [PATCH 16/18] added extra basic test for headercache --- database/tbcd/level/cache_test.go | 61 +++++++++++++++++++++++++++++++ 1 file 
changed, 61 insertions(+) diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go index 3ba560ee1..372b82b24 100644 --- a/database/tbcd/level/cache_test.go +++ b/database/tbcd/level/cache_test.go @@ -165,3 +165,64 @@ func TestMapCache(t *testing.T) { t.Logf(spew.Sdump(s)) } + +func intHash(b int) chainhash.Hash { + return chainhash.Hash{byte(b)} +} + +func TestHC(t *testing.T) { + _, err := lowIQMapNewSize(0) + if err == nil { + t.Fatalf("expected invalid size error for size <= 0") + } + _, err = lowIQMapNewSize(1) + if err == nil { + t.Fatalf("expected invalid count error for count <= 0") + } + size := 1024 + l, err := lowIQMapNewSize(size) + if err != nil { + t.Fatal(err) + } + hs := intHash(0) + for range 2 { + l.Put(&tbcd.BlockHeader{ + Hash: hs, + }) + } + if len(l.m) > 1 { + t.Fatalf("duplicate headers not excluded by hash") + } + if _, ok := l.Get(&hs); !ok { + t.Fatalf("failed to retrieve header present in map") + } + hs = intHash(1) + if _, ok := l.Get(&hs); ok { + t.Fatalf("invalid header retrieved from Map") + } + for k := range l.count + 5 { + l.Put(&tbcd.BlockHeader{ + Hash: intHash(k), + }) + } + if len(l.m) > l.count { + t.Fatalf("map size exceeded bounds. expected %v, got %v", l.count, len(l.m)) + } + storedHashes := make([]*chainhash.Hash, 0, len(l.m)-1) + var lastHash *chainhash.Hash + for key := range l.m { + keyc := key + if len(storedHashes) >= len(l.m)-1 { + lastHash = &keyc + } else { + storedHashes = append(storedHashes, &keyc) + } + } + l.PurgeBatch(storedHashes) + if len(l.m) != 1 { + t.Fatalf("expected %d elements to be purged, purged %d", len(storedHashes), l.count-len(l.m)) + } + if _, ok := l.Get(lastHash); !ok { + t.Fatalf("incorrect element purged") + } +} From d8175f1a119767b8342ef010705366f7870972cf Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 16:54:18 +0000 Subject: [PATCH 17/18] fix naming, form joshua --- database/tbcd/level/blockcache.go | 2 +- database/tbcd/level/cache_test.go | 4 ++-- database/tbcd/level/headercache.go | 6 +++--- database/tbcd/level/level.go | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/database/tbcd/level/blockcache.go b/database/tbcd/level/blockcache.go index cac88c0ef..bab117926 100644 --- a/database/tbcd/level/blockcache.go +++ b/database/tbcd/level/blockcache.go @@ -100,7 +100,7 @@ func (l *lowIQLRU) Stats() tbcd.CacheStats { return l.c } -func lowIQLRUNewSize(size int) (*lowIQLRU, error) { +func lowIQLRUSizeNew(size int) (*lowIQLRU, error) { if size <= 0 { return nil, fmt.Errorf("invalid size: %v", size) } diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go index 372b82b24..3956a3918 100644 --- a/database/tbcd/level/cache_test.go +++ b/database/tbcd/level/cache_test.go @@ -21,7 +21,7 @@ func newBlock(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *btcutil. 
func TestLRUCache(t *testing.T) { maxCache := 10 blockSize = 81 // we'll use empty blocks - l, err := lowIQLRUNewSize(blockSize * maxCache) + l, err := lowIQLRUSizeNew(blockSize * maxCache) if err != nil { t.Fatal(err) } @@ -101,7 +101,7 @@ func newHeader(prevHash *chainhash.Hash, nonce uint32) (chainhash.Hash, *tbcd.Bl func TestMapCache(t *testing.T) { maxCacheCount := 10 - l, err := lowIQMapNewCount(maxCacheCount) + l, err := lowIQMapCountNew(maxCacheCount) if err != nil { t.Fatal(err) } diff --git a/database/tbcd/level/headercache.go b/database/tbcd/level/headercache.go index eb8612cf4..557edbc7c 100644 --- a/database/tbcd/level/headercache.go +++ b/database/tbcd/level/headercache.go @@ -77,7 +77,7 @@ func (l *lowIQMap) Stats() tbcd.CacheStats { return l.c } -func lowIQMapNewCount(count int) (*lowIQMap, error) { +func lowIQMapCountNew(count int) (*lowIQMap, error) { if count <= 0 { return nil, fmt.Errorf("invalid count: %v", count) } @@ -91,10 +91,10 @@ func lowIQMapNewCount(count int) (*lowIQMap, error) { // Since it is an estimate it will overflow if Difficulty becomes bigger than // 64 bits. This is not an issue since 100MB caches all of mainnet in Jan 2025 // (~819200 items). -func lowIQMapNewSize(size int) (*lowIQMap, error) { +func lowIQMapSizeNew(size int) (*lowIQMap, error) { if size <= 0 { return nil, fmt.Errorf("invalid size: %v", size) } // approximate number of headers - return lowIQMapNewCount(size / blockHeaderSize) + return lowIQMapCountNew(size / blockHeaderSize) } diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index 180c89aa4..0e19e093c 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -161,7 +161,7 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { } if cfg.blockCacheSize > 0 { - l.blockCache, err = lowIQLRUNewSize(cfg.blockCacheSize) + l.blockCache, err = lowIQLRUSizeNew(cfg.blockCacheSize) if err != nil { return nil, fmt.Errorf("couldn't setup block cache: %w", err) } @@ -170,7 +170,7 @@ func New(ctx context.Context, cfg *Config) (*ldb, error) { log.Infof("block cache: DISABLED") } if cfg.blockheaderCacheSize > 0 { - l.headerCache, err = lowIQMapNewSize(cfg.blockheaderCacheSize) + l.headerCache, err = lowIQMapSizeNew(cfg.blockheaderCacheSize) if err != nil { return nil, fmt.Errorf("couldn't setup block header cache: %w", err) } From 52eb29fb27e85fe57c8aaebe660bc6f6069f19b1 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Thu, 9 Jan 2025 17:15:11 +0000 Subject: [PATCH 18/18] Fixup of New in antonio code --- database/tbcd/level/cache_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/database/tbcd/level/cache_test.go b/database/tbcd/level/cache_test.go index 3956a3918..0da5e0ef1 100644 --- a/database/tbcd/level/cache_test.go +++ b/database/tbcd/level/cache_test.go @@ -171,16 +171,16 @@ func intHash(b int) chainhash.Hash { } func TestHC(t *testing.T) { - _, err := lowIQMapNewSize(0) + _, err := lowIQMapSizeNew(0) if err == nil { t.Fatalf("expected invalid size error for size <= 0") } - _, err = lowIQMapNewSize(1) + _, err = lowIQMapSizeNew(1) if err == nil { t.Fatalf("expected invalid count error for count <= 0") } size := 1024 - l, err := lowIQMapNewSize(size) + l, err := lowIQMapSizeNew(size) if err != nil { t.Fatal(err) }
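
For readers skimming the series: the block cache that patches 06 through 10 converge on is, at heart, a byte-size-bounded LRU built from a map plus container/list, evicting from the front of the list and promoting an entry to the back on every hit. The sketch below shows that general pattern in isolation; it is illustrative only (the names sizeLRU and entry and the byte-count limit are assumptions made for the example, not the tbcd API), and it omits the stats counters that the real lowIQLRU keeps.

package main

import (
	"container/list"
	"fmt"
	"sync"
)

// sizeLRU is a byte-size-bounded LRU: values are []byte, and once the total
// stored bytes would exceed the limit, the least recently used entries are
// evicted from the front of the list.
type sizeLRU struct {
	mtx   sync.Mutex
	limit int
	total int
	m     map[string]*list.Element // key -> list element
	l     *list.List               // front = least recently used
}

type entry struct {
	key string
	val []byte
}

func newSizeLRU(limit int) *sizeLRU {
	return &sizeLRU{limit: limit, m: make(map[string]*list.Element), l: list.New()}
}

func (c *sizeLRU) Put(key string, val []byte) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if _, ok := c.m[key]; ok {
		return
	}
	// Evict from the front until the new value fits.
	for c.total+len(val) > c.limit && c.l.Len() > 0 {
		e := c.l.Front()
		ev := c.l.Remove(e).(*entry)
		c.total -= len(ev.val)
		delete(c.m, ev.key)
	}
	c.m[key] = c.l.PushBack(&entry{key: key, val: val})
	c.total += len(val)
}

func (c *sizeLRU) Get(key string) ([]byte, bool) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	e, ok := c.m[key]
	if !ok {
		return nil, false
	}
	c.l.MoveToBack(e) // mark as most recently used
	return e.Value.(*entry).val, true
}

func main() {
	c := newSizeLRU(8)
	c.Put("a", []byte("1234"))
	c.Put("b", []byte("5678"))
	c.Put("c", []byte("9abc")) // evicts "a", the least recently used entry
	_, ok := c.Get("a")
	fmt.Println(ok) // false
}

The block header cache in the same series stays deliberately simpler: a count-bounded map whose capacity is estimated from the configured byte size (size divided by a rough per-header cost), evicting an arbitrary entry when full rather than tracking recency.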