Skip to content

Commit 6eef141

Browse files
authored
les: historical data garbage collection (ethereum#19570)
This change introduces garbage collection for the light client. Historical chain data is deleted periodically. If you want to disable the GC, use the --light.nopruning flag.
1 parent b8dd089 commit 6eef141

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed

+843
-215
lines changed

cmd/geth/main.go

+1
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ var (
9898
utils.LightEgressFlag,
9999
utils.LightMaxPeersFlag,
100100
utils.LegacyLightPeersFlag,
101+
utils.LightNoPruneFlag,
101102
utils.LightKDFFlag,
102103
utils.UltraLightServersFlag,
103104
utils.UltraLightFractionFlag,

cmd/geth/usage.go

+1
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ var AppHelpFlagGroups = []flagGroup{
9696
utils.UltraLightServersFlag,
9797
utils.UltraLightFractionFlag,
9898
utils.UltraLightOnlyAnnounceFlag,
99+
utils.LightNoPruneFlag,
99100
},
100101
},
101102
{

cmd/utils/flags.go

+15-3
Original file line numberDiff line numberDiff line change
@@ -282,6 +282,10 @@ var (
282282
Name: "ulc.onlyannounce",
283283
Usage: "Ultra light server sends announcements only",
284284
}
285+
LightNoPruneFlag = cli.BoolFlag{
286+
Name: "light.nopruning",
287+
Usage: "Disable ancient light chain data pruning",
288+
}
285289
// Ethash settings
286290
EthashCacheDirFlag = DirectoryFlag{
287291
Name: "ethash.cachedir",
@@ -1070,6 +1074,9 @@ func setLes(ctx *cli.Context, cfg *eth.Config) {
10701074
if ctx.GlobalIsSet(UltraLightOnlyAnnounceFlag.Name) {
10711075
cfg.UltraLightOnlyAnnounce = ctx.GlobalBool(UltraLightOnlyAnnounceFlag.Name)
10721076
}
1077+
if ctx.GlobalIsSet(LightNoPruneFlag.Name) {
1078+
cfg.LightNoPrune = ctx.GlobalBool(LightNoPruneFlag.Name)
1079+
}
10731080
}
10741081

10751082
// makeDatabaseHandles raises out the number of allowed file handles per process
@@ -1800,12 +1807,17 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
18001807
var (
18011808
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
18021809
handles = makeDatabaseHandles()
1810+
1811+
err error
1812+
chainDb ethdb.Database
18031813
)
1804-
name := "chaindata"
18051814
if ctx.GlobalString(SyncModeFlag.Name) == "light" {
1806-
name = "lightchaindata"
1815+
name := "lightchaindata"
1816+
chainDb, err = stack.OpenDatabase(name, cache, handles, "")
1817+
} else {
1818+
name := "chaindata"
1819+
chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "")
18071820
}
1808-
chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "")
18091821
if err != nil {
18101822
Fatalf("Could not open database: %v", err)
18111823
}

consensus/clique/clique.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -369,7 +369,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
369369
// at a checkpoint block without a parent (light client CHT), or we have piled
370370
// up more headers than allowed to be reorged (chain reinit from a freezer),
371371
// consider the checkpoint trusted and snapshot it.
372-
if number == 0 || (number%c.config.Epoch == 0 && (len(headers) > params.ImmutabilityThreshold || chain.GetHeaderByNumber(number-1) == nil)) {
372+
if number == 0 || (number%c.config.Epoch == 0 && (len(headers) > params.FullImmutabilityThreshold || chain.GetHeaderByNumber(number-1) == nil)) {
373373
checkpoint := chain.GetHeaderByNumber(number)
374374
if checkpoint != nil {
375375
hash := checkpoint.Hash()

core/blockchain.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -901,14 +901,14 @@ func (bc *BlockChain) Stop() {
901901
recent := bc.GetBlockByNumber(number - offset)
902902

903903
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
904-
if err := triedb.Commit(recent.Root(), true); err != nil {
904+
if err := triedb.Commit(recent.Root(), true, nil); err != nil {
905905
log.Error("Failed to commit recent state trie", "err", err)
906906
}
907907
}
908908
}
909909
if snapBase != (common.Hash{}) {
910910
log.Info("Writing snapshot state to disk", "root", snapBase)
911-
if err := triedb.Commit(snapBase, true); err != nil {
911+
if err := triedb.Commit(snapBase, true, nil); err != nil {
912912
log.Error("Failed to commit recent state trie", "err", err)
913913
}
914914
}
@@ -1442,7 +1442,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
14421442

14431443
// If we're running an archive node, always flush
14441444
if bc.cacheConfig.TrieDirtyDisabled {
1445-
if err := triedb.Commit(root, false); err != nil {
1445+
if err := triedb.Commit(root, false, nil); err != nil {
14461446
return NonStatTy, err
14471447
}
14481448
} else {
@@ -1476,7 +1476,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
14761476
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
14771477
}
14781478
// Flush an entire trie and restart the counters
1479-
triedb.Commit(header.Root, true)
1479+
triedb.Commit(header.Root, true, nil)
14801480
lastWrite = chosen
14811481
bc.gcproc = 0
14821482
}

core/chain_indexer.go

+8-1
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,9 @@ type ChainIndexerBackend interface {
4646

4747
// Commit finalizes the section metadata and stores it into the database.
4848
Commit() error
49+
50+
// Prune deletes the chain index older than the given threshold.
51+
Prune(threshold uint64) error
4952
}
5053

5154
// ChainIndexerChain interface is used for connecting the indexer to a blockchain
@@ -386,7 +389,6 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
386389
c.log.Trace("Processing new chain section", "section", section)
387390

388391
// Reset and partial processing
389-
390392
if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
391393
c.setValidSections(0)
392394
return common.Hash{}, err
@@ -459,6 +461,11 @@ func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
459461
}
460462
}
461463

464+
// Prune deletes all chain data older than given threshold.
465+
func (c *ChainIndexer) Prune(threshold uint64) error {
466+
return c.backend.Prune(threshold)
467+
}
468+
462469
// loadValidSections reads the number of valid sections from the index database
463470
// and caches it into the local state.
464471
func (c *ChainIndexer) loadValidSections() {

core/chain_indexer_test.go

+4
Original file line numberDiff line numberDiff line change
@@ -236,3 +236,7 @@ func (b *testChainIndexBackend) Commit() error {
236236
}
237237
return nil
238238
}
239+
240+
func (b *testChainIndexBackend) Prune(threshold uint64) error {
241+
return nil
242+
}

core/chain_makers.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
220220
if err != nil {
221221
panic(fmt.Sprintf("state write error: %v", err))
222222
}
223-
if err := statedb.Database().TrieDB().Commit(root, false); err != nil {
223+
if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
224224
panic(fmt.Sprintf("trie write error: %v", err))
225225
}
226226
return block, b.receipts

core/dao_test.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
7979
if _, err := bc.InsertChain(blocks); err != nil {
8080
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
8181
}
82-
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
82+
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
8383
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
8484
}
8585
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
@@ -104,7 +104,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
104104
if _, err := bc.InsertChain(blocks); err != nil {
105105
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
106106
}
107-
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
107+
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
108108
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
109109
}
110110
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
@@ -130,7 +130,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
130130
if _, err := bc.InsertChain(blocks); err != nil {
131131
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
132132
}
133-
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
133+
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
134134
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
135135
}
136136
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
@@ -150,7 +150,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
150150
if _, err := bc.InsertChain(blocks); err != nil {
151151
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
152152
}
153-
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
153+
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
154154
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
155155
}
156156
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})

core/genesis.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
285285
head.Difficulty = params.GenesisDifficulty
286286
}
287287
statedb.Commit(false)
288-
statedb.Database().TrieDB().Commit(root, true)
288+
statedb.Database().TrieDB().Commit(root, true, nil)
289289

290290
return types.NewBlock(head, nil, nil, nil)
291291
}

core/rawdb/accessors_chain.go

+33
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,39 @@ func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
8080
return hashes
8181
}
8282

83+
// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the
84+
// certain chain range. If the accumulated entries reaches the given threshold,
85+
// abort the iteration and return the semi-finish result.
86+
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
87+
// Short circuit if the limit is 0.
88+
if limit == 0 {
89+
return nil, nil
90+
}
91+
var (
92+
numbers []uint64
93+
hashes []common.Hash
94+
)
95+
// Construct the key prefix of start point.
96+
start, end := headerHashKey(from), headerHashKey(to)
97+
it := db.NewIterator(nil, start)
98+
defer it.Release()
99+
100+
for it.Next() {
101+
if bytes.Compare(it.Key(), end) >= 0 {
102+
break
103+
}
104+
if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
105+
numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
106+
hashes = append(hashes, common.BytesToHash(it.Value()))
107+
// If the accumulated entries reaches the limit threshold, return.
108+
if len(numbers) >= limit {
109+
break
110+
}
111+
}
112+
}
113+
return numbers, hashes
114+
}
115+
83116
// ReadHeaderNumber returns the header number assigned to a hash.
84117
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
85118
data, _ := db.Get(headerNumberKey(hash))

core/rawdb/accessors_chain_test.go

+33
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ import (
2323
"io/ioutil"
2424
"math/big"
2525
"os"
26+
"reflect"
2627
"testing"
2728

2829
"github.com/ethereum/go-ethereum/common"
@@ -424,3 +425,35 @@ func TestAncientStorage(t *testing.T) {
424425
t.Fatalf("invalid td returned")
425426
}
426427
}
428+
429+
func TestCanonicalHashIteration(t *testing.T) {
430+
var cases = []struct {
431+
from, to uint64
432+
limit int
433+
expect []uint64
434+
}{
435+
{1, 8, 0, nil},
436+
{1, 8, 1, []uint64{1}},
437+
{1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}},
438+
{1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}},
439+
{2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}},
440+
{9, 10, 10, nil},
441+
}
442+
// Test empty db iteration
443+
db := NewMemoryDatabase()
444+
numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10)
445+
if len(numbers) != 0 {
446+
t.Fatalf("No entry should be returned to iterate an empty db")
447+
}
448+
// Fill database with testing data.
449+
for i := uint64(1); i <= 8; i++ {
450+
WriteCanonicalHash(db, common.Hash{}, i)
451+
WriteTd(db, common.Hash{}, i, big.NewInt(10)) // Write some interferential data
452+
}
453+
for i, c := range cases {
454+
numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
455+
if !reflect.DeepEqual(numbers, c.expect) {
456+
t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers)
457+
}
458+
}
459+
}

core/rawdb/accessors_indexes.go

+22
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
package rawdb
1818

1919
import (
20+
"bytes"
2021
"math/big"
2122

2223
"github.com/ethereum/go-ethereum/common"
@@ -151,3 +152,24 @@ func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head comm
151152
log.Crit("Failed to store bloom bits", "err", err)
152153
}
153154
}
155+
156+
// DeleteBloombits removes all compressed bloom bits vector belonging to the
157+
// given section range and bit index.
158+
func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
159+
start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
160+
it := db.NewIterator(nil, start)
161+
defer it.Release()
162+
163+
for it.Next() {
164+
if bytes.Compare(it.Key(), end) >= 0 {
165+
break
166+
}
167+
if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 {
168+
continue
169+
}
170+
db.Delete(it.Key())
171+
}
172+
if it.Error() != nil {
173+
log.Crit("Failed to delete bloom bits", "err", it.Error())
174+
}
175+
}

core/rawdb/accessors_indexes_test.go

+45
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,14 @@
1717
package rawdb
1818

1919
import (
20+
"bytes"
2021
"math/big"
2122
"testing"
2223

2324
"github.com/ethereum/go-ethereum/common"
2425
"github.com/ethereum/go-ethereum/core/types"
2526
"github.com/ethereum/go-ethereum/ethdb"
27+
"github.com/ethereum/go-ethereum/params"
2628
"github.com/ethereum/go-ethereum/rlp"
2729
)
2830

@@ -106,3 +108,46 @@ func TestLookupStorage(t *testing.T) {
106108
})
107109
}
108110
}
111+
112+
func TestDeleteBloomBits(t *testing.T) {
113+
// Prepare testing data
114+
db := NewMemoryDatabase()
115+
for i := uint(0); i < 2; i++ {
116+
for s := uint64(0); s < 2; s++ {
117+
WriteBloomBits(db, i, s, params.MainnetGenesisHash, []byte{0x01, 0x02})
118+
WriteBloomBits(db, i, s, params.RinkebyGenesisHash, []byte{0x01, 0x02})
119+
}
120+
}
121+
check := func(bit uint, section uint64, head common.Hash, exist bool) {
122+
bits, _ := ReadBloomBits(db, bit, section, head)
123+
if exist && !bytes.Equal(bits, []byte{0x01, 0x02}) {
124+
t.Fatalf("Bloombits mismatch")
125+
}
126+
if !exist && len(bits) > 0 {
127+
t.Fatalf("Bloombits should be removed")
128+
}
129+
}
130+
// Check the existence of written data.
131+
check(0, 0, params.MainnetGenesisHash, true)
132+
check(0, 0, params.RinkebyGenesisHash, true)
133+
134+
// Check the existence of deleted data.
135+
DeleteBloombits(db, 0, 0, 1)
136+
check(0, 0, params.MainnetGenesisHash, false)
137+
check(0, 0, params.RinkebyGenesisHash, false)
138+
check(0, 1, params.MainnetGenesisHash, true)
139+
check(0, 1, params.RinkebyGenesisHash, true)
140+
141+
// Check the existence of deleted data.
142+
DeleteBloombits(db, 0, 0, 2)
143+
check(0, 0, params.MainnetGenesisHash, false)
144+
check(0, 0, params.RinkebyGenesisHash, false)
145+
check(0, 1, params.MainnetGenesisHash, false)
146+
check(0, 1, params.RinkebyGenesisHash, false)
147+
148+
// Bit1 shouldn't be affect.
149+
check(1, 0, params.MainnetGenesisHash, true)
150+
check(1, 0, params.RinkebyGenesisHash, true)
151+
check(1, 1, params.MainnetGenesisHash, true)
152+
check(1, 1, params.RinkebyGenesisHash, true)
153+
}

core/rawdb/freezer.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -287,12 +287,12 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
287287
backoff = true
288288
continue
289289

290-
case *number < params.ImmutabilityThreshold:
291-
log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.ImmutabilityThreshold)
290+
case *number < params.FullImmutabilityThreshold:
291+
log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.FullImmutabilityThreshold)
292292
backoff = true
293293
continue
294294

295-
case *number-params.ImmutabilityThreshold <= f.frozen:
295+
case *number-params.FullImmutabilityThreshold <= f.frozen:
296296
log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
297297
backoff = true
298298
continue
@@ -304,7 +304,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
304304
continue
305305
}
306306
// Seems we have data ready to be frozen, process in usable batches
307-
limit := *number - params.ImmutabilityThreshold
307+
limit := *number - params.FullImmutabilityThreshold
308308
if limit-f.frozen > freezerBatchLimit {
309309
limit = f.frozen + freezerBatchLimit
310310
}

0 commit comments

Comments
 (0)