// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

const journalVersion uint64 = 0

// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {
	Wiping   bool // Whether the database was in progress of being wiped
	Done     bool // Whether the generator finished creating the snapshot
	Marker   []byte
	Accounts uint64
	Slots    uint64
	Storage  uint64
}

// journalDestruct is an account deletion entry in a diffLayer's disk journal.
type journalDestruct struct {
	Hash common.Hash
}

// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
	Hash common.Hash
	Blob []byte
}

// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
	Hash common.Hash
	Keys []common.Hash
	Vals [][]byte
}
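
// For orientation, journalLayoutSketch is an illustrative helper, not part of
// the upstream file: it walks a new-format journal blob by hand, mirroring
// the layout produced by the Journal methods further down. A journal is a
// single RLP stream holding the version, the disk layer root, and then one
// (root, destructs, accounts, storage) tuple per diff layer, bottom to top.
func journalLayoutSketch(blob []byte) error {
	r := rlp.NewStream(bytes.NewReader(blob), 0)

	// The stream opens with the journal version and the disk layer root
	version, err := r.Uint()
	if err != nil {
		return err
	}
	if version != journalVersion {
		return fmt.Errorf("unsupported journal version %d", version)
	}
	var diskRoot common.Hash
	if err := r.Decode(&diskRoot); err != nil {
		return err
	}
	// Each diff layer contributes four consecutive items; io.EOF on the
	// root read marks the end of the journal
	for {
		var (
			root      common.Hash
			destructs []journalDestruct
			accounts  []journalAccount
			storage   []journalStorage
		)
		if err := r.Decode(&root); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if err := r.Decode(&destructs); err != nil {
			return err
		}
		if err := r.Decode(&accounts); err != nil {
			return err
		}
		if err := r.Decode(&storage); err != nil {
			return err
		}
	}
}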

// loadAndParseLegacyJournal tries to parse the snapshot journal in the legacy format.
func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
	// Retrieve the journal. For the legacy format it must exist, since even
	// with zero diff layers it records whether the snapshot has already been
	// generated or is still in progress.
	journal := rawdb.ReadSnapshotJournal(db)
	if len(journal) == 0 {
		return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
	}
	r := rlp.NewStream(bytes.NewReader(journal), 0)

	// Read the snapshot generation progress for the disk layer
	var generator journalGenerator
	if err := r.Decode(&generator); err != nil {
		return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
	}
	// Load all the snapshot diffs from the journal
	snapshot, err := loadDiffLayer(base, r)
	if err != nil {
		return nil, generator, err
	}
	return snapshot, generator, nil
}

// loadAndParseJournal tries to parse the snapshot journal in the latest format.
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
	// Retrieve the disk layer generator. It must exist whether or not the
	// snapshot is fully generated; otherwise the entire disk layer is
	// invalid.
	generatorBlob := rawdb.ReadSnapshotGenerator(db)
	if len(generatorBlob) == 0 {
		return nil, journalGenerator{}, errors.New("missing snapshot generator")
	}
	var generator journalGenerator
	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
		return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
	}
	// Retrieve the diff layer journal. It may be missing, e.g. if Geth
	// crashed while the disk layer was generating, without persisting the
	// diff journal. If there is no journal, or the journal doesn't match the
	// disk layer, just discard all diffs and try to recover them later.
	journal := rawdb.ReadSnapshotJournal(db)
	if len(journal) == 0 {
		log.Debug("Loaded snapshot journal", "diskroot", base.root, "diff", "missing")
		return base, generator, nil
	}
	r := rlp.NewStream(bytes.NewReader(journal), 0)

	// Firstly, resolve the first element as the journal version
	version, err := r.Uint()
	if err != nil {
		return nil, journalGenerator{}, err
	}
	if version != journalVersion {
		return nil, journalGenerator{}, fmt.Errorf("journal version mismatch, want %d got %v", journalVersion, version)
	}
	// Secondly, resolve the disk layer root, ensuring it's continuous with
	// the disk layer
	var root common.Hash
	if err := r.Decode(&root); err != nil {
		return nil, journalGenerator{}, errors.New("missing disk layer root")
	}
	// If the diff journal doesn't match the disk layer, discard it. This can
	// happen when Geth crashes without persisting the latest diff journal.
	if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
		log.Debug("Loaded snapshot journal", "diskroot", base.root, "diff", "unmatched")
		return base, generator, nil
	}
	// Load all the snapshot diffs from the journal
	snapshot, err := loadDiffLayer(base, r)
	if err != nil {
		return nil, journalGenerator{}, err
	}
	log.Debug("Loaded snapshot journal", "diskroot", base.root, "diffhead", snapshot.Root())
	return snapshot, generator, nil
}

// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, error) {
	// Retrieve the root of the persisted snapshot, failing if no snapshot is
	// present in the database (or it crashed mid-update)
	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
	if baseRoot == (common.Hash{}) {
		return nil, errors.New("missing or corrupted snapshot")
	}
	base := &diskLayer{
		diskdb: diskdb,
		triedb: triedb,
		cache:  fastcache.New(cache * 1024 * 1024),
		root:   baseRoot,
	}
	var legacy bool
	snapshot, generator, err := loadAndParseJournal(diskdb, base)
	if err != nil {
		log.Debug("Failed to load new-format journal", "error", err)
		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
		legacy = true
	}
	if err != nil {
		return nil, err
	}
	// Entire snapshot journal loaded, sanity check the head. If the loaded
	// snapshot doesn't match the current state root, print a warning log,
	// or discard the entire snapshot if it's a legacy one.
	//
	// Possible scenario: Geth crashed without persisting the journal and was
	// restarted; the head is rewound to a point with available state (trie)
	// which is below the snapshot. In this case the snapshot can be recovered
	// by re-executing blocks, but right now it's unavailable.
	if head := snapshot.Root(); head != root {
		// If it's a legacy snapshot, or a new-format snapshot that's not in
		// recovery mode, return the error here to force a rebuild of the
		// entire snapshot.
		if legacy || !recovery {
			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
		}
		// It's in snapshot recovery; the assumption holds that the disk
		// layer is always higher than the chain head. It can eventually be
		// recovered when the chain head moves beyond the disk layer.
		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
	}
	// Everything loaded correctly, resume any suspended operations
	if !generator.Done {
		// If the generator was still wiping, restart one from scratch (fine
		// for now as it's rare and the wiper deletes the stuff it touches
		// anyway, so restarting won't incur a lot of extra database hops)
		var wiper chan struct{}
		if generator.Wiping {
			log.Info("Resuming previous snapshot wipe")
			wiper = wipeSnapshot(diskdb, false)
		}
		// Whether or not wiping was in progress, load any generator progress too
		base.genMarker = generator.Marker
		if base.genMarker == nil {
			base.genMarker = []byte{}
		}
		base.genPending = make(chan struct{})
		base.genAbort = make(chan chan *generatorStats)

		var origin uint64
		if len(generator.Marker) >= 8 {
			origin = binary.BigEndian.Uint64(generator.Marker)
		}
		go base.generate(&generatorStats{
			wiping:   wiper,
			origin:   origin,
			start:    time.Now(),
			accounts: generator.Accounts,
			slots:    generator.Slots,
			storage:  common.StorageSize(generator.Storage),
		})
	}
	return snapshot, nil
}
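
// loadSnapshotSketch is an illustrative wrapper, not upstream code: it shows
// how loadSnapshot is typically driven, with the cache size in megabytes and
// recovery left disabled so any head mismatch forces a full rebuild rather
// than being tolerated.
func loadSnapshotSketch(diskdb ethdb.KeyValueStore, triedb *trie.Database, headRoot common.Hash) (snapshot, error) {
	// 256 MB of fastcache for the disk layer; the chain's current state root
	// is what the journal head must match
	return loadSnapshot(diskdb, triedb, 256, headRoot, false)
}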

// loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
// diff and verifying that it can be linked to the requested parent.
func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
	// Read the next diff journal entry
	var root common.Hash
	if err := r.Decode(&root); err != nil {
		// The first read may fail with EOF, marking the end of the journal
		if err == io.EOF {
			return parent, nil
		}
		return nil, fmt.Errorf("load diff root: %v", err)
	}
	var destructs []journalDestruct
	if err := r.Decode(&destructs); err != nil {
		return nil, fmt.Errorf("load diff destructs: %v", err)
	}
	destructSet := make(map[common.Hash]struct{})
	for _, entry := range destructs {
		destructSet[entry.Hash] = struct{}{}
	}
	var accounts []journalAccount
	if err := r.Decode(&accounts); err != nil {
		return nil, fmt.Errorf("load diff accounts: %v", err)
	}
	accountData := make(map[common.Hash][]byte)
	for _, entry := range accounts {
		if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
			accountData[entry.Hash] = entry.Blob
		} else {
			accountData[entry.Hash] = nil
		}
	}
	var storage []journalStorage
	if err := r.Decode(&storage); err != nil {
		return nil, fmt.Errorf("load diff storage: %v", err)
	}
	storageData := make(map[common.Hash]map[common.Hash][]byte)
	for _, entry := range storage {
		slots := make(map[common.Hash][]byte)
		for i, key := range entry.Keys {
			if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
				slots[key] = entry.Vals[i]
			} else {
				slots[key] = nil
			}
		}
		storageData[entry.Hash] = slots
	}
	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
}
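
// normalizeJournalBlob spells out the nil-ness caveat handled inline above;
// it's an illustrative helper, not upstream code. RLP cannot distinguish nil
// from an empty byte slice, so a zero-length value read back from the journal
// is restored as nil, meaning "deleted" rather than "empty value".
func normalizeJournalBlob(blob []byte) []byte {
	if len(blob) > 0 {
		return blob
	}
	return nil
}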

// Journal writes the persistent layer generator stats into a buffer to be stored
// in the database as the snapshot journal.
func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	// If the snapshot is currently being generated, abort it
	var stats *generatorStats
	if dl.genAbort != nil {
		abort := make(chan *generatorStats)
		dl.genAbort <- abort

		if stats = <-abort; stats != nil {
			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
		}
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return common.Hash{}, ErrSnapshotStale
	}
	// Write out the generator marker. Note it's a standalone disk layer entry
	// which is not mixed into the journal, so it's fine if the generator is
	// persisted while the journal is not.
	entry := journalGenerator{
		Done:   dl.genMarker == nil,
		Marker: dl.genMarker,
	}
	if stats != nil {
		entry.Wiping = (stats.wiping != nil)
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		return common.Hash{}, err
	}
	log.Debug("Journalled disk layer", "root", dl.root, "complete", dl.genMarker == nil)
	rawdb.WriteSnapshotGenerator(dl.diskdb, blob)
	return dl.root, nil
}
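
// readGeneratorSketch is an illustrative counterpart to the write above, not
// upstream code: it performs the same standalone decode of the generator
// entry that loadAndParseJournal runs on startup.
func readGeneratorSketch(db ethdb.KeyValueStore) (journalGenerator, error) {
	var generator journalGenerator

	blob := rawdb.ReadSnapshotGenerator(db)
	if len(blob) == 0 {
		return generator, errors.New("missing snapshot generator")
	}
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		return generator, err
	}
	return generator, nil
}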

// Journal writes the memory layer contents into a buffer to be stored in the
// database as the snapshot journal.
func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	// Journal the parent first
	base, err := dl.parent.Journal(buffer)
	if err != nil {
		return common.Hash{}, err
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.Stale() {
		return common.Hash{}, ErrSnapshotStale
	}
	// Everything below was journalled, persist this layer too
	if err := rlp.Encode(buffer, dl.root); err != nil {
		return common.Hash{}, err
	}
	destructs := make([]journalDestruct, 0, len(dl.destructSet))
	for hash := range dl.destructSet {
		destructs = append(destructs, journalDestruct{Hash: hash})
	}
	if err := rlp.Encode(buffer, destructs); err != nil {
		return common.Hash{}, err
	}
	accounts := make([]journalAccount, 0, len(dl.accountData))
	for hash, blob := range dl.accountData {
		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
	}
	if err := rlp.Encode(buffer, accounts); err != nil {
		return common.Hash{}, err
	}
	storage := make([]journalStorage, 0, len(dl.storageData))
	for hash, slots := range dl.storageData {
		keys := make([]common.Hash, 0, len(slots))
		vals := make([][]byte, 0, len(slots))
		for key, val := range slots {
			keys = append(keys, key)
			vals = append(vals, val)
		}
		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
	}
	if err := rlp.Encode(buffer, storage); err != nil {
		return common.Hash{}, err
	}
	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
	return base, nil
}
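
// journalRoundTripSketch shows how the writers above pair with the loaders
// earlier in this file; it's an illustrative sketch, not upstream code (the
// real driver lives in the snapshot Tree, outside this file). The header is
// written first, then the topmost diff layer journals its whole ancestry
// bottom-up, exactly the order loadDiffLayer replays.
func journalRoundTripSketch(top *diffLayer, diskRoot common.Hash, db ethdb.KeyValueStore) error {
	journal := new(bytes.Buffer)

	// Header first: the version and the disk layer root, matching what
	// loadAndParseJournal expects at the head of the stream
	if err := rlp.Encode(journal, journalVersion); err != nil {
		return err
	}
	if err := rlp.Encode(journal, diskRoot); err != nil {
		return err
	}
	// Journal the layer stack; diffLayer.Journal recurses into its parent
	// first, so entries land bottom-up
	if _, err := top.Journal(journal); err != nil {
		return err
	}
	rawdb.WriteSnapshotJournal(db, journal.Bytes())
	return nil
}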

// LegacyJournal writes the persistent layer generator stats into a buffer
// to be stored in the database as the snapshot journal.
//
// Note it's the legacy version which is only used in testing right now.
func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
	// If the snapshot is currently being generated, abort it
	var stats *generatorStats
	if dl.genAbort != nil {
		abort := make(chan *generatorStats)
		dl.genAbort <- abort

		if stats = <-abort; stats != nil {
			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
		}
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return common.Hash{}, ErrSnapshotStale
	}
	// Write out the generator marker
	entry := journalGenerator{
		Done:   dl.genMarker == nil,
		Marker: dl.genMarker,
	}
	if stats != nil {
		entry.Wiping = (stats.wiping != nil)
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	if err := rlp.Encode(buffer, entry); err != nil {
		return common.Hash{}, err
	}
	return dl.root, nil
}

// LegacyJournal writes the memory layer contents into a buffer to be stored
// in the database as the snapshot journal.
//
// Note it's the legacy version which is only used in testing right now.
func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
	return dl.Journal(buffer)
}