@@ -118,31 +118,31 @@ func (p *triePrefetcher) report() {
 		fetcher.wait() // ensure the fetcher's idle before poking in its internals
 
 		if fetcher.root == p.root {
-			p.accountLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
-			p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))
+			p.accountLoadReadMeter.Mark(int64(len(fetcher.seenReadAddr)))
+			p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWriteAddr)))
 
 			p.accountDupReadMeter.Mark(int64(fetcher.dupsRead))
 			p.accountDupWriteMeter.Mark(int64(fetcher.dupsWrite))
 			p.accountDupCrossMeter.Mark(int64(fetcher.dupsCross))
 
-			for _, key := range fetcher.used {
-				delete(fetcher.seenRead, string(key))
-				delete(fetcher.seenWrite, string(key))
+			for _, key := range fetcher.usedAddr {
+				delete(fetcher.seenReadAddr, key)
+				delete(fetcher.seenWriteAddr, key)
 			}
-			p.accountWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
+			p.accountWasteMeter.Mark(int64(len(fetcher.seenReadAddr) + len(fetcher.seenWriteAddr)))
 		} else {
-			p.storageLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
-			p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))
+			p.storageLoadReadMeter.Mark(int64(len(fetcher.seenReadSlot)))
+			p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWriteSlot)))
 
 			p.storageDupReadMeter.Mark(int64(fetcher.dupsRead))
 			p.storageDupWriteMeter.Mark(int64(fetcher.dupsWrite))
 			p.storageDupCrossMeter.Mark(int64(fetcher.dupsCross))
 
-			for _, key := range fetcher.used {
-				delete(fetcher.seenRead, string(key))
-				delete(fetcher.seenWrite, string(key))
+			for _, key := range fetcher.usedSlot {
+				delete(fetcher.seenReadSlot, key)
+				delete(fetcher.seenWriteSlot, key)
 			}
-			p.storageWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
+			p.storageWasteMeter.Mark(int64(len(fetcher.seenReadSlot) + len(fetcher.seenWriteSlot)))
 		}
 	}
 }
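In both branches the waste accounting works the same way: every key a subfetcher loaded is recorded in its seen* maps, the entries the block actually consumed (reported through used() further down) are deleted again, and whatever remains is counted as wasted prefetch work. A minimal, self-contained sketch of that bookkeeping with made-up data, not the change's own code and without the real metrics meters:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical example: three accounts were prefetched via reads...
	seenReadAddr := map[common.Address]struct{}{
		common.HexToAddress("0x01"): {},
		common.HexToAddress("0x02"): {},
		common.HexToAddress("0x03"): {},
	}
	// ...but the block only ended up using two of them.
	usedAddr := []common.Address{
		common.HexToAddress("0x01"),
		common.HexToAddress("0x02"),
	}
	for _, addr := range usedAddr {
		delete(seenReadAddr, addr) // drop everything that was actually consumed
	}
	// Whatever remains was prefetched but never used, i.e. wasted work.
	fmt.Println("wasted account loads:", len(seenReadAddr)) // prints 1
}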
@@ -158,7 +158,7 @@ func (p *triePrefetcher) report() {
 // upon the same contract, the parameters invoking this method may be
 // repeated.
 // 2. Finalize of the main account trie. This happens only once per block.
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte, read bool) error {
+func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, addrs []common.Address, slots []common.Hash, read bool) error {
 	// If the state item is only being read, but reads are disabled, return
 	if read && p.noreads {
 		return nil
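The signature change here is the heart of the diff: instead of a single keys [][]byte whose element length had to reveal whether an entry was an account address (20 bytes) or a storage slot key (32 bytes), callers now pass the two categories as typed slices. A self-contained sketch of how previously mixed keys map onto the new parameters; splitKeys is a hypothetical helper for illustration, not part of this change:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// splitKeys mirrors the idea behind the new prefetch signature: rather than
// inspecting len(key) downstream, the caller separates accounts from slots
// up front. Purely illustrative.
func splitKeys(keys [][]byte) (addrs []common.Address, slots []common.Hash) {
	for _, key := range keys {
		if len(key) == common.AddressLength {
			addrs = append(addrs, common.BytesToAddress(key))
		} else {
			slots = append(slots, common.BytesToHash(key))
		}
	}
	return addrs, slots
}

func main() {
	keys := [][]byte{
		common.HexToAddress("0xdeadbeef").Bytes(), // 20 bytes: an account address
		common.HexToHash("0x01").Bytes(),          // 32 bytes: a storage slot key
	}
	addrs, slots := splitKeys(keys)
	fmt.Printf("%d account(s), %d slot(s)\n", len(addrs), len(slots))
}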
@@ -175,7 +175,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm
 		fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
 		p.fetchers[id] = fetcher
 	}
-	return fetcher.schedule(keys, read)
+	return fetcher.schedule(addrs, slots, read)
 }
 
 // trie returns the trie matching the root hash, blocking until the fetcher of
@@ -195,10 +195,12 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
 
 // used marks a batch of state items used to allow creating statistics as to
 // how useful or wasteful the fetcher is.
-func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
+func (p *triePrefetcher) used(owner common.Hash, root common.Hash, usedAddr []common.Address, usedSlot []common.Hash) {
 	if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
 		fetcher.wait() // ensure the fetcher's idle before poking in its internals
-		fetcher.used = append(fetcher.used, used...)
+
+		fetcher.usedAddr = append(fetcher.usedAddr, usedAddr...)
+		fetcher.usedSlot = append(fetcher.usedSlot, usedSlot...)
 	}
 }
 
@@ -235,44 +237,50 @@ type subfetcher struct {
 	stop chan struct{} // Channel to interrupt processing
 	term chan struct{} // Channel to signal interruption
 
-	seenRead  map[string]struct{} // Tracks the entries already loaded via read operations
-	seenWrite map[string]struct{} // Tracks the entries already loaded via write operations
+	seenReadAddr  map[common.Address]struct{} // Tracks the accounts already loaded via read operations
+	seenWriteAddr map[common.Address]struct{} // Tracks the accounts already loaded via write operations
+	seenReadSlot  map[common.Hash]struct{}    // Tracks the storage already loaded via read operations
+	seenWriteSlot map[common.Hash]struct{}    // Tracks the storage already loaded via write operations
 
 	dupsRead  int // Number of duplicate preload tasks via reads only
 	dupsWrite int // Number of duplicate preload tasks via writes only
 	dupsCross int // Number of duplicate preload tasks via read-write-crosses
 
-	used [][]byte // Tracks the entries used in the end
+	usedAddr []common.Address // Tracks the accounts used in the end
+	usedSlot []common.Hash    // Tracks the storage used in the end
 }
 
 // subfetcherTask is a trie path to prefetch, tagged with whether it originates
 // from a read or a write request.
 type subfetcherTask struct {
 	read bool
-	key  []byte
+	addr *common.Address
+	slot *common.Hash
 }
 
 // newSubfetcher creates a goroutine to prefetch state items belonging to a
 // particular root hash.
 func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
 	sf := &subfetcher{
-		db:        db,
-		state:     state,
-		owner:     owner,
-		root:      root,
-		addr:      addr,
-		wake:      make(chan struct{}, 1),
-		stop:      make(chan struct{}),
-		term:      make(chan struct{}),
-		seenRead:  make(map[string]struct{}),
-		seenWrite: make(map[string]struct{}),
+		db:            db,
+		state:         state,
+		owner:         owner,
+		root:          root,
+		addr:          addr,
+		wake:          make(chan struct{}, 1),
+		stop:          make(chan struct{}),
+		term:          make(chan struct{}),
+		seenReadAddr:  make(map[common.Address]struct{}),
+		seenWriteAddr: make(map[common.Address]struct{}),
+		seenReadSlot:  make(map[common.Hash]struct{}),
+		seenWriteSlot: make(map[common.Hash]struct{}),
 	}
 	go sf.loop()
 	return sf
}
 
 // schedule adds a batch of trie keys to the queue to prefetch.
-func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
+func (sf *subfetcher) schedule(addrs []common.Address, slots []common.Hash, read bool) error {
 	// Ensure the subfetcher is still alive
 	select {
 	case <-sf.term:
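With the typed task split, a subfetcherTask carries either an account address or a storage slot key, and exactly one of the two pointer fields is non-nil; the rewritten loop() further down dispatches on that. A small self-contained sketch of the same nil-pointer dispatch pattern (the task type here is a stand-in for illustration, not the diff's subfetcherTask):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// task mirrors the shape of subfetcherTask: a read/write flag plus either an
// account address or a storage slot key, never both.
type task struct {
	read bool
	addr *common.Address
	slot *common.Hash
}

// describe dispatches on which pointer is set, just like the prefetch loop.
func describe(t task) string {
	if t.addr != nil {
		return "account " + t.addr.Hex()
	}
	return "storage slot " + t.slot.Hex()
}

func main() {
	addr := common.HexToAddress("0x01")
	slot := common.HexToHash("0x02")
	for _, t := range []task{{read: true, addr: &addr}, {read: false, slot: &slot}} {
		fmt.Println(describe(t))
	}
}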
@@ -281,8 +289,11 @@ func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
 	}
 	// Append the tasks to the current queue
 	sf.lock.Lock()
-	for _, key := range keys {
-		sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
+	for _, addr := range addrs {
+		sf.tasks = append(sf.tasks, &subfetcherTask{read: read, addr: &addr})
+	}
+	for _, slot := range slots {
+		sf.tasks = append(sf.tasks, &subfetcherTask{read: read, slot: &slot})
 	}
 	sf.lock.Unlock()
 
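One detail worth making explicit in the scheduling loops above: &addr and &slot take the address of the range variable, which yields a distinct pointer per task under Go 1.22's per-iteration loop-variable semantics (assuming the module's go directive is at 1.22 or newer); under the older shared-variable semantics every task would alias the same value. A standalone sketch showing the version-agnostic form with an explicit copy, purely to make that assumption visible:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	addrs := []common.Address{
		common.HexToAddress("0x01"),
		common.HexToAddress("0x02"),
	}
	var ptrs []*common.Address
	for _, addr := range addrs {
		addr := addr // explicit per-iteration copy; redundant under Go 1.22+, required before
		ptrs = append(ptrs, &addr)
	}
	for _, p := range ptrs {
		fmt.Println(p.Hex()) // prints both addresses, not the last one twice
	}
}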
@@ -378,35 +389,66 @@ func (sf *subfetcher) loop() {
 			sf.lock.Unlock()
 
 			for _, task := range tasks {
-				key := string(task.key)
-				if task.read {
-					if _, ok := sf.seenRead[key]; ok {
-						sf.dupsRead++
-						continue
-					}
-					if _, ok := sf.seenWrite[key]; ok {
-						sf.dupsCross++
-						continue
+				if task.addr != nil {
+					key := *task.addr
+					if task.read {
+						if _, ok := sf.seenReadAddr[key]; ok {
+							sf.dupsRead++
+							continue
+						}
+						if _, ok := sf.seenWriteAddr[key]; ok {
+							sf.dupsCross++
+							continue
+						}
+					} else {
+						if _, ok := sf.seenReadAddr[key]; ok {
+							sf.dupsCross++
+							continue
+						}
+						if _, ok := sf.seenWriteAddr[key]; ok {
+							sf.dupsWrite++
+							continue
+						}
 					}
 				} else {
-					if _, ok := sf.seenRead[key]; ok {
-						sf.dupsCross++
-						continue
-					}
-					if _, ok := sf.seenWrite[key]; ok {
-						sf.dupsWrite++
-						continue
+					key := *task.slot
+					if task.read {
+						if _, ok := sf.seenReadSlot[key]; ok {
+							sf.dupsRead++
+							continue
+						}
+						if _, ok := sf.seenWriteSlot[key]; ok {
+							sf.dupsCross++
+							continue
+						}
+					} else {
+						if _, ok := sf.seenReadSlot[key]; ok {
+							sf.dupsCross++
+							continue
+						}
+						if _, ok := sf.seenWriteSlot[key]; ok {
+							sf.dupsWrite++
+							continue
+						}
 					}
 				}
-				if len(task.key) == common.AddressLength {
-					sf.trie.GetAccount(common.BytesToAddress(task.key))
+				if task.addr != nil {
+					sf.trie.GetAccount(*task.addr)
 				} else {
-					sf.trie.GetStorage(sf.addr, task.key)
+					sf.trie.GetStorage(sf.addr, (*task.slot)[:])
 				}
 				if task.read {
-					sf.seenRead[key] = struct{}{}
+					if task.addr != nil {
+						sf.seenReadAddr[*task.addr] = struct{}{}
+					} else {
+						sf.seenReadSlot[*task.slot] = struct{}{}
+					}
 				} else {
-					sf.seenWrite[key] = struct{}{}
+					if task.addr != nil {
+						sf.seenWriteAddr[*task.addr] = struct{}{}
+					} else {
+						sf.seenWriteSlot[*task.slot] = struct{}{}
+					}
 				}
 			}
 
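The expanded branching in loop() applies one dedup rule per key space: a repeat in the same mode bumps dupsRead or dupsWrite, a read following a write (or the reverse) bumps dupsCross, and only unseen keys fall through to the actual trie lookup. A sketch of that rule as a standalone generic helper, written here only to make the classification explicit (not code from this change):

package main

import "fmt"

// classify reports how a prefetch request for key should be counted against
// the seen-read/seen-write sets, mirroring the branch structure in loop().
// It returns "" when the key has not been seen and should be fetched.
func classify[K comparable](key K, read bool, seenRead, seenWrite map[K]struct{}) string {
	if read {
		if _, ok := seenRead[key]; ok {
			return "dupsRead"
		}
		if _, ok := seenWrite[key]; ok {
			return "dupsCross"
		}
	} else {
		if _, ok := seenRead[key]; ok {
			return "dupsCross"
		}
		if _, ok := seenWrite[key]; ok {
			return "dupsWrite"
		}
	}
	return ""
}

func main() {
	seenRead := map[string]struct{}{"a": {}}
	seenWrite := map[string]struct{}{"b": {}}
	fmt.Println(classify("a", true, seenRead, seenWrite))  // dupsRead
	fmt.Println(classify("a", false, seenRead, seenWrite)) // dupsCross
	fmt.Println(classify("b", false, seenRead, seenWrite)) // dupsWrite
	fmt.Println(classify("c", true, seenRead, seenWrite))  // "" -> fetch it
}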