Skip to content

Commit

Permalink
blockchain/txpool: improve validation of Authorities (EIP-7702: SetCode transactions)
Browse files Browse the repository at this point in the history
  • Loading branch information
tnasu committed Mar 5, 2025
1 parent f991368 commit 543c083
Show file tree
Hide file tree
Showing 4 changed files with 456 additions and 11 deletions.
6 changes: 6 additions & 0 deletions blockchain/tx_list.go
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,12 @@ func (l *txList) Overlaps(tx *types.Transaction) bool {
return l.txs.Get(tx.Nonce()) != nil
}

// Contains reports whether a transaction with the given nonce is
// present in the list.
func (l *txList) Contains(nonce uint64) bool {
	tx := l.txs.Get(nonce)
	return tx != nil
}

// Add tries to insert a new transaction into the list, returning whether the
// transaction was accepted, and if yes, any previous transaction it replaced.
//
Expand Down
163 changes: 152 additions & 11 deletions blockchain/tx_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"fmt"
"math"
"math/big"
"slices"
"sort"
"sync"
"time"
Expand Down Expand Up @@ -72,9 +73,24 @@ var (
evictionInterval = time.Minute // Time interval to check for evictable transactions
statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats

txPoolIsFullErr = fmt.Errorf("txpool is full")

errNotAllowedAnchoringTx = errors.New("locally anchoring chaindata tx is not allowed in this node")

// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
// another remote transaction.
ErrTxPoolOverflow = errors.New("txpool is full")

// ErrInflightTxLimitReached is returned when the maximum number of in-flight
// transactions is reached for specific accounts.
ErrInflightTxLimitReached = errors.New("in-flight transaction limit reached for delegated accounts")

// ErrAuthorityReserved is returned if a transaction has an authorization
// signed by an address which already has in-flight transactions known to the
// pool.
ErrAuthorityReserved = errors.New("authority already reserved")

// ErrFutureReplacePending is returned if a future transaction replaces a pending
// one. Future transactions should only be able to replace other future transactions.
ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
)

var (
Expand Down Expand Up @@ -855,6 +871,16 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
}
}

if tx.Type() == types.TxTypeEthereumSetCode {
if len(tx.AuthList()) == 0 {
return errors.New("set code tx must have at least one authorization tuple")
}
}

if err := pool.validateAuth(tx); err != nil {
return err
}

// "tx.Validate()" conducts additional validation for each new txType.
// Validate humanReadable address when this tx has "true" in the humanReadable field.
// Validate accountKey when the this create or update an account
Expand All @@ -867,6 +893,45 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
return nil
}

// validateAuth verifies that the transaction complies with the code
// authorization restrictions introduced by the SetCode transaction type.
func (pool *TxPool) validateAuth(tx *types.Transaction) error {
	from, _ := types.Sender(pool.signer, tx) // already validated upstream

	// Accounts that are delegated (non-empty code) or that have a pending
	// authorization in the pool are limited to a single in-flight transaction.
	if pool.currentState.GetCodeHash(from) != types.EmptyCodeHash || len(pool.all.auths[from]) != 0 {
		var (
			inflight int
			replaces bool
		)
		if list := pool.pending[from]; list != nil {
			inflight += list.Len()
			replaces = list.Contains(tx.Nonce())
		}
		if list := pool.queue[from]; list != nil {
			inflight += list.Len()
			replaces = replaces || list.Contains(tx.Nonce())
		}
		// Replacing the single existing in-flight transaction of a delegated
		// account is still allowed; adding a second distinct nonce is not.
		if inflight >= 1 && !replaces {
			return ErrInflightTxLimitReached
		}
	}
	// No authority may collide with an account that already has pending or
	// queued transactions in the pool.
	for _, authority := range tx.SetCodeAuthorities() {
		if pool.pending[authority] != nil || pool.queue[authority] != nil {
			return ErrAuthorityReserved
		}
	}
	return nil
}

// getMaxTxFromQueueWhenNonceIsMissing finds and returns a transaction with max nonce in queue when a given Tx has missing nonce.
// Otherwise it returns a given Tx itself.
func (pool *TxPool) getMaxTxFromQueueWhenNonceIsMissing(tx *types.Transaction, from *common.Address) *types.Transaction {
Expand Down Expand Up @@ -1269,7 +1334,7 @@ func (pool *TxPool) checkAndAddTxs(txs []*types.Transaction, local bool) []error

if poolCapacity < numTxs {
for i := 0; i < numTxs-poolCapacity; i++ {
errs = append(errs, txPoolIsFullErr)
errs = append(errs, ErrTxPoolOverflow)
}
}

Expand Down Expand Up @@ -1772,16 +1837,19 @@ func (as *accountSet) add(addr common.Address) {
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
type txLookup struct {
	slots int
	lock  sync.RWMutex
	txs   map[common.Hash]*types.Transaction // all pooled transactions, keyed by hash

	auths map[common.Address][]common.Hash // all accounts with a pooled authorization
}

// newTxLookup returns a new txLookup structure with empty transaction and
// authority indexes, and resets the slots gauge to zero.
func newTxLookup() *txLookup {
	slotsGauge.Update(int64(0))
	return &txLookup{
		txs:   make(map[common.Hash]*types.Transaction),
		auths: make(map[common.Address][]common.Hash),
	}
}

Expand All @@ -1798,7 +1866,7 @@ func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
t.lock.RLock()
defer t.lock.RUnlock()

for key, value := range t.all {
for key, value := range t.txs {
if !f(key, value) {
break
}
Expand All @@ -1810,15 +1878,15 @@ func (t *txLookup) Get(hash common.Hash) *types.Transaction {
t.lock.RLock()
defer t.lock.RUnlock()

return t.all[hash]
return t.txs[hash]
}

// Count returns the current number of transactions in the lookup.
func (t *txLookup) Count() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.txs)
}

// Add adds a transaction to the lookup.
Expand All @@ -1829,21 +1897,94 @@ func (t *txLookup) Add(tx *types.Transaction) {
t.slots += numSlots(tx)
slotsGauge.Update(int64(t.slots))

t.all[tx.Hash()] = tx
t.txs[tx.Hash()] = tx
t.addAuthorities(tx)
}

// Remove removes a transaction from the lookup, untracking its authorities
// and releasing its slots. Removing an unknown hash is logged and ignored.
func (t *txLookup) Remove(hash common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	tx, ok := t.txs[hash]
	if !ok {
		logger.Error("No transaction found to be deleted", "hash", hash)
		return
	}
	t.removeAuthorities(tx)
	t.slots -= numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	delete(t.txs, hash)
}

// addAuthorities tracks the supplied tx in relation to each authority it
// specifies, skipping authorities that already reference this tx hash.
func (t *txLookup) addAuthorities(tx *types.Transaction) {
	hash := tx.Hash() // hoisted: identical for every authority
	for _, addr := range tx.SetCodeAuthorities() {
		// A missing key yields a nil slice, which append handles natively,
		// so no explicit empty-slice initialization is needed.
		list := t.auths[addr]
		if slices.Contains(list, hash) {
			// Don't add duplicates.
			continue
		}
		t.auths[addr] = append(list, hash)
	}
}

// removeAuthorities stops tracking the supplied tx in relation to its
// authorities, dropping each authority entry once its hash list is empty.
func (t *txLookup) removeAuthorities(tx *types.Transaction) {
	hash := tx.Hash()
	for _, addr := range tx.SetCodeAuthorities() {
		list := t.auths[addr]
		// Drop this tx from the authority's tracked hashes.
		if i := slices.Index(list, hash); i >= 0 {
			list = slices.Delete(list, i, i+1)
		} else {
			logger.Error("Authority with untracked tx", "addr", addr, "hash", hash)
		}
		if len(list) == 0 {
			// Nothing left for this authority; remove the entry entirely.
			delete(t.auths, addr)
			continue
		}
		t.auths[addr] = list
	}
}

// numSlots calculates the number of slots needed for a single transaction,
// rounding the slot count up via ceiling division.
func numSlots(tx *types.Transaction) int {
	size := tx.Size()
	return int((size + txSlotSize - 1) / txSlotSize)
}

// Clear drops every tracked transaction from the pool by re-initializing the
// lookup, price heap, pending/queue maps and cached nonces.
//
// NOTE(review): the comment below is carried over from upstream go-ethereum's
// subpool design; this pool does not implement a SubPool interface, and the
// visible code performs no journal rotation and no account unreservation —
// confirm whether the deadlock discussion still applies here.
func (pool *TxPool) Clear() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// unreserve each tracked account. Ideally, we could just clear the
	// reservation map in the parent txpool context. However, if we clear in
	// parent context, to avoid exposing the subpool lock, we have to lock the
	// reservations and then lock each subpool.
	//
	// This creates the potential for a deadlock situation:
	//
	// * TxPool.Clear locks the reservations
	// * a new transaction is received which locks the subpool mutex
	// * TxPool.Clear attempts to lock subpool mutex
	//
	// The transaction addition may attempt to reserve the sender addr which
	// can't happen until Clear releases the reservation lock. Clear cannot
	// acquire the subpool lock until the transaction addition is completed.
	pool.all = newTxLookup()
	pool.priced = newTxPricedList(pool.all)
	pool.pending = make(map[common.Address]*txList)
	pool.queue = make(map[common.Address]*txList)
	pool.pendingNonce = make(map[common.Address]uint64)
}
Loading

0 comments on commit 543c083

Please sign in to comment.