diff --git a/Cargo.lock b/Cargo.lock index 3c6256d971bb1..40afbee64a743 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -81,9 +81,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom 0.2.11", "once_cell", @@ -92,9 +92,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" dependencies = [ "cfg-if", "getrandom 0.2.11", @@ -2290,6 +2290,7 @@ name = "aptos-indexer-grpc-server-framework" version = "1.0.0" dependencies = [ "anyhow", + "aptos-admin-service", "aptos-metrics-core", "aptos-runtimes", "async-trait", @@ -2798,6 +2799,7 @@ dependencies = [ "derivative", "move-binary-format", "move-core-types", + "move-vm-types", "proptest", "proptest-derive 0.4.0", "rayon", @@ -4173,6 +4175,8 @@ dependencies = [ "rand 0.7.3", "rayon", "regex", + "ring 0.16.20", + "rsa 0.9.6", "serde", "serde-big-array", "serde_bytes", @@ -4402,6 +4406,7 @@ dependencies = [ "either", "move-binary-format", "move-core-types", + "move-vm-types", "rand 0.7.3", "serde", "test-case", @@ -8547,7 +8552,7 @@ dependencies = [ "regex", "reqwest", "ring 0.16.20", - "rsa", + "rsa 0.6.1", "serde", "serde_json", "sha2 0.10.8", @@ -8597,7 +8602,7 @@ version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "114a100a9aa9f4c468a7b9e96626cdab267bb652660d8408e8f6d56d4c310edd" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "camino", "cargo_metadata 0.18.1", "cfg-if", @@ -8717,7 +8722,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.7", + "ahash 0.7.8", ] [[package]] @@ -8726,7 +8731,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", ] [[package]] @@ -8735,7 +8740,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "allocator-api2", ] @@ -9348,7 +9353,7 @@ version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "clap 4.4.14", "crossbeam-channel", "crossbeam-utils", @@ -9392,7 +9397,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ab388864246d58a276e60e7569a833d9cc4cd75c66e5ca77c177dad38e59996" dependencies = [ - "ahash 0.7.7", + "ahash 0.7.8", "dashmap", "hashbrown 0.12.3", "once_cell", @@ -11398,6 +11403,7 @@ dependencies = [ "bcs 0.1.4", "claims", "derivative", + "itertools 0.10.5", "move-binary-format", "move-core-types", "once_cell", @@ -12457,6 +12463,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "pkcs1" +version = "0.7.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der 0.7.8", + "pkcs8 0.10.2", + "spki 0.7.3", +] + [[package]] name = "pkcs8" version = "0.8.0" @@ -12989,7 +13006,7 @@ name = "processor" version = "1.0.0" source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=9936ec73cef251fb01fd2c47412e064cad3975c2#9936ec73cef251fb01fd2c47412e064cad3975c2" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "anyhow", "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=9936ec73cef251fb01fd2c47412e064cad3975c2)", "aptos-protos 1.1.2", @@ -13876,7 +13893,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "pkcs1", + "pkcs1 0.3.3", "pkcs8 0.8.0", "rand_core 0.6.4", "smallvec", @@ -13884,6 +13901,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rsa" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" +dependencies = [ + "const-oid 0.9.6", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1 0.7.5", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "signature 2.2.0", + "spki 0.7.3", + "subtle", + "zeroize", +] + [[package]] name = "rstack" version = "0.3.3" diff --git a/Cargo.toml b/Cargo.toml index 0b63823b02e9a..3874cd0802edf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -641,6 +641,7 @@ reqwest-retry = "0.2.1" ring = { version = "0.16.20", features = ["std"] } ripemd = "0.1.1" rocksdb = { version = "0.21.0", features = ["lz4"] } +rsa = { version = "0.9.6" } rstack-self = { version = "0.3.0", features = ["dw"], default_features = false } rstest = "0.15.0" rusty-fork = "0.3.0" diff --git a/api/doc/spec.json b/api/doc/spec.json index 01743f5643b0f..d3b99f3aabc8c 100644 --- a/api/doc/spec.json +++ b/api/doc/spec.json @@ -13433,7 +13433,7 @@ "ed25519": "#/components/schemas/PublicKey_string(HexEncodedBytes)", "secp256k1_ecdsa": "#/components/schemas/PublicKey_string(HexEncodedBytes)", "secp256r1_ecdsa": "#/components/schemas/PublicKey_string(HexEncodedBytes)", - "zk_id": "#/components/schemas/PublicKey_string(HexEncodedBytes)" + "oidb": "#/components/schemas/PublicKey_string(HexEncodedBytes)" } } }, @@ -13447,7 +13447,7 @@ "properties": { "type": { "type": "string", - "example": "zk_id" + "example": "oidb" } } }, @@ -13538,7 +13538,7 @@ "ed25519": "#/components/schemas/Signature_string(HexEncodedBytes)", "secp256k1_ecdsa": "#/components/schemas/Signature_string(HexEncodedBytes)", "web_authn": "#/components/schemas/Signature_string(HexEncodedBytes)", - "zk_id": "#/components/schemas/Signature_string(HexEncodedBytes)" + "oidb": "#/components/schemas/Signature_string(HexEncodedBytes)" } } }, @@ -13552,7 +13552,7 @@ "properties": { "type": { "type": "string", - "example": "zk_id" + "example": "oidb" } } }, diff --git a/api/doc/spec.yaml b/api/doc/spec.yaml index d24bc416dcde1..9d21ee68a6c7f 100644 --- a/api/doc/spec.yaml +++ b/api/doc/spec.yaml @@ -10115,7 +10115,7 @@ components: ed25519: '#/components/schemas/PublicKey_string(HexEncodedBytes)' secp256k1_ecdsa: '#/components/schemas/PublicKey_string(HexEncodedBytes)' secp256r1_ecdsa: '#/components/schemas/PublicKey_string(HexEncodedBytes)' - zk_id: '#/components/schemas/PublicKey_string(HexEncodedBytes)' + oidb: '#/components/schemas/PublicKey_string(HexEncodedBytes)' PublicKey_string(HexEncodedBytes): allOf: - type: object @@ -10124,7 +10124,7 @@ 
components: properties: type: type: string - example: zk_id + example: oidb - $ref: '#/components/schemas/HexEncodedBytes' RawTableItemRequest: type: object @@ -10181,7 +10181,7 @@ components: ed25519: '#/components/schemas/Signature_string(HexEncodedBytes)' secp256k1_ecdsa: '#/components/schemas/Signature_string(HexEncodedBytes)' web_authn: '#/components/schemas/Signature_string(HexEncodedBytes)' - zk_id: '#/components/schemas/Signature_string(HexEncodedBytes)' + oidb: '#/components/schemas/Signature_string(HexEncodedBytes)' Signature_string(HexEncodedBytes): allOf: - type: object @@ -10190,7 +10190,7 @@ components: properties: type: type: string - example: zk_id + example: oidb - $ref: '#/components/schemas/HexEncodedBytes' SingleKeySignature: type: object diff --git a/api/src/tests/accounts_test.rs b/api/src/tests/accounts_test.rs index ceb6ee8def93f..da00aeff218e7 100644 --- a/api/src/tests/accounts_test.rs +++ b/api/src/tests/accounts_test.rs @@ -237,8 +237,8 @@ async fn test_get_account_resources_with_pagination() { { println!("0x1::{}::{}", r.module, r.name); } - assert_eq!(resources.len(), 5); - assert_eq!(resources, all_resources[0..5].to_vec()); + assert_eq!(resources.len(), 4); + assert_eq!(resources, all_resources[0..4].to_vec()); // Make a request using the cursor. Assert the 5 results we get back are the next 5. let req = warp::test::request().method("GET").path(&format!( @@ -255,7 +255,7 @@ async fn test_get_account_resources_with_pagination() { let cursor_header = StateKeyWrapper::from_str(cursor_header.to_str().unwrap()).unwrap(); let resources: Vec = serde_json::from_slice(resp.body()).unwrap(); assert_eq!(resources.len(), 5); - assert_eq!(resources, all_resources[5..10].to_vec()); + assert_eq!(resources, all_resources[4..9].to_vec()); // Get the rest of the resources, assert there is no cursor now. let req = warp::test::request().method("GET").path(&format!( @@ -267,8 +267,8 @@ async fn test_get_account_resources_with_pagination() { assert_eq!(resp.status(), 200); assert!(!resp.headers().contains_key("X-Aptos-Cursor")); let resources: Vec = serde_json::from_slice(resp.body()).unwrap(); - assert_eq!(resources.len(), all_resources.len() - 10); - assert_eq!(resources, all_resources[10..].to_vec()); + assert_eq!(resources.len(), all_resources.len() - 9); + assert_eq!(resources, all_resources[9..].to_vec()); } // Same as the above test but for modules. 
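
For context on the spec changes above: the "zk_id" → "oidb" discriminator rename is the wire-format side of the `ZkId` → `Oidb` variant rename in `api/types/src/transaction.rs` just below; the JSON tag tracks the variant name, so renaming the variant renames the tag. A minimal sketch of that mechanism, assuming a serde-style adjacently tagged enum (this `Signature` is a stand-in for illustration, not the repo's actual poem-openapi union):

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the API's `Signature` union: the `type` tag is derived from
// the variant name, so renaming `ZkId` to `Oidb` changes the discriminator
// from "zk_id" to "oidb" without touching the hex-encoded payload.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(tag = "type", content = "value", rename_all = "snake_case")]
enum Signature {
    Ed25519(String),
    Oidb(String), // formerly `ZkId`, which serialized as "zk_id"
}

fn main() {
    let sig = Signature::Oidb("0xab".into());
    assert_eq!(
        serde_json::to_string(&sig).unwrap(),
        r#"{"type":"oidb","value":"0xab"}"#
    );
    // Payloads still tagged "zk_id" no longer deserialize, which is why the
    // generated spec.json/spec.yaml are regenerated in the same change.
    assert!(serde_json::from_str::<Signature>(r#"{"type":"zk_id","value":"0xab"}"#).is_err());
}
```
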
diff --git a/api/types/src/transaction.rs b/api/types/src/transaction.rs index 7d058586f261c..5a548669f1692 100755 --- a/api/types/src/transaction.rs +++ b/api/types/src/transaction.rs @@ -20,6 +20,7 @@ use aptos_types::{ block_metadata::BlockMetadata, block_metadata_ext::BlockMetadataExt, contract_event::{ContractEvent, EventWithVersion}, + oidb, transaction::{ authenticator::{ AccountAuthenticator, AnyPublicKey, AnySignature, MultiKey, MultiKeyAuthenticator, @@ -28,7 +29,6 @@ use aptos_types::{ webauthn::{PartialAuthenticatorAssertionResponse, MAX_WEBAUTHN_SIGNATURE_BYTES}, Script, SignedTransaction, TransactionOutput, TransactionWithProof, }, - zkid, }; use once_cell::sync::Lazy; use poem_openapi::{Object, Union}; @@ -1198,24 +1198,24 @@ impl VerifyInput for WebAuthnSignature { } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Object)] -pub struct ZkIdSignature { +pub struct OidbSignature { pub public_key: HexEncodedBytes, pub signature: HexEncodedBytes, } -impl VerifyInput for ZkIdSignature { +impl VerifyInput for OidbSignature { fn verify(&self) -> anyhow::Result<()> { let public_key_len = self.public_key.inner().len(); let signature_len = self.signature.inner().len(); - if public_key_len > zkid::ZkIdPublicKey::MAX_LEN { + if public_key_len > oidb::OidbPublicKey::MAX_LEN { bail!( - "zkID public key length is greater than the maximum number of {} bytes: found {} bytes", - zkid::ZkIdPublicKey::MAX_LEN, public_key_len + "OIDB public key length is greater than the maximum number of {} bytes: found {} bytes", + oidb::OidbPublicKey::MAX_LEN, public_key_len ) - } else if signature_len > zkid::ZkIdSignature::MAX_LEN { + } else if signature_len > oidb::OidbSignature::MAX_LEN { bail!( - "zkID signature length is greater than the maximum number of {} bytes: found {} bytes", - zkid::ZkIdSignature::MAX_LEN, signature_len + "OIDB signature length is greater than the maximum number of {} bytes: found {} bytes", + oidb::OidbSignature::MAX_LEN, signature_len ) } else { Ok(()) @@ -1230,7 +1230,7 @@ pub enum Signature { Ed25519(HexEncodedBytes), Secp256k1Ecdsa(HexEncodedBytes), WebAuthn(HexEncodedBytes), - ZkId(HexEncodedBytes), + Oidb(HexEncodedBytes), } impl TryFrom for AnySignature { @@ -1241,7 +1241,7 @@ impl TryFrom for AnySignature { Signature::Ed25519(s) => AnySignature::ed25519(s.inner().try_into()?), Signature::Secp256k1Ecdsa(s) => AnySignature::secp256k1_ecdsa(s.inner().try_into()?), Signature::WebAuthn(s) => AnySignature::webauthn(s.inner().try_into()?), - Signature::ZkId(s) => AnySignature::zkid(s.inner().try_into()?), + Signature::Oidb(s) => AnySignature::oidb(s.inner().try_into()?), }) } } @@ -1258,7 +1258,7 @@ impl From for Signature { AnySignature::WebAuthn { signature } => { Signature::WebAuthn(signature.to_bytes().to_vec().into()) }, - AnySignature::ZkId { signature } => Signature::ZkId(signature.to_bytes().into()), + AnySignature::OIDB { signature } => Signature::Oidb(signature.to_bytes().into()), } } } @@ -1270,7 +1270,7 @@ pub enum PublicKey { Ed25519(HexEncodedBytes), Secp256k1Ecdsa(HexEncodedBytes), Secp256r1Ecdsa(HexEncodedBytes), - ZkId(HexEncodedBytes), + Oidb(HexEncodedBytes), } impl TryFrom for AnyPublicKey { @@ -1281,7 +1281,7 @@ impl TryFrom for AnyPublicKey { PublicKey::Ed25519(p) => AnyPublicKey::ed25519(p.inner().try_into()?), PublicKey::Secp256k1Ecdsa(p) => AnyPublicKey::secp256k1_ecdsa(p.inner().try_into()?), PublicKey::Secp256r1Ecdsa(p) => AnyPublicKey::secp256r1_ecdsa(p.inner().try_into()?), - PublicKey::ZkId(p) => 
AnyPublicKey::zkid(p.inner().try_into()?), + PublicKey::Oidb(p) => AnyPublicKey::oidb(p.inner().try_into()?), }) } } @@ -1298,7 +1298,7 @@ impl From for PublicKey { AnyPublicKey::Secp256r1Ecdsa { public_key } => { PublicKey::Secp256r1Ecdsa(public_key.to_bytes().to_vec().into()) }, - AnyPublicKey::ZkId { public_key } => PublicKey::ZkId(public_key.to_bytes().into()), + AnyPublicKey::OIDB { public_key } => PublicKey::Oidb(public_key.to_bytes().into()), } } } @@ -1330,7 +1330,7 @@ impl VerifyInput for SingleKeySignature { signature: s.clone(), } .verify(), - (PublicKey::ZkId(p), Signature::ZkId(s)) => ZkIdSignature { + (PublicKey::Oidb(p), Signature::Oidb(s)) => OidbSignature { public_key: p.clone(), signature: s.clone(), } @@ -1374,12 +1374,12 @@ impl TryFrom for AccountAuthenticator { )?; AnyPublicKey::secp256r1_ecdsa(key) }, - PublicKey::ZkId(p) => { + PublicKey::Oidb(p) => { let key = p .inner() .try_into() - .context("Failed to parse given public_key bytes as ZkIdPublicKey")?; - AnyPublicKey::zkid(key) + .context("Failed to parse given public_key bytes as OidbPublicKey")?; + AnyPublicKey::oidb(key) }, }; @@ -1405,12 +1405,12 @@ impl TryFrom for AccountAuthenticator { .context( "Failed to parse given signature bytes as PartialAuthenticatorAssertionResponse")?; AnySignature::webauthn(signature) }, - Signature::ZkId(s) => { + Signature::Oidb(s) => { let signature = s .inner() .try_into() - .context("Failed to parse given signature bytes as ZkIdSignature")?; - AnySignature::zkid(signature) + .context("Failed to parse given signature bytes as OidbSignature")?; + AnySignature::oidb(signature) }, }; @@ -1475,12 +1475,12 @@ impl TryFrom for AccountAuthenticator { )?; AnyPublicKey::secp256r1_ecdsa(key) }, - PublicKey::ZkId(p) => { + PublicKey::Oidb(p) => { let key = p .inner() .try_into() - .context("Failed to parse given public_key bytes as ZkIdPublicKey")?; - AnyPublicKey::zkid(key) + .context("Failed to parse given public_key bytes as OidbPublicKey")?; + AnyPublicKey::oidb(key) }, }; public_keys.push(key); @@ -1508,12 +1508,12 @@ impl TryFrom for AccountAuthenticator { )?; AnySignature::webauthn(paar) }, - Signature::ZkId(s) => { + Signature::Oidb(s) => { let signature = s .inner() .try_into() - .context("Failed to parse given signature as ZkIdSignature")?; - AnySignature::zkid(signature) + .context("Failed to parse given signature as OidbSignature")?; + AnySignature::oidb(signature) }, }; signatures.push((indexed_signature.index, signature)); diff --git a/aptos-move/aptos-aggregator/src/delayed_change.rs b/aptos-move/aptos-aggregator/src/delayed_change.rs index df1fef613eec4..4535d891bd644 100644 --- a/aptos-move/aptos-aggregator/src/delayed_change.rs +++ b/aptos-move/aptos-aggregator/src/delayed_change.rs @@ -251,8 +251,9 @@ impl DelayedApplyEntry { #[cfg(test)] mod test { use super::*; - use crate::{bounded_math::SignedU128, types::DelayedFieldID}; + use crate::bounded_math::SignedU128; use claims::{assert_err, assert_ok}; + use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use DelayedApplyChange::*; use DelayedChange::*; use DelayedFieldValue::*; diff --git a/aptos-move/aptos-aggregator/src/delayed_field_extension.rs b/aptos-move/aptos-aggregator/src/delayed_field_extension.rs index 6fd72449a9194..bd696d489fe17 100644 --- a/aptos-move/aptos-aggregator/src/delayed_field_extension.rs +++ b/aptos-move/aptos-aggregator/src/delayed_field_extension.rs @@ -7,8 +7,8 @@ use crate::{ delta_change_set::DeltaWithMax, resolver::DelayedFieldResolver, types::{ - code_invariant_error, 
expect_ok, DelayedFieldID, DelayedFieldValue, - DelayedFieldsSpeculativeError, PanicOr, ReadPosition, + code_invariant_error, expect_ok, DelayedFieldValue, DelayedFieldsSpeculativeError, PanicOr, + ReadPosition, }, }; use aptos_types::delayed_fields::{ @@ -16,6 +16,7 @@ use aptos_types::delayed_fields::{ SnapshotToStringFormula, }; use move_binary_format::errors::PartialVMResult; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::collections::{btree_map::Entry, BTreeMap}; fn get_delayed_field_value_from_storage( diff --git a/aptos-move/aptos-aggregator/src/resolver.rs b/aptos-move/aptos-aggregator/src/resolver.rs index f91bc257c851e..5203e1ca30639 100644 --- a/aptos-move/aptos-aggregator/src/resolver.rs +++ b/aptos-move/aptos-aggregator/src/resolver.rs @@ -6,7 +6,7 @@ use crate::{ bounded_math::SignedU128, delta_change_set::{serialize, DeltaOp}, types::{ - code_invariant_error, DelayedFieldID, DelayedFieldValue, DelayedFieldsSpeculativeError, + code_invariant_error, DelayedFieldValue, DelayedFieldsSpeculativeError, DeltaApplicationFailureReason, PanicOr, }, }; @@ -21,6 +21,7 @@ use aptos_types::{ }; use move_binary_format::errors::{PartialVMError, PartialVMResult}; use move_core_types::{language_storage::StructTag, value::MoveTypeLayout, vm_status::StatusCode}; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{ collections::{BTreeMap, HashSet}, fmt::Debug, diff --git a/aptos-move/aptos-aggregator/src/tests/types.rs b/aptos-move/aptos-aggregator/src/tests/types.rs index 4b0a6d25b7b1a..2ff5950808d64 100644 --- a/aptos-move/aptos-aggregator/src/tests/types.rs +++ b/aptos-move/aptos-aggregator/src/tests/types.rs @@ -7,8 +7,7 @@ use crate::{ delta_change_set::serialize, resolver::{TAggregatorV1View, TDelayedFieldView}, types::{ - code_invariant_error, expect_ok, DelayedFieldID, DelayedFieldValue, - DelayedFieldsSpeculativeError, PanicOr, + code_invariant_error, expect_ok, DelayedFieldValue, DelayedFieldsSpeculativeError, PanicOr, }, }; use aptos_types::{ @@ -20,7 +19,7 @@ use aptos_types::{ }; use move_binary_format::errors::PartialVMResult; use move_core_types::{language_storage::StructTag, value::MoveTypeLayout}; -use move_vm_types::delayed_values::delayed_field_id::ExtractUniqueIndex; +use move_vm_types::delayed_values::delayed_field_id::{DelayedFieldID, ExtractUniqueIndex}; use std::{ cell::RefCell, collections::{BTreeMap, HashMap, HashSet}, diff --git a/aptos-move/aptos-aggregator/src/types.rs b/aptos-move/aptos-aggregator/src/types.rs index 8042070812b9c..a4530bb750cef 100644 --- a/aptos-move/aptos-aggregator/src/types.rs +++ b/aptos-move/aptos-aggregator/src/types.rs @@ -3,17 +3,15 @@ use crate::bounded_math::SignedU128; use aptos_logger::error; -// TODO[agg_v2](cleanup): After aggregators_v2 branch land, consolidate these, instead of using alias here -pub use aptos_types::delayed_fields::PanicError; +use aptos_types::delayed_fields::PanicError; use move_binary_format::errors::PartialVMError; use move_core_types::{ value::{IdentifierMappingKind, MoveTypeLayout}, vm_status::StatusCode, }; -pub use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use move_vm_types::{ delayed_values::{ - delayed_field_id::TryFromMoveValue, + delayed_field_id::{DelayedFieldID, TryFromMoveValue}, derived_string_snapshot::{ bytes_and_width_to_derived_string_struct, derived_string_struct_to_bytes_and_length, is_derived_string_struct_layout, diff --git a/aptos-move/aptos-release-builder/data/release.yaml 
b/aptos-move/aptos-release-builder/data/release.yaml index 135cd52e7f94f..27f89e14552fe 100644 --- a/aptos-move/aptos-release-builder/data/release.yaml +++ b/aptos-move/aptos-release-builder/data/release.yaml @@ -151,16 +151,16 @@ proposals: - FeatureFlag: enabled: - jwk_consensus - - name: step_12_enable_zkid + - name: step_12_enable_oidb metadata: title: "AIP-61: OpenID blockchain (OIDB) accounts" - description: "Enable validation of zkID transactions, allowing users to transact using their Web2 accounts" + description: "Enable validation of OIDB transactions, allowing users to transact using their Web2 accounts" discussion_url: "https://github.com/aptos-foundation/AIPs/issues/297" execution_mode: MultiStep update_sequence: - FeatureFlag: enabled: - - zk_id_signature + - oidb_signature - name: step_13_start_watching_google_jwks metadata: title: "Start JWK consensus for Google" diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index f343e0ca92950..df07ea5bffb7e 100644 --- a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -96,13 +96,14 @@ pub enum FeatureFlag { Bn254Structures, WebAuthnSignature, ReconfigureWithDkg, - ZkIdSignature, - ZkIdZkLessSignature, + OidbSignature, + OidbZkLessSignature, RemoveDetailedError, JwkConsensus, ConcurrentFungibleAssets, RefundableBytes, ObjectCodeDeployment, + MaxObjectNestingCheck, } fn generate_features_blob(writer: &CodeWriter, data: &[u64]) { @@ -257,13 +258,14 @@ impl From for AptosFeatureFlag { FeatureFlag::Bn254Structures => AptosFeatureFlag::BN254_STRUCTURES, FeatureFlag::WebAuthnSignature => AptosFeatureFlag::WEBAUTHN_SIGNATURE, FeatureFlag::ReconfigureWithDkg => AptosFeatureFlag::RECONFIGURE_WITH_DKG, - FeatureFlag::ZkIdSignature => AptosFeatureFlag::ZK_ID_SIGNATURES, - FeatureFlag::ZkIdZkLessSignature => AptosFeatureFlag::ZK_ID_ZKLESS_SIGNATURE, + FeatureFlag::OidbSignature => AptosFeatureFlag::OIDB_SIGNATURE, + FeatureFlag::OidbZkLessSignature => AptosFeatureFlag::OIDB_ZKLESS_SIGNATURE, FeatureFlag::RemoveDetailedError => AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH, FeatureFlag::JwkConsensus => AptosFeatureFlag::JWK_CONSENSUS, FeatureFlag::ConcurrentFungibleAssets => AptosFeatureFlag::CONCURRENT_FUNGIBLE_ASSETS, FeatureFlag::RefundableBytes => AptosFeatureFlag::REFUNDABLE_BYTES, FeatureFlag::ObjectCodeDeployment => AptosFeatureFlag::OBJECT_CODE_DEPLOYMENT, + FeatureFlag::MaxObjectNestingCheck => AptosFeatureFlag::MAX_OBJECT_NESTING_CHECK, } } } @@ -341,13 +343,14 @@ impl From for FeatureFlag { AptosFeatureFlag::BN254_STRUCTURES => FeatureFlag::Bn254Structures, AptosFeatureFlag::WEBAUTHN_SIGNATURE => FeatureFlag::WebAuthnSignature, AptosFeatureFlag::RECONFIGURE_WITH_DKG => FeatureFlag::ReconfigureWithDkg, - AptosFeatureFlag::ZK_ID_SIGNATURES => FeatureFlag::ZkIdSignature, - AptosFeatureFlag::ZK_ID_ZKLESS_SIGNATURE => FeatureFlag::ZkIdZkLessSignature, + AptosFeatureFlag::OIDB_SIGNATURE => FeatureFlag::OidbSignature, + AptosFeatureFlag::OIDB_ZKLESS_SIGNATURE => FeatureFlag::OidbZkLessSignature, AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH => FeatureFlag::RemoveDetailedError, AptosFeatureFlag::JWK_CONSENSUS => FeatureFlag::JwkConsensus, AptosFeatureFlag::CONCURRENT_FUNGIBLE_ASSETS => FeatureFlag::ConcurrentFungibleAssets, AptosFeatureFlag::REFUNDABLE_BYTES => FeatureFlag::RefundableBytes, AptosFeatureFlag::OBJECT_CODE_DEPLOYMENT => 
FeatureFlag::ObjectCodeDeployment, + AptosFeatureFlag::MAX_OBJECT_NESTING_CHECK => FeatureFlag::MaxObjectNestingCheck, } } } diff --git a/aptos-move/aptos-vm-types/Cargo.toml b/aptos-move/aptos-vm-types/Cargo.toml index ab17a99fafbc3..72f92e55e866b 100644 --- a/aptos-move/aptos-vm-types/Cargo.toml +++ b/aptos-move/aptos-vm-types/Cargo.toml @@ -24,6 +24,7 @@ claims = { workspace = true } either = { workspace = true } move-binary-format = { workspace = true } move-core-types = { workspace = true } +move-vm-types = { workspace = true } rand = { workspace = true } serde = { workspace = true } diff --git a/aptos-move/aptos-vm-types/src/change_set.rs b/aptos-move/aptos-vm-types/src/change_set.rs index f7eb506a66c30..5f79f116ef614 100644 --- a/aptos-move/aptos-vm-types/src/change_set.rs +++ b/aptos-move/aptos-vm-types/src/change_set.rs @@ -13,7 +13,7 @@ use aptos_aggregator::{ delayed_change::DelayedChange, delta_change_set::{serialize, DeltaOp}, resolver::AggregatorV1Resolver, - types::{code_invariant_error, DelayedFieldID}, + types::code_invariant_error, }; use aptos_types::{ contract_event::ContractEvent, @@ -34,6 +34,7 @@ use move_core_types::{ value::MoveTypeLayout, vm_status::StatusCode, }; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use rand::Rng; use std::{ collections::{ diff --git a/aptos-move/aptos-vm-types/src/resolver.rs b/aptos-move/aptos-vm-types/src/resolver.rs index 25c072a62d848..8f2fb475bb5ec 100644 --- a/aptos-move/aptos-vm-types/src/resolver.rs +++ b/aptos-move/aptos-vm-types/src/resolver.rs @@ -1,10 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use aptos_aggregator::{ - resolver::{TAggregatorV1View, TDelayedFieldView}, - types::DelayedFieldID, -}; +use aptos_aggregator::resolver::{TAggregatorV1View, TDelayedFieldView}; use aptos_types::{ serde_helper::bcs_utils::size_u32_as_uleb128, state_store::{ @@ -19,6 +16,7 @@ use aptos_types::{ use bytes::Bytes; use move_binary_format::errors::{PartialVMError, PartialVMResult}; use move_core_types::{language_storage::StructTag, value::MoveTypeLayout, vm_status::StatusCode}; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::collections::{BTreeMap, HashMap}; /// Allows to query resources from the state. 
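
The `DelayedFieldID` churn above (and in the files below) is mechanical: `aptos_aggregator::types` previously re-exported the type from `move_vm_types` (the deleted `TODO[agg_v2](cleanup)` comment in `types.rs` flagged the alias), and this change points every consumer at the defining crate instead. A minimal self-contained sketch of the pattern, with local modules standing in for the real crates:

```rust
// Stand-in for the defining crate: the ID type lives here.
mod move_vm_types {
    pub mod delayed_values {
        pub mod delayed_field_id {
            #[derive(Debug, Clone, Copy, PartialEq, Eq)]
            pub struct DelayedFieldID(u64);

            impl DelayedFieldID {
                pub fn new(id: u64) -> Self {
                    Self(id)
                }
            }
        }
    }
}

mod aptos_aggregator {
    pub mod types {
        // Before the cleanup this module re-exported the ID, so callers could
        // write `use aptos_aggregator::types::DelayedFieldID;`:
        //
        //     pub use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID;
        //
        // Afterwards the alias is gone: every consumer imports from the
        // defining crate, leaving one canonical path for the type.
    }
}

// The import style this diff converges on:
use crate::move_vm_types::delayed_values::delayed_field_id::DelayedFieldID;

fn main() {
    let id = DelayedFieldID::new(7);
    println!("{id:?}");
}
```

The same substitution accounts for the `move-vm-types` entries added to the dependency lists in `Cargo.lock` and `aptos-move/aptos-vm-types/Cargo.toml` earlier in the diff.
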
diff --git a/aptos-move/aptos-vm-types/src/tests/test_change_set.rs b/aptos-move/aptos-vm-types/src/tests/test_change_set.rs index 78dd909d718fc..35289fc0ba9b4 100644 --- a/aptos-move/aptos-vm-types/src/tests/test_change_set.rs +++ b/aptos-move/aptos-vm-types/src/tests/test_change_set.rs @@ -15,7 +15,6 @@ use aptos_aggregator::{ bounded_math::SignedU128, delayed_change::{DelayedApplyChange, DelayedChange}, delta_change_set::DeltaWithMax, - types::DelayedFieldID, }; use aptos_types::{ access_path::AccessPath, @@ -34,6 +33,7 @@ use move_core_types::{ value::MoveTypeLayout, vm_status::StatusCode, }; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{collections::BTreeMap, sync::Arc}; /// Testcases: diff --git a/aptos-move/aptos-vm-types/src/tests/utils.rs b/aptos-move/aptos-vm-types/src/tests/utils.rs index 0d5eee8ca441d..b69590b271a3b 100644 --- a/aptos-move/aptos-vm-types/src/tests/utils.rs +++ b/aptos-move/aptos-vm-types/src/tests/utils.rs @@ -10,7 +10,6 @@ use crate::{ use aptos_aggregator::{ delayed_change::DelayedChange, delta_change_set::{delta_add, DeltaOp}, - types::DelayedFieldID, }; use aptos_types::{ account_address::AccountAddress, @@ -26,6 +25,7 @@ use move_core_types::{ language_storage::{StructTag, TypeTag}, value::MoveTypeLayout, }; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{collections::BTreeMap, sync::Arc}; pub(crate) struct MockChangeSetChecker; diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 9630aa691aed0..bd5f2cd468a49 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -9,15 +9,16 @@ use crate::{ errors::{discarded_output, expect_only_successful_execution}, gas::{check_gas, get_gas_parameters}, move_vm_ext::{ - get_max_binary_format_version, AptosMoveResolver, MoveVmExt, RespawnedSession, SessionExt, - SessionId, + get_max_binary_format_version, get_max_identifier_size, AptosMoveResolver, MoveVmExt, + RespawnedSession, SessionExt, SessionId, }, + oidb_validation, sharded_block_executor::{executor_client::ExecutorClient, ShardedBlockExecutor}, system_module_names::*, transaction_metadata::TransactionMetadata, - transaction_validation, verifier, zkid_validation, VMExecutor, VMValidator, + transaction_validation, verifier, VMExecutor, VMValidator, }; -use anyhow::{anyhow, Result}; +use anyhow::anyhow; use aptos_block_executor::txn_commit_hook::NoOpTransactionCommitHook; use aptos_crypto::HashValue; use aptos_framework::{ @@ -49,16 +50,15 @@ use aptos_types::{ TimedFeatureOverride, TimedFeatures, TimedFeaturesBuilder, }, randomness::Randomness, - state_store::StateView, + state_store::{StateView, TStateView}, transaction::{ authenticator::AnySignature, signature_verified_transaction::SignatureVerifiedTransaction, BlockOutput, EntryFunction, ExecutionError, ExecutionStatus, ModuleBundle, Multisig, - MultisigTransactionPayload, SignatureCheckedTransaction, SignedTransaction, Transaction, - TransactionOutput, TransactionPayload, TransactionStatus, VMValidatorResult, + MultisigTransactionPayload, Script, SignatureCheckedTransaction, SignedTransaction, + Transaction, TransactionOutput, TransactionPayload, TransactionStatus, VMValidatorResult, ViewFunctionOutput, WriteSetPayload, }, vm_status::{AbortLocation, StatusCode, VMStatus}, - zkid::ZkpOrOpenIdSig, }; use aptos_utils::{aptos_try, return_on_failure}; use aptos_vm_logging::{log_schema::AdapterLogSchema, speculative_error, speculative_log}; @@ -76,7 +76,6 @@ use 
move_binary_format::{ compatibility::Compatibility, deserializer::DeserializerConfig, errors::{Location, PartialVMError, PartialVMResult, VMError, VMResult}, - file_format_common::{IDENTIFIER_SIZE_MAX, LEGACY_IDENTIFIER_SIZE_MAX}, CompiledModule, }; use move_core_types::{ @@ -171,7 +170,6 @@ pub struct AptosVM { gas_feature_version: u64, gas_params: Result, pub(crate) storage_gas_params: Result, - features: Features, timed_features: TimedFeatures, } @@ -217,7 +215,7 @@ impl AptosVM { misc_gas_params, gas_feature_version, chain_id.id(), - features.clone(), + features, timed_features.clone(), resolver, aggregator_v2_type_tagging, @@ -230,7 +228,6 @@ impl AptosVM { gas_feature_version, gas_params, storage_gas_params, - features, timed_features, } } @@ -243,6 +240,11 @@ impl AptosVM { self.move_vm.new_session(resolver, session_id) } + #[inline(always)] + fn features(&self) -> &Features { + self.move_vm.features() + } + /// Sets execution concurrency level when invoked the first time. pub fn set_concurrency_level_once(mut concurrency_level: usize) { concurrency_level = min(concurrency_level, num_cpus::get()); @@ -349,28 +351,6 @@ impl AptosVM { get_or_vm_startup_failure(&self.gas_params, &log_context) } - /// Generates a transaction output for a transaction that encountered errors during the - /// execution process. This is public for now only for tests. - pub fn failed_transaction_cleanup( - &self, - error_code: VMStatus, - gas_meter: &mut impl AptosGasMeter, - txn_data: &TransactionMetadata, - resolver: &impl AptosMoveResolver, - log_context: &AdapterLogSchema, - change_set_configs: &ChangeSetConfigs, - ) -> VMOutput { - self.failed_transaction_cleanup_and_keep_vm_status( - error_code, - gas_meter, - txn_data, - resolver, - log_context, - change_set_configs, - ) - .1 - } - pub fn as_move_resolver<'r, R: ExecutorView>( &self, executor_view: &'r R, @@ -378,7 +358,7 @@ impl AptosVM { StorageAdapter::new_with_config( executor_view, self.gas_feature_version, - &self.features, + self.features(), None, ) } @@ -390,7 +370,7 @@ impl AptosVM { StorageAdapter::new_with_config( executor_view, self.gas_feature_version, - &self.features, + self.features(), Some(executor_view), ) } @@ -400,12 +380,9 @@ impl AptosVM { gas_meter: &impl AptosGasMeter, storage_fee_refund: u64, ) -> FeeStatement { - let gas_used = txn_data - .max_gas_amount() - .checked_sub(gas_meter.balance()) - .expect("Balance should always be less than or equal to max gas amount"); + let gas_used = Self::gas_used(txn_data.max_gas_amount(), gas_meter); FeeStatement::new( - gas_used.into(), + gas_used, u64::from(gas_meter.execution_gas_used()), u64::from(gas_meter.io_gas_used()), u64::from(gas_meter.storage_fee_used()), @@ -413,9 +390,9 @@ impl AptosVM { ) } - fn failed_transaction_cleanup_and_keep_vm_status( + pub(crate) fn failed_transaction_cleanup( &self, - error_code: VMStatus, + error_vm_status: VMStatus, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, resolver: &impl AptosMoveResolver, @@ -444,11 +421,13 @@ impl AptosVM { } } - match TransactionStatus::from_vm_status( - error_code.clone(), - self.features + let txn_status = TransactionStatus::from_vm_status( + error_vm_status.clone(), + self.features() .is_enabled(FeatureFlag::CHARGE_INVARIANT_VIOLATION), - ) { + ); + + match txn_status { TransactionStatus::Keep(status) => { // The transaction should be kept. Run the appropriate post transaction workflows // including epilogue. 
This runs a new session that ignores any side effects that @@ -469,12 +448,12 @@ impl AptosVM { }, Err(err) => discarded_output(err.status_code()), }; - (error_code, txn_output) + (error_vm_status, txn_output) + }, + TransactionStatus::Discard(status_code) => { + let discarded_output = discarded_output(status_code); + (error_vm_status, discarded_output) }, - TransactionStatus::Discard(status_code) => ( - VMStatus::error(status_code, None), - discarded_output(status_code), - ), TransactionStatus::Retry => unreachable!(), } } @@ -512,7 +491,7 @@ impl AptosVM { const ZERO_STORAGE_REFUND: u64 = 0; let is_account_init_for_sponsored_transaction = - is_account_init_for_sponsored_transaction(txn_data, &self.features, resolver)?; + is_account_init_for_sponsored_transaction(txn_data, self.features(), resolver)?; if is_account_init_for_sponsored_transaction { let mut session = self.new_session(resolver, SessionId::run_on_abort(txn_data)); @@ -542,7 +521,7 @@ impl AptosVM { { info!( *log_context, - "Failed during charge_change_set: {:?}. Most likely exceded gas limited.", err, + "Failed during charge_change_set: {:?}. Most likely exceeded gas limited.", err, ); }; @@ -590,7 +569,7 @@ impl AptosVM { session, gas_meter.balance(), fee_statement, - &self.features, + self.features(), txn_data, log_context, ) @@ -608,7 +587,7 @@ impl AptosVM { &mut session, gas_meter.balance(), fee_statement, - &self.features, + self.features(), txn_data, log_context, )?; @@ -652,7 +631,7 @@ impl AptosVM { session, gas_meter.balance(), fee_statement, - &self.features, + self.features(), txn_data, log_context, ) @@ -667,17 +646,44 @@ impl AptosVM { Ok((VMStatus::Executed, output)) } + fn validate_and_execute_script( + &self, + session: &mut SessionExt, + // Note: cannot use AptosGasMeter because it is not implemented for + // UnmeteredGasMeter. + gas_meter: &mut impl GasMeter, + senders: Vec, + script: &Script, + ) -> Result { + let loaded_func = session.load_script(script.code(), script.ty_args().to_vec())?; + // TODO(Gerardo): consolidate the extended validation to verifier. + verifier::event_validation::verify_no_event_emission_in_script( + script.code(), + &session.get_vm_config().deserializer_config, + )?; + + let args = verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( + session, + senders, + convert_txn_args(script.args()), + &loaded_func, + self.features().is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), + )?; + + Ok(session.execute_script(script.code(), script.ty_args().to_vec(), args, gas_meter)?) 
+ } + fn validate_and_execute_entry_function( &self, session: &mut SessionExt, gas_meter: &mut impl AptosGasMeter, senders: Vec, - script_fn: &EntryFunction, + entry_fn: &EntryFunction, ) -> Result { let is_friend_or_private = session.load_function_def_is_friend_or_private( - script_fn.module(), - script_fn.function(), - script_fn.ty_args(), + entry_fn.module(), + entry_fn.function(), + entry_fn.ty_args(), )?; if is_friend_or_private { let txn_context = session @@ -686,23 +692,19 @@ impl AptosVM { txn_context.set_is_friend_or_private_entry_func(); } - let function = session.load_function( - script_fn.module(), - script_fn.function(), - script_fn.ty_args(), - )?; - let struct_constructors = self.features.is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS); + let function = + session.load_function(entry_fn.module(), entry_fn.function(), entry_fn.ty_args())?; let args = verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( session, senders, - script_fn.args().to_vec(), + entry_fn.args().to_vec(), &function, - struct_constructors, + self.features().is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), )?; Ok(session.execute_entry_function( - script_fn.module(), - script_fn.function(), - script_fn.ty_args().to_vec(), + entry_fn.module(), + entry_fn.function(), + entry_fn.ty_args().to_vec(), args, gas_meter, )?) @@ -719,7 +721,7 @@ impl AptosVM { new_published_modules_loaded: &mut bool, change_set_configs: &ChangeSetConfigs, ) -> Result<(VMStatus, VMOutput), VMStatus> { - fail_point!("move_adapter::execute_script_or_entry_function", |_| { + fail_point!("aptos_vm::execute_script_or_entry_function", |_| { Err(VMStatus::Error { status_code: StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, sub_status: Some(move_core_types::vm_status::sub_status::unknown_invariant_violation::EPARANOID_FAILURE), @@ -727,73 +729,48 @@ impl AptosVM { }) }); - // Run the execution logic - { - gas_meter.charge_intrinsic_gas_for_transaction(txn_data.transaction_size())?; - - match payload { - TransactionPayload::Script(script) => { - let loaded_func = - session.load_script(script.code(), script.ty_args().to_vec())?; - // Gerardo: consolidate the extended validation to verifier. - verifier::event_validation::verify_no_event_emission_in_script( - script.code(), - &session.get_vm_config().deserializer_config, - )?; + gas_meter.charge_intrinsic_gas_for_transaction(txn_data.transaction_size())?; - let args = - verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( - &mut session, - txn_data.senders(), - convert_txn_args(script.args()), - &loaded_func, - self.features.is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), - )?; - session.execute_script( - script.code(), - script.ty_args().to_vec(), - args, - gas_meter, - )?; - }, - TransactionPayload::EntryFunction(script_fn) => { - self.validate_and_execute_entry_function( - &mut session, - gas_meter, - txn_data.senders(), - script_fn, - )?; - }, + match payload { + TransactionPayload::Script(script) => { + self.validate_and_execute_script( + &mut session, + gas_meter, + txn_data.senders(), + script, + )?; + }, + TransactionPayload::EntryFunction(entry_fn) => { + self.validate_and_execute_entry_function( + &mut session, + gas_meter, + txn_data.senders(), + entry_fn, + )?; + }, - // Not reachable as this function should only be invoked for entry or script - // transaction payload. 
- _ => { - return Err(VMStatus::error(StatusCode::UNREACHABLE, None)); - }, - }; + // Not reachable as this function should only be invoked for entry or script + // transaction payload. + _ => unreachable!("Only scripts or entry functions are executed"), + }; - self.resolve_pending_code_publish( - &mut session, - gas_meter, - new_published_modules_loaded, - )?; + self.resolve_pending_code_publish(&mut session, gas_meter, new_published_modules_loaded)?; - let respawned_session = self.charge_change_set_and_respawn_session( - session, - resolver, - gas_meter, - change_set_configs, - txn_data, - )?; + let respawned_session = self.charge_change_set_and_respawn_session( + session, + resolver, + gas_meter, + change_set_configs, + txn_data, + )?; - self.success_transaction_cleanup( - respawned_session, - gas_meter, - txn_data, - log_context, - change_set_configs, - ) - } + self.success_transaction_cleanup( + respawned_session, + gas_meter, + txn_data, + log_context, + change_set_configs, + ) } fn charge_change_set( @@ -813,7 +790,7 @@ impl AptosVM { txn_data.gas_unit_price, resolver.as_executor_view(), )?; - if !self.features.is_storage_deletion_refund_enabled() { + if !self.features().is_storage_deletion_refund_enabled() { storage_refund = 0.into(); } @@ -1165,15 +1142,8 @@ impl AptosVM { /// Deserialize a module bundle. fn deserialize_module_bundle(&self, modules: &ModuleBundle) -> VMResult> { - let max_version = get_max_binary_format_version(&self.features, None); - let max_identifier_size = if self - .features - .is_enabled(FeatureFlag::LIMIT_MAX_IDENTIFIER_LENGTH) - { - IDENTIFIER_SIZE_MAX - } else { - LEGACY_IDENTIFIER_SIZE_MAX - }; + let max_version = get_max_binary_format_version(self.features(), None); + let max_identifier_size = get_max_identifier_size(self.features()); let config = DeserializerConfig::new(max_version, max_identifier_size); let mut result = vec![]; for module_blob in modules.iter() { @@ -1234,7 +1204,7 @@ impl AptosVM { true, true, !self - .features + .features() .is_enabled(FeatureFlag::TREAT_FRIEND_AS_PRIVATE), ), )); @@ -1283,13 +1253,14 @@ impl AptosVM { } } } - aptos_framework::verify_module_metadata(m, &self.features, &self.timed_features) + aptos_framework::verify_module_metadata(m, self.features(), &self.timed_features) .map_err(|err| Self::metadata_validation_error(&err.to_string()))?; } verifier::resource_groups::validate_resource_groups( session, modules, - self.features.is_enabled(FeatureFlag::SAFER_RESOURCE_GROUPS), + self.features() + .is_enabled(FeatureFlag::SAFER_RESOURCE_GROUPS), )?; verifier::event_validation::validate_module_events(session, modules)?; @@ -1307,7 +1278,7 @@ impl AptosVM { .finish(Location::Undefined) } - fn make_standard_gas_meter( + pub(crate) fn make_standard_gas_meter( &self, balance: Gas, log_context: &AdapterLogSchema, @@ -1340,29 +1311,9 @@ impl AptosVM { )); } - // zkID feature gating - let authenticators = aptos_types::zkid::get_zkid_authenticators(transaction); - match &authenticators { - Ok(authenticators) => { - for (_, sig) in authenticators { - if !self.features.is_zkid_enabled() - && matches!(sig.sig, ZkpOrOpenIdSig::Groth16Zkp { .. }) - { - return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); - } - if (!self.features.is_zkid_enabled() || !self.features.is_zkid_zkless_enabled()) - && matches!(sig.sig, ZkpOrOpenIdSig::OpenIdSig { .. 
}) - { - return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); - } - } - }, - Err(_) => { - return Err(VMStatus::error(StatusCode::INVALID_SIGNATURE, None)); - }, - } - - zkid_validation::validate_zkid_authenticators(&authenticators.unwrap(), resolver)?; + let authenticators = aptos_types::oidb::get_oidb_authenticators(transaction) + .map_err(|_| VMStatus::error(StatusCode::INVALID_SIGNATURE, None))?; + oidb_validation::validate_oidb_authenticators(&authenticators, self.features(), resolver)?; // The prologue MUST be run AFTER any validation. Otherwise you may run prologue and hit // SEQUENCE_NUMBER_TOO_NEW if there is more than one transaction from the same sender and @@ -1397,24 +1348,14 @@ impl AptosVM { self.move_vm.mark_loader_cache_as_invalid(); }; - let txn_status = TransactionStatus::from_vm_status( - err.clone(), - self.features - .is_enabled(FeatureFlag::CHARGE_INVARIANT_VIOLATION), - ); - if txn_status.is_discarded() { - let discarded_output = discarded_output(err.status_code()); - (err, discarded_output) - } else { - self.failed_transaction_cleanup_and_keep_vm_status( - err, - gas_meter, - txn_data, - resolver, - log_context, - &storage_gas_params.change_set_configs, - ) - } + self.failed_transaction_cleanup( + err, + gas_meter, + txn_data, + resolver, + log_context, + &storage_gas_params.change_set_configs, + ) } fn execute_user_transaction_impl( @@ -1447,7 +1388,7 @@ impl AptosVM { } let is_account_init_for_sponsored_transaction = - match is_account_init_for_sponsored_transaction(&txn_data, &self.features, resolver) { + match is_account_init_for_sponsored_transaction(&txn_data, self.features(), resolver) { Ok(result) => result, Err(err) => { let vm_status = err.into_vm_status(); @@ -1589,7 +1530,6 @@ impl AptosVM { txn_sender: Option, session_id: SessionId, ) -> Result { - let mut gas_meter = UnmeteredGasMeter; let change_set_configs = ChangeSetConfigs::unlimited_at_gas_feature_version(self.gas_feature_version); @@ -1624,23 +1564,12 @@ impl AptosVM { Some(sender) => vec![sender, *execute_as], }; - let loaded_func = - tmp_session.load_script(script.code(), script.ty_args().to_vec())?; - let args = - verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( - &mut tmp_session, - senders, - convert_txn_args(script.args()), - &loaded_func, - self.features.is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), - )?; - - return_on_failure!(tmp_session.execute_script( - script.code(), - script.ty_args().to_vec(), - args, - &mut gas_meter, - )); + self.validate_and_execute_script( + &mut tmp_session, + &mut UnmeteredGasMeter, + senders, + script, + )?; Ok(tmp_session.finish(&change_set_configs)?) 
}, } @@ -1730,7 +1659,7 @@ impl AptosVM { Ok((VMStatus::Executed, output)) } - pub(crate) fn process_block_prologue( + fn process_block_prologue( &self, resolver: &impl AptosMoveResolver, block_metadata: BlockMetadata, @@ -1772,7 +1701,7 @@ impl AptosVM { Ok((VMStatus::Executed, output)) } - pub(crate) fn process_block_prologue_ext( + fn process_block_prologue_ext( &self, resolver: &impl AptosMoveResolver, block_metadata_ext: BlockMetadataExt, @@ -1849,7 +1778,7 @@ impl AptosVM { } fn extract_module_metadata(&self, module: &ModuleId) -> Option> { - if self.features.is_enabled(FeatureFlag::VM_BINARY_FORMAT_V6) { + if self.features().is_enabled(FeatureFlag::VM_BINARY_FORMAT_V6) { aptos_framework::get_vm_metadata(&self.move_vm, module) } else { aptos_framework::get_vm_metadata_v0(&self.move_vm, module) @@ -1862,7 +1791,7 @@ impl AptosVM { func_name: Identifier, type_args: Vec, arguments: Vec>, - gas_budget: u64, + max_gas_amount: u64, ) -> ViewFunctionOutput { let resolver = state_view.as_move_resolver(); let vm = AptosVM::new( @@ -1870,13 +1799,13 @@ impl AptosVM { /*override_is_delayed_field_optimization_capable=*/ Some(false), ); let log_context = AdapterLogSchema::new(state_view.id(), 0); - let mut gas_meter = match Self::memory_tracked_gas_meter(&vm, &log_context, gas_budget) { + let mut gas_meter = match vm.make_standard_gas_meter(max_gas_amount.into(), &log_context) { Ok(gas_meter) => gas_meter, - Err(e) => return ViewFunctionOutput::new(Err(e), 0), + Err(e) => return ViewFunctionOutput::new(Err(anyhow::Error::msg(format!("{}", e))), 0), }; let mut session = vm.new_session(&resolver, SessionId::Void); - match Self::execute_view_function_in_vm( + let execution_result = Self::execute_view_function_in_vm( &mut session, &vm, module_id, @@ -1884,35 +1813,16 @@ impl AptosVM { type_args, arguments, &mut gas_meter, - ) { - Ok(result) => { - ViewFunctionOutput::new(Ok(result), Self::gas_used(gas_budget, &gas_meter)) - }, - Err(e) => ViewFunctionOutput::new(Err(e), Self::gas_used(gas_budget, &gas_meter)), + ); + let gas_used = Self::gas_used(max_gas_amount.into(), &gas_meter); + match execution_result { + Ok(result) => ViewFunctionOutput::new(Ok(result), gas_used), + Err(e) => ViewFunctionOutput::new(Err(e), gas_used), } } - fn memory_tracked_gas_meter( - vm: &AptosVM, - log_context: &AdapterLogSchema, - gas_budget: u64, - ) -> Result>> { - let gas_meter = MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( - vm.gas_feature_version, - get_or_vm_startup_failure(&vm.gas_params, log_context)? 
- .vm - .clone(), - get_or_vm_startup_failure(&vm.storage_gas_params, log_context)?.clone(), - gas_budget, - ))); - Ok(gas_meter) - } - - fn gas_used( - gas_budget: u64, - gas_meter: &MemoryTrackedGasMeter>, - ) -> u64 { - GasQuantity::new(gas_budget) + fn gas_used(max_gas_amount: Gas, gas_meter: &impl AptosGasMeter) -> u64 { + max_gas_amount .checked_sub(gas_meter.balance()) .expect("Balance should always be less than or equal to max gas amount") .into() @@ -1925,8 +1835,8 @@ impl AptosVM { func_name: Identifier, type_args: Vec, arguments: Vec>, - gas_meter: &mut MemoryTrackedGasMeter>, - ) -> Result>> { + gas_meter: &mut impl AptosGasMeter, + ) -> anyhow::Result>> { let func_inst = session.load_function(&module_id, &func_name, &type_args)?; let metadata = vm.extract_module_metadata(&module_id); let arguments = verifier::view_function::validate_view_function( @@ -1935,7 +1845,7 @@ impl AptosVM { func_name.as_ident_str(), &func_inst, metadata.as_ref().map(Arc::as_ref), - vm.features.is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), + vm.features().is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), )?; Ok(session @@ -1966,7 +1876,7 @@ impl AptosVM { self.gas_feature_version, resolver, txn_data, - &self.features, + self.features(), log_context, )?; @@ -2015,13 +1925,13 @@ impl AptosVM { txn: &SignatureVerifiedTransaction, resolver: &impl AptosMoveResolver, log_context: &AdapterLogSchema, - ) -> Result<(VMStatus, VMOutput, Option), VMStatus> { + ) -> Result<(VMStatus, VMOutput), VMStatus> { assert!(!self.is_simulation, "VM has to be created for execution"); if let SignatureVerifiedTransaction::Invalid(_) = txn { let vm_status = VMStatus::error(StatusCode::INVALID_SIGNATURE, None); let discarded_output = discarded_output(vm_status.status_code()); - return Ok((vm_status, discarded_output, None)); + return Ok((vm_status, discarded_output)); } Ok(match txn.expect_valid() { @@ -2029,7 +1939,7 @@ impl AptosVM { fail_point!("aptos_vm::execution::block_metadata"); let (vm_status, output) = self.process_block_prologue(resolver, block_metadata.clone(), log_context)?; - (vm_status, output, Some("block_prologue".to_string())) + (vm_status, output) }, Transaction::BlockMetadataExt(block_metadata_ext) => { fail_point!("aptos_vm::execution::block_metadata_ext"); @@ -2038,7 +1948,7 @@ impl AptosVM { block_metadata_ext.clone(), log_context, )?; - (vm_status, output, Some("block_prologue_ext".to_string())) + (vm_status, output) }, Transaction::GenesisTransaction(write_set_payload) => { let (vm_status, output) = self.process_waypoint_change_set( @@ -2046,11 +1956,10 @@ impl AptosVM { write_set_payload.clone(), log_context, )?; - (vm_status, output, Some("waypoint_write_set".to_string())) + (vm_status, output) }, Transaction::UserTransaction(txn) => { fail_point!("aptos_vm::execution::user_transaction"); - let sender = txn.sender().to_hex(); let _timer = TXN_TOTAL_SECONDS.start_timer(); let (vm_status, output) = self.execute_user_transaction(resolver, txn, log_context); @@ -2121,17 +2030,17 @@ impl AptosVM { if let Some(label) = counter_label { USER_TRANSACTIONS_EXECUTED.with_label_values(&[label]).inc(); } - (vm_status, output, Some(sender)) + (vm_status, output) }, Transaction::StateCheckpoint(_) => { let status = TransactionStatus::Keep(ExecutionStatus::Success); let output = VMOutput::empty_with_status(status); - (VMStatus::Executed, output, Some("state_checkpoint".into())) + (VMStatus::Executed, output) }, Transaction::ValidatorTransaction(txn) => { let (vm_status, output) = 
self.process_validator_transaction(resolver, txn.clone(), log_context)?; - (vm_status, output, Some("validator_transaction".to_string())) + (vm_status, output) }, }) } @@ -2237,7 +2146,7 @@ impl VMValidator for AptosVM { let log_context = AdapterLogSchema::new(state_view.id(), 0); if !self - .features + .features() .is_enabled(FeatureFlag::SINGLE_SENDER_AUTHENTICATOR) { if let aptos_types::transaction::authenticator::TransactionAuthenticator::SingleSender{ .. } = transaction.authenticator_ref() { @@ -2245,7 +2154,7 @@ impl VMValidator for AptosVM { } } - if !self.features.is_enabled(FeatureFlag::WEBAUTHN_SIGNATURE) { + if !self.features().is_enabled(FeatureFlag::WEBAUTHN_SIGNATURE) { if let Ok(sk_authenticators) = transaction .authenticator_ref() .to_single_key_authenticators() @@ -2382,8 +2291,6 @@ fn vm_thread_safe() { fn assert_send() {} fn assert_sync() {} - use crate::AptosVM; - assert_send::(); assert_sync::(); assert_send::(); diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index 48ad0bafb22b8..76e303eeacdfd 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -10,7 +10,6 @@ use crate::{ }; use aptos_aggregator::{ delayed_change::DelayedChange, delta_change_set::DeltaOp, resolver::TAggregatorV1View, - types::DelayedFieldID, }; use aptos_block_executor::{ errors::BlockExecutionError, executor::BlockExecutor, @@ -38,6 +37,7 @@ use move_core_types::{ value::MoveTypeLayout, vm_status::{StatusCode, VMStatus}, }; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use once_cell::sync::OnceCell; use rayon::ThreadPool; use std::{ diff --git a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs index 10c0cb6197a53..3561599e0cd18 100644 --- a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs +++ b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs @@ -64,23 +64,12 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { .vm .execute_single_transaction(txn, &resolver, &log_context) { - Ok((vm_status, vm_output, sender)) => { + Ok((vm_status, vm_output)) => { if vm_output.status().is_discarded() { - match sender { - Some(s) => speculative_trace!( - &log_context, - format!( - "Transaction discarded, sender: {}, error: {:?}", - s, vm_status - ), - ), - None => { - speculative_trace!( - &log_context, - format!("Transaction malformed, error: {:?}", vm_status), - ) - }, - }; + speculative_trace!( + &log_context, + format!("Transaction discarded, status: {:?}", vm_status), + ); } if vm_status.status_code() == StatusCode::SPECULATIVE_EXECUTION_ABORT_ERROR { ExecutionStatus::SpeculativeExecutionAbortError( diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index 24470633a44dd..4c316a650d4d2 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -13,7 +13,7 @@ use crate::{ use aptos_aggregator::{ bounded_math::SignedU128, resolver::{TAggregatorV1View, TDelayedFieldView}, - types::{DelayedFieldID, DelayedFieldValue, DelayedFieldsSpeculativeError, PanicOr}, + types::{DelayedFieldValue, DelayedFieldsSpeculativeError, PanicOr}, }; use aptos_table_natives::{TableHandle, TableResolver}; use aptos_types::{ @@ -43,6 +43,7 @@ use move_core_types::{ resolver::{resource_size, ModuleResolver, ResourceResolver}, value::MoveTypeLayout, }; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use 
std::{ cell::RefCell, collections::{BTreeMap, HashMap, HashSet}, diff --git a/aptos-move/aptos-vm/src/lib.rs b/aptos-move/aptos-vm/src/lib.rs index 34b11d889c2b5..12bef53e2bcd8 100644 --- a/aptos-move/aptos-vm/src/lib.rs +++ b/aptos-move/aptos-vm/src/lib.rs @@ -111,6 +111,7 @@ mod errors; mod gas; pub mod move_vm_ext; pub mod natives; +mod oidb_validation; pub mod sharded_block_executor; pub mod system_module_names; pub mod testing; @@ -118,7 +119,6 @@ pub mod transaction_metadata; mod transaction_validation; pub mod validator_txns; pub mod verifier; -mod zkid_validation; pub use crate::aptos_vm::{AptosSimulationVM, AptosVM}; use crate::sharded_block_executor::{executor_client::ExecutorClient, ShardedBlockExecutor}; diff --git a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs index 940ad9a0f717d..c742b86b18de8 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs @@ -12,8 +12,7 @@ use aptos_aggregator::{ delta_change_set::DeltaWithMax, resolver::{TAggregatorV1View, TDelayedFieldView}, types::{ - code_invariant_error, expect_ok, DelayedFieldID, DelayedFieldValue, - DelayedFieldsSpeculativeError, PanicOr, + code_invariant_error, expect_ok, DelayedFieldValue, DelayedFieldsSpeculativeError, PanicOr, }, }; use aptos_gas_algebra::Fee; @@ -44,6 +43,7 @@ use move_core_types::{ value::MoveTypeLayout, vm_status::{err_msg, StatusCode, VMStatus}, }; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session.rs index 6ce6e0911cb1c..f98be6aabcfe2 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session.rs @@ -16,7 +16,7 @@ use aptos_framework::natives::{ use aptos_table_natives::{NativeTableContext, TableChangeSet}; use aptos_types::{ access_path::AccessPath, block_metadata::BlockMetadata, block_metadata_ext::BlockMetadataExt, - contract_event::ContractEvent, on_chain_config::Features, state_store::state_key::StateKey, + contract_event::ContractEvent, state_store::state_key::StateKey, validator_txn::ValidatorTransaction, }; use aptos_vm_types::{change_set::VMChangeSet, storage::change_set_configs::ChangeSetConfigs}; @@ -151,24 +151,54 @@ impl SessionId { pub fn as_uuid(&self) -> HashValue { self.hash() } + + pub(crate) fn into_script_hash(self) -> Vec { + match self { + Self::Txn { + sender: _, + sequence_number: _, + script_hash, + } + | Self::Prologue { + sender: _, + sequence_number: _, + script_hash, + } + | Self::Epilogue { + sender: _, + sequence_number: _, + script_hash, + } + | Self::RunOnAbort { + sender: _, + sequence_number: _, + script_hash, + } + | Self::ValidatorTxn { script_hash } => script_hash, + Self::BlockMeta { id: _ } + | Self::Genesis { id: _ } + | Self::Void + | Self::BlockMetaExt { id: _ } => vec![], + } + } } pub struct SessionExt<'r, 'l> { inner: Session<'r, 'l>, remote: &'r dyn AptosMoveResolver, - features: Arc, + is_storage_slot_metadata_enabled: bool, } impl<'r, 'l> SessionExt<'r, 'l> { pub fn new( inner: Session<'r, 'l>, remote: &'r dyn AptosMoveResolver, - features: Arc, + is_storage_slot_metadata_enabled: bool, ) -> Self { Self { inner, remote, - features, + is_storage_slot_metadata_enabled, } } @@ -219,10 +249,7 @@ impl<'r, 'l> SessionExt<'r, 'l> { let event_context: NativeEventContext = 
extensions.remove(); let events = event_context.into_events(); - let woc = WriteOpConverter::new( - self.remote, - self.features.is_storage_slot_metadata_enabled(), - ); + let woc = WriteOpConverter::new(self.remote, self.is_storage_slot_metadata_enabled); let change_set = Self::convert_change_set( &woc, diff --git a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs index 5cbec35d4cb7d..ea40b90149eba 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs @@ -15,10 +15,7 @@ use aptos_gas_algebra::DynamicExpression; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters}; use aptos_native_interface::SafeNativeBuilder; use aptos_table_natives::NativeTableContext; -use aptos_types::{ - chain_id::ChainId, - on_chain_config::{FeatureFlag, Features, TimedFeatureFlag, TimedFeatures}, -}; +use aptos_types::on_chain_config::{FeatureFlag, Features, TimedFeatureFlag, TimedFeatures}; use move_binary_format::{ deserializer::DeserializerConfig, errors::VMResult, @@ -29,12 +26,12 @@ use move_bytecode_verifier::VerifierConfig; use move_vm_runtime::{ config::VMConfig, move_vm::MoveVM, native_extensions::NativeContextExtensions, }; -use std::{ops::Deref, sync::Arc}; +use std::ops::Deref; pub struct MoveVmExt { inner: MoveVM, chain_id: u8, - features: Arc, + features: Features, } pub fn get_max_binary_format_version( @@ -135,7 +132,7 @@ impl MoveVmExt { resolver, )?, chain_id, - features: Arc::new(features), + features, }) } @@ -206,30 +203,9 @@ impl MoveVmExt { extensions.add(AlgebraContext::new()); extensions.add(NativeAggregatorContext::new(txn_hash, resolver, resolver)); extensions.add(RandomnessContext::new()); - - let script_hash = match session_id { - SessionId::Txn { - sender: _, - sequence_number: _, - script_hash, - } - | SessionId::Prologue { - sender: _, - sequence_number: _, - script_hash, - } - | SessionId::Epilogue { - sender: _, - sequence_number: _, - script_hash, - } => script_hash, - SessionId::ValidatorTxn { script_hash } => script_hash, - _ => vec![], - }; - extensions.add(NativeTransactionContext::new( txn_hash.to_vec(), - script_hash, + session_id.into_script_hash(), self.chain_id, )); extensions.add(NativeCodeContext::default()); @@ -243,12 +219,12 @@ impl MoveVmExt { SessionExt::new( self.inner.new_session_with_extensions(resolver, extensions), resolver, - self.features.clone(), + self.features.is_storage_slot_metadata_enabled(), ) } - pub fn get_chain_id(&self) -> ChainId { - ChainId::new(self.chain_id) + pub(crate) fn features(&self) -> &Features { + &self.features } } diff --git a/aptos-move/aptos-vm/src/natives.rs b/aptos-move/aptos-vm/src/natives.rs index d4146f1c681f1..5762c62ad2192 100644 --- a/aptos-move/aptos-vm/src/natives.rs +++ b/aptos-move/aptos-vm/src/natives.rs @@ -10,10 +10,7 @@ use aptos_aggregator::{ types::{DelayedFieldsSpeculativeError, PanicOr}, }; #[cfg(feature = "testing")] -use aptos_aggregator::{ - resolver::TDelayedFieldView, - types::{DelayedFieldID, DelayedFieldValue}, -}; +use aptos_aggregator::{resolver::TDelayedFieldView, types::DelayedFieldValue}; #[cfg(feature = "testing")] use aptos_framework::natives::randomness::RandomnessContext; #[cfg(feature = "testing")] @@ -43,6 +40,8 @@ use move_binary_format::errors::PartialVMResult; use move_core_types::{language_storage::StructTag, value::MoveTypeLayout}; use move_vm_runtime::native_functions::NativeFunctionTable; #[cfg(feature = "testing")] +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; 
+#[cfg(feature = "testing")] use std::{ collections::{BTreeMap, HashSet}, sync::Arc, diff --git a/aptos-move/aptos-vm/src/zkid_validation.rs b/aptos-move/aptos-vm/src/oidb_validation.rs similarity index 77% rename from aptos-move/aptos-vm/src/zkid_validation.rs rename to aptos-move/aptos-vm/src/oidb_validation.rs index b483eb84c83e1..655e697c3f124 100644 --- a/aptos-move/aptos-vm/src/zkid_validation.rs +++ b/aptos-move/aptos-vm/src/oidb_validation.rs @@ -5,14 +5,15 @@ use crate::move_vm_ext::AptosMoveResolver; use aptos_crypto::ed25519::Ed25519PublicKey; use aptos_types::{ - bn254_circom::{get_public_inputs_hash, Groth16VerificationKey}, invalid_signature, jwks::{jwk::JWK, PatchedJWKs}, - on_chain_config::{CurrentTimeMicroseconds, OnChainConfig}, + oidb::{ + get_public_inputs_hash, Configuration, Groth16VerificationKey, OidbPublicKey, + OidbSignature, ZkpOrOpenIdSig, + }, + on_chain_config::{CurrentTimeMicroseconds, Features, OnChainConfig}, transaction::authenticator::EphemeralPublicKey, vm_status::{StatusCode, VMStatus}, - zkid, - zkid::{Configuration, ZkIdPublicKey, ZkIdSignature, ZkpOrOpenIdSig}, }; use move_binary_format::errors::Location; use move_core_types::{language_storage::CORE_CODE_ADDRESS, move_resource::MoveStructType}; @@ -77,20 +78,20 @@ fn get_configs_onchain( get_resource_on_chain::(resolver) } -fn get_jwk_for_zkid_authenticator( +fn get_jwk_for_oidb_authenticator( jwks: &PatchedJWKs, - zkid_pub_key: &ZkIdPublicKey, - zkid_sig: &ZkIdSignature, + oidb_pub_key: &OidbPublicKey, + oidb_sig: &OidbSignature, ) -> Result { - let jwt_header = zkid_sig + let jwt_header = oidb_sig .parse_jwt_header() .map_err(|_| invalid_signature!("Failed to parse JWT header"))?; let jwk_move_struct = jwks - .get_jwk(&zkid_pub_key.iss, &jwt_header.kid) + .get_jwk(&oidb_pub_key.iss_val, &jwt_header.kid) .map_err(|_| { invalid_signature!(format!( "JWK for {} with KID {} was not found", - zkid_pub_key.iss, jwt_header.kid + oidb_pub_key.iss_val, jwt_header.kid )) })?; @@ -99,24 +100,36 @@ fn get_jwk_for_zkid_authenticator( Ok(jwk) } -pub fn validate_zkid_authenticators( - authenticators: &Vec<(ZkIdPublicKey, ZkIdSignature)>, +pub(crate) fn validate_oidb_authenticators( + authenticators: &Vec<(OidbPublicKey, OidbSignature)>, + features: &Features, resolver: &impl AptosMoveResolver, ) -> Result<(), VMStatus> { + // OIDB feature gating. + for (_, sig) in authenticators { + if !features.is_oidb_enabled() && matches!(sig.sig, ZkpOrOpenIdSig::Groth16Zkp { .. }) { + return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); + } + if (!features.is_oidb_enabled() || !features.is_oidb_zkless_enabled()) + && matches!(sig.sig, ZkpOrOpenIdSig::OpenIdSig { .. 
}) + { + return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); + } + } + if authenticators.is_empty() { return Ok(()); } let config = &get_configs_onchain(resolver)?; - - if authenticators.len() > config.max_zkid_signatures_per_txn as usize { - return Err(invalid_signature!("Too many zkID authenticators")); + if authenticators.len() > config.max_oidb_signatures_per_txn as usize { + return Err(invalid_signature!("Too many OIDB authenticators")); } let onchain_timestamp_obj = get_current_time_onchain(resolver)?; // Check the expiry timestamp on all authenticators first to fail fast - for (_, zkid_sig) in authenticators { - zkid_sig + for (_, oidb_sig) in authenticators { + oidb_sig .verify_expiry(&onchain_timestamp_obj) .map_err(|_| invalid_signature!("The ephemeral keypair has expired"))?; } @@ -135,10 +148,10 @@ pub fn validate_zkid_authenticators( )), }; - for (zkid_pub_key, zkid_sig) in authenticators { - let jwk = get_jwk_for_zkid_authenticator(&patched_jwks, zkid_pub_key, zkid_sig)?; + for (oidb_pub_key, oidb_sig) in authenticators { + let jwk = get_jwk_for_oidb_authenticator(&patched_jwks, oidb_pub_key, oidb_sig)?; - match &zkid_sig.sig { + match &oidb_sig.sig { ZkpOrOpenIdSig::Groth16Zkp(proof) => match jwk { JWK::RSA(rsa_jwk) => { if proof.exp_horizon_secs > config.max_exp_horizon_secs { @@ -148,10 +161,7 @@ pub fn validate_zkid_authenticators( // If an `aud` override was set for account recovery purposes, check that it is // in the allow-list on-chain. if proof.override_aud_val.is_some() { - zkid::is_allowed_override_aud( - config, - proof.override_aud_val.as_ref().unwrap(), - )?; + config.is_allowed_override_aud(proof.override_aud_val.as_ref().unwrap())?; } // The training wheels signature is only checked if a training wheels PK is set on chain @@ -163,14 +173,10 @@ pub fn validate_zkid_authenticators( })?; } - let public_inputs_hash = get_public_inputs_hash( - zkid_sig, - zkid_pub_key, - &rsa_jwk, - proof.exp_horizon_secs, - config, - ) - .map_err(|_| invalid_signature!("Could not compute public inputs hash"))?; + let public_inputs_hash = + get_public_inputs_hash(oidb_sig, oidb_pub_key, &rsa_jwk, config).map_err( + |_| invalid_signature!("Could not compute public inputs hash"), + )?; proof .verify_proof(public_inputs_hash, pvk) .map_err(|_| invalid_signature!("Proof verification failed"))?; @@ -182,9 +188,9 @@ pub fn validate_zkid_authenticators( JWK::RSA(rsa_jwk) => { openid_sig .verify_jwt_claims( - zkid_sig.exp_timestamp_secs, - &zkid_sig.ephemeral_pubkey, - zkid_pub_key, + oidb_sig.exp_timestamp_secs, + &oidb_sig.ephemeral_pubkey, + oidb_pub_key, config, ) .map_err(|_| invalid_signature!("OpenID claim verification failed"))?; @@ -201,7 +207,7 @@ pub fn validate_zkid_authenticators( // // We are now ready to verify the RSA signature openid_sig - .verify_jwt_signature(rsa_jwk, &zkid_sig.jwt_header) + .verify_jwt_signature(&rsa_jwk, &oidb_sig.jwt_header_b64) .map_err(|_| { invalid_signature!( "RSA signature verification failed for OpenIdSig" diff --git a/aptos-move/aptos-vm/src/testing.rs b/aptos-move/aptos-vm/src/testing.rs index 37d3b71e2df99..baae2c3f98705 100644 --- a/aptos-move/aptos-vm/src/testing.rs +++ b/aptos-move/aptos-vm/src/testing.rs @@ -1,7 +1,15 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::{ + aptos_vm::get_or_vm_startup_failure, data_cache::AsMoveResolver, + transaction_metadata::TransactionMetadata, AptosVM, +}; +use aptos_types::{state_store::StateView, transaction::SignedTransaction}; +use 
aptos_vm_logging::log_schema::AdapterLogSchema; +use aptos_vm_types::output::VMOutput; use move_binary_format::errors::VMResult; +use move_core_types::vm_status::VMStatus; #[derive(Debug, Eq, Hash, PartialEq)] pub enum InjectedError { @@ -47,3 +55,34 @@ pub mod testing_only { }) } } + +impl AptosVM { + #[cfg(any(test, feature = "testing"))] + pub fn test_failed_transaction_cleanup( + &self, + error_vm_status: VMStatus, + txn: &SignedTransaction, + state_view: &impl StateView, + gas_meter_balance: u64, + ) -> (VMStatus, VMOutput) { + let txn_data = TransactionMetadata::new(txn); + let log_context = AdapterLogSchema::new(state_view.id(), 0); + + let mut gas_meter = self + .make_standard_gas_meter(gas_meter_balance.into(), &log_context) + .expect("Should be able to create a gas meter for tests"); + let change_set_configs = &get_or_vm_startup_failure(&self.storage_gas_params, &log_context) + .expect("Storage gas parameters should exist for tests") + .change_set_configs; + + let resolver = state_view.as_move_resolver(); + self.failed_transaction_cleanup( + error_vm_status, + &mut gas_meter, + &txn_data, + &resolver, + &log_context, + change_set_configs, + ) + } +} diff --git a/aptos-move/aptos-vm/src/transaction_metadata.rs b/aptos-move/aptos-vm/src/transaction_metadata.rs index bd39cc629c822..cdc83dba3f648 100644 --- a/aptos-move/aptos-vm/src/transaction_metadata.rs +++ b/aptos-move/aptos-vm/src/transaction_metadata.rs @@ -2,14 +2,13 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey}; +use aptos_crypto::HashValue; use aptos_gas_algebra::{FeePerGasUnit, Gas, NumBytes}; use aptos_types::{ account_address::AccountAddress, chain_id::ChainId, - transaction::{authenticator::AuthenticationKey, SignedTransaction, TransactionPayload}, + transaction::{SignedTransaction, TransactionPayload}, }; -use std::convert::TryFrom; pub struct TransactionMetadata { pub sender: AccountAddress, @@ -118,30 +117,6 @@ impl TransactionMetadata { } pub fn is_multi_agent(&self) -> bool { - !(self.secondary_signers.is_empty() && self.fee_payer.is_none()) - } -} - -impl Default for TransactionMetadata { - fn default() -> Self { - let mut buf = [0u8; Ed25519PrivateKey::LENGTH]; - buf[Ed25519PrivateKey::LENGTH - 1] = 1; - let public_key = Ed25519PrivateKey::try_from(&buf[..]).unwrap().public_key(); - TransactionMetadata { - sender: AccountAddress::ZERO, - authentication_key: AuthenticationKey::ed25519(&public_key).to_vec(), - secondary_signers: vec![], - secondary_authentication_keys: vec![], - sequence_number: 0, - fee_payer: None, - fee_payer_authentication_key: None, - max_gas_amount: 100_000_000.into(), - gas_unit_price: 0.into(), - transaction_size: 0.into(), - expiration_timestamp_secs: 0, - chain_id: ChainId::test(), - script_hash: vec![], - script_size: NumBytes::zero(), - } + !self.secondary_signers.is_empty() || self.fee_payer.is_some() } } diff --git a/aptos-move/block-executor/src/captured_reads.rs b/aptos-move/block-executor/src/captured_reads.rs index d70a9bab34ed4..b9c9929434c6b 100644 --- a/aptos-move/block-executor/src/captured_reads.rs +++ b/aptos-move/block-executor/src/captured_reads.rs @@ -756,9 +756,9 @@ impl UnsyncReadSet { mod test { use super::*; use crate::proptest_types::types::{raw_metadata, KeyType, MockEvent, ValueType}; - use aptos_aggregator::types::DelayedFieldID; use aptos_mvhashmap::types::StorageVersion; use claims::{assert_err, assert_gt, assert_matches, 
assert_none, assert_ok, assert_some_eq}; + use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use test_case::test_case; #[test] diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index b81992f7f4c44..9bd38c5f2bfc4 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -10,7 +10,6 @@ use aptos_aggregator::{ delayed_change::DelayedChange, delta_change_set::{delta_add, delta_sub, serialize, DeltaOp}, resolver::TAggregatorV1View, - types::DelayedFieldID, }; use aptos_mvhashmap::types::TxnIndex; use aptos_types::{ @@ -34,6 +33,7 @@ use aptos_vm_types::resolver::{TExecutorView, TResourceGroupView}; use bytes::Bytes; use claims::{assert_ge, assert_le, assert_ok}; use move_core_types::value::MoveTypeLayout; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use once_cell::sync::OnceCell; use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*, proptest, sample::Index}; use proptest_derive::Arbitrary; diff --git a/aptos-move/block-executor/src/value_exchange.rs b/aptos-move/block-executor/src/value_exchange.rs index 9dab64fcb8512..5170291e6dba5 100644 --- a/aptos-move/block-executor/src/value_exchange.rs +++ b/aptos-move/block-executor/src/value_exchange.rs @@ -4,10 +4,11 @@ use crate::view::{LatestView, ViewState}; use aptos_aggregator::{ resolver::TDelayedFieldView, - types::{code_invariant_error, DelayedFieldValue, PanicError, ReadPosition}, + types::{code_invariant_error, DelayedFieldValue, ReadPosition}, }; use aptos_mvhashmap::{types::TxnIndex, versioned_delayed_fields::TVersionedDelayedFieldView}; use aptos_types::{ + delayed_fields::PanicError, executable::Executable, state_store::{state_value::StateValueMetadata, TStateView}, transaction::BlockExecutableTransaction as Transaction, diff --git a/aptos-move/e2e-move-tests/src/harness.rs b/aptos-move/e2e-move-tests/src/harness.rs index e0ccc159ab8d7..f4d08b00ea4f8 100644 --- a/aptos-move/e2e-move-tests/src/harness.rs +++ b/aptos-move/e2e-move-tests/src/harness.rs @@ -413,7 +413,7 @@ impl MoveHarness { account: &Account, package: &BuiltPackage, mut patch_metadata: impl FnMut(&mut PackageMetadata), - publisher_ref: AccountAddress, + code_object: AccountAddress, ) -> SignedTransaction { let code = package.extract_code(); let mut metadata = package @@ -425,7 +425,7 @@ impl MoveHarness { aptos_stdlib::object_code_deployment_upgrade( bcs::to_bytes(&metadata).expect("PackageMetadata has BCS"), code, - publisher_ref, + code_object, ), ) } @@ -452,7 +452,7 @@ impl MoveHarness { path: &Path, options: BuildOptions, patch_metadata: impl FnMut(&mut PackageMetadata), - publisher_ref: AccountAddress, + code_object: AccountAddress, ) -> SignedTransaction { let package = build_package(path.to_owned(), options).expect("building package must succeed"); @@ -460,7 +460,7 @@ impl MoveHarness { account, &package, patch_metadata, - publisher_ref, + code_object, ) } @@ -532,10 +532,10 @@ impl MoveHarness { account: &Account, path: &Path, options: BuildOptions, - publisher_ref: AccountAddress, + code_object: AccountAddress, ) -> TransactionStatus { let txn = - self.create_object_code_upgrade_package(account, path, options, |_| {}, publisher_ref); + self.create_object_code_upgrade_package(account, path, options, |_| {}, code_object); self.run(txn) } diff --git a/aptos-move/e2e-move-tests/src/tests/account.rs b/aptos-move/e2e-move-tests/src/tests/account.rs new file mode 100644 
index 0000000000000..fddf1683fded8 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/account.rs @@ -0,0 +1,25 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::MoveHarness; +use aptos_cached_packages::aptos_stdlib::aptos_account_transfer; +use aptos_language_e2e_tests::account::Account; +use claims::assert_err_eq; +use move_core_types::vm_status::StatusCode; + +#[test] +fn non_existent_sender() { + let mut h = MoveHarness::new(); + + let sender = Account::new(); + let receiver = h.new_account_with_balance_and_sequence_number(100_000, 0); + + let txn = sender + .transaction() + .payload(aptos_account_transfer(*receiver.address(), 10)) + .sequence_number(0) + .sign(); + + let status = h.run(txn); + assert_err_eq!(status.status(), StatusCode::SENDING_ACCOUNT_DOES_NOT_EXIST); +} diff --git a/aptos-move/e2e-move-tests/src/tests/mod.rs b/aptos-move/e2e-move-tests/src/tests/mod.rs index a55e051cfde69..5722b0a777c46 100644 --- a/aptos-move/e2e-move-tests/src/tests/mod.rs +++ b/aptos-move/e2e-move-tests/src/tests/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 mod access_path_test; +mod account; mod aggregator; mod aggregator_v2; mod aggregator_v2_events; @@ -48,4 +49,5 @@ mod token_objects; mod transaction_fee; mod type_too_large; mod vector_numeric_address; +mod vm; mod vote; diff --git a/aptos-move/e2e-move-tests/src/tests/object_code_deployment.rs b/aptos-move/e2e-move-tests/src/tests/object_code_deployment.rs index 16dff55cb2166..4193816148e9e 100644 --- a/aptos-move/e2e-move-tests/src/tests/object_code_deployment.rs +++ b/aptos-move/e2e-move-tests/src/tests/object_code_deployment.rs @@ -5,7 +5,7 @@ use crate::{assert_abort, assert_success, assert_vm_status, tests::common, MoveH use aptos_framework::{ natives::{ code::{PackageRegistry, UpgradePolicy}, - object_code_deployment::PublisherRef, + object_code_deployment::ManagingRefs, }, BuildOptions, }; @@ -150,16 +150,16 @@ fn object_code_deployment_publish_package(enabled: Vec, disabled: V assert_eq!(registry.packages[0].modules.len(), 1); assert_eq!(registry.packages[0].modules[0].name, "test"); - let publisher_ref: PublisherRef = context + let code_object: ManagingRefs = context .harness .read_resource_from_resource_group( &context.object_address, parse_struct_tag("0x1::object::ObjectGroup").unwrap(), - parse_struct_tag("0x1::object_code_deployment::PublisherRef").unwrap(), + parse_struct_tag("0x1::object_code_deployment::ManagingRefs").unwrap(), ) .unwrap(); - // Verify the object created owns the `PublisherRef` - assert_eq!(publisher_ref, PublisherRef::new(context.object_address)); + // Verify the object created owns the `ManagingRefs` + assert_eq!(code_object, ManagingRefs::new(context.object_address)); let module_address = context.object_address.to_string(); assert_success!(context.harness.run_entry_function( @@ -244,8 +244,8 @@ fn object_code_deployment_upgrade_fail_when_publisher_ref_does_not_exist() { let mut context = TestContext::new(None, None); let acc = context.account.clone(); - // We should not be able to `upgrade` as `PublisherRef` does not exist. - // `PublisherRef` is only created when calling `publish` first, i.e. deploying a package. + // We should not be able to `upgrade` as `ManagingRefs` does not exist. + // `ManagingRefs` is only created when calling `publish` first, i.e. deploying a package. 
let status = context.execute_object_code_action( &acc, "object_code_deployment.data/pack_initial", diff --git a/aptos-move/e2e-move-tests/src/tests/vm.rs b/aptos-move/e2e-move-tests/src/tests/vm.rs new file mode 100644 index 0000000000000..c4a30284a8f32 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/vm.rs @@ -0,0 +1,58 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::MoveHarness; +use aptos_cached_packages::aptos_stdlib::aptos_account_transfer; +use aptos_types::{ + state_store::state_key::StateKey, transaction::ExecutionStatus, write_set::WriteOp, +}; +use aptos_vm::{data_cache::AsMoveResolver, AptosVM}; +use claims::{assert_ok_eq, assert_some}; +use move_core_types::vm_status::{StatusCode, VMStatus}; +use test_case::test_case; + +// Make sure verification and invariant violation errors are kept. +#[test_case(StatusCode::TYPE_MISMATCH)] +#[test_case(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)] +fn failed_transaction_cleanup_charges_gas(status_code: StatusCode) { + let mut h = MoveHarness::new(); + let sender = h.new_account_with_balance_and_sequence_number(1_000_000, 10); + let receiver = h.new_account_with_balance_and_sequence_number(1_000_000, 10); + + let max_gas_amount = 100_000; + let txn = sender + .transaction() + .sequence_number(10) + .max_gas_amount(max_gas_amount) + .payload(aptos_account_transfer(*receiver.address(), 1)) + .sign(); + + let state_view = h.executor.get_state_view(); + let vm = AptosVM::new( + &state_view.as_move_resolver(), + /*override_is_delayed_field_optimization_capable=*/ Some(false), + ); + + let balance = 10_000; + let output = vm + .test_failed_transaction_cleanup( + VMStatus::error(status_code, None), + &txn, + state_view, + balance, + ) + .1; + + let write_set: Vec<(&StateKey, &WriteOp)> = output + .change_set() + .concrete_write_set_iter() + .map(|(k, v)| (k, assert_some!(v))) + .collect(); + assert!(!write_set.is_empty()); + assert_eq!(output.gas_used(), max_gas_amount - balance); + assert!(!output.status().is_discarded()); + assert_ok_eq!( + output.status().as_kept_status(), + ExecutionStatus::MiscellaneousError(Some(status_code)) + ); +} diff --git a/aptos-move/e2e-testsuite/src/tests/failed_transaction_tests.rs b/aptos-move/e2e-testsuite/src/tests/failed_transaction_tests.rs deleted file mode 100644 index 11446d98d514b..0000000000000 --- a/aptos-move/e2e-testsuite/src/tests/failed_transaction_tests.rs +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use aptos_gas_meter::{StandardGasAlgebra, StandardGasMeter}; -use aptos_gas_schedule::{AptosGasParameters, LATEST_GAS_FEATURE_VERSION}; -use aptos_language_e2e_tests::{common_transactions::peer_to_peer_txn, executor::FakeExecutor}; -use aptos_memory_usage_tracker::MemoryTrackedGasMeter; -use aptos_types::{ - state_store::{state_key::StateKey, TStateView}, - transaction::ExecutionStatus, - vm_status::{StatusCode, VMStatus}, - write_set::WriteOp, -}; -use aptos_vm::{data_cache::AsMoveResolver, transaction_metadata::TransactionMetadata, AptosVM}; -use aptos_vm_logging::log_schema::AdapterLogSchema; -use aptos_vm_types::storage::StorageGasParameters; -use claims::assert_some; -use move_core_types::vm_status::StatusCode::TYPE_MISMATCH; - -#[test] -fn failed_transaction_cleanup_test() { - let mut executor = FakeExecutor::from_head_genesis(); - // TODO(Gas): double check this - let sender = executor.create_raw_account_data(1_000_000, 10); - executor.add_account_data(&sender); - - let log_context = AdapterLogSchema::new(executor.get_state_view().id(), 0); - let aptos_vm = AptosVM::new( - &executor.get_state_view().as_move_resolver(), - /*override_is_delayed_field_optimization_capable=*/ None, - ); - let data_cache = executor.get_state_view().as_move_resolver(); - - let txn_data = TransactionMetadata { - sender: *sender.address(), - max_gas_amount: 100_000.into(), - gas_unit_price: 0.into(), - sequence_number: 10, - ..Default::default() - }; - - let gas_params = AptosGasParameters::zeros(); - let storage_gas_params = - StorageGasParameters::unlimited(gas_params.vm.txn.legacy_free_write_bytes_quota); - - let change_set_configs = storage_gas_params.change_set_configs.clone(); - - let mut gas_meter = MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( - LATEST_GAS_FEATURE_VERSION, - gas_params.vm, - storage_gas_params, - 10_000, - ))); - - // TYPE_MISMATCH should be kept and charged. - let out1 = aptos_vm.failed_transaction_cleanup( - VMStatus::error(StatusCode::TYPE_MISMATCH, None), - &mut gas_meter, - &txn_data, - &data_cache, - &log_context, - &change_set_configs, - ); - - let write_set: Vec<(&StateKey, &WriteOp)> = out1 - .change_set() - .concrete_write_set_iter() - .map(|(k, v)| (k, assert_some!(v))) - .collect(); - assert!(!write_set.is_empty()); - assert_eq!(out1.gas_used(), 90_000); - assert!(!out1.status().is_discarded()); - assert_eq!( - out1.status().status(), - // StatusCode::TYPE_MISMATCH - Ok(ExecutionStatus::MiscellaneousError(Some(TYPE_MISMATCH))) - ); - - // Invariant violations should be charged. 
- let out2 = aptos_vm.failed_transaction_cleanup( - VMStatus::error(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, None), - &mut gas_meter, - &txn_data, - &data_cache, - &log_context, - &change_set_configs, - ); - assert!(out2.gas_used() != 0); - assert!(!out2.status().is_discarded()); - assert_eq!( - out2.status().status(), - Ok(ExecutionStatus::MiscellaneousError(Some( - StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR - ))) - ); -} - -#[test] -fn non_existent_sender() { - let mut executor = FakeExecutor::from_head_genesis(); - let sequence_number = 0; - let sender = executor.create_raw_account(); - let receiver = executor.create_raw_account_data(100_000, sequence_number); - executor.add_account_data(&receiver); - - let transfer_amount = 10; - let txn = peer_to_peer_txn( - &sender, - receiver.account(), - sequence_number, - transfer_amount, - 0, - ); - - let output = &executor.execute_transaction(txn); - assert_eq!( - output.status().status(), - Err(StatusCode::SENDING_ACCOUNT_DOES_NOT_EXIST), - ); -} diff --git a/aptos-move/e2e-testsuite/src/tests/invariant_violation.rs b/aptos-move/e2e-testsuite/src/tests/invariant_violation.rs index 2a7d94f972757..9cef3a8dcb6b5 100644 --- a/aptos-move/e2e-testsuite/src/tests/invariant_violation.rs +++ b/aptos-move/e2e-testsuite/src/tests/invariant_violation.rs @@ -13,17 +13,12 @@ use move_core_types::{value::MoveValue, vm_status::StatusCode}; #[test] fn invariant_violation_error() { let _scenario = fail::FailScenario::setup(); - fail::cfg( - "move_adapter::execute_script_or_entry_function", - "100%return", - ) - .unwrap(); + fail::cfg("aptos_vm::execute_script_or_entry_function", "100%return").unwrap(); ::aptos_logger::Logger::init_for_testing(); let mut executor = FakeExecutor::from_head_genesis(); - // create and publish a sender with 1_000_000 coins and a receiver with 100_000 coins let sender = executor.create_raw_account_data(1_000_000, 10); let receiver = executor.create_raw_account_data(100_000, 10); executor.add_account_data(&sender); diff --git a/aptos-move/e2e-testsuite/src/tests/mod.rs b/aptos-move/e2e-testsuite/src/tests/mod.rs index 03f411f1bd17c..e4515b78e7cf5 100644 --- a/aptos-move/e2e-testsuite/src/tests/mod.rs +++ b/aptos-move/e2e-testsuite/src/tests/mod.rs @@ -16,7 +16,6 @@ mod account_universe; mod create_account; mod data_store; mod execution_strategies; -mod failed_transaction_tests; mod genesis; mod genesis_initializations; mod invariant_violation; diff --git a/aptos-move/framework/aptos-framework/doc/account.md b/aptos-move/framework/aptos-framework/doc/account.md index 9565d16e617ad..d17c58a984c9c 100644 --- a/aptos-move/framework/aptos-framework/doc/account.md +++ b/aptos-move/framework/aptos-framework/doc/account.md @@ -1143,8 +1143,11 @@ is returned. This way, the caller of this function can publish additional resour ## Function `rotate_authentication_key_internal` -This function is used to rotate a resource account's authentication key to 0, so that no private key can control -the resource account. +This function is used to rotate a resource account's authentication key to new_auth_key. This is done in +many contexts: +1. During normal key rotation via rotate_authentication_key or rotate_authentication_key_call +2. During resource account initialization so that no private key can control the resource account +3. During multisig_v2 account creation
public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector<u8>)
@@ -1176,7 +1179,11 @@ the resource account.
 
 ## Function `rotate_authentication_key_call`
 
-Entry function-only rotation key function that allows the signer update their authentication_key.
+Private entry function for key rotation that allows the signer to update their authentication key.
+Note that this does not update the OriginatingAddress table because the new_auth_key is not "verified": it
+does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to
+the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in
+the format expected in rotate_authentication_key.
 
 
 
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>)
@@ -1188,15 +1195,8 @@ Entry function-only rotation key function that allows the signer update their au
 Implementation
 
 
-
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>) acquires Account, OriginatingAddress {
-    let addr = signer::address_of(account);
-    assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
-    assert!(
-        vector::length(&new_auth_key) == 32,
-        error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY)
-    );
-    let account_resource = borrow_global_mut<Account>(addr);
-    update_auth_key_and_originating_address_table(addr, account_resource, new_auth_key);
+
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>) acquires Account {
+    rotate_authentication_key_internal(account, new_auth_key);
 }
 
@@ -2526,29 +2526,15 @@ The length of new_auth_key is 32.
-The Account existed under the signer before the call.
-The length of new_auth_key is 32.
let addr = signer::address_of(account);
+// This enforces high-level requirement 10:
+let post account_resource = global<Account>(addr);
 aborts_if !exists<Account>(addr);
 aborts_if vector::length(new_auth_key) != 32;
-let account_resource = global<Account>(addr);
-let curr_auth_key = from_bcs::deserialize<address>(account_resource.authentication_key);
-let originating_addr = addr;
-let address_map = global<OriginatingAddress>(@aptos_framework).address_map;
-let new_auth_key_addr = from_bcs::deserialize<address>(new_auth_key);
-aborts_if !exists<OriginatingAddress>(@aptos_framework);
-aborts_if !from_bcs::deserializable<address>(account_resource.authentication_key);
-aborts_if table::spec_contains(address_map, curr_auth_key) &&
-    table::spec_get(address_map, curr_auth_key) != originating_addr;
-aborts_if curr_auth_key != new_auth_key_addr && table::spec_contains(address_map, new_auth_key_addr);
-include UpdateAuthKeyAndOriginatingAddressTableAbortsIf {
-    originating_addr: addr,
-    new_auth_key_vector: new_auth_key,
-};
-let post auth_key = global<Account>(addr).authentication_key;
-ensures auth_key == new_auth_key;
+modifies global<Account>(addr);
+ensures account_resource.authentication_key == new_auth_key;
 
diff --git a/aptos-move/framework/aptos-framework/doc/object.md b/aptos-move/framework/aptos-framework/doc/object.md index 39a70aa12ce28..f6fedad987f5c 100644 --- a/aptos-move/framework/aptos-framework/doc/object.md +++ b/aptos-move/framework/aptos-framework/doc/object.md @@ -32,7 +32,6 @@ make it so that a reference to a global object can be returned from a function. - [Struct `LinearTransferRef`](#0x1_object_LinearTransferRef) - [Struct `DeriveRef`](#0x1_object_DeriveRef) - [Struct `TransferEvent`](#0x1_object_TransferEvent) -- [Resource `Ghost$g_roll`](#0x1_object_Ghost$g_roll) - [Constants](#@Constants_0) - [Function `is_burnt`](#0x1_object_is_burnt) - [Function `address_to_object`](#0x1_object_address_to_object) @@ -129,6 +128,7 @@ make it so that a reference to a global object can be returned from a function. use 0x1::create_signer; use 0x1::error; use 0x1::event; +use 0x1::features; use 0x1::from_bcs; use 0x1::guid; use 0x1::hash; @@ -496,33 +496,6 @@ Emitted whenever the object's owner field is changed. - - - - -## Resource `Ghost$g_roll` - - - -
struct Ghost$g_roll has copy, drop, store, key
-
- - - -
-Fields - - -
-
-v: u8 -
-
- -
-
- -
@@ -1913,18 +1886,11 @@ objects may have cyclic dependencies. let current_address = object.owner; let count = 0; - while ({ - spec { - invariant count < MAXIMUM_OBJECT_NESTING; - invariant forall i in 0..count: - exists<ObjectCore>(current_address) && global<ObjectCore>(current_address).allow_ungated_transfer; - // invariant forall i in 0..count: - // current_address == get_transfer_address(global<ObjectCore>(destination).owner, i); + while (owner != current_address) { + count = count + 1; + if (std::features::max_object_nesting_check_enabled()) { + assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)) }; - owner != current_address - }) { - let count = count + 1; - assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)); // At this point, the first object exists and so the more likely case is that the // object's owner is not an object. So we return a more sensible error. assert!( @@ -2124,16 +2090,11 @@ Return true if the provided address has indirect or direct ownership of the prov let current_address = object.owner; let count = 0; - while ({ - spec { - invariant count < MAXIMUM_OBJECT_NESTING; - invariant forall i in 0..count: - owner != current_address && exists<ObjectCore>(current_address); + while (owner != current_address) { + count = count + 1; + if (std::features::max_object_nesting_check_enabled()) { + assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)) }; - owner != current_address - }) { - let count = count + 1; - assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)); if (!exists<ObjectCore>(current_address)) { return false }; @@ -2224,8 +2185,6 @@ Return true if the provided address has indirect or direct ownership of the prov
pragma aborts_if_is_strict;
-
-global g_roll: u8;
 
@@ -3058,7 +3017,8 @@ Return true if the provided address has indirect or direct ownership of the prov -
let current_address_0 = object.inner;
+
pragma aborts_if_is_partial;
+let current_address_0 = object.inner;
 let object_0 = global<ObjectCore>(current_address_0);
 let current_address = object_0.owner;
 aborts_if object.inner != owner && !exists<ObjectCore>(object.inner);
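The ownership-walk rewrite earlier in this diff replaces the prover-facing loop invariants with a runtime bound gated on <code>max_object_nesting_check_enabled</code>, which is why the spec above now carries <code>pragma aborts_if_is_partial</code>. A hedged sketch that exercises the walk by nesting one object under another (assumes the standard <code>0x1::object</code> helpers named below):

<pre><code>use std::signer;
use aptos_framework::object::{Self, ObjectCore};

// Build a one-hop ownership chain: alice owns `outer`, and `outer` owns `inner`.
fun nest_objects(alice: &signer) {
    let alice_addr = signer::address_of(alice);
    let outer_ref = object::create_object(alice_addr);
    let inner_ref = object::create_object(alice_addr);
    let outer = object::object_from_constructor_ref<ObjectCore>(&outer_ref);
    let inner = object::object_from_constructor_ref<ObjectCore>(&inner_ref);
    object::transfer_to_object(alice, inner, outer);
    // owns() must now follow one indirection, incrementing `count` in the gated loop.
    assert!(object::owns(inner, alice_addr), 0);
}
</code></pre>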
diff --git a/aptos-move/framework/aptos-framework/doc/object_code_deployment.md b/aptos-move/framework/aptos-framework/doc/object_code_deployment.md
index 02324d527575d..e4caf56a77c74 100644
--- a/aptos-move/framework/aptos-framework/doc/object_code_deployment.md
+++ b/aptos-move/framework/aptos-framework/doc/object_code_deployment.md
@@ -15,7 +15,7 @@ Publishing modules flow:
 1. Create a new object with the address derived from the publisher address and the object seed.
 2. Publish the module passed in the function via metadata_serialized and code to the newly created object.
 3. Emits 'Publish' event with the address of the newly created object.
-4. Create a PublisherRef which stores the extend ref of the newly created object.
+4. Create a ManagingRefs which stores the extend ref of the newly created object.
 Note: This is needed to upgrade the code as the signer must be generated to upgrade the existing code in an object.
 
 Upgrading modules flow:
@@ -35,7 +35,7 @@ Note: There is no unfreeze function as this gives no benefit if the user can fre
 Once modules are marked as immutable, they cannot be made mutable again.
 
 
--  [Resource `PublisherRef`](#0x1_object_code_deployment_PublisherRef)
+-  [Resource `ManagingRefs`](#0x1_object_code_deployment_ManagingRefs)
 -  [Struct `Publish`](#0x1_object_code_deployment_Publish)
 -  [Struct `Upgrade`](#0x1_object_code_deployment_Upgrade)
 -  [Struct `Freeze`](#0x1_object_code_deployment_Freeze)
@@ -59,15 +59,15 @@ Once modules are marked as immutable, they cannot be made mutable again.
 
 
 
-
+
 
-## Resource `PublisherRef`
+## Resource `ManagingRefs`
 
-Object which contains the code deployed, along with the extend ref to upgrade the code.
+Internal struct, attached to the object, that holds Refs we need to manage the code deployment (i.e. upgrades).
 
 
 
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
-struct PublisherRef has key
+struct ManagingRefs has key
 
@@ -256,7 +256,7 @@ the code to be published via code. T event::emit(Publish { object_address: signer::address_of(code_signer), }); - move_to(code_signer, PublisherRef { + move_to(code_signer, ManagingRefs { extend_ref: object::generate_extend_ref(constructor_ref), }); } @@ -304,7 +304,7 @@ Note: If the modules were deployed as immutable when calling publishcode_object
. -
public entry fun upgrade(publisher: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>, code_object: object::Object<object_code_deployment::PublisherRef>)
+
public entry fun upgrade(publisher: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>, code_object: object::Object<code::PackageRegistry>)
 
@@ -317,8 +317,8 @@ Requires the publisher to be the owner of the code_object. publisher: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>, - code_object: Object<PublisherRef>, -) acquires PublisherRef { + code_object: Object<PackageRegistry>, +) acquires ManagingRefs { let publisher_address = signer::address_of(publisher); assert!( object::is_owner(code_object, publisher_address), @@ -326,9 +326,9 @@ Requires the publisher to be the owner of the code_object. ); let code_object_address = object::object_address(&code_object); - assert!(exists<PublisherRef>(code_object_address), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST)); + assert!(exists<ManagingRefs>(code_object_address), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST)); - let extend_ref = &borrow_global<PublisherRef>(code_object_address).extend_ref; + let extend_ref = &borrow_global<ManagingRefs>(code_object_address).extend_ref; let code_signer = &object::generate_signer_for_extending(extend_ref); code::publish_package_txn(code_signer, metadata_serialized, code); diff --git a/aptos-move/framework/aptos-framework/doc/openid_account.md b/aptos-move/framework/aptos-framework/doc/openid_account.md new file mode 100644 index 0000000000000..1b1f7993f0b3a --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/openid_account.md @@ -0,0 +1,466 @@ + + + +# Module `0x1::openid_account` + +This module is responsible for configuring OpenID-based blockchain accounts (OIDBs), which were introduced in +[AIP-61](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-61.md). + + +- [Struct `Group`](#0x1_openid_account_Group) +- [Resource `Groth16VerificationKey`](#0x1_openid_account_Groth16VerificationKey) +- [Resource `Configuration`](#0x1_openid_account_Configuration) +- [Constants](#@Constants_0) +- [Function `new_groth16_verification_key`](#0x1_openid_account_new_groth16_verification_key) +- [Function `new_configuration`](#0x1_openid_account_new_configuration) +- [Function `update_groth16_verification_key`](#0x1_openid_account_update_groth16_verification_key) +- [Function `update_configuration`](#0x1_openid_account_update_configuration) +- [Function `update_training_wheels`](#0x1_openid_account_update_training_wheels) +- [Function `update_max_exp_horizon`](#0x1_openid_account_update_max_exp_horizon) +- [Function `remove_all_override_auds`](#0x1_openid_account_remove_all_override_auds) +- [Function `add_override_aud`](#0x1_openid_account_add_override_aud) + + +
use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+
+ + + + + +## Struct `Group` + + + +
#[resource_group(#[scope = global])]
+struct Group
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `Groth16VerificationKey` + +The 288-byte Groth16 verification key (VK) for the ZK relation that implements OIDBs. + + +
#[resource_group_member(#[group = 0x1::openid_account::Group])]
+struct Groth16VerificationKey has store, key
+
+ + + +
+Fields + + +
+
+alpha_g1: vector<u8> +
+
+ 32-byte serialization of alpha * G, where G is the generator of G1. +
+
+beta_g2: vector<u8> +
+
+ 64-byte serialization of alpha * H, where H is the generator of G2. +
+
+gamma_g2: vector<u8> +
+
+ 64-byte serialization of gamma * H, where H is the generator of G2. +
+
+delta_g2: vector<u8> +
+
+ 64-byte serialization of delta * H, where H is the generator of G2. +
+
+gamma_abc_g1: vector<vector<u8>> +
+
+ \forall i \in {0, ..., \ell}, 64-byte serialization of gamma^{-1} * (beta * a_i + alpha * b_i + c_i) * H, where + H is the generator of G1 and \ell is 1 for the ZK relation. +
+
+ + +
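For orientation, these components feed the standard Groth16 pairing check. With $\ell = 1$, the single public input is the public-inputs hash that the validator derives from the authenticator; as a sketch (textbook Groth16, not necessarily the exact in-circuit conventions):

$$ e(A, B) = e(\alpha_{g1}, \beta_{g2}) \cdot e\big(\gamma_{abc}[0] + h \cdot \gamma_{abc}[1],\; \gamma_{g2}\big) \cdot e(C, \delta_{g2}) $$

where $(A, B, C)$ is the proof and $h$ is the public-inputs hash.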
+ + + +## Resource `Configuration` + + + +
#[resource_group_member(#[group = 0x1::openid_account::Group])]
+struct Configuration has store, key
+
+ + + +
+Fields + + +
+
+override_aud_vals: vector<string::String> +
+
+ An override aud for the identity of a recovery service, which will help users recover their OIDB accounts + associated with dapps or wallets that have disappeared. + IMPORTANT: This recovery service **cannot** on its own take over user accounts; a user must first sign in + via OAuth in the recovery service in order to allow it to rotate any of that user's OIDB accounts. +
+
+max_oidb_signatures_per_txn: u16 +
+
+ No transaction can have more than this many OIDB signatures. +
+
+max_exp_horizon_secs: u64 +
+
+ How far in the future from the JWT's issued-at time the EPK expiry can be set. +</dd>
+
+training_wheels_pubkey: option::Option<vector<u8>> +
+
+ The training wheels PK, if training wheels are on +
+
+max_commited_epk_bytes: u16 +
+
+ The max length of an ephemeral public key supported in our circuit (93 bytes) +
+
+max_iss_val_bytes: u16 +
+
+ The max length of the value of the JWT's iss field supported in our circuit (e.g., "https://accounts.google.com") +
+
+max_extra_field_bytes: u16 +
+
+ The max length of the JWT field name and value (e.g., "max_age":"18") supported in our circuit +
+
+max_jwt_header_b64_bytes: u32 +
+
+ The max length of the base64url-encoded JWT header in bytes supported in our circuit +
+
+ + +
+ + + +## Constants + + + + +The training wheels PK needs to be 32 bytes long. + + +
const E_TRAINING_WHEELS_PK_WRONG_SIZE: u64 = 1;
+
+ + + + + +## Function `new_groth16_verification_key` + + + +
public fun new_groth16_verification_key(alpha_g1: vector<u8>, beta_g2: vector<u8>, gamma_g2: vector<u8>, delta_g2: vector<u8>, gamma_abc_g1: vector<vector<u8>>): openid_account::Groth16VerificationKey
+
+ + + +
+Implementation + + +
public fun new_groth16_verification_key(alpha_g1: vector<u8>,
+                                        beta_g2: vector<u8>,
+                                        gamma_g2: vector<u8>,
+                                        delta_g2: vector<u8>,
+                                        gamma_abc_g1: vector<vector<u8>>
+): Groth16VerificationKey {
+    Groth16VerificationKey {
+        alpha_g1,
+        beta_g2,
+        gamma_g2,
+        delta_g2,
+        gamma_abc_g1,
+    }
+}
+
+ + + +
+ + + +## Function `new_configuration` + + + +
public fun new_configuration(override_aud_val: vector<string::String>, max_oidb_signatures_per_txn: u16, max_exp_horizon_secs: u64, training_wheels_pubkey: option::Option<vector<u8>>, max_commited_epk_bytes: u16, max_iss_val_bytes: u16, max_extra_field_bytes: u16, max_jwt_header_b64_bytes: u32): openid_account::Configuration
+
+ + + +
+Implementation + + +
public fun new_configuration(
+    override_aud_val: vector<String>,
+    max_oidb_signatures_per_txn: u16,
+    max_exp_horizon_secs: u64,
+    training_wheels_pubkey: Option<vector<u8>>,
+    max_commited_epk_bytes: u16,
+    max_iss_val_bytes: u16,
+    max_extra_field_bytes: u16,
+    max_jwt_header_b64_bytes: u32
+): Configuration {
+    Configuration {
+        override_aud_vals: override_aud_val,
+        max_oidb_signatures_per_txn,
+        max_exp_horizon_secs,
+        training_wheels_pubkey,
+        max_commited_epk_bytes,
+        max_iss_val_bytes,
+        max_extra_field_bytes,
+        max_jwt_header_b64_bytes,
+    }
+}
+
+ + + +
+ + + +## Function `update_groth16_verification_key` + + + +
public fun update_groth16_verification_key(fx: &signer, vk: openid_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
public fun update_groth16_verification_key(fx: &signer, vk: Groth16VerificationKey) acquires Groth16VerificationKey {
+    system_addresses::assert_aptos_framework(fx);
+
+    if (exists<Groth16VerificationKey>(signer::address_of(fx))) {
+        let Groth16VerificationKey {
+            alpha_g1: _,
+            beta_g2: _,
+            gamma_g2: _,
+            delta_g2: _,
+            gamma_abc_g1: _
+        } = move_from<Groth16VerificationKey>(signer::address_of(fx));
+    };
+
+    move_to(fx, vk);
+}
+
+ + + +
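Putting <code>new_groth16_verification_key</code> and <code>update_groth16_verification_key</code> together, a governance-style installation might look like the following sketch (the <code>x"00"</code> byte strings are placeholders for the real 32- and 64-byte serializations, and <code>fx</code> must be the <code>aptos_framework</code> signer):

<pre><code>script {
    use aptos_framework::openid_account;

    fun install_vk(fx: &signer) {
        let vk = openid_account::new_groth16_verification_key(
            x"00",                // alpha_g1: placeholder for a 32-byte G1 serialization
            x"00",                // beta_g2:  placeholder for a 64-byte G2 serialization
            x"00",                // gamma_g2: placeholder for a 64-byte G2 serialization
            x"00",                // delta_g2: placeholder for a 64-byte G2 serialization
            vector[x"00", x"00"], // gamma_abc_g1: ell + 1 = 2 entries
        );
        openid_account::update_groth16_verification_key(fx, vk);
    }
}
</code></pre>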
+ + + +## Function `update_configuration` + + + +
public fun update_configuration(fx: &signer, config: openid_account::Configuration)
+
+ + + +
+Implementation + + +
public fun update_configuration(fx: &signer, config: Configuration) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    if (exists<Configuration>(signer::address_of(fx))) {
+        let Configuration {
+            override_aud_vals: _,
+            max_oidb_signatures_per_txn: _,
+            max_exp_horizon_secs: _,
+            training_wheels_pubkey: _,
+            max_commited_epk_bytes: _,
+            max_iss_val_bytes: _,
+            max_extra_field_bytes: _,
+            max_jwt_header_b64_bytes: _,
+        } = move_from<Configuration>(signer::address_of(fx));
+    };
+
+    move_to(fx, config);
+}
+
+ + + +
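A matching sketch for the configuration; the numeric values are illustrative placeholders, not the on-chain defaults:

<pre><code>script {
    use std::option;
    use std::string;
    use aptos_framework::openid_account;

    fun install_config(fx: &signer) {
        let config = openid_account::new_configuration(
            vector[string::utf8(b"recovery-service-aud")], // override_aud_val (placeholder)
            3,              // max_oidb_signatures_per_txn
            10_000_000,     // max_exp_horizon_secs
            option::none(), // training_wheels_pubkey: start with training wheels off
            93,             // max_commited_epk_bytes
            120,            // max_iss_val_bytes
            350,            // max_extra_field_bytes
            300,            // max_jwt_header_b64_bytes
        );
        openid_account::update_configuration(fx, config);
    }
}
</code></pre>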
+ + + +## Function `update_training_wheels` + + + +
public fun update_training_wheels(fx: &signer, pk: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public fun update_training_wheels(fx: &signer, pk: Option<vector<u8>>) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    if (option::is_some(&pk)) {
+        assert!(vector::length(option::borrow(&pk)) == 32, E_TRAINING_WHEELS_PK_WRONG_SIZE)
+    };
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.training_wheels_pubkey = pk;
+}
+
+ + + +
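Per the assert above, an enabled training-wheels PK must be exactly 32 bytes, and <code>option::none()</code> switches the check off. A minimal sketch with a placeholder key, inside a governance context where <code>fx</code> is the <code>aptos_framework</code> signer:

<pre><code>let tw_pk = x"0000000000000000000000000000000000000000000000000000000000000000"; // placeholder 32 bytes
openid_account::update_training_wheels(fx, std::option::some(tw_pk));
// ...and to turn training wheels off again:
openid_account::update_training_wheels(fx, std::option::none());
</code></pre>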
+ + + +## Function `update_max_exp_horizon` + + + +
public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.max_exp_horizon_secs = max_exp_horizon_secs;
+}
+
+ + + +
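For example, to cap ephemeral keypair lifetimes at roughly one week (an illustrative value, not a recommended default), a governance script could call:

<pre><code>openid_account::update_max_exp_horizon(fx, 7 * 24 * 60 * 60); // 604,800 seconds
</code></pre>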
+ + + +## Function `remove_all_override_auds` + + + +
public fun remove_all_override_auds(fx: &signer)
+
+ + + +
+Implementation + + +
public fun remove_all_override_auds(fx: &signer) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.override_aud_vals = vector[];
+}
+
+ + + +
+ + + +## Function `add_override_aud` + + + +
public fun add_override_aud(fx: &signer, aud: string::String)
+
+ + + +
+Implementation + + +
public fun add_override_aud(fx: &signer, aud: String) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    vector::push_back(&mut config.override_aud_vals, aud);
+}
+
+ + + +
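<code>remove_all_override_auds</code> and <code>add_override_aud</code> are naturally used together when rotating the recovery service's identity: clear the allow-list, then re-add the current <code>aud</code>. A sketch, with a placeholder string:

<pre><code>openid_account::remove_all_override_auds(fx);
openid_account::add_override_aud(fx, std::string::utf8(b"new-recovery-service-aud"));
</code></pre>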
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/overview.md b/aptos-move/framework/aptos-framework/doc/overview.md index 67dfd6fabd519..f993863da04b1 100644 --- a/aptos-move/framework/aptos-framework/doc/overview.md +++ b/aptos-move/framework/aptos-framework/doc/overview.md @@ -41,6 +41,7 @@ This is the reference documentation of the Aptos framework. - [`0x1::multisig_account`](multisig_account.md#0x1_multisig_account) - [`0x1::object`](object.md#0x1_object) - [`0x1::object_code_deployment`](object_code_deployment.md#0x1_object_code_deployment) +- [`0x1::openid_account`](openid_account.md#0x1_openid_account) - [`0x1::optional_aggregator`](optional_aggregator.md#0x1_optional_aggregator) - [`0x1::primary_fungible_store`](primary_fungible_store.md#0x1_primary_fungible_store) - [`0x1::randomness`](randomness.md#0x1_randomness) @@ -64,7 +65,6 @@ This is the reference documentation of the Aptos framework. - [`0x1::version`](version.md#0x1_version) - [`0x1::vesting`](vesting.md#0x1_vesting) - [`0x1::voting`](voting.md#0x1_voting) -- [`0x1::zkid`](zkid.md#0x1_zkid) [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/zkid.md b/aptos-move/framework/aptos-framework/doc/zkid.md deleted file mode 100644 index 3ebc307caac96..0000000000000 --- a/aptos-move/framework/aptos-framework/doc/zkid.md +++ /dev/null @@ -1,445 +0,0 @@ - - - -# Module `0x1::zkid` - - - -- [Struct `Group`](#0x1_zkid_Group) -- [Resource `Groth16VerificationKey`](#0x1_zkid_Groth16VerificationKey) -- [Resource `Configuration`](#0x1_zkid_Configuration) -- [Constants](#@Constants_0) -- [Function `new_groth16_verification_key`](#0x1_zkid_new_groth16_verification_key) -- [Function `new_configuration`](#0x1_zkid_new_configuration) -- [Function `update_groth16_verification_key`](#0x1_zkid_update_groth16_verification_key) -- [Function `update_configuration`](#0x1_zkid_update_configuration) -- [Function `update_training_wheels`](#0x1_zkid_update_training_wheels) -- [Function `remove_all_override_auds`](#0x1_zkid_remove_all_override_auds) -- [Function `add_override_aud`](#0x1_zkid_add_override_aud) - - -
use 0x1::option;
-use 0x1::signer;
-use 0x1::string;
-use 0x1::system_addresses;
-
- - - - - -## Struct `Group` - - - -
#[resource_group(#[scope = global])]
-struct Group
-
- - - -
-Fields - - -
-
-dummy_field: bool -
-
- -
-
- - -
- - - -## Resource `Groth16VerificationKey` - -The 288-byte Groth16 verification key (VK) for the zkID relation. - - -
#[resource_group_member(#[group = 0x1::zkid::Group])]
-struct Groth16VerificationKey has store, key
-
- - - -
-Fields - - -
-
-alpha_g1: vector<u8> -
-
- 32-byte serialization of alpha * G, where G is the generator of G1. -
-
-beta_g2: vector<u8> -
-
- 64-byte serialization of alpha * H, where H is the generator of G2. -
-
-gamma_g2: vector<u8> -
-
- 64-byte serialization of gamma * H, where H is the generator of G2. -
-
-delta_g2: vector<u8> -
-
- 64-byte serialization of delta * H, where H is the generator of G2. -
-
-gamma_abc_g1: vector<vector<u8>> -
-
- \forall i \in {0, ..., \ell}, 64-byte serialization of gamma^{-1} * (beta * a_i + alpha * b_i + c_i) * H, where - H is the generator of G1 and \ell is 1 for the zkID relation. -
-
- - -
- - - -## Resource `Configuration` - - - -
#[resource_group_member(#[group = 0x1::zkid::Group])]
-struct Configuration has store, key
-
- - - -
-Fields - - -
-
-override_aud_vals: vector<string::String> -
-
- An override aud for the identity of a recovery service, which will help users recover their zkID accounts - associated with dapps or wallets that have disappeared. - IMPORTANT: This recovery service **cannot** on its own take over user accounts; a user must first sign in - via OAuth in the recovery service in order to allow it to rotate any of that user's zkID accounts. -
-
-max_zkid_signatures_per_txn: u16 -
-
- No transaction can have more than this many zkID signatures. -
-
-max_exp_horizon_secs: u64 -
-
- How far in the future from the JWT issued at time the EPK expiry can be set. -
-
-training_wheels_pubkey: option::Option<vector<u8>> -
-
- The training wheels PK, if training wheels are on -
-
-nonce_commitment_num_bytes: u16 -
-
- The size of the "nonce commitment (to the EPK and expiration date)" stored in the JWT's nonce field. -
-
-max_commited_epk_bytes: u16 -
-
- The max length of an ephemeral public key supported in our circuit (93 bytes) -
-
-max_iss_field_bytes: u16 -
-
- The max length of the field name and value of the JWT's iss field supported in our circuit (e.g., "iss":"aptos.com") -
-
-max_extra_field_bytes: u16 -
-
- The max length of the JWT field name and value (e.g., "max_age":"18") supported in our circuit -
-
-max_jwt_header_b64_bytes: u32 -
-
- The max length of the base64url-encoded JWT header in bytes supported in our circuit -
-
- - -
- - - -## Constants - - - - -The training wheels PK needs to be 32 bytes long. - - -
const E_TRAINING_WHEELS_PK_WRONG_SIZE: u64 = 1;
-
- - - - - -## Function `new_groth16_verification_key` - - - -
public fun new_groth16_verification_key(alpha_g1: vector<u8>, beta_g2: vector<u8>, gamma_g2: vector<u8>, delta_g2: vector<u8>, gamma_abc_g1: vector<vector<u8>>): zkid::Groth16VerificationKey
-
- - - -
-Implementation - - -
public fun new_groth16_verification_key(alpha_g1: vector<u8>,
-                                        beta_g2: vector<u8>,
-                                        gamma_g2: vector<u8>,
-                                        delta_g2: vector<u8>,
-                                        gamma_abc_g1: vector<vector<u8>>
-): Groth16VerificationKey {
-    Groth16VerificationKey {
-        alpha_g1,
-        beta_g2,
-        gamma_g2,
-        delta_g2,
-        gamma_abc_g1,
-    }
-}
-
- - - -
- - - -## Function `new_configuration` - - - -
public fun new_configuration(override_aud_val: vector<string::String>, max_zkid_signatures_per_txn: u16, max_exp_horizon_secs: u64, training_wheels_pubkey: option::Option<vector<u8>>, nonce_commitment_num_bytes: u16, max_commited_epk_bytes: u16, max_iss_field_bytes: u16, max_extra_field_bytes: u16, max_jwt_header_b64_bytes: u32): zkid::Configuration
-
- - - -
-Implementation - - -
public fun new_configuration(
-    override_aud_val: vector<String>,
-    max_zkid_signatures_per_txn: u16,
-    max_exp_horizon_secs: u64,
-    training_wheels_pubkey: Option<vector<u8>>,
-    nonce_commitment_num_bytes: u16,
-    max_commited_epk_bytes: u16,
-    max_iss_field_bytes: u16,
-    max_extra_field_bytes: u16,
-    max_jwt_header_b64_bytes: u32
-): Configuration {
-    Configuration {
-        override_aud_vals: override_aud_val,
-        max_zkid_signatures_per_txn,
-        max_exp_horizon_secs,
-        training_wheels_pubkey,
-        nonce_commitment_num_bytes,
-        max_commited_epk_bytes,
-        max_iss_field_bytes,
-        max_extra_field_bytes,
-        max_jwt_header_b64_bytes,
-    }
-}
-
- - - -
- - - -## Function `update_groth16_verification_key` - - - -
public fun update_groth16_verification_key(fx: &signer, vk: zkid::Groth16VerificationKey)
-
- - - -
-Implementation - - -
public fun update_groth16_verification_key(fx: &signer, vk: Groth16VerificationKey) acquires Groth16VerificationKey {
-    system_addresses::assert_aptos_framework(fx);
-
-    if (exists<Groth16VerificationKey>(signer::address_of(fx))) {
-        let Groth16VerificationKey {
-            alpha_g1: _,
-            beta_g2: _,
-            gamma_g2: _,
-            delta_g2: _,
-            gamma_abc_g1: _
-        } = move_from<Groth16VerificationKey>(signer::address_of(fx));
-    };
-
-    move_to(fx, vk);
-}
-
- - - -
- - - -## Function `update_configuration` - - - -
public fun update_configuration(fx: &signer, config: zkid::Configuration)
-
- - - -
-Implementation - - -
public fun update_configuration(fx: &signer, config: Configuration) acquires Configuration {
-    system_addresses::assert_aptos_framework(fx);
-
-    if (exists<Configuration>(signer::address_of(fx))) {
-        let Configuration {
-            override_aud_vals: _,
-            max_zkid_signatures_per_txn: _,
-            max_exp_horizon_secs: _,
-            training_wheels_pubkey: _,
-            nonce_commitment_num_bytes: _,
-            max_commited_epk_bytes: _,
-            max_iss_field_bytes: _,
-            max_extra_field_bytes: _,
-            max_jwt_header_b64_bytes: _,
-        } = move_from<Configuration>(signer::address_of(fx));
-    };
-
-    move_to(fx, config);
-}
-
-
-## Function `update_training_wheels`
-
public fun update_training_wheels(fx: &signer, pk: option::Option<vector<u8>>)
-
-
-Implementation
-
public fun update_training_wheels(fx: &signer, pk: Option<vector<u8>>) acquires Configuration {
-    system_addresses::assert_aptos_framework(fx);
-    if (option::is_some(&pk)) {
-        assert!(vector::length(option::borrow(&pk)) == 32, E_TRAINING_WHEELS_PK_WRONG_SIZE)
-    };
-
-    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
-    config.training_wheels_pubkey = pk;
-}
-
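A sketch of turning training wheels on via governance (same assumed `aptos_governance::resolve` flow as above); `pk` must be exactly 32 bytes, consistent with the `E_TRAINING_WHEELS_PK_WRONG_SIZE` check in the implementation:

script {
    use std::option;
    use aptos_framework::aptos_governance;
    use aptos_framework::zkid;

    fun main(proposal_id: u64, pk: vector<u8>) {
        let fx = aptos_governance::resolve(proposal_id, @aptos_framework);
        // Aborts with E_TRAINING_WHEELS_PK_WRONG_SIZE unless `pk` is 32 bytes.
        zkid::update_training_wheels(&fx, option::some(pk));
    }
}

Passing `option::none()` instead switches training wheels off.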
-
-## Function `remove_all_override_auds`
-
public fun remove_all_override_auds(fx: &signer)
-
-
-Implementation
-
public fun remove_all_override_auds(fx: &signer) acquires Configuration {
-    system_addresses::assert_aptos_framework(fx);
-
-    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
-    config.override_aud_vals = vector[];
-}
-
- - - -
- - - -## Function `add_override_aud` - - - -
public fun add_override_aud(fx: &signer, aud: string::String)
-
-
-Implementation
-
public fun add_override_aud(fx: &signer, aud: String) acquires Configuration {
-    system_addresses::assert_aptos_framework(fx);
-
-    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
-    vector::push_back(&mut config.override_aud_vals, aud);
-}
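A sketch of registering a recovery service's `aud` via governance; the client ID shown is a hypothetical placeholder:

script {
    use std::string;
    use aptos_framework::aptos_governance;
    use aptos_framework::zkid;

    fun main(proposal_id: u64) {
        let fx = aptos_governance::resolve(proposal_id, @aptos_framework);
        // Hypothetical OAuth client ID of a recovery service.
        zkid::add_override_aud(&fx, string::utf8(b"recovery-service.example.com"));
    }
}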
-
- - - -
- - -[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/sources/account.move b/aptos-move/framework/aptos-framework/sources/account.move index 76bea0e3132c5..489127d5e1546 100644 --- a/aptos-move/framework/aptos-framework/sources/account.move +++ b/aptos-move/framework/aptos-framework/sources/account.move @@ -265,8 +265,11 @@ module aptos_framework::account { borrow_global(addr).authentication_key } - /// This function is used to rotate a resource account's authentication key to 0, so that no private key can control - /// the resource account. + /// This function is used to rotate a resource account's authentication key to `new_auth_key`. This is done in + /// many contexts: + /// 1. During normal key rotation via `rotate_authentication_key` or `rotate_authentication_key_call` + /// 2. During resource account initialization so that no private key can control the resource account + /// 3. During multisig_v2 account creation public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector) acquires Account { let addr = signer::address_of(account); assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); @@ -278,16 +281,13 @@ module aptos_framework::account { account_resource.authentication_key = new_auth_key; } - /// Entry function-only rotation key function that allows the signer update their authentication_key. - entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector) acquires Account, OriginatingAddress { - let addr = signer::address_of(account); - assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); - assert!( - vector::length(&new_auth_key) == 32, - error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY) - ); - let account_resource = borrow_global_mut(addr); - update_auth_key_and_originating_address_table(addr, account_resource, new_auth_key); + /// Private entry function for key rotation that allows the signer to update their authentication key. + /// Note that this does not update the `OriginatingAddress` table because the `new_auth_key` is not "verified": it + /// does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to + /// the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in + /// the format expected in `rotate_authentication_key`. + entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector) acquires Account { + rotate_authentication_key_internal(account, new_auth_key); } /// Generic authentication key rotation function that allows the user to rotate their authentication key from any scheme to any scheme. 
@@ -1337,7 +1337,7 @@ module aptos_framework::account { #[test(account = @aptos_framework)] - public entry fun test_simple_rotation(account: &signer) acquires Account, OriginatingAddress { + public entry fun test_simple_rotation(account: &signer) acquires Account { initialize(account); let alice_addr = @0x1234; @@ -1346,13 +1346,9 @@ module aptos_framework::account { let (_new_sk, new_pk) = ed25519::generate_keys(); let new_pk_unvalidated = ed25519::public_key_to_unvalidated(&new_pk); let new_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&new_pk_unvalidated); - let new_addr = from_bcs::to_address(new_auth_key); + let _new_addr = from_bcs::to_address(new_auth_key); rotate_authentication_key_call(&alice, new_auth_key); - - let address_map = &mut borrow_global_mut(@aptos_framework).address_map; - let expected_originating_address = table::borrow(address_map, new_addr); - assert!(*expected_originating_address == alice_addr, 0); assert!(borrow_global(alice_addr).authentication_key == new_auth_key, 0); } diff --git a/aptos-move/framework/aptos-framework/sources/account.spec.move b/aptos-move/framework/aptos-framework/sources/account.spec.move index 91e1aca7751ce..2b80dbdce527d 100644 --- a/aptos-move/framework/aptos-framework/sources/account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/account.spec.move @@ -203,35 +203,14 @@ spec aptos_framework::account { ensures account_resource.authentication_key == new_auth_key; } - /// The Account existed under the signer before the call. - /// The length of new_auth_key is 32. spec rotate_authentication_key_call(account: &signer, new_auth_key: vector) { let addr = signer::address_of(account); + /// [high-level-req-10] + let post account_resource = global(addr); aborts_if !exists(addr); aborts_if vector::length(new_auth_key) != 32; - let account_resource = global(addr); - let curr_auth_key = from_bcs::deserialize
(account_resource.authentication_key); - - // Verify all properties in update_auth_key_and_originating_address_table - let originating_addr = addr; - - let address_map = global(@aptos_framework).address_map; - let new_auth_key_addr = from_bcs::deserialize
(new_auth_key); - - aborts_if !exists(@aptos_framework); - aborts_if !from_bcs::deserializable
(account_resource.authentication_key); - aborts_if table::spec_contains(address_map, curr_auth_key) && - table::spec_get(address_map, curr_auth_key) != originating_addr; - - aborts_if curr_auth_key != new_auth_key_addr && table::spec_contains(address_map, new_auth_key_addr); - - include UpdateAuthKeyAndOriginatingAddressTableAbortsIf { - originating_addr: addr, - new_auth_key_vector: new_auth_key, - }; - - let post auth_key = global(addr).authentication_key; - ensures auth_key == new_auth_key; + modifies global(addr); + ensures account_resource.authentication_key == new_auth_key; } spec fun spec_assert_valid_rotation_proof_signature_and_get_auth_key(scheme: u8, public_key_bytes: vector, signature: vector, challenge: RotationProofChallenge): vector; diff --git a/aptos-move/framework/aptos-framework/sources/object.move b/aptos-move/framework/aptos-framework/sources/object.move index 77c7ea3b59110..82400e72c63a4 100644 --- a/aptos-move/framework/aptos-framework/sources/object.move +++ b/aptos-move/framework/aptos-framework/sources/object.move @@ -532,18 +532,11 @@ module aptos_framework::object { let current_address = object.owner; let count = 0; - while ({ - spec { - invariant count < MAXIMUM_OBJECT_NESTING; - invariant forall i in 0..count: - exists(current_address) && global(current_address).allow_ungated_transfer; - // invariant forall i in 0..count: - // current_address == get_transfer_address(global(destination).owner, i); + while (owner != current_address) { + count = count + 1; + if (std::features::max_object_nesting_check_enabled()) { + assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)) }; - owner != current_address - }) { - let count = count + 1; - assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)); // At this point, the first object exists and so the more likely case is that the // object's owner is not an object. So we return a more sensible error. 
assert!( @@ -623,16 +616,11 @@ module aptos_framework::object { let current_address = object.owner; let count = 0; - while ({ - spec { - invariant count < MAXIMUM_OBJECT_NESTING; - invariant forall i in 0..count: - owner != current_address && exists(current_address); + while (owner != current_address) { + count = count + 1; + if (std::features::max_object_nesting_check_enabled()) { + assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)) }; - owner != current_address - }) { - let count = count + 1; - assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING)); if (!exists(current_address)) { return false }; @@ -816,4 +804,110 @@ module aptos_framework::object { let (_, hero) = create_hero(creator); unburn(creator, hero); } + + #[test_only] + fun create_simple_object(creator: &signer, seed: vector): Object { + object_from_constructor_ref(&create_named_object(creator, seed)) + } + + #[test(creator = @0x123)] + #[expected_failure(abort_code = 131078, location = Self)] + fun test_exceeding_maximum_object_nesting_owns_should_fail(creator: &signer) acquires ObjectCore { + use std::features; + let feature = features::get_max_object_nesting_check_feature(); + let fx = account::create_signer_for_test(@0x1); + features::change_feature_flags(&fx, vector[feature], vector[]); + + let obj1 = create_simple_object(creator, b"1"); + let obj2 = create_simple_object(creator, b"2"); + let obj3 = create_simple_object(creator, b"3"); + let obj4 = create_simple_object(creator, b"4"); + let obj5 = create_simple_object(creator, b"5"); + let obj6 = create_simple_object(creator, b"6"); + let obj7 = create_simple_object(creator, b"7"); + let obj8 = create_simple_object(creator, b"8"); + let obj9 = create_simple_object(creator, b"9"); + + transfer(creator, obj1, object_address(&obj2)); + transfer(creator, obj2, object_address(&obj3)); + transfer(creator, obj3, object_address(&obj4)); + transfer(creator, obj4, object_address(&obj5)); + transfer(creator, obj5, object_address(&obj6)); + transfer(creator, obj6, object_address(&obj7)); + transfer(creator, obj7, object_address(&obj8)); + transfer(creator, obj8, object_address(&obj9)); + + assert!(owns(obj9, signer::address_of(creator)), 1); + assert!(owns(obj8, signer::address_of(creator)), 1); + assert!(owns(obj7, signer::address_of(creator)), 1); + assert!(owns(obj6, signer::address_of(creator)), 1); + assert!(owns(obj5, signer::address_of(creator)), 1); + assert!(owns(obj4, signer::address_of(creator)), 1); + assert!(owns(obj3, signer::address_of(creator)), 1); + assert!(owns(obj2, signer::address_of(creator)), 1); + + // Calling `owns` should fail as the nesting is too deep. 
+        assert!(owns(obj1, signer::address_of(creator)), 1);
+    }
+
+    #[test(creator = @0x123)]
+    #[expected_failure(abort_code = 131078, location = Self)]
+    fun test_exceeding_maximum_object_nesting_transfer_should_fail(creator: &signer) acquires ObjectCore {
+        use std::features;
+        let feature = features::get_max_object_nesting_check_feature();
+        let fx = account::create_signer_for_test(@0x1);
+        features::change_feature_flags(&fx, vector[feature], vector[]);
+
+        let obj1 = create_simple_object(creator, b"1");
+        let obj2 = create_simple_object(creator, b"2");
+        let obj3 = create_simple_object(creator, b"3");
+        let obj4 = create_simple_object(creator, b"4");
+        let obj5 = create_simple_object(creator, b"5");
+        let obj6 = create_simple_object(creator, b"6");
+        let obj7 = create_simple_object(creator, b"7");
+        let obj8 = create_simple_object(creator, b"8");
+        let obj9 = create_simple_object(creator, b"9");
+
+        transfer(creator, obj1, object_address(&obj2));
+        transfer(creator, obj2, object_address(&obj3));
+        transfer(creator, obj3, object_address(&obj4));
+        transfer(creator, obj4, object_address(&obj5));
+        transfer(creator, obj5, object_address(&obj6));
+        transfer(creator, obj6, object_address(&obj7));
+        transfer(creator, obj7, object_address(&obj8));
+        transfer(creator, obj8, object_address(&obj9));
+
+        // This should fail as the nesting is too deep.
+        transfer(creator, obj1, @0x1);
+    }
+
+    #[test(creator = @0x123)]
+    #[expected_failure(abort_code = 131078, location = Self)]
+    fun test_cyclic_ownership_transfer_should_fail(creator: &signer) acquires ObjectCore {
+        use std::features;
+        let feature = features::get_max_object_nesting_check_feature();
+        let fx = account::create_signer_for_test(@0x1);
+        features::change_feature_flags(&fx, vector[feature], vector[]);
+
+        let obj1 = create_simple_object(creator, b"1");
+        // This creates a cycle (self-loop) in ownership.
+        transfer(creator, obj1, object_address(&obj1));
+        // This should fail as the ownership is cyclic.
+        transfer(creator, obj1, object_address(&obj1));
+    }
+
+    #[test(creator = @0x123)]
+    #[expected_failure(abort_code = 131078, location = Self)]
+    fun test_cyclic_ownership_owns_should_fail(creator: &signer) acquires ObjectCore {
+        use std::features;
+        let feature = features::get_max_object_nesting_check_feature();
+        let fx = account::create_signer_for_test(@0x1);
+        features::change_feature_flags(&fx, vector[feature], vector[]);
+
+        let obj1 = create_simple_object(creator, b"1");
+        // This creates a cycle (self-loop) in ownership.
+        transfer(creator, obj1, object_address(&obj1));
+        // This should fail as the ownership is cyclic.
+ let _ = owns(obj1, signer::address_of(creator)); + } } diff --git a/aptos-move/framework/aptos-framework/sources/object.spec.move b/aptos-move/framework/aptos-framework/sources/object.spec.move index 7f7fbf10245f8..49ec75b3bdbe9 100644 --- a/aptos-move/framework/aptos-framework/sources/object.spec.move +++ b/aptos-move/framework/aptos-framework/sources/object.spec.move @@ -47,8 +47,6 @@ spec aptos_framework::object { /// spec module { pragma aborts_if_is_strict; - //ghost variable - global g_roll: u8; } spec fun spec_exists_at(object: address): bool; @@ -504,6 +502,7 @@ spec aptos_framework::object { } spec owns(object: Object, owner: address): bool { + pragma aborts_if_is_partial; let current_address_0 = object.inner; let object_0 = global(current_address_0); let current_address = object_0.owner; diff --git a/aptos-move/framework/aptos-framework/sources/object_code_deployment.move b/aptos-move/framework/aptos-framework/sources/object_code_deployment.move index b22b66fc2b2ad..ef9e7d37fe9df 100644 --- a/aptos-move/framework/aptos-framework/sources/object_code_deployment.move +++ b/aptos-move/framework/aptos-framework/sources/object_code_deployment.move @@ -10,7 +10,7 @@ /// 1. Create a new object with the address derived from the publisher address and the object seed. /// 2. Publish the module passed in the function via `metadata_serialized` and `code` to the newly created object. /// 3. Emits 'Publish' event with the address of the newly created object. -/// 4. Create a `PublisherRef` which stores the extend ref of the newly created object. +/// 4. Create a `ManagingRefs` which stores the extend ref of the newly created object. /// Note: This is needed to upgrade the code as the signer must be generated to upgrade the existing code in an object. /// /// Upgrading modules flow: @@ -51,8 +51,8 @@ module aptos_framework::object_code_deployment { const OBJECT_CODE_DEPLOYMENT_DOMAIN_SEPARATOR: vector = b"aptos_framework::object_code_deployment"; #[resource_group_member(group = aptos_framework::object::ObjectGroup)] - /// Object which contains the code deployed, along with the extend ref to upgrade the code. - struct PublisherRef has key { + /// Internal struct, attached to the object, that holds Refs we need to manage the code deployment (i.e. upgrades). + struct ManagingRefs has key { /// We need to keep the extend ref to be able to generate the signer to upgrade existing code. 
extend_ref: ExtendRef, } @@ -97,7 +97,7 @@ module aptos_framework::object_code_deployment { event::emit(Publish { object_address: signer::address_of(code_signer), }); - move_to(code_signer, PublisherRef { + move_to(code_signer, ManagingRefs { extend_ref: object::generate_extend_ref(constructor_ref), }); } @@ -118,8 +118,8 @@ module aptos_framework::object_code_deployment { publisher: &signer, metadata_serialized: vector, code: vector>, - code_object: Object, - ) acquires PublisherRef { + code_object: Object, + ) acquires ManagingRefs { let publisher_address = signer::address_of(publisher); assert!( object::is_owner(code_object, publisher_address), @@ -127,9 +127,9 @@ module aptos_framework::object_code_deployment { ); let code_object_address = object::object_address(&code_object); - assert!(exists(code_object_address), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST)); + assert!(exists(code_object_address), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST)); - let extend_ref = &borrow_global(code_object_address).extend_ref; + let extend_ref = &borrow_global(code_object_address).extend_ref; let code_signer = &object::generate_signer_for_extending(extend_ref); code::publish_package_txn(code_signer, metadata_serialized, code); diff --git a/aptos-move/framework/aptos-framework/sources/zkid.move b/aptos-move/framework/aptos-framework/sources/openid_account.move similarity index 75% rename from aptos-move/framework/aptos-framework/sources/zkid.move rename to aptos-move/framework/aptos-framework/sources/openid_account.move index 169ccde3a5158..524c25e9d74f9 100644 --- a/aptos-move/framework/aptos-framework/sources/zkid.move +++ b/aptos-move/framework/aptos-framework/sources/openid_account.move @@ -1,4 +1,6 @@ -module aptos_framework::zkid { +/// This module is responsible for configuring OpenID-based blockchain accounts (OIDBs), which were introduced in +/// [AIP-61](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-61.md). +module aptos_framework::openid_account { use std::option; use std::option::Option; use std::signer; @@ -12,8 +14,8 @@ module aptos_framework::zkid { #[resource_group(scope = global)] struct Group {} - #[resource_group_member(group = aptos_framework::zkid::Group)] - /// The 288-byte Groth16 verification key (VK) for the zkID relation. + #[resource_group_member(group = aptos_framework::openid_account::Group)] + /// The 288-byte Groth16 verification key (VK) for the ZK relation that implements OIDBs. struct Groth16VerificationKey has key, store { /// 32-byte serialization of `alpha * G`, where `G` is the generator of `G1`. alpha_g1: vector, @@ -24,29 +26,27 @@ module aptos_framework::zkid { /// 64-byte serialization of `delta * H`, where `H` is the generator of `G2`. delta_g2: vector, /// `\forall i \in {0, ..., \ell}, 64-byte serialization of gamma^{-1} * (beta * a_i + alpha * b_i + c_i) * H`, where - /// `H` is the generator of `G1` and `\ell` is 1 for the zkID relation. + /// `H` is the generator of `G1` and `\ell` is 1 for the ZK relation. gamma_abc_g1: vector>, } - #[resource_group_member(group = aptos_framework::zkid::Group)] + #[resource_group_member(group = aptos_framework::openid_account::Group)] struct Configuration has key, store { - /// An override `aud` for the identity of a recovery service, which will help users recover their zkID accounts + /// An override `aud` for the identity of a recovery service, which will help users recover their OIDB accounts /// associated with dapps or wallets that have disappeared. 
/// IMPORTANT: This recovery service **cannot** on its own take over user accounts; a user must first sign in - /// via OAuth in the recovery service in order to allow it to rotate any of that user's zkID accounts. + /// via OAuth in the recovery service in order to allow it to rotate any of that user's OIDB accounts. override_aud_vals: vector, - /// No transaction can have more than this many zkID signatures. - max_zkid_signatures_per_txn: u16, + /// No transaction can have more than this many OIDB signatures. + max_oidb_signatures_per_txn: u16, /// How far in the future from the JWT issued at time the EPK expiry can be set. max_exp_horizon_secs: u64, /// The training wheels PK, if training wheels are on training_wheels_pubkey: Option>, - /// The size of the "nonce commitment (to the EPK and expiration date)" stored in the JWT's `nonce` field. - nonce_commitment_num_bytes: u16, /// The max length of an ephemeral public key supported in our circuit (93 bytes) max_commited_epk_bytes: u16, - /// The max length of the field name and value of the JWT's `iss` field supported in our circuit (e.g., `"iss":"aptos.com"`) - max_iss_field_bytes: u16, + /// The max length of the value of the JWT's `iss` field supported in our circuit (e.g., `"https://accounts.google.com"`) + max_iss_val_bytes: u16, /// The max length of the JWT field name and value (e.g., `"max_age":"18"`) supported in our circuit max_extra_field_bytes: u16, /// The max length of the base64url-encoded JWT header in bytes supported in our circuit @@ -78,29 +78,27 @@ module aptos_framework::zkid { public fun new_configuration( override_aud_val: vector, - max_zkid_signatures_per_txn: u16, + max_oidb_signatures_per_txn: u16, max_exp_horizon_secs: u64, training_wheels_pubkey: Option>, - nonce_commitment_num_bytes: u16, max_commited_epk_bytes: u16, - max_iss_field_bytes: u16, + max_iss_val_bytes: u16, max_extra_field_bytes: u16, max_jwt_header_b64_bytes: u32 ): Configuration { Configuration { override_aud_vals: override_aud_val, - max_zkid_signatures_per_txn, + max_oidb_signatures_per_txn, max_exp_horizon_secs, training_wheels_pubkey, - nonce_commitment_num_bytes, max_commited_epk_bytes, - max_iss_field_bytes, + max_iss_val_bytes, max_extra_field_bytes, max_jwt_header_b64_bytes, } } - // Sets the zkID Groth16 verification key, only callable via governance proposal. + // Sets the OIDB Groth16 verification key, only callable via governance proposal. // WARNING: If a malicious key is set, this would lead to stolen funds. public fun update_groth16_verification_key(fx: &signer, vk: Groth16VerificationKey) acquires Groth16VerificationKey { system_addresses::assert_aptos_framework(fx); @@ -118,7 +116,7 @@ module aptos_framework::zkid { move_to(fx, vk); } - // Sets the zkID configuration, only callable via governance proposal. + // Sets the OIDB configuration, only callable via governance proposal. // WARNING: If a malicious key is set, this would lead to stolen funds. 
public fun update_configuration(fx: &signer, config: Configuration) acquires Configuration { system_addresses::assert_aptos_framework(fx); @@ -126,12 +124,11 @@ module aptos_framework::zkid { if (exists(signer::address_of(fx))) { let Configuration { override_aud_vals: _, - max_zkid_signatures_per_txn: _, + max_oidb_signatures_per_txn: _, max_exp_horizon_secs: _, training_wheels_pubkey: _, - nonce_commitment_num_bytes: _, max_commited_epk_bytes: _, - max_iss_field_bytes: _, + max_iss_val_bytes: _, max_extra_field_bytes: _, max_jwt_header_b64_bytes: _, } = move_from(signer::address_of(fx)); @@ -140,7 +137,7 @@ module aptos_framework::zkid { move_to(fx, config); } - // Convenience method to set the zkID training wheels, only callable via governance proposal. + // Convenience method to set the OIDB training wheels, only callable via governance proposal. // WARNING: If a malicious key is set, this would lead to stolen funds. public fun update_training_wheels(fx: &signer, pk: Option>) acquires Configuration { system_addresses::assert_aptos_framework(fx); @@ -152,8 +149,16 @@ module aptos_framework::zkid { config.training_wheels_pubkey = pk; } - // Convenience method to append to clear the set of zkID override `aud`'s, only callable via governance proposal. - // WARNING: When no override `aud` is set, recovery of zkID accounts associated with applications that disappeared + // Convenience method to set the max expiration horizon, only callable via governance proposal. + public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64) acquires Configuration { + system_addresses::assert_aptos_framework(fx); + + let config = borrow_global_mut(signer::address_of(fx)); + config.max_exp_horizon_secs = max_exp_horizon_secs; + } + + // Convenience method to clear the set of override `aud`'s, only callable via governance proposal. + // WARNING: When no override `aud` is set, recovery of OIDB accounts associated with applications that disappeared // is no longer possible. public fun remove_all_override_auds(fx: &signer) acquires Configuration { system_addresses::assert_aptos_framework(fx); @@ -162,7 +167,7 @@ module aptos_framework::zkid { config.override_aud_vals = vector[]; } - // Convenience method to append to the set of zkID override `aud`'s, only callable via governance proposal. + // Convenience method to append to the set of override `aud`'s, only callable via governance proposal. // WARNING: If a malicious override `aud` is set, this would lead to stolen funds. public fun add_override_aud(fx: &signer, aud: String) acquires Configuration { system_addresses::assert_aptos_framework(fx); diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index a7779f247af45..687a7febe784a 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -131,7 +131,11 @@ pub enum EntryFunctionCall { cap_update_table: Vec, }, - /// Entry function-only rotation key function that allows the signer update their authentication_key. + /// Private entry function for key rotation that allows the signer to update their authentication key. + /// Note that this does not update the `OriginatingAddress` table because the `new_auth_key` is not "verified": it + /// does not come with a proof-of-knowledge of the underlying SK. 
Nonetheless, we need this functionality due to + /// the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in + /// the format expected in `rotate_authentication_key`. AccountRotateAuthenticationKeyCall { new_auth_key: Vec, }, @@ -1681,7 +1685,11 @@ pub fn account_rotate_authentication_key( )) } -/// Entry function-only rotation key function that allows the signer update their authentication_key. +/// Private entry function for key rotation that allows the signer to update their authentication key. +/// Note that this does not update the `OriginatingAddress` table because the `new_auth_key` is not "verified": it +/// does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to +/// the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in +/// the format expected in `rotate_authentication_key`. pub fn account_rotate_authentication_key_call(new_auth_key: Vec) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( diff --git a/aptos-move/framework/cached-packages/src/aptos_stdlib.rs b/aptos-move/framework/cached-packages/src/aptos_stdlib.rs index 06c387296a373..57d26ae54d75f 100644 --- a/aptos-move/framework/cached-packages/src/aptos_stdlib.rs +++ b/aptos-move/framework/cached-packages/src/aptos_stdlib.rs @@ -44,7 +44,7 @@ pub fn publish_module_source(module_name: &str, module_src: &str) -> Transaction pub fn object_code_deployment_upgrade( metadata_serialized: Vec, code: Vec>, - publisher_ref: AccountAddress, + code_object: AccountAddress, ) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( @@ -59,7 +59,7 @@ pub fn object_code_deployment_upgrade( vec![ bcs::to_bytes(&metadata_serialized).unwrap(), bcs::to_bytes(&code).unwrap(), - bcs::to_bytes(&publisher_ref).unwrap(), + bcs::to_bytes(&code_object).unwrap(), ], )) } diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md index b624588339ee2..88de8800bd719 100644 --- a/aptos-move/framework/move-stdlib/doc/features.md +++ b/aptos-move/framework/move-stdlib/doc/features.md @@ -87,10 +87,10 @@ return true. - [Function `commission_change_delegation_pool_enabled`](#0x1_features_commission_change_delegation_pool_enabled) - [Function `get_bn254_strutures_feature`](#0x1_features_get_bn254_strutures_feature) - [Function `bn254_structures_enabled`](#0x1_features_bn254_structures_enabled) -- [Function `get_zkid_feature`](#0x1_features_get_zkid_feature) -- [Function `zkid_feature_enabled`](#0x1_features_zkid_feature_enabled) -- [Function `get_zkid_zkless_feature`](#0x1_features_get_zkid_zkless_feature) -- [Function `zkid_zkless_feature_enabled`](#0x1_features_zkid_zkless_feature_enabled) +- [Function `get_oidb_feature`](#0x1_features_get_oidb_feature) +- [Function `oidb_feature_enabled`](#0x1_features_oidb_feature_enabled) +- [Function `get_oidb_zkless_feature`](#0x1_features_get_oidb_zkless_feature) +- [Function `oidb_zkless_feature_enabled`](#0x1_features_oidb_zkless_feature_enabled) - [Function `get_jwk_consensus_feature`](#0x1_features_get_jwk_consensus_feature) - [Function `jwk_consensus_enabled`](#0x1_features_jwk_consensus_enabled) - [Function `get_concurrent_fungible_assets_feature`](#0x1_features_get_concurrent_fungible_assets_feature) @@ -98,6 +98,8 @@ return true. 
- [Function `get_reconfigure_with_dkg_feature`](#0x1_features_get_reconfigure_with_dkg_feature) - [Function `reconfigure_with_dkg_enabled`](#0x1_features_reconfigure_with_dkg_enabled) - [Function `is_object_code_deployment_enabled`](#0x1_features_is_object_code_deployment_enabled) +- [Function `get_max_object_nesting_check_feature`](#0x1_features_get_max_object_nesting_check_feature) +- [Function `max_object_nesting_check_enabled`](#0x1_features_max_object_nesting_check_enabled) - [Function `change_feature_flags`](#0x1_features_change_feature_flags) - [Function `change_feature_flags_for_next_epoch`](#0x1_features_change_feature_flags_for_next_epoch) - [Function `on_new_epoch`](#0x1_features_on_new_epoch) @@ -459,6 +461,16 @@ Lifetime: permanent + + +Whether checking the maximum object nesting is enabled. + + +
const MAX_OBJECT_NESTING_CHECK: u64 = 53;
+
+ + + Whether emit function in event.move are enabled for module events. @@ -503,6 +515,30 @@ Whether deploying to objects is enabled. + + +Whether the OIDB feature is enabled, possibly with the ZK-less verification mode. + +Lifetime: transient + + +
const OIDB_SIGNATURE: u64 = 46;
+
+ + + + + +Whether the ZK-less mode of the OIDB feature is enabled. + +Lifetime: transient + + +
const OIDB_ZKLESS_SIGNATURE: u64 = 47;
+
+ + + Whether allow changing beneficiaries for operators. @@ -684,30 +720,6 @@ Lifetime: transient - - -Whether the zkID feature is enabled, possibly with the ZK-less verification mode. - -Lifetime: transient - - -
const ZK_ID_SIGNATURES: u64 = 46;
-
- - - - - -Whether the ZK-less mode of the zkID feature is enabled. - -Lifetime: transient - - -
const ZK_ID_ZKLESS_SIGNATURE: u64 = 47;
-
- - - ## Function `code_dependency_check_enabled` @@ -1985,13 +1997,13 @@ Lifetime: transient - + -## Function `get_zkid_feature` +## Function `get_oidb_feature` -
public fun get_zkid_feature(): u64
+
public fun get_oidb_feature(): u64
 
@@ -2000,20 +2012,20 @@ Lifetime: transient Implementation -
public fun get_zkid_feature(): u64 { ZK_ID_SIGNATURES }
+
public fun get_oidb_feature(): u64 { OIDB_SIGNATURE }
 
- + -## Function `zkid_feature_enabled` +## Function `oidb_feature_enabled` -
public fun zkid_feature_enabled(): bool
+
public fun oidb_feature_enabled(): bool
 
@@ -2022,8 +2034,8 @@ Lifetime: transient Implementation -
public fun zkid_feature_enabled(): bool acquires Features {
-    is_enabled(ZK_ID_SIGNATURES)
+
public fun oidb_feature_enabled(): bool acquires Features {
+    is_enabled(OIDB_SIGNATURE)
 }
 
@@ -2031,13 +2043,13 @@ Lifetime: transient - + -## Function `get_zkid_zkless_feature` +## Function `get_oidb_zkless_feature` -
public fun get_zkid_zkless_feature(): u64
+
public fun get_oidb_zkless_feature(): u64
 
@@ -2046,20 +2058,20 @@ Lifetime: transient Implementation -
public fun get_zkid_zkless_feature(): u64 { ZK_ID_ZKLESS_SIGNATURE }
+
public fun get_oidb_zkless_feature(): u64 { OIDB_ZKLESS_SIGNATURE }
 
- + -## Function `zkid_zkless_feature_enabled` +## Function `oidb_zkless_feature_enabled` -
public fun zkid_zkless_feature_enabled(): bool
+
public fun oidb_zkless_feature_enabled(): bool
 
@@ -2068,8 +2080,8 @@ Lifetime: transient Implementation -
public fun zkid_zkless_feature_enabled(): bool acquires Features {
-    is_enabled(ZK_ID_ZKLESS_SIGNATURE)
+
public fun oidb_zkless_feature_enabled(): bool acquires Features {
+    is_enabled(OIDB_ZKLESS_SIGNATURE)
 }
 
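As a sketch of how a consumer could gate on these two flags: the function and error codes below are hypothetical (on Aptos the actual OIDB signature checks happen natively in transaction validation, not in Move); only the two feature getters come from this module:

// Hypothetical caller in some validation module.
fun ensure_oidb_allowed(zkless: bool) {
    // Feature 46 gates OIDB signatures as a whole.
    assert!(std::features::oidb_feature_enabled(), 1);
    if (zkless) {
        // Feature 47 additionally gates the ZK-less verification mode.
        assert!(std::features::oidb_zkless_feature_enabled(), 2);
    };
}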
@@ -2238,6 +2250,52 @@ Lifetime: transient + + + + +## Function `get_max_object_nesting_check_feature` + + + +
public fun get_max_object_nesting_check_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_max_object_nesting_check_feature(): u64 { MAX_OBJECT_NESTING_CHECK }
+
+ + + +
+ + + +## Function `max_object_nesting_check_enabled` + + + +
public fun max_object_nesting_check_enabled(): bool
+
+ + + +
+Implementation + + +
public fun max_object_nesting_check_enabled(): bool acquires Features {
+    is_enabled(MAX_OBJECT_NESTING_CHECK)
+}
+
+ + +
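For reference, the pattern these two functions enable is the feature-gated depth check used in the `object.move` hunks earlier in this diff; schematically (identifiers as in that file):

while (owner != current_address) {
    count = count + 1;
    if (std::features::max_object_nesting_check_enabled()) {
        assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING))
    };
    // ...follow one ownership link up the chain...
}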
diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index 932f6b120e10b..7f6dfa9dabbd1 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -347,26 +347,26 @@ module std::features { is_enabled(BN254_STRUCTURES) } - /// Whether the zkID feature is enabled, possibly with the ZK-less verification mode. + /// Whether the OIDB feature is enabled, possibly with the ZK-less verification mode. /// /// Lifetime: transient - const ZK_ID_SIGNATURES: u64 = 46; + const OIDB_SIGNATURE: u64 = 46; - public fun get_zkid_feature(): u64 { ZK_ID_SIGNATURES } + public fun get_oidb_feature(): u64 { OIDB_SIGNATURE } - public fun zkid_feature_enabled(): bool acquires Features { - is_enabled(ZK_ID_SIGNATURES) + public fun oidb_feature_enabled(): bool acquires Features { + is_enabled(OIDB_SIGNATURE) } - /// Whether the ZK-less mode of the zkID feature is enabled. + /// Whether the ZK-less mode of the OIDB feature is enabled. /// /// Lifetime: transient - const ZK_ID_ZKLESS_SIGNATURE: u64 = 47; + const OIDB_ZKLESS_SIGNATURE: u64 = 47; - public fun get_zkid_zkless_feature(): u64 { ZK_ID_ZKLESS_SIGNATURE } + public fun get_oidb_zkless_feature(): u64 { OIDB_ZKLESS_SIGNATURE } - public fun zkid_zkless_feature_enabled(): bool acquires Features { - is_enabled(ZK_ID_ZKLESS_SIGNATURE) + public fun oidb_zkless_feature_enabled(): bool acquires Features { + is_enabled(OIDB_ZKLESS_SIGNATURE) } /// The JWK consensus feature. @@ -403,10 +403,20 @@ module std::features { /// Whether deploying to objects is enabled. const OBJECT_CODE_DEPLOYMENT: u64 = 52; + public fun is_object_code_deployment_enabled(): bool acquires Features { is_enabled(OBJECT_CODE_DEPLOYMENT) } + /// Whether checking the maximum object nesting is enabled. 
+ const MAX_OBJECT_NESTING_CHECK: u64 = 53; + + public fun get_max_object_nesting_check_feature(): u64 { MAX_OBJECT_NESTING_CHECK } + + public fun max_object_nesting_check_enabled(): bool acquires Features { + is_enabled(MAX_OBJECT_NESTING_CHECK) + } + // ============================================================================================ // Feature Flag Implementation diff --git a/aptos-move/framework/src/natives/aggregator_natives/context.rs b/aptos-move/framework/src/natives/aggregator_natives/context.rs index 4632997c00ac6..cfbf821a8ebc3 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/context.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/context.rs @@ -8,7 +8,6 @@ use aptos_aggregator::{ delayed_field_extension::DelayedFieldData, delta_change_set::DeltaOp, resolver::{AggregatorV1Resolver, DelayedFieldResolver}, - types::DelayedFieldID, }; use aptos_types::{ delayed_fields::PanicError, @@ -16,6 +15,7 @@ use aptos_types::{ }; use better_any::{Tid, TidAble}; use move_core_types::value::MoveTypeLayout; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{ cell::RefCell, collections::{BTreeMap, HashSet}, diff --git a/aptos-move/framework/src/natives/object_code_deployment.rs b/aptos-move/framework/src/natives/object_code_deployment.rs index 8340154015ebf..23bdec0a81862 100644 --- a/aptos-move/framework/src/natives/object_code_deployment.rs +++ b/aptos-move/framework/src/natives/object_code_deployment.rs @@ -11,13 +11,13 @@ pub struct ExtendRef { } #[derive(Debug, Serialize, Deserialize, Eq, PartialEq)] -pub struct PublisherRef { +pub struct ManagingRefs { pub extend_ref: ExtendRef, } -impl PublisherRef { +impl ManagingRefs { pub fn new(address: AccountAddress) -> Self { - PublisherRef { + ManagingRefs { extend_ref: ExtendRef { address }, } } diff --git a/aptos-move/mvhashmap/Cargo.toml b/aptos-move/mvhashmap/Cargo.toml index 1be6297a183e2..3610582999b34 100644 --- a/aptos-move/mvhashmap/Cargo.toml +++ b/aptos-move/mvhashmap/Cargo.toml @@ -27,6 +27,7 @@ dashmap = { workspace = true } derivative = { workspace = true } move-binary-format = { workspace = true } move-core-types = { workspace = true } +move-vm-types = { workspace = true } serde = { workspace = true } [dev-dependencies] diff --git a/aptos-move/mvhashmap/src/versioned_delayed_fields.rs b/aptos-move/mvhashmap/src/versioned_delayed_fields.rs index 3908c32ca2d91..951ca8a1c9e5e 100644 --- a/aptos-move/mvhashmap/src/versioned_delayed_fields.rs +++ b/aptos-move/mvhashmap/src/versioned_delayed_fields.rs @@ -4,8 +4,9 @@ use crate::types::{AtomicTxnIndex, MVDelayedFieldsError, TxnIndex}; use aptos_aggregator::{ delayed_change::{ApplyBase, DelayedApplyEntry, DelayedEntry}, - types::{code_invariant_error, DelayedFieldValue, PanicError, PanicOr, ReadPosition}, + types::{code_invariant_error, DelayedFieldValue, PanicOr, ReadPosition}, }; +use aptos_types::delayed_fields::PanicError; use claims::assert_matches; use crossbeam::utils::CachePadded; use dashmap::DashMap; @@ -711,10 +712,10 @@ mod test { use super::*; use aptos_aggregator::{ bounded_math::SignedU128, delta_change_set::DeltaOp, delta_math::DeltaHistory, - types::DelayedFieldID, }; use aptos_types::delayed_fields::SnapshotToStringFormula; use claims::{assert_err_eq, assert_ok_eq, assert_some}; + use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use test_case::test_case; // Different type acronyms used for generating different test cases. 
diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 6f242e6218810..501a1270b7111 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -19,18 +19,17 @@ use aptos_gas_schedule::{ }; use aptos_types::{ account_config::{self, aptos_test_root_address, events::NewEpochEvent, CORE_CODE_ADDRESS}, - bn254_circom, - bn254_circom::Groth16VerificationKey, chain_id::ChainId, contract_event::{ContractEvent, ContractEventV1}, move_utils::as_move_value::AsMoveValue, + oidb, + oidb::{Groth16VerificationKey, DEVNET_VERIFICATION_KEY}, on_chain_config::{ FeatureFlag, Features, GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig, TimedFeaturesBuilder, APTOS_MAX_KNOWN_VERSION, }, transaction::{authenticator::AuthenticationKey, ChangeSet, Transaction, WriteSetPayload}, write_set::TransactionWrite, - zkid, }; use aptos_vm::{ data_cache::AsMoveResolver, @@ -55,7 +54,7 @@ const GENESIS_MODULE_NAME: &str = "genesis"; const GOVERNANCE_MODULE_NAME: &str = "aptos_governance"; const CODE_MODULE_NAME: &str = "code"; const VERSION_MODULE_NAME: &str = "version"; -const ZKID_MODULE_NAME: &str = "zkid"; +const OIDB_MODULE_NAME: &str = "openid_account"; const JWKS_MODULE_NAME: &str = "jwks"; const NUM_SECONDS_PER_YEAR: u64 = 365 * 24 * 60 * 60; @@ -270,7 +269,7 @@ pub fn encode_genesis_change_set( if genesis_config.is_test { allow_core_resources_to_set_version(&mut session); } - initialize_zkid(&mut session, chain_id); + initialize_oidb(&mut session, chain_id); set_genesis_end(&mut session); // Reconfiguration should happen after all on-chain invocations. @@ -469,11 +468,12 @@ pub fn default_features() -> Vec { FeatureFlag::COMMISSION_CHANGE_DELEGATION_POOL, FeatureFlag::WEBAUTHN_SIGNATURE, FeatureFlag::RECONFIGURE_WITH_DKG, - FeatureFlag::ZK_ID_SIGNATURES, - FeatureFlag::ZK_ID_ZKLESS_SIGNATURE, + FeatureFlag::OIDB_SIGNATURE, + FeatureFlag::OIDB_ZKLESS_SIGNATURE, FeatureFlag::JWK_CONSENSUS, FeatureFlag::REFUNDABLE_BYTES, FeatureFlag::OBJECT_CODE_DEPLOYMENT, + FeatureFlag::MAX_OBJECT_NESTING_CHECK, ] } @@ -558,11 +558,11 @@ fn initialize_on_chain_governance(session: &mut SessionExt, genesis_config: &Gen ); } -fn initialize_zkid(session: &mut SessionExt, chain_id: ChainId) { - let config = zkid::Configuration::new_for_devnet_and_testing(); +fn initialize_oidb(session: &mut SessionExt, chain_id: ChainId) { + let config = oidb::Configuration::new_for_devnet(); exec_function( session, - ZKID_MODULE_NAME, + OIDB_MODULE_NAME, "update_configuration", vec![], serialize_values(&vec![ @@ -571,10 +571,10 @@ fn initialize_zkid(session: &mut SessionExt, chain_id: ChainId) { ]), ); if !chain_id.is_mainnet() { - let vk = Groth16VerificationKey::from(bn254_circom::DEVNET_VERIFYING_KEY.clone()); + let vk = Groth16VerificationKey::from(DEVNET_VERIFICATION_KEY.clone()); exec_function( session, - ZKID_MODULE_NAME, + OIDB_MODULE_NAME, "update_groth16_verification_key", vec![], serialize_values(&vec![ diff --git a/aptos-node/src/lib.rs b/aptos-node/src/lib.rs index 5834b0e5bdab8..22c66c652ba90 100644 --- a/aptos-node/src/lib.rs +++ b/aptos-node/src/lib.rs @@ -701,7 +701,10 @@ pub fn setup_environment_and_start_node( let maybe_jwk_consensus_key = load_consensus_key_from_secure_storage(&node_config.consensus.safety_rules); - debug!("maybe_jwk_consensus_key={:?}", maybe_jwk_consensus_key); + debug!( + "jwk_consensus_key_err={:?}", + maybe_jwk_consensus_key.as_ref().err() + ); let jwk_consensus_runtime = match (jwk_consensus_network_interfaces, maybe_jwk_consensus_key) { 
(Some(interfaces), Ok(consensus_key)) => { diff --git a/config/src/config/consensus_config.rs b/config/src/config/consensus_config.rs index 781a8c66979c1..0a0a22cc6da81 100644 --- a/config/src/config/consensus_config.rs +++ b/config/src/config/consensus_config.rs @@ -11,7 +11,7 @@ use cfg_if::cfg_if; use serde::{Deserialize, Serialize}; use std::path::PathBuf; -pub(crate) const MAX_SENDING_BLOCK_TXNS: u64 = 2500; +pub(crate) const MAX_SENDING_BLOCK_TXNS: u64 = 1900; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] #[serde(default, deny_unknown_fields)] diff --git a/config/src/config/state_sync_config.rs b/config/src/config/state_sync_config.rs index 26d98790241f5..179a07b0d7125 100644 --- a/config/src/config/state_sync_config.rs +++ b/config/src/config/state_sync_config.rs @@ -136,7 +136,7 @@ impl Default for StateSyncDriverConfig { max_connection_deadline_secs: 10, max_consecutive_stream_notifications: 10, max_num_stream_timeouts: 12, - max_pending_data_chunks: 100, + max_pending_data_chunks: 50, max_pending_mempool_notifications: 100, max_stream_wait_time_ms: 5000, num_versions_to_skip_snapshot_sync: 100_000_000, // At 5k TPS, this allows a node to fail for about 6 hours. @@ -204,6 +204,9 @@ impl Default for StorageServiceConfig { #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct DataStreamingServiceConfig { + /// The dynamic prefetching config for the data streaming service + pub dynamic_prefetching: DynamicPrefetchingConfig, + /// Whether or not to enable data subscription streaming. pub enable_subscription_streaming: bool, @@ -216,9 +219,7 @@ pub struct DataStreamingServiceConfig { /// Maximum number of concurrent data client requests (per stream) for state keys/values. pub max_concurrent_state_requests: u64, - /// Maximum channel sizes for each data stream listener. If messages are not - /// consumed, they will be dropped (oldest messages first). The remaining - /// messages will be retrieved using FIFO ordering. + /// Maximum channel sizes for each data stream listener (per stream). 
pub max_data_stream_channel_sizes: u64, /// Maximum number of notification ID to response context mappings held in @@ -248,11 +249,12 @@ pub struct DataStreamingServiceConfig { impl Default for DataStreamingServiceConfig { fn default() -> Self { Self { + dynamic_prefetching: DynamicPrefetchingConfig::default(), enable_subscription_streaming: false, global_summary_refresh_interval_ms: 50, max_concurrent_requests: MAX_CONCURRENT_REQUESTS, max_concurrent_state_requests: MAX_CONCURRENT_STATE_REQUESTS, - max_data_stream_channel_sizes: 300, + max_data_stream_channel_sizes: 50, max_notification_id_mappings: 300, max_num_consecutive_subscriptions: 40, // At ~4 blocks per second, this should last 10 seconds max_pending_requests: 50, @@ -263,6 +265,45 @@ impl Default for DataStreamingServiceConfig { } } +#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct DynamicPrefetchingConfig { + /// Whether or not to enable dynamic prefetching + pub enable_dynamic_prefetching: bool, + + /// The initial number of concurrent prefetching requests + pub initial_prefetching_value: u64, + + /// The maximum number of concurrent prefetching requests + pub max_prefetching_value: u64, + + /// The minimum number of concurrent prefetching requests + pub min_prefetching_value: u64, + + /// The amount by which to increase the concurrent prefetching value (i.e., on a successful response) + pub prefetching_value_increase: u64, + + /// The amount by which to decrease the concurrent prefetching value (i.e., on a timeout) + pub prefetching_value_decrease: u64, + + /// The duration by which to freeze the prefetching value on a timeout + pub timeout_freeze_duration_secs: u64, +} + +impl Default for DynamicPrefetchingConfig { + fn default() -> Self { + Self { + enable_dynamic_prefetching: true, + initial_prefetching_value: 3, + max_prefetching_value: 30, + min_prefetching_value: 3, + prefetching_value_increase: 1, + prefetching_value_decrease: 2, + timeout_freeze_duration_secs: 30, + } + } +} + #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct AptosDataPollerConfig { diff --git a/consensus/consensus-types/src/lib.rs b/consensus/consensus-types/src/lib.rs index 2b21258cbdc56..1ae492531755c 100644 --- a/consensus/consensus-types/src/lib.rs +++ b/consensus/consensus-types/src/lib.rs @@ -10,8 +10,8 @@ pub mod block_retrieval; pub mod common; pub mod delayed_qc_msg; pub mod epoch_retrieval; -pub mod executed_block; pub mod pipeline; +pub mod pipelined_block; pub mod proof_of_store; pub mod proposal_ext; pub mod proposal_msg; diff --git a/consensus/consensus-types/src/executed_block.rs b/consensus/consensus-types/src/pipelined_block.rs similarity index 85% rename from consensus/consensus-types/src/executed_block.rs rename to consensus/consensus-types/src/pipelined_block.rs index d83f0f86efd26..5b530de0fa46d 100644 --- a/consensus/consensus-types/src/executed_block.rs +++ b/consensus/consensus-types/src/pipelined_block.rs @@ -20,11 +20,11 @@ use std::{ time::{Duration, Instant}, }; -/// ExecutedBlocks are managed in a speculative tree, the committed blocks form a chain. Besides -/// block data, each executed block also has other derived meta data which could be regenerated from -/// blocks. +/// A representation of a block that has been added to the execution pipeline. It might either be in ordered +/// or in executed state. In the ordered state, the block is waiting to be executed. 
In the executed state, +/// the block has been executed and the output is available. #[derive(Clone, Eq, PartialEq)] -pub struct ExecutedBlock { +pub struct PipelinedBlock { /// Block data that cannot be regenerated. block: Block, /// Input transactions in the order of execution @@ -37,8 +37,8 @@ pub struct ExecutedBlock { pipeline_insertion_time: OnceCell, } -impl ExecutedBlock { - pub fn replace_result( +impl PipelinedBlock { + pub fn set_execution_result( mut self, input_transactions: Vec, result: StateComputeResult, @@ -57,19 +57,19 @@ impl ExecutedBlock { } } -impl Debug for ExecutedBlock { +impl Debug for PipelinedBlock { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!(f, "{}", self) } } -impl Display for ExecutedBlock { +impl Display for PipelinedBlock { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!(f, "{}", self.block()) } } -impl ExecutedBlock { +impl PipelinedBlock { pub fn new( block: Block, input_transactions: Vec, @@ -84,6 +84,16 @@ impl ExecutedBlock { } } + pub fn new_ordered(block: Block) -> Self { + Self { + block, + input_transactions: vec![], + state_compute_result: StateComputeResult::new_dummy(), + randomness: OnceCell::new(), + pipeline_insertion_time: OnceCell::new(), + } + } + pub fn block(&self) -> &Block { &self.block } @@ -144,12 +154,12 @@ impl ExecutedBlock { ) } - pub fn vote_proposal(&self, decoupled_execution: bool) -> VoteProposal { + pub fn vote_proposal(&self) -> VoteProposal { VoteProposal::new( self.compute_result().extension_proof(), self.block.clone(), self.compute_result().epoch_state().clone(), - decoupled_execution, + true, ) } diff --git a/consensus/src/block_storage/block_store.rs b/consensus/src/block_storage/block_store.rs index a596606a148d9..84611d65a46cb 100644 --- a/consensus/src/block_storage/block_store.rs +++ b/consensus/src/block_storage/block_store.rs @@ -13,17 +13,16 @@ use crate::{ persistent_liveness_storage::{ PersistentLivenessStorage, RecoveryData, RootInfo, RootMetadata, }, - state_computer::PipelineExecutionResult, - state_replication::StateComputer, + pipeline::execution_client::TExecutionClient, util::time_service::TimeService, }; use anyhow::{bail, ensure, format_err, Context}; use aptos_consensus_types::{ - block::Block, common::Round, executed_block::ExecutedBlock, quorum_cert::QuorumCert, + block::Block, common::Round, pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, sync_info::SyncInfo, timeout_2chain::TwoChainTimeoutCertificate, }; use aptos_crypto::{hash::ACCUMULATOR_PLACEHOLDER_HASH, HashValue}; -use aptos_executor_types::{ExecutorError, ExecutorResult, StateComputeResult}; +use aptos_executor_types::StateComputeResult; use aptos_infallible::RwLock; use aptos_logger::prelude::*; use aptos_types::ledger_info::LedgerInfoWithSignatures; @@ -41,7 +40,7 @@ mod block_store_test; #[path = "sync_manager.rs"] pub mod sync_manager; -fn update_counters_for_ordered_blocks(ordered_blocks: &[Arc]) { +fn update_counters_for_ordered_blocks(ordered_blocks: &[Arc]) { for block in ordered_blocks { observe_block(block.block().timestamp_usecs(), BlockStage::ORDERED); } @@ -65,7 +64,7 @@ fn update_counters_for_ordered_blocks(ordered_blocks: &[Arc]) { /// ╰--------------> D3 pub struct BlockStore { inner: Arc>, - state_computer: Arc, + execution_client: Arc, /// The persistent storage backing up the in-memory data structure, every write should go /// through this before in-memory tree. 
storage: Arc, @@ -82,7 +81,7 @@ impl BlockStore { pub fn new( storage: Arc, initial_data: RecoveryData, - state_computer: Arc, + execution_client: Arc, max_pruned_blocks_in_mem: usize, time_service: Arc, vote_back_pressure_limit: Round, @@ -96,18 +95,18 @@ impl BlockStore { blocks, quorum_certs, highest_2chain_tc, - state_computer, + execution_client, storage, max_pruned_blocks_in_mem, time_service, vote_back_pressure_limit, payload_manager, )); - block_on(block_store.try_commit()); + block_on(block_store.try_send_for_execution()); block_store } - async fn try_commit(&self) { + async fn try_send_for_execution(&self) { // reproduce the same batches (important for the commit phase) let mut certs = self.inner.read().get_all_quorum_certs_with_commit_info(); @@ -121,7 +120,7 @@ impl BlockStore { qc.ledger_info() ); - if let Err(e) = self.commit(qc.clone()).await { + if let Err(e) = self.send_for_execution(qc.clone()).await { error!("Error in try-committing blocks. {}", e.to_string()); } } @@ -134,7 +133,7 @@ impl BlockStore { blocks: Vec, quorum_certs: Vec, highest_2chain_timeout_cert: Option, - state_computer: Arc, + execution_client: Arc, storage: Arc, max_pruned_blocks_in_mem: usize, time_service: Arc, @@ -173,7 +172,7 @@ impl BlockStore { vec![], /* reconfig_events */ ); - let executed_root_block = ExecutedBlock::new( + let pipelined_root_block = PipelinedBlock::new( *root_block, vec![], // Create a dummy state_compute_result with necessary fields filled in. @@ -181,7 +180,7 @@ impl BlockStore { ); let tree = BlockTree::new( - executed_root_block, + pipelined_root_block, root_qc, root_ordered_cert, root_commit_cert, @@ -191,7 +190,7 @@ impl BlockStore { let block_store = Self { inner: Arc::new(RwLock::new(tree)), - state_computer, + execution_client, storage, time_service, vote_back_pressure_limit, @@ -202,7 +201,7 @@ impl BlockStore { for block in blocks { block_store - .execute_and_insert_block(block) + .insert_ordered_block(block) .await .unwrap_or_else(|e| { panic!("[BlockStore] failed to insert block during build {:?}", e) @@ -220,8 +219,8 @@ impl BlockStore { block_store } - /// Commit the given block id with the proof, returns () on success or error - pub async fn commit(&self, finality_proof: QuorumCert) -> anyhow::Result<()> { + /// Send an ordered block id with the proof for execution, returns () on success or error + pub async fn send_for_execution(&self, finality_proof: QuorumCert) -> anyhow::Result<()> { let block_id_to_commit = finality_proof.commit_info().id(); let block_to_commit = self .get_block(block_id_to_commit) @@ -242,14 +241,13 @@ impl BlockStore { let block_tree = self.inner.clone(); let storage = self.storage.clone(); - // This callback is invoked synchronously withe coupled-execution and asynchronously in decoupled setup. - // the callback could be used for multiple batches of blocks. - self.state_computer - .commit( + // This callback is invoked synchronously with and could be used for multiple batches of blocks. 
+ self.execution_client + .finalize_order( &blocks_to_commit, finality_proof.ledger_info().clone(), Box::new( - move |committed_blocks: &[Arc], + move |committed_blocks: &[Arc], commit_decision: LedgerInfoWithSignatures| { block_tree.write().commit_callback( storage, @@ -287,7 +285,7 @@ impl BlockStore { blocks, quorum_certs, prev_2chain_htc, - Arc::clone(&self.state_computer), + self.execution_client.clone(), Arc::clone(&self.storage), max_pruned_blocks_in_mem, Arc::clone(&self.time_service), @@ -300,10 +298,10 @@ impl BlockStore { *self.inner.write() = Arc::try_unwrap(inner) .unwrap_or_else(|_| panic!("New block tree is not shared")) .into_inner(); - self.try_commit().await; + self.try_send_for_execution().await; } - /// Execute and insert a block if it passes all validation tests. + /// Insert a block if it passes all validation tests. /// Returns the Arc to the block kept in the block store after persisting it to storage /// /// This function assumes that the ancestors are present (returns MissingParent otherwise). @@ -311,10 +309,7 @@ impl BlockStore { /// Duplicate inserts will return the previously inserted block ( /// note that it is considered a valid non-error case, for example, it can happen if a validator /// receives a certificate for a block that is currently being added). - pub async fn execute_and_insert_block( - &self, - block: Block, - ) -> anyhow::Result> { + pub async fn insert_ordered_block(&self, block: Block) -> anyhow::Result> { if let Some(existing_block) = self.get_block(block.id()) { return Ok(existing_block); } @@ -323,54 +318,28 @@ impl BlockStore { "Block with old round" ); - let executed_block = match self.execute_block(block.clone()).await { - Ok(res) => Ok(res), - Err(ExecutorError::BlockNotFound(parent_block_id)) => { - // recover the block tree in executor - let blocks_to_reexecute = self - .path_from_ordered_root(parent_block_id) - .unwrap_or_default(); - - for block in blocks_to_reexecute { - self.execute_block(block.block().clone()).await?; - } - self.execute_block(block).await - }, - err => err, - }?; - + let pipelined_block = PipelinedBlock::new_ordered(block.clone()); // ensure local time past the block time - let block_time = Duration::from_micros(executed_block.timestamp_usecs()); + let block_time = Duration::from_micros(pipelined_block.timestamp_usecs()); let current_timestamp = self.time_service.get_current_timestamp(); if let Some(t) = block_time.checked_sub(current_timestamp) { if t > Duration::from_secs(1) { warn!( "Long wait time {}ms for block {}", t.as_millis(), - executed_block.block() + pipelined_block.block() ); } self.time_service.wait_until(block_time).await; } - if let Some(payload) = executed_block.block().payload() { + if let Some(payload) = pipelined_block.block().payload() { self.payload_manager - .prefetch_payload_data(payload, executed_block.block().timestamp_usecs()); + .prefetch_payload_data(payload, pipelined_block.block().timestamp_usecs()); } self.storage - .save_tree(vec![executed_block.block().clone()], vec![]) + .save_tree(vec![pipelined_block.block().clone()], vec![]) .context("Insert block failed when saving block")?; - self.inner.write().insert_block(executed_block) - } - - async fn execute_block(&self, block: Block) -> ExecutorResult { - // Although NIL blocks don't have a payload, we still send a T::default() to compute - // because we may inject a block prologue transaction. 
- let pipeline_result = self - .state_computer - .compute(&block, block.parent_id(), None) - .await?; - let PipelineExecutionResult { input_txns, result } = pipeline_result; - Ok(ExecutedBlock::new(block, input_txns, result)) + self.inner.write().insert_block(pipelined_block) } /// Validates quorum certificates and inserts it into block tree assuming dependencies exist. @@ -381,19 +350,19 @@ impl BlockStore { // the QuorumCert's state on the next restart will work if there is a memory // corruption, for example. match self.get_block(qc.certified_block().id()) { - Some(executed_block) => { + Some(pipelined_block) => { ensure!( // decoupled execution allows dummy block infos - executed_block + pipelined_block .block_info() .match_ordered_only(qc.certified_block()), "QC for block {} has different {:?} than local {:?}", qc.certified_block().id(), qc.certified_block(), - executed_block.block_info() + pipelined_block.block_info() ); observe_block( - executed_block.block().timestamp_usecs(), + pipelined_block.block().timestamp_usecs(), BlockStage::QC_ADDED, ); }, @@ -557,15 +526,15 @@ impl BlockReader for BlockStore { self.inner.read().block_exists(&block_id) } - fn get_block(&self, block_id: HashValue) -> Option> { + fn get_block(&self, block_id: HashValue) -> Option> { self.inner.read().get_block(&block_id) } - fn ordered_root(&self) -> Arc { + fn ordered_root(&self) -> Arc { self.inner.read().ordered_root() } - fn commit_root(&self) -> Arc { + fn commit_root(&self) -> Arc { self.inner.read().commit_root() } @@ -573,15 +542,15 @@ impl BlockReader for BlockStore { self.inner.read().get_quorum_cert_for_block(&block_id) } - fn path_from_ordered_root(&self, block_id: HashValue) -> Option>> { + fn path_from_ordered_root(&self, block_id: HashValue) -> Option>> { self.inner.read().path_from_ordered_root(block_id) } - fn path_from_commit_root(&self, block_id: HashValue) -> Option>> { + fn path_from_commit_root(&self, block_id: HashValue) -> Option>> { self.inner.read().path_from_commit_root(block_id) } - fn highest_certified_block(&self) -> Arc { + fn highest_certified_block(&self) -> Arc { self.inner.read().highest_certified_block() } @@ -638,11 +607,11 @@ impl BlockStore { } /// Helper function to insert the block with the qc together - pub async fn insert_block_with_qc(&self, block: Block) -> anyhow::Result> { + pub async fn insert_block_with_qc(&self, block: Block) -> anyhow::Result> { self.insert_single_quorum_cert(block.quorum_cert().clone())?; if self.ordered_root().round() < block.quorum_cert().commit_info().round() { - self.commit(block.quorum_cert().clone()).await?; + self.send_for_execution(block.quorum_cert().clone()).await?; } - self.execute_and_insert_block(block).await + self.insert_ordered_block(block).await } } diff --git a/consensus/src/block_storage/block_store_test.rs b/consensus/src/block_storage/block_store_test.rs index 89b3ed650b483..9865b19332659 100644 --- a/consensus/src/block_storage/block_store_test.rs +++ b/consensus/src/block_storage/block_store_test.rs @@ -130,7 +130,7 @@ proptest! 
{ let known_parent = block_store.block_exists(block.parent_id()); let certified_parent = block.quorum_cert().certified_block().id() == block.parent_id(); let verify_res = block.verify_well_formed(); - let res = timed_block_on(&runtime, block_store.execute_and_insert_block(block.clone())); + let res = timed_block_on(&runtime, block_store.insert_ordered_block(block.clone())); if !certified_parent { prop_assert!(verify_res.is_err()); } else if !known_parent { @@ -367,7 +367,7 @@ async fn test_illegal_timestamp() { ) .unwrap(); let result = block_store - .execute_and_insert_block(block_with_illegal_timestamp) + .insert_ordered_block(block_with_illegal_timestamp) .await; assert!(result.is_err()); } diff --git a/consensus/src/block_storage/block_tree.rs b/consensus/src/block_storage/block_tree.rs index 21c8090c78892..7c16b0946dc35 100644 --- a/consensus/src/block_storage/block_tree.rs +++ b/consensus/src/block_storage/block_tree.rs @@ -10,7 +10,7 @@ use crate::{ }; use anyhow::bail; use aptos_consensus_types::{ - executed_block::ExecutedBlock, quorum_cert::QuorumCert, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, timeout_2chain::TwoChainTimeoutCertificate, }; use aptos_crypto::HashValue; @@ -22,24 +22,24 @@ use std::{ sync::Arc, }; -/// This structure is a wrapper of [`ExecutedBlock`](aptos_consensus_types::executed_block::ExecutedBlock) +/// This structure is a wrapper of [`PipelinedBlock`](aptos_consensus_types::pipelined_block::PipelinedBlock) /// that adds `children` field to know the parent-child relationship between blocks. struct LinkableBlock { /// Executed block that has raw block data and execution output. - executed_block: Arc, + executed_block: Arc, /// The set of children for cascading pruning. Note: a block may have multiple children.
children: HashSet, } impl LinkableBlock { - pub fn new(block: ExecutedBlock) -> Self { + pub fn new(block: PipelinedBlock) -> Self { Self { executed_block: Arc::new(block), children: HashSet::new(), } } - pub fn executed_block(&self) -> &Arc { + pub fn executed_block(&self) -> &Arc { &self.executed_block } @@ -93,7 +93,7 @@ pub struct BlockTree { impl BlockTree { pub(super) fn new( - root: ExecutedBlock, + root: PipelinedBlock, root_quorum_cert: QuorumCert, root_ordered_cert: QuorumCert, root_commit_cert: QuorumCert, @@ -173,22 +173,22 @@ impl BlockTree { self.id_to_block.contains_key(block_id) } - pub(super) fn get_block(&self, block_id: &HashValue) -> Option> { + pub(super) fn get_block(&self, block_id: &HashValue) -> Option> { self.get_linkable_block(block_id) .map(|lb| Arc::clone(lb.executed_block())) } - pub(super) fn ordered_root(&self) -> Arc { + pub(super) fn ordered_root(&self) -> Arc { self.get_block(&self.ordered_root_id) .expect("Root must exist") } - pub(super) fn commit_root(&self) -> Arc { + pub(super) fn commit_root(&self) -> Arc { self.get_block(&self.commit_root_id) .expect("Commit root must exist") } - pub(super) fn highest_certified_block(&self) -> Arc { + pub(super) fn highest_certified_block(&self) -> Arc { self.get_block(&self.highest_certified_block_id) .expect("Highest cerfified block must exist") } @@ -223,8 +223,8 @@ impl BlockTree { pub(super) fn insert_block( &mut self, - block: ExecutedBlock, - ) -> anyhow::Result> { + block: PipelinedBlock, + ) -> anyhow::Result> { let block_id = block.id(); if let Some(existing_block) = self.get_block(&block_id) { debug!("Already had block {:?} for id {:?} when trying to add another block {:?} for the same id", @@ -371,7 +371,7 @@ impl BlockTree { block_id: HashValue, root_id: HashValue, root_round: u64, - ) -> Option>> { + ) -> Option>> { let mut res = vec![]; let mut cur_block_id = block_id; loop { @@ -398,14 +398,14 @@ impl BlockTree { pub(super) fn path_from_ordered_root( &self, block_id: HashValue, - ) -> Option>> { + ) -> Option>> { self.path_from_root_to_block(block_id, self.ordered_root_id, self.ordered_root().round()) } pub(super) fn path_from_commit_root( &self, block_id: HashValue, - ) -> Option>> { + ) -> Option>> { self.path_from_root_to_block(block_id, self.commit_root_id, self.commit_root().round()) } @@ -417,7 +417,7 @@ impl BlockTree { pub fn commit_callback( &mut self, storage: Arc, - blocks_to_commit: &[Arc], + blocks_to_commit: &[Arc], finality_proof: QuorumCert, commit_decision: LedgerInfoWithSignatures, ) { diff --git a/consensus/src/block_storage/mod.rs b/consensus/src/block_storage/mod.rs index 2c33949f90cbc..8b553706127b8 100644 --- a/consensus/src/block_storage/mod.rs +++ b/consensus/src/block_storage/mod.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_consensus_types::{ - executed_block::ExecutedBlock, quorum_cert::QuorumCert, sync_info::SyncInfo, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, sync_info::SyncInfo, timeout_2chain::TwoChainTimeoutCertificate, }; use aptos_crypto::HashValue; @@ -19,13 +19,13 @@ pub trait BlockReader: Send + Sync { fn block_exists(&self, block_id: HashValue) -> bool; /// Try to get a block with the block_id, return an Arc of it if found. - fn get_block(&self, block_id: HashValue) -> Option>; + fn get_block(&self, block_id: HashValue) -> Option>; /// Get the current ordered root block of the BlockTree. - fn ordered_root(&self) -> Arc; + fn ordered_root(&self) -> Arc; /// Get the current commit root block of the BlockTree. 
- fn commit_root(&self) -> Arc; + fn commit_root(&self) -> Arc; fn get_quorum_cert_for_block(&self, block_id: HashValue) -> Option>; @@ -36,12 +36,12 @@ pub trait BlockReader: Send + Sync { /// path_from_root(b2) -> Some([b2, b1]) /// path_from_root(b0) -> Some([]) /// path_from_root(a) -> None - fn path_from_ordered_root(&self, block_id: HashValue) -> Option>>; + fn path_from_ordered_root(&self, block_id: HashValue) -> Option>>; - fn path_from_commit_root(&self, block_id: HashValue) -> Option>>; + fn path_from_commit_root(&self, block_id: HashValue) -> Option>>; /// Return the certified block with the highest round. - fn highest_certified_block(&self) -> Arc; + fn highest_certified_block(&self) -> Arc; /// Return the quorum certificate with the highest round fn highest_quorum_cert(&self) -> Arc; diff --git a/consensus/src/block_storage/sync_manager.rs b/consensus/src/block_storage/sync_manager.rs index 92ff2ff2fb2fb..f7849a943b5fb 100644 --- a/consensus/src/block_storage/sync_manager.rs +++ b/consensus/src/block_storage/sync_manager.rs @@ -11,7 +11,7 @@ use crate::{ network_interface::ConsensusMsg, payload_manager::PayloadManager, persistent_liveness_storage::{LedgerRecoveryData, PersistentLivenessStorage, RecoveryData}, - state_replication::StateComputer, + pipeline::execution_client::TExecutionClient, }; use anyhow::{bail, Context}; use aptos_consensus_types::{ @@ -118,7 +118,7 @@ impl BlockStore { _ => (), } if self.ordered_root().round() < qc.commit_info().round() { - self.commit(qc.clone()).await?; + self.send_for_execution(qc.clone()).await?; if qc.ends_epoch() { retriever .network @@ -159,7 +159,7 @@ impl BlockStore { while let Some(block) = pending.pop() { let block_qc = block.quorum_cert().clone(); self.insert_single_quorum_cert(block_qc)?; - self.execute_and_insert_block(block).await?; + self.insert_ordered_block(block).await?; } self.insert_single_quorum_cert(qc) } @@ -185,7 +185,7 @@ impl BlockStore { &highest_commit_cert, retriever, self.storage.clone(), - self.state_computer.clone(), + self.execution_client.clone(), self.payload_manager.clone(), ) .await? 
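// ---------------------------------------------------------------------------
// Reviewer sketch (illustration only, not part of the patch). The hunks above
// complete the BlockStore half of this refactor: the store no longer talks to a
// StateComputer; both commit finalization (send_for_execution -> finalize_order)
// and state sync (sync_to) go through the execution-client abstraction that this
// diff introduces in consensus/src/pipeline/execution_client.rs. Below is a
// minimal, self-contained model of that relationship; every type is a simplified
// stand-in, not the real API (the real trait is async and returns
// ExecutorResult / StateSyncError; the sketch stays synchronous so it runs
// without a tokio runtime).
use std::sync::Arc;

#[derive(Clone)]
struct PipelinedBlock {
    round: u64,
}

#[derive(Clone)]
struct LedgerInfoWithSignatures {
    commit_round: u64,
}

trait TExecutionClient: Send + Sync {
    // Hand an ordered prefix of blocks (plus its proof) to the decoupled
    // execution pipeline; the actual commit happens asynchronously behind it.
    fn finalize_order(
        &self,
        blocks: &[Arc<PipelinedBlock>],
        ordered_proof: LedgerInfoWithSignatures,
    ) -> Result<(), String>;
    // Fast-forward local state to a commit certificate from the network.
    fn sync_to(&self, target: LedgerInfoWithSignatures) -> Result<(), String>;
}

struct NoopExecutionClient;

impl TExecutionClient for NoopExecutionClient {
    fn finalize_order(
        &self,
        blocks: &[Arc<PipelinedBlock>],
        _ordered_proof: LedgerInfoWithSignatures,
    ) -> Result<(), String> {
        println!("ordering {} block(s) for execution", blocks.len());
        Ok(())
    }

    fn sync_to(&self, target: LedgerInfoWithSignatures) -> Result<(), String> {
        println!("fast-forwarding state to round {}", target.commit_round);
        Ok(())
    }
}

// The shape of the calls made by `send_for_execution` and `fast_forward_sync`
// in the hunks above, stripped of the real block-tree bookkeeping.
struct BlockStore {
    execution_client: Arc<dyn TExecutionClient>,
}

impl BlockStore {
    fn send_for_execution(&self, blocks: Vec<Arc<PipelinedBlock>>) -> Result<(), String> {
        let proof = LedgerInfoWithSignatures {
            commit_round: blocks.last().map(|b| b.round).unwrap_or(0),
        };
        self.execution_client.finalize_order(&blocks, proof)
    }
}

fn main() -> Result<(), String> {
    let store = BlockStore {
        execution_client: Arc::new(NoopExecutionClient),
    };
    store.send_for_execution(vec![Arc::new(PipelinedBlock { round: 7 })])?;
    store.execution_client.sync_to(LedgerInfoWithSignatures { commit_round: 7 })
}
// ------------------------------ end of sketch -------------------------------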
@@ -215,7 +215,7 @@ impl BlockStore { highest_commit_cert: &'a QuorumCert, retriever: &'a mut BlockRetriever, storage: Arc, - state_computer: Arc, + execution_client: Arc, payload_manager: Arc, ) -> anyhow::Result { info!( @@ -327,7 +327,7 @@ impl BlockStore { storage.save_tree(blocks.clone(), quorum_certs.clone())?; - state_computer + execution_client .sync_to(highest_commit_cert.ledger_info().clone()) .await?; @@ -346,7 +346,7 @@ impl BlockStore { async fn sync_to_highest_commit_cert( &self, ledger_info: &LedgerInfoWithSignatures, - network: &NetworkSender, + network: &Arc, ) { // if the block exists between commit root and ordered root if self.commit_root().round() < ledger_info.commit_info().round() @@ -404,7 +404,7 @@ impl BlockStore { /// BlockRetriever is used internally to retrieve blocks pub struct BlockRetriever { - network: NetworkSender, + network: Arc, preferred_peer: Author, validator_addresses: Vec, max_blocks_to_request: u64, @@ -412,7 +412,7 @@ pub struct BlockRetriever { impl BlockRetriever { pub fn new( - network: NetworkSender, + network: Arc, preferred_peer: Author, validator_addresses: Vec, max_blocks_to_request: u64, diff --git a/consensus/src/consensus_provider.rs b/consensus/src/consensus_provider.rs index 61756761db324..d9f0fc425fbdd 100644 --- a/consensus/src/consensus_provider.rs +++ b/consensus/src/consensus_provider.rs @@ -8,6 +8,7 @@ use crate::{ network::NetworkTask, network_interface::{ConsensusMsg, ConsensusNetworkClient}, persistent_liveness_storage::StorageWriteProxy, + pipeline::execution_client::ExecutionProxyClient, quorum_store::quorum_store_db::QuorumStoreDB, rand::rand_gen::storage::db::RandDb, state_computer::ExecutionProxy, @@ -50,23 +51,32 @@ pub fn start_consensus( node_config.consensus.mempool_executed_txn_timeout_ms, )); - let state_computer = Arc::new(ExecutionProxy::new( + let execution_proxy = ExecutionProxy::new( Arc::new(BlockExecutor::::new(aptos_db)), txn_notifier, state_sync_notifier, runtime.handle(), TransactionFilter::new(node_config.execution.transaction_filter.clone()), - )); + ); let time_service = Arc::new(ClockTimeService::new(runtime.handle().clone())); let (timeout_sender, timeout_receiver) = aptos_channels::new(1_024, &counters::PENDING_ROUND_TIMEOUTS); let (self_sender, self_receiver) = aptos_channels::new(1_024, &counters::PENDING_SELF_MESSAGES); - let consensus_network_client = ConsensusNetworkClient::new(network_client); let bounded_executor = BoundedExecutor::new(8, runtime.handle().clone()); let rand_storage = Arc::new(RandDb::new(node_config.storage.dir())); + let execution_client = Arc::new(ExecutionProxyClient::new( + node_config.consensus.clone(), + Arc::new(execution_proxy), + node_config.validator_network.as_ref().unwrap().peer_id(), + self_sender.clone(), + consensus_network_client.clone(), + bounded_executor.clone(), + rand_storage.clone(), + )); + let epoch_mgr = EpochManager::new( node_config, time_service, @@ -74,7 +84,7 @@ pub fn start_consensus( consensus_network_client, timeout_sender, consensus_to_mempool_sender, - state_computer, + execution_client, storage.clone(), quorum_store_db.clone(), reconfig_events, diff --git a/consensus/src/counters.rs b/consensus/src/counters.rs index 3403eb3184fd8..5c2d9c835889c 100644 --- a/consensus/src/counters.rs +++ b/consensus/src/counters.rs @@ -6,7 +6,7 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, quorum_store, }; -use aptos_consensus_types::executed_block::ExecutedBlock; +use aptos_consensus_types::pipelined_block::PipelinedBlock; use 
aptos_metrics_core::{ exponential_buckets, op_counters::DurationHistogram, register_avg_counter, register_counter, register_gauge, register_gauge_vec, register_histogram, register_histogram_vec, @@ -863,7 +863,7 @@ pub static UNEXPECTED_DKG_VTXN_COUNT: Lazy = Lazy::new(|| { }); /// Update various counters for committed blocks -pub fn update_counters_for_committed_blocks(blocks_to_commit: &[Arc]) { +pub fn update_counters_for_committed_blocks(blocks_to_commit: &[Arc]) { for block in blocks_to_commit { observe_block(block.block().timestamp_usecs(), BlockStage::COMMITTED); let txn_status = block.compute_result().compute_status_for_input_txns(); diff --git a/consensus/src/dag/adapter.rs b/consensus/src/dag/adapter.rs index 4ab1056e08330..82af60d92bb17 100644 --- a/consensus/src/dag/adapter.rs +++ b/consensus/src/dag/adapter.rs @@ -19,7 +19,7 @@ use aptos_bitvec::BitVec; use aptos_consensus_types::{ block::Block, common::{Author, Payload, Round}, - executed_block::ExecutedBlock, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, }; use aptos_crypto::HashValue; @@ -171,7 +171,7 @@ impl OrderedNotifier for OrderedNotifierAdapter { }; NUM_ROUNDS_PER_BLOCK.observe((rounds_between + 1) as f64); - let block = ExecutedBlock::new( + let block = PipelinedBlock::new( Block::new_for_dag( epoch, round, @@ -196,7 +196,6 @@ impl OrderedNotifier for OrderedNotifierAdapter { .write() .insert(block_info.round(), Instant::now()); let block_created_ts = self.block_ordered_ts.clone(); - let blocks_to_send = OrderedBlocks { ordered_blocks: vec![block], ordered_proof: LedgerInfoWithSignatures::new( @@ -204,7 +203,7 @@ impl OrderedNotifier for OrderedNotifierAdapter { AggregateSignature::empty(), ), callback: Box::new( - move |committed_blocks: &[Arc], + move |committed_blocks: &[Arc], commit_decision: LedgerInfoWithSignatures| { block_created_ts .write() diff --git a/consensus/src/dag/bootstrap.rs b/consensus/src/dag/bootstrap.rs index 53bfaf7bcc137..90d75815b704c 100644 --- a/consensus/src/dag/bootstrap.rs +++ b/consensus/src/dag/bootstrap.rs @@ -32,8 +32,7 @@ use crate::{ network::IncomingDAGRequest, payload_client::PayloadClient, payload_manager::PayloadManager, - pipeline::buffer_manager::OrderedBlocks, - state_replication::StateComputer, + pipeline::{buffer_manager::OrderedBlocks, execution_client::TExecutionClient}, }; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{ @@ -205,7 +204,7 @@ impl SyncMode { let sync_manager = DagStateSynchronizer::new( bootstrapper.epoch_state.clone(), bootstrapper.time_service.clone(), - bootstrapper.state_computer.clone(), + bootstrapper.execution_client.clone(), bootstrapper.storage.clone(), bootstrapper.payload_manager.clone(), bootstrapper @@ -331,8 +330,8 @@ pub struct DagBootstrapper { time_service: aptos_time_service::TimeService, payload_manager: Arc, payload_client: Arc, - state_computer: Arc, ordered_nodes_tx: UnboundedSender, + execution_client: Arc, quorum_store_enabled: bool, vtxn_config: ValidatorTxnConfig, executor: BoundedExecutor, @@ -354,8 +353,8 @@ impl DagBootstrapper { time_service: aptos_time_service::TimeService, payload_manager: Arc, payload_client: Arc, - state_computer: Arc, ordered_nodes_tx: UnboundedSender, + execution_client: Arc, quorum_store_enabled: bool, vtxn_config: ValidatorTxnConfig, executor: BoundedExecutor, @@ -374,8 +373,8 @@ impl DagBootstrapper { time_service, payload_manager, payload_client, - state_computer, ordered_nodes_tx, + execution_client, quorum_store_enabled, vtxn_config, executor, @@ -711,7 +710,7 @@ 
pub(super) fn bootstrap_dag_for_test( time_service: aptos_time_service::TimeService, payload_manager: Arc, payload_client: Arc, - state_computer: Arc, + execution_client: Arc, ) -> ( JoinHandle, JoinHandle<()>, @@ -734,8 +733,8 @@ pub(super) fn bootstrap_dag_for_test( time_service, payload_manager, payload_client, - state_computer, ordered_nodes_tx, + execution_client, false, ValidatorTxnConfig::default_enabled(), BoundedExecutor::new(2, Handle::current()), diff --git a/consensus/src/dag/dag_state_sync.rs b/consensus/src/dag/dag_state_sync.rs index 776cdf830dc14..b6c248479a5c9 100644 --- a/consensus/src/dag/dag_state_sync.rs +++ b/consensus/src/dag/dag_state_sync.rs @@ -10,7 +10,7 @@ use super::{ }; use crate::{ dag::DAGMessage, network::IncomingDAGRequest, payload_manager::TPayloadManager, - state_replication::StateComputer, + pipeline::execution_client::TExecutionClient, }; use anyhow::ensure; use aptos_channels::aptos_channel; @@ -157,7 +157,7 @@ impl StateSyncTrigger { pub(super) struct DagStateSynchronizer { epoch_state: Arc, time_service: TimeService, - state_computer: Arc, + execution_client: Arc, storage: Arc, payload_manager: Arc, dag_window_size_config: Round, @@ -167,7 +167,7 @@ impl DagStateSynchronizer { pub fn new( epoch_state: Arc, time_service: TimeService, - state_computer: Arc, + execution_client: Arc, storage: Arc, payload_manager: Arc, dag_window_size_config: Round, @@ -175,7 +175,7 @@ impl DagStateSynchronizer { Self { epoch_state, time_service, - state_computer, + execution_client, storage, payload_manager, dag_window_size_config, @@ -253,7 +253,7 @@ impl DagStateSynchronizer { }, } - self.state_computer.sync_to(commit_li).await?; + self.execution_client.sync_to(commit_li).await?; Ok(Arc::into_inner(sync_dag_store).unwrap()) } diff --git a/consensus/src/dag/tests/dag_state_sync_tests.rs b/consensus/src/dag/tests/dag_state_sync_tests.rs index 12be85039f724..57993fd227f31 100644 --- a/consensus/src/dag/tests/dag_state_sync_tests.rs +++ b/consensus/src/dag/tests/dag_state_sync_tests.rs @@ -15,7 +15,7 @@ use crate::{ types::{CertifiedNodeMessage, RemoteFetchRequest}, CertifiedNode, DAGMessage, DAGRpcResult, RpcHandler, RpcWithFallback, TDAGNetworkSender, }, - test_utils::EmptyStateComputer, + pipeline::execution_client::DummyExecutionClient, }; use aptos_consensus_types::common::{Author, Round}; use aptos_crypto::HashValue; @@ -112,13 +112,13 @@ impl OrderedNotifier for MockNotifier { fn setup(epoch_state: Arc, storage: Arc) -> DagStateSynchronizer { let time_service = TimeService::mock(); - let state_computer = Arc::new(EmptyStateComputer {}); + let execution_client = Arc::new(DummyExecutionClient {}); let payload_manager = Arc::new(MockPayloadManager {}); DagStateSynchronizer::new( epoch_state, time_service, - state_computer, + execution_client, storage, payload_manager, TEST_DAG_WINDOW as Round, diff --git a/consensus/src/dag/tests/integration_tests.rs b/consensus/src/dag/tests/integration_tests.rs index 5a70ca26bb5ee..8c80e4b1c7ff7 100644 --- a/consensus/src/dag/tests/integration_tests.rs +++ b/consensus/src/dag/tests/integration_tests.rs @@ -7,8 +7,8 @@ use crate::{ network_interface::{ConsensusMsg, ConsensusNetworkClient, DIRECT_SEND, RPC}, network_tests::{NetworkPlayground, TwinId}, payload_manager::PayloadManager, - pipeline::buffer_manager::OrderedBlocks, - test_utils::{consensus_runtime, EmptyStateComputer, MockPayloadManager, MockStorage}, + pipeline::{buffer_manager::OrderedBlocks, execution_client::DummyExecutionClient}, + test_utils::{consensus_runtime, 
MockPayloadManager, MockStorage}, }; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::network_id::{NetworkId, PeerNetworkId}; @@ -74,7 +74,7 @@ impl DagBootstrapUnit { let payload_client = Arc::new(MockPayloadManager::new(None)); let payload_manager = Arc::new(PayloadManager::DirectMempool); - let state_computer = Arc::new(EmptyStateComputer {}); + let execution_client = Arc::new(DummyExecutionClient); let (nh_abort_handle, df_abort_handle, dag_rpc_tx, ordered_nodes_rx) = bootstrap_dag_for_test( @@ -88,7 +88,7 @@ impl DagBootstrapUnit { time_service, payload_manager, payload_client, - state_computer, + execution_client, ); ( diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index 749c17d1e8894..b5b901f7e9ddd 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -28,9 +28,8 @@ use crate::{ metrics_safety_rules::MetricsSafetyRules, monitor, network::{ - IncomingBatchRetrievalRequest, IncomingBlockRetrievalRequest, IncomingCommitRequest, - IncomingDAGRequest, IncomingRandGenRequest, IncomingRpcRequest, NetworkReceivers, - NetworkSender, + IncomingBatchRetrievalRequest, IncomingBlockRetrievalRequest, IncomingDAGRequest, + IncomingRandGenRequest, IncomingRpcRequest, NetworkReceivers, NetworkSender, }, network_interface::{ConsensusMsg, ConsensusNetworkClient}, payload_client::{ @@ -38,27 +37,18 @@ use crate::{ }, payload_manager::PayloadManager, persistent_liveness_storage::{LedgerRecoveryData, PersistentLivenessStorage, RecoveryData}, - pipeline::{ - buffer_manager::{OrderedBlocks, ResetRequest, ResetSignal}, - decoupled_execution_utils::prepare_phases_and_buffer_manager, - ordering_state_computer::{DagStateSyncComputer, OrderingStateComputer}, - signing_phase::CommitSignerProvider, - }, + pipeline::execution_client::TExecutionClient, quorum_store::{ quorum_store_builder::{DirectMempoolInnerBuilder, InnerBuilder, QuorumStoreBuilder}, quorum_store_coordinator::CoordinatorCommand, quorum_store_db::QuorumStoreStorage, }, rand::rand_gen::{ - rand_manager::RandManager, storage::interface::RandStorage, - types::{AugmentedData, RandConfig, Share}, + types::{AugmentedData, RandConfig}, }, recovery_manager::RecoveryManager, round_manager::{RoundManager, UnverifiedEvent, VerifiedEvent}, - state_replication::StateComputer, - transaction_deduper::create_transaction_deduper, - transaction_shuffler::create_transaction_shuffler, util::time_service::TimeService, }; use anyhow::{anyhow, bail, ensure, Context}; @@ -66,7 +56,6 @@ use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::config::{ ConsensusConfig, DagConsensusConfig, ExecutionConfig, NodeConfig, QcAggregatorType, - SecureBackend, }; use aptos_consensus_types::{ common::{Author, Round}, @@ -95,7 +84,6 @@ use aptos_types::{ OnChainConsensusConfig, OnChainExecutionConfig, ProposerElectionType, ValidatorSet, }, randomness::{RandKeys, WvufPP, WVUF}, - validator_signer::ValidatorSigner, }; use aptos_validator_transaction_pool::VTxnPoolState; use fail::fail_point; @@ -144,17 +132,13 @@ pub struct EpochManager { timeout_sender: aptos_channels::Sender, quorum_store_enabled: bool, quorum_store_to_mempool_sender: Sender, - commit_state_computer: Arc, + execution_client: Arc, storage: Arc, safety_rules_manager: SafetyRulesManager, vtxn_pool: VTxnPoolState, reconfig_events: ReconfigNotificationListener
<P>
, // channels to rand manager rand_manager_msg_tx: Option>, - rand_manager_reset_tx: Option>, - // channels to buffer manager - buffer_manager_msg_tx: Option>, - buffer_manager_reset_tx: Option>, // channels to round manager round_manager_tx: Option< aptos_channel::Sender<(Author, Discriminant), (Author, VerifiedEvent)>, @@ -239,7 +223,7 @@ impl EpochManager
<P>
{ network_sender: ConsensusNetworkClient>, timeout_sender: aptos_channels::Sender, quorum_store_to_mempool_sender: Sender, - commit_state_computer: Arc, + execution_client: Arc, storage: Arc, quorum_store_storage: Arc, reconfig_events: ReconfigNotificationListener
<P>
, @@ -266,15 +250,12 @@ impl EpochManager
<P>
{ // This default value is updated at epoch start quorum_store_enabled: false, quorum_store_to_mempool_sender, - commit_state_computer, + execution_client, storage, safety_rules_manager, vtxn_pool, reconfig_events, rand_manager_msg_tx: None, - rand_manager_reset_tx: None, - buffer_manager_msg_tx: None, - buffer_manager_reset_tx: None, round_manager_tx: None, round_manager_close_tx: None, buffered_proposal_tx: None, @@ -588,7 +569,7 @@ impl EpochManager
<P>
{ self.shutdown_current_processor().await; // make sure storage is on this ledger_info too, it should be no-op if it's already committed // panic if this doesn't succeed since the current processors are already shutdown. - self.commit_state_computer + self.execution_client .sync_to(ledger_info.clone()) .await .context(format!( @@ -635,114 +616,6 @@ impl EpochManager
<P>
{ tokio::spawn(task); } - /// this function spawns the phases and a buffer manager - /// it sets `self.commit_msg_tx` to a new aptos_channel::Sender and returns an OrderingStateComputer - fn spawn_decoupled_execution( - &mut self, - commit_signer_provider: Arc, - epoch_state: Arc, - rand_config: Option, - ) -> ( - UnboundedSender, - Option>, - UnboundedSender, - ) { - let network_sender = NetworkSender::new( - self.author, - self.network_sender.clone(), - self.self_sender.clone(), - epoch_state.verifier.clone(), - ); - // reset channel between epoch_manager and rand_manager - let (reset_rand_manager_tx, reset_rand_manager_rx) = unbounded::(); - // reset channel between epoch_manager and buffer_manager - let (reset_buffer_manager_tx, reset_buffer_manager_rx) = unbounded::(); - - let ((ordered_block_tx, ordered_block_rx), maybe_reset_rand_manager_tx) = - if let Some(rand_config) = rand_config { - // channel for sending ordered blocks to rand_manager - let (ordered_block_tx, ordered_block_rx) = unbounded::(); - // channel for sending rand ready blocks to buffer_manager - let (rand_ready_block_tx, rand_ready_block_rx) = unbounded::(); - - let (rand_msg_tx, rand_msg_rx) = aptos_channel::new::< - AccountAddress, - IncomingRandGenRequest, - >(QueueStyle::FIFO, 100, None); - - self.rand_manager_msg_tx = Some(rand_msg_tx); - self.rand_manager_reset_tx = Some(reset_rand_manager_tx.clone()); - - let signer = - new_signer_from_storage(self.author, &self.config.safety_rules.backend); - - let rand_manager = RandManager::::new( - self.author, - Arc::new(self.epoch_state().clone()), - signer, - rand_config, - rand_ready_block_tx, - Arc::new(network_sender.clone()), - self.rand_storage.clone(), - self.bounded_executor.clone(), - ); - tokio::spawn(rand_manager.start( - ordered_block_rx, - rand_msg_rx, - reset_rand_manager_rx, - self.bounded_executor.clone(), - )); - - ( - (ordered_block_tx, rand_ready_block_rx), - Some(reset_rand_manager_tx), - ) - } else { - (unbounded(), None) - }; - - let (commit_msg_tx, commit_msg_rx) = - aptos_channel::new::( - QueueStyle::FIFO, - 100, - Some(&counters::BUFFER_MANAGER_MSGS), - ); - - self.buffer_manager_msg_tx = Some(commit_msg_tx); - self.buffer_manager_reset_tx = Some(reset_buffer_manager_tx.clone()); - - let ( - execution_schedule_phase, - execution_wait_phase, - signing_phase, - persisting_phase, - buffer_manager, - ) = prepare_phases_and_buffer_manager( - self.author, - self.commit_state_computer.clone(), - commit_signer_provider, - network_sender, - commit_msg_rx, - self.commit_state_computer.clone(), - ordered_block_rx, - reset_buffer_manager_rx, - epoch_state, - self.bounded_executor.clone(), - ); - - tokio::spawn(execution_schedule_phase.start()); - tokio::spawn(execution_wait_phase.start()); - tokio::spawn(signing_phase.start()); - tokio::spawn(persisting_phase.start()); - tokio::spawn(buffer_manager.start()); - - ( - ordered_block_tx, - maybe_reset_rand_manager_tx, - reset_buffer_manager_tx, - ) - } - async fn shutdown_current_processor(&mut self) { if let Some(close_tx) = self.round_manager_close_tx.take() { // Release the previous RoundManager, especially the SafetyRule client @@ -770,33 +643,9 @@ impl EpochManager
<P>
{ // Shutdown the previous rand manager self.rand_manager_msg_tx = None; - if let Some(mut tx) = self.rand_manager_reset_tx.take() { - let (ack_tx, ack_rx) = oneshot::channel(); - tx.send(ResetRequest { - tx: ack_tx, - signal: ResetSignal::Stop, - }) - .await - .expect("[EpochManager] Fail to drop rand manager"); - ack_rx - .await - .expect("[EpochManager] Fail to drop rand manager"); - } // Shutdown the previous buffer manager, to release the SafetyRule client - self.buffer_manager_msg_tx = None; - if let Some(mut tx) = self.buffer_manager_reset_tx.take() { - let (ack_tx, ack_rx) = oneshot::channel(); - tx.send(ResetRequest { - tx: ack_tx, - signal: ResetSignal::Stop, - }) - .await - .expect("[EpochManager] Fail to drop buffer manager"); - ack_rx - .await - .expect("[EpochManager] Fail to drop buffer manager"); - } + self.execution_client.end_epoch().await; // Shutdown the block retrieval task by dropping the sender self.block_retrieval_tx = None; @@ -810,8 +659,6 @@ impl EpochManager
<P>
{ .expect("Could not send shutdown indicator to QuorumStore"); ack_rx.await.expect("Failed to stop QuorumStore"); } - - self.commit_state_computer.end_epoch(); } async fn start_recovery_manager( @@ -819,7 +666,7 @@ impl EpochManager
<P>
{ ledger_data: LedgerRecoveryData, onchain_consensus_config: OnChainConsensusConfig, epoch_state: Arc, - network_sender: NetworkSender, + network_sender: Arc, ) { let (recovery_manager_tx, recovery_manager_rx) = aptos_channel::new( QueueStyle::LIFO, @@ -833,7 +680,7 @@ impl EpochManager
<P>
{ epoch_state, network_sender, self.storage.clone(), - self.commit_state_computer.clone(), + self.execution_client.clone(), ledger_data.committed_round(), self.config .max_blocks_per_sending_request(onchain_consensus_config.quorum_store_enabled()), @@ -869,7 +716,7 @@ impl EpochManager
<P>
{ self.quorum_store_to_mempool_sender.clone(), self.config.mempool_txn_pull_timeout_ms, self.storage.aptos_db().clone(), - network_sender.clone(), + network_sender, epoch_state.verifier.clone(), self.config.safety_rules.backend.clone(), self.quorum_store_storage.clone(), @@ -897,54 +744,6 @@ impl EpochManager
<P>
{ (payload_manager, payload_client, quorum_store_builder) } - fn init_commit_state_computer( - &mut self, - epoch_state: &EpochState, - payload_manager: Arc, - onchain_execution_config: &OnChainExecutionConfig, - onchain_consensus_config: &OnChainConsensusConfig, - features: &Features, - ) { - let transaction_shuffler = - create_transaction_shuffler(onchain_execution_config.transaction_shuffler_type()); - let block_executor_onchain_config = - onchain_execution_config.block_executor_onchain_config(); - let transaction_deduper = - create_transaction_deduper(onchain_execution_config.transaction_deduper_type()); - let randomness_enabled = onchain_consensus_config.is_vtxn_enabled() - && features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG); - - self.commit_state_computer.new_epoch( - epoch_state, - payload_manager, - transaction_shuffler, - block_executor_onchain_config, - transaction_deduper, - randomness_enabled, - ); - } - - fn init_ordering_state_computer( - &mut self, - epoch_state: Arc, - onchain_consensus_config: &OnChainConsensusConfig, - commit_signer_provider: Arc, - rand_config: Option, - ) -> Arc { - if onchain_consensus_config.decoupled_execution() { - let (block_tx, maybe_reset_rand_manager_tx, reset_buffer_manager_tx) = - self.spawn_decoupled_execution(commit_signer_provider, epoch_state, rand_config); - Arc::new(OrderingStateComputer::new( - block_tx, - self.commit_state_computer.clone(), - maybe_reset_rand_manager_tx, - reset_buffer_manager_tx, - )) - } else { - self.commit_state_computer.clone() - } - } - fn set_epoch_start_metrics(&self, epoch_state: &EpochState) { counters::EPOCH.set(epoch_state.epoch as i64); counters::CURRENT_EPOCH_VALIDATORS.set(epoch_state.verifier.len() as i64); @@ -971,7 +770,8 @@ impl EpochManager
<P>
{ recovery_data: RecoveryData, epoch_state: Arc, onchain_consensus_config: OnChainConsensusConfig, - network_sender: NetworkSender, + onchain_execution_config: OnChainExecutionConfig, + network_sender: Arc, payload_client: Arc, payload_manager: Arc, rand_config: Option, @@ -1016,12 +816,18 @@ impl EpochManager
<P>
{ let safety_rules_container = Arc::new(Mutex::new(safety_rules)); - let state_computer = self.init_ordering_state_computer( - epoch_state.clone(), - &onchain_consensus_config, - safety_rules_container.clone(), - rand_config, - ); + self.rand_manager_msg_tx = self + .execution_client + .start_epoch( + epoch_state.clone(), + safety_rules_container.clone(), + payload_manager.clone(), + &onchain_consensus_config, + &onchain_execution_config, + features, + rand_config, + ) + .await; info!(epoch = epoch, "Create BlockStore"); // Read the last vote, before "moving" `recovery_data` @@ -1029,7 +835,7 @@ impl EpochManager
<P>
{ let block_store = Arc::new(BlockStore::new( Arc::clone(&self.storage), recovery_data, - state_computer, + self.execution_client.clone(), self.config.max_pruned_blocks_in_mem, Arc::clone(&self.time_service), self.config.vote_back_pressure_limit, @@ -1259,18 +1065,14 @@ impl EpochManager
<P>
{ let rand_config = rand_config.ok(); let (network_sender, payload_client, payload_manager) = self - .initialize_shared_component( - &epoch_state, - &consensus_config, - &execution_config, - &features, - ) + .initialize_shared_component(&epoch_state, &consensus_config) .await; if consensus_config.is_dag_enabled() { self.start_new_epoch_with_dag( epoch_state, consensus_config, + execution_config, network_sender, payload_client, payload_manager, @@ -1282,6 +1084,7 @@ impl EpochManager
<P>
{ self.start_new_epoch_with_joltean( epoch_state, consensus_config, + execution_config, network_sender, payload_client, payload_manager, @@ -1296,8 +1099,6 @@ impl EpochManager
<P>
{ &mut self, epoch_state: &EpochState, consensus_config: &OnChainConsensusConfig, - execution_config: &OnChainExecutionConfig, - features: &Features, ) -> (NetworkSender, Arc, Arc) { self.set_epoch_start_metrics(epoch_state); self.quorum_store_enabled = self.enable_quorum_store(consensus_config); @@ -1312,13 +1113,6 @@ impl EpochManager
<P>
{ Arc::new(self.vtxn_pool.clone()), Arc::new(quorum_store_client), ); - self.init_commit_state_computer( - epoch_state, - payload_manager.clone(), - execution_config, - consensus_config, - features, - ); self.start_quorum_store(quorum_store_builder); ( network_sender, @@ -1331,6 +1125,7 @@ impl EpochManager
<P>
{ &mut self, epoch_state: Arc, consensus_config: OnChainConsensusConfig, + execution_config: OnChainExecutionConfig, network_sender: NetworkSender, payload_client: Arc, payload_manager: Arc, @@ -1344,7 +1139,8 @@ impl EpochManager
<P>
{ initial_data, epoch_state, consensus_config, - network_sender, + execution_config, + Arc::new(network_sender), payload_client, payload_manager, rand_config, @@ -1358,7 +1154,7 @@ impl EpochManager
<P>
{ ledger_data, consensus_config, epoch_state, - network_sender, + Arc::new(network_sender), ) .await }, @@ -1369,6 +1165,7 @@ impl EpochManager
<P>
{ &mut self, epoch_state: Arc, onchain_consensus_config: OnChainConsensusConfig, + on_chain_execution_config: OnChainExecutionConfig, network_sender: NetworkSender, payload_client: Arc, payload_manager: Arc, @@ -1377,20 +1174,26 @@ impl EpochManager
<P>
{ ) { let epoch = epoch_state.epoch; - let signer = new_signer_from_storage(self.author, &self.config.safety_rules.backend); + let signer = crate::new_signer_from_storage(self.author, &self.config.safety_rules.backend); let commit_signer = Arc::new(DagCommitSigner::new(signer)); assert!( onchain_consensus_config.decoupled_execution(), "decoupled execution must be enabled" ); - let (block_tx, reset_rand_manager_tx, reset_buffer_manager_tx) = - self.spawn_decoupled_execution(commit_signer, epoch_state.clone(), rand_config); - let state_computer = Arc::new(DagStateSyncComputer::new( - self.commit_state_computer.clone(), - reset_rand_manager_tx, - reset_buffer_manager_tx, - )); + + self.rand_manager_msg_tx = self + .execution_client + .start_epoch( + epoch_state.clone(), + commit_signer, + payload_manager.clone(), + &onchain_consensus_config, + &on_chain_execution_config, + features, + rand_config, + ) + .await; let onchain_dag_consensus_config = onchain_consensus_config.unwrap_dag_config_v1(); let epoch_to_validators = self.extract_epoch_proposers( @@ -1406,7 +1209,7 @@ impl EpochManager
<P>
{ self.storage.aptos_db(), )); - let signer = new_signer_from_storage(self.author, &self.config.safety_rules.backend); + let signer = crate::new_signer_from_storage(self.author, &self.config.safety_rules.backend); let network_sender_arc = Arc::new(network_sender); let bootstrapper = DagBootstrapper::new( @@ -1422,8 +1225,8 @@ impl EpochManager
<P>
{ self.aptos_time_service.clone(), payload_manager, payload_client, - state_computer, - block_tx, + self.execution_client.get_execution_channel().unwrap(), + self.execution_client.clone(), onchain_consensus_config.quorum_store_enabled(), onchain_consensus_config.effective_validator_txn_config(), self.bounded_executor.clone(), @@ -1708,15 +1511,7 @@ impl EpochManager
<P>
{ } }, IncomingRpcRequest::CommitRequest(request) => { - if let Some(tx) = &self.buffer_manager_msg_tx { - tx.push(peer_id, request) - } else { - counters::EPOCH_MANAGER_ISSUES_DETAILS - .with_label_values(&["buffer_manager_not_started"]) - .inc(); - warn!("Buffer manager not started"); - Ok(()) - } + self.execution_client.send_commit_msg(peer_id, request) }, IncomingRpcRequest::RandGenRequest(request) => { if let Some(tx) = &self.rand_manager_msg_tx { @@ -1795,19 +1590,6 @@ impl EpochManager
<P>
{ } } -#[allow(dead_code)] -fn new_signer_from_storage(author: Author, backend: &SecureBackend) -> Arc { - let storage: Storage = backend.into(); - if let Err(error) = storage.available() { - panic!("Storage is not available: {:?}", error); - } - let private_key = storage - .get(CONSENSUS_KEY) - .map(|v| v.value) - .expect("Unable to get private key"); - Arc::new(ValidatorSigner::new(author, private_key)) -} - #[derive(Debug)] enum NoRandomnessReason { VTxnDisabled, diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index dbc5cb5ec5e5c..9f6f15be0a5c8 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -62,13 +62,19 @@ mod transaction_filter; mod transaction_shuffler; mod txn_hash_and_authenticator_deduper; +use aptos_config::config::SecureBackend; +use aptos_consensus_types::common::Author; +use aptos_global_constants::CONSENSUS_KEY; use aptos_metrics_core::IntGauge; +use aptos_secure_storage::{KVStorage, Storage}; +use aptos_types::validator_signer::ValidatorSigner; pub use consensusdb::create_checkpoint; /// Required by the smoke tests pub use consensusdb::CONSENSUS_DB_NAME; pub use quorum_store::quorum_store_db::QUORUM_STORE_DB_NAME; #[cfg(feature = "fuzzing")] pub use round_manager::round_manager_fuzzing; +use std::sync::Arc; struct IntGaugeGuard { gauge: IntGauge, @@ -99,3 +105,15 @@ macro_rules! monitor { $fn }}; } + +fn new_signer_from_storage(author: Author, backend: &SecureBackend) -> Arc { + let storage: Storage = backend.into(); + if let Err(error) = storage.available() { + panic!("Storage is not available: {:?}", error); + } + let private_key = storage + .get(CONSENSUS_KEY) + .map(|v| v.value) + .expect("Unable to get private key"); + Arc::new(ValidatorSigner::new(author, private_key)) +} diff --git a/consensus/src/network.rs b/consensus/src/network.rs index ce2cc51020ba6..6351c2e43aec7 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -295,11 +295,12 @@ impl NetworkSender { /// The future is fulfilled as soon as the message is put into the mpsc channel to network /// internal (to provide back pressure), it does not indicate the message is delivered or sent /// out. - async fn broadcast(&mut self, msg: ConsensusMsg) { + async fn broadcast(&self, msg: ConsensusMsg) { fail_point!("consensus::send::any", |_| ()); // Directly send the message to ourself without going through network. 
let self_msg = Event::Message(self.author, msg.clone()); - if let Err(err) = self.self_sender.send(self_msg).await { + let mut self_sender = self.self_sender.clone(); + if let Err(err) = self_sender.send(self_msg).await { error!("Error broadcasting to self: {:?}", err); } @@ -369,25 +370,25 @@ impl NetworkSender { } } - pub async fn broadcast_proposal(&mut self, proposal_msg: ProposalMsg) { + pub async fn broadcast_proposal(&self, proposal_msg: ProposalMsg) { fail_point!("consensus::send::broadcast_proposal", |_| ()); let msg = ConsensusMsg::ProposalMsg(Box::new(proposal_msg)); self.broadcast(msg).await } - pub async fn broadcast_sync_info(&mut self, sync_info_msg: SyncInfo) { + pub async fn broadcast_sync_info(&self, sync_info_msg: SyncInfo) { fail_point!("consensus::send::broadcast_sync_info", |_| ()); let msg = ConsensusMsg::SyncInfo(Box::new(sync_info_msg)); self.broadcast(msg).await } - pub async fn broadcast_timeout_vote(&mut self, timeout_vote_msg: VoteMsg) { + pub async fn broadcast_timeout_vote(&self, timeout_vote_msg: VoteMsg) { fail_point!("consensus::send::broadcast_timeout_vote", |_| ()); let msg = ConsensusMsg::VoteMsg(Box::new(timeout_vote_msg)); self.broadcast(msg).await } - pub async fn broadcast_epoch_change(&mut self, epoch_change_proof: EpochChangeProof) { + pub async fn broadcast_epoch_change(&self, epoch_change_proof: EpochChangeProof) { fail_point!("consensus::send::broadcast_epoch_change", |_| ()); let msg = ConsensusMsg::EpochChangeProof(Box::new(epoch_change_proof)); self.broadcast(msg).await diff --git a/consensus/src/pipeline/buffer_item.rs b/consensus/src/pipeline/buffer_item.rs index d92d62eb71f99..d251e97e80fa0 100644 --- a/consensus/src/pipeline/buffer_item.rs +++ b/consensus/src/pipeline/buffer_item.rs @@ -5,7 +5,7 @@ use crate::{pipeline::hashable::Hashable, state_replication::StateComputerCommitCallBackType}; use anyhow::anyhow; use aptos_consensus_types::{ - common::Author, executed_block::ExecutedBlock, pipeline::commit_vote::CommitVote, + common::Author, pipeline::commit_vote::CommitVote, pipelined_block::PipelinedBlock, }; use aptos_crypto::{bls12381, HashValue}; use aptos_executor_types::ExecutorResult; @@ -54,7 +54,7 @@ fn verify_signatures( fn generate_executed_item_from_ordered( commit_info: BlockInfo, - executed_blocks: Vec, + executed_blocks: Vec, verified_signatures: PartialSignatures, callback: StateComputerCommitCallBackType, ordered_proof: LedgerInfoWithSignatures, @@ -92,12 +92,12 @@ pub struct OrderedItem { // from peers. 
pub commit_proof: Option, pub callback: StateComputerCommitCallBackType, - pub ordered_blocks: Vec, + pub ordered_blocks: Vec, pub ordered_proof: LedgerInfoWithSignatures, } pub struct ExecutedItem { - pub executed_blocks: Vec, + pub executed_blocks: Vec, pub partial_commit_proof: LedgerInfoWithPartialSignatures, pub callback: StateComputerCommitCallBackType, pub commit_info: BlockInfo, @@ -105,7 +105,7 @@ } pub struct SignedItem { - pub executed_blocks: Vec, + pub executed_blocks: Vec, pub partial_commit_proof: LedgerInfoWithPartialSignatures, pub callback: StateComputerCommitCallBackType, pub commit_vote: CommitVote, @@ -113,7 +113,7 @@ } pub struct AggregatedItem { - pub executed_blocks: Vec, + pub executed_blocks: Vec, pub commit_proof: LedgerInfoWithSignatures, pub callback: StateComputerCommitCallBackType, } @@ -131,11 +131,11 @@ impl Hashable for BufferItem { } } -pub type ExecutionFut = BoxFuture<'static, ExecutorResult>>; +pub type ExecutionFut = BoxFuture<'static, ExecutorResult>>; impl BufferItem { pub fn new_ordered( - ordered_blocks: Vec, + ordered_blocks: Vec, ordered_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, ) -> Self { @@ -151,7 +151,7 @@ impl BufferItem { // pipeline functions pub fn advance_to_executed_or_aggregated( self, - executed_blocks: Vec, + executed_blocks: Vec, validator: &ValidatorVerifier, epoch_end_timestamp: Option, ) -> Self { @@ -371,7 +371,7 @@ impl BufferItem { } // generic functions - pub fn get_blocks(&self) -> &Vec { + pub fn get_blocks(&self) -> &Vec { match self { Self::Ordered(ordered) => &ordered.ordered_blocks, Self::Executed(executed) => &executed.executed_blocks, diff --git a/consensus/src/pipeline/buffer_manager.rs b/consensus/src/pipeline/buffer_manager.rs index 4aaf90d4b5c52..b694f54bbadb2 100644 --- a/consensus/src/pipeline/buffer_manager.rs +++ b/consensus/src/pipeline/buffer_manager.rs @@ -21,7 +21,7 @@ use crate::{ }; use aptos_bounded_executor::BoundedExecutor; use aptos_consensus_types::{ - common::Author, executed_block::ExecutedBlock, pipeline::commit_decision::CommitDecision, + common::Author, pipeline::commit_decision::CommitDecision, pipelined_block::PipelinedBlock, }; use aptos_crypto::HashValue; use aptos_logger::prelude::*; @@ -67,7 +67,7 @@ pub struct ResetRequest { } pub struct OrderedBlocks { - pub ordered_blocks: Vec, + pub ordered_blocks: Vec, pub ordered_proof: LedgerInfoWithSignatures, pub callback: StateComputerCommitCallBackType, } @@ -329,14 +329,14 @@ impl BufferManager { /// Pop the prefix of buffer items until (including) target_block_id /// Send persist request. async fn advance_head(&mut self, target_block_id: HashValue) { - let mut blocks_to_persist: Vec> = vec![]; + let mut blocks_to_persist: Vec> = vec![]; while let Some(item) = self.buffer.pop_front() { blocks_to_persist.extend( item.get_blocks() .iter() .map(|eb| Arc::new(eb.clone())) - .collect::>>(), + .collect::>>(), ); if self.signing_root == Some(item.block_id()) { self.signing_root = None; @@ -348,6 +348,8 @@ impl BufferManager { let aggregated_item = item.unwrap_aggregated(); let block = aggregated_item.executed_blocks.last().unwrap().block(); observe_block(block.timestamp_usecs(), BlockStage::COMMIT_CERTIFIED); + // TODO: As all the validators broadcast commit votes directly to all other validators, + // the proposer does not have to broadcast the commit decision again. Remove this if block.
// if we're the proposer for the block, we're responsible to broadcast the commit decision. if block.author() == Some(self.author) { let commit_decision = CommitMessage::Decision(CommitDecision::new( @@ -516,27 +518,11 @@ impl BufferManager { // we have found the buffer item let mut signed_item = item.advance_to_signed(self.author, signature); let signed_item_mut = signed_item.unwrap_signed_mut(); - let maybe_proposer = signed_item_mut - .executed_blocks - .last() - .unwrap() - .block() - .author(); let commit_vote = signed_item_mut.commit_vote.clone(); - - if let Some(proposer) = maybe_proposer { - let sender = self.commit_msg_tx.clone(); - tokio::spawn(async move { - if let Err(e) = sender.send_commit_vote(commit_vote, proposer).await { - warn!("Failed to send commit vote {:?}", e); - } - }); - } else { - let commit_vote = CommitMessage::Vote(commit_vote); - signed_item_mut - .rb_handle - .replace((Instant::now(), self.do_reliable_broadcast(commit_vote))); - } + let commit_vote = CommitMessage::Vote(commit_vote); + signed_item_mut + .rb_handle + .replace((Instant::now(), self.do_reliable_broadcast(commit_vote))); self.buffer.set(¤t_cursor, signed_item); } else { self.buffer.set(¤t_cursor, item); diff --git a/consensus/src/pipeline/execution_client.rs b/consensus/src/pipeline/execution_client.rs new file mode 100644 index 0000000000000..7878549146e03 --- /dev/null +++ b/consensus/src/pipeline/execution_client.rs @@ -0,0 +1,490 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + counters, + error::StateSyncError, + network::{IncomingCommitRequest, IncomingRandGenRequest, NetworkSender}, + network_interface::{ConsensusMsg, ConsensusNetworkClient}, + new_signer_from_storage, + payload_manager::PayloadManager, + pipeline::{ + buffer_manager::{OrderedBlocks, ResetAck, ResetRequest, ResetSignal}, + decoupled_execution_utils::prepare_phases_and_buffer_manager, + errors::Error, + signing_phase::CommitSignerProvider, + }, + rand::rand_gen::{ + rand_manager::RandManager, + storage::interface::RandStorage, + types::{AugmentedData, RandConfig, Share}, + }, + state_computer::ExecutionProxy, + state_replication::{StateComputer, StateComputerCommitCallBackType}, + transaction_deduper::create_transaction_deduper, + transaction_shuffler::create_transaction_shuffler, +}; +use anyhow::Result; +use aptos_bounded_executor::BoundedExecutor; +use aptos_channels::{aptos_channel, message_queues::QueueStyle}; +use aptos_config::config::ConsensusConfig; +use aptos_consensus_types::{common::Author, pipelined_block::PipelinedBlock}; +use aptos_executor_types::ExecutorResult; +use aptos_infallible::RwLock; +use aptos_logger::prelude::*; +use aptos_network::{application::interface::NetworkClient, protocols::network::Event}; +use aptos_types::{ + epoch_state::EpochState, + ledger_info::LedgerInfoWithSignatures, + on_chain_config::{FeatureFlag, Features, OnChainConsensusConfig, OnChainExecutionConfig}, +}; +use fail::fail_point; +use futures::{ + channel::{mpsc::UnboundedSender, oneshot}, + SinkExt, +}; +use futures_channel::mpsc::unbounded; +use move_core_types::account_address::AccountAddress; +use std::sync::Arc; + +#[async_trait::async_trait] +pub trait TExecutionClient: Send + Sync { + /// Initialize the execution phase for a new epoch. 
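+ /// For the production `ExecutionProxyClient`, this spawns the rand manager (when
+ /// randomness is configured) plus the pipeline phases and buffer manager, and then
+ /// primes the execution proxy for the epoch; the returned sender, if any, routes
+ /// incoming rand-gen messages to the spawned rand manager.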
+ async fn start_epoch( + &self, + epoch_state: Arc, + commit_signer_provider: Arc, + payload_manager: Arc, + onchain_consensus_config: &OnChainConsensusConfig, + onchain_execution_config: &OnChainExecutionConfig, + features: &Features, + rand_config: Option, + ) -> Option>; + + /// This is needed for some DAG tests. TODO: clean this up. + fn get_execution_channel(&self) -> Option>; + + /// Send ordered blocks to the real execution phase through the channel. + async fn finalize_order( + &self, + blocks: &[Arc], + ordered_proof: LedgerInfoWithSignatures, + callback: StateComputerCommitCallBackType, + ) -> ExecutorResult<()>; + + fn send_commit_msg( + &self, + peer_id: AccountAddress, + commit_msg: IncomingCommitRequest, + ) -> Result<()>; + + /// Synchronize to a commit that is not present locally. + async fn sync_to(&self, target: LedgerInfoWithSignatures) -> Result<(), StateSyncError>; + + /// Shut down the current processor at the end of the epoch. + async fn end_epoch(&self); +} + +struct BufferManagerHandle { + pub execute_tx: Option>, + pub commit_tx: Option>, + pub reset_tx_to_buffer_manager: Option>, + pub reset_tx_to_rand_manager: Option>, +} + +impl BufferManagerHandle { + pub fn new() -> Self { + Self { + execute_tx: None, + commit_tx: None, + reset_tx_to_buffer_manager: None, + reset_tx_to_rand_manager: None, + } + } + + pub fn init( + &mut self, + execute_tx: UnboundedSender, + commit_tx: aptos_channel::Sender, + reset_tx_to_buffer_manager: UnboundedSender, + reset_tx_to_rand_manager: Option>, + ) { + self.execute_tx = Some(execute_tx); + self.commit_tx = Some(commit_tx); + self.reset_tx_to_buffer_manager = Some(reset_tx_to_buffer_manager); + self.reset_tx_to_rand_manager = reset_tx_to_rand_manager; + } + + pub fn reset( + &mut self, + ) -> ( + Option>, + Option>, + ) { + let reset_tx_to_rand_manager = self.reset_tx_to_rand_manager.take(); + let reset_tx_to_buffer_manager = self.reset_tx_to_buffer_manager.take(); + self.execute_tx = None; + self.commit_tx = None; + self.reset_tx_to_buffer_manager = None; + (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) + } +} + +pub struct ExecutionProxyClient { + consensus_config: ConsensusConfig, + execution_proxy: Arc, + author: Author, + self_sender: aptos_channels::Sender>, + network_sender: ConsensusNetworkClient>, + bounded_executor: BoundedExecutor, + // channels to buffer manager + handle: Arc>, + rand_storage: Arc>, +} + +impl ExecutionProxyClient { + pub fn new( + consensus_config: ConsensusConfig, + execution_proxy: Arc, + author: Author, + self_sender: aptos_channels::Sender>, + network_sender: ConsensusNetworkClient>, + bounded_executor: BoundedExecutor, + rand_storage: Arc>, + ) -> Self { + Self { + consensus_config, + execution_proxy, + author, + self_sender, + network_sender, + bounded_executor, + handle: Arc::new(RwLock::new(BufferManagerHandle::new())), + rand_storage, + } + } + + fn spawn_decoupled_execution( + &self, + commit_signer_provider: Arc, + epoch_state: Arc, + rand_config: Option, + ) -> Option> { + let network_sender = NetworkSender::new( + self.author, + self.network_sender.clone(), + self.self_sender.clone(), + epoch_state.verifier.clone(), + ); + + let (reset_buffer_manager_tx, reset_buffer_manager_rx) = unbounded::(); + + let (commit_msg_tx, commit_msg_rx) = + aptos_channel::new::( + QueueStyle::FIFO, + 100, + Some(&counters::BUFFER_MANAGER_MSGS), + ); + + let ( + execution_ready_block_tx, + execution_ready_block_rx, + maybe_reset_tx_to_rand_manager, + maybe_rand_msg_tx, + ) = if let Some(rand_config)
= rand_config { + let (ordered_block_tx, ordered_block_rx) = unbounded::(); + let (rand_ready_block_tx, rand_ready_block_rx) = unbounded::(); + let (rand_msg_tx, rand_msg_rx) = aptos_channel::new::< + AccountAddress, + IncomingRandGenRequest, + >(QueueStyle::FIFO, 100, None); + + let (reset_tx_to_rand_manager, reset_rand_manager_rx) = unbounded::(); + let signer = + new_signer_from_storage(self.author, &self.consensus_config.safety_rules.backend); + + let rand_manager = RandManager::::new( + self.author, + epoch_state.clone(), + signer, + rand_config, + rand_ready_block_tx, + Arc::new(network_sender.clone()), + self.rand_storage.clone(), + self.bounded_executor.clone(), + ); + + tokio::spawn(rand_manager.start( + ordered_block_rx, + rand_msg_rx, + reset_rand_manager_rx, + self.bounded_executor.clone(), + )); + + ( + ordered_block_tx, + rand_ready_block_rx, + Some(reset_tx_to_rand_manager), + Some(rand_msg_tx), + ) + } else { + let (ordered_block_tx, ordered_block_rx) = unbounded(); + (ordered_block_tx, ordered_block_rx, None, None) + }; + + self.handle.write().init( + execution_ready_block_tx, + commit_msg_tx, + reset_buffer_manager_tx, + maybe_reset_tx_to_rand_manager, + ); + + let ( + execution_schedule_phase, + execution_wait_phase, + signing_phase, + persisting_phase, + buffer_manager, + ) = prepare_phases_and_buffer_manager( + self.author, + self.execution_proxy.clone(), + commit_signer_provider, + network_sender, + commit_msg_rx, + self.execution_proxy.clone(), + execution_ready_block_rx, + reset_buffer_manager_rx, + epoch_state, + self.bounded_executor.clone(), + ); + tokio::spawn(execution_schedule_phase.start()); + tokio::spawn(execution_wait_phase.start()); + tokio::spawn(signing_phase.start()); + tokio::spawn(persisting_phase.start()); + tokio::spawn(buffer_manager.start()); + + maybe_rand_msg_tx + } +} + +#[async_trait::async_trait] +impl TExecutionClient for ExecutionProxyClient { + async fn start_epoch( + &self, + epoch_state: Arc, + commit_signer_provider: Arc, + payload_manager: Arc, + onchain_consensus_config: &OnChainConsensusConfig, + onchain_execution_config: &OnChainExecutionConfig, + features: &Features, + rand_config: Option, + ) -> Option> { + let maybe_rand_msg_tx = self.spawn_decoupled_execution( + commit_signer_provider, + epoch_state.clone(), + rand_config, + ); + + let transaction_shuffler = + create_transaction_shuffler(onchain_execution_config.transaction_shuffler_type()); + let block_executor_onchain_config = + onchain_execution_config.block_executor_onchain_config(); + let transaction_deduper = + create_transaction_deduper(onchain_execution_config.transaction_deduper_type()); + let randomness_enabled = onchain_consensus_config.is_vtxn_enabled() + && features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG); + self.execution_proxy.new_epoch( + &epoch_state, + payload_manager, + transaction_shuffler, + block_executor_onchain_config, + transaction_deduper, + randomness_enabled, + ); + + maybe_rand_msg_tx + } + + fn get_execution_channel(&self) -> Option> { + self.handle.read().execute_tx.clone() + } + + async fn finalize_order( + &self, + blocks: &[Arc], + ordered_proof: LedgerInfoWithSignatures, + callback: StateComputerCommitCallBackType, + ) -> ExecutorResult<()> { + assert!(!blocks.is_empty()); + let execute_tx = self.handle.read().execute_tx.clone(); + + if execute_tx.is_none() { + debug!("Failed to send to buffer manager, maybe epoch ends"); + return Ok(()); + } + + for block in blocks { + block.set_insertion_time(); + } + + debug!( + epoch = 
blocks[0].epoch(), + round = blocks[0].round(), + "Sending ordered blocks to execution client." + ); + + if execute_tx + .unwrap() + .send(OrderedBlocks { + ordered_blocks: blocks + .iter() + .map(|b| (**b).clone()) + .collect::>(), + ordered_proof, + callback, + }) + .await + .is_err() + { + debug!("Failed to send to buffer manager, maybe epoch ends"); + } + Ok(()) + } + + fn send_commit_msg( + &self, + peer_id: AccountAddress, + commit_msg: IncomingCommitRequest, + ) -> Result<()> { + if let Some(tx) = &self.handle.read().commit_tx { + tx.push(peer_id, commit_msg) + } else { + counters::EPOCH_MANAGER_ISSUES_DETAILS + .with_label_values(&["buffer_manager_not_started"]) + .inc(); + warn!("Buffer manager not started"); + Ok(()) + } + } + + async fn sync_to(&self, target: LedgerInfoWithSignatures) -> Result<(), StateSyncError> { + fail_point!("consensus::sync_to", |_| { + Err(anyhow::anyhow!("Injected error in sync_to").into()) + }); + + let (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) = { + let handle = self.handle.read(); + ( + handle.reset_tx_to_rand_manager.clone(), + handle.reset_tx_to_buffer_manager.clone(), + ) + }; + + if let Some(mut reset_tx) = reset_tx_to_rand_manager { + let (ack_tx, ack_rx) = oneshot::channel::(); + reset_tx + .send(ResetRequest { + tx: ack_tx, + signal: ResetSignal::TargetRound(target.commit_info().round()), + }) + .await + .map_err(|_| Error::RandResetDropped)?; + ack_rx.await.map_err(|_| Error::RandResetDropped)?; + } + + if let Some(mut reset_tx) = reset_tx_to_buffer_manager { + // reset execution phase and commit phase + let (tx, rx) = oneshot::channel::(); + reset_tx + .send(ResetRequest { + tx, + signal: ResetSignal::TargetRound(target.commit_info().round()), + }) + .await + .map_err(|_| Error::ResetDropped)?; + rx.await.map_err(|_| Error::ResetDropped)?; + } + + // TODO: handle the sync error, should re-push the ordered blocks to buffer manager + // when it's reset but sync fails. 
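+        // Note: both resets above wait for their acks, so by the time state sync runs
+        // neither the rand manager nor the buffer manager is holding blocks past the
+        // target round.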
+        self.execution_proxy.sync_to(target).await?;
+        Ok(())
+    }
+
+    async fn end_epoch(&self) {
+        let (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) = {
+            let mut handle = self.handle.write();
+            handle.reset()
+        };
+
+        if let Some(mut tx) = reset_tx_to_rand_manager {
+            let (ack_tx, ack_rx) = oneshot::channel();
+            tx.send(ResetRequest {
+                tx: ack_tx,
+                signal: ResetSignal::Stop,
+            })
+            .await
+            .expect("[EpochManager] Failed to drop rand manager");
+            ack_rx
+                .await
+                .expect("[EpochManager] Failed to drop rand manager");
+        }
+
+        if let Some(mut tx) = reset_tx_to_buffer_manager {
+            let (ack_tx, ack_rx) = oneshot::channel();
+            tx.send(ResetRequest {
+                tx: ack_tx,
+                signal: ResetSignal::Stop,
+            })
+            .await
+            .expect("[EpochManager] Failed to drop buffer manager");
+            ack_rx
+                .await
+                .expect("[EpochManager] Failed to drop buffer manager");
+        }
+
+        self.execution_proxy.end_epoch();
+    }
+}
+
+pub struct DummyExecutionClient;
+
+#[async_trait::async_trait]
+impl TExecutionClient for DummyExecutionClient {
+    async fn start_epoch(
+        &self,
+        _epoch_state: Arc<EpochState>,
+        _commit_signer_provider: Arc<dyn CommitSignerProvider>,
+        _payload_manager: Arc<PayloadManager>,
+        _onchain_consensus_config: &OnChainConsensusConfig,
+        _onchain_execution_config: &OnChainExecutionConfig,
+        _features: &Features,
+        _rand_config: Option<RandConfig>,
+    ) -> Option<aptos_channel::Sender<AccountAddress, IncomingRandGenRequest>> {
+        None
+    }
+
+    fn get_execution_channel(&self) -> Option<UnboundedSender<OrderedBlocks>> {
+        None
+    }
+
+    async fn finalize_order(
+        &self,
+        _: &[Arc<PipelinedBlock>],
+        _: LedgerInfoWithSignatures,
+        _: StateComputerCommitCallBackType,
+    ) -> ExecutorResult<()> {
+        Ok(())
+    }
+
+    fn send_commit_msg(&self, _: AccountAddress, _: IncomingCommitRequest) -> Result<()> {
+        Ok(())
+    }
+
+    async fn sync_to(&self, _: LedgerInfoWithSignatures) -> Result<(), StateSyncError> {
+        Ok(())
+    }
+
+    async fn end_epoch(&self) {}
+}
diff --git a/consensus/src/pipeline/execution_schedule_phase.rs b/consensus/src/pipeline/execution_schedule_phase.rs
index 414960b5e67cc..96586273649c1 100644
--- a/consensus/src/pipeline/execution_schedule_phase.rs
+++ b/consensus/src/pipeline/execution_schedule_phase.rs
@@ -9,7 +9,7 @@ use crate::{
     state_computer::PipelineExecutionResult,
     state_replication::StateComputer,
 };
-use aptos_consensus_types::executed_block::ExecutedBlock;
+use aptos_consensus_types::pipelined_block::PipelinedBlock;
 use aptos_crypto::HashValue;
 use aptos_executor_types::ExecutorError;
 use aptos_logger::debug;
@@ -25,7 +25,7 @@ use std::{
 /// the buffer manager and send them to the ExecutionPipeline.
 pub struct ExecutionRequest {
-    pub ordered_blocks: Vec<ExecutedBlock>,
+    pub ordered_blocks: Vec<PipelinedBlock>,
     // Hold a CountedRequest to guarantee the executor doesn't get reset with pending tasks
     // stuck in the ExecutionPipeline.
pub lifetime_guard: CountedRequest<()>, @@ -94,7 +94,7 @@ impl StatelessPipeline for ExecutionSchedulePhase { for (block, fut) in itertools::zip_eq(ordered_blocks, futs) { debug!("try to receive compute result for block {}", block.id()); let PipelineExecutionResult { input_txns, result } = fut.await?; - results.push(block.replace_result(input_txns, result)); + results.push(block.set_execution_result(input_txns, result)); } drop(lifetime_guard); Ok(results) diff --git a/consensus/src/pipeline/execution_wait_phase.rs b/consensus/src/pipeline/execution_wait_phase.rs index c5c78509e8fa3..88b616ca7d142 100644 --- a/consensus/src/pipeline/execution_wait_phase.rs +++ b/consensus/src/pipeline/execution_wait_phase.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::pipeline::{buffer_item::ExecutionFut, pipeline_phase::StatelessPipeline}; -use aptos_consensus_types::executed_block::ExecutedBlock; +use aptos_consensus_types::pipelined_block::PipelinedBlock; use aptos_crypto::HashValue; use aptos_executor_types::ExecutorResult; use async_trait::async_trait; @@ -32,7 +32,7 @@ impl Display for ExecutionWaitRequest { pub struct ExecutionResponse { pub block_id: HashValue, - pub inner: ExecutorResult>, + pub inner: ExecutorResult>, } pub struct ExecutionWaitPhase; diff --git a/consensus/src/pipeline/mod.rs b/consensus/src/pipeline/mod.rs index b8aad5120a036..11269562e9cfa 100644 --- a/consensus/src/pipeline/mod.rs +++ b/consensus/src/pipeline/mod.rs @@ -31,10 +31,10 @@ pub mod errors; pub mod execution_schedule_phase; pub mod execution_wait_phase; pub mod hashable; -pub mod ordering_state_computer; pub mod persisting_phase; pub mod pipeline_phase; pub mod signing_phase; +pub mod execution_client; #[cfg(test)] mod tests; diff --git a/consensus/src/pipeline/persisting_phase.rs b/consensus/src/pipeline/persisting_phase.rs index 9b82638efcd54..8a60e4589c131 100644 --- a/consensus/src/pipeline/persisting_phase.rs +++ b/consensus/src/pipeline/persisting_phase.rs @@ -6,7 +6,7 @@ use crate::{ pipeline::pipeline_phase::StatelessPipeline, state_replication::{StateComputer, StateComputerCommitCallBackType}, }; -use aptos_consensus_types::executed_block::ExecutedBlock; +use aptos_consensus_types::pipelined_block::PipelinedBlock; use aptos_executor_types::ExecutorResult; use aptos_types::ledger_info::LedgerInfoWithSignatures; use async_trait::async_trait; @@ -21,7 +21,7 @@ use std::{ /// a response. 
pub struct PersistingRequest { - pub blocks: Vec>, + pub blocks: Vec>, pub commit_ledger_info: LedgerInfoWithSignatures, pub callback: StateComputerCommitCallBackType, } diff --git a/consensus/src/pipeline/tests/buffer_manager_tests.rs b/consensus/src/pipeline/tests/buffer_manager_tests.rs index dc30aebd6a675..0cddd6679287d 100644 --- a/consensus/src/pipeline/tests/buffer_manager_tests.rs +++ b/consensus/src/pipeline/tests/buffer_manager_tests.rs @@ -14,7 +14,6 @@ use crate::{ decoupled_execution_utils::prepare_phases_and_buffer_manager, execution_schedule_phase::ExecutionSchedulePhase, execution_wait_phase::ExecutionWaitPhase, - ordering_state_computer::OrderingStateComputer, persisting_phase::PersistingPhase, pipeline_phase::PipelinePhase, signing_phase::SigningPhase, @@ -29,7 +28,7 @@ use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::network_id::NetworkId; use aptos_consensus_types::{ - block::block_test_utils::certificate_for_genesis, executed_block::ExecutedBlock, + block::block_test_utils::certificate_for_genesis, pipelined_block::PipelinedBlock, vote_proposal::VoteProposal, }; use aptos_crypto::{hash::ACCUMULATOR_PLACEHOLDER_HASH, HashValue}; @@ -129,15 +128,7 @@ pub fn prepare_buffer_manager( ); let (result_tx, result_rx) = create_channel::(); - let (reset_rand_tx, _) = create_channel::(); - let (reset_bufmgr_tx, _) = create_channel::(); - - let persisting_proxy = Arc::new(OrderingStateComputer::new( - result_tx, - Arc::new(EmptyStateComputer), - Some(reset_rand_tx), - reset_bufmgr_tx, - )); + let state_computer = Arc::new(EmptyStateComputer::new(result_tx)); let (block_tx, block_rx) = create_channel::(); let (buffer_reset_tx, buffer_reset_rx) = create_channel::(); @@ -157,7 +148,7 @@ pub fn prepare_buffer_manager( Arc::new(Mutex::new(safety_rules)), network, msg_rx, - persisting_proxy, + state_computer, block_rx, buffer_reset_rx, Arc::new(EpochState { @@ -257,7 +248,10 @@ async fn loopback_commit_vote( }; } -async fn assert_results(batches: Vec>, result_rx: &mut Receiver) { +async fn assert_results( + batches: Vec>, + result_rx: &mut Receiver, +) { for (i, batch) in enumerate(batches) { let OrderedBlocks { ordered_blocks, .. 
} = result_rx.next().await.unwrap(); assert_eq!( diff --git a/consensus/src/pipeline/tests/execution_phase_tests.rs b/consensus/src/pipeline/tests/execution_phase_tests.rs index 06fda816af8c4..e34d3f9e7743f 100644 --- a/consensus/src/pipeline/tests/execution_phase_tests.rs +++ b/consensus/src/pipeline/tests/execution_phase_tests.rs @@ -16,7 +16,7 @@ use crate::{ use aptos_consensus_types::{ block::{block_test_utils::certificate_for_genesis, Block}, common::Payload, - executed_block::ExecutedBlock, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, }; use aptos_crypto::HashValue; @@ -90,7 +90,7 @@ fn add_execution_phase_test_cases( // happy path phase_tester.add_test_case( ExecutionRequest { - ordered_blocks: vec![ExecutedBlock::new( + ordered_blocks: vec![PipelinedBlock::new( block, vec![], StateComputeResult::new_dummy(), @@ -123,7 +123,7 @@ fn add_execution_phase_test_cases( Block::new_proposal(Payload::empty(false), 1, 1, bad_qc, &signers[0], Vec::new()).unwrap(); phase_tester.add_test_case( ExecutionRequest { - ordered_blocks: vec![ExecutedBlock::new( + ordered_blocks: vec![PipelinedBlock::new( bad_block, vec![], StateComputeResult::new_dummy(), diff --git a/consensus/src/pipeline/tests/test_utils.rs b/consensus/src/pipeline/tests/test_utils.rs index a9915a182ee8a..721f4b1a6ffec 100644 --- a/consensus/src/pipeline/tests/test_utils.rs +++ b/consensus/src/pipeline/tests/test_utils.rs @@ -6,7 +6,7 @@ use crate::{metrics_safety_rules::MetricsSafetyRules, test_utils::MockStorage}; use aptos_consensus_types::{ block::block_test_utils::certificate_for_genesis, common::{Payload, Round}, - executed_block::ExecutedBlock, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, vote_proposal::VoteProposal, }; @@ -63,7 +63,7 @@ pub fn prepare_executed_blocks_with_ledger_info( init_qc: Option, init_round: Round, ) -> ( - Vec, + Vec, LedgerInfoWithSignatures, Vec, ) { @@ -108,10 +108,10 @@ pub fn prepare_executed_blocks_with_ledger_info( let li_sig = generate_ledger_info_with_sig(&[signer.clone()], li); - let executed_blocks: Vec = proposals + let executed_blocks: Vec = proposals .iter() .map(|proposal| { - ExecutedBlock::new(proposal.block().clone(), vec![], compute_result.clone()) + PipelinedBlock::new(proposal.block().clone(), vec![], compute_result.clone()) }) .collect(); @@ -120,7 +120,7 @@ pub fn prepare_executed_blocks_with_ledger_info( pub fn prepare_executed_blocks_with_executed_ledger_info( signer: &ValidatorSigner, -) -> (Vec, LedgerInfoWithSignatures) { +) -> (Vec, LedgerInfoWithSignatures) { let genesis_qc = certificate_for_genesis(); let (executed_blocks, li_sig, _) = prepare_executed_blocks_with_ledger_info( signer, @@ -136,7 +136,7 @@ pub fn prepare_executed_blocks_with_executed_ledger_info( pub fn prepare_executed_blocks_with_ordered_ledger_info( signer: &ValidatorSigner, -) -> (Vec, LedgerInfoWithSignatures) { +) -> (Vec, LedgerInfoWithSignatures) { let genesis_qc = certificate_for_genesis(); let (executed_blocks, li_sig, _) = prepare_executed_blocks_with_ledger_info( signer, diff --git a/consensus/src/quorum_store/batch_coordinator.rs b/consensus/src/quorum_store/batch_coordinator.rs index 9360de6211ed2..628e5d55b440b 100644 --- a/consensus/src/quorum_store/batch_coordinator.rs +++ b/consensus/src/quorum_store/batch_coordinator.rs @@ -23,7 +23,7 @@ pub enum BatchCoordinatorCommand { pub struct BatchCoordinator { my_peer_id: PeerId, - network_sender: NetworkSender, + network_sender: Arc, batch_store: Arc, max_batch_txns: u64, max_batch_bytes: u64, @@ -43,7 +43,7 @@ impl 
BatchCoordinator { ) -> Self { Self { my_peer_id, - network_sender, + network_sender: Arc::new(network_sender), batch_store, max_batch_txns, max_batch_bytes, diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index b373fffb07ff2..2c4868c5790d8 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -349,7 +349,7 @@ impl BatchStore { match self.db.get_batch(digest) { Ok(Some(value)) => Ok(value), Ok(None) | Err(_) => { - error!("Could not get batch from db"); + warn!("Could not get batch from db"); Err(ExecutorError::CouldNotGetData) }, } diff --git a/consensus/src/rand/rand_gen/block_queue.rs b/consensus/src/rand/rand_gen/block_queue.rs index f3654563e89af..e1beac8ac622a 100644 --- a/consensus/src/rand/rand_gen/block_queue.rs +++ b/consensus/src/rand/rand_gen/block_queue.rs @@ -5,7 +5,7 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, pipeline::buffer_manager::OrderedBlocks, }; -use aptos_consensus_types::{common::Round, executed_block::ExecutedBlock}; +use aptos_consensus_types::{common::Round, pipelined_block::PipelinedBlock}; use aptos_logger::info; use aptos_reliable_broadcast::DropGuard; use aptos_types::randomness::{RandMetadata, Randomness}; @@ -75,11 +75,11 @@ impl QueueItem { } } - fn blocks(&self) -> &[ExecutedBlock] { + fn blocks(&self) -> &[PipelinedBlock] { &self.ordered_blocks.ordered_blocks } - fn blocks_mut(&mut self) -> &mut [ExecutedBlock] { + fn blocks_mut(&mut self) -> &mut [PipelinedBlock] { &mut self.ordered_blocks.ordered_blocks } } diff --git a/consensus/src/rand/rand_gen/rand_manager.rs b/consensus/src/rand/rand_gen/rand_manager.rs index 5c57e3cccf802..ba45c0ec0660b 100644 --- a/consensus/src/rand/rand_gen/rand_manager.rs +++ b/consensus/src/rand/rand_gen/rand_manager.rs @@ -334,7 +334,7 @@ impl RandManager { self.process_incoming_blocks(blocks); } Some(reset) = reset_rx.next() => { - while incoming_blocks.try_next().is_ok() {} + while matches!(incoming_blocks.try_next(), Ok(Some(_))) {} self.process_reset(reset); } Some(randomness) = self.decision_rx.next() => { diff --git a/consensus/src/rand/rand_gen/test_utils.rs b/consensus/src/rand/rand_gen/test_utils.rs index f8092679f082e..0c2977941c823 100644 --- a/consensus/src/rand/rand_gen/test_utils.rs +++ b/consensus/src/rand/rand_gen/test_utils.rs @@ -9,7 +9,7 @@ use aptos_consensus_types::{ block::Block, block_data::{BlockData, BlockType}, common::{Author, Round}, - executed_block::ExecutedBlock, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, }; use aptos_crypto::HashValue; @@ -24,7 +24,7 @@ pub fn create_ordered_blocks(rounds: Vec) -> OrderedBlocks { let blocks = rounds .into_iter() .map(|round| { - ExecutedBlock::new( + PipelinedBlock::new( Block::new_for_testing( HashValue::random(), BlockData::new_for_testing( diff --git a/consensus/src/recovery_manager.rs b/consensus/src/recovery_manager.rs index 39c4b767c4c24..897424d4ed4a3 100644 --- a/consensus/src/recovery_manager.rs +++ b/consensus/src/recovery_manager.rs @@ -9,8 +9,8 @@ use crate::{ network::NetworkSender, payload_manager::PayloadManager, persistent_liveness_storage::{PersistentLivenessStorage, RecoveryData}, + pipeline::execution_client::TExecutionClient, round_manager::VerifiedEvent, - state_replication::StateComputer, }; use anyhow::{anyhow, ensure, Context, Result}; use aptos_channels::aptos_channel; @@ -27,9 +27,9 @@ use std::{mem::Discriminant, process, sync::Arc}; /// for processing the events carrying sync info and use 
the info to retrieve blocks from peers pub struct RecoveryManager { epoch_state: Arc, - network: NetworkSender, + network: Arc, storage: Arc, - state_computer: Arc, + execution_client: Arc, last_committed_round: Round, max_blocks_to_request: u64, payload_manager: Arc, @@ -38,9 +38,9 @@ pub struct RecoveryManager { impl RecoveryManager { pub fn new( epoch_state: Arc, - network: NetworkSender, + network: Arc, storage: Arc, - state_computer: Arc, + execution_client: Arc, last_committed_round: Round, max_blocks_to_request: u64, payload_manager: Arc, @@ -49,7 +49,7 @@ impl RecoveryManager { epoch_state, network, storage, - state_computer, + execution_client, last_committed_round, max_blocks_to_request, payload_manager, @@ -95,7 +95,7 @@ impl RecoveryManager { sync_info.highest_commit_cert(), &mut retriever, self.storage.clone(), - self.state_computer.clone(), + self.execution_client.clone(), self.payload_manager.clone(), ) .await?; diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index 39ec0abe1f59f..b9fb837df9cb2 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -188,7 +188,7 @@ pub struct RoundManager { proposer_election: UnequivocalProposerElection, proposal_generator: ProposalGenerator, safety_rules: Arc>, - network: NetworkSender, + network: Arc, storage: Arc, onchain_config: OnChainConsensusConfig, vtxn_config: ValidatorTxnConfig, @@ -205,7 +205,7 @@ impl RoundManager { proposer_election: Arc, proposal_generator: ProposalGenerator, safety_rules: Arc>, - network: NetworkSender, + network: Arc, storage: Arc, onchain_config: OnChainConsensusConfig, buffered_proposal_tx: aptos_channel::Sender, @@ -239,10 +239,6 @@ impl RoundManager { } } - fn decoupled_execution(&self) -> bool { - self.onchain_config.decoupled_execution() - } - // TODO: Evaluate if creating a block retriever is slow and cache this if needed. 
fn create_block_retriever(&self, author: Author) -> BlockRetriever { BlockRetriever::new( @@ -295,7 +291,6 @@ impl RoundManager { self.log_collected_vote_stats(&new_round_event); self.round_state.setup_leader_timeout(); let proposal_msg = self.generate_proposal(new_round_event).await?; - let mut network = self.network.clone(); #[cfg(feature = "failpoints")] { if self.check_whether_to_inject_reconfiguration_error() { @@ -303,7 +298,7 @@ impl RoundManager { .await?; } } - network.broadcast_proposal(proposal_msg).await; + self.network.broadcast_proposal(proposal_msg).await; counters::PROPOSALS_COUNT.inc(); } Ok(()) @@ -384,7 +379,7 @@ impl RoundManager { ) -> anyhow::Result { // Proposal generator will ensure that at most one proposal is generated per round let sync_info = self.block_store.sync_info(); - let mut sender = self.network.clone(); + let sender = self.network.clone(); let callback = async move { sender.broadcast_sync_info(sync_info).await; } @@ -555,16 +550,12 @@ impl RoundManager { } fn sync_only(&self) -> bool { - if self.decoupled_execution() { - let sync_or_not = self.local_config.sync_only || self.block_store.vote_back_pressure(); - counters::OP_COUNTERS - .gauge("sync_only") - .set(sync_or_not as i64); + let sync_or_not = self.local_config.sync_only || self.block_store.vote_back_pressure(); + counters::OP_COUNTERS + .gauge("sync_only") + .set(sync_or_not as i64); - sync_or_not - } else { - self.local_config.sync_only - } + sync_or_not } /// The replica broadcasts a "timeout vote message", which includes the round signature, which @@ -742,7 +733,7 @@ impl RoundManager { ); observe_block(proposal.timestamp_usecs(), BlockStage::SYNCED); - if self.decoupled_execution() && self.block_store.vote_back_pressure() { + if self.block_store.vote_back_pressure() { counters::CONSENSUS_WITHOLD_VOTE_BACKPRESSURE_TRIGGERED.observe(1.0); // In case of back pressure, we delay processing proposal. This is done by resending the // same proposal to self after some time. Even if processing proposal is delayed, we add @@ -754,7 +745,7 @@ impl RoundManager { // tries to add the same block again, which is okay as `execute_and_insert_block` call // is idempotent. 
self.block_store - .execute_and_insert_block(proposal.clone()) + .insert_ordered_block(proposal.clone()) .await .context("[RoundManager] Failed to execute_and_insert the block")?; self.resend_verified_proposal_to_self( @@ -786,7 +777,7 @@ impl RoundManager { while start.elapsed() < Duration::from_millis(timeout_ms) { if !block_store.vote_back_pressure() { if let Err(e) = self_sender.push(author, event) { - error!("Failed to send event to round manager {:?}", e); + warn!("Failed to send event to round manager {:?}", e); } break; } @@ -825,7 +816,7 @@ impl RoundManager { async fn execute_and_vote(&mut self, proposed_block: Block) -> anyhow::Result { let executed_block = self .block_store - .execute_and_insert_block(proposed_block) + .insert_ordered_block(proposed_block) .await .context("[RoundManager] Failed to execute_and_insert the block")?; @@ -841,7 +832,7 @@ impl RoundManager { "[RoundManager] sync_only flag is set, stop voting" ); - let vote_proposal = executed_block.vote_proposal(self.decoupled_execution()); + let vote_proposal = executed_block.vote_proposal(); let vote_result = self.safety_rules.lock().construct_and_sign_vote_two_chain( &vote_proposal, self.block_store.highest_2chain_timeout_cert().as_deref(), @@ -1169,7 +1160,6 @@ impl RoundManager { .collect(); half_peers.truncate(half_peers.len() / 2); self.network - .clone() .send_proposal(proposal_msg.clone(), half_peers) .await; Err(anyhow::anyhow!("Injected error in reconfiguration suffix")) diff --git a/consensus/src/round_manager_fuzzing.rs b/consensus/src/round_manager_fuzzing.rs index 0d8507ec5238c..c6eb1b1f4f9f5 100644 --- a/consensus/src/round_manager_fuzzing.rs +++ b/consensus/src/round_manager_fuzzing.rs @@ -16,8 +16,9 @@ use crate::{ network_interface::{ConsensusNetworkClient, DIRECT_SEND, RPC}, payload_manager::PayloadManager, persistent_liveness_storage::{PersistentLivenessStorage, RecoveryData}, + pipeline::execution_client::DummyExecutionClient, round_manager::RoundManager, - test_utils::{EmptyStateComputer, MockPayloadManager, MockStorage}, + test_utils::{MockPayloadManager, MockStorage}, util::{mock_time_service::SimulatedTimeService, time_service::TimeService}, }; use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; @@ -82,7 +83,7 @@ fn build_empty_store( Arc::new(BlockStore::new( storage, initial_data, - Arc::new(EmptyStateComputer), + Arc::new(DummyExecutionClient), 10, // max pruned blocks in mem Arc::new(SimulatedTimeService::new()), 10, @@ -155,12 +156,12 @@ fn create_node_for_fuzzing() -> RoundManager { epoch: 1, verifier: storage.get_validator_set().into(), }); - let network = NetworkSender::new( + let network = Arc::new(NetworkSender::new( signer.author(), consensus_network_client, self_sender, epoch_state.verifier.clone(), - ); + )); // TODO: mock let block_store = build_empty_store(storage.clone(), initial_data); diff --git a/consensus/src/round_manager_test.rs b/consensus/src/round_manager_test.rs index cbba3a30c8460..181f36df92cf1 100644 --- a/consensus/src/round_manager_test.rs +++ b/consensus/src/round_manager_test.rs @@ -21,8 +21,9 @@ use crate::{ pipeline::buffer_manager::OrderedBlocks, round_manager::RoundManager, test_utils::{ - consensus_runtime, create_vec_signed_transactions, timed_block_on, MockPayloadManager, - MockStateComputer, MockStorage, TreeInserter, + consensus_runtime, create_vec_signed_transactions, + mock_execution_client::MockExecutionClient, timed_block_on, MockPayloadManager, + MockStorage, TreeInserter, }, util::time_service::{ClockTimeService, 
TimeService}, }; @@ -107,7 +108,7 @@ pub struct NodeSetup { pending_network_events: Vec>, all_network_events: Box> + Send + Unpin>, ordered_blocks_events: mpsc::UnboundedReceiver, - mock_state_computer: Arc, + mock_execution_client: Arc, _state_sync_receiver: mpsc::UnboundedReceiver>, id: usize, onchain_consensus_config: OnChainConsensusConfig, @@ -243,16 +244,21 @@ impl NodeSetup { playground.add_node(twin_id, consensus_tx, network_reqs_rx, conn_mgr_reqs_rx); let (self_sender, self_receiver) = aptos_channels::new_test(1000); - let network = NetworkSender::new(author, consensus_network_client, self_sender, validators); + let network = Arc::new(NetworkSender::new( + author, + consensus_network_client, + self_sender, + validators, + )); let all_network_events = Box::new(select(network_events, self_receiver)); let last_vote_sent = initial_data.last_vote(); let (ordered_blocks_tx, ordered_blocks_events) = mpsc::unbounded::(); let (state_sync_client, _state_sync_receiver) = mpsc::unbounded(); - let mock_state_computer = Arc::new(MockStateComputer::new( - state_sync_client, - ordered_blocks_tx, + let mock_execution_client = Arc::new(MockExecutionClient::new( + state_sync_client.clone(), + ordered_blocks_tx.clone(), Arc::clone(&storage), )); let time_service = Arc::new(ClockTimeService::new(executor)); @@ -260,7 +266,7 @@ impl NodeSetup { let block_store = Arc::new(BlockStore::new( storage.clone(), initial_data, - mock_state_computer.clone(), + mock_execution_client.clone(), 10, // max pruned blocks in mem time_service.clone(), 10, @@ -315,7 +321,7 @@ impl NodeSetup { pending_network_events: Vec::new(), all_network_events, ordered_blocks_events, - mock_state_computer, + mock_execution_client, _state_sync_receiver, id, onchain_consensus_config, @@ -476,7 +482,7 @@ impl NodeSetup { .map(|b| b.round()) .collect::>(); assert_eq!(&rounds, expected_rounds); - self.mock_state_computer + self.mock_execution_client .commit_to_storage(ordered_blocks) .await .unwrap(); diff --git a/consensus/src/state_computer.rs b/consensus/src/state_computer.rs index 2a3d50913a3e5..d447e7ae93e07 100644 --- a/consensus/src/state_computer.rs +++ b/consensus/src/state_computer.rs @@ -18,7 +18,7 @@ use crate::{ }; use anyhow::Result; use aptos_consensus_notifications::ConsensusNotificationSender; -use aptos_consensus_types::{block::Block, common::Round, executed_block::ExecutedBlock}; +use aptos_consensus_types::{block::Block, common::Round, pipelined_block::PipelinedBlock}; use aptos_crypto::HashValue; use aptos_executor_types::{BlockExecutorTrait, ExecutorResult, StateComputeResult}; use aptos_infallible::Mutex; @@ -56,13 +56,6 @@ impl PipelineExecutionResult { pub fn new(input_txns: Vec, result: StateComputeResult) -> Self { Self { input_txns, result } } - - pub fn new_dummy() -> Self { - Self { - input_txns: vec![], - result: StateComputeResult::new_dummy(), - } - } } type NotificationType = ( @@ -144,7 +137,7 @@ impl ExecutionProxy { } } - pub fn transactions_to_commit(&self, executed_block: &ExecutedBlock) -> Vec { + pub fn transactions_to_commit(&self, executed_block: &PipelinedBlock) -> Vec { // reconfiguration suffix don't execute if executed_block.is_reconfiguration_suffix() { return vec![]; @@ -242,7 +235,7 @@ impl StateComputer for ExecutionProxy { /// Send a successful commit. A future is fulfilled when the state is finalized. 
async fn commit( &self, - blocks: &[Arc], + blocks: &[Arc], finality_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { @@ -491,7 +484,7 @@ async fn test_commit_sync_race() { } } - let callback = Box::new(move |_a: &[Arc], _b: LedgerInfoWithSignatures| {}); + let callback = Box::new(move |_a: &[Arc], _b: LedgerInfoWithSignatures| {}); let recorded_commit = Arc::new(RecordedCommit { time: Mutex::new(LogicalTime::new(0, 0)), }); diff --git a/consensus/src/state_computer_tests.rs b/consensus/src/state_computer_tests.rs index 99ac2b25b6277..784f38c3e5e64 100644 --- a/consensus/src/state_computer_tests.rs +++ b/consensus/src/state_computer_tests.rs @@ -8,7 +8,7 @@ use crate::{ }; use aptos_config::config::transaction_filter_type::Filter; use aptos_consensus_notifications::{ConsensusNotificationSender, Error}; -use aptos_consensus_types::{block::Block, block_data::BlockData, executed_block::ExecutedBlock}; +use aptos_consensus_types::{block::Block, block_data::BlockData, pipelined_block::PipelinedBlock}; use aptos_crypto::HashValue; use aptos_executor_types::{ state_checkpoint_output::StateCheckpointOutput, BlockExecutorTrait, ExecutorResult, @@ -218,7 +218,7 @@ async fn commit_should_discover_validator_txns() { 3 ]); - let blocks = vec![Arc::new(ExecutedBlock::new( + let blocks = vec![Arc::new(PipelinedBlock::new( block, vec![], state_compute_result, @@ -237,7 +237,7 @@ async fn commit_should_discover_validator_txns() { let (tx, rx) = oneshot::channel::<()>(); let callback = Box::new( - move |_a: &[Arc], _b: LedgerInfoWithSignatures| { + move |_a: &[Arc], _b: LedgerInfoWithSignatures| { tx.send(()).unwrap(); }, ); diff --git a/consensus/src/state_replication.rs b/consensus/src/state_replication.rs index 747423150e456..26da5fa80d163 100644 --- a/consensus/src/state_replication.rs +++ b/consensus/src/state_replication.rs @@ -10,7 +10,7 @@ use crate::{ transaction_shuffler::TransactionShuffler, }; use anyhow::Result; -use aptos_consensus_types::{block::Block, executed_block::ExecutedBlock}; +use aptos_consensus_types::{block::Block, pipelined_block::PipelinedBlock}; use aptos_crypto::HashValue; use aptos_executor_types::ExecutorResult; use aptos_types::{ @@ -20,7 +20,7 @@ use aptos_types::{ use std::sync::Arc; pub type StateComputerCommitCallBackType = - Box], LedgerInfoWithSignatures) + Send + Sync>; + Box], LedgerInfoWithSignatures) + Send + Sync>; /// While Consensus is managing proposed blocks, `StateComputer` is managing the results of the /// (speculative) execution of their payload. @@ -57,7 +57,7 @@ pub trait StateComputer: Send + Sync { /// Send a successful commit. A future is fulfilled when the state is finalized. async fn commit( &self, - blocks: &[Arc], + blocks: &[Arc], finality_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()>; diff --git a/consensus/src/test_utils/mock_execution_client.rs b/consensus/src/test_utils/mock_execution_client.rs new file mode 100644 index 0000000000000..9aa466588139d --- /dev/null +++ b/consensus/src/test_utils/mock_execution_client.rs @@ -0,0 +1,167 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + error::StateSyncError, + network::{IncomingCommitRequest, IncomingRandGenRequest}, + payload_manager::PayloadManager, + pipeline::{ + buffer_manager::OrderedBlocks, execution_client::TExecutionClient, + signing_phase::CommitSignerProvider, + }, + rand::rand_gen::types::RandConfig, + state_replication::StateComputerCommitCallBackType, + test_utils::mock_storage::MockStorage, +}; +use anyhow::{format_err, Result}; +use aptos_channels::aptos_channel; +use aptos_consensus_types::{common::Payload, pipelined_block::PipelinedBlock}; +use aptos_crypto::HashValue; +use aptos_executor_types::ExecutorResult; +use aptos_infallible::Mutex; +use aptos_logger::prelude::*; +use aptos_types::{ + epoch_state::EpochState, + ledger_info::LedgerInfoWithSignatures, + on_chain_config::{Features, OnChainConsensusConfig, OnChainExecutionConfig}, + transaction::SignedTransaction, +}; +use futures::{channel::mpsc, SinkExt}; +use futures_channel::mpsc::UnboundedSender; +use move_core_types::account_address::AccountAddress; +use std::{collections::HashMap, sync::Arc}; + +pub struct MockExecutionClient { + state_sync_client: mpsc::UnboundedSender>, + executor_channel: UnboundedSender, + consensus_db: Arc, + block_cache: Mutex>, + payload_manager: Arc, +} + +impl MockExecutionClient { + pub fn new( + state_sync_client: mpsc::UnboundedSender>, + executor_channel: UnboundedSender, + consensus_db: Arc, + ) -> Self { + MockExecutionClient { + state_sync_client, + executor_channel, + consensus_db, + block_cache: Mutex::new(HashMap::new()), + payload_manager: Arc::from(PayloadManager::DirectMempool), + } + } + + pub async fn commit_to_storage(&self, blocks: OrderedBlocks) -> ExecutorResult<()> { + let OrderedBlocks { + ordered_blocks, + ordered_proof, + callback, + } = blocks; + + self.consensus_db + .commit_to_storage(ordered_proof.ledger_info().clone()); + // mock sending commit notif to state sync + let mut txns = vec![]; + for block in &ordered_blocks { + self.block_cache + .lock() + .remove(&block.id()) + .ok_or_else(|| format_err!("Cannot find block"))?; + let (mut payload_txns, _max_txns_from_block_to_execute) = + self.payload_manager.get_transactions(block.block()).await?; + txns.append(&mut payload_txns); + } + // they may fail during shutdown + let _ = self.state_sync_client.unbounded_send(txns); + + callback( + &ordered_blocks.into_iter().map(Arc::new).collect::>(), + ordered_proof, + ); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl TExecutionClient for MockExecutionClient { + async fn start_epoch( + &self, + _epoch_state: Arc, + _commit_signer_provider: Arc, + _payload_manager: Arc, + _onchain_consensus_config: &OnChainConsensusConfig, + _onchain_execution_config: &OnChainExecutionConfig, + _features: &Features, + _rand_config: Option, + ) -> Option> { + None + } + + fn get_execution_channel(&self) -> Option> { + Some(self.executor_channel.clone()) + } + + async fn finalize_order( + &self, + blocks: &[Arc], + finality_proof: LedgerInfoWithSignatures, + callback: StateComputerCommitCallBackType, + ) -> ExecutorResult<()> { + assert!(!blocks.is_empty()); + info!( + "MockStateComputer commit put on queue {:?}", + blocks.iter().map(|v| v.round()).collect::>() + ); + + for block in blocks { + self.block_cache.lock().insert( + block.id(), + block.payload().unwrap_or(&Payload::empty(false)).clone(), + ); + } + + if self + .executor_channel + .clone() + .send(OrderedBlocks { + ordered_blocks: blocks + .iter() + .map(|b| (**b).clone()) + .collect::>(), + 
ordered_proof: finality_proof, + callback, + }) + .await + .is_err() + { + debug!("Failed to send to buffer manager, maybe epoch ends"); + } + + Ok(()) + } + + fn send_commit_msg( + &self, + _peer_id: AccountAddress, + _commit_msg: IncomingCommitRequest, + ) -> Result<()> { + Ok(()) + } + + async fn sync_to(&self, commit: LedgerInfoWithSignatures) -> Result<(), StateSyncError> { + debug!( + "Fake sync to block id {}", + commit.ledger_info().consensus_block_id() + ); + self.consensus_db + .commit_to_storage(commit.ledger_info().clone()); + Ok(()) + } + + async fn end_epoch(&self) {} +} diff --git a/consensus/src/test_utils/mock_state_computer.rs b/consensus/src/test_utils/mock_state_computer.rs index 3c3d6b5e1446a..aeef2a0ef6e97 100644 --- a/consensus/src/test_utils/mock_state_computer.rs +++ b/consensus/src/test_utils/mock_state_computer.rs @@ -8,106 +8,54 @@ use crate::{ pipeline::buffer_manager::OrderedBlocks, state_computer::{PipelineExecutionResult, StateComputeResultFut}, state_replication::{StateComputer, StateComputerCommitCallBackType}, - test_utils::mock_storage::MockStorage, transaction_deduper::TransactionDeduper, transaction_shuffler::TransactionShuffler, }; -use anyhow::{format_err, Result}; -use aptos_consensus_types::{block::Block, common::Payload, executed_block::ExecutedBlock}; +use anyhow::Result; +use aptos_consensus_types::{block::Block, pipelined_block::PipelinedBlock}; use aptos_crypto::HashValue; use aptos_executor_types::{ExecutorError, ExecutorResult, StateComputeResult}; -use aptos_infallible::Mutex; -use aptos_logger::prelude::*; +use aptos_logger::debug; use aptos_types::{ block_executor::config::BlockExecutorConfigFromOnchain, epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, randomness::Randomness, transaction::SignedTransaction, + ledger_info::LedgerInfoWithSignatures, randomness::Randomness, }; -use futures::{channel::mpsc, SinkExt}; +use futures::SinkExt; use futures_channel::mpsc::UnboundedSender; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -pub struct MockStateComputer { - state_sync_client: mpsc::UnboundedSender>, +pub struct EmptyStateComputer { executor_channel: UnboundedSender, - consensus_db: Arc, - block_cache: Mutex>, - payload_manager: Arc, } -impl MockStateComputer { - pub fn new( - state_sync_client: mpsc::UnboundedSender>, - executor_channel: UnboundedSender, - consensus_db: Arc, - ) -> Self { - MockStateComputer { - state_sync_client, - executor_channel, - consensus_db, - block_cache: Mutex::new(HashMap::new()), - payload_manager: Arc::from(PayloadManager::DirectMempool), - } - } - - pub async fn commit_to_storage(&self, blocks: OrderedBlocks) -> ExecutorResult<()> { - let OrderedBlocks { - ordered_blocks, - ordered_proof, - callback, - } = blocks; - - self.consensus_db - .commit_to_storage(ordered_proof.ledger_info().clone()); - // mock sending commit notif to state sync - let mut txns = vec![]; - for block in &ordered_blocks { - self.block_cache - .lock() - .remove(&block.id()) - .ok_or_else(|| format_err!("Cannot find block"))?; - let (mut payload_txns, _max_txns_from_block_to_execute) = - self.payload_manager.get_transactions(block.block()).await?; - txns.append(&mut payload_txns); - } - // they may fail during shutdown - let _ = self.state_sync_client.unbounded_send(txns); - - callback( - &ordered_blocks.into_iter().map(Arc::new).collect::>(), - ordered_proof, - ); - - Ok(()) +impl EmptyStateComputer { + pub fn new(executor_channel: UnboundedSender) -> Self { + Self { executor_channel } } } 
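Editor's sketch (not part of the patch): with this rework, `EmptyStateComputer` no longer swallows commits; it repackages them as `OrderedBlocks` on the channel given to `new`. A minimal test-style usage under stated assumptions: crate-local imports (`OrderedBlocks`, `EmptyStateComputer`, the `StateComputer` trait) are elided, and `ValidatorSigner::random(None)` plus the helper below are drawn from the test utilities changed elsewhere in this diff.

```rust
use aptos_types::validator_signer::ValidatorSigner;
use futures::{channel::mpsc, StreamExt};
use std::sync::Arc;

#[tokio::test]
async fn empty_state_computer_forwards_commits() {
    let (tx, mut rx) = mpsc::unbounded::<OrderedBlocks>();
    let computer = EmptyStateComputer::new(tx);

    // Blocks plus a matching ledger info, built with the helper updated in
    // consensus/src/pipeline/tests/test_utils.rs in this diff.
    let signer = ValidatorSigner::random(None);
    let (blocks, li_sig) = prepare_executed_blocks_with_executed_ledger_info(&signer);
    let blocks: Vec<_> = blocks.into_iter().map(Arc::new).collect();

    // commit() only repackages the blocks and proof into an OrderedBlocks
    // message on the channel passed to new(); nothing is executed or signed.
    computer
        .commit(&blocks, li_sig, Box::new(|_, _| {}))
        .await
        .unwrap();

    let forwarded = rx.next().await.unwrap();
    assert_eq!(forwarded.ordered_blocks.len(), blocks.len());
}
```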
#[async_trait::async_trait] -impl StateComputer for MockStateComputer { +impl StateComputer for EmptyStateComputer { async fn compute( &self, - block: &Block, + _block: &Block, _parent_block_id: HashValue, _randomness: Option, ) -> ExecutorResult { - self.block_cache.lock().insert( - block.id(), - block.payload().unwrap_or(&Payload::empty(false)).clone(), - ); - let result = PipelineExecutionResult::new_dummy(); - Ok(result) + Ok(PipelineExecutionResult::new( + vec![], + StateComputeResult::new_dummy(), + )) } async fn commit( &self, - blocks: &[Arc], - finality_proof: LedgerInfoWithSignatures, - callback: StateComputerCommitCallBackType, + blocks: &[Arc], + commit: LedgerInfoWithSignatures, + call_back: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { assert!(!blocks.is_empty()); - info!( - "MockStateComputer commit put on queue {:?}", - blocks.iter().map(|v| v.round()).collect::>() - ); + if self .executor_channel .clone() @@ -115,9 +63,9 @@ impl StateComputer for MockStateComputer { ordered_blocks: blocks .iter() .map(|b| (**b).clone()) - .collect::>(), - ordered_proof: finality_proof, - callback, + .collect::>(), + ordered_proof: commit, + callback: call_back, }) .await .is_err() @@ -128,52 +76,6 @@ impl StateComputer for MockStateComputer { Ok(()) } - async fn sync_to(&self, commit: LedgerInfoWithSignatures) -> Result<(), StateSyncError> { - debug!( - "Fake sync to block id {}", - commit.ledger_info().consensus_block_id() - ); - self.consensus_db - .commit_to_storage(commit.ledger_info().clone()); - Ok(()) - } - - fn new_epoch( - &self, - _: &EpochState, - _: Arc, - _: Arc, - _: BlockExecutorConfigFromOnchain, - _: Arc, - _: bool, - ) { - } - - fn end_epoch(&self) {} -} - -pub struct EmptyStateComputer; - -#[async_trait::async_trait] -impl StateComputer for EmptyStateComputer { - async fn compute( - &self, - _block: &Block, - _parent_block_id: HashValue, - _randomness: Option, - ) -> ExecutorResult { - Ok(PipelineExecutionResult::new_dummy()) - } - - async fn commit( - &self, - _blocks: &[Arc], - _commit: LedgerInfoWithSignatures, - _call_back: StateComputerCommitCallBackType, - ) -> ExecutorResult<()> { - Ok(()) - } - async fn sync_to(&self, _commit: LedgerInfoWithSignatures) -> Result<(), StateSyncError> { Ok(()) } @@ -233,7 +135,7 @@ impl StateComputer for RandomComputeResultStateComputer { async fn commit( &self, - _blocks: &[Arc], + _blocks: &[Arc], _commit: LedgerInfoWithSignatures, _call_back: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { diff --git a/consensus/src/test_utils/mod.rs b/consensus/src/test_utils/mod.rs index 5b62d08e59a65..62cead47a2766 100644 --- a/consensus/src/test_utils/mod.rs +++ b/consensus/src/test_utils/mod.rs @@ -6,7 +6,7 @@ use crate::block_storage::{BlockReader, BlockStore}; use aptos_consensus_types::{ block::{block_test_utils::certificate_for_genesis, Block}, common::{Author, Round}, - executed_block::ExecutedBlock, + pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, sync_info::SyncInfo, }; @@ -16,13 +16,18 @@ use aptos_types::{ledger_info::LedgerInfo, validator_signer::ValidatorSigner}; use std::{future::Future, sync::Arc, time::Duration}; use tokio::{runtime, time::timeout}; +#[cfg(test)] +pub mod mock_execution_client; #[cfg(any(test, feature = "fuzzing"))] mod mock_payload_manager; pub mod mock_quorum_store_sender; mod mock_state_computer; mod mock_storage; -use crate::{payload_manager::PayloadManager, util::mock_time_service::SimulatedTimeService}; +use crate::{ + payload_manager::PayloadManager, 
pipeline::execution_client::DummyExecutionClient, + util::mock_time_service::SimulatedTimeService, +}; use aptos_consensus_types::{block::block_test_utils::gen_test_certificate, common::Payload}; use aptos_crypto::ed25519::{Ed25519PrivateKey, Ed25519Signature}; use aptos_types::{ @@ -31,15 +36,16 @@ use aptos_types::{ transaction::{RawTransaction, Script, SignedTransaction, TransactionPayload}, }; pub use mock_payload_manager::MockPayloadManager; +#[cfg(test)] pub use mock_state_computer::EmptyStateComputer; #[cfg(test)] -pub use mock_state_computer::{MockStateComputer, RandomComputeResultStateComputer}; +pub use mock_state_computer::RandomComputeResultStateComputer; pub use mock_storage::{EmptyStorage, MockStorage}; use move_core_types::account_address::AccountAddress; pub const TEST_TIMEOUT: Duration = Duration::from_secs(60); -pub async fn build_simple_tree() -> (Vec>, Arc) { +pub async fn build_simple_tree() -> (Vec>, Arc) { let mut inserter = TreeInserter::default(); let block_store = inserter.block_store(); let genesis = block_store.ordered_root(); @@ -78,7 +84,7 @@ pub fn build_empty_tree() -> Arc { Arc::new(BlockStore::new( storage, initial_data, - Arc::new(EmptyStateComputer), + Arc::new(DummyExecutionClient), 10, // max pruned blocks in mem Arc::new(SimulatedTimeService::new()), 10, @@ -124,10 +130,10 @@ impl TreeInserter { /// `insert_block_with_qc`. pub async fn insert_block( &mut self, - parent: &ExecutedBlock, + parent: &PipelinedBlock, round: Round, committed_block: Option, - ) -> Arc { + ) -> Arc { // Node must carry a QC to its parent let parent_qc = self.create_qc_for_block(parent, committed_block); self.insert_block_with_qc(parent_qc, parent, round).await @@ -136,9 +142,9 @@ impl TreeInserter { pub async fn insert_block_with_qc( &mut self, parent_qc: QuorumCert, - parent: &ExecutedBlock, + parent: &PipelinedBlock, round: Round, - ) -> Arc { + ) -> Arc { self.block_store .insert_block_with_qc(self.create_block_with_qc( parent_qc, @@ -153,7 +159,7 @@ impl TreeInserter { pub fn create_qc_for_block( &self, - block: &ExecutedBlock, + block: &PipelinedBlock, committed_block: Option, ) -> QuorumCert { gen_test_certificate( @@ -164,7 +170,7 @@ impl TreeInserter { ) } - pub fn insert_qc_for_block(&self, block: &ExecutedBlock, committed_block: Option) { + pub fn insert_qc_for_block(&self, block: &PipelinedBlock, committed_block: Option) { self.block_store .insert_single_quorum_cert(self.create_qc_for_block(block, committed_block)) .unwrap() diff --git a/consensus/src/twins/twins_node.rs b/consensus/src/twins/twins_node.rs index 4abf049427876..6452a779d1307 100644 --- a/consensus/src/twins/twins_node.rs +++ b/consensus/src/twins/twins_node.rs @@ -12,7 +12,7 @@ use crate::{ pipeline::buffer_manager::OrderedBlocks, quorum_store::quorum_store_db::MockQuorumStoreDB, rand::rand_gen::storage::in_memory::InMemRandDb, - test_utils::{MockStateComputer, MockStorage}, + test_utils::{mock_execution_client::MockExecutionClient, MockStorage}, util::time_service::ClockTimeService, }; use aptos_bounded_executor::BoundedExecutor; @@ -109,7 +109,8 @@ impl SMRNode { let (ordered_blocks_tx, mut ordered_blocks_events) = mpsc::unbounded::(); let shared_mempool = MockSharedMempool::new(); let (quorum_store_to_mempool_sender, _) = mpsc::channel(1_024); - let state_computer = Arc::new(MockStateComputer::new( + + let execution_client = Arc::new(MockExecutionClient::new( state_sync_client, ordered_blocks_tx, Arc::clone(&storage), @@ -154,7 +155,7 @@ impl SMRNode { consensus_network_client, timeout_sender, 
quorum_store_to_mempool_sender, - state_computer.clone(), + execution_client.clone(), storage.clone(), quorum_store_storage, reconfig_listener, @@ -174,7 +175,7 @@ impl SMRNode { loop { let ordered_blocks = ordered_blocks_events.next().await.unwrap(); let commit = ordered_blocks.ordered_proof.clone(); - state_computer + execution_client .commit_to_storage(ordered_blocks) .await .unwrap(); diff --git a/crates/aptos-admin-service/src/server/mod.rs b/crates/aptos-admin-service/src/server/mod.rs index 28670c1bc3c8d..902a8e300f3e2 100644 --- a/crates/aptos-admin-service/src/server/mod.rs +++ b/crates/aptos-admin-service/src/server/mod.rs @@ -23,7 +23,7 @@ use tokio::runtime::Runtime; mod consensus; #[cfg(target_os = "linux")] -mod profiling; +pub mod profiling; #[cfg(target_os = "linux")] mod thread_dump; mod utils; diff --git a/crates/aptos-admin-service/src/server/profiling.rs b/crates/aptos-admin-service/src/server/profiling.rs index a0f54f4fe586b..65ed481dc9c03 100644 --- a/crates/aptos-admin-service/src/server/profiling.rs +++ b/crates/aptos-admin-service/src/server/profiling.rs @@ -50,15 +50,8 @@ pub async fn handle_cpu_profiling_request(req: Request) -> hyper::Result true, }; - info!( - seconds = seconds, - frequency = frequency, - use_proto = use_proto, - "Starting cpu profiling." - ); match start_cpu_profiling(seconds, frequency, use_proto).await { Ok(body) => { - info!("Cpu profiling is done."); let content_type = if use_proto { mime::APPLICATION_OCTET_STREAM } else { @@ -84,11 +77,17 @@ pub async fn handle_cpu_profiling_request(req: Request) -> hyper::Result anyhow::Result> { + info!( + seconds = seconds, + frequency = frequency, + use_proto = use_proto, + "Starting cpu profiling." + ); let lock = CPU_PROFILE_MUTEX.try_lock(); ensure!(lock.is_some(), "A profiling task is already running."); @@ -117,6 +116,8 @@ async fn start_cpu_profiling( .map_err(|e| anyhow!("Failed to generate flamegraph report: {e:?}."))?; } + info!("Cpu profiling is done."); + Ok(body) } diff --git a/crates/aptos-crypto/src/poseidon_bn254.rs b/crates/aptos-crypto/src/poseidon_bn254.rs index 33d0a2fb585ec..88a5936a664a3 100644 --- a/crates/aptos-crypto/src/poseidon_bn254.rs +++ b/crates/aptos-crypto/src/poseidon_bn254.rs @@ -5,7 +5,7 @@ use anyhow::bail; use ark_ff::PrimeField; use once_cell::sync::Lazy; -// TODO(zkid): Figure out the right library for Poseidon. +// TODO(oidb): Figure out the right library for Poseidon. use poseidon_ark::Poseidon; /// The maximum number of input scalars that can be hashed using the Poseidon-BN254 hash function diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs index 948add632e61a..668310417c971 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs @@ -60,6 +60,7 @@ pub struct IndexerGrpcDataServiceConfig { #[serde(default = "IndexerGrpcDataServiceConfig::default_data_service_response_channel_size")] pub data_service_response_channel_size: usize, /// A list of auth tokens that are allowed to access the service. + #[serde(default)] pub whitelisted_auth_tokens: Vec, /// If set, don't check for auth tokens. 
#[serde(default)] diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/convert.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/convert.rs index 2405a2b62d79f..6559708f61e43 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/convert.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/convert.rs @@ -15,7 +15,7 @@ use aptos_logger::warn; use aptos_protos::{ transaction::{ v1 as transaction, - v1::{any_signature, Ed25519, Secp256k1Ecdsa, WebAuthn, ZkId}, + v1::{any_signature, Ed25519, Oidb, Secp256k1Ecdsa, WebAuthn}, }, util::timestamp, }; @@ -602,10 +602,10 @@ fn convert_signature(signature: &Signature) -> transaction::AnySignature { signature: s.0.clone(), })), }, - Signature::ZkId(s) => transaction::AnySignature { - r#type: transaction::any_signature::Type::Zkid as i32, + Signature::Oidb(s) => transaction::AnySignature { + r#type: transaction::any_signature::Type::Oidb as i32, signature: s.0.clone(), - signature_variant: Some(any_signature::SignatureVariant::Zkid(ZkId { + signature_variant: Some(any_signature::SignatureVariant::Oidb(Oidb { signature: s.0.clone(), })), }, @@ -626,8 +626,8 @@ fn convert_public_key(public_key: &PublicKey) -> transaction::AnyPublicKey { r#type: transaction::any_public_key::Type::Secp256r1Ecdsa as i32, public_key: p.0.clone(), }, - PublicKey::ZkId(p) => transaction::AnyPublicKey { - r#type: transaction::any_public_key::Type::Zkid as i32, + PublicKey::Oidb(p) => transaction::AnyPublicKey { + r#type: transaction::any_public_key::Type::Oidb as i32, public_key: p.0.clone(), }, } diff --git a/ecosystem/indexer-grpc/indexer-grpc-server-framework/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-server-framework/Cargo.toml index 66e157ef77457..a7348fbac5840 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-server-framework/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-server-framework/Cargo.toml @@ -29,3 +29,6 @@ toml = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } warp = { workspace = true } + +[target.'cfg(target_os = "linux")'.dependencies] +aptos-admin-service = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs index 366107e0c0ee2..05aae22948290 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs @@ -1,10 +1,14 @@ // Copyright © Aptos Foundation -use anyhow::{Context, Ok, Result}; +use anyhow::{Context, Result}; +#[cfg(target_os = "linux")] +use aptos_admin_service::profiling::start_cpu_profiling; use backtrace::Backtrace; use clap::Parser; use prometheus::{Encoder, TextEncoder}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +#[cfg(target_os = "linux")] +use std::convert::Infallible; use std::{fs::File, io::Read, panic::PanicInfo, path::PathBuf, process}; use tracing::error; use tracing_subscriber::EnvFilter; @@ -42,7 +46,7 @@ where // Start liveness and readiness probes. 
let task_handler = tokio::spawn(async move { register_probes_and_metrics_handler(health_port).await; - Ok(()) + anyhow::Ok(()) }); let main_task_handler = tokio::spawn(async move { config.run().await.expect("task should exit with Ok.") }); @@ -178,6 +182,7 @@ pub fn setup_logging(make_writer: Option Box async fn register_probes_and_metrics_handler(port: u16) { let readiness = warp::path("readiness") .map(move || warp::reply::with_status("ready", warp::http::StatusCode::OK)); + let metrics_endpoint = warp::path("metrics").map(|| { // Metrics encoding. let metrics = aptos_metrics_core::gather(); @@ -193,9 +198,42 @@ async fn register_probes_and_metrics_handler(port: u16) { .header("Content-Type", "text/plain") .body(encode_buffer) }); - warp::serve(readiness.or(metrics_endpoint)) - .run(([0, 0, 0, 0], port)) - .await; + + if cfg!(target_os = "linux") { + #[cfg(target_os = "linux")] + let profilez = warp::path("profilez").and_then(|| async move { + // TODO(grao): Consider make the parameters configurable. + Ok::<_, Infallible>(match start_cpu_profiling(10, 99, false).await { + Ok(body) => { + let response = Response::builder() + .header("Content-Length", body.len()) + .header("Content-Disposition", "inline") + .header("Content-Type", "image/svg+xml") + .body(body); + + match response { + Ok(res) => warp::reply::with_status(res, warp::http::StatusCode::OK), + Err(e) => warp::reply::with_status( + Response::new(format!("Profiling failed: {e:?}.").as_bytes().to_vec()), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ), + } + }, + Err(e) => warp::reply::with_status( + Response::new(format!("Profiling failed: {e:?}.").as_bytes().to_vec()), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ), + }) + }); + #[cfg(target_os = "linux")] + warp::serve(readiness.or(metrics_endpoint).or(profilez)) + .run(([0, 0, 0, 0], port)) + .await; + } else { + warp::serve(readiness.or(metrics_endpoint)) + .run(([0, 0, 0, 0], port)) + .await; + } } #[cfg(test)] diff --git a/experimental/execution/ptx-executor/src/runner.rs b/experimental/execution/ptx-executor/src/runner.rs index ee3fc58d4b9b6..b214906c459c2 100644 --- a/experimental/execution/ptx-executor/src/runner.rs +++ b/experimental/execution/ptx-executor/src/runner.rs @@ -273,7 +273,7 @@ impl<'scope, 'view: 'scope, BaseView: StateView + Sync> Worker<'view, BaseView> }; let _post = PER_WORKER_TIMER.timer_with(&[&idx, "run_txn_post_vm"]); // TODO(ptx): error handling - let (_vm_status, vm_output, _msg) = vm_output.expect("VM execution failed."); + let (_vm_status, vm_output) = vm_output.expect("VM execution failed."); // inform output state values to the manager // TODO use try_into_storage_change_set() instead, and ChangeSet it returns, instead of VMOutput. 
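Editor's sketch (not part of the patch): the `/profilez` route registered above calls `start_cpu_profiling(10, 99, false)`, i.e. it samples the CPU at 99 Hz for 10 seconds and responds with an SVG flamegraph. A minimal client, assuming `reqwest` and `tokio` (both already in this workspace) and a hypothetical health port of 8084:

```rust
use std::{fs, time::Duration};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // The handler blocks for ~10 seconds while samples are collected,
    // so allow a generous client-side timeout.
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(30))
        .build()?;
    let svg = client
        .get("http://127.0.0.1:8084/profilez") // port is hypothetical
        .send()
        .await?
        .error_for_status()?
        .bytes()
        .await?;
    // The handler sets Content-Type: image/svg+xml, so this is a flamegraph.
    fs::write("profile.svg", &svg)?;
    Ok(())
}
```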
diff --git a/protos/proto/aptos/transaction/v1/transaction.proto b/protos/proto/aptos/transaction/v1/transaction.proto index 9b0cc3d046326..a6fac3e2e3492 100644 --- a/protos/proto/aptos/transaction/v1/transaction.proto +++ b/protos/proto/aptos/transaction/v1/transaction.proto @@ -451,7 +451,7 @@ message AnyPublicKey { TYPE_ED25519 = 1; TYPE_SECP256K1_ECDSA = 2; TYPE_SECP256R1_ECDSA = 3; - TYPE_ZKID = 4; + TYPE_OIDB = 4; } Type type = 1; @@ -464,7 +464,7 @@ message AnySignature { TYPE_ED25519 = 1; TYPE_SECP256K1_ECDSA = 2; TYPE_WEBAUTHN = 3; - TYPE_ZKID = 4; + TYPE_OIDB = 4; } Type type = 1; @@ -478,7 +478,7 @@ message AnySignature { Ed25519 ed25519 = 3; Secp256k1Ecdsa secp256k1_ecdsa = 4; WebAuthn webauthn = 5; - ZkId zkid = 6; + Oidb oidb = 6; } } @@ -494,7 +494,7 @@ message WebAuthn { bytes signature = 1; } -message ZkId { +message Oidb { bytes signature = 1; } diff --git a/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.py b/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.py index 374bea5fbedfd..aad99102d24ff 100644 --- a/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.py +++ b/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.py @@ -17,7 +17,7 @@ ) DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n&aptos/transaction/v1/transaction.proto\x12\x14\x61ptos.transaction.v1\x1a$aptos/util/timestamp/timestamp.proto"\x9a\x01\n\x05\x42lock\x12\x32\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1f.aptos.util.timestamp.Timestamp\x12\x12\n\x06height\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x37\n\x0ctransactions\x18\x03 \x03(\x0b\x32!.aptos.transaction.v1.Transaction\x12\x10\n\x08\x63hain_id\x18\x04 \x01(\r"\xad\x06\n\x0bTransaction\x12\x32\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1f.aptos.util.timestamp.Timestamp\x12\x13\n\x07version\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x33\n\x04info\x18\x03 \x01(\x0b\x32%.aptos.transaction.v1.TransactionInfo\x12\x11\n\x05\x65poch\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x18\n\x0c\x62lock_height\x18\x05 \x01(\x04\x42\x02\x30\x01\x12?\n\x04type\x18\x06 \x01(\x0e\x32\x31.aptos.transaction.v1.Transaction.TransactionType\x12H\n\x0e\x62lock_metadata\x18\x07 \x01(\x0b\x32..aptos.transaction.v1.BlockMetadataTransactionH\x00\x12;\n\x07genesis\x18\x08 \x01(\x0b\x32(.aptos.transaction.v1.GenesisTransactionH\x00\x12L\n\x10state_checkpoint\x18\t \x01(\x0b\x32\x30.aptos.transaction.v1.StateCheckpointTransactionH\x00\x12\x35\n\x04user\x18\n \x01(\x0b\x32%.aptos.transaction.v1.UserTransactionH\x00\x12?\n\tvalidator\x18\x15 \x01(\x0b\x32*.aptos.transaction.v1.ValidatorTransactionH\x00"\xd8\x01\n\x0fTransactionType\x12 \n\x1cTRANSACTION_TYPE_UNSPECIFIED\x10\x00\x12\x1c\n\x18TRANSACTION_TYPE_GENESIS\x10\x01\x12#\n\x1fTRANSACTION_TYPE_BLOCK_METADATA\x10\x02\x12%\n!TRANSACTION_TYPE_STATE_CHECKPOINT\x10\x03\x12\x19\n\x15TRANSACTION_TYPE_USER\x10\x04\x12\x1e\n\x1aTRANSACTION_TYPE_VALIDATOR\x10\x14\x42\n\n\x08txn_data"\xbe\x01\n\x18\x42lockMetadataTransaction\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\x05round\x18\x02 \x01(\x04\x42\x02\x30\x01\x12+\n\x06\x65vents\x18\x03 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event\x12#\n\x1bprevious_block_votes_bitvec\x18\x04 \x01(\x0c\x12\x10\n\x08proposer\x18\x05 \x01(\t\x12\x1f\n\x17\x66\x61iled_proposer_indices\x18\x06 \x03(\r"r\n\x12GenesisTransaction\x12/\n\x07payload\x18\x01 \x01(\x0b\x32\x1e.aptos.transaction.v1.WriteSet\x12+\n\x06\x65vents\x18\x02 
\x03(\x0b\x32\x1b.aptos.transaction.v1.Event"\x1c\n\x1aStateCheckpointTransaction"\x16\n\x14ValidatorTransaction"}\n\x0fUserTransaction\x12=\n\x07request\x18\x01 \x01(\x0b\x32,.aptos.transaction.v1.UserTransactionRequest\x12+\n\x06\x65vents\x18\x02 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event"\x9f\x01\n\x05\x45vent\x12+\n\x03key\x18\x01 \x01(\x0b\x32\x1e.aptos.transaction.v1.EventKey\x12\x1b\n\x0fsequence_number\x18\x02 \x01(\x04\x42\x02\x30\x01\x12,\n\x04type\x18\x03 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12\x10\n\x08type_str\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\t"\xa1\x02\n\x0fTransactionInfo\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\x12\x19\n\x11state_change_hash\x18\x02 \x01(\x0c\x12\x17\n\x0f\x65vent_root_hash\x18\x03 \x01(\x0c\x12"\n\x15state_checkpoint_hash\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x12\x14\n\x08gas_used\x18\x05 \x01(\x04\x42\x02\x30\x01\x12\x0f\n\x07success\x18\x06 \x01(\x08\x12\x11\n\tvm_status\x18\x07 \x01(\t\x12\x1d\n\x15\x61\x63\x63umulator_root_hash\x18\x08 \x01(\x0c\x12\x35\n\x07\x63hanges\x18\t \x03(\x0b\x32$.aptos.transaction.v1.WriteSetChangeB\x18\n\x16_state_checkpoint_hash"@\n\x08\x45ventKey\x12\x1b\n\x0f\x63reation_number\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x17\n\x0f\x61\x63\x63ount_address\x18\x02 \x01(\t"\xb0\x02\n\x16UserTransactionRequest\x12\x0e\n\x06sender\x18\x01 \x01(\t\x12\x1b\n\x0fsequence_number\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x1a\n\x0emax_gas_amount\x18\x03 \x01(\x04\x42\x02\x30\x01\x12\x1a\n\x0egas_unit_price\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x42\n\x19\x65xpiration_timestamp_secs\x18\x05 \x01(\x0b\x32\x1f.aptos.util.timestamp.Timestamp\x12\x39\n\x07payload\x18\x06 \x01(\x0b\x32(.aptos.transaction.v1.TransactionPayload\x12\x32\n\tsignature\x18\x07 \x01(\x0b\x32\x1f.aptos.transaction.v1.Signature"\xda\x02\n\x08WriteSet\x12\x43\n\x0ewrite_set_type\x18\x01 \x01(\x0e\x32+.aptos.transaction.v1.WriteSet.WriteSetType\x12@\n\x10script_write_set\x18\x02 \x01(\x0b\x32$.aptos.transaction.v1.ScriptWriteSetH\x00\x12@\n\x10\x64irect_write_set\x18\x03 \x01(\x0b\x32$.aptos.transaction.v1.DirectWriteSetH\x00"x\n\x0cWriteSetType\x12\x1e\n\x1aWRITE_SET_TYPE_UNSPECIFIED\x10\x00\x12#\n\x1fWRITE_SET_TYPE_SCRIPT_WRITE_SET\x10\x01\x12#\n\x1fWRITE_SET_TYPE_DIRECT_WRITE_SET\x10\x02\x42\x0b\n\twrite_set"Y\n\x0eScriptWriteSet\x12\x12\n\nexecute_as\x18\x01 \x01(\t\x12\x33\n\x06script\x18\x02 \x01(\x0b\x32#.aptos.transaction.v1.ScriptPayload"}\n\x0e\x44irectWriteSet\x12>\n\x10write_set_change\x18\x01 \x03(\x0b\x32$.aptos.transaction.v1.WriteSetChange\x12+\n\x06\x65vents\x18\x02 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event"\x89\x05\n\x0eWriteSetChange\x12\x37\n\x04type\x18\x01 \x01(\x0e\x32).aptos.transaction.v1.WriteSetChange.Type\x12;\n\rdelete_module\x18\x02 \x01(\x0b\x32".aptos.transaction.v1.DeleteModuleH\x00\x12?\n\x0f\x64\x65lete_resource\x18\x03 \x01(\x0b\x32$.aptos.transaction.v1.DeleteResourceH\x00\x12\x42\n\x11\x64\x65lete_table_item\x18\x04 \x01(\x0b\x32%.aptos.transaction.v1.DeleteTableItemH\x00\x12\x39\n\x0cwrite_module\x18\x05 \x01(\x0b\x32!.aptos.transaction.v1.WriteModuleH\x00\x12=\n\x0ewrite_resource\x18\x06 \x01(\x0b\x32#.aptos.transaction.v1.WriteResourceH\x00\x12@\n\x10write_table_item\x18\x07 
\x01(\x0b\x32$.aptos.transaction.v1.WriteTableItemH\x00"\xb5\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x16\n\x12TYPE_DELETE_MODULE\x10\x01\x12\x18\n\x14TYPE_DELETE_RESOURCE\x10\x02\x12\x1a\n\x16TYPE_DELETE_TABLE_ITEM\x10\x03\x12\x15\n\x11TYPE_WRITE_MODULE\x10\x04\x12\x17\n\x13TYPE_WRITE_RESOURCE\x10\x05\x12\x19\n\x15TYPE_WRITE_TABLE_ITEM\x10\x06\x42\x08\n\x06\x63hange"k\n\x0c\x44\x65leteModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x32\n\x06module\x18\x03 \x01(\x0b\x32".aptos.transaction.v1.MoveModuleId"~\n\x0e\x44\x65leteResource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x31\n\x04type\x18\x03 \x01(\x0b\x32#.aptos.transaction.v1.MoveStructTag\x12\x10\n\x08type_str\x18\x04 \x01(\t"{\n\x0f\x44\x65leteTableItem\x12\x16\n\x0estate_key_hash\x18\x01 \x01(\x0c\x12\x0e\n\x06handle\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x33\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32%.aptos.transaction.v1.DeleteTableData"0\n\x0f\x44\x65leteTableData\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\t"n\n\x0bWriteModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x36\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32(.aptos.transaction.v1.MoveModuleBytecode"\x8b\x01\n\rWriteResource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x31\n\x04type\x18\x03 \x01(\x0b\x32#.aptos.transaction.v1.MoveStructTag\x12\x10\n\x08type_str\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\t"R\n\x0eWriteTableData\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x12\n\nvalue_type\x18\x04 \x01(\t"y\n\x0eWriteTableItem\x12\x16\n\x0estate_key_hash\x18\x01 \x01(\x0c\x12\x0e\n\x06handle\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.aptos.transaction.v1.WriteTableData"\x8c\x04\n\x12TransactionPayload\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.aptos.transaction.v1.TransactionPayload.Type\x12L\n\x16\x65ntry_function_payload\x18\x02 \x01(\x0b\x32*.aptos.transaction.v1.EntryFunctionPayloadH\x00\x12=\n\x0escript_payload\x18\x03 \x01(\x0b\x32#.aptos.transaction.v1.ScriptPayloadH\x00\x12\x42\n\x11write_set_payload\x18\x05 \x01(\x0b\x32%.aptos.transaction.v1.WriteSetPayloadH\x00\x12\x41\n\x10multisig_payload\x18\x06 \x01(\x0b\x32%.aptos.transaction.v1.MultisigPayloadH\x00"\x93\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x1f\n\x1bTYPE_ENTRY_FUNCTION_PAYLOAD\x10\x01\x12\x17\n\x13TYPE_SCRIPT_PAYLOAD\x10\x02\x12\x1a\n\x16TYPE_WRITE_SET_PAYLOAD\x10\x04\x12\x19\n\x15TYPE_MULTISIG_PAYLOAD\x10\x05"\x04\x08\x03\x10\x03\x42\t\n\x07payloadJ\x04\x08\x04\x10\x05"\xb9\x01\n\x14\x45ntryFunctionPayload\x12\x37\n\x08\x66unction\x18\x01 \x01(\x0b\x32%.aptos.transaction.v1.EntryFunctionId\x12\x36\n\x0etype_arguments\x18\x02 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12\x11\n\targuments\x18\x03 \x03(\t\x12\x1d\n\x15\x65ntry_function_id_str\x18\x04 \x01(\t"W\n\x12MoveScriptBytecode\x12\x10\n\x08\x62ytecode\x18\x01 \x01(\x0c\x12/\n\x03\x61\x62i\x18\x02 \x01(\x0b\x32".aptos.transaction.v1.MoveFunction"\x92\x01\n\rScriptPayload\x12\x36\n\x04\x63ode\x18\x01 \x01(\x0b\x32(.aptos.transaction.v1.MoveScriptBytecode\x12\x36\n\x0etype_arguments\x18\x02 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12\x11\n\targuments\x18\x03 \x03(\t"\x97\x01\n\x0fMultisigPayload\x12\x18\n\x10multisig_address\x18\x01 
\x01(\t\x12R\n\x13transaction_payload\x18\x02 \x01(\x0b\x32\x30.aptos.transaction.v1.MultisigTransactionPayloadH\x00\x88\x01\x01\x42\x16\n\x14_transaction_payload"\xf9\x01\n\x1aMultisigTransactionPayload\x12\x43\n\x04type\x18\x01 \x01(\x0e\x32\x35.aptos.transaction.v1.MultisigTransactionPayload.Type\x12L\n\x16\x65ntry_function_payload\x18\x02 \x01(\x0b\x32*.aptos.transaction.v1.EntryFunctionPayloadH\x00"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x1f\n\x1bTYPE_ENTRY_FUNCTION_PAYLOAD\x10\x01\x42\t\n\x07payload"U\n\x12MoveModuleBytecode\x12\x10\n\x08\x62ytecode\x18\x01 \x01(\x0c\x12-\n\x03\x61\x62i\x18\x02 \x01(\x0b\x32 .aptos.transaction.v1.MoveModule"\xd2\x01\n\nMoveModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x33\n\x07\x66riends\x18\x03 \x03(\x0b\x32".aptos.transaction.v1.MoveModuleId\x12=\n\x11\x65xposed_functions\x18\x04 \x03(\x0b\x32".aptos.transaction.v1.MoveFunction\x12\x31\n\x07structs\x18\x05 \x03(\x0b\x32 .aptos.transaction.v1.MoveStruct"\x92\x03\n\x0cMoveFunction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\nvisibility\x18\x02 \x01(\x0e\x32-.aptos.transaction.v1.MoveFunction.Visibility\x12\x10\n\x08is_entry\x18\x03 \x01(\x08\x12O\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x32.aptos.transaction.v1.MoveFunctionGenericTypeParam\x12.\n\x06params\x18\x05 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12.\n\x06return\x18\x06 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType"n\n\nVisibility\x12\x1a\n\x16VISIBILITY_UNSPECIFIED\x10\x00\x12\x16\n\x12VISIBILITY_PRIVATE\x10\x01\x12\x15\n\x11VISIBILITY_PUBLIC\x10\x02\x12\x15\n\x11VISIBILITY_FRIEND\x10\x03"\xe9\x01\n\nMoveStruct\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tis_native\x18\x02 \x01(\x08\x12\x34\n\tabilities\x18\x03 \x03(\x0e\x32!.aptos.transaction.v1.MoveAbility\x12M\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x30.aptos.transaction.v1.MoveStructGenericTypeParam\x12\x35\n\x06\x66ields\x18\x05 \x03(\x0b\x32%.aptos.transaction.v1.MoveStructField"h\n\x1aMoveStructGenericTypeParam\x12\x36\n\x0b\x63onstraints\x18\x01 \x03(\x0e\x32!.aptos.transaction.v1.MoveAbility\x12\x12\n\nis_phantom\x18\x02 \x01(\x08"M\n\x0fMoveStructField\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x04type\x18\x02 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveType"V\n\x1cMoveFunctionGenericTypeParam\x12\x36\n\x0b\x63onstraints\x18\x01 \x03(\x0e\x32!.aptos.transaction.v1.MoveAbility"\xf8\x02\n\x08MoveType\x12-\n\x04type\x18\x01 \x01(\x0e\x32\x1f.aptos.transaction.v1.MoveTypes\x12\x30\n\x06vector\x18\x03 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveTypeH\x00\x12\x35\n\x06struct\x18\x04 \x01(\x0b\x32#.aptos.transaction.v1.MoveStructTagH\x00\x12"\n\x18generic_type_param_index\x18\x05 \x01(\rH\x00\x12\x41\n\treference\x18\x06 \x01(\x0b\x32,.aptos.transaction.v1.MoveType.ReferenceTypeH\x00\x12\x14\n\nunparsable\x18\x07 \x01(\tH\x00\x1aL\n\rReferenceType\x12\x0f\n\x07mutable\x18\x01 \x01(\x08\x12*\n\x02to\x18\x02 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveTypeB\t\n\x07\x63ontent"D\n\x0fWriteSetPayload\x12\x31\n\twrite_set\x18\x01 \x01(\x0b\x32\x1e.aptos.transaction.v1.WriteSet"S\n\x0f\x45ntryFunctionId\x12\x32\n\x06module\x18\x01 \x01(\x0b\x32".aptos.transaction.v1.MoveModuleId\x12\x0c\n\x04name\x18\x02 \x01(\t"-\n\x0cMoveModuleId\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t"{\n\rMoveStructTag\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0e\n\x06module\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12;\n\x13generic_type_params\x18\x04 
\x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType"\x9b\x04\n\tSignature\x12\x32\n\x04type\x18\x01 \x01(\x0e\x32$.aptos.transaction.v1.Signature.Type\x12\x39\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0b\x32&.aptos.transaction.v1.Ed25519SignatureH\x00\x12\x44\n\rmulti_ed25519\x18\x03 \x01(\x0b\x32+.aptos.transaction.v1.MultiEd25519SignatureH\x00\x12@\n\x0bmulti_agent\x18\x04 \x01(\x0b\x32).aptos.transaction.v1.MultiAgentSignatureH\x00\x12<\n\tfee_payer\x18\x05 \x01(\x0b\x32\'.aptos.transaction.v1.FeePayerSignatureH\x00\x12;\n\rsingle_sender\x18\x07 \x01(\x0b\x32".aptos.transaction.v1.SingleSenderH\x00"\x8e\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x16\n\x12TYPE_MULTI_ED25519\x10\x02\x12\x14\n\x10TYPE_MULTI_AGENT\x10\x03\x12\x12\n\x0eTYPE_FEE_PAYER\x10\x04\x12\x16\n\x12TYPE_SINGLE_SENDER\x10\x06"\x04\x08\x05\x10\x05\x42\x0b\n\tsignature"9\n\x10\x45\x64\x32\x35\x35\x31\x39Signature\x12\x12\n\npublic_key\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c"o\n\x15MultiEd25519Signature\x12\x13\n\x0bpublic_keys\x18\x01 \x03(\x0c\x12\x12\n\nsignatures\x18\x02 \x03(\x0c\x12\x11\n\tthreshold\x18\x03 \x01(\r\x12\x1a\n\x12public_key_indices\x18\x04 \x03(\r"\xb4\x01\n\x13MultiAgentSignature\x12\x36\n\x06sender\x18\x01 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature\x12"\n\x1asecondary_signer_addresses\x18\x02 \x03(\t\x12\x41\n\x11secondary_signers\x18\x03 \x03(\x0b\x32&.aptos.transaction.v1.AccountSignature"\x8f\x02\n\x11\x46\x65\x65PayerSignature\x12\x36\n\x06sender\x18\x01 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature\x12"\n\x1asecondary_signer_addresses\x18\x02 \x03(\t\x12\x41\n\x11secondary_signers\x18\x03 \x03(\x0b\x32&.aptos.transaction.v1.AccountSignature\x12\x19\n\x11\x66\x65\x65_payer_address\x18\x04 \x01(\t\x12@\n\x10\x66\x65\x65_payer_signer\x18\x05 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature"\xcc\x01\n\x0c\x41nyPublicKey\x12\x35\n\x04type\x18\x01 \x01(\x0e\x32\'.aptos.transaction.v1.AnyPublicKey.Type\x12\x12\n\npublic_key\x18\x02 \x01(\x0c"q\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x18\n\x14TYPE_SECP256K1_ECDSA\x10\x02\x12\x18\n\x14TYPE_SECP256R1_ECDSA\x10\x03\x12\r\n\tTYPE_ZKID\x10\x04"\xb0\x03\n\x0c\x41nySignature\x12\x35\n\x04type\x18\x01 \x01(\x0e\x32\'.aptos.transaction.v1.AnySignature.Type\x12\x15\n\tsignature\x18\x02 \x01(\x0c\x42\x02\x18\x01\x12\x30\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x03 \x01(\x0b\x32\x1d.aptos.transaction.v1.Ed25519H\x00\x12?\n\x0fsecp256k1_ecdsa\x18\x04 \x01(\x0b\x32$.aptos.transaction.v1.Secp256k1EcdsaH\x00\x12\x32\n\x08webauthn\x18\x05 \x01(\x0b\x32\x1e.aptos.transaction.v1.WebAuthnH\x00\x12*\n\x04zkid\x18\x06 \x01(\x0b\x32\x1a.aptos.transaction.v1.ZkIdH\x00"j\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x18\n\x14TYPE_SECP256K1_ECDSA\x10\x02\x12\x11\n\rTYPE_WEBAUTHN\x10\x03\x12\r\n\tTYPE_ZKID\x10\x04\x42\x13\n\x11signature_variant"\x1c\n\x07\x45\x64\x32\x35\x35\x31\x39\x12\x11\n\tsignature\x18\x01 \x01(\x0c"#\n\x0eSecp256k1Ecdsa\x12\x11\n\tsignature\x18\x01 \x01(\x0c"\x1d\n\x08WebAuthn\x12\x11\n\tsignature\x18\x01 \x01(\x0c"\x19\n\x04ZkId\x12\x11\n\tsignature\x18\x01 \x01(\x0c"\x83\x01\n\x12SingleKeySignature\x12\x36\n\npublic_key\x18\x01 \x01(\x0b\x32".aptos.transaction.v1.AnyPublicKey\x12\x35\n\tsignature\x18\x02 \x01(\x0b\x32".aptos.transaction.v1.AnySignature"X\n\x10IndexedSignature\x12\r\n\x05index\x18\x01 \x01(\r\x12\x35\n\tsignature\x18\x02 
\x01(\x0b\x32".aptos.transaction.v1.AnySignature"\xa5\x01\n\x11MultiKeySignature\x12\x37\n\x0bpublic_keys\x18\x01 \x03(\x0b\x32".aptos.transaction.v1.AnyPublicKey\x12:\n\nsignatures\x18\x02 \x03(\x0b\x32&.aptos.transaction.v1.IndexedSignature\x12\x1b\n\x13signatures_required\x18\x03 \x01(\r"F\n\x0cSingleSender\x12\x36\n\x06sender\x18\x01 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature"\xe4\x03\n\x10\x41\x63\x63ountSignature\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32+.aptos.transaction.v1.AccountSignature.Type\x12\x39\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0b\x32&.aptos.transaction.v1.Ed25519SignatureH\x00\x12\x44\n\rmulti_ed25519\x18\x03 \x01(\x0b\x32+.aptos.transaction.v1.MultiEd25519SignatureH\x00\x12H\n\x14single_key_signature\x18\x05 \x01(\x0b\x32(.aptos.transaction.v1.SingleKeySignatureH\x00\x12\x46\n\x13multi_key_signature\x18\x06 \x01(\x0b\x32\'.aptos.transaction.v1.MultiKeySignatureH\x00"u\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x16\n\x12TYPE_MULTI_ED25519\x10\x02\x12\x13\n\x0fTYPE_SINGLE_KEY\x10\x04\x12\x12\n\x0eTYPE_MULTI_KEY\x10\x05"\x04\x08\x03\x10\x03\x42\x0b\n\tsignature*\xea\x02\n\tMoveTypes\x12\x1a\n\x16MOVE_TYPES_UNSPECIFIED\x10\x00\x12\x13\n\x0fMOVE_TYPES_BOOL\x10\x01\x12\x11\n\rMOVE_TYPES_U8\x10\x02\x12\x12\n\x0eMOVE_TYPES_U16\x10\x0c\x12\x12\n\x0eMOVE_TYPES_U32\x10\r\x12\x12\n\x0eMOVE_TYPES_U64\x10\x03\x12\x13\n\x0fMOVE_TYPES_U128\x10\x04\x12\x13\n\x0fMOVE_TYPES_U256\x10\x0e\x12\x16\n\x12MOVE_TYPES_ADDRESS\x10\x05\x12\x15\n\x11MOVE_TYPES_SIGNER\x10\x06\x12\x15\n\x11MOVE_TYPES_VECTOR\x10\x07\x12\x15\n\x11MOVE_TYPES_STRUCT\x10\x08\x12!\n\x1dMOVE_TYPES_GENERIC_TYPE_PARAM\x10\t\x12\x18\n\x14MOVE_TYPES_REFERENCE\x10\n\x12\x19\n\x15MOVE_TYPES_UNPARSABLE\x10\x0b*\x87\x01\n\x0bMoveAbility\x12\x1c\n\x18MOVE_ABILITY_UNSPECIFIED\x10\x00\x12\x15\n\x11MOVE_ABILITY_COPY\x10\x01\x12\x15\n\x11MOVE_ABILITY_DROP\x10\x02\x12\x16\n\x12MOVE_ABILITY_STORE\x10\x03\x12\x14\n\x10MOVE_ABILITY_KEY\x10\x04\x62\x06proto3' + b'\n&aptos/transaction/v1/transaction.proto\x12\x14\x61ptos.transaction.v1\x1a$aptos/util/timestamp/timestamp.proto"\x9a\x01\n\x05\x42lock\x12\x32\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1f.aptos.util.timestamp.Timestamp\x12\x12\n\x06height\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x37\n\x0ctransactions\x18\x03 \x03(\x0b\x32!.aptos.transaction.v1.Transaction\x12\x10\n\x08\x63hain_id\x18\x04 \x01(\r"\xad\x06\n\x0bTransaction\x12\x32\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1f.aptos.util.timestamp.Timestamp\x12\x13\n\x07version\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x33\n\x04info\x18\x03 \x01(\x0b\x32%.aptos.transaction.v1.TransactionInfo\x12\x11\n\x05\x65poch\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x18\n\x0c\x62lock_height\x18\x05 \x01(\x04\x42\x02\x30\x01\x12?\n\x04type\x18\x06 \x01(\x0e\x32\x31.aptos.transaction.v1.Transaction.TransactionType\x12H\n\x0e\x62lock_metadata\x18\x07 \x01(\x0b\x32..aptos.transaction.v1.BlockMetadataTransactionH\x00\x12;\n\x07genesis\x18\x08 \x01(\x0b\x32(.aptos.transaction.v1.GenesisTransactionH\x00\x12L\n\x10state_checkpoint\x18\t \x01(\x0b\x32\x30.aptos.transaction.v1.StateCheckpointTransactionH\x00\x12\x35\n\x04user\x18\n \x01(\x0b\x32%.aptos.transaction.v1.UserTransactionH\x00\x12?\n\tvalidator\x18\x15 \x01(\x0b\x32*.aptos.transaction.v1.ValidatorTransactionH\x00"\xd8\x01\n\x0fTransactionType\x12 
\n\x1cTRANSACTION_TYPE_UNSPECIFIED\x10\x00\x12\x1c\n\x18TRANSACTION_TYPE_GENESIS\x10\x01\x12#\n\x1fTRANSACTION_TYPE_BLOCK_METADATA\x10\x02\x12%\n!TRANSACTION_TYPE_STATE_CHECKPOINT\x10\x03\x12\x19\n\x15TRANSACTION_TYPE_USER\x10\x04\x12\x1e\n\x1aTRANSACTION_TYPE_VALIDATOR\x10\x14\x42\n\n\x08txn_data"\xbe\x01\n\x18\x42lockMetadataTransaction\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\x05round\x18\x02 \x01(\x04\x42\x02\x30\x01\x12+\n\x06\x65vents\x18\x03 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event\x12#\n\x1bprevious_block_votes_bitvec\x18\x04 \x01(\x0c\x12\x10\n\x08proposer\x18\x05 \x01(\t\x12\x1f\n\x17\x66\x61iled_proposer_indices\x18\x06 \x03(\r"r\n\x12GenesisTransaction\x12/\n\x07payload\x18\x01 \x01(\x0b\x32\x1e.aptos.transaction.v1.WriteSet\x12+\n\x06\x65vents\x18\x02 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event"\x1c\n\x1aStateCheckpointTransaction"\x16\n\x14ValidatorTransaction"}\n\x0fUserTransaction\x12=\n\x07request\x18\x01 \x01(\x0b\x32,.aptos.transaction.v1.UserTransactionRequest\x12+\n\x06\x65vents\x18\x02 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event"\x9f\x01\n\x05\x45vent\x12+\n\x03key\x18\x01 \x01(\x0b\x32\x1e.aptos.transaction.v1.EventKey\x12\x1b\n\x0fsequence_number\x18\x02 \x01(\x04\x42\x02\x30\x01\x12,\n\x04type\x18\x03 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12\x10\n\x08type_str\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\t"\xa1\x02\n\x0fTransactionInfo\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\x12\x19\n\x11state_change_hash\x18\x02 \x01(\x0c\x12\x17\n\x0f\x65vent_root_hash\x18\x03 \x01(\x0c\x12"\n\x15state_checkpoint_hash\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x12\x14\n\x08gas_used\x18\x05 \x01(\x04\x42\x02\x30\x01\x12\x0f\n\x07success\x18\x06 \x01(\x08\x12\x11\n\tvm_status\x18\x07 \x01(\t\x12\x1d\n\x15\x61\x63\x63umulator_root_hash\x18\x08 \x01(\x0c\x12\x35\n\x07\x63hanges\x18\t \x03(\x0b\x32$.aptos.transaction.v1.WriteSetChangeB\x18\n\x16_state_checkpoint_hash"@\n\x08\x45ventKey\x12\x1b\n\x0f\x63reation_number\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x17\n\x0f\x61\x63\x63ount_address\x18\x02 \x01(\t"\xb0\x02\n\x16UserTransactionRequest\x12\x0e\n\x06sender\x18\x01 \x01(\t\x12\x1b\n\x0fsequence_number\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x1a\n\x0emax_gas_amount\x18\x03 \x01(\x04\x42\x02\x30\x01\x12\x1a\n\x0egas_unit_price\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x42\n\x19\x65xpiration_timestamp_secs\x18\x05 \x01(\x0b\x32\x1f.aptos.util.timestamp.Timestamp\x12\x39\n\x07payload\x18\x06 \x01(\x0b\x32(.aptos.transaction.v1.TransactionPayload\x12\x32\n\tsignature\x18\x07 \x01(\x0b\x32\x1f.aptos.transaction.v1.Signature"\xda\x02\n\x08WriteSet\x12\x43\n\x0ewrite_set_type\x18\x01 \x01(\x0e\x32+.aptos.transaction.v1.WriteSet.WriteSetType\x12@\n\x10script_write_set\x18\x02 \x01(\x0b\x32$.aptos.transaction.v1.ScriptWriteSetH\x00\x12@\n\x10\x64irect_write_set\x18\x03 \x01(\x0b\x32$.aptos.transaction.v1.DirectWriteSetH\x00"x\n\x0cWriteSetType\x12\x1e\n\x1aWRITE_SET_TYPE_UNSPECIFIED\x10\x00\x12#\n\x1fWRITE_SET_TYPE_SCRIPT_WRITE_SET\x10\x01\x12#\n\x1fWRITE_SET_TYPE_DIRECT_WRITE_SET\x10\x02\x42\x0b\n\twrite_set"Y\n\x0eScriptWriteSet\x12\x12\n\nexecute_as\x18\x01 \x01(\t\x12\x33\n\x06script\x18\x02 \x01(\x0b\x32#.aptos.transaction.v1.ScriptPayload"}\n\x0e\x44irectWriteSet\x12>\n\x10write_set_change\x18\x01 \x03(\x0b\x32$.aptos.transaction.v1.WriteSetChange\x12+\n\x06\x65vents\x18\x02 \x03(\x0b\x32\x1b.aptos.transaction.v1.Event"\x89\x05\n\x0eWriteSetChange\x12\x37\n\x04type\x18\x01 \x01(\x0e\x32).aptos.transaction.v1.WriteSetChange.Type\x12;\n\rdelete_module\x18\x02 
\x01(\x0b\x32".aptos.transaction.v1.DeleteModuleH\x00\x12?\n\x0f\x64\x65lete_resource\x18\x03 \x01(\x0b\x32$.aptos.transaction.v1.DeleteResourceH\x00\x12\x42\n\x11\x64\x65lete_table_item\x18\x04 \x01(\x0b\x32%.aptos.transaction.v1.DeleteTableItemH\x00\x12\x39\n\x0cwrite_module\x18\x05 \x01(\x0b\x32!.aptos.transaction.v1.WriteModuleH\x00\x12=\n\x0ewrite_resource\x18\x06 \x01(\x0b\x32#.aptos.transaction.v1.WriteResourceH\x00\x12@\n\x10write_table_item\x18\x07 \x01(\x0b\x32$.aptos.transaction.v1.WriteTableItemH\x00"\xb5\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x16\n\x12TYPE_DELETE_MODULE\x10\x01\x12\x18\n\x14TYPE_DELETE_RESOURCE\x10\x02\x12\x1a\n\x16TYPE_DELETE_TABLE_ITEM\x10\x03\x12\x15\n\x11TYPE_WRITE_MODULE\x10\x04\x12\x17\n\x13TYPE_WRITE_RESOURCE\x10\x05\x12\x19\n\x15TYPE_WRITE_TABLE_ITEM\x10\x06\x42\x08\n\x06\x63hange"k\n\x0c\x44\x65leteModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x32\n\x06module\x18\x03 \x01(\x0b\x32".aptos.transaction.v1.MoveModuleId"~\n\x0e\x44\x65leteResource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x31\n\x04type\x18\x03 \x01(\x0b\x32#.aptos.transaction.v1.MoveStructTag\x12\x10\n\x08type_str\x18\x04 \x01(\t"{\n\x0f\x44\x65leteTableItem\x12\x16\n\x0estate_key_hash\x18\x01 \x01(\x0c\x12\x0e\n\x06handle\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x33\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32%.aptos.transaction.v1.DeleteTableData"0\n\x0f\x44\x65leteTableData\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\t"n\n\x0bWriteModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x36\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32(.aptos.transaction.v1.MoveModuleBytecode"\x8b\x01\n\rWriteResource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x16\n\x0estate_key_hash\x18\x02 \x01(\x0c\x12\x31\n\x04type\x18\x03 \x01(\x0b\x32#.aptos.transaction.v1.MoveStructTag\x12\x10\n\x08type_str\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\t"R\n\x0eWriteTableData\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x12\n\nvalue_type\x18\x04 \x01(\t"y\n\x0eWriteTableItem\x12\x16\n\x0estate_key_hash\x18\x01 \x01(\x0c\x12\x0e\n\x06handle\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.aptos.transaction.v1.WriteTableData"\x8c\x04\n\x12TransactionPayload\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.aptos.transaction.v1.TransactionPayload.Type\x12L\n\x16\x65ntry_function_payload\x18\x02 \x01(\x0b\x32*.aptos.transaction.v1.EntryFunctionPayloadH\x00\x12=\n\x0escript_payload\x18\x03 \x01(\x0b\x32#.aptos.transaction.v1.ScriptPayloadH\x00\x12\x42\n\x11write_set_payload\x18\x05 \x01(\x0b\x32%.aptos.transaction.v1.WriteSetPayloadH\x00\x12\x41\n\x10multisig_payload\x18\x06 \x01(\x0b\x32%.aptos.transaction.v1.MultisigPayloadH\x00"\x93\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x1f\n\x1bTYPE_ENTRY_FUNCTION_PAYLOAD\x10\x01\x12\x17\n\x13TYPE_SCRIPT_PAYLOAD\x10\x02\x12\x1a\n\x16TYPE_WRITE_SET_PAYLOAD\x10\x04\x12\x19\n\x15TYPE_MULTISIG_PAYLOAD\x10\x05"\x04\x08\x03\x10\x03\x42\t\n\x07payloadJ\x04\x08\x04\x10\x05"\xb9\x01\n\x14\x45ntryFunctionPayload\x12\x37\n\x08\x66unction\x18\x01 \x01(\x0b\x32%.aptos.transaction.v1.EntryFunctionId\x12\x36\n\x0etype_arguments\x18\x02 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12\x11\n\targuments\x18\x03 \x03(\t\x12\x1d\n\x15\x65ntry_function_id_str\x18\x04 
\x01(\t"W\n\x12MoveScriptBytecode\x12\x10\n\x08\x62ytecode\x18\x01 \x01(\x0c\x12/\n\x03\x61\x62i\x18\x02 \x01(\x0b\x32".aptos.transaction.v1.MoveFunction"\x92\x01\n\rScriptPayload\x12\x36\n\x04\x63ode\x18\x01 \x01(\x0b\x32(.aptos.transaction.v1.MoveScriptBytecode\x12\x36\n\x0etype_arguments\x18\x02 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12\x11\n\targuments\x18\x03 \x03(\t"\x97\x01\n\x0fMultisigPayload\x12\x18\n\x10multisig_address\x18\x01 \x01(\t\x12R\n\x13transaction_payload\x18\x02 \x01(\x0b\x32\x30.aptos.transaction.v1.MultisigTransactionPayloadH\x00\x88\x01\x01\x42\x16\n\x14_transaction_payload"\xf9\x01\n\x1aMultisigTransactionPayload\x12\x43\n\x04type\x18\x01 \x01(\x0e\x32\x35.aptos.transaction.v1.MultisigTransactionPayload.Type\x12L\n\x16\x65ntry_function_payload\x18\x02 \x01(\x0b\x32*.aptos.transaction.v1.EntryFunctionPayloadH\x00"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x1f\n\x1bTYPE_ENTRY_FUNCTION_PAYLOAD\x10\x01\x42\t\n\x07payload"U\n\x12MoveModuleBytecode\x12\x10\n\x08\x62ytecode\x18\x01 \x01(\x0c\x12-\n\x03\x61\x62i\x18\x02 \x01(\x0b\x32 .aptos.transaction.v1.MoveModule"\xd2\x01\n\nMoveModule\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x33\n\x07\x66riends\x18\x03 \x03(\x0b\x32".aptos.transaction.v1.MoveModuleId\x12=\n\x11\x65xposed_functions\x18\x04 \x03(\x0b\x32".aptos.transaction.v1.MoveFunction\x12\x31\n\x07structs\x18\x05 \x03(\x0b\x32 .aptos.transaction.v1.MoveStruct"\x92\x03\n\x0cMoveFunction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\nvisibility\x18\x02 \x01(\x0e\x32-.aptos.transaction.v1.MoveFunction.Visibility\x12\x10\n\x08is_entry\x18\x03 \x01(\x08\x12O\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x32.aptos.transaction.v1.MoveFunctionGenericTypeParam\x12.\n\x06params\x18\x05 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType\x12.\n\x06return\x18\x06 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType"n\n\nVisibility\x12\x1a\n\x16VISIBILITY_UNSPECIFIED\x10\x00\x12\x16\n\x12VISIBILITY_PRIVATE\x10\x01\x12\x15\n\x11VISIBILITY_PUBLIC\x10\x02\x12\x15\n\x11VISIBILITY_FRIEND\x10\x03"\xe9\x01\n\nMoveStruct\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tis_native\x18\x02 \x01(\x08\x12\x34\n\tabilities\x18\x03 \x03(\x0e\x32!.aptos.transaction.v1.MoveAbility\x12M\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x30.aptos.transaction.v1.MoveStructGenericTypeParam\x12\x35\n\x06\x66ields\x18\x05 \x03(\x0b\x32%.aptos.transaction.v1.MoveStructField"h\n\x1aMoveStructGenericTypeParam\x12\x36\n\x0b\x63onstraints\x18\x01 \x03(\x0e\x32!.aptos.transaction.v1.MoveAbility\x12\x12\n\nis_phantom\x18\x02 \x01(\x08"M\n\x0fMoveStructField\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x04type\x18\x02 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveType"V\n\x1cMoveFunctionGenericTypeParam\x12\x36\n\x0b\x63onstraints\x18\x01 \x03(\x0e\x32!.aptos.transaction.v1.MoveAbility"\xf8\x02\n\x08MoveType\x12-\n\x04type\x18\x01 \x01(\x0e\x32\x1f.aptos.transaction.v1.MoveTypes\x12\x30\n\x06vector\x18\x03 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveTypeH\x00\x12\x35\n\x06struct\x18\x04 \x01(\x0b\x32#.aptos.transaction.v1.MoveStructTagH\x00\x12"\n\x18generic_type_param_index\x18\x05 \x01(\rH\x00\x12\x41\n\treference\x18\x06 \x01(\x0b\x32,.aptos.transaction.v1.MoveType.ReferenceTypeH\x00\x12\x14\n\nunparsable\x18\x07 \x01(\tH\x00\x1aL\n\rReferenceType\x12\x0f\n\x07mutable\x18\x01 \x01(\x08\x12*\n\x02to\x18\x02 \x01(\x0b\x32\x1e.aptos.transaction.v1.MoveTypeB\t\n\x07\x63ontent"D\n\x0fWriteSetPayload\x12\x31\n\twrite_set\x18\x01 
\x01(\x0b\x32\x1e.aptos.transaction.v1.WriteSet"S\n\x0f\x45ntryFunctionId\x12\x32\n\x06module\x18\x01 \x01(\x0b\x32".aptos.transaction.v1.MoveModuleId\x12\x0c\n\x04name\x18\x02 \x01(\t"-\n\x0cMoveModuleId\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t"{\n\rMoveStructTag\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0e\n\x06module\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12;\n\x13generic_type_params\x18\x04 \x03(\x0b\x32\x1e.aptos.transaction.v1.MoveType"\x9b\x04\n\tSignature\x12\x32\n\x04type\x18\x01 \x01(\x0e\x32$.aptos.transaction.v1.Signature.Type\x12\x39\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0b\x32&.aptos.transaction.v1.Ed25519SignatureH\x00\x12\x44\n\rmulti_ed25519\x18\x03 \x01(\x0b\x32+.aptos.transaction.v1.MultiEd25519SignatureH\x00\x12@\n\x0bmulti_agent\x18\x04 \x01(\x0b\x32).aptos.transaction.v1.MultiAgentSignatureH\x00\x12<\n\tfee_payer\x18\x05 \x01(\x0b\x32\'.aptos.transaction.v1.FeePayerSignatureH\x00\x12;\n\rsingle_sender\x18\x07 \x01(\x0b\x32".aptos.transaction.v1.SingleSenderH\x00"\x8e\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x16\n\x12TYPE_MULTI_ED25519\x10\x02\x12\x14\n\x10TYPE_MULTI_AGENT\x10\x03\x12\x12\n\x0eTYPE_FEE_PAYER\x10\x04\x12\x16\n\x12TYPE_SINGLE_SENDER\x10\x06"\x04\x08\x05\x10\x05\x42\x0b\n\tsignature"9\n\x10\x45\x64\x32\x35\x35\x31\x39Signature\x12\x12\n\npublic_key\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c"o\n\x15MultiEd25519Signature\x12\x13\n\x0bpublic_keys\x18\x01 \x03(\x0c\x12\x12\n\nsignatures\x18\x02 \x03(\x0c\x12\x11\n\tthreshold\x18\x03 \x01(\r\x12\x1a\n\x12public_key_indices\x18\x04 \x03(\r"\xb4\x01\n\x13MultiAgentSignature\x12\x36\n\x06sender\x18\x01 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature\x12"\n\x1asecondary_signer_addresses\x18\x02 \x03(\t\x12\x41\n\x11secondary_signers\x18\x03 \x03(\x0b\x32&.aptos.transaction.v1.AccountSignature"\x8f\x02\n\x11\x46\x65\x65PayerSignature\x12\x36\n\x06sender\x18\x01 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature\x12"\n\x1asecondary_signer_addresses\x18\x02 \x03(\t\x12\x41\n\x11secondary_signers\x18\x03 \x03(\x0b\x32&.aptos.transaction.v1.AccountSignature\x12\x19\n\x11\x66\x65\x65_payer_address\x18\x04 \x01(\t\x12@\n\x10\x66\x65\x65_payer_signer\x18\x05 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature"\xcc\x01\n\x0c\x41nyPublicKey\x12\x35\n\x04type\x18\x01 \x01(\x0e\x32\'.aptos.transaction.v1.AnyPublicKey.Type\x12\x12\n\npublic_key\x18\x02 \x01(\x0c"q\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x18\n\x14TYPE_SECP256K1_ECDSA\x10\x02\x12\x18\n\x14TYPE_SECP256R1_ECDSA\x10\x03\x12\r\n\tTYPE_OIDB\x10\x04"\xb0\x03\n\x0c\x41nySignature\x12\x35\n\x04type\x18\x01 \x01(\x0e\x32\'.aptos.transaction.v1.AnySignature.Type\x12\x15\n\tsignature\x18\x02 \x01(\x0c\x42\x02\x18\x01\x12\x30\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x03 \x01(\x0b\x32\x1d.aptos.transaction.v1.Ed25519H\x00\x12?\n\x0fsecp256k1_ecdsa\x18\x04 \x01(\x0b\x32$.aptos.transaction.v1.Secp256k1EcdsaH\x00\x12\x32\n\x08webauthn\x18\x05 \x01(\x0b\x32\x1e.aptos.transaction.v1.WebAuthnH\x00\x12*\n\x04oidb\x18\x06 \x01(\x0b\x32\x1a.aptos.transaction.v1.OidbH\x00"j\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x18\n\x14TYPE_SECP256K1_ECDSA\x10\x02\x12\x11\n\rTYPE_WEBAUTHN\x10\x03\x12\r\n\tTYPE_OIDB\x10\x04\x42\x13\n\x11signature_variant"\x1c\n\x07\x45\x64\x32\x35\x35\x31\x39\x12\x11\n\tsignature\x18\x01 
\x01(\x0c"#\n\x0eSecp256k1Ecdsa\x12\x11\n\tsignature\x18\x01 \x01(\x0c"\x1d\n\x08WebAuthn\x12\x11\n\tsignature\x18\x01 \x01(\x0c"\x19\n\x04Oidb\x12\x11\n\tsignature\x18\x01 \x01(\x0c"\x83\x01\n\x12SingleKeySignature\x12\x36\n\npublic_key\x18\x01 \x01(\x0b\x32".aptos.transaction.v1.AnyPublicKey\x12\x35\n\tsignature\x18\x02 \x01(\x0b\x32".aptos.transaction.v1.AnySignature"X\n\x10IndexedSignature\x12\r\n\x05index\x18\x01 \x01(\r\x12\x35\n\tsignature\x18\x02 \x01(\x0b\x32".aptos.transaction.v1.AnySignature"\xa5\x01\n\x11MultiKeySignature\x12\x37\n\x0bpublic_keys\x18\x01 \x03(\x0b\x32".aptos.transaction.v1.AnyPublicKey\x12:\n\nsignatures\x18\x02 \x03(\x0b\x32&.aptos.transaction.v1.IndexedSignature\x12\x1b\n\x13signatures_required\x18\x03 \x01(\r"F\n\x0cSingleSender\x12\x36\n\x06sender\x18\x01 \x01(\x0b\x32&.aptos.transaction.v1.AccountSignature"\xe4\x03\n\x10\x41\x63\x63ountSignature\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32+.aptos.transaction.v1.AccountSignature.Type\x12\x39\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0b\x32&.aptos.transaction.v1.Ed25519SignatureH\x00\x12\x44\n\rmulti_ed25519\x18\x03 \x01(\x0b\x32+.aptos.transaction.v1.MultiEd25519SignatureH\x00\x12H\n\x14single_key_signature\x18\x05 \x01(\x0b\x32(.aptos.transaction.v1.SingleKeySignatureH\x00\x12\x46\n\x13multi_key_signature\x18\x06 \x01(\x0b\x32\'.aptos.transaction.v1.MultiKeySignatureH\x00"u\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cTYPE_ED25519\x10\x01\x12\x16\n\x12TYPE_MULTI_ED25519\x10\x02\x12\x13\n\x0fTYPE_SINGLE_KEY\x10\x04\x12\x12\n\x0eTYPE_MULTI_KEY\x10\x05"\x04\x08\x03\x10\x03\x42\x0b\n\tsignature*\xea\x02\n\tMoveTypes\x12\x1a\n\x16MOVE_TYPES_UNSPECIFIED\x10\x00\x12\x13\n\x0fMOVE_TYPES_BOOL\x10\x01\x12\x11\n\rMOVE_TYPES_U8\x10\x02\x12\x12\n\x0eMOVE_TYPES_U16\x10\x0c\x12\x12\n\x0eMOVE_TYPES_U32\x10\r\x12\x12\n\x0eMOVE_TYPES_U64\x10\x03\x12\x13\n\x0fMOVE_TYPES_U128\x10\x04\x12\x13\n\x0fMOVE_TYPES_U256\x10\x0e\x12\x16\n\x12MOVE_TYPES_ADDRESS\x10\x05\x12\x15\n\x11MOVE_TYPES_SIGNER\x10\x06\x12\x15\n\x11MOVE_TYPES_VECTOR\x10\x07\x12\x15\n\x11MOVE_TYPES_STRUCT\x10\x08\x12!\n\x1dMOVE_TYPES_GENERIC_TYPE_PARAM\x10\t\x12\x18\n\x14MOVE_TYPES_REFERENCE\x10\n\x12\x19\n\x15MOVE_TYPES_UNPARSABLE\x10\x0b*\x87\x01\n\x0bMoveAbility\x12\x1c\n\x18MOVE_ABILITY_UNSPECIFIED\x10\x00\x12\x15\n\x11MOVE_ABILITY_COPY\x10\x01\x12\x15\n\x11MOVE_ABILITY_DROP\x10\x02\x12\x16\n\x12MOVE_ABILITY_STORE\x10\x03\x12\x14\n\x10MOVE_ABILITY_KEY\x10\x04\x62\x06proto3' ) _globals = globals() @@ -183,8 +183,8 @@ _globals["_SECP256K1ECDSA"]._serialized_end = 9641 _globals["_WEBAUTHN"]._serialized_start = 9643 _globals["_WEBAUTHN"]._serialized_end = 9672 - _globals["_ZKID"]._serialized_start = 9674 - _globals["_ZKID"]._serialized_end = 9699 + _globals["_OIDB"]._serialized_start = 9674 + _globals["_OIDB"]._serialized_end = 9699 _globals["_SINGLEKEYSIGNATURE"]._serialized_start = 9702 _globals["_SINGLEKEYSIGNATURE"]._serialized_end = 9833 _globals["_INDEXEDSIGNATURE"]._serialized_start = 9835 diff --git a/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.pyi b/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.pyi index 2537b9b4006fa..befe7f92b5ead 100644 --- a/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.pyi +++ b/protos/python/aptos_protos/aptos/transaction/v1/transaction_pb2.pyi @@ -1016,12 +1016,12 @@ class AnyPublicKey(_message.Message): TYPE_ED25519: _ClassVar[AnyPublicKey.Type] TYPE_SECP256K1_ECDSA: _ClassVar[AnyPublicKey.Type] TYPE_SECP256R1_ECDSA: _ClassVar[AnyPublicKey.Type] - TYPE_ZKID: 
_ClassVar[AnyPublicKey.Type] + TYPE_OIDB: _ClassVar[AnyPublicKey.Type] TYPE_UNSPECIFIED: AnyPublicKey.Type TYPE_ED25519: AnyPublicKey.Type TYPE_SECP256K1_ECDSA: AnyPublicKey.Type TYPE_SECP256R1_ECDSA: AnyPublicKey.Type - TYPE_ZKID: AnyPublicKey.Type + TYPE_OIDB: AnyPublicKey.Type TYPE_FIELD_NUMBER: _ClassVar[int] PUBLIC_KEY_FIELD_NUMBER: _ClassVar[int] type: AnyPublicKey.Type @@ -1033,7 +1033,7 @@ class AnyPublicKey(_message.Message): ) -> None: ... class AnySignature(_message.Message): - __slots__ = ["type", "signature", "ed25519", "secp256k1_ecdsa", "webauthn", "zkid"] + __slots__ = ["type", "signature", "ed25519", "secp256k1_ecdsa", "webauthn", "oidb"] class Type(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = [] @@ -1041,24 +1041,24 @@ class AnySignature(_message.Message): TYPE_ED25519: _ClassVar[AnySignature.Type] TYPE_SECP256K1_ECDSA: _ClassVar[AnySignature.Type] TYPE_WEBAUTHN: _ClassVar[AnySignature.Type] - TYPE_ZKID: _ClassVar[AnySignature.Type] + TYPE_OIDB: _ClassVar[AnySignature.Type] TYPE_UNSPECIFIED: AnySignature.Type TYPE_ED25519: AnySignature.Type TYPE_SECP256K1_ECDSA: AnySignature.Type TYPE_WEBAUTHN: AnySignature.Type - TYPE_ZKID: AnySignature.Type + TYPE_OIDB: AnySignature.Type TYPE_FIELD_NUMBER: _ClassVar[int] SIGNATURE_FIELD_NUMBER: _ClassVar[int] ED25519_FIELD_NUMBER: _ClassVar[int] SECP256K1_ECDSA_FIELD_NUMBER: _ClassVar[int] WEBAUTHN_FIELD_NUMBER: _ClassVar[int] - ZKID_FIELD_NUMBER: _ClassVar[int] + OIDB_FIELD_NUMBER: _ClassVar[int] type: AnySignature.Type signature: bytes ed25519: Ed25519 secp256k1_ecdsa: Secp256k1Ecdsa webauthn: WebAuthn - zkid: ZkId + oidb: Oidb def __init__( self, type: _Optional[_Union[AnySignature.Type, str]] = ..., @@ -1066,7 +1066,7 @@ class AnySignature(_message.Message): ed25519: _Optional[_Union[Ed25519, _Mapping]] = ..., secp256k1_ecdsa: _Optional[_Union[Secp256k1Ecdsa, _Mapping]] = ..., webauthn: _Optional[_Union[WebAuthn, _Mapping]] = ..., - zkid: _Optional[_Union[ZkId, _Mapping]] = ..., + oidb: _Optional[_Union[Oidb, _Mapping]] = ..., ) -> None: ... class Ed25519(_message.Message): @@ -1087,7 +1087,7 @@ class WebAuthn(_message.Message): signature: bytes def __init__(self, signature: _Optional[bytes] = ...) -> None: ... -class ZkId(_message.Message): +class Oidb(_message.Message): __slots__ = ["signature"] SIGNATURE_FIELD_NUMBER: _ClassVar[int] signature: bytes diff --git a/protos/rust/src/pb/aptos.transaction.v1.rs b/protos/rust/src/pb/aptos.transaction.v1.rs index 48147cffcfe5a..2807c72f1762c 100644 --- a/protos/rust/src/pb/aptos.transaction.v1.rs +++ b/protos/rust/src/pb/aptos.transaction.v1.rs @@ -870,7 +870,7 @@ pub mod any_public_key { Ed25519 = 1, Secp256k1Ecdsa = 2, Secp256r1Ecdsa = 3, - Zkid = 4, + Oidb = 4, } impl Type { /// String value of the enum field names used in the ProtoBuf definition. @@ -883,7 +883,7 @@ pub mod any_public_key { Type::Ed25519 => "TYPE_ED25519", Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", Type::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA", - Type::Zkid => "TYPE_ZKID", + Type::Oidb => "TYPE_OIDB", } } /// Creates an enum from field names used in the ProtoBuf definition. 
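(An illustrative aside, not part of the diff: with the regenerated Rust bindings above, the renamed OIDB variant would be constructed roughly as below. The `aptos_protos::transaction::v1` module path is assumed from the crate layout; the oneof keeps tag 6, so the wire format is unchanged and only the generated identifiers differ.)

```rust
use aptos_protos::transaction::v1::{any_signature, AnySignature, Oidb};

// A sketch of building the renamed signature variant. Field names follow the
// prost-generated code shown in this diff.
#[allow(deprecated)] // the raw `signature` field (tag 2) is marked deprecated
fn oidb_any_signature(sig_bytes: Vec<u8>) -> AnySignature {
    AnySignature {
        r#type: any_signature::Type::Oidb as i32,
        // Deprecated raw bytes field, left empty in favor of the oneof below.
        signature: vec![],
        signature_variant: Some(any_signature::SignatureVariant::Oidb(Oidb {
            signature: sig_bytes,
        })),
    }
}
```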
@@ -893,7 +893,7 @@ pub mod any_public_key { "TYPE_ED25519" => Some(Self::Ed25519), "TYPE_SECP256K1_ECDSA" => Some(Self::Secp256k1Ecdsa), "TYPE_SECP256R1_ECDSA" => Some(Self::Secp256r1Ecdsa), - "TYPE_ZKID" => Some(Self::Zkid), + "TYPE_OIDB" => Some(Self::Oidb), _ => None, } } @@ -922,7 +922,7 @@ pub mod any_signature { Ed25519 = 1, Secp256k1Ecdsa = 2, Webauthn = 3, - Zkid = 4, + Oidb = 4, } impl Type { /// String value of the enum field names used in the ProtoBuf definition. @@ -935,7 +935,7 @@ pub mod any_signature { Type::Ed25519 => "TYPE_ED25519", Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", Type::Webauthn => "TYPE_WEBAUTHN", - Type::Zkid => "TYPE_ZKID", + Type::Oidb => "TYPE_OIDB", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -945,7 +945,7 @@ pub mod any_signature { "TYPE_ED25519" => Some(Self::Ed25519), "TYPE_SECP256K1_ECDSA" => Some(Self::Secp256k1Ecdsa), "TYPE_WEBAUTHN" => Some(Self::Webauthn), - "TYPE_ZKID" => Some(Self::Zkid), + "TYPE_OIDB" => Some(Self::Oidb), _ => None, } } @@ -961,7 +961,7 @@ pub mod any_signature { #[prost(message, tag="5")] Webauthn(super::WebAuthn), #[prost(message, tag="6")] - Zkid(super::ZkId), + Oidb(super::Oidb), } } #[allow(clippy::derive_partial_eq_without_eq)] @@ -984,7 +984,7 @@ pub struct WebAuthn { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct ZkId { +pub struct Oidb { #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec, } @@ -1878,7 +1878,7 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x32, 0x35, 0x36, 0x4b, 0x31, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x5a, 0x4b, 0x49, 0x44, 0x10, 0x04, 0x22, 0xea, 0x03, 0x0a, 0x0c, 0x41, 0x6e, 0x79, 0x53, 0x69, + 0x4f, 0x49, 0x44, 0x42, 0x10, 0x04, 0x22, 0xea, 0x03, 0x0a, 0x0c, 0x41, 0x6e, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, @@ -1898,16 +1898,16 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x65, 0x62, 0x41, 0x75, 0x74, 0x68, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x77, 0x65, 0x62, 0x61, 0x75, 0x74, 0x68, 0x6e, - 0x12, 0x30, 0x0a, 0x04, 0x7a, 0x6b, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x12, 0x30, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x62, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x5a, 0x6b, 0x49, 0x64, 0x48, 0x00, 0x52, 0x04, 0x7a, 0x6b, - 0x69, 0x64, 0x22, 0x6a, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x69, 0x64, 0x62, 0x48, 0x00, 0x52, 0x04, 0x6f, 0x69, + 0x64, 0x62, 0x22, 0x6a, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x44, 0x32, 0x35, 
0x35, 0x31, 0x39, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x4b, 0x31, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x45, 0x42, 0x41, 0x55, 0x54, 0x48, 0x4e, 0x10, 0x03, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x5a, 0x4b, 0x49, 0x44, 0x10, 0x04, 0x42, 0x13, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x49, 0x44, 0x42, 0x10, 0x04, 0x42, 0x13, 0x0a, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x07, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1917,7 +1917,7 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x28, 0x0a, 0x08, 0x57, 0x65, 0x62, 0x41, 0x75, 0x74, 0x68, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x24, 0x0a, 0x04, 0x5a, 0x6b, 0x49, 0x64, 0x12, 0x1c, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x24, 0x0a, 0x04, 0x4f, 0x69, 0x64, 0x62, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x12, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, diff --git a/protos/rust/src/pb/aptos.transaction.v1.serde.rs b/protos/rust/src/pb/aptos.transaction.v1.serde.rs index 3676f0c8d0443..9cbe38b65b792 100644 --- a/protos/rust/src/pb/aptos.transaction.v1.serde.rs +++ b/protos/rust/src/pb/aptos.transaction.v1.serde.rs @@ -364,7 +364,7 @@ impl serde::Serialize for any_public_key::Type { Self::Ed25519 => "TYPE_ED25519", Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", Self::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA", - Self::Zkid => "TYPE_ZKID", + Self::Oidb => "TYPE_OIDB", }; serializer.serialize_str(variant) } @@ -380,7 +380,7 @@ impl<'de> serde::Deserialize<'de> for any_public_key::Type { "TYPE_ED25519", "TYPE_SECP256K1_ECDSA", "TYPE_SECP256R1_ECDSA", - "TYPE_ZKID", + "TYPE_OIDB", ]; struct GeneratedVisitor; @@ -427,7 +427,7 @@ impl<'de> serde::Deserialize<'de> for any_public_key::Type { "TYPE_ED25519" => Ok(any_public_key::Type::Ed25519), "TYPE_SECP256K1_ECDSA" => Ok(any_public_key::Type::Secp256k1Ecdsa), "TYPE_SECP256R1_ECDSA" => Ok(any_public_key::Type::Secp256r1Ecdsa), - "TYPE_ZKID" => Ok(any_public_key::Type::Zkid), + "TYPE_OIDB" => Ok(any_public_key::Type::Oidb), _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), } } @@ -472,8 +472,8 @@ impl serde::Serialize for AnySignature { any_signature::SignatureVariant::Webauthn(v) => { struct_ser.serialize_field("webauthn", v)?; } - any_signature::SignatureVariant::Zkid(v) => { - struct_ser.serialize_field("zkid", v)?; + any_signature::SignatureVariant::Oidb(v) => { + struct_ser.serialize_field("oidb", v)?; } } } @@ -493,7 +493,7 @@ impl<'de> serde::Deserialize<'de> for AnySignature { "secp256k1_ecdsa", "secp256k1Ecdsa", "webauthn", - "zkid", + "oidb", ]; #[allow(clippy::enum_variant_names)] @@ -503,7 +503,7 @@ impl<'de> serde::Deserialize<'de> for AnySignature { Ed25519, Secp256k1Ecdsa, Webauthn, - Zkid, + Oidb, } impl<'de> serde::Deserialize<'de> for GeneratedField { fn 
deserialize(deserializer: D) -> std::result::Result @@ -530,7 +530,7 @@ impl<'de> serde::Deserialize<'de> for AnySignature { "ed25519" => Ok(GeneratedField::Ed25519), "secp256k1Ecdsa" | "secp256k1_ecdsa" => Ok(GeneratedField::Secp256k1Ecdsa), "webauthn" => Ok(GeneratedField::Webauthn), - "zkid" => Ok(GeneratedField::Zkid), + "oidb" => Ok(GeneratedField::Oidb), _ => Err(serde::de::Error::unknown_field(value, FIELDS)), } } @@ -590,11 +590,11 @@ impl<'de> serde::Deserialize<'de> for AnySignature { signature_variant__ = map.next_value::<::std::option::Option<_>>()?.map(any_signature::SignatureVariant::Webauthn) ; } - GeneratedField::Zkid => { + GeneratedField::Oidb => { if signature_variant__.is_some() { - return Err(serde::de::Error::duplicate_field("zkid")); + return Err(serde::de::Error::duplicate_field("oidb")); } - signature_variant__ = map.next_value::<::std::option::Option<_>>()?.map(any_signature::SignatureVariant::Zkid) + signature_variant__ = map.next_value::<::std::option::Option<_>>()?.map(any_signature::SignatureVariant::Oidb) ; } } @@ -620,7 +620,7 @@ impl serde::Serialize for any_signature::Type { Self::Ed25519 => "TYPE_ED25519", Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", Self::Webauthn => "TYPE_WEBAUTHN", - Self::Zkid => "TYPE_ZKID", + Self::Oidb => "TYPE_OIDB", }; serializer.serialize_str(variant) } @@ -636,7 +636,7 @@ impl<'de> serde::Deserialize<'de> for any_signature::Type { "TYPE_ED25519", "TYPE_SECP256K1_ECDSA", "TYPE_WEBAUTHN", - "TYPE_ZKID", + "TYPE_OIDB", ]; struct GeneratedVisitor; @@ -683,7 +683,7 @@ impl<'de> serde::Deserialize<'de> for any_signature::Type { "TYPE_ED25519" => Ok(any_signature::Type::Ed25519), "TYPE_SECP256K1_ECDSA" => Ok(any_signature::Type::Secp256k1Ecdsa), "TYPE_WEBAUTHN" => Ok(any_signature::Type::Webauthn), - "TYPE_ZKID" => Ok(any_signature::Type::Zkid), + "TYPE_OIDB" => Ok(any_signature::Type::Oidb), _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), } } @@ -5323,6 +5323,99 @@ impl<'de> serde::Deserialize<'de> for multisig_transaction_payload::Type { deserializer.deserialize_any(GeneratedVisitor) } } +impl serde::Serialize for Oidb { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.signature.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.transaction.v1.Oidb", len)?; + if !self.signature.is_empty() { + struct_ser.serialize_field("signature", pbjson::private::base64::encode(&self.signature).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for Oidb { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "signature", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Signature, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "signature" => Ok(GeneratedField::Signature), + _ => 
Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = Oidb; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.transaction.v1.Oidb") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut signature__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::Signature => { + if signature__.is_some() { + return Err(serde::de::Error::duplicate_field("signature")); + } + signature__ = + Some(map.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + } + } + Ok(Oidb { + signature: signature__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("aptos.transaction.v1.Oidb", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for ScriptPayload { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -8637,96 +8730,3 @@ impl<'de> serde::Deserialize<'de> for WriteTableItem { deserializer.deserialize_struct("aptos.transaction.v1.WriteTableItem", FIELDS, GeneratedVisitor) } } -impl serde::Serialize for ZkId { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.signature.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("aptos.transaction.v1.ZkId", len)?; - if !self.signature.is_empty() { - struct_ser.serialize_field("signature", pbjson::private::base64::encode(&self.signature).as_str())?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ZkId { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "signature", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Signature, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "signature" => Ok(GeneratedField::Signature), - _ => Err(serde::de::Error::unknown_field(value, FIELDS)), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ZkId; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct aptos.transaction.v1.ZkId") - } - - fn visit_map(self, mut map: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut signature__ = None; - while let Some(k) = map.next_key()? 
{ - match k { - GeneratedField::Signature => { - if signature__.is_some() { - return Err(serde::de::Error::duplicate_field("signature")); - } - signature__ = - Some(map.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) - ; - } - } - } - Ok(ZkId { - signature: signature__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("aptos.transaction.v1.ZkId", FIELDS, GeneratedVisitor) - } -} diff --git a/protos/typescript/src/aptos/transaction/v1/transaction.ts b/protos/typescript/src/aptos/transaction/v1/transaction.ts index e3828d8e7152c..13a27b1d4afc3 100644 --- a/protos/typescript/src/aptos/transaction/v1/transaction.ts +++ b/protos/typescript/src/aptos/transaction/v1/transaction.ts @@ -872,7 +872,7 @@ export enum AnyPublicKey_Type { TYPE_ED25519 = 1, TYPE_SECP256K1_ECDSA = 2, TYPE_SECP256R1_ECDSA = 3, - TYPE_ZKID = 4, + TYPE_OIDB = 4, UNRECOGNIZED = -1, } @@ -891,8 +891,8 @@ export function anyPublicKey_TypeFromJSON(object: any): AnyPublicKey_Type { case "TYPE_SECP256R1_ECDSA": return AnyPublicKey_Type.TYPE_SECP256R1_ECDSA; case 4: - case "TYPE_ZKID": - return AnyPublicKey_Type.TYPE_ZKID; + case "TYPE_OIDB": + return AnyPublicKey_Type.TYPE_OIDB; case -1: case "UNRECOGNIZED": default: @@ -910,8 +910,8 @@ export function anyPublicKey_TypeToJSON(object: AnyPublicKey_Type): string { return "TYPE_SECP256K1_ECDSA"; case AnyPublicKey_Type.TYPE_SECP256R1_ECDSA: return "TYPE_SECP256R1_ECDSA"; - case AnyPublicKey_Type.TYPE_ZKID: - return "TYPE_ZKID"; + case AnyPublicKey_Type.TYPE_OIDB: + return "TYPE_OIDB"; case AnyPublicKey_Type.UNRECOGNIZED: default: return "UNRECOGNIZED"; @@ -932,7 +932,7 @@ export interface AnySignature { ed25519?: Ed25519 | undefined; secp256k1Ecdsa?: Secp256k1Ecdsa | undefined; webauthn?: WebAuthn | undefined; - zkid?: ZkId | undefined; + oidb?: Oidb | undefined; } export enum AnySignature_Type { @@ -940,7 +940,7 @@ export enum AnySignature_Type { TYPE_ED25519 = 1, TYPE_SECP256K1_ECDSA = 2, TYPE_WEBAUTHN = 3, - TYPE_ZKID = 4, + TYPE_OIDB = 4, UNRECOGNIZED = -1, } @@ -959,8 +959,8 @@ export function anySignature_TypeFromJSON(object: any): AnySignature_Type { case "TYPE_WEBAUTHN": return AnySignature_Type.TYPE_WEBAUTHN; case 4: - case "TYPE_ZKID": - return AnySignature_Type.TYPE_ZKID; + case "TYPE_OIDB": + return AnySignature_Type.TYPE_OIDB; case -1: case "UNRECOGNIZED": default: @@ -978,8 +978,8 @@ export function anySignature_TypeToJSON(object: AnySignature_Type): string { return "TYPE_SECP256K1_ECDSA"; case AnySignature_Type.TYPE_WEBAUTHN: return "TYPE_WEBAUTHN"; - case AnySignature_Type.TYPE_ZKID: - return "TYPE_ZKID"; + case AnySignature_Type.TYPE_OIDB: + return "TYPE_OIDB"; case AnySignature_Type.UNRECOGNIZED: default: return "UNRECOGNIZED"; @@ -998,7 +998,7 @@ export interface WebAuthn { signature?: Uint8Array | undefined; } -export interface ZkId { +export interface Oidb { signature?: Uint8Array | undefined; } @@ -7719,7 +7719,7 @@ function createBaseAnySignature(): AnySignature { ed25519: undefined, secp256k1Ecdsa: undefined, webauthn: undefined, - zkid: undefined, + oidb: undefined, }; } @@ -7740,8 +7740,8 @@ export const AnySignature = { if (message.webauthn !== undefined) { WebAuthn.encode(message.webauthn, writer.uint32(42).fork()).ldelim(); } - if (message.zkid !== undefined) { - ZkId.encode(message.zkid, writer.uint32(50).fork()).ldelim(); + if (message.oidb !== undefined) { + Oidb.encode(message.oidb, writer.uint32(50).fork()).ldelim(); } return writer; }, @@ -7793,7 +7793,7 @@ export const AnySignature = { break; } - message.zkid = 
ZkId.decode(reader, reader.uint32()); + message.oidb = Oidb.decode(reader, reader.uint32()); continue; } if ((tag & 7) === 4 || tag === 0) { @@ -7843,7 +7843,7 @@ export const AnySignature = { ed25519: isSet(object.ed25519) ? Ed25519.fromJSON(object.ed25519) : undefined, secp256k1Ecdsa: isSet(object.secp256k1Ecdsa) ? Secp256k1Ecdsa.fromJSON(object.secp256k1Ecdsa) : undefined, webauthn: isSet(object.webauthn) ? WebAuthn.fromJSON(object.webauthn) : undefined, - zkid: isSet(object.zkid) ? ZkId.fromJSON(object.zkid) : undefined, + oidb: isSet(object.oidb) ? Oidb.fromJSON(object.oidb) : undefined, }; }, @@ -7864,8 +7864,8 @@ export const AnySignature = { if (message.webauthn !== undefined) { obj.webauthn = WebAuthn.toJSON(message.webauthn); } - if (message.zkid !== undefined) { - obj.zkid = ZkId.toJSON(message.zkid); + if (message.oidb !== undefined) { + obj.oidb = Oidb.toJSON(message.oidb); } return obj; }, @@ -7886,7 +7886,7 @@ export const AnySignature = { message.webauthn = (object.webauthn !== undefined && object.webauthn !== null) ? WebAuthn.fromPartial(object.webauthn) : undefined; - message.zkid = (object.zkid !== undefined && object.zkid !== null) ? ZkId.fromPartial(object.zkid) : undefined; + message.oidb = (object.oidb !== undefined && object.oidb !== null) ? Oidb.fromPartial(object.oidb) : undefined; return message; }, }; @@ -8158,22 +8158,22 @@ export const WebAuthn = { }, }; -function createBaseZkId(): ZkId { +function createBaseOidb(): Oidb { return { signature: new Uint8Array(0) }; } -export const ZkId = { - encode(message: ZkId, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { +export const Oidb = { + encode(message: Oidb, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { if (message.signature !== undefined && message.signature.length !== 0) { writer.uint32(10).bytes(message.signature); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ZkId { + decode(input: _m0.Reader | Uint8Array, length?: number): Oidb { const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseZkId(); + const message = createBaseOidb(); while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -8194,40 +8194,40 @@ export const ZkId = { }, // encodeTransform encodes a source of message objects. - // Transform - async *encodeTransform(source: AsyncIterable | Iterable): AsyncIterable { + // Transform + async *encodeTransform(source: AsyncIterable | Iterable): AsyncIterable { for await (const pkt of source) { if (globalThis.Array.isArray(pkt)) { for (const p of (pkt as any)) { - yield* [ZkId.encode(p).finish()]; + yield* [Oidb.encode(p).finish()]; } } else { - yield* [ZkId.encode(pkt as any).finish()]; + yield* [Oidb.encode(pkt as any).finish()]; } } }, // decodeTransform decodes a source of encoded messages. - // Transform + // Transform async *decodeTransform( source: AsyncIterable | Iterable, - ): AsyncIterable { + ): AsyncIterable { for await (const pkt of source) { if (globalThis.Array.isArray(pkt)) { for (const p of (pkt as any)) { - yield* [ZkId.decode(p)]; + yield* [Oidb.decode(p)]; } } else { - yield* [ZkId.decode(pkt as any)]; + yield* [Oidb.decode(pkt as any)]; } } }, - fromJSON(object: any): ZkId { + fromJSON(object: any): Oidb { return { signature: isSet(object.signature) ? 
bytesFromBase64(object.signature) : new Uint8Array(0) }; }, - toJSON(message: ZkId): unknown { + toJSON(message: Oidb): unknown { const obj: any = {}; if (message.signature !== undefined && message.signature.length !== 0) { obj.signature = base64FromBytes(message.signature); @@ -8235,11 +8235,11 @@ export const ZkId = { return obj; }, - create(base?: DeepPartial<ZkId>): ZkId { - return ZkId.fromPartial(base ?? {}); + create(base?: DeepPartial<Oidb>): Oidb { + return Oidb.fromPartial(base ?? {}); }, - fromPartial(object: DeepPartial<ZkId>): ZkId { - const message = createBaseZkId(); + fromPartial(object: DeepPartial<Oidb>): Oidb { + const message = createBaseOidb(); message.signature = object.signature ?? new Uint8Array(0); return message; }, diff --git a/state-sync/data-streaming-service/src/data_notification.rs b/state-sync/data-streaming-service/src/data_notification.rs index df217ba9c442c..f3ea476086d69 100644 --- a/state-sync/data-streaming-service/src/data_notification.rs +++ b/state-sync/data-streaming-service/src/data_notification.rs @@ -88,6 +88,11 @@ impl DataClientRequest { } } + /// Returns true iff the request is a new data request + pub fn is_new_data_request(&self) -> bool { + self.is_optimistic_fetch_request() || self.is_subscription_request() + } + /// Returns true iff the request is an optimistic fetch request pub fn is_optimistic_fetch_request(&self) -> bool { matches!(self, DataClientRequest::NewTransactionsWithProof(_)) diff --git a/state-sync/data-streaming-service/src/data_stream.rs b/state-sync/data-streaming-service/src/data_stream.rs index d4c7e77c46f68..abf6966fac71a 100644 --- a/state-sync/data-streaming-service/src/data_stream.rs +++ b/state-sync/data-streaming-service/src/data_stream.rs @@ -13,6 +13,7 @@ use crate::{ TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, TransactionsWithProofRequest, }, + dynamic_prefetching::DynamicPrefetchingState, error::Error, logging::{LogEntry, LogEvent, LogSchema}, metrics, @@ -119,6 +120,9 @@ pub struct DataStream { // The time service to track elapsed time (e.g., during stream lag checks) time_service: TimeService, + + // The dynamic prefetching state (if enabled) + dynamic_prefetching_state: DynamicPrefetchingState, } impl DataStream { @@ -141,6 +145,10 @@ impl DataStream { // Create a new stream engine let stream_engine = StreamEngine::new(data_stream_config, stream_request, advertised_data)?; + // Create the dynamic prefetching state + let dynamic_prefetching_state = + DynamicPrefetchingState::new(data_stream_config, time_service.clone()); + // Create a new data stream let data_stream = Self { data_client_config, @@ -159,6 +167,7 @@ impl DataStream { send_failure: false, subscription_stream_lag: None, time_service, + dynamic_prefetching_state, }; Ok((data_stream, data_stream_listener)) @@ -255,17 +264,6 @@ impl DataStream { Ok(()) } - /// Returns the maximum number of concurrent requests that can be executing - /// at any given time. - fn get_max_concurrent_requests(&self) -> u64 { - match self.stream_engine { - StreamEngine::StateStreamEngine(_) => { - self.streaming_service_config.max_concurrent_state_requests - }, - _ => self.streaming_service_config.max_concurrent_requests, - } - } - /// Creates and sends a batch of aptos data client requests to the network fn create_and_send_client_requests( &mut self, @@ -285,7 +283,8 @@ impl DataStream { // Otherwise, calculate the max number of requests to send based on // the max concurrent requests and the number of pending request slots. 
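// For example (numbers illustrative only): with a concurrency limit of 10, 6 requests already in-flight, and 3 free pending slots, min(10 - 6, 3) = 3 new requests are created and sent.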
let remaining_concurrent_requests = self - .get_max_concurrent_requests() + .dynamic_prefetching_state + .get_max_concurrent_requests(&self.stream_engine) .saturating_sub(num_in_flight_requests); let remaining_request_slots = max_pending_requests.saturating_sub(num_pending_requests); min(remaining_concurrent_requests, remaining_request_slots) @@ -394,8 +393,11 @@ impl DataStream { pending_client_response } - // TODO(joshlind): this function shouldn't be blocking when trying to send! If there are - // multiple streams, a single blocked stream could cause them all to block. + // TODO(joshlind): this function shouldn't be blocking when trying to send. + // If there are multiple streams, a single blocked stream could cause them + // all to block. This is acceptable for now (because there is only ever + // a single stream in use by the driver) but it should be fixed if we want + // to generalize this for multiple streams. async fn send_data_notification( &mut self, data_notification: DataNotification, @@ -453,85 +455,89 @@ impl DataStream { return Ok(()); // There's nothing left to do } - // Process any ready data responses - for _ in 0..self.get_num_pending_data_requests()? { - if let Some(pending_response) = self.pop_pending_response_queue()? { - // Get the client request and response information - let maybe_client_response = pending_response.lock().client_response.take(); - let client_response = maybe_client_response.ok_or_else(|| { - Error::UnexpectedErrorEncountered("The client response should be ready!".into()) - })?; - let client_request = &pending_response.lock().client_request.clone(); - - // Process the client response - match client_response { - Ok(client_response) => { - // Sanity check and process the response - if sanity_check_client_response_type(client_request, &client_response) { - // The response is valid, send the data notification to the client - let client_response_payload = client_response.payload.clone(); - self.send_data_notification_to_client(client_request, client_response) - .await?; - - // If the response wasn't enough to satisfy the original request (e.g., - // it was truncated), missing data should be requested. - match self - .request_missing_data(client_request, &client_response_payload) - { - Ok(missing_data_requested) => { - if missing_data_requested { - break; // We're now head of line blocked on the missing data - } - }, - Err(error) => { - warn!(LogSchema::new(LogEntry::ReceivedDataResponse) - .stream_id(self.data_stream_id) - .event(LogEvent::Error) - .error(&error) - .message( - "Failed to determine if missing data was requested!" - )); - }, - } - - // If the request was a subscription request and the subscription - // stream is lagging behind the data advertisements, the stream - // engine should be notified (e.g., so that it can catch up). - if client_request.is_subscription_request() { - if let Err(error) = self.check_subscription_stream_lag( - &global_data_summary, - &client_response_payload, - ) { - self.notify_new_data_request_error(client_request, error)?; - break; // We're now head of line blocked on the failed stream + // Continuously process any ready data responses + while let Some(pending_response) = self.pop_pending_response_queue()? 
{ + // Get the client request and response information + let maybe_client_response = pending_response.lock().client_response.take(); + let client_response = maybe_client_response.ok_or_else(|| { + Error::UnexpectedErrorEncountered("The client response should be ready!".into()) + })?; + let client_request = &pending_response.lock().client_request.clone(); + + // Process the client response + match client_response { + Ok(client_response) => { + // Sanity check and process the response + if sanity_check_client_response_type(client_request, &client_response) { + // If the response wasn't enough to satisfy the original request (e.g., + // it was truncated), missing data should be requested. + let mut head_of_line_blocked = false; + match self.request_missing_data(client_request, &client_response.payload) { + Ok(missing_data_requested) => { + if missing_data_requested { + head_of_line_blocked = true; // We're now head of line blocked on the missing data } + }, + Err(error) => { + warn!(LogSchema::new(LogEntry::ReceivedDataResponse) + .stream_id(self.data_stream_id) + .event(LogEvent::Error) + .error(&error) + .message("Failed to determine if missing data was requested!")); + }, + } + + // If the request was a subscription request and the subscription + // stream is lagging behind the data advertisements, the stream + // engine should be notified (e.g., so that it can catch up). + if client_request.is_subscription_request() { + if let Err(error) = self.check_subscription_stream_lag( + &global_data_summary, + &client_response.payload, + ) { + self.notify_new_data_request_error(client_request, error)?; + head_of_line_blocked = true; // We're now head of line blocked on the failed stream } - } else { - // The sanity check failed - self.handle_sanity_check_failure( - client_request, - &client_response.context, - )?; - break; // We're now head of line blocked on the failed request } - }, - Err(error) => { - // Handle the error depending on the request type - if client_request.is_subscription_request() - || client_request.is_optimistic_fetch_request() - { - // The request was for new data. We should notify the - // stream engine and clear the requests queue. - self.notify_new_data_request_error(client_request, error)?; - } else { - // Otherwise, we should handle the error and simply retry - self.handle_data_client_error(client_request, &error)?; + + // The response is valid, send the data notification to the client + self.send_data_notification_to_client(client_request, client_response) + .await?; + + // If the request is for specific data, increase the prefetching limit. + // Note: we don't increase the limit for new data requests because + // those don't invoke the prefetcher (as we're already up-to-date). + if !client_request.is_new_data_request() { + self.dynamic_prefetching_state + .increase_max_concurrent_requests(); } + + // If we're head of line blocked, we should return early + if head_of_line_blocked { + break; + } + } else { + // The sanity check failed + self.handle_sanity_check_failure(client_request, &client_response.context)?; break; // We're now head of line blocked on the failed request - }, - } - } else { - break; // The first response hasn't arrived yet + } + }, + Err(error) => { + // Handle the error depending on the request type + if client_request.is_new_data_request() { + // The request was for new data. We should notify the + // stream engine and clear the requests queue. 
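+ // (A new data request is an optimistic fetch or subscription request, + // i.e., DataClientRequest::is_new_data_request() returns true.)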
+ self.notify_new_data_request_error(client_request, error)?; + } else { + // Decrease the prefetching limit on an error + self.dynamic_prefetching_state + .decrease_max_concurrent_requests(); + + // Handle the error and simply retry + self.handle_data_client_error(client_request, &error)?; + } + break; // We're now head of line blocked on the failed request + }, } } @@ -705,6 +711,7 @@ impl DataStream { data_client_request: &DataClientRequest, data_client_error: &aptos_data_client::error::Error, ) -> Result<(), Error> { + // Log the error warn!(LogSchema::new(LogEntry::ReceivedDataResponse) .stream_id(self.data_stream_id) .event(LogEvent::Error) diff --git a/state-sync/data-streaming-service/src/dynamic_prefetching.rs b/state-sync/data-streaming-service/src/dynamic_prefetching.rs new file mode 100644 index 0000000000000..374025a282134 --- /dev/null +++ b/state-sync/data-streaming-service/src/dynamic_prefetching.rs @@ -0,0 +1,740 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{metrics, stream_engine::StreamEngine}; +use aptos_config::config::{DataStreamingServiceConfig, DynamicPrefetchingConfig}; +use aptos_time_service::{TimeService, TimeServiceTrait}; +use std::{ + cmp::{max, min}, + time::{Duration, Instant}, +}; + +/// A simple container for the dynamic prefetching state +#[derive(Debug)] +pub struct DynamicPrefetchingState { + // The data streaming service config + streaming_service_config: DataStreamingServiceConfig, + + // The instant the last timeout occurred (if any) + last_timeout_instant: Option<Instant>, + + // The maximum number of concurrent requests that can be executing at any given time + max_dynamic_concurrent_requests: u64, + + // The time service to track elapsed time (e.g., during stream lag checks) + time_service: TimeService, +} + +impl DynamicPrefetchingState { + pub fn new( + data_streaming_service_config: DataStreamingServiceConfig, + time_service: TimeService, + ) -> Self { + // Get the initial prefetching value from the config + let max_dynamic_concurrent_requests = data_streaming_service_config + .dynamic_prefetching + .initial_prefetching_value; + + // Create and return the new dynamic prefetching state + Self { + streaming_service_config: data_streaming_service_config, + last_timeout_instant: None, + max_dynamic_concurrent_requests, + time_service, + } + } + + /// A simple helper function that returns the dynamic prefetching config + fn get_dynamic_prefetching_config(&self) -> &DynamicPrefetchingConfig { + &self.streaming_service_config.dynamic_prefetching + } + + /// Returns true iff dynamic prefetching is enabled + fn is_dynamic_prefetching_enabled(&self) -> bool { + self.get_dynamic_prefetching_config() + .enable_dynamic_prefetching + } + + /// Returns true iff the prefetching value is currently frozen (i.e., + /// to avoid overly increasing the value near saturation). Freezing + /// occurs after a timeout and lasts for a configured duration. 
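+ /// For example, with timeout_freeze_duration_secs = 10, a timeout at time T + /// freezes all increases until T + 10s (decreases still apply and refresh the freeze).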
+ fn is_prefetching_value_frozen(&self) -> bool { + match self.last_timeout_instant { + Some(last_failure_time) => { + // Get the time since the last failure and max freeze duration + let time_since_last_failure = + self.time_service.now().duration_since(last_failure_time); + let max_freeze_duration = Duration::from_secs( + self.get_dynamic_prefetching_config() + .timeout_freeze_duration_secs, + ); + + // Check if the time since the last failure is less than the freeze duration + time_since_last_failure < max_freeze_duration + }, + None => false, // No failures have occurred + } + } + + /// Returns the number of maximum concurrent requests that can be executing + /// at any given time. Depending on if dynamic prefetching is enabled, this + /// value will be dynamic or static (i.e., config defined). + pub fn get_max_concurrent_requests(&self, stream_engine: &StreamEngine) -> u64 { + // If dynamic prefetching is disabled, use the static values defined + // in the config. Otherwise get the current dynamic max value. + let max_concurrent_requests = if !self.is_dynamic_prefetching_enabled() { + match stream_engine { + StreamEngine::StateStreamEngine(_) => { + // Use the configured max for state value requests + self.streaming_service_config.max_concurrent_state_requests + }, + _ => { + // Use the configured max for all other requests + self.streaming_service_config.max_concurrent_requests + }, + } + } else { + // Otherwise, return the current max value + self.max_dynamic_concurrent_requests + }; + + // Update the metrics for the max concurrent requests + metrics::set_max_concurrent_requests(max_concurrent_requests); + + max_concurrent_requests + } + + /// Increases the maximum number of concurrent requests that should be executing. + /// This is typically called after a successful response is received. + pub fn increase_max_concurrent_requests(&mut self) { + // If dynamic prefetching is disabled, or the value is currently frozen, do nothing + if !self.is_dynamic_prefetching_enabled() || self.is_prefetching_value_frozen() { + return; + } + + // Otherwise, get and increase the current max + let dynamic_prefetching_config = self.get_dynamic_prefetching_config(); + let amount_to_increase = dynamic_prefetching_config.prefetching_value_increase; + let max_dynamic_concurrent_requests = self + .max_dynamic_concurrent_requests + .saturating_add(amount_to_increase); + + // Bound the value by the configured maximum + let max_prefetching_value = dynamic_prefetching_config.max_prefetching_value; + self.max_dynamic_concurrent_requests = + min(max_dynamic_concurrent_requests, max_prefetching_value); + } + + /// Decreases the maximum number of concurrent requests that should be executing. + /// This is typically called after a timeout is received. 
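+ /// For example, with prefetching_value_decrease = 2 and min_prefetching_value = 1, + /// repeated timeouts step a limit of 6 down to 4, then 2, then 1 (where it saturates).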
+ pub fn decrease_max_concurrent_requests(&mut self) { + // If dynamic prefetching is disabled, do nothing + if !self.is_dynamic_prefetching_enabled() { + return; + } + + // Update the last failure time + self.last_timeout_instant = Some(self.time_service.now()); + + // Otherwise, get and decrease the current max + let dynamic_prefetching_config = self.get_dynamic_prefetching_config(); + let amount_to_decrease = dynamic_prefetching_config.prefetching_value_decrease; + let max_dynamic_concurrent_requests = self + .max_dynamic_concurrent_requests + .saturating_sub(amount_to_decrease); + + // Bound the value by the configured minimum + let min_prefetching_value = dynamic_prefetching_config.min_prefetching_value; + self.max_dynamic_concurrent_requests = + max(max_dynamic_concurrent_requests, min_prefetching_value); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::streaming_client::{ + GetAllStatesRequest, GetAllTransactionsOrOutputsRequest, StreamRequest, + }; + use aptos_data_client::global_summary::AdvertisedData; + + #[test] + fn test_initialize_prefetching_state() { + // Create a data streaming service config with dynamic prefetching enabled + let initial_prefetching_value = 5; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Verify that the state was initialized correctly + assert_eq!( + dynamic_prefetching_state.streaming_service_config, + data_streaming_service_config + ); + assert_eq!(dynamic_prefetching_state.last_timeout_instant, None); + assert_eq!( + dynamic_prefetching_state.max_dynamic_concurrent_requests, + initial_prefetching_value + ); + } + + #[test] + fn test_is_dynamic_prefetching_enabled() { + // Create a data streaming service config with dynamic prefetching enabled + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Verify that dynamic prefetching is enabled + assert!(dynamic_prefetching_state.is_dynamic_prefetching_enabled()); + + // Create a data streaming service config with dynamic prefetching disabled + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Verify that dynamic prefetching is disabled + assert!(!dynamic_prefetching_state.is_dynamic_prefetching_enabled()); + } + + #[test] + fn test_is_prefetching_value_frozen() { + // Create a data streaming service config with dynamic prefetching enabled + let timeout_freeze_duration_secs = 10; + let 
dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + timeout_freeze_duration_secs, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let time_service = TimeService::mock(); + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, time_service.clone()); + + // Verify that the prefetching value is not frozen initially + assert!(!dynamic_prefetching_state.is_prefetching_value_frozen()); + + // Update the prefetcher state to simulate a timeout + dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the prefetching value is frozen + assert!(dynamic_prefetching_state.is_prefetching_value_frozen()); + + // Elapse less time than the freeze duration + let time_service = time_service.into_mock(); + time_service.advance_secs(timeout_freeze_duration_secs - 1); + + // Verify that the prefetching value is still frozen + assert!(dynamic_prefetching_state.is_prefetching_value_frozen()); + + // Elapse more time than the freeze duration + time_service.advance_secs(timeout_freeze_duration_secs + 1); + + // Verify that the prefetching value is no longer frozen + assert!(!dynamic_prefetching_state.is_prefetching_value_frozen()); + } + + #[test] + fn test_get_max_concurrent_requests_disabled() { + // Create a data streaming service config with dynamic prefetching disabled + let max_concurrent_requests = 10; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + max_concurrent_requests, + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Verify that dynamic prefetching is disabled + assert!(!dynamic_prefetching_state.is_dynamic_prefetching_enabled()); + + // Create a stream engine for transactions or outputs + let stream_engine = + create_transactions_or_outputs_stream_engine(data_streaming_service_config); + + // Verify that the max concurrent requests is the static config value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_concurrent_requests, + ); + + // Increase the max concurrent requests several times and verify the value + for _ in 0..10 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests is still the static config value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_concurrent_requests, + ); + } + + // Decrease the max concurrent requests several times and verify the value + for _ in 0..10 { + // Decrease the max concurrent requests + dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the max concurrent requests is still the static config value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_concurrent_requests, + ); + } + } + + #[test] + fn test_get_max_concurrent_state_requests_disabled() { + // Create a data streaming service config with dynamic prefetching disabled + let max_concurrent_state_requests = 5; + let 
dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + max_concurrent_state_requests, + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Verify that dynamic prefetching is disabled + assert!(!dynamic_prefetching_state.is_dynamic_prefetching_enabled()); + + // Create a stream engine for states + let stream_engine = create_state_stream_engine(data_streaming_service_config); + + // Verify that the max concurrent state requests is the static config value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_concurrent_state_requests, + ); + + // Increase the max concurrent requests several times and verify the value + for _ in 0..10 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests is still the static config value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_concurrent_state_requests, + ); + } + + // Decrease the max concurrent requests several times + for _ in 0..10 { + // Decrease the max concurrent requests + dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the max concurrent requests is still the static config value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_concurrent_state_requests, + ); + } + } + + #[test] + fn test_get_max_concurrent_requests() { + // Create a data streaming service config with dynamic prefetching enabled + let initial_prefetching_value = 5; + let prefetching_value_increase = 1; + let prefetching_value_decrease = 2; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + prefetching_value_increase, + prefetching_value_decrease, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Create a stream engine for transactions or outputs + let stream_engine = + create_transactions_or_outputs_stream_engine(data_streaming_service_config); + + // Verify that the max concurrent requests is the initial prefetching value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + initial_prefetching_value, + ); + + // Increase the max concurrent requests several times and verify the value + let mut expected_max_requests = initial_prefetching_value; + for _ in 0..10 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests has increased correctly + expected_max_requests += prefetching_value_increase; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + + // Decrease the max concurrent requests several times and verify the value + for _ in 0..3 { + // Decrease the max concurrent requests + 
dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the max concurrent requests has decreased correctly + expected_max_requests -= prefetching_value_decrease; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + } + + #[test] + fn test_get_max_concurrent_requests_max_value() { + // Create a data streaming service config with dynamic prefetching enabled + let initial_prefetching_value = 10; + let prefetching_value_increase = 2; + let max_prefetching_value = 30; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + prefetching_value_increase, + max_prefetching_value, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Create a stream engine for states + let stream_engine = create_state_stream_engine(data_streaming_service_config); + + // Verify that the max concurrent requests is the initial prefetching value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + initial_prefetching_value, + ); + + // Increase the max concurrent requests several times and verify the value + let mut expected_max_requests = initial_prefetching_value; + for _ in 0..10 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests has increased correctly + expected_max_requests += prefetching_value_increase; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + + // Increase the max concurrent requests many more times and verify the value + for _ in 0..100 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests has increased to the max value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + max_prefetching_value, + ); + } + } + + #[test] + fn test_get_max_concurrent_requests_min_value() { + // Create a data streaming service config with dynamic prefetching enabled + let initial_prefetching_value = 20; + let prefetching_value_decrease = 1; + let min_prefetching_value = 2; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + prefetching_value_decrease, + min_prefetching_value, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, TimeService::mock()); + + // Create a stream engine for transactions or outputs + let stream_engine = + create_transactions_or_outputs_stream_engine(data_streaming_service_config); + + // Verify that the max concurrent requests is the initial prefetching value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + initial_prefetching_value, + ); + + // Decrease the max concurrent requests several times and verify the value + let mut 
expected_max_requests = initial_prefetching_value; + for _ in 0..18 { + // Decrease the max concurrent requests + dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the max concurrent requests has decreased correctly + expected_max_requests -= prefetching_value_decrease; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + + // Decrease the max concurrent requests many more times and verify the value + for _ in 0..100 { + // Decrease the max concurrent requests + dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the max concurrent requests has decreased to the min value + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + min_prefetching_value, + ); + } + } + + #[test] + fn test_prefetching_value_frozen() { + // Create a data streaming service config with dynamic prefetching enabled + let initial_prefetching_value = 5; + let prefetching_value_increase = 1; + let prefetching_value_decrease = 2; + let timeout_freeze_duration_secs = 10; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + prefetching_value_increase, + prefetching_value_decrease, + timeout_freeze_duration_secs, + ..Default::default() + }; + let data_streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + + // Create a new dynamic prefetching state + let time_service = TimeService::mock(); + let mut dynamic_prefetching_state = + DynamicPrefetchingState::new(data_streaming_service_config, time_service.clone()); + + // Create a stream engine for transactions or outputs + let stream_engine = + create_transactions_or_outputs_stream_engine(data_streaming_service_config); + + // Increase the max concurrent requests several times and verify the value + let mut expected_max_requests = initial_prefetching_value; + for _ in 0..10 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests has increased correctly + expected_max_requests += prefetching_value_increase; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + + // Update the prefetcher state to simulate a timeout and verify the value is frozen + dynamic_prefetching_state.decrease_max_concurrent_requests(); + assert!(dynamic_prefetching_state.is_prefetching_value_frozen()); + + // Verify that the max concurrent requests has decreased correctly + let mut expected_max_requests = expected_max_requests - prefetching_value_decrease; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + + // Increase the max concurrent requests several more times + for _ in 0..100 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests has not changed (the value is frozen) + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + + // Elapse less time than the freeze duration + let time_service = time_service.into_mock(); + time_service.advance_secs(timeout_freeze_duration_secs - 1); + + // Increase the max concurrent requests and verify the prefetching value is still frozen + 
dynamic_prefetching_state.increase_max_concurrent_requests(); + assert!(dynamic_prefetching_state.is_prefetching_value_frozen()); + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + + // Decrease the max concurrent requests several times + for _ in 0..3 { + // Decrease the max concurrent requests + dynamic_prefetching_state.decrease_max_concurrent_requests(); + + // Verify that the max concurrent requests has decreased correctly + expected_max_requests -= prefetching_value_decrease; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + + // Elapse more time than the freeze duration and verify the value is not frozen + time_service.advance_secs(timeout_freeze_duration_secs + 1); + assert!(!dynamic_prefetching_state.is_prefetching_value_frozen()); + + // Increase the max concurrent requests several times and verify the value is not frozen + for _ in 0..10 { + // Increase the max concurrent requests + dynamic_prefetching_state.increase_max_concurrent_requests(); + + // Verify that the max concurrent requests has increased correctly + expected_max_requests += prefetching_value_increase; + verify_max_concurrent_requests( + &mut dynamic_prefetching_state, + &stream_engine, + expected_max_requests, + ); + } + } + + /// Creates a stream engine for states + fn create_state_stream_engine( + data_streaming_service_config: DataStreamingServiceConfig, + ) -> StreamEngine { + // Create the stream request for states + let stream_request = StreamRequest::GetAllStates(GetAllStatesRequest { + version: 0, + start_index: 0, + }); + + // Create and return the stream engine + StreamEngine::new( + data_streaming_service_config, + &stream_request, + &AdvertisedData::empty(), + ) + .unwrap() + } + + /// Creates a stream engine for transactions or outputs + fn create_transactions_or_outputs_stream_engine( + data_streaming_service_config: DataStreamingServiceConfig, + ) -> StreamEngine { + // Create the stream request for transactions or outputs + let stream_request = + StreamRequest::GetAllTransactionsOrOutputs(GetAllTransactionsOrOutputsRequest { + start_version: 0, + end_version: 100_000, + proof_version: 100_000, + include_events: true, + }); + + // Create and return the stream engine + StreamEngine::new( + data_streaming_service_config, + &stream_request, + &AdvertisedData::empty(), + ) + .unwrap() + } + + /// Verifies that the max concurrent requests is the expected value + fn verify_max_concurrent_requests( + dynamic_prefetching_state: &mut DynamicPrefetchingState, + stream_engine: &StreamEngine, + expected_max_requests: u64, + ) { + assert_eq!( + dynamic_prefetching_state.get_max_concurrent_requests(stream_engine), + expected_max_requests + ); + } +} diff --git a/state-sync/data-streaming-service/src/lib.rs b/state-sync/data-streaming-service/src/lib.rs index 81b9de837a672..1f04b89a09a58 100644 --- a/state-sync/data-streaming-service/src/lib.rs +++ b/state-sync/data-streaming-service/src/lib.rs @@ -6,6 +6,7 @@ pub mod data_notification; pub mod data_stream; +mod dynamic_prefetching; pub mod error; mod logging; mod metrics; diff --git a/state-sync/data-streaming-service/src/metrics.rs b/state-sync/data-streaming-service/src/metrics.rs index 2143eef9d35c6..f2a5b0e6bcce8 100644 --- a/state-sync/data-streaming-service/src/metrics.rs +++ b/state-sync/data-streaming-service/src/metrics.rs @@ -134,6 +134,15 @@ pub static RETRIED_DATA_REQUESTS: Lazy = Lazy::new(|| { .unwrap() }); +/// 
Counter for the number of max concurrent prefetching requests +pub static MAX_CONCURRENT_PREFETCHING_REQUESTS: Lazy<IntGauge> = Lazy::new(|| { + register_int_gauge!( + "aptos_data_streaming_service_max_concurrent_prefetching_requests", + "The number of max concurrent prefetching requests", + ) + .unwrap() +}); + /// Counter for the number of pending data responses pub static PENDING_DATA_RESPONSES: Lazy<IntGauge> = Lazy::new(|| { register_int_gauge!( @@ -252,6 +261,11 @@ pub fn set_active_data_streams(value: usize) { ACTIVE_DATA_STREAMS.set(value as i64); } +/// Sets the number of max concurrent requests +pub fn set_max_concurrent_requests(value: u64) { + MAX_CONCURRENT_PREFETCHING_REQUESTS.set(value as i64); +} + /// Sets the number of complete pending data responses pub fn set_complete_pending_data_responses(value: u64) { COMPLETE_PENDING_DATA_RESPONSES.set(value as i64); diff --git a/state-sync/data-streaming-service/src/tests/data_stream.rs b/state-sync/data-streaming-service/src/tests/data_stream.rs index 418861705e734..d68fd6ba193e4 100644 --- a/state-sync/data-streaming-service/src/tests/data_stream.rs +++ b/state-sync/data-streaming-service/src/tests/data_stream.rs @@ -31,7 +31,9 @@ use crate::{ }, }; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::config::{AptosDataClientConfig, DataStreamingServiceConfig}; +use aptos_config::config::{ + AptosDataClientConfig, DataStreamingServiceConfig, DynamicPrefetchingConfig, +}; use aptos_data_client::{ global_summary::{AdvertisedData, GlobalDataSummary, OptimalChunkSizes}, interface::{Response, ResponseContext, ResponsePayload}, @@ -51,7 +53,7 @@ use aptos_types::{ }; use claims::{assert_err, assert_ge, assert_matches, assert_none, assert_ok, assert_some}; use futures::{FutureExt, StreamExt}; -use std::{collections::VecDeque, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use tokio::time::timeout; #[tokio::test] @@ -266,12 +268,8 @@ async fn test_epoch_stream_out_of_order_responses() { let global_data_summary = create_global_data_summary(1); initialize_data_requests(&mut data_stream, &global_data_summary); - // Verify at least three requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_ge!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + // Verify that three requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); // Set a response for the second request and verify no notifications set_epoch_ending_response_in_queue(&mut data_stream, 1, 0); @@ -317,9 +315,14 @@ async fn test_epoch_stream_out_of_order_responses() { #[tokio::test] async fn test_state_stream_out_of_order_responses() { - // Create a state value data stream + // Create a state value data stream with dynamic prefetching disabled let max_concurrent_state_requests = 6; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, max_concurrent_requests: 1, max_concurrent_state_requests, ..Default::default() }; @@ -335,18 +338,95 @@ initialize_data_requests(&mut data_stream, &global_data_summary); // Verify a single request is made (to fetch the number of state values) - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 
1); + verify_num_sent_requests(&mut data_stream, 1); // Set a response for the number of state values set_num_state_values_response_in_queue(&mut data_stream, 0); process_data_responses(&mut data_stream, &global_data_summary).await; - // Verify at least six requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_ge!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_state_requests as usize + // Verify the number of sent requests + verify_num_sent_requests(&mut data_stream, max_concurrent_state_requests); + + // Set a response for the second request and verify no notifications + set_state_value_response_in_queue(&mut data_stream, 1, 1, 1); + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Set a response for the first request and verify two notifications + set_state_value_response_in_queue(&mut data_stream, 0, 0, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + for _ in 0..2 { + let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); + assert_matches!( + data_notification.data_payload, + DataPayload::StateValuesWithProof(_) + ); + } + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Set the response for the first and third request and verify one notification sent + set_state_value_response_in_queue(&mut data_stream, 2, 2, 0); + set_state_value_response_in_queue(&mut data_stream, 4, 4, 2); + process_data_responses(&mut data_stream, &global_data_summary).await; + let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); + assert_matches!( + data_notification.data_payload, + DataPayload::StateValuesWithProof(_) + ); + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Set the response for the first and third request and verify three notifications sent + set_state_value_response_in_queue(&mut data_stream, 3, 3, 0); + set_state_value_response_in_queue(&mut data_stream, 5, 5, 2); + process_data_responses(&mut data_stream, &global_data_summary).await; + for _ in 0..3 { + let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); + assert_matches!( + data_notification.data_payload, + DataPayload::StateValuesWithProof(_) + ); + } + assert_none!(stream_listener.select_next_some().now_or_never()); +} + +#[tokio::test] +async fn test_state_stream_out_of_order_responses_dynamic() { + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 3; + let prefetching_value_increase = 2; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + prefetching_value_increase, + ..Default::default() + }; + + // Create a state value data stream + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + ..Default::default() + }; + let (mut data_stream, mut stream_listener) = create_state_value_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_STATES, + ); + + // Initialize the data stream + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify a single request is made (to fetch the number of state values) + verify_num_sent_requests(&mut data_stream, 1); + + // Set a response for the number of state values + 
set_num_state_values_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify the correct number of requests have been made + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + prefetching_value_increase, ); // Set a response for the second request and verify no notifications @@ -354,6 +434,12 @@ process_data_responses(&mut data_stream, &global_data_summary).await; assert_none!(stream_listener.select_next_some().now_or_never()); + // Verify the correct number of requests have been made + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + prefetching_value_increase + 1, + ); + // Set a response for the first request and verify two notifications set_state_value_response_in_queue(&mut data_stream, 0, 0, 0); process_data_responses(&mut data_stream, &global_data_summary).await; @@ -366,6 +452,12 @@ } assert_none!(stream_listener.select_next_some().now_or_never()); + // Verify the correct number of requests have been made + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + (prefetching_value_increase * 3), + ); + // Set the response for the first and third request and verify one notification sent set_state_value_response_in_queue(&mut data_stream, 2, 2, 0); set_state_value_response_in_queue(&mut data_stream, 4, 4, 2); @@ -377,6 +469,12 @@ ); assert_none!(stream_listener.select_next_some().now_or_never()); + // Verify the correct number of requests have been made + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + (prefetching_value_increase * 4) + 1, + ); + // Set the response for the first and third request and verify three notifications sent set_state_value_response_in_queue(&mut data_stream, 3, 3, 0); set_state_value_response_in_queue(&mut data_stream, 5, 5, 2); @@ -389,14 +487,25 @@ ); } assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of requests have been made + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + (prefetching_value_increase * 7), + ); } #[tokio::test] async fn test_stream_max_pending_requests() { - // Create an epoch ending data stream + // Create an epoch ending data stream with dynamic prefetching disabled let max_concurrent_requests = 6; let max_pending_requests = 19; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, max_concurrent_requests, max_pending_requests, ..Default::default() }; @@ -412,11 +521,7 @@ initialize_data_requests(&mut data_stream, &global_data_summary); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); // Set a valid response for each request except the first one set_epoch_ending_response_for_indices( &mut data_stream, max_concurrent_requests, (1..max_concurrent_requests).collect::<Vec<_>>(), ); // Process the responses and send more client requests process_data_responses(&mut data_stream, &global_data_summary).await; assert_none!(stream_listener.select_next_some().now_or_never()); // Verify the correct number of requests have 
been made let num_expected_pending_requests = (max_concurrent_requests * 2) - 1; // The first request failed - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - num_expected_pending_requests - ); + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); // Verify the state of the pending responses verify_pending_responses_for_indices( - sent_requests, + &mut data_stream, num_expected_pending_requests, (1..max_concurrent_requests).collect::<Vec<_>>(), ); @@ -457,15 +558,11 @@ // Verify the correct number of requests have been made let num_expected_pending_requests = (max_concurrent_requests * 3) - 3; - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - num_expected_pending_requests - ); + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); // Verify the state of the pending responses verify_pending_responses_for_indices( - sent_requests, + &mut data_stream, num_expected_pending_requests, (1..(max_concurrent_requests * 2) - 2).collect::<Vec<_>>(), ); @@ -482,15 +579,11 @@ assert_none!(stream_listener.select_next_some().now_or_never()); // Verify the correct number of requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - max_pending_requests - ); + verify_num_sent_requests(&mut data_stream, max_pending_requests); // Verify the state of the pending responses verify_pending_responses_for_indices( - sent_requests, + &mut data_stream, num_expected_pending_requests, (1..(max_concurrent_requests * 3) - 3).collect::<Vec<_>>(), ); @@ -509,11 +602,7 @@ assert_none!(stream_listener.select_next_some().now_or_never()); // Verify that no more requests have been made (we're at the max) - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - max_pending_requests - ); + verify_num_sent_requests(&mut data_stream, max_pending_requests); } // Set a valid response for every request @@ -527,12 +616,152 @@ process_data_responses(&mut data_stream, &global_data_summary).await; // Verify that more requests have been made (and the entire buffer has been flushed) - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - max_concurrent_requests + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); + + // Verify that we received a notification for each flushed response + for _ in 0..max_pending_requests { + let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); + assert_matches!( + data_notification.data_payload, + DataPayload::EpochEndingLedgerInfos(_) + ); + } +} + +#[tokio::test] +async fn test_stream_max_pending_requests_dynamic() { + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 5; + let min_prefetching_value = 1; + let prefetching_value_increase = 2; + let prefetching_value_decrease = 3; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + min_prefetching_value, + prefetching_value_increase, 
+ prefetching_value_decrease, + timeout_freeze_duration_secs: 0, // Don't freeze the prefetching value + ..Default::default() + }; + + // Create an epoch ending data stream + let max_pending_requests = 6; + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + max_pending_requests, + ..Default::default() + }; + let (mut data_stream, mut stream_listener) = create_epoch_ending_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_EPOCH_END, + ); + + // Initialize the data stream + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, initial_prefetching_value); + + // Set a valid response for each request except the first one + set_epoch_ending_response_for_indices( + &mut data_stream, + initial_prefetching_value, + (1..initial_prefetching_value).collect::<Vec<_>>(), + ); + + // Process the responses and send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of requests have been made + let num_expected_pending_requests = + ((initial_prefetching_value * 2) - prefetching_value_decrease) - 1; // The first request failed + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); + + // Verify the state of the pending responses + verify_pending_responses_for_indices( + &mut data_stream, + num_expected_pending_requests, + (1..initial_prefetching_value).collect::<Vec<_>>(), ); + // Set a valid response for each request except the first and last ones + set_epoch_ending_response_for_indices( + &mut data_stream, + num_expected_pending_requests, + (1..num_expected_pending_requests - 1).collect::<Vec<_>>(), + ); + + // Process the responses and send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of requests have been made + let num_expected_pending_requests = + ((initial_prefetching_value * 2) - prefetching_value_decrease) - 1; // The first request failed + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); + + // Verify the state of the pending responses + verify_pending_responses_for_indices( + &mut data_stream, + num_expected_pending_requests, + (1..num_expected_pending_requests - 1).collect::<Vec<_>>(), + ); + + // Set a valid response for each request except the first one + set_epoch_ending_response_for_indices( + &mut data_stream, + num_expected_pending_requests, + (1..num_expected_pending_requests).collect::<Vec<_>>(), + ); + + // Process the responses and send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of requests have been made + verify_num_sent_requests(&mut data_stream, max_pending_requests); + + // Verify the state of the pending responses + verify_pending_responses_for_indices( + &mut data_stream, + num_expected_pending_requests, + (1..num_expected_pending_requests).collect::<Vec<_>>(), + ); + + // Set a valid response for each request except the first one + set_epoch_ending_response_for_indices( + &mut data_stream, + num_expected_pending_requests, + 
(1..num_expected_pending_requests).collect::<Vec<_>>(), + ); + + // Process the responses and send more client requests several times + for _ in 0..10 { + // Process the responses and send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify that no more requests have been made (we're at the max) + verify_num_sent_requests(&mut data_stream, max_pending_requests); + } + + // Set a valid response for every request + set_epoch_ending_response_for_indices( + &mut data_stream, + max_pending_requests, + (0..max_pending_requests).collect::<Vec<_>>(), + ); + + // Process the responses and send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify that more requests have been made (and the entire buffer has been flushed) + verify_num_sent_requests(&mut data_stream, max_pending_requests); + // Verify that we received a notification for each flushed response for _ in 0..max_pending_requests { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); assert_matches!( data_notification.data_payload, DataPayload::EpochEndingLedgerInfos(_) ); } @@ -545,10 +774,15 @@ #[tokio::test] async fn test_stream_max_pending_requests_flushing() { - // Create an epoch ending data stream + // Create an epoch ending data stream with dynamic prefetching disabled let max_concurrent_requests = 2; let max_pending_requests = 4; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, max_concurrent_requests, max_pending_requests, ..Default::default() }; @@ -564,11 +798,7 @@ initialize_data_requests(&mut data_stream, &global_data_summary); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); // Set a valid response for the second request set_epoch_ending_response_in_queue(&mut data_stream, 1, 0); @@ -578,11 +808,7 @@ assert_none!(stream_listener.select_next_some().now_or_never()); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - (max_concurrent_requests + 1) as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests + 1); // Set a valid response for the third request set_epoch_ending_response_in_queue(&mut data_stream, 2, 0); @@ -592,11 +818,7 @@ assert_none!(stream_listener.select_next_some().now_or_never()); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_pending_requests as usize - ); + verify_num_sent_requests(&mut data_stream, max_pending_requests); // Set a valid response for the first request set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); @@ -609,11 +831,7 @@ 
assert_none!(stream_listener.select_next_some().now_or_never()); // Verify the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); // Set a valid response for the second request set_epoch_ending_response_in_queue(&mut data_stream, 1, 0); @@ -623,11 +841,7 @@ async fn test_stream_max_pending_requests_flushing() { assert_none!(stream_listener.select_next_some().now_or_never()); // Verify the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - (max_concurrent_requests + 1) as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests + 1); // Set a valid response for the first request set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); @@ -640,11 +854,7 @@ async fn test_stream_max_pending_requests_flushing() { assert_none!(stream_listener.select_next_some().now_or_never()); // Verify the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); // Set an error response for all requests for index in 0..max_concurrent_requests { @@ -653,38 +863,343 @@ async fn test_stream_max_pending_requests_flushing() { // Process the responses and (potentially) send more client requests process_data_responses(&mut data_stream, &global_data_summary).await; - assert_none!(stream_listener.select_next_some().now_or_never()); + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify no more client requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); + + // Set a valid response for the first request + set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); + + // Process the responses and verify we get one notification + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_some!(stream_listener.select_next_some().now_or_never()); + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify no more client requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); +} + +#[tokio::test] +async fn test_stream_max_pending_requests_flushing_dynamic() { + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 3; + let min_prefetching_value = 1; + let prefetching_value_increase = 2; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + min_prefetching_value, + prefetching_value_increase, + timeout_freeze_duration_secs: 0, // Don't freeze the prefetching value + ..Default::default() + }; + + // Create an epoch ending data stream + let max_pending_requests = 7; + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + max_pending_requests, + ..Default::default() + }; + let (mut data_stream, mut stream_listener) = create_epoch_ending_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_EPOCH_END, + ); + + // Initialize the data stream + let 
global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, initial_prefetching_value); + + // Set a valid response for the second request + set_epoch_ending_response_in_queue(&mut data_stream, 1, 0); + + // Process the responses and verify we get no notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, initial_prefetching_value + 1); + + // Set a valid response for the third request + set_epoch_ending_response_in_queue(&mut data_stream, 2, 0); + + // Process the responses and send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, initial_prefetching_value + 2); + + // Set a valid response for the first request + set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); + + // Process the responses and verify we get three notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + for _ in 0..3 { + assert_some!(stream_listener.select_next_some().now_or_never()); + } + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_pending_requests); + + // Set a valid response for the second request + set_epoch_ending_response_in_queue(&mut data_stream, 1, 0); + + // Process the responses and verify we get no notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_pending_requests); + + // Set a valid response for the first request + set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); + + // Process the responses and verify we get two notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + for _ in 0..2 { + assert_some!(stream_listener.select_next_some().now_or_never()); + } + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_pending_requests); + + // Set an error response for all requests + for index in 0..max_pending_requests { + set_failure_response_in_queue(&mut data_stream, index as usize); + } + + // Process the responses and (potentially) send more client requests + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify no more client requests have been made + verify_num_sent_requests(&mut data_stream, max_pending_requests); + + // Set a valid response for the first request + set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); + + // Process the responses and verify we get one notification + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_some!(stream_listener.select_next_some().now_or_never()); + 
assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify more client requests have been made + verify_num_sent_requests(&mut data_stream, max_pending_requests); +} + +#[tokio::test] +async fn test_stream_max_pending_requests_freeze_dynamic() { + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 10; + let max_prefetching_value = 12; + let prefetching_value_increase = 2; + let prefetching_value_decrease = 2; + let timeout_freeze_duration_secs = 10; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + max_prefetching_value, + prefetching_value_increase, + prefetching_value_decrease, + timeout_freeze_duration_secs, + ..Default::default() + }; + + // Create a data streaming service config with prefetching enabled + let max_pending_requests = 100; + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + max_pending_requests, + ..Default::default() + }; + + // Create an epoch ending data stream + let stream_request = + StreamRequest::GetAllEpochEndingLedgerInfos(GetAllEpochEndingLedgerInfosRequest { + start_epoch: MIN_ADVERTISED_EPOCH_END, + }); + let (mut data_stream, mut stream_listener, time_service) = create_data_stream( + AptosDataClientConfig::default(), + streaming_service_config, + stream_request, + ); + + // Initialize the data stream + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, initial_prefetching_value); + + // Set a valid response for each request except the first one + set_epoch_ending_response_for_indices( + &mut data_stream, + initial_prefetching_value, + (1..initial_prefetching_value).collect::<Vec<_>>(), + ); + + // Set an invalid response for the first request + set_failure_response_in_queue(&mut data_stream, 0); + + // Process the responses and verify we get no notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify that the correct number of client requests have been made + let mut num_expected_pending_requests = + ((initial_prefetching_value * 2) - prefetching_value_decrease) - 1; // The first request failed + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); + + // Set a valid response for each request except the first one + set_epoch_ending_response_for_indices( + &mut data_stream, + num_expected_pending_requests, + (1..num_expected_pending_requests).collect::<Vec<_>>(), + ); + + // Set an invalid response for the first request + set_failure_response_in_queue(&mut data_stream, 0); + + // Elapse some time (but not enough for the prefetching value to be unfrozen) + let time_service = time_service.into_mock(); + time_service.advance(Duration::from_secs(timeout_freeze_duration_secs / 2)); + + // Process the responses and verify we get no notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify that the correct number of client requests have been made + num_expected_pending_requests += + initial_prefetching_value - (prefetching_value_decrease * 2) - 1; // The first request failed + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); + + // Set a valid response for all requests + set_epoch_ending_response_for_indices( + &mut data_stream, + num_expected_pending_requests, + (0..num_expected_pending_requests).collect::<Vec<_>>(), + ); + + // Process the responses and verify we get the correct number of notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + for _ in 0..num_expected_pending_requests { + assert_some!(stream_listener.select_next_some().now_or_never()); + } + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of client requests have been made + let num_expected_pending_requests = + initial_prefetching_value - (prefetching_value_decrease * 2); + verify_num_sent_requests(&mut data_stream, num_expected_pending_requests); + + // Elapse enough time for the prefetching value to be unfrozen + time_service.advance(Duration::from_secs(timeout_freeze_duration_secs + 1)); + + // Set a valid response for all requests + set_epoch_ending_response_for_indices( + &mut data_stream, + num_expected_pending_requests, + (0..num_expected_pending_requests).collect::<Vec<_>>(), + ); + + // Process the responses and verify we get the correct number of notifications + process_data_responses(&mut data_stream, &global_data_summary).await; + for _ in 0..num_expected_pending_requests { + assert_some!(stream_listener.select_next_some().now_or_never()); + } + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Verify the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_prefetching_value); +} + +#[tokio::test] +async fn test_stream_max_pending_requests_missing_data() { + // Create an epoch ending data stream with dynamic prefetching disabled + let max_concurrent_requests = 1; + let max_pending_requests = 3; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + max_concurrent_requests, + max_pending_requests, + ..Default::default() + }; + let (mut data_stream, mut stream_listener) = create_epoch_ending_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_EPOCH_END, + ); + + // Initialize the data stream + let optimal_epoch_chunk_sizes = 2; + let global_data_summary = create_global_data_summary(optimal_epoch_chunk_sizes); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); + + // Set a valid (but partial) response for the first request + set_epoch_ending_response_in_queue(&mut data_stream, 0, 1); + + // Process the responses and verify we get a notification + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_some!(stream_listener.select_next_some().now_or_never()); + + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); + + // Set a valid (now complete) response for the first request + set_epoch_ending_response_in_queue(&mut data_stream, 0, 1); + + // Process the responses and verify we get a notification + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_some!(stream_listener.select_next_some().now_or_never()); - // Verify no more client requests have been made - let (sent_requests, _) =
data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - max_concurrent_requests - ); + // Verify that no more client requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); - // Set a valid response for the first request - set_epoch_ending_response_in_queue(&mut data_stream, 0, 0); + // Set a valid (but partial) response for the first request again + set_epoch_ending_response_in_queue(&mut data_stream, 0, 1); - // Process the responses and verify we get one notification + // Process the responses and verify we get a notification process_data_responses(&mut data_stream, &global_data_summary).await; assert_some!(stream_listener.select_next_some().now_or_never()); - assert_none!(stream_listener.select_next_some().now_or_never()); - // Verify no more client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len() as u64, - max_concurrent_requests - ); + // Verify that the correct number of client requests have been made + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); } #[tokio::test] -async fn test_stream_max_pending_requests_missing_data() { +async fn test_stream_max_pending_requests_missing_data_dynamic() { + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 3; + let min_prefetching_value = 1; + let prefetching_value_increase = 2; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + min_prefetching_value, + prefetching_value_increase, + timeout_freeze_duration_secs: 0, // Don't freeze the prefetching value + ..Default::default() + }; + // Create an epoch ending data stream - let max_concurrent_requests = 1; - let max_pending_requests = 3; + let max_pending_requests = 10; let streaming_service_config = DataStreamingServiceConfig { - max_concurrent_requests, + dynamic_prefetching: dynamic_prefetching_config, max_pending_requests, ..Default::default() }; @@ -700,11 +1215,7 @@ async fn test_stream_max_pending_requests_missing_data() { initialize_data_requests(&mut data_stream, &global_data_summary); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(&mut data_stream, initial_prefetching_value); // Set a valid (but partial) response for the first request set_epoch_ending_response_in_queue(&mut data_stream, 0, 1); @@ -714,10 +1225,9 @@ async fn test_stream_max_pending_requests_missing_data() { assert_some!(stream_listener.select_next_some().now_or_never()); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + prefetching_value_increase, ); // Set a valid (now complete) response for the first request @@ -727,11 +1237,10 @@ async fn test_stream_max_pending_requests_missing_data() { process_data_responses(&mut data_stream, &global_data_summary).await; assert_some!(stream_listener.select_next_some().now_or_never()); - // Verify that no more client requests have been made - let (sent_requests, _) 
= data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize + // Verify that more client requests have been made + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + (2 * prefetching_value_increase), ); // Set a valid (but partial) response for the first request again @@ -742,19 +1251,23 @@ async fn test_stream_max_pending_requests_missing_data() { assert_some!(stream_listener.select_next_some().now_or_never()); // Verify that the correct number of client requests have been made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize + verify_num_sent_requests( + &mut data_stream, + initial_prefetching_value + (3 * prefetching_value_increase), ); } #[tokio::test] async fn test_continuous_stream_epoch_change_retry() { - // Create a test streaming service config + // Create a test streaming service config with dynamic prefetching disabled let max_request_retry = 10; let max_concurrent_requests = 3; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, max_concurrent_requests, max_request_retry, ..Default::default() @@ -785,8 +1298,84 @@ async fn test_continuous_stream_epoch_change_retry() { initialize_data_requests(&mut data_stream, &global_data_summary); // Verify a single request is made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 1); + verify_num_sent_requests(&mut data_stream, 1); + + // Verify the request is for an epoch ending ledger info + let client_request = get_pending_client_request(&mut data_stream, 0); + let epoch_ending_request = + DataClientRequest::EpochEndingLedgerInfos(EpochEndingLedgerInfosRequest { + start_epoch: MIN_ADVERTISED_EPOCH_END, + end_epoch: MIN_ADVERTISED_EPOCH_END, + }); + assert_eq!(client_request, epoch_ending_request); + + // Handle multiple timeouts and retries + for _ in 0..max_request_retry - 1 { + // Set a timeout response for the epoch ending ledger info and process it + set_timeout_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify the data client request was resent to the network (retried) + let client_request = get_pending_client_request(&mut data_stream, 0); + assert_eq!(client_request, epoch_ending_request); + } + + // Set an epoch ending response in the queue and process it + set_epoch_ending_response_in_queue(&mut data_stream, 0, MIN_ADVERTISED_TRANSACTION + 100); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify the correct number of data requests are now pending, + // i.e., a target has been found and we're fetching data up to it. 
+ verify_num_sent_requests(&mut data_stream, 3); + } +} + +#[tokio::test] +async fn test_continuous_stream_epoch_change_retry_dynamic() { + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 5; + let min_prefetching_value = 2; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + min_prefetching_value, + ..Default::default() + }; + + // Create a test streaming service config + let max_request_retry = 10; + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + max_request_retry, + ..Default::default() + }; + + // Test all types of continuous data streams + let (data_stream_1, _stream_listener_1, _) = create_continuous_transaction_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_TRANSACTION, + MIN_ADVERTISED_EPOCH_END, + ); + let (data_stream_2, _stream_listener_2, _) = create_continuous_transaction_output_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MIN_ADVERTISED_EPOCH_END, + ); + let (data_stream_3, _stream_listener_3, _) = create_continuous_transaction_or_output_stream( + AptosDataClientConfig::default(), + streaming_service_config, + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MIN_ADVERTISED_EPOCH_END, + ); + for mut data_stream in [data_stream_1, data_stream_2, data_stream_3] { + // Initialize the data stream and drive progress + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify a single request is made + verify_num_sent_requests(&mut data_stream, 1); // Verify the request is for an epoch ending ledger info let client_request = get_pending_client_request(&mut data_stream, 0); @@ -814,8 +1403,7 @@ async fn test_continuous_stream_epoch_change_retry() { // Verify the correct number of data requests are now pending, // i.e., a target has been found and we're fetching data up to it. - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 3); + verify_num_sent_requests(&mut data_stream, min_prefetching_value); } } @@ -1591,10 +2179,9 @@ async fn test_continuous_stream_subscription_max() { process_data_responses(&mut data_stream, &global_data_summary).await; // Verify the number of pending requests - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - (max_num_consecutive_subscriptions - max_concurrent_requests) as usize + verify_num_sent_requests( + &mut data_stream, + max_num_consecutive_subscriptions - max_concurrent_requests, ); // Set valid responses for all pending requests and process the responses @@ -1625,8 +2212,14 @@ async fn test_continuous_stream_subscription_timeout() { ..Default::default() }; - // Create a test streaming service config with subscriptions enabled + // Create a test streaming service config with subscriptions + // enabled, but dynamic prefetching disabled. 
+ let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, enable_subscription_streaming: true, max_concurrent_requests: 7, ..Default::default() @@ -1652,10 +2245,15 @@ async fn test_stream_timeouts() { ..Default::default() }; - // Create a test streaming service config + // Create a test streaming service config with dynamic prefetching disabled let max_concurrent_requests = 3; let max_request_retry = 10; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: false, + ..Default::default() + }; let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, max_concurrent_requests, max_request_retry, ..Default::default() @@ -1690,11 +2288,7 @@ initialize_data_requests(&mut data_stream, &global_data_summary); // Verify the correct number of requests are made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(&mut data_stream, max_concurrent_requests); // Wait for the data client to satisfy all requests for request_index in 0..max_concurrent_requests as usize { @@ -1754,6 +2348,124 @@ } } +#[tokio::test(flavor = "multi_thread")] +async fn test_stream_timeouts_dynamic() { + // Create a test data client config + let max_response_timeout_ms = 85; + let response_timeout_ms = 7; + let data_client_config = AptosDataClientConfig { + max_response_timeout_ms, + response_timeout_ms, + ..Default::default() + }; + + // Create a dynamic prefetching config with prefetching enabled + let initial_prefetching_value = 5; + let min_prefetching_value = 3; + let dynamic_prefetching_config = DynamicPrefetchingConfig { + enable_dynamic_prefetching: true, + initial_prefetching_value, + min_prefetching_value, + ..Default::default() + }; + + // Create a test streaming service config with dynamic prefetching enabled + let max_request_retry = 10; + let streaming_service_config = DataStreamingServiceConfig { + dynamic_prefetching: dynamic_prefetching_config, + max_request_retry, + ..Default::default() + }; + + // Test all types of data streams + let (data_stream_1, stream_listener_1) = create_transaction_stream( + data_client_config, + streaming_service_config, + MIN_ADVERTISED_TRANSACTION, + MAX_ADVERTISED_TRANSACTION, + ); + let (data_stream_2, stream_listener_2) = create_output_stream( + data_client_config, + streaming_service_config, + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + ); + let (data_stream_3, stream_listener_3) = create_transactions_or_output_stream( + data_client_config, + streaming_service_config, + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + ); + for (mut data_stream, mut stream_listener, transactions_only, allow_transactions_or_outputs) in [ + (data_stream_1, stream_listener_1, true, false), + (data_stream_2, stream_listener_2, false, false), + (data_stream_3, stream_listener_3, false, true), + ] { + // Initialize the data stream + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify the correct number of requests are made + verify_num_sent_requests(&mut data_stream,
initial_prefetching_value); + + // Wait for the data client to satisfy all requests + for request_index in 0..initial_prefetching_value as usize { + wait_for_data_client_to_respond(&mut data_stream, request_index).await; + } + + // Handle multiple timeouts and retries on the first request + for _ in 0..max_request_retry / 2 { + set_timeout_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + wait_for_data_client_to_respond(&mut data_stream, 0).await; + } + + // Wait until a notification is finally sent along the stream + wait_for_notification_and_verify( + &mut data_stream, + &mut stream_listener, + transactions_only, + allow_transactions_or_outputs, + false, + &global_data_summary, + ) + .await; + + // Wait for the data client to satisfy all requests + for request_index in 0..min_prefetching_value as usize { + wait_for_data_client_to_respond(&mut data_stream, request_index).await; + } + + // Set a timeout on the second request + set_timeout_response_in_queue(&mut data_stream, 1); + + // Handle multiple invalid type responses on the first request + for _ in 0..max_request_retry / 2 { + set_state_value_response_in_queue(&mut data_stream, 0, 0, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + wait_for_data_client_to_respond(&mut data_stream, 0).await; + } + + // Handle multiple invalid type responses on the third request + for _ in 0..max_request_retry / 2 { + set_state_value_response_in_queue(&mut data_stream, 2, 2, 2); + process_data_responses(&mut data_stream, &global_data_summary).await; + wait_for_data_client_to_respond(&mut data_stream, 2).await; + } + + // Wait until a notification is finally sent along the stream + wait_for_notification_and_verify( + &mut data_stream, + &mut stream_listener, + transactions_only, + allow_transactions_or_outputs, + false, + &global_data_summary, + ) + .await; + } +} + #[tokio::test] async fn test_stream_listener_dropped() { // Create an epoch ending data stream @@ -1836,11 +2548,7 @@ async fn advertise_new_data_and_verify_requests( process_data_responses(data_stream, &new_global_data_summary).await; // Verify multiple data requests have now been sent to fetch the missing data - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize, - ); + verify_num_sent_requests(data_stream, max_concurrent_requests); // Verify the pending requests are for the correct data and correctly formed for request_index in 0..max_concurrent_requests { @@ -2205,7 +2913,7 @@ fn set_epoch_ending_response_for_indices( } else if random_number % 3 == 1 { set_failure_response_in_queue(data_stream, index as usize); // Set a failure response } else { - // Do nothing (to emulate the request still being in-flight) + set_pending_response_in_queue(data_stream, index as usize); // Set a pending response } } } @@ -2308,8 +3016,19 @@ fn set_failure_response_in_queue(data_stream: &mut DataStream<MockAptosDataClient>, index: usize) { +fn set_pending_response_in_queue(data_stream: &mut DataStream<MockAptosDataClient>, index: usize) { + // Get the pending response at the specified index + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + let pending_response = sent_requests.as_mut().unwrap().get_mut(index).unwrap(); + + // Set the response to still be pending + pending_response.lock().client_response = None; +} + +/// Sets the client response at the index in the pending +/// queue to contain a timeout response.
fn set_timeout_response_in_queue(data_stream: &mut DataStream<MockAptosDataClient>, index: usize) { set_response_in_queue( data_stream, @@ -2419,11 +3138,7 @@ async fn verify_continuous_stream_request_timeouts( initialize_data_requests(&mut data_stream, &global_data_summary); // Verify that the expected number of requests are made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - num_expected_requests as usize - ); + verify_num_sent_requests(&mut data_stream, num_expected_requests); // Wait until a notification is sent. The mock data client // will verify the timeout. @@ -2445,11 +3160,7 @@ async fn verify_continuous_stream_request_timeouts( process_data_responses(&mut data_stream, &global_data_summary).await; // Verify more requests are made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - num_expected_requests as usize - ); + verify_num_sent_requests(&mut data_stream, num_expected_requests); } // Wait until a notification is sent. The mock data client @@ -2539,6 +3250,20 @@ fn get_subscription_stream_id( } } +/// Verifies that the length of the pending requests queue is +/// equal to the expected length. +fn verify_num_sent_requests( + data_stream: &mut DataStream<MockAptosDataClient>, + expected_length: u64, +) { + // Get the number of sent requests + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + let num_sent_requests = sent_requests.as_ref().unwrap().len() as u64; + + // Verify the number of sent requests + assert_eq!(num_sent_requests, expected_length); +} + /// Verifies that a single pending optimistic fetch exists and /// that it is for the correct data. fn verify_pending_optimistic_fetch( @@ -2548,8 +3273,7 @@ known_version_offset: u64, ) { // Verify a single request is pending - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 1); + verify_num_sent_requests(data_stream, 1); // Verify the request is for the correct data let client_request = get_pending_client_request(data_stream, 0); @@ -2578,10 +3302,14 @@ /// Verifies that the pending requests are fulfilled for the specified indices fn verify_pending_responses_for_indices( - sent_requests: &mut Option<VecDeque<Arc<Mutex<Box<PendingClientResponse>>>>>, + data_stream: &mut DataStream<MockAptosDataClient>, max_queue_length: u64, indices: Vec<u64>, ) { + // Get the sent requests queue + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + + // Verify the client responses for the specified indices for index in 0..max_queue_length { // Get the client response let sent_request = sent_requests.as_ref().unwrap().get(index as usize); @@ -2613,11 +3341,7 @@ fn verify_pending_subscription_requests( known_version_offset: u64, ) { // Verify the correct number of pending requests - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!( - sent_requests.as_ref().unwrap().len(), - max_concurrent_requests as usize - ); + verify_num_sent_requests(data_stream, max_concurrent_requests); // Verify the pending requests are for the correct data and correctly formed for request_index in 0..max_concurrent_requests { diff --git a/state-sync/state-sync-driver/src/bootstrapper.rs index a5ffa83585459..cadcc15bc134b 100644 --- a/state-sync/state-sync-driver/src/bootstrapper.rs +++ 
b/state-sync/state-sync-driver/src/bootstrapper.rs @@ -1030,6 +1030,7 @@ impl< if let Err(error) = self .storage_synchronizer .save_state_values(notification_id, state_value_chunk_with_proof) + .await { self.reset_active_stream(Some(NotificationAndFeedback::new( notification_id, diff --git a/state-sync/state-sync-driver/src/storage_synchronizer.rs b/state-sync/state-sync-driver/src/storage_synchronizer.rs index 8cb49bee773cb..c033912923df2 100644 --- a/state-sync/state-sync-driver/src/storage_synchronizer.rs +++ b/state-sync/state-sync-driver/src/storage_synchronizer.rs @@ -96,7 +96,7 @@ pub trait StorageSynchronizerInterface { /// /// Note: this requires that `initialize_state_synchronizer` has been /// called. - fn save_state_values( + async fn save_state_values( &mut self, notification_id: NotificationId, state_value_chunk_with_proof: StateValueChunkWithProof, @@ -403,17 +403,20 @@ impl< load_pending_data_chunks(self.pending_data_chunks.clone()) > 0 } - fn save_state_values( + async fn save_state_values( &mut self, notification_id: NotificationId, state_value_chunk_with_proof: StateValueChunkWithProof, ) -> Result<(), Error> { + // Get the snapshot notifier and create the storage data chunk let state_snapshot_notifier = self.state_snapshot_notifier.as_mut().ok_or_else(|| { Error::UnexpectedError("The state snapshot receiver has not been initialized!".into()) })?; let storage_data_chunk = StorageDataChunk::States(notification_id, state_value_chunk_with_proof); - if let Err(error) = state_snapshot_notifier.try_send(storage_data_chunk) { + + // Notify the snapshot receiver of the storage data chunk + if let Err(error) = state_snapshot_notifier.send(storage_data_chunk).await { Err(Error::UnexpectedError(format!( "Failed to send storage data chunk to state snapshot listener: {:?}", error diff --git a/state-sync/state-sync-driver/src/tests/mocks.rs b/state-sync/state-sync-driver/src/tests/mocks.rs index d0e587ae9db55..a8ff93ae9814c 100644 --- a/state-sync/state-sync-driver/src/tests/mocks.rs +++ b/state-sync/state-sync-driver/src/tests/mocks.rs @@ -475,7 +475,7 @@ mock! 
{ fn pending_storage_data(&self) -> bool; - fn save_state_values( + async fn save_state_values( &mut self, notification_id: NotificationId, state_value_chunk_with_proof: StateValueChunkWithProof, diff --git a/state-sync/state-sync-driver/src/tests/storage_synchronizer.rs b/state-sync/state-sync-driver/src/tests/storage_synchronizer.rs index b5c7cff24a15e..c4b5ecfc82fa8 100644 --- a/state-sync/state-sync-driver/src/tests/storage_synchronizer.rs +++ b/state-sync/state-sync-driver/src/tests/storage_synchronizer.rs @@ -750,9 +750,11 @@ async fn test_save_states_completion() { // Save multiple state chunks (including the last chunk) storage_synchronizer .save_state_values(0, create_state_value_chunk_with_proof(false)) + .await .unwrap(); storage_synchronizer .save_state_values(1, create_state_value_chunk_with_proof(true)) + .await .unwrap(); // Verify we get a commit notification @@ -808,6 +810,7 @@ async fn test_save_states_dropped_error_listener() { let notification_id = 0; storage_synchronizer .save_state_values(notification_id, create_state_value_chunk_with_proof(true)) + .await .unwrap(); // The handler should panic as the commit listener was dropped @@ -849,13 +852,14 @@ async fn test_save_states_invalid_chunk() { let notification_id = 0; storage_synchronizer .save_state_values(notification_id, create_state_value_chunk_with_proof(false)) + .await .unwrap(); verify_error_notification(&mut error_listener, notification_id).await; } -#[test] +#[tokio::test] #[should_panic] -fn test_save_states_without_initialize() { +async fn test_save_states_without_initialize() { // Create the storage synchronizer let (_, _, _, _, _, mut storage_synchronizer, _) = create_storage_synchronizer( create_mock_executor(), @@ -864,7 +868,10 @@ fn test_save_states_without_initialize() { // Attempting to save the states should panic as the state // synchronizer was not initialized! 
- let _ = storage_synchronizer.save_state_values(0, create_state_value_chunk_with_proof(false)); + storage_synchronizer + .save_state_values(0, create_state_value_chunk_with_proof(false)) + .await + .unwrap(); } /// Creates a storage synchronizer for testing diff --git a/testsuite/generate-format/src/api.rs b/testsuite/generate-format/src/api.rs index 4561abc668071..b86e2174a2148 100644 --- a/testsuite/generate-format/src/api.rs +++ b/testsuite/generate-format/src/api.rs @@ -114,7 +114,7 @@ pub fn get_registry() -> Result { tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; - tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; // events tracer.trace_type::(&samples)?; diff --git a/testsuite/generate-format/src/aptos.rs b/testsuite/generate-format/src/aptos.rs index 09cbea25c66c7..77746a32aa8ca 100644 --- a/testsuite/generate-format/src/aptos.rs +++ b/testsuite/generate-format/src/aptos.rs @@ -108,7 +108,7 @@ pub fn get_registry() -> Result { tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; - tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; // aliases within StructTag diff --git a/testsuite/generate-format/src/consensus.rs b/testsuite/generate-format/src/consensus.rs index c3a025a103418..e3eb234d62c03 100644 --- a/testsuite/generate-format/src/consensus.rs +++ b/testsuite/generate-format/src/consensus.rs @@ -104,7 +104,7 @@ pub fn get_registry() -> Result { tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; - tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; diff --git a/testsuite/generate-format/tests/staged/api.yaml b/testsuite/generate-format/tests/staged/api.yaml index 3d9845c2dc130..c1c6094be6c25 100644 --- a/testsuite/generate-format/tests/staged/api.yaml +++ b/testsuite/generate-format/tests/staged/api.yaml @@ -76,10 +76,10 @@ AnyPublicKey: - public_key: TYPENAME: Secp256r1EcdsaPublicKey 3: - ZkId: + OIDB: STRUCT: - public_key: - TYPENAME: ZkIdPublicKey + TYPENAME: OidbPublicKey AnySignature: ENUM: 0: @@ -98,10 +98,10 @@ AnySignature: - signature: TYPENAME: PartialAuthenticatorAssertionResponse 3: - ZkId: + OIDB: STRUCT: - signature: - TYPENAME: ZkIdSignature + TYPENAME: OidbSignature AssertionSignature: ENUM: 0: @@ -340,10 +340,25 @@ MultisigTransactionPayload: EntryFunction: NEWTYPE: TYPENAME: EntryFunction +OidbPublicKey: + STRUCT: + - iss_val: STR + - idc: + TYPENAME: IdCommitment +OidbSignature: + STRUCT: + - sig: + TYPENAME: ZkpOrOpenIdSig + - jwt_header_b64: STR + - exp_timestamp_secs: U64 + - ephemeral_pubkey: + TYPENAME: EphemeralPublicKey + - ephemeral_signature: + TYPENAME: EphemeralSignature OpenIdSig: STRUCT: - - jwt_sig: STR - - jwt_payload: STR + - jwt_sig_b64: STR + - jwt_payload_b64: STR - uid_key: STR - epk_blinder: BYTES - pepper: @@ -450,7 +465,8 @@ SignedGroth16Zkp: - non_malleability_signature: TYPENAME: EphemeralSignature - exp_horizon_secs: U64 - - extra_field: STR + - extra_field: + OPTION: STR - override_aud_val: OPTION: STR - training_wheels_signature: @@ -779,21 +795,6 @@ WriteSetPayload: WriteSetV0: NEWTYPESTRUCT: TYPENAME: WriteSetMut -ZkIdPublicKey: - STRUCT: - - iss: STR - - idc: - TYPENAME: IdCommitment -ZkIdSignature: - STRUCT: - - sig: - TYPENAME: ZkpOrOpenIdSig - - jwt_header: STR - - exp_timestamp_secs: U64 - - ephemeral_pubkey: - TYPENAME: EphemeralPublicKey - - 
ephemeral_signature: - TYPENAME: EphemeralSignature ZkpOrOpenIdSig: ENUM: 0: diff --git a/testsuite/generate-format/tests/staged/aptos.yaml b/testsuite/generate-format/tests/staged/aptos.yaml index b578f684fef83..38ad2e662914e 100644 --- a/testsuite/generate-format/tests/staged/aptos.yaml +++ b/testsuite/generate-format/tests/staged/aptos.yaml @@ -64,10 +64,10 @@ AnyPublicKey: - public_key: TYPENAME: Secp256r1EcdsaPublicKey 3: - ZkId: + OIDB: STRUCT: - public_key: - TYPENAME: ZkIdPublicKey + TYPENAME: OidbPublicKey AnySignature: ENUM: 0: @@ -86,10 +86,10 @@ AnySignature: - signature: TYPENAME: PartialAuthenticatorAssertionResponse 3: - ZkId: + OIDB: STRUCT: - signature: - TYPENAME: ZkIdSignature + TYPENAME: OidbSignature AssertionSignature: ENUM: 0: @@ -286,10 +286,25 @@ MultisigTransactionPayload: EntryFunction: NEWTYPE: TYPENAME: EntryFunction +OidbPublicKey: + STRUCT: + - iss_val: STR + - idc: + TYPENAME: IdCommitment +OidbSignature: + STRUCT: + - sig: + TYPENAME: ZkpOrOpenIdSig + - jwt_header_b64: STR + - exp_timestamp_secs: U64 + - ephemeral_pubkey: + TYPENAME: EphemeralPublicKey + - ephemeral_signature: + TYPENAME: EphemeralSignature OpenIdSig: STRUCT: - - jwt_sig: STR - - jwt_payload: STR + - jwt_sig_b64: STR + - jwt_payload_b64: STR - uid_key: STR - epk_blinder: BYTES - pepper: @@ -382,7 +397,8 @@ SignedGroth16Zkp: - non_malleability_signature: TYPENAME: EphemeralSignature - exp_horizon_secs: U64 - - extra_field: STR + - extra_field: + OPTION: STR - override_aud_val: OPTION: STR - training_wheels_signature: @@ -661,21 +677,6 @@ WriteSetPayload: WriteSetV0: NEWTYPESTRUCT: TYPENAME: WriteSetMut -ZkIdPublicKey: - STRUCT: - - iss: STR - - idc: - TYPENAME: IdCommitment -ZkIdSignature: - STRUCT: - - sig: - TYPENAME: ZkpOrOpenIdSig - - jwt_header: STR - - exp_timestamp_secs: U64 - - ephemeral_pubkey: - TYPENAME: EphemeralPublicKey - - ephemeral_signature: - TYPENAME: EphemeralSignature ZkpOrOpenIdSig: ENUM: 0: diff --git a/testsuite/generate-format/tests/staged/consensus.yaml b/testsuite/generate-format/tests/staged/consensus.yaml index 81fbf1c5776db..7c6d02cdfc2cd 100644 --- a/testsuite/generate-format/tests/staged/consensus.yaml +++ b/testsuite/generate-format/tests/staged/consensus.yaml @@ -70,10 +70,10 @@ AnyPublicKey: - public_key: TYPENAME: Secp256r1EcdsaPublicKey 3: - ZkId: + OIDB: STRUCT: - public_key: - TYPENAME: ZkIdPublicKey + TYPENAME: OidbPublicKey AnySignature: ENUM: 0: @@ -92,10 +92,10 @@ AnySignature: - signature: TYPENAME: PartialAuthenticatorAssertionResponse 3: - ZkId: + OIDB: STRUCT: - signature: - TYPENAME: ZkIdSignature + TYPENAME: OidbSignature AssertionSignature: ENUM: 0: @@ -564,10 +564,25 @@ MultisigTransactionPayload: EntryFunction: NEWTYPE: TYPENAME: EntryFunction +OidbPublicKey: + STRUCT: + - iss_val: STR + - idc: + TYPENAME: IdCommitment +OidbSignature: + STRUCT: + - sig: + TYPENAME: ZkpOrOpenIdSig + - jwt_header_b64: STR + - exp_timestamp_secs: U64 + - ephemeral_pubkey: + TYPENAME: EphemeralPublicKey + - ephemeral_signature: + TYPENAME: EphemeralSignature OpenIdSig: STRUCT: - - jwt_sig: STR - - jwt_payload: STR + - jwt_sig_b64: STR + - jwt_payload_b64: STR - uid_key: STR - epk_blinder: BYTES - pepper: @@ -741,7 +756,8 @@ SignedGroth16Zkp: - non_malleability_signature: TYPENAME: EphemeralSignature - exp_horizon_secs: U64 - - extra_field: STR + - extra_field: + OPTION: STR - override_aud_val: OPTION: STR - training_wheels_signature: @@ -1083,21 +1099,6 @@ WriteSetPayload: WriteSetV0: NEWTYPESTRUCT: TYPENAME: WriteSetMut -ZkIdPublicKey: - STRUCT: - - iss: STR - 
- idc: - TYPENAME: IdCommitment -ZkIdSignature: - STRUCT: - - sig: - TYPENAME: ZkpOrOpenIdSig - - jwt_header: STR - - exp_timestamp_secs: U64 - - ephemeral_pubkey: - TYPENAME: EphemeralPublicKey - - ephemeral_signature: - TYPENAME: EphemeralSignature ZkpOrOpenIdSig: ENUM: 0: diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index 338519a71a14c..5e39c09a751ea 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -85,56 +85,55 @@ class RunGroupConfig: # https://app.axiom.co/aptoslabs-hghf/explorer?qid=29zYzeVi7FX-s4ukl5&relative=1 # fmt: off TESTS = [ - RunGroupConfig(expected_tps=24000, key=RunGroupKey("no-op"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=11000, key=RunGroupKey("no-op", module_working_set_size=1000), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=15000, key=RunGroupKey("coin-transfer"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - # this was changed from 42000 to make landings not flaky, needs follow up - RunGroupConfig(expected_tps=37500, key=RunGroupKey("coin-transfer", executor_type="native"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=12800, key=RunGroupKey("account-generation"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=30000, key=RunGroupKey("account-generation", executor_type="native"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=20700, key=RunGroupKey("account-resource32-b"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=4500, key=RunGroupKey("modify-global-resource"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=16500, key=RunGroupKey("modify-global-resource", module_working_set_size=10), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=150, key=RunGroupKey("publish-package"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=2620, key=RunGroupKey( + RunGroupConfig(expected_tps=22200, key=RunGroupKey("no-op"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=11500, key=RunGroupKey("no-op", module_working_set_size=1000), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=14200, key=RunGroupKey("coin-transfer"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=37000, key=RunGroupKey("coin-transfer", executor_type="native"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=12000, key=RunGroupKey("account-generation"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=31300, key=RunGroupKey("account-generation", executor_type="native"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=19200, key=RunGroupKey("account-resource32-b"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=4170, key=RunGroupKey("modify-global-resource"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=15400, key=RunGroupKey("modify-global-resource", module_working_set_size=10), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=155, key=RunGroupKey("publish-package"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=2450, key=RunGroupKey( "mix_publish_transfer", transaction_type_override="publish-package coin-transfer", transaction_weights_override="1 500", ), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=365, 
key=RunGroupKey("batch100-transfer"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=345, key=RunGroupKey("batch100-transfer"), included_in=LAND_BLOCKING_AND_C), RunGroupConfig(expected_tps=995, key=RunGroupKey("batch100-transfer", executor_type="native"), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=165, key=RunGroupKey("vector-picture40"), included_in=Flow(0), waived=True), RunGroupConfig(expected_tps=1000, key=RunGroupKey("vector-picture40", module_working_set_size=20), included_in=Flow(0), waived=True), - RunGroupConfig(expected_tps=165, key=RunGroupKey("vector-picture30k"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=995, key=RunGroupKey("vector-picture30k", module_working_set_size=20), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=22, key=RunGroupKey("smart-table-picture30-k-with200-change"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=86, key=RunGroupKey("smart-table-picture30-k-with200-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=151, key=RunGroupKey("vector-picture30k"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=895, key=RunGroupKey("vector-picture30k", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=23, key=RunGroupKey("smart-table-picture30-k-with200-change"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=119, key=RunGroupKey("smart-table-picture30-k-with200-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), # RunGroupConfig(expected_tps=3.6, key=RunGroupKey("smart-table-picture1-m-with1-k-change"), included_in=LAND_BLOCKING_AND_C), # RunGroupConfig(expected_tps=12.8, key=RunGroupKey("smart-table-picture1-m-with1-k-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), # RunGroupConfig(expected_tps=5, key=RunGroupKey("smart-table-picture1-b-with1-k-change"), included_in=Flow(0), waived=True), # RunGroupConfig(expected_tps=10, key=RunGroupKey("smart-table-picture1-b-with1-k-change", module_working_set_size=20), included_in=Flow(0), waived=True), - RunGroupConfig(expected_tps=19500, key=RunGroupKey("modify-global-resource-agg-v2"), included_in=Flow.AGG_V2 | LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=20000, key=RunGroupKey("modify-global-resource-agg-v2"), included_in=Flow.AGG_V2 | LAND_BLOCKING_AND_C), RunGroupConfig(expected_tps=12500, key=RunGroupKey("modify-global-resource-agg-v2", module_working_set_size=50), included_in=Flow.AGG_V2), - RunGroupConfig(expected_tps=7650, key=RunGroupKey("modify-global-flag-agg-v2"), included_in=Flow.AGG_V2 | Flow.CONTINUOUS), + RunGroupConfig(expected_tps=7300, key=RunGroupKey("modify-global-flag-agg-v2"), included_in=Flow.AGG_V2 | Flow.CONTINUOUS), RunGroupConfig(expected_tps=12500, key=RunGroupKey("modify-global-flag-agg-v2", module_working_set_size=50), included_in=Flow.AGG_V2), - RunGroupConfig(expected_tps=13000, key=RunGroupKey("modify-global-bounded-agg-v2"), included_in=Flow.AGG_V2 | Flow.CONTINUOUS), + RunGroupConfig(expected_tps=12600, key=RunGroupKey("modify-global-bounded-agg-v2"), included_in=Flow.AGG_V2 | Flow.CONTINUOUS), RunGroupConfig(expected_tps=12500, key=RunGroupKey("modify-global-bounded-agg-v2", module_working_set_size=50), included_in=Flow.AGG_V2), - RunGroupConfig(expected_tps=3800, key=RunGroupKey("resource-groups-global-write-tag1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS), + RunGroupConfig(expected_tps=3600, 
key=RunGroupKey("resource-groups-global-write-tag1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-global-write-tag1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=3500, key=RunGroupKey("resource-groups-global-write-and-read-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS), + RunGroupConfig(expected_tps=3300, key=RunGroupKey("resource-groups-global-write-and-read-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-global-write-and-read-tag1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=17900, key=RunGroupKey("resource-groups-sender-write-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS), + RunGroupConfig(expected_tps=16100, key=RunGroupKey("resource-groups-sender-write-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-sender-write-tag1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=16230, key=RunGroupKey("resource-groups-sender-multi-change1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS), + RunGroupConfig(expected_tps=14000, key=RunGroupKey("resource-groups-sender-multi-change1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-sender-multi-change1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=1890, key=RunGroupKey("token-v1ft-mint-and-transfer"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=9250, key=RunGroupKey("token-v1ft-mint-and-transfer", module_working_set_size=20), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=1100, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=6100, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=1740, key=RunGroupKey("token-v1ft-mint-and-transfer"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=8520, key=RunGroupKey("token-v1ft-mint-and-transfer", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=1050, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=5740, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential", module_working_set_size=20), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=1300, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel"), included_in=Flow(0)), RunGroupConfig(expected_tps=5300, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel", module_working_set_size=20), included_in=Flow(0)), @@ -142,10 +141,10 @@ class RunGroupConfig: # RunGroupConfig(expected_tps=1000, key=RunGroupKey("token-v1nft-mint-and-store-sequential"), included_in=Flow(0)), # RunGroupConfig(expected_tps=1000, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel"), included_in=Flow(0)), - RunGroupConfig(expected_tps=23500, key=RunGroupKey("no-op5-signers"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=22200, key=RunGroupKey("no-op5-signers"), included_in=Flow.CONTINUOUS), - 
RunGroupConfig(expected_tps=6800, key=RunGroupKey("token-v2-ambassador-mint"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=6800, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=20), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=6840, key=RunGroupKey("token-v2-ambassador-mint"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=6800, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=20), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_connected_components", executor_type="sharded", sharding_traffic_flags="--connected-tx-grps 5000", transaction_type_override=""), included_in=Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded", sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.REPRESENTATIVE), @@ -154,8 +153,7 @@ class RunGroupConfig: RunGroupConfig(expected_tps=29000 if NUM_ACCOUNTS < 5000000 else 20000, key=RunGroupKey("coin-transfer", smaller_working_set=True), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), RunGroupConfig(expected_tps=23000 if NUM_ACCOUNTS < 5000000 else 15000, key=RunGroupKey("account-generation"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), RunGroupConfig(expected_tps=130 if NUM_ACCOUNTS < 5000000 else 60, key=RunGroupKey("publish-package"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), - RunGroupConfig(expected_tps=1500 if NUM_ACCOUNTS < 5000000 else 1500, key=RunGroupKey("token-v2-ambassador-mint"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), - RunGroupConfig(expected_tps=12000 if NUM_ACCOUNTS < 5000000 else 7000, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=100), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), + RunGroupConfig(expected_tps=12000 if NUM_ACCOUNTS < 5000000 else 6800, key=RunGroupKey("token-v2-ambassador-mint"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), RunGroupConfig(expected_tps=35000 if NUM_ACCOUNTS < 5000000 else 28000, key=RunGroupKey("coin_transfer_connected_components", executor_type="sharded", sharding_traffic_flags="--connected-tx-grps 5000", transaction_type_override=""), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB, waived=True), RunGroupConfig(expected_tps=27000 if NUM_ACCOUNTS < 5000000 else 23000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded", sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB, waived=True), ] diff --git a/testsuite/smoke-test/src/lib.rs b/testsuite/smoke-test/src/lib.rs index e636298404128..dfc612e30e062 100644 --- a/testsuite/smoke-test/src/lib.rs +++ b/testsuite/smoke-test/src/lib.rs @@ -29,6 +29,8 @@ mod jwks; #[cfg(test)] mod network; #[cfg(test)] +mod oidb; +#[cfg(test)] mod randomness; #[cfg(test)] mod rest_api; @@ -48,8 +50,6 @@ mod txn_broadcast; mod txn_emitter; #[cfg(test)] mod upgrade; -#[cfg(test)] -mod zkid; #[cfg(test)] mod smoke_test_environment; diff --git a/testsuite/smoke-test/src/oidb.rs b/testsuite/smoke-test/src/oidb.rs new file mode 100644 index 0000000000000..3763d13a631e8 --- /dev/null +++ b/testsuite/smoke-test/src/oidb.rs @@ -0,0 +1,345 @@ +// Copyright © Aptos Foundation + +use crate::smoke_test_environment::SwarmBuilder; +use aptos::test::CliTestFramework; +use aptos_cached_packages::aptos_stdlib; +use aptos_crypto::{ 
+ ed25519::{Ed25519PrivateKey, Ed25519PublicKey}, + SigningKey, Uniform, +}; +use aptos_forge::{AptosPublicInfo, LocalSwarm, NodeExt, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_rest_client::Client; +use aptos_types::{ + jwks::{ + jwk::{JWKMoveStruct, JWK}, + AllProvidersJWKs, PatchedJWKs, ProviderJWKs, + }, + oidb::{ + test_utils::{ + get_sample_esk, get_sample_iss, get_sample_jwk, get_sample_oidb_groth16_sig_and_pk, + get_sample_oidb_openid_sig_and_pk, + }, + Configuration, Groth16VerificationKey, OidbPublicKey, OidbSignature, ZkpOrOpenIdSig, + }, + transaction::{ + authenticator::{AnyPublicKey, EphemeralSignature}, + SignedTransaction, + }, +}; +use move_core_types::account_address::AccountAddress; +use rand::thread_rng; +use std::time::Duration; + +// TODO(oidb): Test the override aud_val path + +#[tokio::test] +async fn test_oidb_oidc_txn_verifies() { + let (_, _, mut swarm, signed_txn) = + get_oidb_transaction(get_sample_oidb_openid_sig_and_pk).await; + + info!("Submit OpenID transaction"); + let result = swarm + .aptos_public_info() + .client() + .submit_without_serializing_response(&signed_txn) + .await; + + if let Err(e) = result { + panic!("Error with OpenID TXN verification: {:?}", e) + } +} + +#[tokio::test] +async fn test_oidb_oidc_txn_with_bad_jwt_sig() { + let (tw_sk, mut swarm) = setup_local_net().await; + let (mut oidb_sig, oidb_pk) = get_sample_oidb_openid_sig_and_pk(); + + match &mut oidb_sig.sig { + ZkpOrOpenIdSig::Groth16Zkp(_) => panic!("Internal inconsistency"), + ZkpOrOpenIdSig::OpenIdSig(openid_sig) => { + openid_sig.jwt_sig_b64 = "bad signature".to_string() // Mauling the signature + }, + } + + let mut info = swarm.aptos_public_info(); + let signed_txn = sign_oidb_transaction(&mut info, oidb_sig, oidb_pk, &tw_sk).await; + + info!("Submit OpenID transaction with bad JWT signature"); + let result = info + .client() + .submit_without_serializing_response(&signed_txn) + .await; + + if result.is_ok() { + panic!("OpenID TXN with bad JWT signature should have failed verification") + } +} + +#[tokio::test] +async fn test_oidb_oidc_txn_with_expired_epk() { + let (tw_sk, mut swarm) = setup_local_net().await; + let (mut oidb_sig, oidb_pk) = get_sample_oidb_openid_sig_and_pk(); + + oidb_sig.exp_timestamp_secs = 1; // This should fail the verification since the expiration date is way in the past + + let mut info = swarm.aptos_public_info(); + let signed_txn = sign_oidb_transaction(&mut info, oidb_sig, oidb_pk, &tw_sk).await; + + info!("Submit OpenID transaction with expired EPK"); + let result = info + .client() + .submit_without_serializing_response(&signed_txn) + .await; + + if result.is_ok() { + panic!("OpenID TXN with expired EPK should have failed verification") + } +} + +#[tokio::test] +async fn test_oidb_groth16_verifies() { + let (_, _, mut swarm, signed_txn) = + get_oidb_transaction(get_sample_oidb_groth16_sig_and_pk).await; + + info!("Submit OIDB Groth16 transaction"); + let result = swarm + .aptos_public_info() + .client() + .submit_without_serializing_response(&signed_txn) + .await; + + if let Err(e) = result { + panic!("Error with OIDB Groth16 TXN verification: {:?}", e) + } +} + +#[tokio::test] +async fn test_oidb_groth16_with_mauled_proof() { + let (tw_sk, mut swarm) = setup_local_net().await; + let (mut oidb_sig, oidb_pk) = get_sample_oidb_groth16_sig_and_pk(); + + match &mut oidb_sig.sig { + ZkpOrOpenIdSig::Groth16Zkp(proof) => { + proof.non_malleability_signature = + EphemeralSignature::ed25519(tw_sk.sign(&proof.proof).unwrap()); // bad 
signature using the TW SK rather than the ESK + }, + ZkpOrOpenIdSig::OpenIdSig(_) => panic!("Internal inconsistency"), + } + + let mut info = swarm.aptos_public_info(); + let signed_txn = sign_oidb_transaction(&mut info, oidb_sig, oidb_pk, &tw_sk).await; + + info!("Submit OIDB Groth16 transaction"); + let result = info + .client() + .submit_without_serializing_response(&signed_txn) + .await; + + if result.is_ok() { + panic!("OIDB Groth16 TXN with mauled proof should have failed verification") + } +} + +#[tokio::test] +async fn test_oidb_groth16_with_bad_tw_signature() { + let (_tw_sk, mut swarm) = setup_local_net().await; + let (oidb_sig, oidb_pk) = get_sample_oidb_groth16_sig_and_pk(); + + let mut info = swarm.aptos_public_info(); + + // using the sample ESK rather than the TW SK to get a bad training wheels signature + let signed_txn = sign_oidb_transaction(&mut info, oidb_sig, oidb_pk, &get_sample_esk()).await; + + info!("Submit OIDB Groth16 transaction"); + let result = info + .client() + .submit_without_serializing_response(&signed_txn) + .await; + + if result.is_ok() { + panic!( + "OIDB Groth16 TXN with bad training wheels signature should have failed verification" + ) + } +} + +async fn sign_oidb_transaction<'a>( + info: &mut AptosPublicInfo<'a>, + mut oidb_sig: OidbSignature, + oidb_pk: OidbPublicKey, + tw_sk: &Ed25519PrivateKey, +) -> SignedTransaction { + let oidb_addr = info + .create_user_account_with_any_key(&AnyPublicKey::oidb(oidb_pk.clone())) + .await + .unwrap(); + info.mint(oidb_addr, 10_000_000_000).await.unwrap(); + + let recipient = info + .create_and_fund_user_account(20_000_000_000) + .await + .unwrap(); + + let raw_txn = info + .transaction_factory() + .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) + .sender(oidb_addr) + .sequence_number(1) + .build(); + + let esk = get_sample_esk(); + oidb_sig.ephemeral_signature = EphemeralSignature::ed25519(esk.sign(&raw_txn).unwrap()); + + // Compute the training wheels signature if not present + match &mut oidb_sig.sig { + ZkpOrOpenIdSig::Groth16Zkp(proof) => { + proof.training_wheels_signature = Some(EphemeralSignature::ed25519( + tw_sk.sign(&proof.proof).unwrap(), + )); + }, + ZkpOrOpenIdSig::OpenIdSig(_) => {}, + } + + SignedTransaction::new_oidb(raw_txn, oidb_pk, oidb_sig) +} + +async fn get_oidb_transaction( + get_pk_and_sig_func: fn() -> (OidbSignature, OidbPublicKey), +) -> (OidbSignature, OidbPublicKey, LocalSwarm, SignedTransaction) { + let (tw_sk, mut swarm) = setup_local_net().await; + + let (oidb_sig, oidb_pk) = get_pk_and_sig_func(); + + let mut info = swarm.aptos_public_info(); + let signed_txn = + sign_oidb_transaction(&mut info, oidb_sig.clone(), oidb_pk.clone(), &tw_sk).await; + + (oidb_sig, oidb_pk, swarm, signed_txn) +} + +async fn setup_local_net() -> (Ed25519PrivateKey, LocalSwarm) { + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(1) + .with_aptos() + .build_with_cli(0) + .await; + + let tw_sk = spawn_network_and_execute_gov_proposals(&mut swarm, &mut cli).await; + (tw_sk, swarm) +} + +async fn spawn_network_and_execute_gov_proposals( + swarm: &mut LocalSwarm, + cli: &mut CliTestFramework, +) -> Ed25519PrivateKey { + let client = swarm.validators().next().unwrap().rest_client(); + let root_idx = cli.add_account_with_address_to_cli( + swarm.root_key(), + swarm.chain_info().root_account().address(), + ); + swarm + .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(60)) + .await + .expect("Epoch 2 taking too long to come!"); + + let maybe_response = client + 
.get_account_resource_bcs::<Groth16VerificationKey>( + AccountAddress::ONE, + "0x1::openid_account::Groth16VerificationKey", + ) + .await; + let vk = maybe_response.unwrap().into_inner(); + println!("Groth16 VK: {:?}", vk); + + let maybe_response = client + .get_account_resource_bcs::<Configuration>( + AccountAddress::ONE, + "0x1::openid_account::Configuration", + ) + .await; + let config = maybe_response.unwrap().into_inner(); + println!("OIDB configuration before: {:?}", config); + + let iss = get_sample_iss(); + let jwk = get_sample_jwk(); + + let training_wheels_sk = Ed25519PrivateKey::generate(&mut thread_rng()); + let training_wheels_pk = Ed25519PublicKey::from(&training_wheels_sk); + + info!("Insert a JWK."); + let jwk_patch_script = format!( + r#" +script {{ +use aptos_framework::jwks; +use aptos_framework::openid_account; +use aptos_framework::aptos_governance; +use std::string::utf8; +use std::option; +fun main(core_resources: &signer) {{ + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let jwk_0 = jwks::new_rsa_jwk( + utf8(b"{}"), + utf8(b"{}"), + utf8(b"{}"), + utf8(b"{}") + ); + let patches = vector[ + jwks::new_patch_remove_all(), + jwks::new_patch_upsert_jwk(b"{}", jwk_0), + ]; + jwks::set_patches(&framework_signer, patches); + + openid_account::update_max_exp_horizon(&framework_signer, {}); + openid_account::update_training_wheels(&framework_signer, option::some(x"{}")); +}} +}} +"#, + jwk.kid, + jwk.alg, + jwk.e, + jwk.n, + iss, + Configuration::new_for_testing().max_exp_horizon_secs, + hex::encode(training_wheels_pk.to_bytes()) + ); + + let txn_summary = cli.run_script(root_idx, &jwk_patch_script).await.unwrap(); + debug!("txn_summary={:?}", txn_summary); + + info!("Use resource API to check the patch result."); + let patched_jwks = get_latest_jwkset(&client).await; + debug!("patched_jwks={:?}", patched_jwks); + + let expected_providers_jwks = AllProvidersJWKs { + entries: vec![ProviderJWKs { + issuer: iss.into_bytes(), + version: 0, + jwks: vec![JWKMoveStruct::from(JWK::RSA(jwk))], + }], + }; + assert_eq!(expected_providers_jwks, patched_jwks.jwks); + + let maybe_response = client + .get_account_resource_bcs::<Configuration>( + AccountAddress::ONE, + "0x1::openid_account::Configuration", + ) + .await; + let config = maybe_response.unwrap().into_inner(); + println!("OIDB configuration after: {:?}", config); + + let mut info = swarm.aptos_public_info(); + + // Increment sequence number since we patched a JWK + info.root_account().increment_sequence_number(); + + training_wheels_sk +} + +async fn get_latest_jwkset(rest_client: &Client) -> PatchedJWKs { + let maybe_response = rest_client + .get_account_resource_bcs::<PatchedJWKs>(AccountAddress::ONE, "0x1::jwks::PatchedJWKs") + .await; + let response = maybe_response.unwrap(); + response.into_inner() +} diff --git a/testsuite/smoke-test/src/randomness/e2e_correctness.rs b/testsuite/smoke-test/src/randomness/e2e_correctness.rs index 036db2b510941..b1e30c65bfc98 100644 --- a/testsuite/smoke-test/src/randomness/e2e_correctness.rs +++ b/testsuite/smoke-test/src/randomness/e2e_correctness.rs @@ -53,7 +53,7 @@ async fn randomness_correctness() { swarm .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) .await - .expect("Epoch 2 taking too long to arrive!"); + .expect("Epoch 3 taking too long to arrive!"); info!("Verify DKG correctness for epoch 3."); let dkg_session = get_on_chain_resource::<DKGState>(&rest_client).await; diff --git a/testsuite/smoke-test/src/zkid.rs
b/testsuite/smoke-test/src/zkid.rs deleted file mode 100644 index c735ad7a0268d..0000000000000 --- a/testsuite/smoke-test/src/zkid.rs +++ /dev/null @@ -1,623 +0,0 @@ -// Copyright © Aptos Foundation - -use crate::smoke_test_environment::SwarmBuilder; -use aptos::test::CliTestFramework; -use aptos_cached_packages::aptos_stdlib; -use aptos_crypto::{ - ed25519::{Ed25519PrivateKey, Ed25519PublicKey}, - encoding_type::EncodingType, - SigningKey, Uniform, -}; -use aptos_forge::{LocalSwarm, NodeExt, Swarm, SwarmExt}; -use aptos_logger::{debug, info}; -use aptos_rest_client::Client; -use aptos_sdk::types::{AccountKey, LocalAccount}; -use aptos_types::{ - bn254_circom::{G1Bytes, G2Bytes, Groth16VerificationKey}, - jwks::{ - jwk::{JWKMoveStruct, JWK}, - rsa::RSA_JWK, - AllProvidersJWKs, PatchedJWKs, ProviderJWKs, - }, - transaction::{ - authenticator::{AnyPublicKey, EphemeralPublicKey, EphemeralSignature}, - SignedTransaction, - }, - zkid::{ - Configuration, Groth16Zkp, IdCommitment, OpenIdSig, Pepper, SignedGroth16Zkp, - ZkIdPublicKey, ZkIdSignature, ZkpOrOpenIdSig, - }, -}; -use move_core_types::account_address::AccountAddress; -use rand::thread_rng; -use std::time::Duration; - -// TODO(zkid): test the override aud_val path -// TODO(zkid): These tests are not modular and they lack instructions for how to regenerate the proofs. - -#[tokio::test] -async fn test_zkid_oidc_signature_transaction_submission() { - let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_aptos() - .build_with_cli(0) - .await; - let _ = test_setup(&mut swarm, &mut cli).await; - - let mut info = swarm.aptos_public_info(); - - let pepper = Pepper::new([0u8; 31]); - let idc = - IdCommitment::new_from_preimage(&pepper, "test_client_id", "sub", "test_account").unwrap(); - let sender_zkid_public_key = ZkIdPublicKey { - iss: "https://accounts.google.com".to_owned(), - idc, - }; - let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); - let account_address = info - .create_user_account_with_any_key(&sender_any_public_key) - .await - .unwrap(); - info.mint(account_address, 10_000_000_000).await.unwrap(); - - let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex - .decode_key( - "zkid test ephemeral private key", - "0x1111111111111111111111111111111111111111111111111111111111111111" - .as_bytes() - .to_vec(), - ) - .unwrap(); - let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( - account_address, - AccountKey::from_private_key(ephemeral_private_key), - 0, - ); - let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); - - let recipient = info - .create_and_fund_user_account(20_000_000_000) - .await - .unwrap(); - - let raw_txn = info - .transaction_factory() - .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) - .sender(account_address) - .sequence_number(1) - .build(); - - let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); - let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); - - let epk_blinder = vec![0u8; 31]; - let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - let jwt_payload = 
"eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiIxMzIwMTc1NTc0Njg5NjI2Mjk1MjE1NjI0NDQ5OTc3ODc4Njk5NzE5Njc3NzE0MzIzOTg5Njk3NzczODY0NTIzOTkwMzIyNzI4MjE2IiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcyNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); - let jwt_sig = "W4-yUKHhM7HYYhELuP9vfRH1D2IgcSSxz397SMz4u04WfLW3mBrmsaZ0QBgUwy33I7ZA6UoffnuUN8M8koXjfFMv0AfTgkQNJCg0X7cPCIn0WplONF6i4ACWUZjX_fSg36y5cRLDBv4pMOOMEI_eGyMt2tOoNZ2Fik1k-AXsyVNV-mqBtzblhdiGpy0bBgvcrMvJiBfe-AJazv-W3Ik5M0OeZB12YbQDHQSMTjhPEnADn6gmgsERBKbaGO8ieKW0v2Ukb3yqIy7PtdM44wJ0E_u2_tyqffmm6VoH6zaiFHgvEqfT7IM1w8_8k7nk2M9rT__o2A0cGWsYzhw3Mxs1Xw".to_string(); - - let openid_signature = OpenIdSig { - jwt_sig, - jwt_payload, - uid_key: "sub".to_string(), - epk_blinder, - pepper, - idc_aud_val: None, - }; - - let zk_sig = ZkIdSignature { - sig: ZkpOrOpenIdSig::OpenIdSig(openid_signature), - jwt_header, - exp_timestamp_secs: 1727812836, - ephemeral_pubkey: ephemeral_public_key, - ephemeral_signature, - }; - - let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); - - info!("Submit openid transaction"); - info.client() - .submit_without_serializing_response(&signed_txn) - .await - .unwrap(); -} - -#[tokio::test] -async fn test_zkid_oidc_signature_transaction_submission_fails_jwt_verification() { - let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_aptos() - .build_with_cli(0) - .await; - let _ = test_setup(&mut swarm, &mut cli).await; - let mut info = swarm.aptos_public_info(); - - let pepper = Pepper::new([0u8; 31]); - let idc = - IdCommitment::new_from_preimage(&pepper, "test_client_id", "sub", "test_account").unwrap(); - let sender_zkid_public_key = ZkIdPublicKey { - iss: "https://accounts.google.com".to_owned(), - idc, - }; - let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); - let account_address = info - .create_user_account_with_any_key(&sender_any_public_key) - .await - .unwrap(); - info.mint(account_address, 10_000_000_000).await.unwrap(); - - let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex - .decode_key( - "zkid test ephemeral private key", - "0x1111111111111111111111111111111111111111111111111111111111111111" - .as_bytes() - .to_vec(), - ) - .unwrap(); - let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( - account_address, - AccountKey::from_private_key(ephemeral_private_key), - 0, - ); - let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); - - let recipient = info - .create_and_fund_user_account(20_000_000_000) - .await - .unwrap(); - - let raw_txn = info - .transaction_factory() - .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) - .sender(account_address) - .sequence_number(1) - .build(); - - let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); - let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); - - let epk_blinder = vec![0u8; 31]; - let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - let jwt_payload = 
"eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiIxMzIwMTc1NTc0Njg5NjI2Mjk1MjE1NjI0NDQ5OTc3ODc4Njk5NzE5Njc3NzE0MzIzOTg5Njk3NzczODY0NTIzOTkwMzIyNzI4MjE2IiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcyNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); - let jwt_sig = "bad_signature".to_string(); - - let openid_signature = OpenIdSig { - jwt_sig, - jwt_payload, - uid_key: "sub".to_string(), - epk_blinder, - pepper, - idc_aud_val: None, - }; - - let zk_sig = ZkIdSignature { - sig: ZkpOrOpenIdSig::OpenIdSig(openid_signature), - jwt_header, - exp_timestamp_secs: 1727812836, - ephemeral_pubkey: ephemeral_public_key, - ephemeral_signature, - }; - - let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); - - info!("Submit openid transaction"); - let _err = info - .client() - .submit_without_serializing_response(&signed_txn) - .await - .unwrap_err(); -} - -#[tokio::test] -async fn test_zkid_oidc_signature_transaction_submission_epk_expired() { - let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_aptos() - .build_with_cli(0) - .await; - let _ = test_setup(&mut swarm, &mut cli).await; - let mut info = swarm.aptos_public_info(); - - let pepper = Pepper::new([0u8; 31]); - let idc = - IdCommitment::new_from_preimage(&pepper, "test_client_id", "sub", "test_account").unwrap(); - let sender_zkid_public_key = ZkIdPublicKey { - iss: "https://accounts.google.com".to_owned(), - idc, - }; - let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); - let account_address = info - .create_user_account_with_any_key(&sender_any_public_key) - .await - .unwrap(); - info.mint(account_address, 10_000_000_000).await.unwrap(); - - let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex - .decode_key( - "zkid test ephemeral private key", - "0x1111111111111111111111111111111111111111111111111111111111111111" - .as_bytes() - .to_vec(), - ) - .unwrap(); - let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( - account_address, - AccountKey::from_private_key(ephemeral_private_key), - 0, - ); - let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); - - let recipient = info - .create_and_fund_user_account(20_000_000_000) - .await - .unwrap(); - - let raw_txn = info - .transaction_factory() - .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) - .sender(account_address) - .sequence_number(1) - .build(); - - let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); - let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); - - let epk_blinder = vec![0u8; 31]; - let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - let jwt_payload = "eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiIxMzIwMTc1NTc0Njg5NjI2Mjk1MjE1NjI0NDQ5OTc3ODc4Njk5NzE5Njc3NzE0MzIzOTg5Njk3NzczODY0NTIzOTkwMzIyNzI4MjE2IiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcyNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); - let jwt_sig = 
"W4-yUKHhM7HYYhELuP9vfRH1D2IgcSSxz397SMz4u04WfLW3mBrmsaZ0QBgUwy33I7ZA6UoffnuUN8M8koXjfFMv0AfTgkQNJCg0X7cPCIn0WplONF6i4ACWUZjX_fSg36y5cRLDBv4pMOOMEI_eGyMt2tOoNZ2Fik1k-AXsyVNV-mqBtzblhdiGpy0bBgvcrMvJiBfe-AJazv-W3Ik5M0OeZB12YbQDHQSMTjhPEnADn6gmgsERBKbaGO8ieKW0v2Ukb3yqIy7PtdM44wJ0E_u2_tyqffmm6VoH6zaiFHgvEqfT7IM1w8_8k7nk2M9rT__o2A0cGWsYzhw3Mxs1Xw".to_string(); - - let openid_signature = OpenIdSig { - jwt_sig, - jwt_payload, - uid_key: "sub".to_string(), - epk_blinder, - pepper, - idc_aud_val: None, - }; - - let zk_sig = ZkIdSignature { - sig: ZkpOrOpenIdSig::OpenIdSig(openid_signature), - jwt_header, - exp_timestamp_secs: 1, // Expired timestamp - ephemeral_pubkey: ephemeral_public_key, - ephemeral_signature, - }; - - let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); - - info!("Submit openid transaction"); - let _err = info - .client() - .submit_without_serializing_response(&signed_txn) - .await - .unwrap_err(); -} - -#[tokio::test] -async fn test_zkid_groth16_verifies() { - let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_aptos() - .build_with_cli(0) - .await; - let tw_sk = test_setup(&mut swarm, &mut cli).await; - let mut info = swarm.aptos_public_info(); - - let pepper = Pepper::from_number(76); - let idc = IdCommitment::new_from_preimage( - &pepper, - "407408718192.apps.googleusercontent.com", - "sub", - "113990307082899718775", - ) - .unwrap(); - let sender_zkid_public_key = ZkIdPublicKey { - iss: "https://accounts.google.com".to_owned(), - idc, - }; - let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); - let account_address = info - .create_user_account_with_any_key(&sender_any_public_key) - .await - .unwrap(); - info.mint(account_address, 10_000_000_000).await.unwrap(); - - let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex - .decode_key( - "zkid test ephemeral private key", - "0x76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7" - .as_bytes() - .to_vec(), - ) - .unwrap(); - let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( - account_address, - AccountKey::from_private_key(ephemeral_private_key), - 0, - ); - let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); - - let recipient = info - .create_and_fund_user_account(20_000_000_000) - .await - .unwrap(); - - let raw_txn = info - .transaction_factory() - .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) - .sender(account_address) - .sequence_number(1) - .build(); - - let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); - let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); - - let a = G1Bytes::new_unchecked( - "20534193224874816823038374805971256353897254359389549519579800571198905682623", - "3128047629776327625062258700337193014005673411952335683536865294076478098678", - ) - .unwrap(); - let b = G2Bytes::new_unchecked( - [ - "11831059544281359959902363827760224027191828999098259913907764686593049260801", - "14933419822301565783764657928814181728459886670248956535955133596731082875810", - ], - [ - "16616167200367085072660100259194052934821478809307596510515652443339946625933", - "1103855954970567341442645156173756328940907403537523212700521414512165362008", - ], - ) - .unwrap(); - let c = G1Bytes::new_unchecked( - "296457556259014920933232985275282694032456344171046224944953719399946325676", - "10314488872240559867545387237625153841351761679810222583912967187658678987385", - ) - .unwrap(); - let proof = 
Groth16Zkp::new(a, b, c); - - let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - - let proof_sig = ephemeral_account.private_key().sign(&proof).unwrap(); - let ephem_proof_sig = EphemeralSignature::ed25519(proof_sig); - - // TODO(zkid): Refactor tests to be modular and add test for bad training wheels signature (commented out below). - //let bad_sk = Ed25519PrivateKey::generate(&mut thread_rng()); - let config = Configuration::new_for_devnet_and_testing(); - let zk_sig = ZkIdSignature { - sig: ZkpOrOpenIdSig::Groth16Zkp(SignedGroth16Zkp { - proof: proof.clone(), - non_malleability_signature: ephem_proof_sig, - extra_field: "\"family_name\":\"Straka\",".to_string(), - exp_horizon_secs: config.max_exp_horizon_secs, - override_aud_val: None, - training_wheels_signature: Some(EphemeralSignature::ed25519( - tw_sk.sign(&proof).unwrap(), - )), - }), - jwt_header, - exp_timestamp_secs: 1900255944, - ephemeral_pubkey: ephemeral_public_key, - ephemeral_signature, - }; - - let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); - - info!("Submit zero knowledge transaction"); - let result = info - .client() - .submit_without_serializing_response(&signed_txn) - .await; - - if let Err(e) = result { - panic!("Error with Groth16 TXN verification: {:?}", e) - } -} - -#[tokio::test] -async fn test_zkid_groth16_signature_transaction_submission_proof_signature_check_fails() { - let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_aptos() - .build_with_cli(0) - .await; - let tw_sk = test_setup(&mut swarm, &mut cli).await; - let mut info = swarm.aptos_public_info(); - - let pepper = Pepper::from_number(76); - let idc = IdCommitment::new_from_preimage( - &pepper, - "407408718192.apps.googleusercontent.com", - "sub", - "113990307082899718775", - ) - .unwrap(); - let sender_zkid_public_key = ZkIdPublicKey { - iss: "https://accounts.google.com".to_owned(), - idc, - }; - let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); - let account_address = info - .create_user_account_with_any_key(&sender_any_public_key) - .await - .unwrap(); - info.mint(account_address, 10_000_000_000).await.unwrap(); - - let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex - .decode_key( - "zkid test ephemeral private key", - "0x76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7" - .as_bytes() - .to_vec(), - ) - .unwrap(); - let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( - account_address, - AccountKey::from_private_key(ephemeral_private_key), - 0, - ); - let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); - - let recipient = info - .create_and_fund_user_account(20_000_000_000) - .await - .unwrap(); - - let raw_txn = info - .transaction_factory() - .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) - .sender(account_address) - .sequence_number(1) - .build(); - - let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); - let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); - - let a = G1Bytes::new_unchecked( - "20534193224874816823038374805971256353897254359389549519579800571198905682623", - "3128047629776327625062258700337193014005673411952335683536865294076478098678", - ) - .unwrap(); - let b = G2Bytes::new_unchecked( - [ - "11831059544281359959902363827760224027191828999098259913907764686593049260801", - 
"14933419822301565783764657928814181728459886670248956535955133596731082875810", - ], - [ - "16616167200367085072660100259194052934821478809307596510515652443339946625933", - "1103855954970567341442645156173756328940907403537523212700521414512165362008", - ], - ) - .unwrap(); - let c = G1Bytes::new_unchecked( - "296457556259014920933232985275282694032456344171046224944953719399946325676", - "10314488872240559867545387237625153841351761679810222583912967187658678987385", - ) - .unwrap(); - let proof = Groth16Zkp::new(a, b, c); - - let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - - let config = Configuration::new_for_devnet_and_testing(); - let zk_sig = ZkIdSignature { - sig: ZkpOrOpenIdSig::Groth16Zkp(SignedGroth16Zkp { - proof: proof.clone(), - non_malleability_signature: ephemeral_signature.clone(), // Wrong signature - extra_field: "\"family_name\":\"Straka\",".to_string(), - exp_horizon_secs: config.max_exp_horizon_secs, - override_aud_val: None, - training_wheels_signature: Some(EphemeralSignature::ed25519( - tw_sk.sign(&proof).unwrap(), - )), - }), - jwt_header, - exp_timestamp_secs: 1900255944, - ephemeral_pubkey: ephemeral_public_key, - ephemeral_signature, - }; - - let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); - - info!("Submit zero knowledge transaction"); - info.client() - .submit_without_serializing_response(&signed_txn) - .await - .unwrap_err(); -} - -async fn test_setup(swarm: &mut LocalSwarm, cli: &mut CliTestFramework) -> Ed25519PrivateKey { - let client = swarm.validators().next().unwrap().rest_client(); - let root_idx = cli.add_account_with_address_to_cli( - swarm.root_key(), - swarm.chain_info().root_account().address(), - ); - swarm - .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(60)) - .await - .expect("Epoch 2 taking too long to come!"); - - let maybe_response = client - .get_account_resource_bcs::( - AccountAddress::ONE, - "0x1::zkid::Groth16VerificationKey", - ) - .await; - let vk = maybe_response.unwrap().into_inner(); - println!("Groth16 VK: {:?}", vk); - - let maybe_response = client - .get_account_resource_bcs::(AccountAddress::ONE, "0x1::zkid::Configuration") - .await; - let config = maybe_response.unwrap().into_inner(); - println!("zkID configuration: {:?}", config); - - let iss = "https://accounts.google.com"; - let jwk = RSA_JWK { - kid:"test_jwk".to_owned(), - kty:"RSA".to_owned(), - alg:"RS256".to_owned(), - e:"AQAB".to_owned(), - n:"6S7asUuzq5Q_3U9rbs-PkDVIdjgmtgWreG5qWPsC9xXZKiMV1AiV9LXyqQsAYpCqEDM3XbfmZqGb48yLhb_XqZaKgSYaC_h2DjM7lgrIQAp9902Rr8fUmLN2ivr5tnLxUUOnMOc2SQtr9dgzTONYW5Zu3PwyvAWk5D6ueIUhLtYzpcB-etoNdL3Ir2746KIy_VUsDwAM7dhrqSK8U2xFCGlau4ikOTtvzDownAMHMrfE7q1B6WZQDAQlBmxRQsyKln5DIsKv6xauNsHRgBAKctUxZG8M4QJIx3S6Aughd3RZC4Ca5Ae9fd8L8mlNYBCrQhOZ7dS0f4at4arlLcajtw".to_owned(), - }; - - let training_wheels_sk = Ed25519PrivateKey::generate(&mut thread_rng()); - let training_wheels_pk = Ed25519PublicKey::from(&training_wheels_sk); - - info!("Insert a JWK."); - let jwk_patch_script = format!( - r#" -script {{ -use aptos_framework::jwks; -use aptos_framework::zkid; -use aptos_framework::aptos_governance; -use std::string::utf8; -use std::option; -fun main(core_resources: &signer) {{ - let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); - let google_jwk_0 = jwks::new_rsa_jwk( - utf8(b"{}"), - utf8(b"RS256"), - utf8(b"AQAB"), - utf8(b"{}") - ); - let patches = 
vector[ - jwks::new_patch_remove_all(), - jwks::new_patch_upsert_jwk(b"{}", google_jwk_0), - ]; - jwks::set_patches(&framework_signer, patches); - - zkid::update_training_wheels(&framework_signer, option::some(x"{}")); -}} -}} -"#, - jwk.kid, - jwk.n, - iss, - hex::encode(training_wheels_pk.to_bytes()) - ); - - let txn_summary = cli.run_script(root_idx, &jwk_patch_script).await.unwrap(); - debug!("txn_summary={:?}", txn_summary); - - info!("Use resource API to check the patch result."); - let patched_jwks = get_latest_jwkset(&client).await; - debug!("patched_jwks={:?}", patched_jwks); - - let expected_providers_jwks = AllProvidersJWKs { - entries: vec![ProviderJWKs { - issuer: b"https://accounts.google.com".to_vec(), - version: 0, - jwks: vec![JWKMoveStruct::from(JWK::RSA(jwk))], - }], - }; - assert_eq!(expected_providers_jwks, patched_jwks.jwks); - - let mut info = swarm.aptos_public_info(); - - // Increment sequence number since we patched a JWK - info.root_account().increment_sequence_number(); - - training_wheels_sk -} - -async fn get_latest_jwkset(rest_client: &Client) -> PatchedJWKs { - let maybe_response = rest_client - .get_account_resource_bcs::<PatchedJWKs>(AccountAddress::ONE, "0x1::jwks::PatchedJWKs") - .await; - let response = maybe_response.unwrap(); - response.into_inner() -} diff --git a/third_party/move/documentation/coding_guidelines.md b/third_party/move/documentation/coding_guidelines.md index 00e5bcd30e25f..f7047e96f1094 100644 --- a/third_party/move/documentation/coding_guidelines.md +++ b/third_party/move/documentation/coding_guidelines.md @@ -39,7 +39,7 @@ We require every PR to have approval by at least two reviewers. This is enforced - Don't forget to list any issues this PR fixes (if you mention 'closes #nnn' in the PR description the issue will be automatically closed). - If you have any significant TODOs in your code, please open an issue for them. Use `TODO(#nnn)` to link the TODO to the issue. - Respond to each comment of the reviewers. If you think you resolved a request for a change, just use 'done' as a response. If you disagree with the reviewer, explain why -- it is OK to pushback, but should be justified. -- Once you are done with addressing comments, indicate this to the reviewers by adding a top-level comment to the PR. You can use 'PTLA' ('please take another look') as an acronym for this purpose. +- Once you are done with addressing comments, indicate this to the reviewers by adding a top-level comment to the PR. You can use 'PTAL' ('please take another look') as an acronym for this purpose. - If possible, avoid force push so the reviewer can see how you changed code in comparison. There are exceptions to this, for example if you need to rebase. 
### Guidelines for Reviewers of PRs diff --git a/third_party/move/move-binary-format/Cargo.toml b/third_party/move/move-binary-format/Cargo.toml index 05c6b0ccb17b2..2c04f955bbc38 100644 --- a/third_party/move/move-binary-format/Cargo.toml +++ b/third_party/move/move-binary-format/Cargo.toml @@ -31,3 +31,4 @@ serde_json = "1.0.64" [features] default = [] fuzzing = ["proptest", "proptest-derive", "arbitrary", "move-core-types/fuzzing"] +testing = [] diff --git a/third_party/move/move-binary-format/src/file_format_common.rs b/third_party/move/move-binary-format/src/file_format_common.rs index 81a02203db0c2..b5446c81260e4 100644 --- a/third_party/move/move-binary-format/src/file_format_common.rs +++ b/third_party/move/move-binary-format/src/file_format_common.rs @@ -139,7 +139,7 @@ pub enum SerializedOption { SOME = 0x2, } -/// A marker for an boolean in the serialized output. +/// A marker for a boolean in the serialized output. #[rustfmt::skip] #[allow(non_camel_case_types)] #[repr(u8)] @@ -537,8 +537,11 @@ pub(crate) mod versioned_data { }, }; if version == 0 || version > u32::min(max_version, VERSION_MAX) { - return Err(PartialVMError::new(StatusCode::UNKNOWN_VERSION)); - } else if version == VERSION_NEXT && !cfg!(test) && !cfg!(feature = "fuzzing") { + return Err(PartialVMError::new(StatusCode::UNKNOWN_VERSION) + .with_message(format!("bytecode version {} unsupported", version))); + } else if version == VERSION_NEXT + && !cfg!(any(test, feature = "testing", feature = "fuzzing")) + { return Err( PartialVMError::new(StatusCode::UNKNOWN_VERSION).with_message(format!( "bytecode version {} only allowed in test code", diff --git a/third_party/move/move-compiler-v2/src/bytecode_generator.rs b/third_party/move/move-compiler-v2/src/bytecode_generator.rs index 7ae97a5f9a779..cc087416eba8f 100644 --- a/third_party/move/move-compiler-v2/src/bytecode_generator.rs +++ b/third_party/move/move-compiler-v2/src/bytecode_generator.rs @@ -229,7 +229,7 @@ impl<'env> Generator<'env> { self.error( id, format!("cannot assign tuple type `{}` to single variable (use `(a, b, ..) = ..` instead)", - ty.display(&self.env().get_type_display_ctx())) + ty.display(&self.func_env.get_type_display_ctx())) ) } self.new_temp(ty) @@ -379,7 +379,7 @@ impl<'env> Generator<'env> { format!( "expected `&mut` but found `{}`", self.temp_type(lhs_temp) - .display(&self.env().get_type_display_ctx()), + .display(&self.func_env.get_type_display_ctx()), ), ); } @@ -588,12 +588,15 @@ impl<'env> Generator<'env> { { let err_loc = self.env().get_node_loc(id); let mut reasons: Vec<(Loc, String)> = Vec::new(); - let reason_msg = format!("Invalid call to {}.", op.display(self.env(), id)); + let reason_msg = format!( + "Invalid call to {}.", + op.display_with_fun_env(self.env(), &self.func_env, id) + ); reasons.push((err_loc.clone(), reason_msg.clone())); let err_msg = format!( "Expected a struct type. Global storage operations are restricted to struct types declared in the current module. 
\ Found: '{}'", - self.env().get_node_instantiation(id)[0].display(&self.env().get_type_display_ctx()) + self.env().get_node_instantiation(id)[0].display(&self.func_env.get_type_display_ctx()) ); self.env() .diag_with_labels(Severity::Error, &err_loc, &err_msg, reasons) @@ -1067,7 +1070,7 @@ impl<'env> Generator<'env> { format!( "expected `&mut` but found `{}`", self.temp_type(oper_temp) - .display(&self.env().get_type_display_ctx()) + .display(&self.func_env.get_type_display_ctx()) ), ) } diff --git a/third_party/move/move-compiler-v2/src/experiments.rs b/third_party/move/move-compiler-v2/src/experiments.rs index 37971120f4aea..de90d247e2165 100644 --- a/third_party/move/move-compiler-v2/src/experiments.rs +++ b/third_party/move/move-compiler-v2/src/experiments.rs @@ -26,4 +26,7 @@ impl Experiment { /// A flag which allows to turn off safety checks, like reference safety. /// Retention: permanent. pub const NO_SAFETY: &'static str = "no-safety"; + /// A flag which allows to turn on the critical edge splitting pass. + /// Retention: temporary. This should be removed after the pass can be tested. + pub const SPLIT_CRITICAL_EDGES: &'static str = "split-critical-edges"; } diff --git a/third_party/move/move-compiler-v2/src/function_checker.rs b/third_party/move/move-compiler-v2/src/function_checker.rs index 3a89a16db9fec..fb4d3e7dd7f97 100644 --- a/third_party/move/move-compiler-v2/src/function_checker.rs +++ b/third_party/move/move-compiler-v2/src/function_checker.rs @@ -121,7 +121,7 @@ pub fn check_access_and_use(env: &mut GlobalEnv, before_inlining: bool) { // Only public functions are visible from scripts. generic_error( env, - "a script", + "a script ", "it is not public", sites, &callee_func, diff --git a/third_party/move/move-compiler-v2/src/lib.rs b/third_party/move/move-compiler-v2/src/lib.rs index ad5bd791e6aec..075685aafa50c 100644 --- a/third_party/move/move-compiler-v2/src/lib.rs +++ b/third_party/move/move-compiler-v2/src/lib.rs @@ -18,6 +18,7 @@ use crate::pipeline::{ exit_state_analysis::ExitStateAnalysisProcessor, explicit_drop::ExplicitDrop, livevar_analysis_processor::LiveVarAnalysisProcessor, reference_safety_processor::ReferenceSafetyProcessor, + split_critical_edges_processor::SplitCriticalEdgesProcessor, uninitialized_use_checker::UninitializedUseChecker, unreachable_code_analysis::UnreachableCodeProcessor, unreachable_code_remover::UnreachableCodeRemover, @@ -207,6 +208,9 @@ pub fn bytecode_pipeline(env: &GlobalEnv) -> FunctionTargetPipeline { let options = env.get_extension::().expect("options"); let safety_on = !options.experiment_on(Experiment::NO_SAFETY); let mut pipeline = FunctionTargetPipeline::default(); + if options.experiment_on(Experiment::SPLIT_CRITICAL_EDGES) { + pipeline.add_processor(Box::new(SplitCriticalEdgesProcessor {})); + } if safety_on { pipeline.add_processor(Box::new(UninitializedUseChecker {})); } diff --git a/third_party/move/move-compiler-v2/src/pipeline/mod.rs b/third_party/move/move-compiler-v2/src/pipeline/mod.rs index 30b5c402338ba..1eb996661cf7a 100644 --- a/third_party/move/move-compiler-v2/src/pipeline/mod.rs +++ b/third_party/move/move-compiler-v2/src/pipeline/mod.rs @@ -20,6 +20,7 @@ pub mod exit_state_analysis; pub mod explicit_drop; pub mod livevar_analysis_processor; pub mod reference_safety_processor; +pub mod split_critical_edges_processor; pub mod uninitialized_use_checker; pub mod unreachable_code_analysis; pub mod unreachable_code_remover; diff --git 
a/third_party/move/move-compiler-v2/src/pipeline/split_critical_edges_processor.rs b/third_party/move/move-compiler-v2/src/pipeline/split_critical_edges_processor.rs new file mode 100644 index 0000000000000..ed7d11dbd5f9e --- /dev/null +++ b/third_party/move/move-compiler-v2/src/pipeline/split_critical_edges_processor.rs @@ -0,0 +1,375 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! This pass splits critical edges with empty blocks. +//! A critical edge is an edge where the source node has multiple successors, +//! and the target node has multiple predecessors. +//! +//! Side effects: clears existing annotations. +//! +//! Prerequisites: no call instructions have abort actions. +//! +//! Postconditions: no critical edges in the control flow graph. + +use log::{log_enabled, Level}; +use move_model::{ast::TempIndex, model::FunctionEnv}; +use move_stackless_bytecode::{ + function_target::FunctionData, + function_target_pipeline::{FunctionTargetProcessor, FunctionTargetsHolder}, + stackless_bytecode::{AttrId, Bytecode, Label}, + stackless_control_flow_graph::{BlockId, StacklessControlFlowGraph}, +}; +use std::collections::{BTreeMap, BTreeSet}; + +pub struct SplitCriticalEdgesProcessor {} + +impl FunctionTargetProcessor for SplitCriticalEdgesProcessor { + fn process( + &self, + _targets: &mut FunctionTargetsHolder, + fun_env: &FunctionEnv, + mut data: FunctionData, + _scc_opt: Option<&[FunctionEnv]>, + ) -> FunctionData { + if cfg!(debug_assertions) || log_enabled!(Level::Debug) { + Self::check_precondition(&data); + } + if fun_env.is_native() { + return data; + } + let mut transformer = SplitCriticalEdgesTransformation::new(std::mem::take(&mut data.code)); + transformer.transform(); + data.code = transformer.code; + data.annotations.clear(); + if cfg!(debug_assertions) || log_enabled!(Level::Debug) { + Self::check_postcondition(&data.code); + } + data + } + + fn name(&self) -> String { + "SplitCriticalEdgesProcessor".to_owned() + } +} + +impl SplitCriticalEdgesProcessor { + /// Checks the precondition of the transformation; cf. module documentation. + fn check_precondition(data: &FunctionData) { + for instr in &data.code { + if matches!(instr, Bytecode::Call(_, _, _, _, Some(_))) { + panic!("precondition violated: found call instruction with abort action") + } + } + } + + /// Checks the postcondition of the transformation; cf. module documentation. + fn check_postcondition(code: &[Bytecode]) { + let cfg = StacklessControlFlowGraph::new_forward(code); + let blocks = cfg.blocks(); + let mut pred_count: BTreeMap<BlockId, usize> = + blocks.iter().map(|block_id| (*block_id, 0)).collect(); + for block in &blocks { + // don't count the edge from the dummy start to a block as an incoming edge + if *block == cfg.entry_block() { + continue; + } + for suc_block in cfg.successors(*block) { + *pred_count + .get_mut(suc_block) + .unwrap_or_else(|| panic!("block {}", suc_block)) += 1; + } + } + for block in blocks { + let successors = cfg.successors(block); + if successors.len() > 1 { + for suc_block in successors { + assert!( + *pred_count.get(suc_block).expect("pred count") <= 1, + "{} has > 1 predecessors", + suc_block + ) + } + } + } + } +} + +struct SplitCriticalEdgesTransformation { + /// Function data of the function being transformed + code: Vec<Bytecode>, + /// Labels used in the original code and in the generated code + labels: BTreeSet<Label>