diff --git a/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py b/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py
index 036bc6c931..d9e2b2d7d0 100644
--- a/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py
+++ b/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py
@@ -60,6 +60,8 @@ def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
         timeout = waits.expectedNodeStartUpTimeout() + \
                   waits.expectedPoolInterconnectionTime(len(nodeSet))
+        # TODO: Probably it's better to modify waits.* functions
+        timeout *= 1.5
         looper.run(eventually(checkNodeStatusRemotesAndF, expectedStatus,
                               nodeIdx,
diff --git a/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py b/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py
index 9e98d40a2a..ab8e414f21 100644
--- a/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py
+++ b/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py
@@ -73,10 +73,13 @@ def test_node_catchup_after_restart_with_txns(
     looper.add(newNode)
     txnPoolNodeSet[-1] = newNode
 
+    # Make sure ledger is not synced initially
+    check_ledger_state(newNode, DOMAIN_LEDGER_ID, LedgerState.not_synced)
+
     # Delay catchup reply processing so LedgerState does not change
-    # TODO fix delay, sometimes it's not enough and loweer 'check_ledger_state'
+    # TODO fix delay, sometimes it's not enough and lower 'check_ledger_state'
     # fails because newNode's domain ledger state is 'synced'
-    delay_catchup_reply = 5
+    delay_catchup_reply = 10
     newNode.nodeIbStasher.delay(cr_delay(delay_catchup_reply))
 
     looper.run(checkNodesConnected(txnPoolNodeSet))
diff --git a/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py b/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py
index a3aef6e1ec..53538e69b7 100644
--- a/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py
+++ b/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py
@@ -7,6 +7,7 @@
 from plenum.test.test_node import ensureElectionsDone, getNonPrimaryReplicas
 from plenum.test.view_change.helper import ensure_view_change
 from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests
+from plenum.test.delayers import icDelay
 
 Max3PCBatchSize = 4
 
@@ -51,15 +52,27 @@ def test_new_primary_has_wrong_clock(tconf, looper, txnPoolNodeSet,
 
     old_view_no = txnPoolNodeSet[0].viewNo
 
+    # Delay parameters
+    malicious_batch_count = 5
+    malicious_batch_interval = 2
+    instance_change_delay = 1.5 * malicious_batch_count * malicious_batch_interval
+
+    # Delay instance change so view change doesn't happen in the middle of this test
+    for node in txnPoolNodeSet:
+        node.nodeIbStasher.delay(icDelay(instance_change_delay))
+
     # Requests are sent
-    for _ in range(5):
+    for _ in range(malicious_batch_count):
         sdk_send_random_requests(looper, sdk_pool_handle,
                                  sdk_wallet_client, count=2)
-        looper.runFor(.2)
+        looper.runFor(malicious_batch_interval)
 
     def chk():
+        for node in txnPoolNodeSet:
+            assert node.viewNo == old_view_no
+
         for node in [n for n in txnPoolNodeSet if n != faulty_node]:
             # Each non faulty node raises suspicion
             assert get_timestamp_suspicion_count(node) > susp_counts[node.name]
@@ -70,6 +83,10 @@ def chk():
 
     looper.run(eventually(chk, retryWait=1))
 
+    # Clear delays
+    for node in txnPoolNodeSet:
+        node.nodeIbStasher.reset_delays_and_process_delayeds()
+
     # Eventually another view change happens
     looper.run(eventually(checkViewNoForNodes, txnPoolNodeSet, old_view_no + 1,
                           retryWait=1, timeout=2 * tconf.PerfCheckFreq))
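
The third file's change follows plenum's stash-and-release testing pattern: hold back INSTANCE_CHANGE messages with icDelay so the view stays at old_view_no while the faulty primary's bad timestamps accumulate suspicions, then call reset_delays_and_process_delayeds() so the stashed votes go through and the view change completes. Below is a minimal sketch of that pattern; MiniStasher, deliver, and ic_delay are hypothetical stand-ins for illustration, and the real plenum Stasher and delayer APIs differ in detail.

    from typing import Callable, List, Optional

    # Hypothetical stand-in for a node's inbound stasher (illustrative only):
    # a delayer inspects each inbound message and returns a delay, and any
    # message a delayer matches stays stashed until the delays are reset.
    class MiniStasher:
        def __init__(self) -> None:
            self.delayers: List[Callable[[object], Optional[float]]] = []
            self.stashed: List[object] = []
            self.processed: List[object] = []

        def delay(self, delayer: Callable[[object], Optional[float]]) -> None:
            self.delayers.append(delayer)

        def deliver(self, msg: object) -> None:
            if any(d(msg) for d in self.delayers):
                self.stashed.append(msg)   # held back, like a delayed InstanceChange
            else:
                self.processed.append(msg)

        def reset_delays_and_process_delayeds(self) -> None:
            # Drop all delay rules and release everything that was held back
            self.delayers.clear()
            self.processed.extend(self.stashed)
            self.stashed.clear()


    def ic_delay(seconds: float) -> Callable[[object], Optional[float]]:
        # Rough analogue of plenum's icDelay: match only InstanceChange messages
        return lambda msg: seconds if type(msg).__name__ == "InstanceChange" else None

In the test above the same flow appears as: install the delay on every node's nodeIbStasher before sending the skewed batches, assert inside chk that viewNo has not moved, and only then release the delays so checkViewNoForNodes can observe old_view_no + 1.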