[INDY-1185] Fixing build on hyperledger jenkins #553

Merged
@@ -60,6 +60,8 @@ def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):

timeout = waits.expectedNodeStartUpTimeout() + \
waits.expectedPoolInterconnectionTime(len(nodeSet))
# TODO: Probably it's better to modify waits.* functions
timeout *= 1.5
looper.run(eventually(checkNodeStatusRemotesAndF,
expectedStatus,
nodeIdx,
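The TODO above proposes moving the 1.5x safety margin into the waits.* helpers instead of scaling the timeout at every call site. A minimal sketch of that idea, assuming the same waits module these tests already use; the helper name expectedNodeRestartTimeout and the margin constant are hypothetical and not part of this PR:

from plenum.test import waits  # assumed import path, matching the helpers used above

RESTART_TIMEOUT_MARGIN = 1.5   # hypothetical constant; today the 1.5 factor lives at the call site

def expectedNodeRestartTimeout(node_count: int) -> float:
    # Combine the two existing estimates and apply the safety margin once here,
    # so callers no longer need the ad-hoc `timeout *= 1.5`.
    base = waits.expectedNodeStartUpTimeout() + \
           waits.expectedPoolInterconnectionTime(node_count)
    return RESTART_TIMEOUT_MARGIN * base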
@@ -73,10 +73,13 @@ def test_node_catchup_after_restart_with_txns(
looper.add(newNode)
txnPoolNodeSet[-1] = newNode

# Make sure ledger is not synced initially
check_ledger_state(newNode, DOMAIN_LEDGER_ID, LedgerState.not_synced)

# Delay catchup reply processing so LedgerState does not change
# TODO fix delay, sometimes it's not enough and loweer 'check_ledger_state'
# TODO fix delay, sometimes it's not enough and lower 'check_ledger_state'
# fails because newNode's domain ledger state is 'synced'
delay_catchup_reply = 5
delay_catchup_reply = 10
newNode.nodeIbStasher.delay(cr_delay(delay_catchup_reply))
looper.run(checkNodesConnected(txnPoolNodeSet))
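Read together, the new lines in this hunk check the pre-catchup state and then slow catchup down: the freshly added node's domain ledger must start out unsynced, CatchupReply processing is held back for 10 seconds (widened from 5, per the TODO) so it stays that way while the pool connects, and a later check_ledger_state below the shown hunk verifies it again. A condensed restatement, a sketch only, using the same calls and fixtures (newNode, looper, txnPoolNodeSet) that appear in the diff:

check_ledger_state(newNode, DOMAIN_LEDGER_ID,
                   LedgerState.not_synced)                   # new node starts out unsynced
delay_catchup_reply = 10                                     # widened from 5 s; see the TODO above
newNode.nodeIbStasher.delay(cr_delay(delay_catchup_reply))   # hold CatchupReply processing
looper.run(checkNodesConnected(txnPoolNodeSet))              # connect the pool while replies are held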

@@ -7,6 +7,7 @@
from plenum.test.test_node import ensureElectionsDone, getNonPrimaryReplicas
from plenum.test.view_change.helper import ensure_view_change
from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests
from plenum.test.delayers import icDelay

Max3PCBatchSize = 4

@@ -51,15 +52,27 @@ def test_new_primary_has_wrong_clock(tconf, looper, txnPoolNodeSet,

old_view_no = txnPoolNodeSet[0].viewNo

# Delay parameters
malicious_batch_count = 5
malicious_batch_interval = 2
instance_change_delay = 1.5 * malicious_batch_count * malicious_batch_interval

# Delay instance change so view change doesn't happen in the middle of this test
for node in txnPoolNodeSet:
node.nodeIbStasher.delay(icDelay(instance_change_delay))

# Requests are sent
for _ in range(5):
for _ in range(malicious_batch_count):
sdk_send_random_requests(looper,
sdk_pool_handle,
sdk_wallet_client,
count=2)
looper.runFor(.2)
looper.runFor(malicious_batch_interval)

def chk():
for node in txnPoolNodeSet:
assert node.viewNo == old_view_no

for node in [n for n in txnPoolNodeSet if n != faulty_node]:
# Each non faulty node raises suspicion
assert get_timestamp_suspicion_count(node) > susp_counts[node.name]
@@ -70,6 +83,10 @@ def chk():

looper.run(eventually(chk, retryWait=1))

# Clear delays
for node in txnPoolNodeSet:
node.nodeIbStasher.reset_delays_and_process_delayeds()

# Eventually another view change happens
looper.run(eventually(checkViewNoForNodes, txnPoolNodeSet, old_view_no + 1,
retryWait=1, timeout=2 * tconf.PerfCheckFreq))
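For the delay budget in the hunk above, the numbers work out as follows (values taken from the diff): the faulty primary's batches are sent over malicious_batch_count * malicious_batch_interval = 10 seconds, and InstanceChange messages are held for 1.5x that window, so no view change can fire while the suspicion counters are being checked:

# Worked out with the values from the diff
malicious_batch_count = 5
malicious_batch_interval = 2                                     # seconds between request batches
send_window = malicious_batch_count * malicious_batch_interval   # 5 * 2 = 10 s of bad batches
instance_change_delay = 1.5 * send_window                        # 15 s, a 50% margin on top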
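The delay also has a matching release step: once chk() has passed, the held InstanceChange messages are processed so the view change asserted at the end (old_view_no + 1) can still happen. Put side by side, using only calls that appear in the diff:

# Hold InstanceChange messages so no view change fires in the middle of the test...
for node in txnPoolNodeSet:
    node.nodeIbStasher.delay(icDelay(instance_change_delay))

# ...and once the suspicion checks are done, release them so the expected
# view change to old_view_no + 1 can eventually go through.
for node in txnPoolNodeSet:
    node.nodeIbStasher.reset_delays_and_process_delayeds()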