@@ -83,7 +83,7 @@ EventDeliveryManager::initialize()
   reset_timers_counters();
   spike_register_.resize( num_threads );
   off_grid_spike_register_.resize( num_threads );
-  gather_completed_checker_.resize( num_threads, false );
+  gather_completed_checker_.initialize( num_threads, false );
   // Ensures that ResetKernel resets off_grid_spiking_
   off_grid_spiking_ = false;
   buffer_size_target_data_has_changed_ = false;
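The `initialize()` call above, together with the `operator[]`, `set_true()`, `set_false()`, `logical_and()`, `all_false()`, `all_true()`, and `any_false()` calls in the hunks below, implies a dedicated per-thread indicator type replacing the former `std::vector< bool >`. A minimal sketch of what that interface could look like, reconstructed only from the calls visible in this diff; the class names, member layout, and the use of plain (non-atomic) 64-bit words are assumptions, not the exact NEST declarations:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// One flag per thread, each occupying a full 64-bit word so that writers on
// different threads never touch the same byte; std::vector< bool > packs its
// bits and cannot give that guarantee. A production version would also have
// to consider memory ordering (e.g. std::atomic), omitted here for clarity.
class BoolIndicator
{
public:
  explicit BoolIndicator( const bool status )
    : status_( status ? true_uint64 : false_uint64 )
  {
  }

  void set_true() { status_ = true_uint64; }
  void set_false() { status_ = false_uint64; }

  // Accumulates completion: stays true only if already true AND status.
  void logical_and( const bool status )
  {
    status_ = ( is_true() and status ) ? true_uint64 : false_uint64;
  }

  bool is_true() const { return status_ == true_uint64; }

private:
  static constexpr std::uint64_t true_uint64 = 1;
  static constexpr std::uint64_t false_uint64 = 0;
  std::uint64_t status_;
};

class PerThreadBoolIndicator
{
public:
  BoolIndicator& operator[]( const std::size_t tid ) { return per_thread_status_[ tid ]; }

  // Replaces the former std::vector< bool >::resize(): (re)create one flag
  // per thread with the given initial status.
  void initialize( const std::size_t num_threads, const bool status )
  {
    per_thread_status_.assign( num_threads, BoolIndicator( status ) );
  }

  bool all_false() const
  {
    for ( const BoolIndicator& b : per_thread_status_ )
    {
      if ( b.is_true() )
      {
        return false;
      }
    }
    return true;
  }

  bool all_true() const
  {
    for ( const BoolIndicator& b : per_thread_status_ )
    {
      if ( not b.is_true() )
      {
        return false;
      }
    }
    return true;
  }

  bool any_false() const { return not all_true(); }

private:
  std::vector< BoolIndicator > per_thread_status_;
};
```

Under this reading, `finalize()` no longer needs the `clear()` call removed in the next hunk, because `initialize()` discards the previous state on every `ResetKernel`.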
@@ -107,7 +107,6 @@ EventDeliveryManager::finalize()
   // clear the spike buffers
   std::vector< std::vector< std::vector< std::vector< Target > > > >().swap( spike_register_ );
   std::vector< std::vector< std::vector< std::vector< OffGridTarget > > > >().swap( off_grid_spike_register_ );
-  gather_completed_checker_.clear();
 
   send_buffer_secondary_events_.clear();
   recv_buffer_secondary_events_.clear();
@@ -308,16 +307,16 @@ EventDeliveryManager::gather_spike_data_( const thread tid,
   std::vector< SpikeDataT >& recv_buffer )
 {
   // Assume all threads have some work to do
-  gather_completed_checker_.set( tid, false );
+  gather_completed_checker_[ tid ].set_false();
   assert( gather_completed_checker_.all_false() );
 
   const AssignedRanks assigned_ranks = kernel().vp_manager.get_assigned_ranks( tid );
 
-  while ( not gather_completed_checker_.all_true() )
+  while ( gather_completed_checker_.any_false() )
   {
     // Assume this is the last gather round and change to false
     // otherwise
-    gather_completed_checker_.set( tid, true );
+    gather_completed_checker_[ tid ].set_true();
 
 #pragma omp single
     {
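The loop above is an optimistic completion protocol: each thread first marks itself incomplete, every round then starts by assuming it is the last (`set_true()`), any phase that still has work drags the thread's flag back to false via `logical_and()`, and the round repeats while `any_false()` holds. A stand-alone sketch of the pattern, assuming the `PerThreadBoolIndicator` sketch above; `work_chunk()` is a hypothetical stand-in for the collocate/deliver steps and returns true once its thread has no work left:

```cpp
#include <omp.h>

void
gather_rounds( PerThreadBoolIndicator& done, bool ( *work_chunk )( int ) )
{
#pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    done[ tid ].set_false(); // every thread starts with work to do

    bool all_done = false;
    while ( not all_done )
    {
      done[ tid ].set_true();                       // assume this round finishes
      done[ tid ].logical_and( work_chunk( tid ) ); // stays true only on completion

#pragma omp barrier               // all flags settled before anyone reads
      all_done = done.all_true(); // every thread sees the same verdict
#pragma omp barrier               // all reads done before next round's writes
    }
  }
}
```

Call it after `done.initialize( omp_get_max_threads(), false )`. The two barriers around the shared read are what make the protocol race-free in this reduced form; in the diff, that role is played by the explicit `#pragma omp barrier` lines and the implicit barriers at the end of each `#pragma omp single` section.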
@@ -335,13 +334,13 @@ EventDeliveryManager::gather_spike_data_( const thread tid,
     // Collocate spikes to send buffer
     const bool collocate_completed =
       collocate_spike_data_buffers_( tid, assigned_ranks, send_buffer_position, spike_register_, send_buffer );
-    gather_completed_checker_.logical_and( tid, collocate_completed );
+    gather_completed_checker_[ tid ].logical_and( collocate_completed );
 
     if ( off_grid_spiking_ )
     {
       const bool collocate_completed_off_grid = collocate_spike_data_buffers_(
         tid, assigned_ranks, send_buffer_position, off_grid_spike_register_, send_buffer );
-      gather_completed_checker_.logical_and( tid, collocate_completed_off_grid );
+      gather_completed_checker_[ tid ].logical_and( collocate_completed_off_grid );
     }
 
 #pragma omp barrier
@@ -374,13 +373,13 @@ EventDeliveryManager::gather_spike_data_( const thread tid,
 
     // Deliver spikes from receive buffer to ring buffers.
     const bool deliver_completed = deliver_events_( tid, recv_buffer );
-    gather_completed_checker_.logical_and( tid, deliver_completed );
+    gather_completed_checker_[ tid ].logical_and( deliver_completed );
 
     // Exit gather loop if all local threads and remote processes are
     // done.
 #pragma omp barrier
     // Resize mpi buffers, if necessary and allowed.
-    if ( not gather_completed_checker_.all_true() and kernel().mpi_manager.adaptive_spike_buffers() )
+    if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_spike_buffers() )
     {
 #pragma omp single
       {
@@ -583,19 +582,19 @@ EventDeliveryManager::gather_target_data( const thread tid )
   assert( not kernel().connection_manager.is_source_table_cleared() );
 
   // assume all threads have some work to do
-  gather_completed_checker_.set( tid, false );
+  gather_completed_checker_[ tid ].set_false();
   assert( gather_completed_checker_.all_false() );
 
   const AssignedRanks assigned_ranks = kernel().vp_manager.get_assigned_ranks( tid );
 
   kernel().connection_manager.prepare_target_table( tid );
   kernel().connection_manager.reset_source_table_entry_point( tid );
 
-  while ( not gather_completed_checker_.all_true() )
+  while ( gather_completed_checker_.any_false() )
   {
     // assume this is the last gather round and change to false
     // otherwise
-    gather_completed_checker_.set( tid, true );
+    gather_completed_checker_[ tid ].set_true();
 
 #pragma omp single
     {
@@ -611,7 +610,7 @@ EventDeliveryManager::gather_target_data( const thread tid )
       assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() );
 
     const bool gather_completed = collocate_target_data_buffers_( tid, assigned_ranks, send_buffer_position );
-    gather_completed_checker_.logical_and( tid, gather_completed );
+    gather_completed_checker_[ tid ].logical_and( gather_completed );
 
     if ( gather_completed_checker_.all_true() )
     {
@@ -627,11 +626,11 @@ EventDeliveryManager::gather_target_data( const thread tid )
     } // of omp single
 
     const bool distribute_completed = distribute_target_data_buffers_( tid );
-    gather_completed_checker_.logical_and( tid, distribute_completed );
+    gather_completed_checker_[ tid ].logical_and( distribute_completed );
 #pragma omp barrier
 
     // resize mpi buffers, if necessary and allowed
-    if ( not gather_completed_checker_.all_true() and kernel().mpi_manager.adaptive_target_buffers() )
+    if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() )
     {
 #pragma omp single
       {
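The target-data path chains `logical_and()` twice per round, once for the collocate phase (inside the `omp single`) and once for the distribute phase, exactly as the spike path does for on-grid and off-grid spikes. A tiny demo of the accumulation semantics, assuming the `BoolIndicator` sketch above:

```cpp
#include <cassert>

int
main()
{
  BoolIndicator flag( false );
  flag.set_true();              // optimistic start of the round
  flag.logical_and( true );     // collocate phase completed
  flag.logical_and( false );    // distribute phase did not complete
  assert( not flag.is_true() ); // so another round is needed
  return 0;
}
```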