Use vector of integer for per-thread bool indicators #1442

Merged 6 commits on Mar 2, 2020
2 changes: 1 addition & 1 deletion nestkernel/CMakeLists.txt
@@ -23,7 +23,6 @@ set( nestkernel_sources
archiving_node.h archiving_node.cpp
clopath_archiving_node.h clopath_archiving_node.cpp
common_synapse_properties.h common_synapse_properties.cpp
completed_checker.h completed_checker.cpp
connection.h
connection_label.h
common_properties_hom_w.h
@@ -53,6 +52,7 @@ set( nestkernel_sources
modelrange_manager.h modelrange_manager.cpp
node.h node.cpp
parameter.h parameter.cpp
per_thread_bool_indicator.h per_thread_bool_indicator.cpp
proxynode.h proxynode.cpp
recording_device.h recording_device.cpp
pseudo_recording_device.h
140 changes: 0 additions & 140 deletions nestkernel/completed_checker.h

This file was deleted.
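The new files per_thread_bool_indicator.h and per_thread_bool_indicator.cpp that replace the deleted checker are added to the build in the CMakeLists change above, but their contents are not part of this excerpt. The sketch below only reconstructs the interface implied by the call sites further down (initialize, operator[], is_true/is_false, set_true/set_false, logical_and, all_true, all_false, any_false); the helper class name BoolIndicator, the std::uint_fast64_t storage, the std::size_t parameters, and all implementation details are assumptions, not the merged code.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace nest
{

// One flag per thread, stored in its own integer word so that concurrent
// per-thread updates do not contend on packed bits.
class BoolIndicator
{
public:
  explicit BoolIndicator( const bool status )
    : status_( status ? 1 : 0 )
  {
  }

  void set_true() { status_ = 1; }
  void set_false() { status_ = 0; }
  bool is_true() const { return status_ == 1; }
  bool is_false() const { return status_ == 0; }

  //! Stays true only if it was true and the given condition holds.
  void logical_and( const bool status ) { status_ = ( is_true() and status ) ? 1 : 0; }

private:
  std::uint_fast64_t status_;
};

// One BoolIndicator per thread; replaces the deleted CompletedChecker.
class PerThreadBoolIndicator
{
public:
  //! One indicator per thread, all initialized to the given status.
  void initialize( const std::size_t num_threads, const bool status )
  {
    per_thread_status_ = std::vector< BoolIndicator >( num_threads, BoolIndicator( status ) );
  }

  BoolIndicator& operator[]( const std::size_t tid ) { return per_thread_status_[ tid ]; }

  bool all_true() const
  {
    return std::all_of(
      per_thread_status_.begin(), per_thread_status_.end(), []( const BoolIndicator& b ) { return b.is_true(); } );
  }

  bool all_false() const
  {
    return std::all_of(
      per_thread_status_.begin(), per_thread_status_.end(), []( const BoolIndicator& b ) { return b.is_false(); } );
  }

  bool any_false() const { return not all_true(); }

private:
  std::vector< BoolIndicator > per_thread_status_;
};

} // namespace nest
```

The PR title suggests the intent: one integer word per thread instead of packed bools, so each thread can flip its own indicator without touching memory shared with other threads. Treat that as the assumption behind this sketch rather than a statement about the internals of the deleted CompletedChecker.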

22 changes: 11 additions & 11 deletions nestkernel/connection_manager.cpp
@@ -94,9 +94,9 @@ nest::ConnectionManager::initialize()
secondary_recv_buffer_pos_.resize( num_threads );
sort_connections_by_source_ = true;

have_connections_changed_.resize( num_threads, true );
check_primary_connections_.resize( num_threads, false );
check_secondary_connections_.resize( num_threads, false );
have_connections_changed_.initialize( num_threads, true );
check_primary_connections_.initialize( num_threads, false );
check_secondary_connections_.initialize( num_threads, false );

#pragma omp parallel
{
@@ -555,17 +555,17 @@ nest::ConnectionManager::connect_( Node& s,

// We do not check has_primary_connections_ and secondary_connections_exist_
// directly as this led to worse performance on the supercomputer Piz Daint.
if ( not check_primary_connections_[ tid ] and is_primary )
if ( check_primary_connections_[ tid ].is_false() and is_primary )
{
#pragma omp atomic write
has_primary_connections_ = true;
check_primary_connections_.set( tid, true );
check_primary_connections_[ tid ].set_true();
}
else if ( not check_secondary_connections_[ tid ] and not is_primary )
else if ( check_secondary_connections_[ tid ].is_false() and not is_primary )
{
#pragma omp atomic write
secondary_connections_exist_ = true;
check_secondary_connections_.set( tid, true );
check_secondary_connections_[ tid ].set_true();
}
}

@@ -1409,12 +1409,12 @@ nest::ConnectionManager::set_have_connections_changed( const thread tid )
// Need to check if have_connections_changed_ has already been set, because if
// we have a lot of threads and they all try to set the variable at once we get
// performance issues on supercomputers.
if ( not have_connections_changed_[ tid ] )
if ( have_connections_changed_[ tid ].is_false() )
{
std::string msg =
"New connections created, connection descriptors previously obtained using 'GetConnections' are now invalid.";
LOG( M_WARNING, "ConnectionManager", msg );
have_connections_changed_.set( tid, true );
have_connections_changed_[ tid ].set_true();
}
}

@@ -1424,8 +1424,8 @@ nest::ConnectionManager::unset_have_connections_changed( const thread tid )
// Need to check if have_connections_changed_ has already been set, because if
// we have a lot of threads and they all try to set the variable at once we get
// performance issues on supercomputers.
if ( have_connections_changed_[ tid ] )
if ( have_connections_changed_[ tid ].is_true() )
{
have_connections_changed_.set( tid, false );
have_connections_changed_[ tid ].set_false();
}
}
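The comments in connect_() above explain that the shared flags has_primary_connections_ and secondary_connections_exist_ are not written on every connect call because doing so degraded performance on the supercomputer Piz Daint; instead each thread first consults its own per-thread indicator and performs the atomic write at most once. The toy program below (illustrative only, not NEST code; all names are invented) isolates that pattern. Compile with -fopenmp.

```cpp
#include <omp.h>

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
  const int num_threads = omp_get_max_threads();

  // One full word per thread instead of repeated writes to one shared bool.
  std::vector< std::uint_fast64_t > checked( num_threads, 0 );
  bool shared_flag = false; // stands in for has_primary_connections_

#pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    for ( int i = 0; i < 1000000; ++i )
    {
      // Only the first pass per thread reaches the shared write; afterwards
      // each thread reads memory it owns exclusively.
      if ( checked[ tid ] == 0 )
      {
#pragma omp atomic write
        shared_flag = true;
        checked[ tid ] = 1;
      }
    }
  }

  std::printf( "shared_flag = %d\n", shared_flag ? 1 : 0 );
  return 0;
}
```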
8 changes: 4 additions & 4 deletions nestkernel/connection_manager.h
@@ -30,14 +30,14 @@
#include "manager_interface.h"

// Includes from nestkernel:
#include "completed_checker.h"
#include "conn_builder.h"
#include "connection_id.h"
#include "connector_base.h"
#include "node_collection.h"
#include "nest_time.h"
#include "nest_timeconverter.h"
#include "nest_types.h"
#include "per_thread_bool_indicator.h"
#include "source_table.h"
#include "target_table.h"
#include "target_table_devices.h"
@@ -580,7 +580,7 @@ class ConnectionManager : public ManagerInterface

//! True if new connections have been created since startup or last call to
//! simulate.
CompletedChecker have_connections_changed_;
PerThreadBoolIndicator have_connections_changed_;

//! Whether to sort connections by source node ID.
bool sort_connections_by_source_;
@@ -589,13 +589,13 @@
bool has_primary_connections_;

//! Check for primary connections (spikes) on each thread.
CompletedChecker check_primary_connections_;
PerThreadBoolIndicator check_primary_connections_;

//! Whether secondary connections (e.g., gap junctions) exist.
bool secondary_connections_exist_;

//! Check for secondary connections (e.g., gap junctions) on each thread.
CompletedChecker check_secondary_connections_;
PerThreadBoolIndicator check_secondary_connections_;

//! Maximum distance between (double) spike times in STDP that is
//! still considered 0. See issue #894
29 changes: 14 additions & 15 deletions nestkernel/event_delivery_manager.cpp
@@ -83,7 +83,7 @@ EventDeliveryManager::initialize()
reset_timers_counters();
spike_register_.resize( num_threads );
off_grid_spike_register_.resize( num_threads );
gather_completed_checker_.resize( num_threads, false );
gather_completed_checker_.initialize( num_threads, false );
// Ensures that ResetKernel resets off_grid_spiking_
off_grid_spiking_ = false;
buffer_size_target_data_has_changed_ = false;
@@ -107,7 +107,6 @@ EventDeliveryManager::finalize()
// clear the spike buffers
std::vector< std::vector< std::vector< std::vector< Target > > > >().swap( spike_register_ );
std::vector< std::vector< std::vector< std::vector< OffGridTarget > > > >().swap( off_grid_spike_register_ );
gather_completed_checker_.clear();

send_buffer_secondary_events_.clear();
recv_buffer_secondary_events_.clear();
@@ -308,16 +307,16 @@ EventDeliveryManager::gather_spike_data_( const thread tid,
std::vector< SpikeDataT >& recv_buffer )
{
// Assume all threads have some work to do
gather_completed_checker_.set( tid, false );
gather_completed_checker_[ tid ].set_false();
assert( gather_completed_checker_.all_false() );

const AssignedRanks assigned_ranks = kernel().vp_manager.get_assigned_ranks( tid );

while ( not gather_completed_checker_.all_true() )
while ( gather_completed_checker_.any_false() )
{
// Assume this is the last gather round and change to false
// otherwise
gather_completed_checker_.set( tid, true );
gather_completed_checker_[ tid ].set_true();

#pragma omp single
{
@@ -335,13 +334,13 @@
// Collocate spikes to send buffer
const bool collocate_completed =
collocate_spike_data_buffers_( tid, assigned_ranks, send_buffer_position, spike_register_, send_buffer );
gather_completed_checker_.logical_and( tid, collocate_completed );
gather_completed_checker_[ tid ].logical_and( collocate_completed );

if ( off_grid_spiking_ )
{
const bool collocate_completed_off_grid = collocate_spike_data_buffers_(
tid, assigned_ranks, send_buffer_position, off_grid_spike_register_, send_buffer );
gather_completed_checker_.logical_and( tid, collocate_completed_off_grid );
gather_completed_checker_[ tid ].logical_and( collocate_completed_off_grid );
}

#pragma omp barrier
@@ -374,13 +373,13 @@ EventDeliveryManager::gather_spike_data_( const thread tid,

// Deliver spikes from receive buffer to ring buffers.
const bool deliver_completed = deliver_events_( tid, recv_buffer );
gather_completed_checker_.logical_and( tid, deliver_completed );
gather_completed_checker_[ tid ].logical_and( deliver_completed );

// Exit gather loop if all local threads and remote processes are
// done.
#pragma omp barrier
// Resize mpi buffers, if necessary and allowed.
if ( not gather_completed_checker_.all_true() and kernel().mpi_manager.adaptive_spike_buffers() )
if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_spike_buffers() )
{
#pragma omp single
{
@@ -583,19 +582,19 @@ EventDeliveryManager::gather_target_data( const thread tid )
assert( not kernel().connection_manager.is_source_table_cleared() );

// assume all threads have some work to do
gather_completed_checker_.set( tid, false );
gather_completed_checker_[ tid ].set_false();
assert( gather_completed_checker_.all_false() );

const AssignedRanks assigned_ranks = kernel().vp_manager.get_assigned_ranks( tid );

kernel().connection_manager.prepare_target_table( tid );
kernel().connection_manager.reset_source_table_entry_point( tid );

while ( not gather_completed_checker_.all_true() )
while ( gather_completed_checker_.any_false() )
{
// assume this is the last gather round and change to false
// otherwise
gather_completed_checker_.set( tid, true );
gather_completed_checker_[ tid ].set_true();

#pragma omp single
{
@@ -611,7 +610,7 @@
assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() );

const bool gather_completed = collocate_target_data_buffers_( tid, assigned_ranks, send_buffer_position );
gather_completed_checker_.logical_and( tid, gather_completed );
gather_completed_checker_[ tid ].logical_and( gather_completed );

if ( gather_completed_checker_.all_true() )
{
@@ -627,11 +626,11 @@
} // of omp single

const bool distribute_completed = distribute_target_data_buffers_( tid );
gather_completed_checker_.logical_and( tid, distribute_completed );
gather_completed_checker_[ tid ].logical_and( distribute_completed );
#pragma omp barrier

// resize mpi buffers, if necessary and allowed
if ( not gather_completed_checker_.all_true() and kernel().mpi_manager.adaptive_target_buffers() )
if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() )
{
#pragma omp single
{
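The gather loops above coordinate the thread team through gather_completed_checker_: at the start of each round every thread optimistically marks itself done, then ANDs in whether its collocation and delivery work actually finished, and the team repeats while any indicator is false. The stand-alone sketch below (illustrative only, not NEST code; plain ints for the flags and an invented fixed round count) reproduces that round structure, with barriers placed so that all threads evaluate the exit condition on a consistent snapshot. Compile with -fopenmp.

```cpp
#include <omp.h>

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
  const int num_threads = omp_get_max_threads();
  std::vector< int > done( num_threads, 0 ); // one word per thread

  auto any_false = [ &done ]()
  {
    return std::any_of( done.begin(), done.end(), []( const int d ) { return d == 0; } );
  };

#pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    int rounds_left = 3; // pretend every thread needs three gather rounds

    while ( any_false() )
    {
// All threads saw the same snapshot of 'done' before anyone modifies it.
#pragma omp barrier

      done[ tid ] = 1; // assume this is the last round ...

      const bool work_completed = ( --rounds_left <= 0 );
      if ( not work_completed )
      {
        done[ tid ] = 0; // ... and change back otherwise (logical_and)
      }

// Every flag is final for this round before the next condition check.
#pragma omp barrier
    }
  }

  std::printf( "all threads finished gathering\n" );
  return 0;
}
```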
4 changes: 2 additions & 2 deletions nestkernel/event_delivery_manager.h
@@ -33,12 +33,12 @@
#include "stopwatch.h"

// Includes from nestkernel:
#include "completed_checker.h"
#include "event.h"
#include "mpi_manager.h" // OffGridSpike
#include "nest_time.h"
#include "nest_types.h"
#include "node.h"
#include "per_thread_bool_indicator.h"
#include "target_table.h"
#include "spike_data.h"
#include "vp_manager.h"
@@ -433,7 +433,7 @@ class EventDeliveryManager : public ManagerInterface
//!< whether size of MPI buffer for communication of spikes was changed
bool buffer_size_spike_data_has_changed_;

CompletedChecker gather_completed_checker_;
PerThreadBoolIndicator gather_completed_checker_;
};

inline void