
Commit 339f68b

use early return for race_detecting() logic
1 parent a888905 commit 339f68b
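
Every hunk below applies the same mechanical refactor: an `if global.race_detecting() { ... }` wrapper (with a duplicated `Ok(())` in the `else` arm) becomes a guard clause that returns early, so the detection logic loses one level of nesting. A minimal, self-contained sketch of the pattern — the `race_detecting` flag and the function bodies are simplified stand-ins, not the real Miri signatures:

    // Before: main logic nested under the positive check.
    fn check_old(race_detecting: bool) -> Result<(), ()> {
        if race_detecting {
            // ... detection logic, one level deep ...
            Ok(())
        } else {
            Ok(())
        }
    }

    // After: bail out early; the logic sits flat and `Ok(())` appears once.
    fn check_new(race_detecting: bool) -> Result<(), ()> {
        if !race_detecting {
            return Ok(());
        }
        // ... detection logic ...
        Ok(())
    }

    fn main() {
        assert_eq!(check_old(true), check_new(true));
        assert_eq!(check_old(false), check_new(false));
    }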

1 file changed: +152 -156 lines changed

src/tools/miri/src/concurrency/data_race.rs
@@ -1048,32 +1048,31 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (mem_clocks_range, mem_clocks) in
-                alloc_ranges.iter_mut(access_range.start, access_range.size)
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (mem_clocks_range, mem_clocks) in
+            alloc_ranges.iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
             {
-                if let Err(DataRace) =
-                    mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
-                {
-                    drop(thread_clocks);
-                    // Report data-race.
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaRead(read_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
+                drop(thread_clocks);
+                // Report data-race.
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaRead(read_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
             }
-            Ok(())
-        } else {
-            Ok(())
         }
+        Ok(())
     }
 
     /// Detect data-races for an unsynchronized write operation. It will not perform
@@ -1091,34 +1090,30 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            for (mem_clocks_range, mem_clocks) in
-                self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        for (mem_clocks_range, mem_clocks) in
+            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
             {
-                if let Err(DataRace) = mem_clocks.write_race_detect(
-                    &mut thread_clocks,
-                    index,
-                    write_type,
-                    current_span,
-                ) {
-                    drop(thread_clocks);
-                    // Report data-race
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaWrite(write_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
+                drop(thread_clocks);
+                // Report data-race
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaWrite(write_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
             }
-            Ok(())
-        } else {
-            Ok(())
         }
+        Ok(())
     }
 }
 
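
In both hunks above, note that `drop(thread_clocks)` runs before `Self::report_data_race(...)`: `thread_clocks` is a mutable borrow of the per-thread clock state, and the reporting path inspects that shared state again, so the first borrow must end before reporting. A minimal sketch of the RefCell discipline this reflects (simplified stand-in state, not Miri's actual types — an assumption about why the `drop` is there):

    use std::cell::RefCell;

    fn main() {
        let state = RefCell::new(0u64);
        let mut guard = state.borrow_mut();
        *guard += 1;
        // Borrowing `state` again while `guard` is alive would panic at runtime.
        drop(guard); // end the mutable borrow explicitly...
        let snapshot = state.borrow(); // ...so a fresh borrow succeeds
        assert_eq!(*snapshot, 1);
    }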
@@ -1149,48 +1144,50 @@ impl FrameState {
     pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // This should do the same things as `MemoryCellClocks::write_race_detect`.
-            if !current_span.is_dummy() {
-                thread_clocks.clock.index_mut(index).span = current_span;
-            }
-            let mut clocks = self.local_clocks.borrow_mut();
-            if storage_live {
-                let new_clocks = LocalClocks {
-                    write: thread_clocks.clock[index],
-                    write_type: NaWriteType::Allocate,
-                    read: VTimestamp::ZERO,
-                };
-                // There might already be an entry in the map for this, if the local was previously
-                // live already.
-                clocks.insert(local, new_clocks);
-            } else {
-                // This can fail to exist if `race_detecting` was false when the allocation
-                // occurred, in which case we can backdate this to the beginning of time.
-                let clocks = clocks.entry(local).or_insert_with(Default::default);
-                clocks.write = thread_clocks.clock[index];
-                clocks.write_type = NaWriteType::Write;
-            }
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::write_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        let mut clocks = self.local_clocks.borrow_mut();
+        if storage_live {
+            let new_clocks = LocalClocks {
+                write: thread_clocks.clock[index],
+                write_type: NaWriteType::Allocate,
+                read: VTimestamp::ZERO,
+            };
+            // There might already be an entry in the map for this, if the local was previously
+            // live already.
+            clocks.insert(local, new_clocks);
+        } else {
+            // This can fail to exist if `race_detecting` was false when the allocation
+            // occurred, in which case we can backdate this to the beginning of time.
+            let clocks = clocks.entry(local).or_insert_with(Default::default);
+            clocks.write = thread_clocks.clock[index];
+            clocks.write_type = NaWriteType::Write;
         }
     }
 
     pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // This should do the same things as `MemoryCellClocks::read_race_detect`.
-            if !current_span.is_dummy() {
-                thread_clocks.clock.index_mut(index).span = current_span;
-            }
-            thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
-            // This can fail to exist if `race_detecting` was false when the allocation
-            // occurred, in which case we can backdate this to the beginning of time.
-            let mut clocks = self.local_clocks.borrow_mut();
-            let clocks = clocks.entry(local).or_insert_with(Default::default);
-            clocks.read = thread_clocks.clock[index];
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::read_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
         }
+        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
+        // This can fail to exist if `race_detecting` was false when the allocation
+        // occurred, in which case we can backdate this to the beginning of time.
+        let mut clocks = self.local_clocks.borrow_mut();
+        let clocks = clocks.entry(local).or_insert_with(Default::default);
+        clocks.read = thread_clocks.clock[index];
     }
 
     pub fn local_moved_to_memory(
@@ -1200,21 +1197,22 @@ impl FrameState {
         machine: &MiriMachine<'_>,
     ) {
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // Get the time the last write actually happened. This can fail to exist if
-            // `race_detecting` was false when the write occurred, in that case we can backdate this
-            // to the beginning of time.
-            let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
-            for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
-                // The initialization write for this already happened, just at the wrong timestamp.
-                // Check that the thread index matches what we expect.
-                assert_eq!(mem_clocks.write.0, index);
-                // Convert the local's clocks into memory clocks.
-                mem_clocks.write = (index, local_clocks.write);
-                mem_clocks.write_type = local_clocks.write_type;
-                mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
-            }
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // Get the time the last write actually happened. This can fail to exist if
+        // `race_detecting` was false when the write occurred, in that case we can backdate this
+        // to the beginning of time.
+        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
+        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
+            // The initialization write for this already happened, just at the wrong timestamp.
+            // Check that the thread index matches what we expect.
+            assert_eq!(mem_clocks.write.0, index);
+            // Convert the local's clocks into memory clocks.
+            mem_clocks.write = (index, local_clocks.write);
+            mem_clocks.write_type = local_clocks.write_type;
+            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
         }
     }
 }
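
The `or_insert_with(Default::default)` and `unwrap_or_default()` calls in these `FrameState` hunks implement the "backdate to the beginning of time" comment: a local allocated while `race_detecting` was off simply has no entry, and a default (all-zero) clock behaves as if the write happened at time zero. A small illustration of the map idiom, with simplified stand-ins for the `LocalClocks` map in the diff:

    use std::collections::HashMap;

    #[derive(Default, Debug, PartialEq)]
    struct Clocks {
        write: u64,
        read: u64,
    }

    fn main() {
        let mut locals: HashMap<u32, Clocks> = HashMap::new();
        // Local 7 was never recorded: race detection was off at its allocation.
        let entry = locals.entry(7).or_insert_with(Default::default);
        entry.write = 42; // record the write; `read` stays backdated at 0
        assert_eq!(locals[&7], Clocks { write: 42, read: 0 });
    }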
@@ -1403,69 +1401,67 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         assert!(access.is_atomic());
-        if let Some(data_race) = &this.machine.data_race {
-            if data_race.race_detecting() {
-                let size = place.layout.size;
-                let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
-                // Load and log the atomic operation.
-                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
-                trace!(
-                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
-                    access.description(None, None),
-                    &atomic,
-                    place.ptr(),
-                    size.bytes()
-                );
+        let Some(data_race) = &this.machine.data_race else { return Ok(()) };
+        if !data_race.race_detecting() {
+            return Ok(());
+        }
+        let size = place.layout.size;
+        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
+        // Load and log the atomic operation.
+        // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
+        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+        trace!(
+            "Atomic op({}) with ordering {:?} on {:?} (size={})",
+            access.description(None, None),
+            &atomic,
+            place.ptr(),
+            size.bytes()
+        );
 
-                let current_span = this.machine.current_span();
-                // Perform the atomic operation.
-                data_race.maybe_perform_sync_operation(
-                    &this.machine.threads,
-                    current_span,
-                    |index, mut thread_clocks| {
-                        for (mem_clocks_range, mem_clocks) in
-                            alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
-                        {
-                            if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
-                            {
-                                mem::drop(thread_clocks);
-                                return VClockAlloc::report_data_race(
-                                    data_race,
-                                    &this.machine.threads,
-                                    mem_clocks,
-                                    access,
-                                    place.layout.size,
-                                    interpret::Pointer::new(
-                                        alloc_id,
-                                        Size::from_bytes(mem_clocks_range.start),
-                                    ),
-                                    None,
-                                )
-                                .map(|_| true);
-                            }
-                        }
-
-                        // This conservatively assumes all operations have release semantics
-                        Ok(true)
-                    },
-                )?;
-
-                // Log changes to atomic memory.
-                if tracing::enabled!(tracing::Level::TRACE) {
-                    for (_offset, mem_clocks) in
-                        alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
-                    {
-                        trace!(
-                            "Updated atomic memory({:?}, size={}) to {:#?}",
-                            place.ptr(),
-                            size.bytes(),
-                            mem_clocks.atomic_ops
-                        );
+        let current_span = this.machine.current_span();
+        // Perform the atomic operation.
+        data_race.maybe_perform_sync_operation(
+            &this.machine.threads,
+            current_span,
+            |index, mut thread_clocks| {
+                for (mem_clocks_range, mem_clocks) in
+                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
+                {
+                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
+                        mem::drop(thread_clocks);
+                        return VClockAlloc::report_data_race(
+                            data_race,
+                            &this.machine.threads,
+                            mem_clocks,
+                            access,
+                            place.layout.size,
+                            interpret::Pointer::new(
+                                alloc_id,
+                                Size::from_bytes(mem_clocks_range.start),
+                            ),
+                            None,
+                        )
+                        .map(|_| true);
                     }
                 }
+
+                // This conservatively assumes all operations have release semantics
+                Ok(true)
+            },
+        )?;
+
+        // Log changes to atomic memory.
+        if tracing::enabled!(tracing::Level::TRACE) {
+            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
+                trace!(
+                    "Updated atomic memory({:?}, size={}) to {:#?}",
+                    place.ptr(),
+                    size.bytes(),
+                    mem_clocks.atomic_ops
+                );
             }
         }
+
         Ok(())
     }
 }
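
This last hunk also flattens the optional `data_race` state with Rust's `let ... else` (stable since 1.65): the `else` arm must diverge, which makes it a natural fit for the same early-return style. A minimal sketch of the idiom, with a hypothetical `GlobalState` stand-in for the real `&this.machine.data_race` binding:

    struct GlobalState;

    fn validate_atomic_op(data_race: Option<&GlobalState>) -> Result<(), ()> {
        // Bind on Some, or diverge (here: return success) on None.
        let Some(_data_race) = data_race else { return Ok(()) };
        // ... atomic race-detection logic ...
        Ok(())
    }

    fn main() {
        assert!(validate_atomic_op(None).is_ok());
        assert!(validate_atomic_op(Some(&GlobalState)).is_ok());
    }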
