@@ -1048,32 +1048,31 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (mem_clocks_range, mem_clocks) in
-                alloc_ranges.iter_mut(access_range.start, access_range.size)
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (mem_clocks_range, mem_clocks) in
+            alloc_ranges.iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
             {
-                if let Err(DataRace) =
-                    mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
-                {
-                    drop(thread_clocks);
-                    // Report data-race.
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaRead(read_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
+                drop(thread_clocks);
+                // Report data-race.
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaRead(read_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
             }
-            Ok(())
-        } else {
-            Ok(())
         }
+        Ok(())
     }
 
     /// Detect data-races for an unsynchronized write operation. It will not perform
@@ -1091,34 +1090,30 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            for (mem_clocks_range, mem_clocks) in
-                self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        for (mem_clocks_range, mem_clocks) in
+            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
             {
-                if let Err(DataRace) = mem_clocks.write_race_detect(
-                    &mut thread_clocks,
-                    index,
-                    write_type,
-                    current_span,
-                ) {
-                    drop(thread_clocks);
-                    // Report data-race
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaWrite(write_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
+                drop(thread_clocks);
+                // Report data-race
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaWrite(write_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
             }
-            Ok(())
-        } else {
-            Ok(())
         }
+        Ok(())
     }
 }
 
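Note: the two hunks above are the same mechanical refactor, applied to the read and write paths. The `if global.race_detecting() { ... } else { Ok(()) }` wrapper (with `Ok(())` duplicated in both arms) becomes an early-return guard, dedenting the whole body one level. A minimal standalone sketch of the pattern, using hypothetical names rather than Miri's real types:

fn detect_read(detecting: bool, cell_clocks: &[u64], thread_clock: u64) -> Result<(), String> {
    // Guard clause: bail out up front instead of wrapping the body in `if`.
    if !detecting {
        return Ok(());
    }
    for (offset, &cell) in cell_clocks.iter().enumerate() {
        // A cell timestamp ahead of our clock stands in for an unordered access.
        if cell > thread_clock {
            return Err(format!("data race at offset {offset}"));
        }
    }
    Ok(())
}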
@@ -1149,48 +1144,50 @@ impl FrameState {
     pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // This should do the same things as `MemoryCellClocks::write_race_detect`.
-            if !current_span.is_dummy() {
-                thread_clocks.clock.index_mut(index).span = current_span;
-            }
-            let mut clocks = self.local_clocks.borrow_mut();
-            if storage_live {
-                let new_clocks = LocalClocks {
-                    write: thread_clocks.clock[index],
-                    write_type: NaWriteType::Allocate,
-                    read: VTimestamp::ZERO,
-                };
-                // There might already be an entry in the map for this, if the local was previously
-                // live already.
-                clocks.insert(local, new_clocks);
-            } else {
-                // This can fail to exist if `race_detecting` was false when the allocation
-                // occurred, in which case we can backdate this to the beginning of time.
-                let clocks = clocks.entry(local).or_insert_with(Default::default);
-                clocks.write = thread_clocks.clock[index];
-                clocks.write_type = NaWriteType::Write;
-            }
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::write_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        let mut clocks = self.local_clocks.borrow_mut();
+        if storage_live {
+            let new_clocks = LocalClocks {
+                write: thread_clocks.clock[index],
+                write_type: NaWriteType::Allocate,
+                read: VTimestamp::ZERO,
+            };
+            // There might already be an entry in the map for this, if the local was previously
+            // live already.
+            clocks.insert(local, new_clocks);
+        } else {
+            // This can fail to exist if `race_detecting` was false when the allocation
+            // occurred, in which case we can backdate this to the beginning of time.
+            let clocks = clocks.entry(local).or_insert_with(Default::default);
+            clocks.write = thread_clocks.clock[index];
+            clocks.write_type = NaWriteType::Write;
         }
     }
 
     pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // This should do the same things as `MemoryCellClocks::read_race_detect`.
-            if !current_span.is_dummy() {
-                thread_clocks.clock.index_mut(index).span = current_span;
-            }
-            thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
-            // This can fail to exist if `race_detecting` was false when the allocation
-            // occurred, in which case we can backdate this to the beginning of time.
-            let mut clocks = self.local_clocks.borrow_mut();
-            let clocks = clocks.entry(local).or_insert_with(Default::default);
-            clocks.read = thread_clocks.clock[index];
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::read_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
         }
+        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
+        // This can fail to exist if `race_detecting` was false when the allocation
+        // occurred, in which case we can backdate this to the beginning of time.
+        let mut clocks = self.local_clocks.borrow_mut();
+        let clocks = clocks.entry(local).or_insert_with(Default::default);
+        clocks.read = thread_clocks.clock[index];
     }
 
     pub fn local_moved_to_memory(
@@ -1200,21 +1197,22 @@ impl FrameState {
         machine: &MiriMachine<'_>,
     ) {
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // Get the time the last write actually happened. This can fail to exist if
-            // `race_detecting` was false when the write occurred, in that case we can backdate this
-            // to the beginning of time.
-            let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
-            for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
-                // The initialization write for this already happened, just at the wrong timestamp.
-                // Check that the thread index matches what we expect.
-                assert_eq!(mem_clocks.write.0, index);
-                // Convert the local's clocks into memory clocks.
-                mem_clocks.write = (index, local_clocks.write);
-                mem_clocks.write_type = local_clocks.write_type;
-                mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
-            }
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // Get the time the last write actually happened. This can fail to exist if
+        // `race_detecting` was false when the write occurred, in that case we can backdate this
+        // to the beginning of time.
+        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
+        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
+            // The initialization write for this already happened, just at the wrong timestamp.
+            // Check that the thread index matches what we expect.
+            assert_eq!(mem_clocks.write.0, index);
+            // Convert the local's clocks into memory clocks.
+            mem_clocks.write = (index, local_clocks.write);
+            mem_clocks.write_type = local_clocks.write_type;
+            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
         }
     }
 }
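Note: the three `FrameState` hunks apply the same guard; since `local_write`, `local_read`, and `local_moved_to_memory` return `()`, the guard is a bare `return;` rather than `return Ok(());`. The "backdate to the beginning of time" idiom in the comments relies on `Default` producing zero timestamps for a missing entry. A hedged sketch with simplified, hypothetical types (not Miri's `LocalClocks`):

use std::collections::HashMap;

#[derive(Default, Clone, Copy)]
struct LocalClocks {
    write: u64, // last write timestamp; Default yields 0, "the beginning of time"
    read: u64,  // last read timestamp
}

fn local_read(clocks: &mut HashMap<usize, LocalClocks>, local: usize, now: u64) {
    // If the local was allocated while race detection was off, no entry exists;
    // or_insert_with(Default::default) backdates it to time zero.
    let entry = clocks.entry(local).or_insert_with(Default::default);
    entry.read = now;
}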
@@ -1403,69 +1401,67 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         assert!(access.is_atomic());
-        if let Some(data_race) = &this.machine.data_race {
-            if data_race.race_detecting() {
-                let size = place.layout.size;
-                let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
-                // Load and log the atomic operation.
-                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
-                trace!(
-                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
-                    access.description(None, None),
-                    &atomic,
-                    place.ptr(),
-                    size.bytes()
-                );
+        let Some(data_race) = &this.machine.data_race else { return Ok(()) };
+        if !data_race.race_detecting() {
+            return Ok(());
+        }
+        let size = place.layout.size;
+        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
+        // Load and log the atomic operation.
+        // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
+        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+        trace!(
+            "Atomic op({}) with ordering {:?} on {:?} (size={})",
+            access.description(None, None),
+            &atomic,
+            place.ptr(),
+            size.bytes()
+        );
 
-                let current_span = this.machine.current_span();
-                // Perform the atomic operation.
-                data_race.maybe_perform_sync_operation(
-                    &this.machine.threads,
-                    current_span,
-                    |index, mut thread_clocks| {
-                        for (mem_clocks_range, mem_clocks) in
-                            alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
-                        {
-                            if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
-                            {
-                                mem::drop(thread_clocks);
-                                return VClockAlloc::report_data_race(
-                                    data_race,
-                                    &this.machine.threads,
-                                    mem_clocks,
-                                    access,
-                                    place.layout.size,
-                                    interpret::Pointer::new(
-                                        alloc_id,
-                                        Size::from_bytes(mem_clocks_range.start),
-                                    ),
-                                    None,
-                                )
-                                .map(|_| true);
-                            }
-                        }
-
-                        // This conservatively assumes all operations have release semantics
-                        Ok(true)
-                    },
-                )?;
-
-                // Log changes to atomic memory.
-                if tracing::enabled!(tracing::Level::TRACE) {
-                    for (_offset, mem_clocks) in
-                        alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
-                    {
-                        trace!(
-                            "Updated atomic memory({:?}, size={}) to {:#?}",
-                            place.ptr(),
-                            size.bytes(),
-                            mem_clocks.atomic_ops
-                        );
+        let current_span = this.machine.current_span();
+        // Perform the atomic operation.
+        data_race.maybe_perform_sync_operation(
+            &this.machine.threads,
+            current_span,
+            |index, mut thread_clocks| {
+                for (mem_clocks_range, mem_clocks) in
+                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
+                {
+                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
+                        mem::drop(thread_clocks);
+                        return VClockAlloc::report_data_race(
+                            data_race,
+                            &this.machine.threads,
+                            mem_clocks,
+                            access,
+                            place.layout.size,
+                            interpret::Pointer::new(
+                                alloc_id,
+                                Size::from_bytes(mem_clocks_range.start),
+                            ),
+                            None,
+                        )
+                        .map(|_| true);
                     }
                 }
+
+                // This conservatively assumes all operations have release semantics
+                Ok(true)
+            },
+        )?;
+
+        // Log changes to atomic memory.
+        if tracing::enabled!(tracing::Level::TRACE) {
+            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
+                trace!(
+                    "Updated atomic memory({:?}, size={}) to {:#?}",
+                    place.ptr(),
+                    size.bytes(),
+                    mem_clocks.atomic_ops
+                );
             }
         }
+
         Ok(())
     }
 }
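Note: beyond the guard clauses, this last hunk also replaces the outer `if let Some(data_race) = ...` nesting with a `let ... else` binding (stable since Rust 1.65), which keeps `data_race` bound for the rest of the function without an extra indentation level. A minimal sketch with hypothetical names, not from Miri:

fn log_atomic_op(detector: Option<&String>, detecting: bool) -> Result<(), String> {
    // `let ... else` must diverge in the else branch; here we return early.
    let Some(name) = detector else { return Ok(()) };
    if !detecting {
        return Ok(());
    }
    println!("atomic op observed by {name}");
    Ok(())
}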