//! collection.rs: the MMTk `Collection` trait implementation for the Julia
//! binding (from a fork of mmtk/mmtk-julia).

use crate::{
    jl_gc_get_max_memory, jl_gc_prepare_to_collect, jl_gc_update_stats, jl_get_gc_disable_counter,
    jl_hrtime, jl_throw_out_of_memory_error,
};
use crate::{JuliaVM, SINGLETON, USER_TRIGGERED_GC};
use crate::{BLOCK_FOR_GC, STW_COND, WORLD_HAS_STOPPED};
use log::{info, trace};
use mmtk::util::alloc::AllocationError;
use mmtk::util::heap::GCTriggerPolicy;
use mmtk::util::opaque_pointer::*;
use mmtk::vm::{Collection, GCThreadContext};
use mmtk::Mutator;
use std::collections::HashSet;
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicU64, Ordering};
use std::sync::RwLock;
use std::thread::ThreadId;

/// Start time of the current GC, as reported by `jl_hrtime()`.
pub static GC_START: AtomicU64 = AtomicU64::new(0);

lazy_static! {
    // Thread ids of all currently registered MMTk GC worker threads.
    static ref GC_THREADS: RwLock<HashSet<ThreadId>> = RwLock::new(HashSet::new());
}
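
// The helpers below keep a registry of MMTk GC worker threads: `spawn_gc_thread`
// further down registers each worker when it starts and unregisters it if the
// worker loop ever returns, and `is_gc_thread()` reports whether the current
// thread is one of those workers.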
pub(crate) fn register_gc_thread() {
    let id = std::thread::current().id();
    GC_THREADS.write().unwrap().insert(id);
}

pub(crate) fn unregister_gc_thread() {
    let id = std::thread::current().id();
    GC_THREADS.write().unwrap().remove(&id);
}

pub(crate) fn is_gc_thread() -> bool {
    let id = std::thread::current().id();
    GC_THREADS.read().unwrap().contains(&id)
}
pub struct VMCollection {}

impl Collection<JuliaVM> for VMCollection {
    fn stop_all_mutators<F>(_tls: VMWorkerThread, mut mutator_visitor: F)
    where
        F: FnMut(&'static mut Mutator<JuliaVM>),
    {
        // Wait for all mutators to stop and all finalizers to run
        while !AtomicBool::load(&WORLD_HAS_STOPPED, Ordering::SeqCst) {
            // Stay here while the world has not stopped
            // FIXME add wait var
        }

        trace!("Stopped the world!");

        // Tell MMTk the stacks are ready.
        {
            use mmtk::vm::ActivePlan;
            for mutator in crate::active_plan::VMActivePlan::mutators() {
                info!("stop_all_mutators: visiting {:?}", mutator.mutator_tls);
                mutator_visitor(mutator);
            }
        }

        // Record the start time of the GC
        let now = unsafe { jl_hrtime() };
        trace!("gc_start = {}", now);
        GC_START.store(now, Ordering::Relaxed);
    }
    fn resume_mutators(_tls: VMWorkerThread) {
        // Get the end time of the GC
        let end = unsafe { jl_hrtime() };
        trace!("gc_end = {}", end);
        let gc_time = end - GC_START.load(Ordering::Relaxed);
        unsafe {
            jl_gc_update_stats(
                gc_time,
                crate::api::mmtk_used_bytes(),
                is_current_gc_nursery(),
            )
        }

        AtomicBool::store(&BLOCK_FOR_GC, false, Ordering::SeqCst);
        AtomicBool::store(&WORLD_HAS_STOPPED, false, Ordering::SeqCst);

        let (_, cvar) = &*STW_COND.clone();
        cvar.notify_all();

        info!(
            "Live bytes = {}, total bytes = {}",
            crate::api::mmtk_used_bytes(),
            crate::api::mmtk_total_bytes()
        );
        trace!("Resuming mutators.");
    }
    fn block_for_gc(_tls: VMMutatorThread) {
        info!("Triggered GC!");
        unsafe { jl_gc_prepare_to_collect() };
        info!("Finished blocking mutator for GC!");
    }
    fn spawn_gc_thread(_tls: VMThread, ctx: GCThreadContext<JuliaVM>) {
        // Just drop the join handle. The thread will run until the process quits.
        let _ = std::thread::Builder::new()
            .name("MMTk Worker".to_string())
            .spawn(move || {
                use mmtk::util::opaque_pointer::*;
                use mmtk::util::Address;

                // Remember this GC thread
                register_gc_thread();

                // Start the worker loop
                let worker_tls = VMWorkerThread(VMThread(OpaquePointer::from_address(unsafe {
                    Address::from_usize(thread_id::get())
                })));
                match ctx {
                    GCThreadContext::Worker(w) => {
                        mmtk::memory_manager::start_worker(&SINGLETON, worker_tls, w)
                    }
                }

                // If the worker loop ever returns, unregister this GC thread.
                unregister_gc_thread();
            });
    }
    fn schedule_finalization(_tls: VMWorkerThread) {}

    fn out_of_memory(_tls: VMThread, _err_kind: AllocationError) {
        println!("Out of Memory!");
        unsafe { jl_throw_out_of_memory_error() };
    }

    fn vm_live_bytes() -> usize {
        crate::api::JULIA_MALLOC_BYTES.load(Ordering::SeqCst)
    }

    fn is_collection_enabled() -> bool {
        unsafe { jl_get_gc_disable_counter() == 0 }
    }

    fn create_gc_trigger() -> Box<dyn GCTriggerPolicy<JuliaVM>> {
        use crate::gc_trigger::*;
        let max_memory = unsafe { jl_gc_get_max_memory() };
        Box::new(JuliaGCTrigger::new(max_memory))
    }
}
/// Returns true if the current collection is a nursery (minor) GC,
/// and false when the plan is not generational.
pub fn is_current_gc_nursery() -> bool {
    match crate::SINGLETON.get_plan().generational() {
        Some(gen) => gen.is_current_gc_nursery(),
        None => false,
    }
}
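
// `mmtk_block_thread_for_gc` is exported to C; presumably the Julia runtime
// calls it when a mutator reaches the stop-the-world point (an assumption:
// likely somewhere downstream of `jl_gc_prepare_to_collect` above). What is
// certain from this file is the handshake itself: it publishes
// WORLD_HAS_STOPPED, which releases the spin loop in `stop_all_mutators`, and
// then sleeps on STW_COND until `resume_mutators` clears BLOCK_FOR_GC and
// notifies the condvar.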
#[no_mangle]
pub extern "C" fn mmtk_block_thread_for_gc() {
    AtomicBool::store(&BLOCK_FOR_GC, true, Ordering::SeqCst);

    let (lock, cvar) = &*STW_COND.clone();
    let mut count = lock.lock().unwrap();

    info!("Blocking for GC!");
    AtomicBool::store(&WORLD_HAS_STOPPED, true, Ordering::SeqCst);

    while AtomicBool::load(&BLOCK_FOR_GC, Ordering::SeqCst) {
        count = cvar.wait(count).unwrap();
    }

    AtomicIsize::store(&USER_TRIGGERED_GC, 0, Ordering::SeqCst);
}
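
// The module below is not part of the binding; it is a small, self-contained
// sketch of the stop-the-world handshake implemented above. It uses local
// flags and a local condvar in place of the crate's BLOCK_FOR_GC,
// WORLD_HAS_STOPPED, and STW_COND statics, and a plain thread in place of a
// real MMTk worker, so it only illustrates the ordering of the protocol: the
// mutator publishes "world has stopped" and waits, the GC side observes it,
// then clears the block flag and notifies.
#[cfg(test)]
mod stw_handshake_sketch {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Condvar, Mutex};

    #[test]
    fn mutator_blocks_until_gc_clears_the_flag() {
        let block_for_gc = Arc::new(AtomicBool::new(false));
        let world_has_stopped = Arc::new(AtomicBool::new(false));
        let stw_cond = Arc::new((Mutex::new(()), Condvar::new()));

        // "Mutator" side: mirrors mmtk_block_thread_for_gc().
        let mutator = {
            let (block, stopped, cond) = (
                Arc::clone(&block_for_gc),
                Arc::clone(&world_has_stopped),
                Arc::clone(&stw_cond),
            );
            std::thread::spawn(move || {
                block.store(true, Ordering::SeqCst);
                let (lock, cvar) = &*cond;
                let mut guard = lock.lock().unwrap();
                stopped.store(true, Ordering::SeqCst);
                while block.load(Ordering::SeqCst) {
                    guard = cvar.wait(guard).unwrap();
                }
            })
        };

        // "GC worker" side: mirrors stop_all_mutators() spinning on the flag...
        while !world_has_stopped.load(Ordering::SeqCst) {
            std::hint::spin_loop();
        }

        // ...and resume_mutators() clearing the flags and waking the mutator.
        // Unlike the binding code above, the flags are cleared under the lock,
        // so this standalone model cannot miss the wakeup.
        {
            let _guard = stw_cond.0.lock().unwrap();
            block_for_gc.store(false, Ordering::SeqCst);
            world_has_stopped.store(false, Ordering::SeqCst);
        }
        stw_cond.1.notify_all();

        mutator.join().unwrap();
    }
}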