@@ -9,6 +9,17 @@ use crate::vm::VMBinding;
use crate::vm::{ActivePlan, Collection};
use downcast_rs::Downcast;

+#[repr(C)]
+#[derive(Debug)]
+/// A list of errors that MMTk can encounter during allocation.
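+///
+/// As a hedged illustration (not code from this patch), a binding's
+/// [`Collection::out_of_memory`] implementation might dispatch on the error kind roughly like
+/// this; `schedule_oom_exception` is a hypothetical binding-side helper:
+///
+/// ```ignore
+/// fn out_of_memory(tls: VMThread, err_kind: AllocationError) {
+///     match err_kind {
+///         // The binding may recover from this, e.g. by throwing a language-level
+///         // OutOfMemoryError to the offending thread.
+///         AllocationError::HeapOutOfMemory => schedule_oom_exception(tls),
+///         // MMTk expects the VM to abort on a critical mmap failure.
+///         AllocationError::MmapOutOfMemory => std::process::abort(),
+///     }
+/// }
+/// ```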
+pub enum AllocationError {
+    /// The specified heap size is too small for the given program to continue.
+    HeapOutOfMemory,
+    /// The OS is unable to mmap or acquire more memory. Critical error. MMTk expects the VM to
+    /// abort if such an error is thrown.
+    MmapOutOfMemory,
+}
+
#[inline(always)]
pub fn align_allocation_no_fill<VM: VMBinding>(
    region: Address,
@@ -102,32 +113,73 @@ pub fn get_maximum_aligned_size<VM: VMBinding>(
    }
}

+/// A trait which implements allocation routines. Every allocator needs to implement this trait.
pub trait Allocator<VM: VMBinding>: Downcast {
+    /// Return the [`VMThread`] associated with this allocator instance.
    fn get_tls(&self) -> VMThread;

+    /// Return the [`Space`] instance associated with this allocator instance.
    fn get_space(&self) -> &'static dyn Space<VM>;
+
+    /// Return the [`Plan`] instance that this allocator instance is associated with.
    fn get_plan(&self) -> &'static dyn Plan<VM = VM>;

-    /// Does this allocator do thread local allocation? If an allocator does not do thread local allocation,
-    /// each allocation will go to slowpath and will have a check for GC polls.
+    /// Return whether this allocator can do thread local allocation. If an allocator does not do
+    /// thread local allocation, each allocation will go to the slowpath and will have a check
+    /// for GC polls.
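+    ///
+    /// For example (an illustration, not an exhaustive rule): a bump-pointer allocator that
+    /// serves allocations from a thread local buffer would return true here, while an allocator
+    /// that takes the slowpath on every allocation would return false.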
    fn does_thread_local_allocation(&self) -> bool;

-    /// At which granularity the allocator acquires memory from the global space and use them as thread local buffer.
-    /// For example, bump pointer allocator acquire memory at 32KB blocks. Depending on the actual size for the current object,
-    /// they always acquire memory of N*32KB (N>=1). Thus bump pointer allocator returns 32KB for this method.
-    /// Only allocators that do thread local allocation need to implement this method.
+    /// Return the granularity at which the allocator acquires memory from the global space and
+    /// uses it as its thread local buffer. For example, the [`BumpAllocator`] acquires memory in
+    /// 32KB blocks. Depending on the actual size of the current object, it always acquires
+    /// memory in multiples of 32KB (N * 32KB, N >= 1). Thus the [`BumpAllocator`] returns 32KB
+    /// for this method. Only allocators that do thread local allocation need to implement this
+    /// method.
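+    ///
+    /// A hedged arithmetic sketch of the granularity contract described above:
+    ///
+    /// ```ignore
+    /// // An allocator with a 32KB granularity that must satisfy a 40KB request
+    /// // acquires the smallest multiple of the granularity that fits the object.
+    /// let granularity = 32 * 1024;
+    /// let size = 40 * 1024;
+    /// let acquired = ((size + granularity - 1) / granularity) * granularity;
+    /// assert_eq!(acquired, 2 * granularity); // N = 2
+    /// ```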
    fn get_thread_local_buffer_granularity(&self) -> usize {
        assert!(self.does_thread_local_allocation(), "An allocator that does not do thread local allocation does not have a buffer granularity.");
        unimplemented!()
    }

+    /// An allocation attempt. The implementation of this function depends on the allocator used.
+    /// If an allocator supports thread local allocations, then the allocation will be serviced
+    /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`].
+    ///
+    /// Note that in the case where the VM is out of memory, we invoke
+    /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back
+    /// to it. We make no assumptions about whether the VM will continue executing or abort
+    /// immediately.
+    ///
+    /// Arguments:
+    /// * `size`: the allocation size in bytes.
+    /// * `align`: the required alignment in bytes.
+    /// * `offset`: the required offset in bytes.
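+    ///
+    /// A hedged usage sketch from the binding's side (the `allocator` handle is illustrative):
+    ///
+    /// ```ignore
+    /// // Allocate 16 bytes with 8-byte alignment and no offset.
+    /// let addr = allocator.alloc(16, 8, 0);
+    /// if addr.is_zero() {
+    ///     // The binding has already been notified via `Collection::out_of_memory` and
+    ///     // decides whether to unwind, throw, or abort.
+    /// }
+    /// ```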
    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address;

+    /// Slowpath allocation attempt. This function is explicitly not inlined for performance
+    /// considerations.
+    ///
+    /// Arguments:
+    /// * `size`: the allocation size in bytes.
+    /// * `align`: the required alignment in bytes.
+    /// * `offset`: the required offset in bytes.
    #[inline(never)]
    fn alloc_slow(&mut self, size: usize, align: usize, offset: isize) -> Address {
        self.alloc_slow_inline(size, align, offset)
    }

+    /// Slowpath allocation attempt. This function executes the actual slowpath allocation. A
+    /// slowpath allocation in MMTk attempts to allocate the object using the per-allocator
+    /// definition of [`alloc_slow_once`]. This function also accounts for increasing the
+    /// allocation bytes in order to support stress testing. In case precise stress testing is
+    /// being used, the [`alloc_slow_once_precise_stress`] function is used instead.
+    ///
+    /// Note that in the case where the VM is out of memory, we invoke
+    /// [`Collection::out_of_memory`] with an [`AllocationError::HeapOutOfMemory`] error to
+    /// inform the binding and then return a null pointer back to it. We make no assumptions
+    /// about whether the VM will continue executing or abort immediately on an
+    /// [`AllocationError::HeapOutOfMemory`] error.
+    ///
+    /// Arguments:
+    /// * `size`: the allocation size in bytes.
+    /// * `align`: the required alignment in bytes.
+    /// * `offset`: the required offset in bytes.
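+    ///
+    /// An illustrative sketch of the dispatch described above (the predicate name is
+    /// simplified, not the exact implementation):
+    ///
+    /// ```ignore
+    /// let result = if precise_stress_test_enabled {
+    ///     // Force a GC poll so that every N allocated bytes trigger a stress GC.
+    ///     self.alloc_slow_once_precise_stress(size, align, offset, need_poll)
+    /// } else {
+    ///     self.alloc_slow_once(size, align, offset)
+    /// };
+    /// ```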
    #[inline(always)]
    fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: isize) -> Address {
        let tls = self.get_tls();
@@ -207,10 +259,10 @@ pub trait Allocator<VM: VMBinding>: Downcast {
            }

            // It is possible to have cases where a thread is blocked for another GC (non emergency)
-            // immediately after being blocked for a GC (emergency) (e.g. in stress test), that is saying the thread does not
-            // leave this loop between the two GCs. The local var 'emergency_collection' was set to true
-            // after the first GC. But when we execute this check below, we just finished the second GC,
-            // which is not emergency. In such case, we will give a false OOM.
+            // immediately after being blocked for a GC (emergency) (e.g. in stress test), that is saying
+            // the thread does not leave this loop between the two GCs. The local var 'emergency_collection'
+            // was set to true after the first GC. But when we execute this check below, we just finished
+            // the second GC, which is not emergency. In such case, we will give a false OOM.
            // We cannot just rely on the local var. Instead, we get the emergency collection value again,
            // and check both.
            if emergency_collection && self.get_plan().is_emergency_collection() {
@@ -220,8 +272,11 @@ pub trait Allocator<VM: VMBinding>: Downcast {
                let fail_with_oom = !plan.allocation_success.swap(true, Ordering::SeqCst);
                trace!("fail with oom={}", fail_with_oom);
                if fail_with_oom {
-                    VM::VMCollection::out_of_memory(tls);
-                    trace!("Not reached");
+                    // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM.
+                    trace!("Throw HeapOutOfMemory!");
+                    VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory);
+                    plan.allocation_success.swap(false, Ordering::SeqCst);
+                    return result;
                }
            }

@@ -235,53 +290,63 @@ pub trait Allocator<VM: VMBinding>: Downcast {
            // VMActivePlan::mutator(tls).get_allocator_from_space(space)
            //};

-            /*
-             * Record whether last collection was an Emergency collection.
-             * If so, we make one more attempt to allocate before we signal
-             * an OOM.
-             */
+            // Record whether last collection was an Emergency collection. If so, we make one more
+            // attempt to allocate before we signal an OOM.
            emergency_collection = self.get_plan().is_emergency_collection();
            trace!("Got emergency collection as {}", emergency_collection);
            previous_result_zero = true;
        }
    }

-    /// Single slow path allocation attempt. This is called by allocSlow.
+    /// Single slowpath allocation attempt. This is called by [`alloc_slow_inline`]. The
+    /// implementation of this function depends on the allocator used. Generally, if an allocator
+    /// supports thread local allocations, it will try to allocate more TLAB space here. If it
+    /// doesn't, then (generally) the allocator simply allocates enough space for the current
+    /// object.
+    ///
+    /// Arguments:
+    /// * `size`: the allocation size in bytes.
+    /// * `align`: the required alignment in bytes.
+    /// * `offset`: the required offset in bytes.
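+    ///
+    /// A hedged sketch for an allocator that does *not* do thread local allocation; the
+    /// `acquire` call and the page-rounding helper are assumptions for illustration:
+    ///
+    /// ```ignore
+    /// fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    ///     // Acquire just enough pages for the current object; acquiring from the
+    ///     // global space is assumed to include the GC poll.
+    ///     let pages = bytes_to_pages_up(size);
+    ///     self.get_space().acquire(self.get_tls(), pages)
+    /// }
+    /// ```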
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address;

-    /// Single slowpath allocation attempt for stress test. When the stress factor is set (e.g. to N),
-    /// we would expect for every N bytes allocated, we will trigger a stress GC.
-    /// However, for allocators that do thread local allocation, they may allocate from their thread local buffer
-    /// which does not have a GC poll check, and they may even allocate with the JIT generated allocation
-    /// fastpath which is unaware of stress test GC. For both cases, we are not able to guarantee
-    /// a stress GC is triggered every N bytes. To solve this, when the stress factor is set, we
-    /// will call this method instead of the normal alloc_slow_once(). We expect the implementation of this slow allocation
-    /// will trick the fastpath so every allocation will fail in the fastpath, jump to the slow path and eventually
-    /// call this method again for the actual allocation.
+    /// Single slowpath allocation attempt for stress testing. When the stress factor is set
+    /// (e.g. to N), we expect a stress GC to be triggered for every N bytes allocated. However,
+    /// allocators that do thread local allocation may allocate from their thread local buffer,
+    /// which does not have a GC poll check, and they may even allocate with the JIT generated
+    /// allocation fastpath, which is unaware of stress test GCs. In both cases, we are not able
+    /// to guarantee that a stress GC is triggered every N bytes. To solve this, when the stress
+    /// factor is set, we will call this method instead of the normal [`alloc_slow_once`]. We
+    /// expect the implementation of this slow allocation to trick the fastpath so that every
+    /// allocation fails in the fastpath, jumps to the slowpath, and eventually calls this method
+    /// again for the actual allocation.
    ///
-    /// The actual implementation about how to trick the fastpath may vary. For example, our bump pointer allocator will
-    /// set the thread local buffer limit to the buffer size instead of the buffer end address. In this case, every fastpath
-    /// check (cursor + size < limit) will fail, and jump to this slowpath. In the slowpath, we still allocate from the thread
-    /// local buffer, and recompute the limit (remaining buffer size).
+    /// The actual implementation of how to trick the fastpath may vary. For example, our bump
+    /// pointer allocator will set the thread local buffer limit to the buffer size instead of
+    /// the buffer end address. In this case, every fastpath check (`cursor + size < limit`) will
+    /// fail, and jump to this slowpath. In the slowpath, we still allocate from the thread local
+    /// buffer, and recompute the limit (remaining buffer size).
    ///
-    /// If an allocator does not do thread local allocation (which returns false for does_thread_local_allocation()), it does
-    /// not need to override this method. The default implementation will simply call allow_slow_once() and it will work fine
-    /// for allocators that do not have thread local allocation.
+    /// If an allocator does not do thread local allocation (i.e. it returns false for
+    /// [`does_thread_local_allocation`]), it does not need to override this method. The default
+    /// implementation will simply call [`alloc_slow_once`] and it will work fine for allocators
+    /// that do not have thread local allocation.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
-    /// * `need_poll`: if this is true, the implementation must poll for a GC, rather than attempting to allocate from the local buffer.
+    /// * `need_poll`: if this is true, the implementation must poll for a GC, rather than
+    ///   attempting to allocate from the local buffer.
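+    ///
+    /// An illustrative sketch of the bump-pointer "limit trick" described above (field names
+    /// are simplified):
+    ///
+    /// ```ignore
+    /// // Under precise stress, store the remaining buffer *size* in `limit` instead of the
+    /// // buffer end address, so the fastpath check `cursor + size < limit` always fails and
+    /// // every allocation falls through to this slowpath.
+    /// self.limit = unsafe { Address::from_usize(remaining_buffer_size) };
+    /// ```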
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: isize,
        need_poll: bool,
    ) -> Address {
-        // If an allocator does thread local allocation but does not override this method to provide a correct implementation,
-        // we will log a warning.
+        // If an allocator does thread local allocation but does not override this method to
+        // provide a correct implementation, we will log a warning.
        if self.does_thread_local_allocation() && need_poll {
            warn!("{} does not support stress GC (An allocator that does thread local allocation needs to implement alloc_slow_once_precise_stress()).", std::any::type_name::<Self>());
        }