1 /*
2 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "gc/epsilon/epsilonHeap.hpp"
26 #include "gc/epsilon/epsilonMemoryPool.hpp"
27 #include "gc/epsilon/epsilonThreadLocalData.hpp"
28 #include "gc/shared/gcArguments.hpp"
29 #include "gc/shared/locationPrinter.inline.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "runtime/atomic.hpp"
35 #include "runtime/globals.hpp"
36
37 jint EpsilonHeap::initialize() {
38 size_t align = HeapAlignment;
39 size_t init_byte_size = align_up(InitialHeapSize, align);
40 size_t max_byte_size = align_up(MaxHeapSize, align);
41
42 // Initialize backing storage
43 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
44 _virtual_space.initialize(heap_rs, init_byte_size);
45
46 MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
47 MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
48
49 initialize_reserved_region(heap_rs);
50
51 _space = new ContiguousSpace();
52 _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
53
54 // Precompute hot fields
55 _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
56 _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
57 _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
58 _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
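// Worked example (illustrative, assuming 8-byte HeapWords and default flag values): with
// EpsilonMaxTLABSize = 4M, _max_tlab_size comes out to 512K words, unless the global
// CollectedHeap::max_tlab_size() cap is smaller; counters are then refreshed at least
// once per 1/16th of the maximum heap size allocated.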
59
60 // Enable monitoring
61 _monitoring_support = new EpsilonMonitoringSupport(this);
62 _last_counter_update = 0;
63 _last_heap_print = 0;
64
65 // Install barrier set
66 BarrierSet::set_barrier_set(new EpsilonBarrierSet());
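// (EpsilonBarrierSet is effectively a no-op barrier set: it emits no store/load
//  barriers and only sets up the per-thread GC data on thread creation.)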
67
68 // All done, print out the configuration
69 if (init_byte_size != max_byte_size) {
70 log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
71 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
72 } else {
73 log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
74 }
75
76 if (UseTLAB) {
77 log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
78 if (EpsilonElasticTLAB) {
79 log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
80 }
81 if (EpsilonElasticTLABDecay) {
82 log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
83 }
84 } else {
85 log_info(gc)("Not using TLAB allocation");
86 }
87
224 assert(min_size <= size,
225 "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
226 assert(size <= _max_tlab_size,
227 "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
228 assert(size <= CollectedHeap::max_tlab_size(),
229 "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());
230
231 if (log_is_enabled(Trace, gc)) {
232 ResourceMark rm;
233 log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
234 "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
235 thread->name(),
236 requested_size * HeapWordSize / K,
237 min_size * HeapWordSize / K,
238 _max_tlab_size * HeapWordSize / K,
239 ergo_tlab * HeapWordSize / K,
240 size * HeapWordSize / K);
241 }
242
243 // All prepared, let's do it!
244 HeapWord* res = allocate_work(size);
245
246 if (res != NULL) {
247 // Allocation successful
248 *actual_size = size;
249 if (EpsilonElasticTLABDecay) {
250 EpsilonThreadLocalData::set_last_tlab_time(thread, time);
251 }
252 if (EpsilonElasticTLAB && !fits) {
253 // If we requested expansion, this is our new ergonomic TLAB size
254 EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
255 }
256 } else {
257 // Allocation failed, reset ergonomics to try and fit smaller TLABs
258 if (EpsilonElasticTLAB) {
259 EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
260 }
261 }
262
263 return res;
264 }
265
266 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
267 *gc_overhead_limit_was_exceeded = false;
268 return allocate_work(size);
269 }
270
271 void EpsilonHeap::collect(GCCause::Cause cause) {
272 switch (cause) {
273 case GCCause::_metadata_GC_threshold:
274 case GCCause::_metadata_GC_clear_soft_refs:
275 // Receiving these causes means the VM itself entered the safepoint for metadata collection.
276 // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
277 // re-enter the safepoint again very soon.
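// (MetaspaceGC::compute_new_size() recomputes the metaspace expansion threshold,
//  so the threshold-triggered request does not immediately fire again.)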
278
279 assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
280 log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
281 MetaspaceGC::compute_new_size();
282 print_metaspace_info();
283 break;
284 default:
285 log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
286 }
287 _monitoring_support->update_counters();
288 }
289
290 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
291 collect(gc_cause());
292 }
293
294 void EpsilonHeap::object_iterate(ObjectClosure *cl) {
295 _space->object_iterate(cl);
296 }
297
298 void EpsilonHeap::print_on(outputStream *st) const {
299 st->print_cr("Epsilon Heap");
300
301 // Cast away constness:
302 ((VirtualSpace)_virtual_space).print_on(st);
303
304 st->print_cr("Allocation space:");
305 _space->print_on(st);
306
332 log_info(gc)("Heap: no reliable data");
333 }
334 }
335
336 void EpsilonHeap::print_metaspace_info() const {
337 size_t reserved = MetaspaceUtils::reserved_bytes();
338 size_t committed = MetaspaceUtils::committed_bytes();
339 size_t used = MetaspaceUtils::used_bytes();
340
341 if (reserved != 0) {
342 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
343 SIZE_FORMAT "%s (%.2f%%) used",
344 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
345 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
346 committed * 100.0 / reserved,
347 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
348 used * 100.0 / reserved);
349 } else {
350 log_info(gc, metaspace)("Metaspace: no reliable data");
351 }
352 }
1 /*
2 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/epsilon/epsilonHeap.hpp"
30 #include "gc/epsilon/epsilonMemoryPool.hpp"
31 #include "gc/epsilon/epsilonThreadLocalData.hpp"
32 #include "gc/shared/barrierSet.inline.hpp"
33 #include "gc/shared/gcArguments.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/markBitMap.inline.hpp"
37 #include "gc/shared/strongRootsScope.hpp"
38 #include "gc/shared/preservedMarks.inline.hpp"
39 #include "gc/shared/weakProcessor.hpp"
40 #include "memory/allocation.hpp"
41 #include "memory/allocation.inline.hpp"
42 #include "memory/iterator.inline.hpp"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "oops/compressedOops.inline.hpp"
46 #include "oops/markWord.inline.hpp"
47 #include "runtime/biasedLocking.hpp"
48 #include "runtime/atomic.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/objectMonitor.inline.hpp"
51 #include "runtime/thread.hpp"
52 #include "runtime/vmOperations.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "utilities/stack.inline.hpp"
55 #include "services/management.hpp"
56
57 jint EpsilonHeap::initialize() {
58 size_t align = HeapAlignment;
59 size_t init_byte_size = align_up(InitialHeapSize, align);
60 size_t max_byte_size = align_up(MaxHeapSize, align);
61
62 // Initialize backing storage
63 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
64 _virtual_space.initialize(heap_rs, init_byte_size);
65
66 MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
67 MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
68
69 initialize_reserved_region(heap_rs);
70
71 _space = new ContiguousSpace();
72 _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
73
74 // Precompute hot fields
75 _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
76 _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
77 _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
78 _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
79
80 // Enable monitoring
81 _monitoring_support = new EpsilonMonitoringSupport(this);
82 _last_counter_update = 0;
83 _last_heap_print = 0;
84
85 // Install barrier set
86 BarrierSet::set_barrier_set(new EpsilonBarrierSet());
87
88 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
89 size_t _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
90 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
91
92 // Initialize the marking bitmap, but do not commit it yet
93 if (EpsilonSlidingGC) {
94 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
95 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
96 _bitmap_region = MemRegion((HeapWord *) bitmap.base(), bitmap.size() / HeapWordSize);
97 MemRegion heap_region = MemRegion((HeapWord *) heap_rs.base(), heap_rs.size() / HeapWordSize);
98 _bitmap.initialize(heap_region, _bitmap_region);
99 }
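// The bitmap needs one mark bit per heap word, i.e. roughly 1/64th of the heap size
// on 64-bit: for example, a 64G maximum heap reserves about 1G of bitmap address
// space here, which stays uncommitted until a collection actually runs.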
100
101 // All done, print out the configuration
102 if (init_byte_size != max_byte_size) {
103 log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
104 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
105 } else {
106 log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
107 }
108
109 if (UseTLAB) {
110 log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
111 if (EpsilonElasticTLAB) {
112 log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
113 }
114 if (EpsilonElasticTLABDecay) {
115 log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
116 }
117 } else {
118 log_info(gc)("Not using TLAB allocation");
119 }
120
257 assert(min_size <= size,
258 "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
259 assert(size <= _max_tlab_size,
260 "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
261 assert(size <= CollectedHeap::max_tlab_size(),
262 "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());
263
264 if (log_is_enabled(Trace, gc)) {
265 ResourceMark rm;
266 log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
267 "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
268 thread->name(),
269 requested_size * HeapWordSize / K,
270 min_size * HeapWordSize / K,
271 _max_tlab_size * HeapWordSize / K,
272 ergo_tlab * HeapWordSize / K,
273 size * HeapWordSize / K);
274 }
275
276 // All prepared, let's do it!
277 HeapWord* res = allocate_or_collect_work(size);
278
279 if (res != NULL) {
280 // Allocation successful
281 *actual_size = size;
282 if (EpsilonElasticTLABDecay) {
283 EpsilonThreadLocalData::set_last_tlab_time(thread, time);
284 }
285 if (EpsilonElasticTLAB && !fits) {
286 // If we requested expansion, this is our new ergonomic TLAB size
287 EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
288 }
289 } else {
290 // Allocation failed, reset ergonomics to try and fit smaller TLABs
291 if (EpsilonElasticTLAB) {
292 EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
293 }
294 }
295
296 return res;
297 }
298
299 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
300 *gc_overhead_limit_was_exceeded = false;
301 return allocate_or_collect_work(size);
302 }
303
304 void EpsilonHeap::collect(GCCause::Cause cause) {
305 switch (cause) {
306 case GCCause::_metadata_GC_threshold:
307 case GCCause::_metadata_GC_clear_soft_refs:
308 // Receiving these causes means the VM itself entered the safepoint for metadata collection.
309 // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
310 // re-enter the safepoint again very soon.
311
312 assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
313 log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
314 MetaspaceGC::compute_new_size();
315 print_metaspace_info();
316 break;
317 default:
318 if (EpsilonSlidingGC) {
319 if (SafepointSynchronize::is_at_safepoint()) {
320 entry_collect(cause);
321 } else {
322 vmentry_collect(cause);
323 }
324 } else {
325 log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
326 }
327 }
328 _monitoring_support->update_counters();
329 }
330
331 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
332 collect(gc_cause());
333 }
334
335 void EpsilonHeap::object_iterate(ObjectClosure *cl) {
336 _space->object_iterate(cl);
337 }
338
339 void EpsilonHeap::print_on(outputStream *st) const {
340 st->print_cr("Epsilon Heap");
341
342 // Cast away constness:
343 ((VirtualSpace)_virtual_space).print_on(st);
344
345 st->print_cr("Allocation space:");
346 _space->print_on(st);
347
373 log_info(gc)("Heap: no reliable data");
374 }
375 }
376
377 void EpsilonHeap::print_metaspace_info() const {
378 size_t reserved = MetaspaceUtils::reserved_bytes();
379 size_t committed = MetaspaceUtils::committed_bytes();
380 size_t used = MetaspaceUtils::used_bytes();
381
382 if (reserved != 0) {
383 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
384 SIZE_FORMAT "%s (%.2f%%) used",
385 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
386 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
387 committed * 100.0 / reserved,
388 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
389 used * 100.0 / reserved);
390 } else {
391 log_info(gc, metaspace)("Metaspace: no reliable data");
392 }
393 }
394
395 // ------------------ EXPERIMENTAL MARK-COMPACT -------------------------------
396 //
397 // This implements a trivial Lisp2-style sliding collector:
398 // https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
399 //
400 // The goal for this implementation is to be as simple as possible, ignoring
401 // non-trivial performance optimizations. This collector does not implement
402 // reference processing: no soft/weak/phantom/finalizable references are ever
403 // cleared. It also does not implement class unloading and other runtime
404 // cleanups.
405 //
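// The collection cycle (entry_collect below) runs wholly at a safepoint, in six steps:
//   Step 0, Prologue:  commit the mark bitmap, retire TLABs, notify the runtime;
//   Step 1, Mark:      trace from the roots, setting bits in the mark bitmap;
//   Step 2, Calculate: compute new (slid-down) locations, recorded as forwardees in mark words;
//   Step 3, Adjust:    rewrite all heap and root references to point to the forwardees;
//   Step 4, Move:      copy objects to their new locations;
//   Step 5, Epilogue:  restore preserved marks, uncommit the bitmap, optionally shrink the heap.
//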
406
407 // VM operation that executes collection cycle under safepoint
408 class VM_EpsilonCollect: public VM_Operation {
409 private:
410 const GCCause::Cause _cause;
411 EpsilonHeap* const _heap;
412 static size_t _last_used;
413 public:
414 VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(),
415 _cause(cause),
416 _heap(EpsilonHeap::heap()) {};
417
418 VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
419 const char* name() const { return "Epsilon Collection"; }
420
421 virtual bool doit_prologue() {
422 // Need to take the Heap lock before managing backing storage.
423 // This also naturally serializes GC requests, and allows us to coalesce
424 // back-to-back allocation failure requests from many threads. There is no
425 // need to handle an allocation failure request that arrives without any
426 // allocations since the last complete GC. Waiting until 1% of the heap has
427 // been allocated before starting the next GC seems to resolve most races.
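// (For example, with a 1G heap this admits a new allocation-failure GC only after
//  roughly 10M of new allocations since the previous cycle.)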
428 Heap_lock->lock();
429 size_t used = _heap->used();
430 size_t capacity = _heap->capacity();
431 size_t allocated = used > _last_used ? used - _last_used : 0;
432 if (_cause != GCCause::_allocation_failure || allocated > capacity / 100) {
433 return true;
434 } else {
435 Heap_lock->unlock();
436 return false;
437 }
438 }
439
440 virtual void doit() {
441 _heap->entry_collect(_cause);
442 }
443
444 virtual void doit_epilogue() {
445 _last_used = _heap->used();
446 Heap_lock->unlock();
447 }
448 };
449
450 size_t VM_EpsilonCollect::_last_used = 0;
451
452 void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
453 VM_EpsilonCollect vmop(cause);
454 VMThread::execute(&vmop);
455 }
456
457 HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
458 HeapWord* res = allocate_work(size);
459 if (res == NULL && EpsilonSlidingGC) {
460 vmentry_collect(GCCause::_allocation_failure);
461 res = allocate_work(size);
462 }
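// If the post-GC retry failed as well, return NULL; the shared allocation path
// upstream eventually reports the usual OutOfMemoryError.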
463 return res;
464 }
465
466 typedef Stack<oop, mtGC> EpsilonMarkStack;
467
468 void EpsilonHeap::do_roots(OopClosure* cl, bool everything) {
469 // Need to tell runtime we are about to walk the roots with 1 thread
470 StrongRootsScope scope(1);
471
472 // Need to adapt oop closure for some special root types.
473 CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
474 MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations);
475
476 // Walk all these different parts of runtime roots. Some roots require
477 // holding the lock when walking them.
478 {
479 MutexLocker lock(CodeCache_lock, Mutex::_no_safepoint_check_flag);
480 CodeCache::blobs_do(&blobs);
481 }
482
483 ClassLoaderDataGraph::cld_do(&clds);
484 Universe::oops_do(cl);
485 Management::oops_do(cl);
486 JvmtiExport::oops_do(cl);
487 JNIHandles::oops_do(cl);
488 WeakProcessor::oops_do(cl);
489 ObjectSynchronizer::oops_do(cl);
490 SystemDictionary::oops_do(cl);
491 Threads::possibly_parallel_oops_do(false, cl, &blobs);
492
493 // This is implicitly handled by other roots, and we only want to
494 // touch these during verification.
495 if (everything) {
496 StringTable::oops_do(cl);
497 }
498 }
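// Marking and pointer adjustment drive this via process_roots(), and verification via
// process_all_roots(); those are presumably thin wrappers passing everything = false
// and true, respectively (their definitions are outside this excerpt).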
499
500 // Walk the marking bitmap and call the object closure on every marked object.
501 // This is much faster than walking a (very sparse) parsable heap, but it
502 // takes up to 1/64th of the heap size for the bitmap.
503 void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
504 HeapWord* limit = _space->top();
505 HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
506 while (addr < limit) {
507 oop obj = oop(addr);
508 assert(_bitmap.is_marked(obj), "sanity");
509 cl->do_object(obj);
510 addr += 1;
511 if (addr < limit) {
512 addr = _bitmap.get_next_marked_addr(addr, limit);
513 }
514 }
515 }
516
517 class EpsilonScanOopClosure : public BasicOopIterateClosure {
518 private:
519 EpsilonMarkStack* const _stack;
520 MarkBitMap* const _bitmap;
521
522 template <class T>
523 void do_oop_work(T* p) {
524 // p is the pointer to the memory location where the oop resides; load the
525 // value from it and unpack the compressed reference, if needed:
526 T o = RawAccess<>::oop_load(p);
527 if (!CompressedOops::is_null(o)) {
528 oop obj = CompressedOops::decode_not_null(o);
529
530 // The object is discovered. See if it is marked already. If not,
531 // mark it and push it onto the mark stack for further traversal. A non-atomic
532 // check-and-set would do, as this closure is called by a single thread.
533 if (!_bitmap->is_marked(obj)) {
534 _bitmap->mark((HeapWord*)obj);
535 _stack->push(obj);
536 }
537 }
538 }
539
540 public:
541 EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
542 _stack(stack), _bitmap(bitmap) {}
543 virtual void do_oop(oop* p) { do_oop_work(p); }
544 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
545 };
546
547 class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
548 private:
549 HeapWord* _compact_point;
550 PreservedMarks* const _preserved_marks;
551
552 public:
553 EpsilonCalcNewLocationObjectClosure(HeapWord* start, PreservedMarks* pm) :
554 _compact_point(start),
555 _preserved_marks(pm) {}
556
557 void do_object(oop obj) {
558 // Record the new location of the object: it is the current compaction point.
559 // If the object stays at the same location (which is true for objects in the
560 // dense prefix that we would normally get), do not bother recording the
561 // move, letting downstream code ignore it.
562 if ((HeapWord*)obj != _compact_point) {
563 markWord mark = obj->mark_raw();
564 if (mark.must_be_preserved(obj->klass())) {
565 _preserved_marks->push(obj, mark);
566 }
567 obj->forward_to(oop(_compact_point));
568 }
569 _compact_point += obj->size();
570 }
571
572 HeapWord* compact_point() {
573 return _compact_point;
574 }
575 };
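// Example of the calculation: for a heap prefix [ A(live) B(dead) C(live) ], A keeps
// its address (dense prefix, no forwarding recorded), while C is forwarded to the
// address right after A; the compaction point then advances past C's new copy.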
576
577 class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
578 private:
579 template <class T>
580 void do_oop_work(T* p) {
581 // p is the pointer to the memory location where the oop resides; load the
582 // value from it and unpack the compressed reference, if needed:
583 T o = RawAccess<>::oop_load(p);
584 if (!CompressedOops::is_null(o)) {
585 oop obj = CompressedOops::decode_not_null(o);
586
587 // Rewrite the current pointer to the object with its forwardee.
588 // Skip the write if update is not needed.
589 if (obj->is_forwarded()) {
590 oop fwd = obj->forwardee();
591 assert(fwd != NULL, "just checking");
592 RawAccess<>::oop_store(p, fwd);
593 }
594 }
595 }
596
597 public:
598 virtual void do_oop(oop* p) { do_oop_work(p); }
599 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
600 };
601
602 class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
603 private:
604 EpsilonAdjustPointersOopClosure _cl;
605 public:
606 void do_object(oop obj) {
607 // Apply the updates to all references reachable from current object:
608 obj->oop_iterate(&_cl);
609 }
610 };
611
612 class EpsilonMoveObjectsObjectClosure : public ObjectClosure {
613 private:
614 size_t _moved;
615 public:
616 EpsilonMoveObjectsObjectClosure() : ObjectClosure(), _moved(0) {}
617
618 void do_object(oop obj) {
619 // Copy the object to its new location, if needed. This is the final step,
620 // so we have to re-initialize its new mark word, dropping the forwardee
621 // data from it.
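// Note that Copy::aligned_conjoint_words tolerates overlapping source and destination;
// since sliding compaction only moves objects towards lower addresses, the copy is
// safe even when the old and new locations overlap.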
622 if (obj->is_forwarded()) {
623 oop fwd = obj->forwardee();
624 assert(fwd != NULL, "just checking");
625 Copy::aligned_conjoint_words((HeapWord*)obj, (HeapWord*)fwd, obj->size());
626 fwd->init_mark_raw();
627 _moved++;
628 }
629 }
630
631 size_t moved() {
632 return _moved;
633 }
634 };
635
636 class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
637 private:
638 EpsilonHeap* const _heap;
639 EpsilonMarkStack* const _stack;
640 MarkBitMap* const _bitmap;
641
642 template <class T>
643 void do_oop_work(T* p) {
644 T o = RawAccess<>::oop_load(p);
645 if (!CompressedOops::is_null(o)) {
646 oop obj = CompressedOops::decode_not_null(o);
647 if (!_bitmap->is_marked(obj)) {
648 _bitmap->mark((HeapWord*)obj);
649
650 guarantee(_heap->is_in(obj), "Is in heap: " PTR_FORMAT, p2i(obj));
651 guarantee(oopDesc::is_oop(obj), "Is an object: " PTR_FORMAT, p2i(obj));
652 guarantee(!obj->mark().is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
653
654 _stack->push(obj);
655 }
656 }
657 }
658
659 public:
660 EpsilonVerifyOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
661 _heap(EpsilonHeap::heap()), _stack(stack), _bitmap(bitmap) {}
662 virtual void do_oop(oop* p) { do_oop_work(p); }
663 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
664 };
665
666 void EpsilonHeap::entry_collect(GCCause::Cause cause) {
667 GCIdMark mark;
668 GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);
669
670 // Some statistics, for fun and profit:
671 size_t stat_reachable_roots = 0;
672 size_t stat_reachable_heap = 0;
673 size_t stat_moved = 0;
674 size_t stat_preserved_marks = 0;
675
676 {
677 GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);
678
679 // Commit the marking bitmap memory. There are several upsides to doing this
680 // before the cycle: no memory is taken if GC is not happening, the memory
681 // is "cleared" on first touch, and untouched parts of the bitmap are mapped
682 // to the zero page, boosting performance on sparse heaps.
683 if (!os::commit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size(), false)) {
684 log_warning(gc)("Could not commit native memory for marking bitmap, GC failed");
685 return;
686 }
687
688 // We do not need a parsable heap for this algorithm to work, but we want
689 // threads to give up their TLABs.
690 ensure_parsability(true);
691
692 // Tell various parts of runtime we are doing GC.
693 BiasedLocking::preserve_marks();
694
695 // Derived pointers would be re-discovered during the mark.
696 // Clear and activate the table for them.
697 DerivedPointerTable::clear();
698 }
699
700 {
701 GCTraceTime(Info, gc) time("Step 1: Mark", NULL);
702
703 // Marking stack and the closure that does most of the work. The closure
704 // scans the outgoing references, marks the referenced objects, and pushes
705 // newly-marked objects onto the stack for further processing.
706 EpsilonMarkStack stack;
707 EpsilonScanOopClosure cl(&stack, &_bitmap);
708
709 // Seed the marking with roots.
710 process_roots(&cl);
711 stat_reachable_roots = stack.size();
712
713 // Scan the rest of the heap until we run out of objects. Termination is
714 // guaranteed, because all reachable objects would be marked eventually.
715 while (!stack.is_empty()) {
716 oop obj = stack.pop();
717 obj->oop_iterate(&cl);
718 stat_reachable_heap++;
719 }
720
721 // No more derived pointers discovered after marking is done.
722 DerivedPointerTable::set_active(false);
723 }
724
725 // We are going to store forwarding information (where the new copy resides)
726 // in mark words. Some of those mark words need to be carefully preserved.
727 // This is a utility that maintains the list of those special mark words.
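// (The marks worth preserving are the ones that cannot be trivially reconstituted
//  afterwards, e.g. marks carrying an identity hash code or locking state.)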
728 PreservedMarks preserved_marks;
729
730 // New top of the allocated space.
731 HeapWord* new_top;
732
733 {
734 GCTraceTime(Info, gc) time("Step 2: Calculate new locations", NULL);
735
736 // Walk all alive objects, compute their new addresses and store those
737 // addresses in mark words. Optionally preserve some marks.
738 EpsilonCalcNewLocationObjectClosure cl(_space->bottom(), &preserved_marks);
739 walk_bitmap(&cl);
740
741 // After addresses are calculated, we know the new top for the allocated
742 // space. We cannot set it just yet, because some asserts check that objects
743 // are "in heap" based on current "top".
744 new_top = cl.compact_point();
745
746 stat_preserved_marks = preserved_marks.size();
747 }
748
749 {
750 GCTraceTime(Info, gc) time("Step 3: Adjust pointers", NULL);
751
752 // Walk all alive objects _and their reference fields_, and put "new
753 // addresses" there. We know the new addresses from the forwarding data
754 // in mark words. Take care of the heap objects first.
755 EpsilonAdjustPointersObjectClosure cl;
756 walk_bitmap(&cl);
757
758 // Now do the same, but for all VM roots, which reference the objects on
759 // their own: their references should also be updated.
760 EpsilonAdjustPointersOopClosure cli;
761 process_roots(&cli);
762
763 // Finally, make sure preserved marks know the objects are about to move.
764 preserved_marks.adjust_during_full_gc();
765 }
766
767 {
768 GCTraceTime(Info, gc) time("Step 4: Move objects", NULL);
769
770 // Move all alive objects to their new locations. All the references are
771 // already adjusted at previous step.
772 EpsilonMoveObjectsObjectClosure cl;
773 walk_bitmap(&cl);
774 stat_moved = cl.moved();
775
776 // Now that we have moved all objects to their new locations, we can retract
777 // the "top" of the allocation space to the end of the compacted prefix.
778 _space->set_top(new_top);
779 }
780
781 {
782 GCTraceTime(Info, gc) time("Step 5: Epilogue", NULL);
783
784 // Restore all special mark words.
785 preserved_marks.restore();
786
787 // Tell the rest of runtime we have finished the GC.
788 DerivedPointerTable::update_pointers();
789 BiasedLocking::restore_marks();
790
791 // Verification code walks entire heap and verifies nothing is broken.
792 if (EpsilonVerify) {
793 // The basic implementation turns the heap into an entirely parsable one with
794 // only live objects, which means we could just walk the heap object
795 // by object and verify it. But it would be inconvenient for verification
796 // to assume the heap has only live objects. Any future change that leaves
797 // at least one dead object with dead outgoing references would fail the
798 // verification. Therefore, it makes more sense to mark through the heap
799 // again, not assuming all objects are alive.
800 EpsilonMarkStack stack;
801 EpsilonVerifyOopClosure cl(&stack, &_bitmap);
802
803 _bitmap.clear();
804
805 // Verify all roots are correct, and that we have the same number of
806 // objects reachable from the roots.
807 process_all_roots(&cl);
808
809 size_t verified_roots = stack.size();
810 guarantee(verified_roots == stat_reachable_roots,
811 "Verification discovered " SIZE_FORMAT " roots out of " SIZE_FORMAT,
812 verified_roots, stat_reachable_roots);
813
814 // Verify the rest of the heap is correct, and that we have the same
815 // number of objects reachable from the heap.
816 size_t verified_heap = 0;
817 while (!stack.is_empty()) {
818 oop obj = stack.pop();
819 obj->oop_iterate(&cl);
820 verified_heap++;
821 }
822
823 guarantee(verified_heap == stat_reachable_heap,
824 "Verification discovered " SIZE_FORMAT " heap objects out of " SIZE_FORMAT,
825 verified_heap, stat_reachable_heap);
826
827 // Ask parts of runtime to verify themselves too
828 Universe::verify(VerifyOption_Default, "");
829 }
830
831 // Marking bitmap is not needed anymore
832 if (!os::uncommit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size())) {
833 log_warning(gc)("Could not uncommit native memory for marking bitmap");
834 }
835
836 // Return the now-unused memory to the OS if so requested. On large heaps,
837 // this would take a while.
838 if (EpsilonUncommit) {
839 _virtual_space.shrink_by((_space->end() - new_top) * HeapWordSize);
840 _space->set_end((HeapWord*)_virtual_space.high());
841 }
842 }
843
844 size_t stat_reachable = stat_reachable_roots + stat_reachable_heap;
845 log_info(gc)("GC Stats: " SIZE_FORMAT " (%.2f%%) reachable from roots, " SIZE_FORMAT " (%.2f%%) reachable from heap, "
846 SIZE_FORMAT " (%.2f%%) moved, " SIZE_FORMAT " (%.2f%%) markwords preserved",
847 stat_reachable_roots, 100.0 * stat_reachable_roots / stat_reachable,
848 stat_reachable_heap, 100.0 * stat_reachable_heap / stat_reachable,
849 stat_moved, 100.0 * stat_moved / stat_reachable,
850 stat_preserved_marks, 100.0 * stat_preserved_marks / stat_reachable);
851
852 print_heap_info(used());
853 print_metaspace_info();
854 }