
src/hotspot/share/gc/epsilon/epsilonHeap.cpp

rev 56085 : Epsilon, Sliding Mark-Compact
   1 /*
   2  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/epsilon/epsilonHeap.hpp"
  26 #include "gc/epsilon/epsilonMemoryPool.hpp"
  27 #include "gc/epsilon/epsilonThreadLocalData.hpp"
  28 #include "gc/shared/gcArguments.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "runtime/globals.hpp"
  34 
  35 jint EpsilonHeap::initialize() {
  36   size_t align = HeapAlignment;
  37   size_t init_byte_size = align_up(InitialHeapSize, align);
  38   size_t max_byte_size  = align_up(MaxHeapSize, align);
  39 
  40   // Initialize backing storage
  41   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  42   _virtual_space.initialize(heap_rs, init_byte_size);
  43 
  44   MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  45   MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
  46 
  47   initialize_reserved_region(reserved_region.start(), reserved_region.end());
  48 
  49   _space = new ContiguousSpace();
  50   _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
  51 
  52   // Precompute hot fields
  53   _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  54   _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  55   _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  56   _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
  57 
  58   // Enable monitoring
  59   _monitoring_support = new EpsilonMonitoringSupport(this);
  60   _last_counter_update = 0;
  61   _last_heap_print = 0;
  62 
  63   // Install barrier set
  64   BarrierSet::set_barrier_set(new EpsilonBarrierSet());
  65 
  66   // All done, print out the configuration
  67   if (init_byte_size != max_byte_size) {
  68     log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
  69                  init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  70   } else {
  71     log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  72   }
  73 
  74   if (UseTLAB) {
  75     log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
  76     if (EpsilonElasticTLAB) {
  77       log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
  78     }
  79     if (EpsilonElasticTLABDecay) {
  80       log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
  81     }
  82   } else {
  83     log_info(gc)("Not using TLAB allocation");
  84   }
  85 


 222   assert(min_size <= size,
 223          "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
 224   assert(size <= _max_tlab_size,
 225          "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
 226   assert(size <= CollectedHeap::max_tlab_size(),
 227          "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());
 228 
 229   if (log_is_enabled(Trace, gc)) {
 230     ResourceMark rm;
 231     log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
 232                           "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
 233                   thread->name(),
 234                   requested_size * HeapWordSize / K,
 235                   min_size * HeapWordSize / K,
 236                   _max_tlab_size * HeapWordSize / K,
 237                   ergo_tlab * HeapWordSize / K,
 238                   size * HeapWordSize / K);
 239   }
 240 
 241   // All prepared, let's do it!
 242   HeapWord* res = allocate_work(size);
 243 
 244   if (res != NULL) {
 245     // Allocation successful
 246     *actual_size = size;
 247     if (EpsilonElasticTLABDecay) {
 248       EpsilonThreadLocalData::set_last_tlab_time(thread, time);
 249     }
 250     if (EpsilonElasticTLAB && !fits) {
 251       // If we requested expansion, this is our new ergonomic TLAB size
 252       EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
 253     }
 254   } else {
 255     // Allocation failed, reset ergonomics to try and fit smaller TLABs
 256     if (EpsilonElasticTLAB) {
 257       EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
 258     }
 259   }
 260 
 261   return res;
 262 }
 263 
 264 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
 265   *gc_overhead_limit_was_exceeded = false;
 266   return allocate_work(size);
 267 }
 268 
 269 void EpsilonHeap::collect(GCCause::Cause cause) {
 270   switch (cause) {
 271     case GCCause::_metadata_GC_threshold:
 272     case GCCause::_metadata_GC_clear_soft_refs:
 273       // Receiving these causes means the VM itself entered the safepoint for metadata collection.
 274       // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
 275       // re-enter the safepoint again very soon.
 276 
 277       assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
 278       log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
 279       MetaspaceGC::compute_new_size();
 280       print_metaspace_info();
 281       break;
 282     default:
 283       log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
 284   }
 285   _monitoring_support->update_counters();
 286 }
 287 
 288 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
 289   collect(gc_cause());
 290 }
 291 
 292 void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
 293   _space->safe_object_iterate(cl);
 294 }
 295 
 296 void EpsilonHeap::print_on(outputStream *st) const {
 297   st->print_cr("Epsilon Heap");
 298 
 299   // Cast away constness:
 300   ((VirtualSpace)_virtual_space).print_on(st);
 301 
 302   st->print_cr("Allocation space:");
 303   _space->print_on(st);
 304 


 326     log_info(gc)("Heap: no reliable data");
 327   }
 328 }
 329 
 330 void EpsilonHeap::print_metaspace_info() const {
 331   size_t reserved  = MetaspaceUtils::reserved_bytes();
 332   size_t committed = MetaspaceUtils::committed_bytes();
 333   size_t used      = MetaspaceUtils::used_bytes();
 334 
 335   if (reserved != 0) {
 336     log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
 337                             SIZE_FORMAT "%s (%.2f%%) used",
 338             byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
 339             byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
 340             committed * 100.0 / reserved,
 341             byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
 342             used * 100.0 / reserved);
 343   } else {
 344     log_info(gc, metaspace)("Metaspace: no reliable data");
 345   }
 346 }
   1 /*
   2  * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "classfile/classLoaderDataGraph.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/epsilon/epsilonHeap.hpp"
  30 #include "gc/epsilon/epsilonMemoryPool.hpp"
  31 #include "gc/epsilon/epsilonThreadLocalData.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/barrierSet.inline.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/markBitMap.inline.hpp"
  36 #include "gc/shared/strongRootsScope.hpp"
  37 #include "gc/shared/preservedMarks.inline.hpp"
  38 #include "gc/shared/weakProcessor.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/iterator.inline.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "oops/compressedOops.inline.hpp"
  44 #include "oops/markWord.inline.hpp"
  45 #include "runtime/biasedLocking.hpp"
  46 #include "runtime/globals.hpp"
  47 #include "runtime/objectMonitor.inline.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "runtime/vmOperations.hpp"
  50 #include "runtime/vmThread.hpp"
  51 #include "utilities/stack.inline.hpp"
  52 #include "services/management.hpp"
  53 
  54 jint EpsilonHeap::initialize() {
  55   size_t align = HeapAlignment;
  56   size_t init_byte_size = align_up(InitialHeapSize, align);
  57   size_t max_byte_size  = align_up(MaxHeapSize, align);
  58 
  59   // Initialize backing storage
  60   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  61   _virtual_space.initialize(heap_rs, init_byte_size);
  62 
  63   MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  64   MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
  65 
  66   initialize_reserved_region(reserved_region.start(), reserved_region.end());
  67 
  68   _space = new ContiguousSpace();
  69   _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
  70 
  71   // Precompute hot fields
  72   _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  73   _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  74   _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  75   _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
  76 
  77   // Enable monitoring
  78   _monitoring_support = new EpsilonMonitoringSupport(this);
  79   _last_counter_update = 0;
  80   _last_heap_print = 0;
  81 
  82   // Install barrier set
  83   BarrierSet::set_barrier_set(new EpsilonBarrierSet());
  84 
  85   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  86   size_t _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  87   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
  88 
  89   // Initialize the marking bitmap, but do not commit it yet
  90   if (EpsilonSlidingGC) {
  91     ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  92     MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  93     _bitmap_region = MemRegion((HeapWord *) bitmap.base(), bitmap.size() / HeapWordSize);
  94     MemRegion heap_region = MemRegion((HeapWord *) heap_rs.base(), heap_rs.size() / HeapWordSize);
  95     _bitmap.initialize(heap_region, _bitmap_region);
  96   }
  97 
  98   // All done, print out the configuration
  99   if (init_byte_size != max_byte_size) {
 100     log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
 101                  init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
 102   } else {
 103     log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
 104   }
 105 
 106   if (UseTLAB) {
 107     log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
 108     if (EpsilonElasticTLAB) {
 109       log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
 110     }
 111     if (EpsilonElasticTLABDecay) {
 112       log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
 113     }
 114   } else {
 115     log_info(gc)("Not using TLAB allocation");
 116   }
 117 


 254   assert(min_size <= size,
 255          "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
 256   assert(size <= _max_tlab_size,
 257          "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
 258   assert(size <= CollectedHeap::max_tlab_size(),
 259          "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());
 260 
 261   if (log_is_enabled(Trace, gc)) {
 262     ResourceMark rm;
 263     log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
 264                           "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
 265                   thread->name(),
 266                   requested_size * HeapWordSize / K,
 267                   min_size * HeapWordSize / K,
 268                   _max_tlab_size * HeapWordSize / K,
 269                   ergo_tlab * HeapWordSize / K,
 270                   size * HeapWordSize / K);
 271   }
 272 
 273   // All prepared, let's do it!
 274   HeapWord* res = allocate_or_collect_work(size);
 275 
 276   if (res != NULL) {
 277     // Allocation successful
 278     *actual_size = size;
 279     if (EpsilonElasticTLABDecay) {
 280       EpsilonThreadLocalData::set_last_tlab_time(thread, time);
 281     }
 282     if (EpsilonElasticTLAB && !fits) {
 283       // If we requested expansion, this is our new ergonomic TLAB size
 284       EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
 285     }
 286   } else {
 287     // Allocation failed, reset ergonomics to try and fit smaller TLABs
 288     if (EpsilonElasticTLAB) {
 289       EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
 290     }
 291   }
 292 
 293   return res;
 294 }
 295 
 296 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
 297   *gc_overhead_limit_was_exceeded = false;
 298   return allocate_or_collect_work(size);
 299 }
 300 
 301 void EpsilonHeap::collect(GCCause::Cause cause) {
 302   switch (cause) {
 303     case GCCause::_metadata_GC_threshold:
 304     case GCCause::_metadata_GC_clear_soft_refs:
 305       // Receiving these causes means the VM itself entered the safepoint for metadata collection.
 306       // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
 307       // re-enter the safepoint again very soon.
 308 
 309       assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
 310       log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
 311       MetaspaceGC::compute_new_size();
 312       print_metaspace_info();
 313       break;
 314     default:
 315       if (EpsilonSlidingGC) {
 316         if (SafepointSynchronize::is_at_safepoint()) {
 317           entry_collect(cause);
 318         } else {
 319           vmentry_collect(cause);
 320         }
 321       } else {
 322         log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
 323       }
 324   }
 325   _monitoring_support->update_counters();
 326 }
 327 
 328 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
 329   collect(gc_cause());
 330 }
 331 
 332 void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
 333   _space->safe_object_iterate(cl);
 334 }
 335 
 336 void EpsilonHeap::print_on(outputStream *st) const {
 337   st->print_cr("Epsilon Heap");
 338 
 339   // Cast away constness:
 340   ((VirtualSpace)_virtual_space).print_on(st);
 341 
 342   st->print_cr("Allocation space:");
 343   _space->print_on(st);
 344 


 366     log_info(gc)("Heap: no reliable data");
 367   }
 368 }
 369 
 370 void EpsilonHeap::print_metaspace_info() const {
 371   size_t reserved  = MetaspaceUtils::reserved_bytes();
 372   size_t committed = MetaspaceUtils::committed_bytes();
 373   size_t used      = MetaspaceUtils::used_bytes();
 374 
 375   if (reserved != 0) {
 376     log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
 377                             SIZE_FORMAT "%s (%.2f%%) used",
 378             byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
 379             byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
 380             committed * 100.0 / reserved,
 381             byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
 382             used * 100.0 / reserved);
 383   } else {
 384     log_info(gc, metaspace)("Metaspace: no reliable data");
 385   }
 386 }
 387 
 388 // ------------------ EXPERIMENTAL MARK-COMPACT -------------------------------
 389 //
 390 // This implements a trivial Lisp2-style sliding collector:
 391 //     https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
 392 //
 393 // The goal for this implementation is to be as simple as possible, ignoring
 394 // non-trivial performance optimizations. This collector does not implement
 395 // reference processing: no soft/weak/phantom/finalizable references are ever
 396 // cleared. It also does not implement class unloading and other runtime
 397 // cleanups.
 398 //
 399 
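A quick way to exercise this path, assuming the flags used later in this file (EpsilonSlidingGC, EpsilonVerify, EpsilonUncommit) keep these names; the invocation below is illustrative, not taken from the patch:

  java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC \
       -XX:+EpsilonSlidingGC -XX:+EpsilonVerify -Xlog:gc -Xmx1g MyApp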
 400 // VM operation that executes collection cycle under safepoint
 401 class VM_EpsilonCollect: public VM_Operation {
 402 private:
 403   const GCCause::Cause _cause;
 404   EpsilonHeap* const _heap;
 405   static size_t _last_used;
 406 public:
 407   VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(),
 408                                             _cause(cause),
 409                                             _heap(EpsilonHeap::heap()) {};
 410 
 411   VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
 412   const char* name()             const { return "Epsilon Collection"; }
 413 
 414   virtual bool doit_prologue() {
 415     // Need to take the Heap lock before managing backing storage.
 416     // This also naturally serializes GC requests, and allows us to coalesce
 417     // back-to-back allocation failure requests from many threads. There is no
 418     // need to handle an allocation failure that arrives without any allocations
 419     // since the last complete GC. Waiting until 1% of the heap has been allocated
 420     // before starting the next GC seems to resolve most races.
 421     Heap_lock->lock();
 422     size_t used = _heap->used();
 423     size_t capacity = _heap->capacity();
 424     size_t allocated = used > _last_used ? used - _last_used : 0;
 425     if (_cause != GCCause::_allocation_failure || allocated > capacity / 100) {
 426       return true;
 427     } else {
 428       Heap_lock->unlock();
 429       return false;
 430     }
 431   }
 432 
 433   virtual void doit() {
 434     _heap->entry_collect(_cause);
 435   }
 436 
 437   virtual void doit_epilogue() {
 438     _last_used = _heap->used();
 439     Heap_lock->unlock();
 440   }
 441 };
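To make the coalescing heuristic in doit_prologue() concrete, a small illustrative calculation (numbers made up, not from the patch): for a 1 GB heap, capacity / 100 is about 10 MB. An allocation-failure request that arrives before ~10 MB has been allocated since the previous cycle returns false from the prologue and releases the Heap_lock; the requesting thread then simply retries its allocation in the freshly compacted space (see allocate_or_collect_work() below) instead of scheduling another safepoint.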
 442 
 443 size_t VM_EpsilonCollect::_last_used = 0;
 444 
 445 void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
 446   VM_EpsilonCollect vmop(cause);
 447   VMThread::execute(&vmop);
 448 }
 449 
 450 HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
 451   HeapWord* res = allocate_work(size);
 452   if (res == NULL && EpsilonSlidingGC) {
 453     vmentry_collect(GCCause::_allocation_failure);
 454     res = allocate_work(size);
 455   }
 456   return res;
 457 }
 458 
 459 typedef Stack<oop, mtGC> EpsilonMarkStack;
 460 
 461 void EpsilonHeap::do_roots(OopClosure* cl, bool everything) {
 462   // Need to tell runtime we are about to walk the roots with 1 thread
 463   StrongRootsScope scope(1);
 464 
 465   // Need to adapt oop closure for some special root types.
 466   CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
 467   MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations);
 468 
 469   // Walk all these different parts of runtime roots. Some roots require
 470   // holding the lock when walking them.
 471   {
 472     MutexLocker lock(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 473     CodeCache::blobs_do(&blobs);
 474   }
 475   {
 476     MutexLocker lock(ClassLoaderDataGraph_lock);
 477     ClassLoaderDataGraph::cld_do(&clds);
 478   }
 479   Universe::oops_do(cl);
 480   Management::oops_do(cl);
 481   JvmtiExport::oops_do(cl);
 482   JNIHandles::oops_do(cl);
 483   WeakProcessor::oops_do(cl);
 484   ObjectSynchronizer::oops_do(cl);
 485   SystemDictionary::oops_do(cl);
 486   Threads::possibly_parallel_oops_do(false, cl, &blobs);
 487 
 488   // These are implicitly handled by other roots, and we only want to
 489   // touch them during verification.
 490   if (everything) {
 491     StringTable::oops_do(cl);
 492   }
 493 }
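For context: process_roots() and process_all_roots(), used by the marking and verification steps further down, are not defined in this file. Presumably they are thin wrappers over do_roots() declared in epsilonHeap.hpp, along these lines (a sketch of my assumption, not text from the patch):

  // hypothetical wrappers, assumed to live in epsilonHeap.hpp
  void EpsilonHeap::process_roots(OopClosure* cl)     { do_roots(cl, /* everything = */ false); }
  void EpsilonHeap::process_all_roots(OopClosure* cl) { do_roots(cl, /* everything = */ true);  }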
 494 
 495 // Walk the marking bitmap and call object closure on every marked object.
 496 // This is much faster than walking a (very sparse) parsable heap, but it
 497 // takes up to 1/64-th of heap size for the bitmap.
 498 void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
 499    HeapWord* limit = _space->top();
 500    HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
 501    while (addr < limit) {
 502      oop obj = oop(addr);
 503      assert(_bitmap.is_marked(obj), "sanity");
 504      cl->do_object(obj);
 505      addr += 1;
 506      if (addr < limit) {
 507        addr = _bitmap.get_next_marked_addr(addr, limit);
 508      }
 509    }
 510 }
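The "1/64-th of heap size" figure above follows from MarkBitMap keeping one mark bit per heap word under the default 8-byte object alignment; a rough worked example with made-up numbers:

  // 1 GB heap / 8-byte HeapWords = 128M heap words
  // one mark bit per heap word   = 128M bits = 16 MB of bitmap, i.e. heap / 64
  // size_t bitmap_bytes = MarkBitMap::compute_size(1 * G);   // ~16 MB, before page alignment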
 511 
 512 class EpsilonScanOopClosure : public BasicOopIterateClosure {
 513 private:
 514   EpsilonMarkStack* const _stack;
 515   MarkBitMap* const _bitmap;
 516 
 517   template <class T>
 518   void do_oop_work(T* p) {
 519     // p is the pointer to the memory location where the oop is; load the value
 520     // from it and unpack the compressed reference, if needed:
 521     T o = RawAccess<>::oop_load(p);
 522     if (!CompressedOops::is_null(o)) {
 523       oop obj = CompressedOops::decode_not_null(o);
 524 
 525       // Object is discovered. See if it is marked already. If not,
 526       // mark it and push it on the mark stack for further traversal. A non-atomic
 527       // check and set is enough, as this closure is called by a single thread.
 528       if (!_bitmap->is_marked(obj)) {
 529         _bitmap->mark((HeapWord*)obj);
 530         _stack->push(obj);
 531       }
 532     }
 533   }
 534 
 535 public:
 536   EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
 537                         _stack(stack), _bitmap(bitmap) {}
 538   virtual void do_oop(oop* p)       { do_oop_work(p); }
 539   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 540 };
 541 
 542 class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
 543 private:
 544   HeapWord* _compact_point;
 545   PreservedMarks* const _preserved_marks;
 546 
 547 public:
 548   EpsilonCalcNewLocationObjectClosure(HeapWord* start, PreservedMarks* pm) :
 549                                       _compact_point(start),
 550                                       _preserved_marks(pm) {}
 551 
 552   void do_object(oop obj) {
 553     // Record the new location of the object: it is the current compaction point.
 554     // If the object stays at the same location (which is true for objects in the
 555     // dense prefix that we normally get), do not bother recording the
 556     // move, letting downstream code ignore it.
 557     if ((HeapWord*)obj != _compact_point) {
 558       markWord mark = obj->mark_raw();
 559       if (mark.must_be_preserved(obj->klass())) {
 560         _preserved_marks->push(obj, mark);
 561       }
 562       obj->forward_to(oop(_compact_point));
 563     }
 564     _compact_point += obj->size();
 565   }
 566 
 567   HeapWord* compact_point() {
 568     return _compact_point;
 569   }
 570 };
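A tiny worked example of the sliding calculation above, with made-up addresses and sizes:

  // live objects found by the bitmap walk (sizes in words):
  //   A: 4 words @ bottom+0,  B: 6 words @ bottom+10,  C: 3 words @ bottom+20
  // forwarding decisions:
  //   A stays (dense prefix), B is forwarded to bottom+4, C to bottom+10
  // compact_point() ends at bottom+13, which later becomes the new top in Step 4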
 571 
 572 class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
 573 private:
 574   template <class T>
 575   void do_oop_work(T* p) {
 576     // p is the pointer to the memory location where the oop is; load the value
 577     // from it and unpack the compressed reference, if needed:
 578     T o = RawAccess<>::oop_load(p);
 579     if (!CompressedOops::is_null(o)) {
 580       oop obj = CompressedOops::decode_not_null(o);
 581 
 582       // Rewrite the current pointer to the object with its forwardee.
 583       // Skip the write if update is not needed.
 584       if (obj->is_forwarded()) {
 585         oop fwd = obj->forwardee();
 586         assert(fwd != NULL, "just checking");
 587         RawAccess<>::oop_store(p, fwd);
 588       }
 589     }
 590   }
 591 
 592 public:
 593   virtual void do_oop(oop* p)       { do_oop_work(p); }
 594   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 595 };
 596 
 597 class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
 598 private:
 599   EpsilonAdjustPointersOopClosure _cl;
 600 public:
 601   void do_object(oop obj) {
 602     // Apply the updates to all references reachable from current object:
 603     obj->oop_iterate(&_cl);
 604   }
 605 };
 606 
 607 class EpsilonMoveObjectsObjectClosure : public ObjectClosure {
 608 private:
 609   size_t _moved;
 610 public:
 611   EpsilonMoveObjectsObjectClosure() : ObjectClosure(), _moved(0) {}
 612 
 613   void do_object(oop obj) {
 614     // Copy the object to its new location, if needed. This is the final step,
 615     // so we have to re-initialize its new mark word, dropping the forwardee
 616     // data from it.
 617     if (obj->is_forwarded()) {
 618       oop fwd = obj->forwardee();
 619       assert(fwd != NULL, "just checking");
 620       Copy::aligned_conjoint_words((HeapWord*)obj, (HeapWord*)fwd, obj->size());
 621       fwd->init_mark_raw();
 622       _moved++;
 623     }
 624   }
 625 
 626   size_t moved() {
 627     return _moved;
 628   }
 629 };
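One note on why the single pass in Step 4 is safe (my reading of the code, not a claim made by the patch): walk_bitmap() visits objects in address order, and a sliding compaction only ever forwards an object to an address at or below its current one, so every copy lands in space whose live contents have already been moved out. Copy::aligned_conjoint_words() tolerates the overlapping source and destination ranges this produces.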
 630 
 631 class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
 632 private:
 633   EpsilonHeap* const _heap;
 634   EpsilonMarkStack* const _stack;
 635   MarkBitMap* const _bitmap;
 636 
 637   template <class T>
 638   void do_oop_work(T* p) {
 639     T o = RawAccess<>::oop_load(p);
 640     if (!CompressedOops::is_null(o)) {
 641       oop obj = CompressedOops::decode_not_null(o);
 642       if (!_bitmap->is_marked(obj)) {
 643         _bitmap->mark((HeapWord*)obj);
 644 
 645         guarantee(_heap->is_in(obj),        "Is in heap: "   PTR_FORMAT, p2i(obj));
 646         guarantee(oopDesc::is_oop(obj),     "Is an object: " PTR_FORMAT, p2i(obj));
 647         guarantee(!obj->mark().is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
 648 
 649         _stack->push(obj);
 650       }
 651     }
 652   }
 653 
 654 public:
 655   EpsilonVerifyOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
 656     _heap(EpsilonHeap::heap()), _stack(stack), _bitmap(bitmap) {}
 657   virtual void do_oop(oop* p)       { do_oop_work(p); }
 658   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 659 };
 660 
 661 void EpsilonHeap::entry_collect(GCCause::Cause cause) {
 662   GCIdMark mark;
 663   GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);
 664 
 665   // Some statistics, for fun and profit:
 666   size_t stat_reachable_roots = 0;
 667   size_t stat_reachable_heap = 0;
 668   size_t stat_moved = 0;
 669   size_t stat_preserved_marks = 0;
 670 
 671   {
 672     GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);
 673 
 674     // Commit the marking bitmap memory. There are several upsides to doing this
 675     // before the cycle: no memory is taken if GC is not happening, the memory
 676     // is "cleared" on first touch, and untouched parts of the bitmap are mapped
 677     // to the zero page, boosting performance on sparse heaps.
 678     if (!os::commit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size(), false)) {
 679       log_warning(gc)("Could not commit native memory for marking bitmap, GC failed");
 680       return;
 681     }
 682 
 683     // We do not need a parsable heap for this algorithm to work, but we want
 684     // threads to give up their TLABs.
 685     ensure_parsability(true);
 686 
 687     // Tell various parts of runtime we are doing GC.
 688     BiasedLocking::preserve_marks();
 689 
 690     // Derived pointers would be re-discovered during the mark.
 691     // Clear and activate the table for them.
 692     DerivedPointerTable::clear();
 693   }
 694 
 695   {
 696     GCTraceTime(Info, gc) time("Step 1: Mark", NULL);
 697 
 698     // Marking stack and the closure that does most of the work. The closure
 699     // scans the outgoing references, marks them, and pushes newly-marked
 700     // objects to the stack for further processing.
 701     EpsilonMarkStack stack;
 702     EpsilonScanOopClosure cl(&stack, &_bitmap);
 703 
 704     // Seed the marking with roots.
 705     process_roots(&cl);
 706     stat_reachable_roots = stack.size();
 707 
 708     // Scan the rest of the heap until we run out of objects. Termination is
 709     // guaranteed, because all reachable objects would be marked eventually.
 710     while (!stack.is_empty()) {
 711       oop obj = stack.pop();
 712       obj->oop_iterate(&cl);
 713       stat_reachable_heap++;
 714     }
 715 
 716     // No more derived pointers discovered after marking is done.
 717     DerivedPointerTable::set_active(false);
 718   }
 719 
 720   // We are going to store forwarding information (where the new copy resides)
 721   // in mark words. Some of those mark words need to be carefully preserved.
 722   // This is a utility that maintains the list of those special mark words.
 723   PreservedMarks preserved_marks;
 724 
 725   // New top of the allocated space.
 726   HeapWord* new_top;
 727 
 728   {
 729     GCTraceTime(Info, gc) time("Step 2: Calculate new locations", NULL);
 730 
 731     // Walk all alive objects, compute their new addresses and store those
 732     // addresses in mark words. Optionally preserve some marks.
 733     EpsilonCalcNewLocationObjectClosure cl(_space->bottom(), &preserved_marks);
 734     walk_bitmap(&cl);
 735 
 736     // After addresses are calculated, we know the new top for the allocated
 737     // space. We cannot set it just yet, because some asserts check that objects
 738     // are "in heap" based on current "top".
 739     new_top = cl.compact_point();
 740 
 741     stat_preserved_marks = preserved_marks.size();
 742   }
 743 
 744   {
 745     GCTraceTime(Info, gc) time("Step 3: Adjust pointers", NULL);
 746 
 747     // Walk all alive objects _and their reference fields_, and put "new
 748     // addresses" there. We know the new addresses from the forwarding data
 749     // in mark words. Take care of the heap objects first.
 750     EpsilonAdjustPointersObjectClosure cl;
 751     walk_bitmap(&cl);
 752 
 753     // Now do the same, but for all VM roots, which reference the objects on
 754     // their own: their references should also be updated.
 755     EpsilonAdjustPointersOopClosure cli;
 756     process_roots(&cli);
 757 
 758     // Finally, make sure preserved marks know the objects are about to move.
 759     preserved_marks.adjust_during_full_gc();
 760   }
 761 
 762   {
 763     GCTraceTime(Info, gc) time("Step 4: Move objects", NULL);
 764 
 765     // Move all alive objects to their new locations. All the references are
 766     // already adjusted at previous step.
 767     EpsilonMoveObjectsObjectClosure cl;
 768     walk_bitmap(&cl);
 769     stat_moved = cl.moved();
 770 
 771     // Now that we have moved all objects to their new locations, we can retract
 772     // the "top" of the allocation space to the end of the compacted prefix.
 773     _space->set_top(new_top);
 774   }
 775 
 776   {
 777     GCTraceTime(Info, gc) time("Step 5: Epilogue", NULL);
 778 
 779     // Restore all special mark words.
 780     preserved_marks.restore();
 781 
 782     // Tell the rest of runtime we have finished the GC.
 783     DerivedPointerTable::update_pointers();
 784     BiasedLocking::restore_marks();
 785 
 786     // Verification code walks entire heap and verifies nothing is broken.
 787     if (EpsilonVerify) {
 788       // The basic implementation turns the heap into an entirely parsable one with
 789       // only alive objects, which means we could just walk the heap object
 790       // by object and verify it. But it would be inconvenient for verification
 791       // to assume the heap has only alive objects. Any future change that leaves
 792       // at least one dead object with dead outgoing references would fail the
 793       // verification. Therefore, it makes more sense to mark through the heap
 794       // again, not assuming objects are all alive.
 795       EpsilonMarkStack stack;
 796       EpsilonVerifyOopClosure cl(&stack, &_bitmap);
 797 
 798       _bitmap.clear();
 799 
 800       // Verify that all roots are correct, and that we have the same number of
 801       // objects reachable from the roots.
 802       process_all_roots(&cl);
 803 
 804       size_t verified_roots = stack.size();
 805       guarantee(verified_roots == stat_reachable_roots,
 806                 "Verification discovered " SIZE_FORMAT " roots out of " SIZE_FORMAT,
 807                 verified_roots, stat_reachable_roots);
 808 
 809       // Verify that the rest of the heap is correct, and that we have the same
 810       // number of objects reachable from the heap.
 811       size_t verified_heap = 0;
 812       while (!stack.is_empty()) {
 813         oop obj = stack.pop();
 814         obj->oop_iterate(&cl);
 815         verified_heap++;
 816       }
 817 
 818       guarantee(verified_heap == stat_reachable_heap,
 819                 "Verification discovered " SIZE_FORMAT " heap objects out of " SIZE_FORMAT,
 820                 verified_heap, stat_reachable_heap);
 821 
 822       // Ask parts of runtime to verify themselves too
 823       Universe::verify(VerifyOption_Default, "");
 824     }
 825 
 826     // Marking bitmap is not needed anymore
 827     if (!os::uncommit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size())) {
 828       log_warning(gc)("Could not uncommit native memory for marking bitmap");
 829     }
 830 
 831     // Return the now-unused memory to the OS if so requested. On large heaps,
 832     // this could take a while.
 833     if (EpsilonUncommit) {
 834       _virtual_space.shrink_by((_space->end() - new_top) * HeapWordSize);
 835       _space->set_end((HeapWord*)_virtual_space.high());
 836     }
 837   }
 838 
 839   size_t stat_reachable = stat_reachable_roots + stat_reachable_heap;
 840   log_info(gc)("GC Stats: " SIZE_FORMAT " (%.2f%%) reachable from roots, " SIZE_FORMAT " (%.2f%%) reachable from heap, "
 841                SIZE_FORMAT " (%.2f%%) moved, " SIZE_FORMAT " (%.2f%%) markwords preserved",
 842                stat_reachable_roots, 100.0 * stat_reachable_roots / stat_reachable,
 843                stat_reachable_heap,  100.0 * stat_reachable_heap  / stat_reachable,
 844                stat_moved,           100.0 * stat_moved           / stat_reachable,
 845                stat_preserved_marks, 100.0 * stat_preserved_marks / stat_reachable);
 846 
 847   print_heap_info(used());
 848   print_metaspace_info();
 849 }