< prev index next >

src/hotspot/share/gc/epsilon/epsilonHeap.hpp

Print this page
rev 57357 : Epsilon, Sliding Mark-Compact


   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
  25 #define SHARE_GC_EPSILON_EPSILONHEAP_HPP
  26 
  27 #include "gc/shared/collectedHeap.hpp"

  28 #include "gc/shared/softRefPolicy.hpp"
  29 #include "gc/shared/space.hpp"
  30 #include "gc/epsilon/epsilonMonitoringSupport.hpp"
  31 #include "gc/epsilon/epsilonBarrierSet.hpp"
  32 #include "services/memoryManager.hpp"
  33 
  34 class EpsilonHeap : public CollectedHeap {
       // NOTE(review): Epsilon heap -- a CollectedHeap that performs no garbage
       // collection (see is_maximal_no_gc() below: "No GC is going to happen");
       // allocation is served out of a single contiguous space.
  35   friend class VMStructs;
  36 private:
       // Core state: _space is the single ContiguousSpace (used()/is_in()
       // delegate to it); _virtual_space supplies reserved/committed sizes for
       // max_capacity()/capacity(); monitoring/pool/manager back serviceability.
  37   SoftRefPolicy _soft_ref_policy;
  38   EpsilonMonitoringSupport* _monitoring_support;
  39   MemoryPool* _pool;
  40   GCMemoryManager _memory_manager;
  41   ContiguousSpace* _space;
  42   VirtualSpace _virtual_space;
       // Tuning/diagnostic fields: _step_*/_last_* presumably throttle perf
       // counter updates and periodic heap printing, and _decay_time_ns some
       // time-based decay -- TODO confirm exact semantics in epsilonHeap.cpp.
  43   size_t _max_tlab_size;
  44   size_t _step_counter_update;
  45   size_t _step_heap_print;
  46   int64_t _decay_time_ns;
  47   volatile size_t _last_counter_update;
  48   volatile size_t _last_heap_print;


  49 
  50 public:
       // Singleton-style accessor for the Epsilon heap instance.
  51   static EpsilonHeap* heap();
  52 
       // NOTE(review): stray ';' after the constructor body -- harmless but
       // should be dropped; only _memory_manager is initialized here.
  53   EpsilonHeap() :
  54           _memory_manager("Epsilon Heap", "") {};
  55 
  56   virtual Name kind() const {
  57     return CollectedHeap::Epsilon;
  58   }
  59 
  60   virtual const char* name() const {
  61     return "Epsilon";
  62   }
  63 
  64   virtual SoftRefPolicy* soft_ref_policy() {
  65     return &_soft_ref_policy;
  66   }
  67 
  68   virtual jint initialize();
       // (webrev elides old line 69 here -- changed in this revision)


  70   virtual void initialize_serviceability();
  71 
  72   virtual GrowableArray<GCMemoryManager*> memory_managers();
  73   virtual GrowableArray<MemoryPool*> memory_pools();
  74 
  75   virtual size_t max_capacity() const { return _virtual_space.reserved_size();  }
  76   virtual size_t capacity()     const { return _virtual_space.committed_size(); }
  77   virtual size_t used()         const { return _space->used(); }
  78 
  79   virtual bool is_in(const void* p) const {
  80     return _space->is_in(p);
  81   }
  82 
  83   virtual bool is_maximal_no_gc() const {
  84     // No GC is going to happen. Return "we are at max", when we are about to fail.
  85     return used() == capacity();
  86   }
  87 
  88   // Allocation
  89   HeapWord* allocate_work(size_t size);

  90   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
  91   virtual HeapWord* allocate_new_tlab(size_t min_size,
  92                                       size_t requested_size,
  93                                       size_t* actual_size);
  94 
  95   // TLAB allocation
  96   virtual bool supports_tlab_allocation()           const { return true;           }
  97   virtual size_t tlab_capacity(Thread* thr)         const { return capacity();     }
  98   virtual size_t tlab_used(Thread* thr)             const { return used();         }
  99   virtual size_t max_tlab_size()                    const { return _max_tlab_size; }
 100   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 101 
 102   virtual void collect(GCCause::Cause cause);
 103   virtual void do_full_collection(bool clear_all_soft_refs);
 104 
 105   // Heap walking support
 106   virtual void object_iterate(ObjectClosure* cl);
 107 
 108   // Object pinning support: every object is implicitly pinned
       // (objects never move in this heap, so pin/unpin are trivial no-ops)
 109   virtual bool supports_object_pinning() const           { return true; }

 110   virtual oop pin_object(JavaThread* thread, oop obj)    { return obj; }
 111   virtual void unpin_object(JavaThread* thread, oop obj) { }
 112 
 113   // No support for block parsing.
 114   HeapWord* block_start(const void* addr) const { return NULL;  }
 115   bool block_is_obj(const HeapWord* addr) const { return false; }
 116 
 117   // No GC threads
 118   virtual void print_gc_threads_on(outputStream* st) const {}
 119   virtual void gc_threads_do(ThreadClosure* tc) const {}
 120 
 121   // No nmethod handling
 122   virtual void register_nmethod(nmethod* nm) {}
 123   virtual void unregister_nmethod(nmethod* nm) {}
 124   virtual void flush_nmethod(nmethod* nm) {}
 125   virtual void verify_nmethod(nmethod* nm) {}
 126 
 127   // No heap verification
 128   virtual void prepare_for_verify() {}
 129   virtual void verify(VerifyOption option) {}
 130 
 131   virtual jlong millis_since_last_gc() {
 132     // Report time since the VM start
         // NOTE(review): divides os::elapsed_counter() by NANOSECS_PER_MILLISEC,
         // i.e. assumes the counter ticks in nanoseconds -- TODO confirm units.
 133     return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
 134   }
 135 
 136   MemRegion reserved_region() const { return _reserved; }
 137   bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
 138 
 139   virtual void print_on(outputStream* st) const;
 140   virtual void print_tracing_info() const;
 141   virtual bool print_location(outputStream* st, void* addr) const;
 142 


 143 private:
       // Printing helpers used by print_on()/print_tracing_info().
 144   void print_heap_info(size_t used) const;
 145   void print_metaspace_info() const;







 146 
 147 };
 148 
 149 #endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP


   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
  25 #define SHARE_GC_EPSILON_EPSILONHEAP_HPP
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "gc/shared/markBitMap.hpp"
  29 #include "gc/shared/softRefPolicy.hpp"
  30 #include "gc/shared/space.hpp"
  31 #include "gc/epsilon/epsilonMonitoringSupport.hpp"
  32 #include "gc/epsilon/epsilonBarrierSet.hpp"
  33 #include "services/memoryManager.hpp"
  34 
  35 class EpsilonHeap : public CollectedHeap {
       // NOTE(review): Epsilon heap, extended in this revision with an optional
       // sliding mark-compact GC ("Epsilon, Sliding Mark-Compact"), gated by the
       // EpsilonSlidingGC flag referenced in supports_object_pinning() below.
  36   friend class VMStructs;
  37 private:
       // Core state: _space is the single ContiguousSpace (used()/is_in()
       // delegate to it); _virtual_space supplies reserved/committed sizes for
       // max_capacity()/capacity(); monitoring/pool/manager back serviceability.
  38   SoftRefPolicy _soft_ref_policy;
  39   EpsilonMonitoringSupport* _monitoring_support;
  40   MemoryPool* _pool;
  41   GCMemoryManager _memory_manager;
  42   ContiguousSpace* _space;
  43   VirtualSpace _virtual_space;
       // Tuning/diagnostic fields: _step_*/_last_* presumably throttle perf
       // counter updates and periodic heap printing, and _decay_time_ns some
       // time-based decay -- TODO confirm exact semantics in epsilonHeap.cpp.
  44   size_t _max_tlab_size;
  45   size_t _step_counter_update;
  46   size_t _step_heap_print;
  47   int64_t _decay_time_ns;
  48   volatile size_t _last_counter_update;
  49   volatile size_t _last_heap_print;
       // Marking bitmap and the memory region backing it, added for the sliding
       // mark-compact GC; marked objects are visited via walk_bitmap() below.
  50   MemRegion  _bitmap_region;
  51   MarkBitMap _bitmap;
  52 
  53 public:
       // Singleton-style accessor for the Epsilon heap instance.
  54   static EpsilonHeap* heap();
  55 
       // NOTE(review): stray ';' after the constructor body -- harmless but
       // should be dropped; only _memory_manager is initialized here.
  56   EpsilonHeap() :
  57           _memory_manager("Epsilon Heap", "") {};
  58 
  59   virtual Name kind() const {
  60     return CollectedHeap::Epsilon;
  61   }
  62 
  63   virtual const char* name() const {
  64     return "Epsilon";
  65   }
  66 
  67   virtual SoftRefPolicy* soft_ref_policy() {
  68     return &_soft_ref_policy;
  69   }
  70 
  71   virtual jint initialize();
       // (webrev elides a line here -- line 72 is not shown in this diff view)


  73   virtual void initialize_serviceability();
  74 
  75   virtual GrowableArray<GCMemoryManager*> memory_managers();
  76   virtual GrowableArray<MemoryPool*> memory_pools();
  77 
  78   virtual size_t max_capacity() const { return _virtual_space.reserved_size();  }
  79   virtual size_t capacity()     const { return _virtual_space.committed_size(); }
  80   virtual size_t used()         const { return _space->used(); }
  81 
  82   virtual bool is_in(const void* p) const {
  83     return _space->is_in(p);
  84   }
  85 
  86   virtual bool is_maximal_no_gc() const {
  87     // No GC is going to happen. Return "we are at max", when we are about to fail.
  88     return used() == capacity();
  89   }
  90 
  91   // Allocation
  92   HeapWord* allocate_work(size_t size);
       // allocate_or_collect_work() is new in this revision -- presumably tries
       // allocate_work() and falls back to a collection on failure; TODO
       // confirm in epsilonHeap.cpp.
  93   HeapWord* allocate_or_collect_work(size_t size);
  94   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
  95   virtual HeapWord* allocate_new_tlab(size_t min_size,
  96                                       size_t requested_size,
  97                                       size_t* actual_size);
  98 
  99   // TLAB allocation
 100   virtual bool supports_tlab_allocation()           const { return true;           }
 101   virtual size_t tlab_capacity(Thread* thr)         const { return capacity();     }
 102   virtual size_t tlab_used(Thread* thr)             const { return used();         }
 103   virtual size_t max_tlab_size()                    const { return _max_tlab_size; }
 104   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 105 
 106   virtual void collect(GCCause::Cause cause);
 107   virtual void do_full_collection(bool clear_all_soft_refs);
 108 
 109   // Heap walking support
 110   virtual void object_iterate(ObjectClosure* cl);
 111 
 112   // Object pinning support: every object is implicitly pinned
 113   // Or is it... (evil laugh)
       // NOTE(review): pinning is reported unsupported when EpsilonSlidingGC is
       // enabled -- presumably because sliding compaction moves objects; confirm.
 114   virtual bool supports_object_pinning() const           { return !EpsilonSlidingGC; }
 115   virtual oop pin_object(JavaThread* thread, oop obj)    { return obj; }
 116   virtual void unpin_object(JavaThread* thread, oop obj) { }
 117 
 118   // No support for block parsing.
 119   HeapWord* block_start(const void* addr) const { return NULL;  }
 120   bool block_is_obj(const HeapWord* addr) const { return false; }
 121 
 122   // No GC threads
 123   virtual void print_gc_threads_on(outputStream* st) const {}
 124   virtual void gc_threads_do(ThreadClosure* tc) const {}
 125 
 126   // No nmethod handling
 127   virtual void register_nmethod(nmethod* nm) {}
 128   virtual void unregister_nmethod(nmethod* nm) {}
 129   virtual void flush_nmethod(nmethod* nm) {}
 130   virtual void verify_nmethod(nmethod* nm) {}
 131 
 132   // No heap verification
 133   virtual void prepare_for_verify() {}
 134   virtual void verify(VerifyOption option) {}
 135 
 136   virtual jlong millis_since_last_gc() {
 137     // Report time since the VM start
         // NOTE(review): divides os::elapsed_counter() by NANOSECS_PER_MILLISEC,
         // i.e. assumes the counter ticks in nanoseconds -- TODO confirm units.
 138     return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
 139   }
 140 
 141   MemRegion reserved_region() const { return _reserved; }
 142   bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
 143 
 144   virtual void print_on(outputStream* st) const;
 145   virtual void print_tracing_info() const;
 146   virtual bool print_location(outputStream* st, void* addr) const;
 147 
       // NOTE(review): entry_collect() runs the collection for `cause`; exposed
       // publicly -- presumably invoked from a VM operation; confirm caller.
 148   void entry_collect(GCCause::Cause cause);
 149 
 150 private:
       // Printing helpers used by print_on()/print_tracing_info().
 151   void print_heap_info(size_t used) const;
 152   void print_metaspace_info() const;
 153 
       // vmentry_collect() presumably enters the VM operation that then runs
       // entry_collect() -- TODO confirm in epsilonHeap.cpp.
 154   void vmentry_collect(GCCause::Cause cause);
 155 
       // Root scanning: both wrappers call do_roots(); process_roots() passes
       // everything=false, process_all_roots() passes everything=true.
 156   void do_roots(OopClosure* cl, bool everything);
 157   void process_roots(OopClosure* cl)     { do_roots(cl, false); }
 158   void process_all_roots(OopClosure* cl) { do_roots(cl, true);  }
       // Presumably iterates the objects marked in _bitmap -- confirm in .cpp.
 159   void walk_bitmap(ObjectClosure* cl);
 160 
 161 };
 162 
 163 #endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP
< prev index next >