/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
#define SHARE_GC_EPSILON_EPSILONHEAP_HPP

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "services/memoryManager.hpp"

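// Epsilon is a minimal "no-op" collector: it serves allocation requests by
// bumping a pointer within a single contiguous space and never reclaims memory.
// Once the committed heap is exhausted, allocation fails and the JVM shuts down.
// It is enabled with -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC.
// This variant additionally carries an optional sliding (mark-compact) collection,
// gated by the EpsilonSlidingGC flag and backed by the mark bitmap declared below.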
class EpsilonHeap : public CollectedHeap {
  friend class VMStructs;
private:
  SoftRefPolicy _soft_ref_policy;
  EpsilonMonitoringSupport* _monitoring_support;
  MemoryPool* _pool;
  GCMemoryManager _memory_manager;
  ContiguousSpace* _space;                 // the single allocation space
  VirtualSpace _virtual_space;             // reserved/committed heap backing memory
  size_t _max_tlab_size;                   // upper bound on TLAB size
  size_t _step_counter_update;             // allocation step between counter updates
  size_t _step_heap_print;                 // allocation step between heap printouts
  int64_t _decay_time_ns;                  // TLAB size decay time
  volatile size_t _last_counter_update;    // occupancy at last counter update
  volatile size_t _last_heap_print;        // occupancy at last heap printout
  MemRegion  _bitmap_region;               // backing memory for the mark bitmap
  MarkBitMap _bitmap;                      // mark bitmap used by the sliding GC

public:
  static EpsilonHeap* heap();

  EpsilonHeap() :
          _memory_manager("Epsilon Heap", "") {}

  virtual Name kind() const {
    return CollectedHeap::Epsilon;
  }

  virtual const char* name() const {
    return "Epsilon";
  }

  virtual SoftRefPolicy* soft_ref_policy() {
    return &_soft_ref_policy;
  }

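  // Heap setup: initialize() reserves and commits the backing memory and sets up
  // the single contiguous space; initialize_serviceability() wires up the memory
  // manager and pool that are reported through the accessors below.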
  virtual jint initialize();
  virtual void post_initialize();
  virtual void initialize_serviceability();

  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  virtual size_t max_capacity() const { return _virtual_space.reserved_size();  }
  virtual size_t capacity()     const { return _virtual_space.committed_size(); }
  virtual size_t used()         const { return _space->used(); }

  virtual bool is_in(const void* p) const {
    return _space->is_in(p);
  }

  virtual bool is_maximal_no_gc() const {
    // No GC is going to happen. Return "we are at max", when we are about to fail.
    return used() == capacity();
  }

  // Allocation
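  //
  // allocate_work() is the shared bump-pointer path for both TLAB and non-TLAB
  // allocations; allocate_or_collect_work() additionally allows the sliding GC to
  // run before giving up. The implementation lives in epsilonHeap.cpp; as a rough
  // sketch, the fast path is:
  //
  //   HeapWord* res = _space->par_allocate(size);  // lock-free pointer bump
  //   if (res == NULL) {
  //     // take a lock, commit more of the reserved space, retry the allocation,
  //     // and return NULL only when the heap is truly exhausted
  //   }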
  HeapWord* allocate_work(size_t size);
  HeapWord* allocate_or_collect_work(size_t size);
  virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  // TLAB allocation
  virtual bool supports_tlab_allocation()           const { return true;           }
  virtual size_t tlab_capacity(Thread* thr)         const { return capacity();     }
  virtual size_t tlab_used(Thread* thr)             const { return used();         }
  virtual size_t max_tlab_size()                    const { return _max_tlab_size; }
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
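
  // TLABs are carved out of the same contiguous space and capped at _max_tlab_size;
  // unsafe_max_tlab_alloc() estimates how much room is left for TLAB allocation.
  // TLAB sizes can also decay over _decay_time_ns, see the elastic TLAB handling
  // in epsilonHeap.cpp.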

  virtual void collect(GCCause::Cause cause);
  virtual void do_full_collection(bool clear_all_soft_refs);
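  // Note: vanilla Epsilon never actually collects on these requests; with
  // EpsilonSlidingGC enabled, an explicit request is routed into the mark-compact
  // cycle via entry_collect()/vmentry_collect() below.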

  // Heap walking support
  virtual void object_iterate(ObjectClosure* cl);

  // Object pinning support: without the sliding GC nothing ever moves, so every
  // object is implicitly pinned; with EpsilonSlidingGC objects may be relocated,
  // so pinning cannot be supported.
  virtual bool supports_object_pinning() const           { return !EpsilonSlidingGC; }
  virtual oop pin_object(JavaThread* thread, oop obj)    { return obj; }
  virtual void unpin_object(JavaThread* thread, oop obj) { }

  // No support for block parsing.
  HeapWord* block_start(const void* addr) const { return NULL;  }
  bool block_is_obj(const HeapWord* addr) const { return false; }

  // No GC threads
  virtual void print_gc_threads_on(outputStream* st) const {}
  virtual void gc_threads_do(ThreadClosure* tc) const {}

  // No nmethod handling
  virtual void register_nmethod(nmethod* nm) {}
  virtual void unregister_nmethod(nmethod* nm) {}
  virtual void flush_nmethod(nmethod* nm) {}
  virtual void verify_nmethod(nmethod* nm) {}

  // No heap verification
  virtual void prepare_for_verify() {}
  virtual void verify(VerifyOption option) {}

  virtual jlong millis_since_last_gc() {
    // Report time since the VM start
    return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
  }

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  virtual void print_on(outputStream* st) const;
  virtual void print_tracing_info() const;
  virtual bool print_location(outputStream* st, void* addr) const;

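  // The collection cycle of the sliding GC: entry_collect() does the actual work,
  // and is normally reached through vmentry_collect(), which submits it as a VM
  // operation so that it runs at a safepoint.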
  void entry_collect(GCCause::Cause cause);

private:
  void print_heap_info(size_t used) const;
  void print_metaspace_info() const;

  void vmentry_collect(GCCause::Cause cause);

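  // Sliding GC helpers: do_roots() visits the GC roots, with 'everything'
  // widening the root set (see process_roots() vs. process_all_roots());
  // walk_bitmap() visits the objects recorded in the mark bitmap.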
  void do_roots(OopClosure* cl, bool everything);
  void process_roots(OopClosure* cl)     { do_roots(cl, false); }
  void process_all_roots(OopClosure* cl) { do_roots(cl, true);  }
  void walk_bitmap(ObjectClosure* cl);

};

#endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP