/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

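  // Only the initial size is committed at this point; the rest of the reservation
  // is committed on demand by allocate_work() as the heap fills up.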
  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
  MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
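  // Note the units: _max_tlab_size is in heap words, while _step_counter_update
  // and _step_heap_print are byte thresholds compared against used() on the
  // allocation path, and _decay_time_ns is in nanoseconds.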

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
    if (EpsilonElasticTLAB) {
      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
    }
    if (EpsilonElasticTLABDecay) {
      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
    }
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

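// Serviceability support: Epsilon exposes a single memory pool and a single
// memory manager, so heap usage remains visible through the standard
// java.lang.management interfaces even though no collections ever happen.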
void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual allocation size. Note: result should be in bytes.
  return _max_tlab_size * HeapWordSize;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

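  // Fast path: try a lock-free pointer-bump (CAS) allocation in the contiguous space first.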
  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLocker ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, and this allocation is still possible,
      // take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

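  // Both periodic tasks below use the same lock-free pattern: the CAS on the
  // "last" marker ensures only one of the racing allocating threads does the
  // (comparatively expensive) reporting for a given step; everyone else moves on.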
  // Allocation successful, update counters
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

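  // Elastic TLAB sizing: each thread starts from its remembered "ergonomic"
  // TLAB size and grows it multiplicatively whenever a request does not fit.
  // For example, with an ergonomic size of 100K words and an elasticity of
  // 1.1x, the next TLAB would be sized at about 110K words (before clamping
  // to the min/max bounds below).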
  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when a thread makes an initial burst of allocations
      // and then allocates only sporadically afterwards.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

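// Non-TLAB allocation path, used when an allocation bypasses TLABs (for example,
// when TLABs are disabled or the request does not fit in one).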
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}