1 /*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_RUNTIME_VMOPERATIONS_HPP
26 #define SHARE_RUNTIME_VMOPERATIONS_HPP
27
28 #include "classfile/javaClasses.hpp"
29 #include "memory/allocation.hpp"
30 #include "oops/oop.hpp"
31 #include "runtime/thread.hpp"
32 #include "runtime/threadSMR.hpp"
33 #include "code/codeCache.hpp"
34
35 // The following classes are used for operations
36 // initiated by a Java thread but that must
37 // take place in the VMThread.
38
// Expands one table entry into a single enumerator of VM_Operation::VMOp_Type,
// e.g. template(ThreadStop) -> VMOp_ThreadStop,
#define VM_OP_ENUM(type) VMOp_##type,

// Note: When new VM_XXX comes up, add 'XXX' to the template table.
// X-macro table of all VM operation types.  The order of entries defines the
// numeric VMOp_Type enum values (see VM_OP_ENUM above); a matching name table
// (_names[]) is generated from the same list in the .cpp file, so the two
// always stay in sync.  Only /* */ comments are legal between entries because
// of the line continuations.
#define VM_OPS_DO(template)                       \
  template(None)                                  \
  template(Cleanup)                               \
  template(ThreadStop)                            \
  template(ThreadDump)                            \
  template(PrintThreads)                          \
  template(FindDeadlocks)                         \
  template(ClearICs)                              \
  template(ForceSafepoint)                        \
  template(ForceAsyncSafepoint)                   \
  template(Deoptimize)                            \
  template(DeoptimizeFrame)                       \
  template(DeoptimizeAll)                         \
  template(ZombieAll)                             \
  template(Verify)                                \
  template(PrintJNI)                              \
  template(HeapDumper)                            \
  template(DeoptimizeTheWorld)                    \
  template(CollectForMetadataAllocation)          \
  template(GC_HeapInspection)                     \
  template(GenCollectFull)                        \
  template(GenCollectFullConcurrent)              \
  template(GenCollectForAllocation)               \
  template(ParallelGCFailedAllocation)            \
  template(ParallelGCSystemGC)                    \
  template(CMS_Initial_Mark)                      \
  template(CMS_Final_Remark)                      \
  template(G1CollectForAllocation)                \
  template(G1CollectFull)                         \
  template(G1Concurrent)                          \
  template(ZMarkStart)                            \
  template(ZMarkEnd)                              \
  template(ZRelocateStart)                        \
  template(ZVerify)                               \
  template(HandshakeOneThread)                    \
  template(HandshakeAllThreads)                   \
  template(HandshakeFallback)                     \
  template(EnableBiasedLocking)                   \
  template(BulkRevokeBias)                        \
  template(PopulateDumpSharedSpace)               \
  template(JNIFunctionTableCopier)                \
  template(RedefineClasses)                       \
  template(UpdateForPopTopFrame)                  \
  template(SetFramePop)                           \
  template(GetOwnedMonitorInfo)                   \
  template(GetObjectMonitorUsage)                 \
  template(GetCurrentContendedMonitor)            \
  template(GetStackTrace)                         \
  template(GetMultipleStackTraces)                \
  template(GetAllStackTraces)                     \
  template(GetThreadListStackTraces)              \
  template(GetFrameCount)                         \
  template(GetFrameLocation)                      \
  template(ChangeBreakpoints)                     \
  template(GetOrSetLocal)                         \
  template(GetCurrentLocation)                    \
  template(EnterInterpOnlyMode)                   \
  template(ChangeSingleStep)                      \
  template(HeapWalkOperation)                     \
  template(HeapIterateOperation)                  \
  template(ReportJavaOutOfMemory)                 \
  template(JFRCheckpoint)                         \
  template(ShenandoahFullGC)                      \
  template(ShenandoahInitMark)                    \
  template(ShenandoahFinalMarkStartEvac)          \
  template(ShenandoahFinalEvac)                   \
  template(ShenandoahInitTraversalGC)             \
  template(ShenandoahFinalTraversalGC)            \
  template(ShenandoahInitUpdateRefs)              \
  template(ShenandoahFinalUpdateRefs)             \
  template(ShenandoahDegeneratedGC)               \
  template(Exit)                                  \
  template(LinuxDllLoad)                          \
  template(RotateGCLog)                           \
  template(WhiteBoxOperation)                     \
  template(JVMCIResizeCounters)                   \
  template(ClassLoaderStatsOperation)             \
  template(ClassLoaderHierarchyOperation)         \
  template(DumpHashtable)                         \
  template(DumpTouchedMethods)                    \
  template(MarkActiveNMethods)                    \
  template(PrintCompileQueue)                     \
  template(PrintClassHierarchy)                   \
  template(ThreadSuspend)                         \
  template(ThreadsSuspendJVMTI)                   \
  template(ICBufferFull)                          \
  template(ScavengeMonitors)                      \
  template(PrintMetadata)                         \
  template(GTestExecuteAtSafepoint)               \
  template(JFROldObject)                          \
132
// Abstract base class for all VM operations.  An operation is typically
// created by a requesting (usually Java) thread, handed to
// VMThread::execute(), and then evaluated by the VMThread by calling
// evaluate(), which in turn runs the subclass' doit().  Subclasses override
// doit() plus the configuration virtuals below.
class VM_Operation: public CHeapObj<mtInternal> {
 public:
  enum Mode {
    _safepoint,       // blocking,     safepoint, vm_op C-heap allocated
    _no_safepoint,    // blocking,  no safepoint, vm_op C-Heap allocated
    _concurrent,      // non-blocking, no safepoint, vm_op C-Heap allocated
    _async_safepoint  // non-blocking,    safepoint, vm_op C-Heap allocated
  };

  // One VMOp_XXX enumerator per entry in the VM_OPS_DO table, plus a
  // terminator used for bounds checks (see name(int) below).
  enum VMOp_Type {
    VM_OPS_DO(VM_OP_ENUM)
    VMOp_Terminating
  };

 private:
  Thread* _calling_thread;  // the thread that submitted this operation
  long _timestamp;          // recorded via set_timestamp(); presumably set
                            // when the op is queued -- see VMThread code
  VM_Operation* _next;      // intrusive doubly-linked list links, used by
  VM_Operation* _prev;      //   the VMThread's operation queue

  // The VM operation name array; indexed by VMOp_Type, generated from
  // VM_OPS_DO in the .cpp file.
  static const char* _names[];

 public:
  VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; }
  virtual ~VM_Operation() {}

  // VM operation support (used by VM thread)
  Thread* calling_thread() const { return _calling_thread; }
  void set_calling_thread(Thread* thread);

  long timestamp() const { return _timestamp; }
  void set_timestamp(long timestamp) { _timestamp = timestamp; }

  // Called by VM thread - does in turn invoke doit(). Do not override this
  void evaluate();

  // evaluate() is called by the VMThread and in turn calls doit().
  // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread,
  // doit_prologue() is called in that thread before transferring control to
  // the VMThread.
  // If doit_prologue() returns true the VM operation will proceed, and
  // doit_epilogue() will be called by the JavaThread once the VM operation
  // completes. If doit_prologue() returns false the VM operation is cancelled.
  virtual void doit() = 0;
  virtual bool doit_prologue() { return true; };
  virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent

  // Type test
  virtual bool is_methodCompiler() const { return false; }

  // Linking (queue management; maintained externally by the VMThread)
  VM_Operation *next() const { return _next; }
  VM_Operation *prev() const { return _prev; }
  void set_next(VM_Operation *next) { _next = next; }
  void set_prev(VM_Operation *prev) { _prev = prev; }

  // Configuration. Override these appropriately in subclasses.
  virtual VMOp_Type type() const = 0;
  virtual Mode evaluation_mode() const { return _safepoint; }
  virtual bool allow_nested_vm_operations() const { return false; }
  // NOTE(review): "cheap allocated" appears to mark ops whose lifetime is
  // managed by the VMThread rather than the requester -- confirm in vmThread.cpp.
  virtual bool is_cheap_allocated() const { return false; }
  // GC support: subclasses holding raw oops must expose them here as roots.
  virtual void oops_do(OopClosure* f) { /* do nothing */ };

  // CAUTION: <don't hang yourself with following rope>
  // If you override these methods, make sure that the evaluation
  // of these methods is race-free and non-blocking, since these
  // methods may be evaluated either by the mutators or by the
  // vm thread, either concurrently with mutators or with the mutators
  // stopped. In other words, taking locks is verboten, and if there
  // are any races in evaluating the conditions, they'd better be benign.
  virtual bool evaluate_at_safepoint() const {
    return evaluation_mode() == _safepoint  ||
           evaluation_mode() == _async_safepoint;
  }
  virtual bool evaluate_concurrently() const {
    return evaluation_mode() == _concurrent ||
           evaluation_mode() == _async_safepoint;
  }

  static const char* mode_to_string(Mode mode);

  // Debugging
  virtual void print_on_error(outputStream* st) const;
  virtual const char* name() const { return _names[type()]; }
  // Bounds-checked lookup of an operation name by raw type value.
  static const char* name(int type) {
    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
    return _names[type];
  }
#ifndef PRODUCT
  void print_on(outputStream* st) const { print_on_error(st); }
#endif
};
226
227 class VM_None: public VM_Operation {
228 const char* _reason;
229 public:
230 VM_None(const char* reason) : _reason(reason) {}
231 const char* name() const { return _reason; }
232 VMOp_Type type() const { return VMOp_None; }
233 void doit() {};
234 };
235
// No-op safepoint operation used to trigger a safepoint cleanup pass;
// all the actual work happens in the generic safepoint machinery.
class VM_Cleanup: public VM_Operation {
 public:
  VMOp_Type type() const { return VMOp_Cleanup; }
  void doit() {};
};
241
// Asynchronously posts a Throwable into a target thread
// (java.lang.Thread.stop() support).  Evaluated at a safepoint but does
// not block the requesting thread (_async_safepoint, cheap-allocated).
class VM_ThreadStop: public VM_Operation {
 private:
  oop _thread;        // The Thread that the Throwable is thrown against
  oop _throwable;     // The Throwable thrown at the target Thread
 public:
  // NOTE(review): contrary to the historical comment here ("all oops are
  // passed as JNI handles"), the oops are stored raw; they survive a GC
  // between queuing and execution only because oops_do() below exposes
  // them to the collector as roots.
  VM_ThreadStop(oop thread, oop throwable) {
    _thread    = thread;
    _throwable = throwable;
  }
  VMOp_Type type() const { return VMOp_ThreadStop; }
  oop target_thread() const { return _thread; }
  oop throwable() const { return _throwable;}
  void doit();
  // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated
  bool allow_nested_vm_operations() const { return true; }
  Mode evaluation_mode() const { return _async_safepoint; }
  bool is_cheap_allocated() const { return true; }

  // GC support: keeps the raw _thread/_throwable oops alive and updated.
  void oops_do(OopClosure* f) {
    f->do_oop(&_thread); f->do_oop(&_throwable);
  }
};
267
268 class VM_ClearICs: public VM_Operation {
269 private:
270 bool _preserve_static_stubs;
271 public:
272 VM_ClearICs(bool preserve_static_stubs) { _preserve_static_stubs = preserve_static_stubs; }
273 void doit();
274 VMOp_Type type() const { return VMOp_ClearICs; }
275 };
276
// empty vm op, evaluated just to force a safepoint
// (also serves as the base class for the specialized empty ops below)
class VM_ForceSafepoint: public VM_Operation {
 public:
  void doit() {}
  VMOp_Type type() const { return VMOp_ForceSafepoint; }
};
283
// empty vm op, when forcing a safepoint to suspend a thread
// (differs from VM_ForceSafepoint only in its reported type/name)
class VM_ThreadSuspend: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ThreadSuspend; }
};
289
// empty vm op, when forcing a safepoint to suspend threads from jvmti
// (differs from VM_ForceSafepoint only in its reported type/name)
class VM_ThreadsSuspendJVMTI: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ThreadsSuspendJVMTI; }
};
295
// empty vm op, when forcing a safepoint due to inline cache buffers being full
// (the buffers are flushed as part of safepoint cleanup, not by this op)
class VM_ICBufferFull: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ICBufferFull; }
};
301
// empty asynchronous vm op, when forcing a safepoint to scavenge monitors
// (non-blocking for the requester; the op itself is cheap-allocated and
// freed by the VMThread)
class VM_ScavengeMonitors: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ScavengeMonitors; }
  Mode evaluation_mode() const { return _async_safepoint; }
  bool is_cheap_allocated() const { return true; }
};
309
// Base class for invoking parts of a gtest in a safepoint.
// Derived classes provide the doit method.
// Typically also need to transition the gtest thread from native to VM.
class VM_GTestExecuteAtSafepoint: public VM_Operation {
 public:
  VMOp_Type type() const { return VMOp_GTestExecuteAtSafepoint; }

 protected:
  // Protected: this class is only meaningful as a base for concrete tests.
  VM_GTestExecuteAtSafepoint() {}
};
320
// Deoptimizes dependent compiled methods at a safepoint (implementation
// in vmOperations.cpp).  Allows nesting so it can run inside other ops.
class VM_Deoptimize: public VM_Operation {
 public:
  VM_Deoptimize() {}
  VMOp_Type type() const { return VMOp_Deoptimize; }
  void doit();
  bool allow_nested_vm_operations() const { return true; }
};
328
// Marks nmethods that are currently active on thread stacks at a
// safepoint (implementation in vmOperations.cpp).
class VM_MarkActiveNMethods: public VM_Operation {
 public:
  VM_MarkActiveNMethods() {}
  VMOp_Type type() const { return VMOp_MarkActiveNMethods; }
  void doit();
  bool allow_nested_vm_operations() const { return true; }
};
336
// Deopt helper that can deoptimize frames in threads other than the
// current thread. Only used through Deoptimization::deoptimize_frame.
class VM_DeoptimizeFrame: public VM_Operation {
  // Only Deoptimization may construct this op (private constructor).
  friend class Deoptimization;

 private:
  JavaThread* _thread;  // thread owning the frame to deoptimize
  intptr_t*   _id;      // frame identifier (stack pointer of the frame)
  int         _reason;  // deoptimization reason code
  VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason);

 public:
  VMOp_Type type() const { return VMOp_DeoptimizeFrame; }
  void doit();
  bool allow_nested_vm_operations() const { return true; }
};
353
354 #ifndef PRODUCT
// Debug-only op: deoptimizes all compiled frames (not built in PRODUCT).
class VM_DeoptimizeAll: public VM_Operation {
 private:
  // NOTE(review): _dependee is never initialized by the constructor and is
  // not referenced in this header -- it appears to be dead; verify against
  // VM_DeoptimizeAll::doit() before removing.
  Klass* _dependee;
 public:
  VM_DeoptimizeAll() {}
  VMOp_Type type() const { return VMOp_DeoptimizeAll; }
  void doit();
  bool allow_nested_vm_operations() const { return true; }
};
364
365
// Debug-only op: makes all nmethods zombies (not built in PRODUCT).
class VM_ZombieAll: public VM_Operation {
 public:
  VM_ZombieAll() {}
  VMOp_Type type() const { return VMOp_ZombieAll; }
  void doit();
  bool allow_nested_vm_operations() const { return true; }
};
373 #endif // PRODUCT
374
// Runs the VM's self-verification (e.g. Universe/heap verify) at a
// safepoint; implementation in vmOperations.cpp.
class VM_Verify: public VM_Operation {
 public:
  VMOp_Type type() const { return VMOp_Verify; }
  void doit();
};
380
381
382 class VM_PrintThreads: public VM_Operation {
383 private:
384 outputStream* _out;
385 bool _print_concurrent_locks;
386 bool _print_extended_info;
387 public:
388 VM_PrintThreads()
389 : _out(tty), _print_concurrent_locks(PrintConcurrentLocks), _print_extended_info(false)
390 {}
391 VM_PrintThreads(outputStream* out, bool print_concurrent_locks, bool print_extended_info)
392 : _out(out), _print_concurrent_locks(print_concurrent_locks), _print_extended_info(print_extended_info)
393 {}
394 VMOp_Type type() const {
395 return VMOp_PrintThreads;
396 }
397 void doit();
398 bool doit_prologue();
399 void doit_epilogue();
400 };
401
402 class VM_PrintJNI: public VM_Operation {
403 private:
404 outputStream* _out;
405 public:
406 VM_PrintJNI() { _out = tty; }
407 VM_PrintJNI(outputStream* out) { _out = out; }
408 VMOp_Type type() const { return VMOp_PrintJNI; }
409 void doit();
410 };
411
412 class VM_PrintMetadata : public VM_Operation {
413 private:
414 outputStream* const _out;
415 const size_t _scale;
416 const int _flags;
417
418 public:
419 VM_PrintMetadata(outputStream* out, size_t scale, int flags)
420 : _out(out), _scale(scale), _flags(flags)
421 {};
422
423 VMOp_Type type() const { return VMOp_PrintMetadata; }
424 void doit();
425 };
426
class DeadlockCycle;
// Detects deadlocked Java threads at a safepoint.  Cycles are either
// recorded in _deadlocks for the caller to consume via result(), or
// (stream constructor) reported through _out -- see the .cpp for details.
class VM_FindDeadlocks: public VM_Operation {
 private:
  bool _concurrent_locks;      // also consider java.util.concurrent ownable synchronizers
  DeadlockCycle* _deadlocks;   // detected cycles; presumably released in the destructor
  outputStream* _out;          // non-NULL only for the printing variant
  ThreadsListSetter _setter;  // Helper to set hazard ptr in the originating thread
                              // which protects the JavaThreads in _deadlocks.

 public:
  VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _deadlocks(NULL), _out(NULL), _setter() {};
  // NOTE(review): this variant leaves _setter default-constructed (no explicit
  // hazard-ptr setup as in the constructor above) -- confirm that the printing
  // path does not retain JavaThread references past the safepoint.
  VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _deadlocks(NULL), _out(st) {};
  ~VM_FindDeadlocks();

  DeadlockCycle* result() { return _deadlocks; };
  VMOp_Type type() const { return VMOp_FindDeadlocks; }
  void doit();
};
445
class ThreadDumpResult;
class ThreadSnapshot;
class ThreadConcurrentLocks;

// Captures stack traces (and optionally monitor/synchronizer information)
// for all JavaThreads or for an explicit list of threads, storing the
// snapshots into the caller-owned ThreadDumpResult (java.lang.management
// thread dump support).
class VM_ThreadDump : public VM_Operation {
 private:
  ThreadDumpResult* _result;      // out-parameter: snapshots are added here
  int _num_threads;               // number of entries in _threads
  GrowableArray<instanceHandle>* _threads;  // explicit thread list (second ctor only)
  int _max_depth;                 // max stack frames per thread; -1 = entire stack
  bool _with_locked_monitors;     // include locked-monitor information
  bool _with_locked_synchronizers; // include j.u.c. synchronizer information

  // Records one thread's snapshot (stack + tcl lock info) into _result.
  void snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl);

 public:
  // Dump all threads.
  VM_ThreadDump(ThreadDumpResult* result,
                int max_depth,  // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  // Dump only the given threads.
  // NOTE(review): the "-1 indicates entire stack" comment previously sat on
  // num_threads; per the first constructor it describes max_depth.
  VM_ThreadDump(ThreadDumpResult* result,
                GrowableArray<instanceHandle>* threads,
                int num_threads,
                int max_depth,  // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VMOp_Type type() const { return VMOp_ThreadDump; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};
479
480
481 class VM_Exit: public VM_Operation {
482 private:
483 int _exit_code;
484 static volatile bool _vm_exited;
485 static Thread * volatile _shutdown_thread;
486 static void wait_if_vm_exited();
487 public:
488 VM_Exit(int exit_code) {
489 _exit_code = exit_code;
490 }
491 static int wait_for_threads_in_native_to_block();
492 static int set_vm_exited();
493 static bool vm_exited() { return _vm_exited; }
494 static Thread * shutdown_thread() { return _shutdown_thread; }
495 static void block_if_vm_exited() {
496 if (_vm_exited) {
497 wait_if_vm_exited();
498 }
499 }
500 VMOp_Type type() const { return VMOp_Exit; }
501 void doit();
502 };
503
504 class VM_PrintCompileQueue: public VM_Operation {
505 private:
506 outputStream* _out;
507
508 public:
509 VM_PrintCompileQueue(outputStream* st) : _out(st) {}
510 VMOp_Type type() const { return VMOp_PrintCompileQueue; }
511 Mode evaluation_mode() const { return _safepoint; }
512 void doit();
513 };
514
#if INCLUDE_SERVICES
// Prints the class hierarchy (optionally restricted to one class, and
// optionally including interfaces/subclasses) to the given stream;
// "VM.class_hierarchy" diagnostic-command support.
class VM_PrintClassHierarchy: public VM_Operation {
 private:
  outputStream* _out;      // destination stream
  bool _print_interfaces;  // include implemented interfaces
  bool _print_subclasses;  // include subclasses
  char* _classname;        // restrict output to this class; semantics of
                           // NULL/empty are handled in doit()

 public:
  VM_PrintClassHierarchy(outputStream* st, bool print_interfaces, bool print_subclasses, char* classname) :
    _out(st), _print_interfaces(print_interfaces), _print_subclasses(print_subclasses),
    _classname(classname) {}
  VMOp_Type type() const { return VMOp_PrintClassHierarchy; }
  void doit();
};
#endif // INCLUDE_SERVICES
531
532 #endif // SHARE_RUNTIME_VMOPERATIONS_HPP