diff --git a/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp index f30b5d7cf2c64..05e9f15a48078 100644 --- a/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp @@ -32,6 +32,7 @@ #include "prims/downcallLinker.hpp" #include "runtime/globals.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/threadWXSetters.inline.hpp" #define __ _masm-> @@ -47,6 +48,9 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, bool needs_return_buffer, int captured_state_mask, bool needs_transition) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_arg); int locs_size = 1; // must be non-zero CodeBuffer code("nep_invoker_blob", code_size, locs_size); diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp index c45611c882b5a..b9ca63b8de815 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp @@ -33,6 +33,7 @@ #include "runtime/javaThread.hpp" #include "runtime/registerMap.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/formatBuffer.hpp" @@ -118,6 +119,10 @@ class NativeNMethodBarrier { } void set_value(int value) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + REQUIRE_THREAD_WX_MODE_WRITE Atomic::release_store(guard_addr(), value); } diff --git a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp index 8bec45b4b479a..6f081120b0578 100644 --- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp @@ -58,7 +58,7 @@ static const Register result = r7; // (8262896). So each FastGetXXXField is wrapped into a C++ statically // compiled template function that optionally switches to WXExec if necessary. 
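// Orientation for the guards this patch introduces (WXWriteMark, WXExecMark,
// WXLazyMark, and the REQUIRE_THREAD_WX_MODE_WRITE check): their real definitions
// live in runtime/threadWXSetters.inline.hpp, which is not part of the hunks shown
// here. The sketch below is only an assumed shape, an RAII guard that switches the
// thread's W^X mode and restores the previous mode on scope exit, built on the
// enable_wx() call the existing jni.cpp code already uses
// (WXMode oldmode = thread->enable_wx(WXWrite)). Class and member names are
// hypothetical; the real marks also accept plain Thread* (they are used from
// WorkerThread and ConcurrentGCThread later in this patch), a detail glossed over
// in this sketch.

#include "runtime/javaThread.hpp"

class WXScopedModeSketch {
  JavaThread* const _thread;
  const WXMode      _old_mode;
 public:
  WXScopedModeSketch(JavaThread* thread, WXMode mode)
    : _thread(thread),
      _old_mode(thread->enable_wx(mode)) {}  // switch now, remember the old mode
  ~WXScopedModeSketch() {
    _thread->enable_wx(_old_mode);           // restore whatever the caller had
  }
};

// Assumed usage, mirroring the call sites added in this patch:
//   { WXScopedModeSketch _wx(JavaThread::current(), WXWrite);  // like WXWriteMark
//     /* patch code in the code cache */ }
//   { WXScopedModeSketch _wx(JavaThread::current(), WXExec);   // like WXExecMark
//     /* run generated code */ }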
-#ifdef __APPLE__ +#if INCLUDE_WX_OLD static address generated_fast_get_field[T_LONG + 1 - T_BOOLEAN]; @@ -86,14 +86,14 @@ address JNI_FastGetField::generate_fast_get_int_field1() { return (address)static_fast_get_field_wrapper; } -#else // __APPLE__ +#else // INCLUDE_WX_OLD template address JNI_FastGetField::generate_fast_get_int_field1() { return generate_fast_get_int_field0((BasicType)BType); } -#endif // __APPLE__ +#endif // INCLUDE_WX_OLD address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { const char *name; diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp index 33158d6b97a91..29745e4ef52a8 100644 --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp @@ -81,6 +81,8 @@ void NativeCall::set_destination_mt_safe(address dest) { CompiledICLocker::is_safe(addr_at(0)), "concurrent code patching"); + REQUIRE_THREAD_WX_MODE_WRITE + address addr_call = addr_at(0); bool reachable = Assembler::reachable_from_branch_at(addr_call, dest); assert(NativeCall::is_call_at(addr_call), "unexpected code at call site"); diff --git a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp index 7a0e5aaf3b4e9..4bcfa4f2b98ed 100644 --- a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp @@ -122,6 +122,9 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Symbol* signature, BasicType ret_type, jobject jabi, jobject jconv, bool needs_return_buffer, int ret_buf_size) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif ResourceMark rm; const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi); const CallRegs call_regs = ForeignGlobals::parse_call_regs(jconv); diff --git a/src/hotspot/os/bsd/globals_bsd.hpp b/src/hotspot/os/bsd/globals_bsd.hpp index 850d491a11fa4..1e11beef0099f 100644 --- a/src/hotspot/os/bsd/globals_bsd.hpp +++ b/src/hotspot/os/bsd/globals_bsd.hpp @@ -37,7 +37,13 @@ \ AARCH64_ONLY(develop(bool, AssertWXAtThreadSync, true, \ "Conservatively check W^X thread state at possible safepoint" \ - "or handshake")) + "or handshake")) \ + AARCH64_ONLY(develop(bool, AssertWX, false, \ + "Enable extra W^X state checking.")) \ + AARCH64_ONLY(develop(bool, UseOldWX, false WX_OLD_ONLY(|| true), \ + "Choose old W^X implementation.")) \ + AARCH64_ONLY(develop(bool, UseNewWX, false WX_NEW_ONLY(|| true), \ + "Choose new W^X implementation.")) \ // end of RUNTIME_OS_FLAGS diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp index aeba308d3a2e2..ee575e2e6ff2e 100644 --- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp @@ -37,6 +37,9 @@ #include "os_posix.hpp" #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" +#if 1 +#include "runtime/atomic.hpp" +#endif #include "runtime/arguments.hpp" #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -199,9 +202,11 @@ NOINLINE frame os::current_frame() { bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, ucontext_t* uc, JavaThread* thread) { +#if INCLUDE_WX_OLD // Enable WXWrite: this function is called by the signal handler at arbitrary // point of execution. 
ThreadWXEnable wx(WXWrite, thread); +#endif // decide if this trap can be handled by a stub address stub = nullptr; @@ -210,6 +215,12 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, //%note os_trap_1 if (info != nullptr && uc != nullptr && thread != nullptr) { +#if INCLUDE_WX_NEW + // Enable WXExec: this function is called by the signal handler at arbitrary + // point of execution. + auto _wx = WXExecMark(thread); +#endif + pc = (address) os::Posix::ucontext_get_pc(uc); // Handle ALL stack overflow variations here @@ -488,8 +499,26 @@ int os::extra_bang_size_in_bytes() { return 0; } -void os::current_thread_enable_wx(WXMode mode) { - pthread_jit_write_protect_np(mode == WXExec); +#ifndef PRODUCT +static uint _wx_changes[2] = {0}; +static uint _wx_changes_total = 0; +#endif + +void os::current_thread_enable_wx(WXMode mode, bool use_new_code) { +#ifndef PRODUCT + Atomic::inc(&_wx_changes[use_new_code ? 1 : 0]); + Atomic::inc(&_wx_changes_total); + if (is_power_of_2(_wx_changes_total)) { + log_develop_info(wx, perf)("WX transitions: old %d new %d total %d %.1f:%.1f", + _wx_changes[0], _wx_changes[1], _wx_changes_total, + 100.0 * _wx_changes[0] / _wx_changes_total, + 100.0 * _wx_changes[1] / _wx_changes_total); + } +#endif + + if (use_new_code ? UseNewWX : UseOldWX && !UseNewWX) { + pthread_jit_write_protect_np(mode == WXExec); + } } static inline void atomic_copy64(const volatile void *src, volatile void *dst) { diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp index 4335733d1fed4..47bd8262919b2 100644 --- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp @@ -469,3 +469,23 @@ extern "C" { memmove(to, from, count * 8); } }; + +#if WX_EMUL +#ifndef PRODUCT +static uint _wx_changes[2] = {0}; +static uint _wx_changes_total = 0; +#endif + +void os::current_thread_enable_wx(WXMode mode, bool use_new_code) { +#ifndef PRODUCT + Atomic::inc(&_wx_changes[use_new_code ? 
1 : 0]); + Atomic::inc(&_wx_changes_total); + if (is_power_of_2(_wx_changes_total)) { + log_develop_info(wx, perf)("WX transitions: old %d new %d total %d %.1f:%.1f", + _wx_changes[0], _wx_changes[1], _wx_changes_total, + 100.0 * _wx_changes[0] / _wx_changes_total, + 100.0 * _wx_changes[1] / _wx_changes_total); + } +#endif +} +#endif diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp index 95cd4b7f912af..849f51ccca950 100644 --- a/src/hotspot/share/asm/codeBuffer.hpp +++ b/src/hotspot/share/asm/codeBuffer.hpp @@ -33,6 +33,7 @@ #include "utilities/growableArray.hpp" #include "utilities/linkedlist.hpp" #include "utilities/resizeableResourceHash.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/macros.hpp" template @@ -155,6 +156,8 @@ class CodeSection { _skipped_instructions_size = cs->_skipped_instructions_size; } + address writable_end() const { REQUIRE_THREAD_WX_MODE_WRITE return _end; } + public: address start() const { return _start; } address mark() const { return _mark; } @@ -220,24 +223,24 @@ class CodeSection { // Code emission void emit_int8(uint8_t x1) { - address curr = end(); + address curr = writable_end(); *((uint8_t*) curr++) = x1; set_end(curr); } template - void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); } + void emit_native(T x) { put_native(writable_end(), x); set_end(end() + sizeof x); } void emit_int16(uint16_t x) { emit_native(x); } void emit_int16(uint8_t x1, uint8_t x2) { - address curr = end(); + address curr = writable_end(); *((uint8_t*) curr++) = x1; *((uint8_t*) curr++) = x2; set_end(curr); } void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3) { - address curr = end(); + address curr = writable_end(); *((uint8_t*) curr++) = x1; *((uint8_t*) curr++) = x2; *((uint8_t*) curr++) = x3; @@ -246,7 +249,7 @@ class CodeSection { void emit_int32(uint32_t x) { emit_native(x); } void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) { - address curr = end(); + address curr = writable_end(); *((uint8_t*) curr++) = x1; *((uint8_t*) curr++) = x2; *((uint8_t*) curr++) = x3; diff --git a/src/hotspot/share/c1/c1_Compilation.cpp b/src/hotspot/share/c1/c1_Compilation.cpp index 53a97ce104216..d4217ac405fa2 100644 --- a/src/hotspot/share/c1/c1_Compilation.cpp +++ b/src/hotspot/share/c1/c1_Compilation.cpp @@ -386,6 +386,10 @@ int Compilation::compile_java_method() { BAILOUT_("mdo allocation failed", no_frame_size); } +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + { PhaseTraceTime timeit(_t_buildIR); build_hir(); diff --git a/src/hotspot/share/c1/c1_Compiler.cpp b/src/hotspot/share/c1/c1_Compiler.cpp index dd13b84edf51e..f2d2d403baac2 100644 --- a/src/hotspot/share/c1/c1_Compiler.cpp +++ b/src/hotspot/share/c1/c1_Compiler.cpp @@ -39,6 +39,7 @@ #include "memory/resourceArea.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "runtime/vm_version.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/macros.hpp" @@ -64,6 +65,10 @@ bool Compiler::init_c1_runtime() { void Compiler::initialize() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + // Buffer blob must be allocated per C1 compiler thread at startup BufferBlob* buffer_blob = init_buffer_blob(); diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index 0f87a90a417a7..91413ac3deae9 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ 
b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -1321,7 +1321,7 @@ void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) { // Enable WXWrite: the function is called by c1 stub as a runtime function // (see another implementation above). - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, current)); if (TracePatching) { tty->print_cr("Deoptimizing because patch is needed"); diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp index 3991773d86fa3..b0cc277bc67ba 100644 --- a/src/hotspot/share/ci/ciEnv.cpp +++ b/src/hotspot/share/ci/ciEnv.cpp @@ -1059,6 +1059,10 @@ void ciEnv::register_method(ciMethod* target, assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry"); assert(offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry"); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(THREAD); +#endif + nm = nmethod::new_nmethod(method, compile_id(), entry_bci, diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp index d7660647fb280..49e12e2f82051 100644 --- a/src/hotspot/share/classfile/classLoader.cpp +++ b/src/hotspot/share/classfile/classLoader.cpp @@ -70,6 +70,7 @@ #include "runtime/os.hpp" #include "runtime/perfData.hpp" #include "runtime/threadCritical.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "runtime/timer.hpp" #include "runtime/vm_version.hpp" #include "services/management.hpp" @@ -686,6 +687,9 @@ void ClassLoader::add_to_exploded_build_list(JavaThread* current, Symbol* module } jzfile* ClassLoader::open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread) { +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(thread); +#endif // enable call to C land ThreadToNativeFromVM ttn(thread); HandleMark hm(thread); diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp index fca6a9e74ad31..940b279160c5e 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp @@ -412,6 +412,11 @@ bool ClassLoaderDataGraph::do_unloading() { uint loaders_processed = 0; uint loaders_removed = 0; + // Anticipate nmethod::is_unloading() +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif + for (ClassLoaderData* data = _head; data != nullptr; data = data->next()) { if (data->is_alive()) { prev = data; diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index 1512f498b37cb..7065413f7ce02 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -319,6 +319,9 @@ RuntimeBlob::RuntimeBlob( void RuntimeBlob::free(RuntimeBlob* blob) { assert(blob != nullptr, "caller must check for nullptr"); ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif blob->purge(); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); @@ -376,6 +379,9 @@ BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size) BufferBlob* BufferBlob::create(const char* name, uint buffer_size) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif BufferBlob* blob = nullptr; unsigned int size = sizeof(BufferBlob); @@ -401,6 +407,9 @@ BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int // Used by gtest 
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif BufferBlob* blob = nullptr; unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob)); @@ -467,8 +476,6 @@ VtableBlob::VtableBlob(const char* name, int size) : } VtableBlob* VtableBlob::create(const char* name, int buffer_size) { - assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state"); - VtableBlob* blob = nullptr; unsigned int size = sizeof(VtableBlob); // align the size to CodeEntryAlignment @@ -489,6 +496,9 @@ VtableBlob* VtableBlob::create(const char* name, int buffer_size) { // eventually. return nullptr; } +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif blob = new (size) VtableBlob(name, size); CodeCache_lock->unlock(); } diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index 902d434562229..c543f10c7fb8b 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -61,6 +61,7 @@ #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" #include "runtime/safepointVerifiers.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "runtime/vmThread.hpp" #include "sanitizers/leak.hpp" #include "services/memoryService.hpp" @@ -506,6 +507,8 @@ CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handl if (size == 0) { return nullptr; } + REQUIRE_THREAD_WX_MODE_WRITE + CodeBlob* cb = nullptr; // Get CodeHeap for the given CodeBlobType @@ -574,6 +577,9 @@ CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handl } void CodeCache::free(CodeBlob* cb) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif assert_locked_or_safepoint(CodeCache_lock); CodeHeap* heap = get_code_heap(cb); print_trace("free", cb); @@ -603,6 +609,9 @@ void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) { // which provides the memory (see BufferBlob::create() in codeBlob.cpp). used += CodeBlob::align_code_offset(cb->header_size()); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif // Get heap for given CodeBlob and deallocate its unused tail get_code_heap(cb)->deallocate_tail(cb, used); // Adjust the sizes of the CodeBlob @@ -1145,6 +1154,9 @@ void CodeCache::initialize() { } void codeCache_init() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif CodeCache::initialize(); } @@ -1224,6 +1236,9 @@ static void check_live_nmethods_dependencies(DepChange& changes) { void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif // search the hierarchy looking for nmethods which are affected by the loading of this class @@ -1302,6 +1317,9 @@ void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deo reset_old_method_table(); NMethodIterator iter(NMethodIterator::all); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif while(iter.next()) { nmethod* nm = iter.method(); // Walk all alive nmethods to check for old Methods. 
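// The WXLazyMark scopes added to these code-cache walks (mark_for_deoptimization,
// the evol-deopt iterations, and the "Anticipate nmethod::is_unloading()" case in
// ClassLoaderDataGraph) appear to be there so that a scan which usually patches
// nothing does not pay for a W^X flip up front. A rough picture of the intended
// pattern, with assumed semantics: the lazy guard defers any mode change, and only
// the rare path that really writes takes write mode (as
// CodeCache::make_marked_nmethods_deoptimized() does in this patch).
// needs_patching() and patch_entry() below are hypothetical placeholders, not
// functions from this patch.

void scan_and_patch_sketch() {
  auto _wx = WXLazyMark(Thread::current());          // scan only: no transition yet
  NMethodIterator iter(NMethodIterator::not_unloading);
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (needs_patching(nm)) {                        // hypothetical rare path
      auto _wx_w = WXWriteMark(Thread::current());   // write mode just for the patch
      patch_entry(nm);                               // hypothetical helper
    }
  }
}                                                    // previous mode restored here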
@@ -1317,6 +1335,9 @@ void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deo void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); NMethodIterator iter(NMethodIterator::all); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif while(iter.next()) { nmethod* nm = iter.method(); if (!nm->method()->is_method_handle_intrinsic()) { @@ -1336,6 +1357,9 @@ void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* d void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); NMethodIterator iter(NMethodIterator::not_unloading); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif while(iter.next()) { nmethod* nm = iter.method(); if (!nm->is_native_method()) { @@ -1346,6 +1370,9 @@ void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif NMethodIterator iter(NMethodIterator::not_unloading); while(iter.next()) { @@ -1358,6 +1385,9 @@ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method void CodeCache::make_marked_nmethods_deoptimized() { RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif while(iter.next()) { nmethod* nm = iter.method(); if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp index 2547b8711db16..31b77d920d26c 100644 --- a/src/hotspot/share/code/compiledIC.cpp +++ b/src/hotspot/share/code/compiledIC.cpp @@ -198,6 +198,9 @@ void CompiledIC::ensure_initialized(CallInfo* call_info, Klass* receiver_klass) void CompiledIC::set_to_clean() { log_debug(inlinecache)("IC@" INTPTR_FORMAT ": set to clean", p2i(_call->instruction_address())); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif _call->set_destination_mt_safe(SharedRuntime::get_resolve_virtual_call_stub()); } @@ -219,6 +222,9 @@ void CompiledIC::set_to_monomorphic() { to_compiled ? 
"compiled" : "interpreter", method->print_value_string()); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif _call->set_destination_mt_safe(entry); } @@ -259,6 +265,9 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info) { log_trace(inlinecache)("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT, p2i(_call->instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry)); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif _call->set_destination_mt_safe(entry); assert(is_megamorphic(), "sanity check"); } @@ -323,6 +332,7 @@ void CompiledIC::verify() { // ---------------------------------------------------------------------------- void CompiledDirectCall::set_to_clean() { + REQUIRE_THREAD_WX_MODE_WRITE // in_use is unused but needed to match template function in nmethod assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call"); // Reset call site diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp index a8ef707978d31..02f82ec1678e6 100644 --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -33,6 +33,7 @@ #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "runtime/perfData.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/exceptions.hpp" PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = nullptr; @@ -67,6 +68,9 @@ void DependencyContext::init() { // deoptimization. // void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) { +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) { nmethod* nm = b->get_nmethod(); if (nm->is_marked_for_deoptimization()) { diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index ed6167dac225b..bcafe864add0a 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -491,6 +491,10 @@ const char* nmethod::state() const { void nmethod::set_deoptimized_done() { ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); if (_deoptimization_status != deoptimize_done) { // can't go backwards +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); + REQUIRE_THREAD_WX_MODE_WRITE +#endif Atomic::store(&_deoptimization_status, deoptimize_done); } } @@ -503,6 +507,10 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock"); assert(new_entry != nullptr,"Must be non null"); assert(new_entry->next() == nullptr, "Must be null"); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); + REQUIRE_THREAD_WX_MODE_WRITE +#endif for (;;) { ExceptionCache *ec = exception_cache(); @@ -550,6 +558,9 @@ void nmethod::clean_exception_cache() { // handshake operation. 
ExceptionCache* prev = nullptr; ExceptionCache* curr = exception_cache_acquire(); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif while (curr != nullptr) { ExceptionCache* next = curr->next(); @@ -810,6 +821,9 @@ static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from, } nmethod* nm = cb->as_nmethod(); if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif callsite->set_to_clean(); } } @@ -1150,6 +1164,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, #endif ) { + REQUIRE_THREAD_WX_MODE_WRITE assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); code_buffer->finalize_oop_references(method); // create nmethod @@ -1734,6 +1749,7 @@ void nmethod::print_nmethod(bool printmethod) { // Promote one word from an assembly-time handle to a live embedded oop. inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { + REQUIRE_THREAD_WX_MODE_WRITE if (handle == nullptr || // As a special case, IC oops are initialized to 1 or -1. handle == (jobject) Universe::non_oop_word()) { @@ -1771,10 +1787,17 @@ void nmethod::copy_values(GrowableArray* array) { } void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) { +#if INCLUDE_WX_NEW + Thread* current = Thread::current(); + auto _wx = WXLazyMark(current); +#endif // re-patch all oop-bearing instructions, just in case some oops moved RelocIterator iter(this, begin, end); while (iter.next()) { if (iter.type() == relocInfo::oop_type) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(current); +#endif oop_Relocation* reloc = iter.oop_reloc(); if (initialize_immediates && reloc->oop_is_immediate()) { oop* dest = reloc->oop_addr(); @@ -1855,6 +1878,10 @@ void nmethod::make_deoptimized() { ResourceMark rm; RelocIterator iter(this, oops_reloc_begin()); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + while (iter.next()) { switch (iter.type()) { @@ -1931,6 +1958,10 @@ void nmethod::verify_clean_inline_caches() { } void nmethod::mark_as_maybe_on_stack() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); + REQUIRE_THREAD_WX_MODE_WRITE +#endif Atomic::store(&_gc_epoch, CodeCache::gc_epoch()); } @@ -1952,6 +1983,7 @@ void nmethod::inc_decompile_count() { } bool nmethod::try_transition(signed char new_state_int) { + REQUIRE_THREAD_WX_MODE_WRITE signed char new_state = new_state_int; assert_lock_strong(NMethodState_lock); signed char old_state = _state; @@ -2008,6 +2040,12 @@ bool nmethod::make_not_entrant(const char* reason) { // This can be called while the system is already at a safepoint which is ok NoSafepointVerifier nsv; + // If is_unloading() needs write mode, leave it on, otherwise + // leave it off until needed. +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif + if (is_unloading()) { // If the nmethod is unloading, then it is already not entrant through // the nmethod entry barriers. No need to do anything; GC will unload it. @@ -2032,6 +2070,10 @@ bool nmethod::make_not_entrant(const char* reason) { return false; } +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + if (is_osr_method()) { // This logic is equivalent to the logic below for patching the // verified entry point of regular methods. 
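// REQUIRE_THREAD_WX_MODE_WRITE is used above as a bare statement with no trailing
// semicolon (e.g. "REQUIRE_THREAD_WX_MODE_WRITE Atomic::store(...)"), so it
// presumably expands either to a self-contained block or to nothing. A hypothetical
// definition consistent with that usage and with the new AssertWX develop flag from
// globals_bsd.hpp; the real macro lives in threadWXSetters.inline.hpp and may
// differ, and wx_mode() is an assumed per-thread accessor for the cached W^X state:

#if INCLUDE_WX_NEW && defined(ASSERT)
#define REQUIRE_THREAD_WX_MODE_WRITE                                          \
  {                                                                           \
    if (AssertWX) {                                                           \
      assert(Thread::current()->wx_mode() == WXWrite,                         \
             "thread must be in W^X write mode to patch code");               \
    }                                                                         \
  }
#else
#define REQUIRE_THREAD_WX_MODE_WRITE
#endif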
@@ -2138,6 +2180,9 @@ void nmethod::unlink() { void nmethod::purge(bool unregister_nmethod) { MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif // completely deallocate this method Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this)); @@ -2146,6 +2191,8 @@ void nmethod::purge(bool unregister_nmethod) { is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); + REQUIRE_THREAD_WX_MODE_WRITE + // We need to deallocate any ExceptionCache data. // Note that we do not need to grab the nmethod lock for this, it // better be thread safe if we're disposing of it! @@ -2253,6 +2300,9 @@ void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) { if (JvmtiExport::should_post_compiled_method_load()) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif // Only post unload events if load events are found. set_load_reported(); // If a JavaThread hasn't been passed in, let the Service thread @@ -2415,11 +2465,16 @@ bool nmethod::is_unloading() { state_is_unloading = IsUnloadingBehaviour::is_unloading(this); uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + // Note that if an nmethod has dead oops, everyone will agree that the // nmethod is_unloading. However, the is_cold heuristics can yield // different outcomes, so we guard the computed result with a CAS // to ensure all threads have a shared view of whether an nmethod // is_unloading or not. + REQUIRE_THREAD_WX_MODE_WRITE uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed); if (found_state == state) { @@ -2482,6 +2537,8 @@ void nmethod::oops_do(OopClosure* f, bool allow_dead) { } void nmethod::follow_nmethod(OopIterateClosure* cl) { + REQUIRE_THREAD_WX_MODE_WRITE + // Process oops in the nmethod oops_do(cl); @@ -2517,10 +2574,15 @@ bool nmethod::oops_do_try_claim() { bool nmethod::oops_do_try_claim_weak_request() { assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); - if ((_oops_do_mark_link == nullptr) && - (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) { - oops_do_log_change("oops_do, mark weak request"); - return true; + if (_oops_do_mark_link == nullptr) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); + REQUIRE_THREAD_WX_MODE_WRITE +#endif + if (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag))) { + oops_do_log_change("oops_do, mark weak request"); + return true; + } } return false; } @@ -2666,6 +2728,10 @@ void nmethod::oops_do_marking_epilogue() { nmethod* next = _oops_do_mark_nmethods; _oops_do_mark_nmethods = nullptr; if (next != nullptr) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); + REQUIRE_THREAD_WX_MODE_WRITE +#endif nmethod* cur; do { cur = next; diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 2ce6e5cd3618d..a067d460a09a1 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -656,10 +656,12 @@ class nmethod : public CodeBlob { void print_dependencies_on(outputStream* out) PRODUCT_RETURN; void flush_dependencies(); +#define WX_W REQUIRE_THREAD_WX_MODE_WRITE + template T* gc_data() 
const { return reinterpret_cast(_gc_data); } template - void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast(gc_data); } + void set_gc_data(T* gc_data) { WX_W _gc_data = reinterpret_cast(gc_data); } bool has_unsafe_access() const { return _has_unsafe_access; } void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } @@ -800,7 +802,7 @@ class nmethod : public CodeBlob { int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } nmethod* osr_link() const { return _osr_link; } - void set_osr_link(nmethod *n) { _osr_link = n; } + void set_osr_link(nmethod *n) { REQUIRE_THREAD_WX_MODE_WRITE _osr_link = n; } void invalidate_osr_method(); int num_stack_arg_slots(bool rounded = true) const { @@ -877,7 +879,7 @@ class nmethod : public CodeBlob { // used by jvmti to track if the load events has been reported bool load_reported() const { return _load_reported; } - void set_load_reported() { _load_reported = true; } + void set_load_reported() { REQUIRE_THREAD_WX_MODE_WRITE _load_reported = true; } public: // ScopeDesc retrieval operation diff --git a/src/hotspot/share/code/nmethod.inline.hpp b/src/hotspot/share/code/nmethod.inline.hpp index 1e556b6825099..34c17675f3b61 100644 --- a/src/hotspot/share/code/nmethod.inline.hpp +++ b/src/hotspot/share/code/nmethod.inline.hpp @@ -30,6 +30,7 @@ #include "code/nativeInst.hpp" #include "runtime/atomic.hpp" #include "runtime/frame.hpp" +#include "runtime/threadWXSetters.inline.hpp" inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); } @@ -58,5 +59,4 @@ address ExceptionCache::handler_at(int index) { // increment_count is only called under lock, but there may be concurrent readers. 
inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); } - #endif // SHARE_CODE_NMETHOD_INLINE_HPP diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index 81fa83dbe8d28..f709d12027e5f 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -589,6 +589,7 @@ oop oop_Relocation::oop_value() { void oop_Relocation::fix_oop_relocation() { + REQUIRE_THREAD_WX_MODE_WRITE if (!oop_is_immediate()) { // get the oop from the pool, and re-insert it into the instruction: set_value(value()); diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp index 00826f820366d..8ce4d9f037a28 100644 --- a/src/hotspot/share/code/vtableStubs.cpp +++ b/src/hotspot/share/code/vtableStubs.cpp @@ -212,6 +212,9 @@ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) { MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag); s = lookup(is_vtable_stub, vtable_index); if (s == nullptr) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif if (is_vtable_stub) { s = create_vtable_stub(vtable_index); } else { diff --git a/src/hotspot/share/code/vtableStubs.hpp b/src/hotspot/share/code/vtableStubs.hpp index 06acd8f25b921..8dd09ff31fd4f 100644 --- a/src/hotspot/share/code/vtableStubs.hpp +++ b/src/hotspot/share/code/vtableStubs.hpp @@ -28,6 +28,7 @@ #include "asm/macroAssembler.hpp" #include "code/vmreg.hpp" #include "memory/allStatic.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/checkedCast.hpp" // A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables @@ -140,7 +141,7 @@ class VtableStub { VtableStub(bool is_vtable_stub, short index) : _next(nullptr), _index(index), _ame_offset(-1), _npe_offset(-1), - _type(is_vtable_stub ? Type::vtable_stub : Type::itable_stub) {} + _type(is_vtable_stub ? Type::vtable_stub : Type::itable_stub) { REQUIRE_THREAD_WX_MODE_WRITE } VtableStub* next() const { return _next; } int index() const { return _index; } static VMReg receiver_location() { return _receiver_location; } diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 168679feb9ba1..100dc477d145a 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -1899,6 +1899,12 @@ void CompileBroker::compiler_thread_loop() { log->end_elem(); } + // If we switch to WXWrite (C1/C2), stay there, otherwise + // remain in WXExec (JVMCI). +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(thread); +#endif + // If compiler thread/runtime initialization fails, exit the compiler thread if (!init_compiler_runtime()) { return; diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp index 41eb0a24b625a..8ad8274475afa 100644 --- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp @@ -105,6 +105,10 @@ bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { return true; } +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + // If the nmethod is the only thing pointing to the oops, and we are using a // SATB GC, then it is important that this code marks them live. 
// Also, with concurrent GC, it is possible that frames in continuation stack @@ -167,7 +171,7 @@ void BarrierSetNMethod::arm_all_nmethods() { int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) { // Enable WXWrite: the function is called directly from nmethod_entry_barrier // stub. - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); address return_address = *return_address_ptr; AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address)); diff --git a/src/hotspot/share/gc/shared/concurrentGCThread.cpp b/src/hotspot/share/gc/shared/concurrentGCThread.cpp index 7d0cecde528bf..6bec191ee1239 100644 --- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp +++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp @@ -28,6 +28,7 @@ #include "runtime/jniHandles.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" +#include "runtime/threadWXSetters.inline.hpp" ConcurrentGCThread::ConcurrentGCThread() : _should_terminate(false), @@ -44,7 +45,17 @@ void ConcurrentGCThread::run() { // Wait for initialization to complete wait_init_completed(); - run_service(); + { +#if INCLUDE_WX_NEW +#if 0 + auto _wx = WXWriteMark(this); +#endif +#if 1 + auto _wx = WXLazyMark(this); +#endif +#endif + run_service(); + } // Signal thread has terminated MonitorLocker ml(Terminator_lock); diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.cpp b/src/hotspot/share/gc/shared/scavengableNMethods.cpp index 887ac5f43a28a..c03b7570c1633 100644 --- a/src/hotspot/share/gc/shared/scavengableNMethods.cpp +++ b/src/hotspot/share/gc/shared/scavengableNMethods.cpp @@ -27,6 +27,7 @@ #include "gc/shared/scavengableNMethods.hpp" #include "gc/shared/scavengableNMethodsData.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/debug.hpp" static ScavengableNMethodsData gc_data(nmethod* nm) { @@ -131,6 +132,16 @@ bool ScavengableNMethods::has_scavengable_oops(nmethod* nm) { void ScavengableNMethods::nmethods_do_and_prune(NMethodToOopClosure* cl) { assert_locked_or_safepoint(CodeCache_lock); +#if INCLUDE_WX_NEW + Thread* current = Thread::current(); + auto _wx = WXLazyMark(current); + // FIXME - need something like MaybeWrite mode, changes to write mode, but doesn't + // assert if no writes happen + // or conditional Write scope? + bool cond = _head != nullptr && (DEBUG_ONLY(true /* clear_marked() */ ||) cl != nullptr); + auto _wx_w = WXConditionalWriteMark(current, cond); +#endif + DEBUG_ONLY(mark_on_list_nmethods()); nmethod* prev = nullptr; @@ -166,13 +177,18 @@ void ScavengableNMethods::prune_nmethods_not_into_young() { void ScavengableNMethods::prune_unlinked_nmethods() { assert_locked_or_safepoint(CodeCache_lock); +#if INCLUDE_WX_NEW + Thread* current = Thread::current(); + auto _wx_w = WXSpeculativeWriteMark(current, (_head != nullptr)); +#endif + DEBUG_ONLY(mark_on_list_nmethods()); nmethod* prev = nullptr; nmethod* cur = _head; while (cur != nullptr) { ScavengableNMethodsData data = gc_data(cur); - DEBUG_ONLY(data.clear_marked()); + DEBUG_ONLY(data.clear_marked()); // W^X=W assert(data.on_list(), "else shouldn't be on this list"); nmethod* const next = data.next(); @@ -215,12 +231,24 @@ void ScavengableNMethods::unlist_nmethod(nmethod* nm, nmethod* prev) { #ifndef PRODUCT // Temporarily mark nmethods that are claimed to be on the scavenge list. 
void ScavengableNMethods::mark_on_list_nmethods() { +#if INCLUDE_WX_NEW + Thread* current = Thread::current(); +#if 0 + auto _wx = WXLazyMark(current); +#endif + auto _wx_w = WXWriteMark(current); REQUIRE_THREAD_WX_MODE_WRITE // FIXME +#endif NMethodIterator iter(NMethodIterator::all); while(iter.next()) { nmethod* nm = iter.method(); ScavengableNMethodsData data = gc_data(nm); assert(data.not_marked(), "clean state"); if (data.on_list()) { +#if 0 +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(current); +#endif +#endif data.set_marked(); } } diff --git a/src/hotspot/share/gc/shared/workerThread.cpp b/src/hotspot/share/gc/shared/workerThread.cpp index ffeb3a4194b7b..2117147b175dd 100644 --- a/src/hotspot/share/gc/shared/workerThread.cpp +++ b/src/hotspot/share/gc/shared/workerThread.cpp @@ -31,6 +31,7 @@ #include "runtime/java.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" +#include "runtime/threadWXSetters.inline.hpp" WorkerTaskDispatcher::WorkerTaskDispatcher() : _task(nullptr), @@ -196,6 +197,10 @@ WorkerThread::WorkerThread(const char* name_prefix, uint name_suffix, WorkerTask void WorkerThread::run() { os::set_priority(this, NearMaxPriority); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(this); +#endif + while (true) { _dispatcher->worker_run_task(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp index c6e6108fda81c..755c9772bb222 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp @@ -51,7 +51,7 @@ bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { return true; } - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());) + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());) if (nm->is_unloading()) { // We don't need to take the lock when unlinking nmethods from diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp index 0d6be2b789f77..fbb2e0cc58990 100644 --- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp @@ -55,7 +55,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { return true; } - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); if (nm->is_unloading()) { log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (unloading)", p2i(nm)); diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index bf592c20fa296..87940dac1ab06 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -257,7 +257,7 @@ void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) { ZNMethod::nmethod_oops_do_inner(nm, cl); } -void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { +void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl, bool fix_relocations) { // Process oops table { oop* const begin = nm->oops_begin(); @@ -282,7 +282,7 @@ void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { } // Process non-immediate oops - if (data->has_non_immediate_oops()) { + if (fix_relocations && data->has_non_immediate_oops()) { nm->fix_oop_relocations(); } } diff --git a/src/hotspot/share/gc/z/zNMethod.hpp b/src/hotspot/share/gc/z/zNMethod.hpp index 5d797f3fd5542..2821afa02e04b 100644 --- a/src/hotspot/share/gc/z/zNMethod.hpp +++ 
b/src/hotspot/share/gc/z/zNMethod.hpp @@ -58,7 +58,7 @@ class ZNMethod : public AllStatic { static void nmethod_patch_barriers(nmethod* nm); static void nmethod_oops_do(nmethod* nm, OopClosure* cl); - static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl); + static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl, bool fix_relocations = true); static void nmethods_do_begin(bool secondary); static void nmethods_do_end(bool secondary); diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp index 3dc7ecd3edc41..ebbb041efbb80 100644 --- a/src/hotspot/share/gc/z/zUnload.cpp +++ b/src/hotspot/share/gc/z/zUnload.cpp @@ -81,7 +81,7 @@ class ZIsUnloadingBehaviour : public IsUnloadingBehaviour { return false; } ZIsUnloadingOopClosure cl(nm); - ZNMethod::nmethod_oops_do_inner(nm, &cl); + ZNMethod::nmethod_oops_do_inner(nm, &cl, false /* fix_relocations */); return cl.is_unloading(); } }; diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index 0f16b17e99e00..47b1a9cc8011f 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -72,6 +72,7 @@ #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.inline.hpp" #include "runtime/threadCritical.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/align.hpp" #include "utilities/checkedCast.hpp" #include "utilities/copy.hpp" @@ -1017,7 +1018,7 @@ JRT_END nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* current, address branch_bcp) { // Enable WXWrite: the function is called directly by interpreter. - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, current)); // frequency_counter_overflow_inner can throw async exception. 
nmethod* nm = frequency_counter_overflow_inner(current, branch_bcp); @@ -1339,7 +1340,11 @@ void SignatureHandlerLibrary::add(const methodHandle& method) { if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::fp_max_size_of_parameters) { // use customized signature handler MutexLocker mu(SignatureHandlerLibrary_lock); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif // make sure data structure is initialized + // may require WXWrite initialize(); // lookup method signature's fingerprint uint64_t fingerprint = Fingerprinter(method).fingerprint(); @@ -1350,6 +1355,9 @@ void SignatureHandlerLibrary::add(const methodHandle& method) { if (handler_index < 0) { ResourceMark rm; ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer; +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif CodeBuffer buffer((address)(_buffer + align_offset), checked_cast(SignatureHandlerLibrary::buffer_size - align_offset)); InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint); diff --git a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp index 1d3be066941a8..9913518006691 100644 --- a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp +++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp @@ -53,6 +53,9 @@ static const BasicType types[Interpreter::number_of_result_handlers] = { }; void TemplateInterpreterGenerator::generate_all() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif { CodeletMark cm(_masm, "slow signature handler"); AbstractInterpreter::_slow_signature_handler = generate_slow_signature_handler(); } diff --git a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp index 31dd55f702d2d..43ece5ab2045c 100644 --- a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp +++ b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp @@ -156,7 +156,7 @@ void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, } ResourceMark rm(THREAD); // WXWrite is needed before entering the vm below and in callee methods. 
- MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, THREAD)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, THREAD)); jclass* const classes = create_classes_array(classes_count, CHECK); assert(classes != nullptr, "invariant"); for (jint i = 0; i < classes_count; i++) { diff --git a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp index 18a258cad75dd..4e0bf8127f835 100644 --- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp +++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp @@ -105,7 +105,7 @@ NO_TRANSITION(void, jfr_set_enabled(JNIEnv* env, jclass jvm, jlong event_type_id JfrEventSetting::set_enabled(event_type_id, JNI_TRUE == enabled); if (EventOldObjectSample::eventId == event_type_id) { JavaThread* thread = JavaThread::thread_from_jni_environment(env); - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); ThreadInVMfromNative transition(thread); if (JNI_TRUE == enabled) { LeakProfiler::start(JfrOptionSet::old_object_queue_size()); diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp index 07fe019c9af74..5f024521ca2d7 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp @@ -702,7 +702,7 @@ void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks, bool emi JfrRotationLock lock; // Take the rotation lock before the transition. JavaThread* current_thread = JavaThread::current(); - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); ThreadInVMfromNative transition(current_thread); LeakProfiler::emit_events(cutoff_ticks, emit_all, skip_bfs); } diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp index ca54b297b38d9..580dbe931906d 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp @@ -290,7 +290,7 @@ void JfrStorage::register_full(BufferPtr buffer, Thread* thread) { JavaThread* jt = JavaThread::cast(thread); if (jt->thread_state() == _thread_in_native) { // Transition java thread to vm so it can issue a notify. - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, jt)); ThreadInVMfromNative transition(jt); _post_box.post(MSG_FULLBUFFER); return; diff --git a/src/hotspot/share/jfr/support/jfrIntrinsics.cpp b/src/hotspot/share/jfr/support/jfrIntrinsics.cpp index 54208b4aebfd7..6c4b076e07898 100644 --- a/src/hotspot/share/jfr/support/jfrIntrinsics.cpp +++ b/src/hotspot/share/jfr/support/jfrIntrinsics.cpp @@ -44,7 +44,7 @@ void* JfrIntrinsicSupport::write_checkpoint(JavaThread* jt) { assert(JfrThreadLocal::is_vthread(jt), "invariant"); const traceid vthread_tid = JfrThreadLocal::vthread_id(jt); // Transition before reading the epoch generation, now as _thread_in_vm. 
- MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, jt)); ThreadInVMfromJava transition(jt); JfrThreadLocal::set_vthread_epoch(jt, vthread_tid, ThreadIdAccess::current_epoch()); return JfrJavaEventWriter::event_writer(jt); @@ -52,7 +52,7 @@ void* JfrIntrinsicSupport::write_checkpoint(JavaThread* jt) { void* JfrIntrinsicSupport::return_lease(JavaThread* jt) { DEBUG_ONLY(assert_precondition(jt);) - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, jt)); ThreadInVMfromJava transition(jt); assert(jt->jfr_thread_local()->has_java_event_writer(), "invariant"); assert(jt->jfr_thread_local()->shelved_buffer() != nullptr, "invariant"); diff --git a/src/hotspot/share/jfr/support/jfrNativeLibraryLoadEvent.cpp b/src/hotspot/share/jfr/support/jfrNativeLibraryLoadEvent.cpp index 2958c1130747d..25af581a347b3 100644 --- a/src/hotspot/share/jfr/support/jfrNativeLibraryLoadEvent.cpp +++ b/src/hotspot/share/jfr/support/jfrNativeLibraryLoadEvent.cpp @@ -116,7 +116,7 @@ static void commit(const HelperType& helper) { JavaThread* jt = JavaThread::cast(thread); if (jt->thread_state() == _thread_in_native) { // For a JavaThread to take a JFR stacktrace, it must be in _thread_in_vm. Can safepoint here. - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, jt)); ThreadInVMfromNative transition(jt); event.commit(); return; diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp index 3a9fbc54bf941..df0574024f2f4 100644 --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp @@ -40,6 +40,7 @@ #include "runtime/jniHandles.inline.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/align.hpp" // frequently used constants @@ -718,6 +719,10 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, int speculations_len, JVMCI_TRAPS) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(JavaThread::current()); +#endif + JavaThread* thread = JavaThread::current(); HotSpotCompiledCodeStream* stream = new HotSpotCompiledCodeStream(thread, (const u1*) compiled_code_buffer, with_type_info, object_pool); diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index a4a8f3bb1d037..35323dde55342 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -169,7 +169,7 @@ Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) { // Bring the JVMCI compiler thread into the VM state. #define JVMCI_VM_ENTRY_MARK \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ ThreadInVMfromNative __tiv(thread); \ HandleMarkCleaner __hm(thread); \ JavaThread* THREAD = thread; \ diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp index 5a0efef0bd32b..1c5472dc9320f 100644 --- a/src/hotspot/share/jvmci/jvmciEnv.cpp +++ b/src/hotspot/share/jvmci/jvmciEnv.cpp @@ -939,6 +939,9 @@ void JVMCIEnv::fthrow_error(const char* file, int line, const char* format, ...) 
char msg[max_msg_size]; os::vsnprintf(msg, max_msg_size, format, ap); va_end(ap); +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(JavaThread::current()); +#endif JavaThread* THREAD = JavaThread::current(); if (is_hotspot()) { Handle h_loader; @@ -1781,7 +1784,11 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JV // Do not clear the address field here as the Java code may still // want to later call this method with deoptimize == true. That requires // the address field to still be pointing at the nmethod. - } else { + } else { +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(thread); +#endif + // Deoptimize the nmethod immediately. DeoptimizationScope deopt_scope; deopt_scope.mark(nm); diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index 6f1fa52576e41..0b8eaebbc989a 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -1968,6 +1968,12 @@ static bool after_compiler_upcall(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, c } void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, const methodHandle& method, int entry_bci) { + // Compiler thread loop runs as WXLazyWrite. If we need to switch to WXExec here, + // then it means we are mixing C1/C2 tasks with JVMCI tasks in the same thread. +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(JavaThread::current()); +#endif + JVMCI_EXCEPTION_CONTEXT JVMCICompileState* compile_state = JVMCIENV->compile_state(); diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp index d61e2461e8da6..10e755640209f 100644 --- a/src/hotspot/share/logging/logTag.hpp +++ b/src/hotspot/share/logging/logTag.hpp @@ -218,7 +218,8 @@ class outputStream; LOG_TAG(vmoperation) \ LOG_TAG(vmthread) \ LOG_TAG(vtables) \ - LOG_TAG(vtablestubs) + LOG_TAG(vtablestubs) \ + NOT_PRODUCT(WX_ONLY(LOG_TAG(wx))) \ #define PREFIX_LOG_TAG(T) (LogTag::_##T) diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp index bcb9d2e6114b9..f95511f46fe64 100644 --- a/src/hotspot/share/memory/heap.cpp +++ b/src/hotspot/share/memory/heap.cpp @@ -162,6 +162,7 @@ void CodeHeap::mark_segmap_as_used(size_t beg, size_t end, bool is_FreeBlock_joi } void CodeHeap::invalidate(size_t beg, size_t end, size_t hdr_size) { + REQUIRE_THREAD_WX_MODE_WRITE #ifndef PRODUCT // Fill the given range with some bad value. // length is expected to be in segment_size units. 
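// Layering note: the low layers (CodeCache::allocate(), CodeHeap::invalidate() and
// CodeHeap::allocate() in this file) now only assert write mode via
// REQUIRE_THREAD_WX_MODE_WRITE; the scope that actually flips the mode is taken at
// the entry points (BufferBlob::create(), CodeCache::free(), nmethod creation, and
// so on, as the earlier hunks show). A hedged sketch of the resulting calling
// convention, assuming the guard nests safely and restores the previous mode:

void allocate_blob_under_write_scope_sketch(const char* name, uint buffer_size) {
#if INCLUDE_WX_NEW
  auto _wx = WXWriteMark(Thread::current());   // the entry point owns the W^X scope
#endif
  // ... take CodeCache_lock, then call CodeCache::allocate()/CodeHeap::allocate(),
  //     which merely check REQUIRE_THREAD_WX_MODE_WRITE and rely on the scope above.
}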
@@ -269,6 +270,7 @@ bool CodeHeap::expand_by(size_t size) { void* CodeHeap::allocate(size_t instance_size) { + REQUIRE_THREAD_WX_MODE_WRITE size_t number_of_segments = size_to_segments(instance_size + header_size()); assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); assert_locked_or_safepoint(CodeCache_lock); diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 00ef75d8486c5..630475b09103b 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -1077,6 +1077,9 @@ void InstanceKlass::rewrite_class(TRAPS) { void InstanceKlass::link_methods(TRAPS) { PerfTraceTime timer(ClassLoader::perf_ik_link_methods_time()); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); // link_method --> make_adapters +#endif int len = methods()->length(); for (int i = len-1; i >= 0; i--) { methodHandle m(THREAD, methods()->at(i)); @@ -3479,6 +3482,9 @@ void InstanceKlass::adjust_default_methods(bool* trace_name_printed) { // On-stack replacement stuff void InstanceKlass::add_osr_nmethod(nmethod* n) { assert_lock_strong(NMethodState_lock); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif #ifndef PRODUCT nmethod* prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true); assert(prev == nullptr || !prev->is_in_use() COMPILER2_PRESENT(|| StressRecompilation), @@ -3504,6 +3510,9 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) { bool InstanceKlass::remove_osr_nmethod(nmethod* n) { // This is a short non-blocking critical region, so the no safepoint check is ok. ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif assert(n->is_osr_method(), "wrong kind of nmethod"); nmethod* last = nullptr; nmethod* cur = osr_nmethods_head(); @@ -3547,6 +3556,9 @@ int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Met ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); nmethod* osr = osr_nmethods_head(); int found = 0; +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(JavaThread::current()); +#endif while (osr != nullptr) { assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); if (osr->method() == m) { diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp index 272692446ae61..d3649fb45884a 100644 --- a/src/hotspot/share/opto/c2compiler.cpp +++ b/src/hotspot/share/opto/c2compiler.cpp @@ -34,6 +34,7 @@ #include "opto/runtime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/globals_extension.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/macros.hpp" @@ -63,6 +64,9 @@ const char* C2Compiler::retry_no_superword() { void compiler_stubs_init(bool in_compiler_thread); bool C2Compiler::init_c2_runtime() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(JavaThread::current()); +#endif #ifdef ASSERT if (!AlignVector && VerifyAlignVector) { @@ -127,6 +131,10 @@ void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci, boo bool do_locks_coarsening = EliminateLocks; bool do_superword = UseSuperWord; +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(JavaThread::current()); +#endif + while (!env->failing()) { ResourceMark rm; // Attempt to compile while subsuming loads into machine instructions. 
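// Compiler threads under the new scheme: the outer loop sits in a WXLazyMark
// (compiler_thread_loop() and C2Compiler::compile_method() above), so a C1/C2 task
// drifts into write mode and stays there, while a JVMCI task, whose compiler is
// itself Java code, switches to exec mode instead (WXExecMark in
// JVMCIRuntime::compile_method()) and only needs write mode when the result is
// installed (CodeInstaller::install()). A hedged sketch of that shape, assuming the
// guards nest and restore as scopes unwind:

void compiler_task_sketch(bool is_jvmci, JavaThread* thread) {
  auto _wx = WXLazyMark(thread);            // compiler_thread_loop(): no eager flip
  if (is_jvmci) {
    auto _wx_exec = WXExecMark(thread);     // the JVMCI compiler runs as Java code
    // ... call into the Java-side compiler; installing the result later takes its
    //     own WXWriteMark (CodeInstaller::install() above) ...
  } else {
    // C2 keeps the lazy scope through parse/optimize; only Code_Gen()'s PhaseOutput
    // takes a WXWriteMark (next hunk). C1 takes the write mark earlier, around
    // build_hir() (c1_Compilation.cpp above).
    auto _wx_write = WXWriteMark(thread);   // emit into the code cache
    // ... PhaseOutput / nmethod::new_nmethod ...
  }
}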
@@ -138,6 +146,11 @@ void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci, boo do_locks_coarsening, do_superword, install_code); +#if 0 +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(JavaThread::current()); +#endif +#endif Compile C(env, target, entry_bci, options, directive); // Check result and retry if appropriate. diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index 10846a326262a..6fbe7d45b4eba 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -3095,6 +3095,9 @@ void Compile::Code_Gen() { { TracePhase tp(_t_output); PhaseOutput output; +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif output.Output(); if (failing()) return; output.install(); diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index 228244bbd0528..8a64620e943ce 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -3425,6 +3425,9 @@ void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInt void quicken_jni_functions() { // Replace GetField with fast versions if (UseFastJNIAccessors && !VerifyJNIFields && !CheckJNICalls) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif address func; func = JNI_FastGetField::generate_fast_get_boolean_field(); if (func != (address)-1) { @@ -3630,7 +3633,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving. ThreadStateTransition::transition_from_vm(thread, _thread_in_native); - MACOS_AARCH64_ONLY(thread->enable_wx(WXExec)); + WX_OLD_ONLY(thread->enable_wx(WXExec)); } else { // If create_vm exits because of a pending exception, exit with that // exception. In the future when we figure out how to reclaim memory, @@ -3733,7 +3736,7 @@ static jint JNICALL jni_DestroyJavaVM_inner(JavaVM *vm) { // Since this is not a JVM_ENTRY we have to set the thread state manually before entering. // We are going to VM, change W^X state to the expected one. - MACOS_AARCH64_ONLY(WXMode oldmode = thread->enable_wx(WXWrite)); + WX_OLD_ONLY(WXMode oldmode = thread->enable_wx(WXWrite)); ThreadStateTransition::transition_from_native(thread, _thread_in_vm); Threads::destroy_vm(); @@ -3786,11 +3789,11 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae // Set correct safepoint info. The thread is going to call into Java when // initializing the Java level thread object. Hence, the correct state must // be set in order for the Safepoint code to deal with it correctly. + thread->initialize_thread_current(); + WX_ONLY(thread->init_wx()); thread->set_thread_state(_thread_in_vm); thread->record_stack_base_and_size(); - thread->initialize_thread_current(); thread->register_thread_stack_with_NMT(); - MACOS_AARCH64_ONLY(thread->init_wx()); if (!os::create_attached_thread(thread)) { thread->unregister_thread_stack_with_NMT(); @@ -3872,7 +3875,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae // Now leaving the VM, so change thread_state. This is normally automatically taken care // of in the JVM_ENTRY. But in this situation we have to do it manually. 
ThreadStateTransition::transition_from_vm(thread, _thread_in_native); - MACOS_AARCH64_ONLY(thread->enable_wx(WXExec)); + WX_OLD_ONLY(thread->enable_wx(WXExec)); // Perform any platform dependent FPU setup os::setup_fpu(); @@ -3927,7 +3930,7 @@ jint JNICALL jni_DetachCurrentThread(JavaVM *vm) { } // We are going to VM, change W^X state to the expected one. - MACOS_AARCH64_ONLY(thread->enable_wx(WXWrite)); + WX_OLD_ONLY(thread->enable_wx(WXWrite)); // Safepoint support. Have to do call-back to safepoint code, if in the // middle of a safepoint operation @@ -3948,7 +3951,7 @@ jint JNICALL jni_DetachCurrentThread(JavaVM *vm) { // Go to the execute mode, the initial state of the thread on creation. // Use os interface as the thread is not a JavaThread anymore. - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXExec)); HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK); return JNI_OK; diff --git a/src/hotspot/share/prims/jniCheck.cpp b/src/hotspot/share/prims/jniCheck.cpp index 14d9c36c9fd51..c63cc9feef2ac 100644 --- a/src/hotspot/share/prims/jniCheck.cpp +++ b/src/hotspot/share/prims/jniCheck.cpp @@ -100,7 +100,7 @@ extern "C" { \ if (env != xenv) { \ NativeReportJNIFatalError(thr, warn_wrong_jnienv); \ } \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thr)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thr)); \ VM_ENTRY_BASE(result_type, header, thr) diff --git a/src/hotspot/share/prims/jvmtiEnter.xsl b/src/hotspot/share/prims/jvmtiEnter.xsl index d1274158be465..3e2e663c3741a 100644 --- a/src/hotspot/share/prims/jvmtiEnter.xsl +++ b/src/hotspot/share/prims/jvmtiEnter.xsl @@ -436,7 +436,7 @@ struct jvmtiInterface_1_ jvmti JavaThread* current_thread = JavaThread::cast(this_thread); - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); ThreadInVMfromNative __tiv(current_thread); diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index 6a1f451bb746d..02842b8a71b66 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -196,7 +196,7 @@ JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) { // other than the current thread is required we need to transition // from native so as to resolve the jthread. 
- MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); ThreadInVMfromNative __tiv(current_thread); VM_ENTRY_BASE(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread) DEBUG_ONLY(VMNativeEntryWrapper __vew;) @@ -3556,7 +3556,7 @@ JvmtiEnv::RawMonitorEnter(JvmtiRawMonitor * rmonitor) { } else { Thread* thread = Thread::current(); // 8266889: raw_enter changes Java thread state, needs WXWrite - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); rmonitor->raw_enter(thread); } return JVMTI_ERROR_NONE; @@ -3590,7 +3590,7 @@ jvmtiError JvmtiEnv::RawMonitorWait(JvmtiRawMonitor * rmonitor, jlong millis) { Thread* thread = Thread::current(); // 8266889: raw_wait changes Java thread state, needs WXWrite - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); int r = rmonitor->raw_wait(millis, thread); switch (r) { diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index 13822f73f772e..99b9acf2bc876 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -381,7 +381,7 @@ JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) { if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) { JavaThread* current_thread = JavaThread::current(); // transition code: native to VM - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); ThreadInVMfromNative __tiv(current_thread); VM_ENTRY_BASE(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread) DEBUG_ONLY(VMNativeEntryWrapper __vew;) diff --git a/src/hotspot/share/prims/jvmtiExtensions.cpp b/src/hotspot/share/prims/jvmtiExtensions.cpp index 603d62eff853d..819448b6b968e 100644 --- a/src/hotspot/share/prims/jvmtiExtensions.cpp +++ b/src/hotspot/share/prims/jvmtiExtensions.cpp @@ -133,7 +133,7 @@ static jvmtiError JNICALL GetCarrierThread(const jvmtiEnv* env, ...) { return JVMTI_ERROR_NULL_POINTER; } - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current_thread)); ThreadInVMfromNative tiv(current_thread); JvmtiVTMSTransitionDisabler disabler; diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index 0059636099ed7..9f469fcf0e678 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -913,6 +913,9 @@ void JvmtiDeferredEventQueue::post(JvmtiEnv* env) { } void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() { +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) { node->event().run_nmethod_entry_barriers(); } diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.cpp b/src/hotspot/share/prims/jvmtiRawMonitor.cpp index a1655a7a05eac..6da5cbaca3bca 100644 --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp @@ -43,7 +43,10 @@ void JvmtiPendingMonitors::transition_raw_monitors() { "Java thread has not been created yet or more than one java thread " "is running. 
Raw monitor transition will not work"); JavaThread* current_java_thread = JavaThread::current(); - { + if (count() > 0) { +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(Thread::current()); +#endif ThreadToNativeFromVM ttnfvm(current_java_thread); for (int i = 0; i < count(); i++) { JvmtiRawMonitor* rmonitor = monitors()->at(i); diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp index 1ab5d6ab7f7ba..f8bc886e2ea42 100644 --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -52,6 +52,7 @@ #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jniHandles.inline.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "runtime/timerTrace.hpp" #include "runtime/reflection.hpp" #include "runtime/safepointVerifiers.hpp" @@ -83,6 +84,9 @@ MethodHandlesAdapterBlob* MethodHandles::_adapter_code = nullptr; * failed and true otherwise. */ void MethodHandles::generate_adapters() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif assert(vmClasses::MethodHandle_klass() != nullptr, "should be present"); assert(_adapter_code == nullptr, "generate only once"); diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp index a6300e81468b4..d54e7755c2079 100644 --- a/src/hotspot/share/prims/unsafe.cpp +++ b/src/hotspot/share/prims/unsafe.cpp @@ -393,7 +393,7 @@ UNSAFE_ENTRY_SCOPED(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject { GuardUnsafeAccess guard(thread); if (StubRoutines::unsafe_setmemory() != nullptr) { - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, thread)); + WX_OLD_ONLY(ThreadWXEnable wx(WXExec, thread)); StubRoutines::UnsafeSetMemory_stub()(p, sz, value); } else { Copy::fill_to_memory_atomic(p, sz, value); @@ -412,7 +412,7 @@ UNSAFE_ENTRY_SCOPED(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobjec { GuardUnsafeAccess guard(thread); if (StubRoutines::unsafe_arraycopy() != nullptr) { - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, thread)); + WX_OLD_ONLY(ThreadWXEnable wx(WXExec, thread)); StubRoutines::UnsafeArrayCopy_stub()(src, dst, sz); } else { Copy::conjoint_memory_atomic(src, dst, sz); @@ -444,14 +444,14 @@ UNSAFE_LEAF (void, Unsafe_WriteBack0(JNIEnv *env, jobject unsafe, jlong line)) { } #endif - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current())); + WX_OLD_ONLY(ThreadWXEnable wx(WXExec, Thread::current())); assert(StubRoutines::data_cache_writeback() != nullptr, "sanity"); (StubRoutines::DataCacheWriteback_stub())(addr_from_java(line)); } UNSAFE_END static void doWriteBackSync0(bool is_pre) { - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current())); + WX_OLD_ONLY(ThreadWXEnable wx(WXExec, Thread::current())); assert(StubRoutines::data_cache_writeback_sync() != nullptr, "sanity"); (StubRoutines::DataCacheWritebackSync_stub())(is_pre); } diff --git a/src/hotspot/share/prims/upcallLinker.cpp b/src/hotspot/share/prims/upcallLinker.cpp index bc6a56dab055b..de2289ebd2b58 100644 --- a/src/hotspot/share/prims/upcallLinker.cpp +++ b/src/hotspot/share/prims/upcallLinker.cpp @@ -85,7 +85,7 @@ JavaThread* UpcallLinker::on_entry(UpcallStub::FrameData* context) { // The call to transition_from_native below contains a safepoint check // which needs the code cache to be writable. - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, thread)); // After this, we are officially in Java Code. 
This needs to be done before we change any of the thread local // info, since we cannot find oops before the new information is set up completely. diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 2e277ffadabd0..70918091e402d 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -842,6 +842,9 @@ WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jbool { MutexLocker mu(Compile_lock); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(thread); +#endif if (is_osr) { result += mh->method_holder()->mark_osr_nmethods(&deopt_scope, mh()); } else { @@ -1645,6 +1648,9 @@ CodeBlob* WhiteBox::allocate_code_blob(int size, CodeBlobType blob_type) { } { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type); if (blob != nullptr) { ::new (blob) BufferBlob("WB::DummyBlob", CodeBlobKind::Buffer, full_size); diff --git a/src/hotspot/share/prims/whitebox.inline.hpp b/src/hotspot/share/prims/whitebox.inline.hpp index f46b36b84eee0..b47c67e206dbf 100644 --- a/src/hotspot/share/prims/whitebox.inline.hpp +++ b/src/hotspot/share/prims/whitebox.inline.hpp @@ -33,7 +33,7 @@ #define WB_ENTRY(result_type, header) JNI_ENTRY(result_type, header) \ ClearPendingJniExcCheck _clearCheck(env); \ - MACOS_AARCH64_ONLY(ThreadWXEnable _wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable _wx(WXWrite, thread)); #define WB_END JNI_END diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp index d20cfde09cab0..6ceec66756d0d 100644 --- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp +++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp @@ -235,7 +235,7 @@ static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind)) DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);) // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); return ConfigT::thaw(thread, (Continuation::thaw_kind)kind); JRT_END @@ -2282,6 +2282,10 @@ NOINLINE intptr_t* Thaw::thaw_slow(stackChunkOop chunk, Continuation::t return sp; } +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif + LogTarget(Trace, continuations) lt; if (lt.develop_is_enabled()) { LogStream ls(lt); diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 5e4aaf31a3be8..32b7641cb914b 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -134,8 +134,13 @@ void DeoptimizationScope::mark(nmethod* nm, bool inc_recompile_counts) { return; } +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + nmethod::DeoptimizationStatus status = inc_recompile_counts ? 
nmethod::deoptimize : nmethod::deoptimize_noupdate; + REQUIRE_THREAD_WX_MODE_WRITE Atomic::store(&nm->_deoptimization_status, status); // Make sure active is not committed @@ -2617,7 +2622,7 @@ Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int tr Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* current, jint trap_request, jint exec_mode) { // Enable WXWrite: current function is called from methods compiled by C2 directly - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, current)); // Still in Java no safepoints { diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp index c52c2664faa51..bee6049755a08 100644 --- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp +++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp @@ -283,7 +283,7 @@ class VMNativeEntryWrapper { #define JRT_ENTRY(result_type, header) \ result_type header { \ assert(current == JavaThread::current(), "Must be"); \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current)); \ ThreadInVMfromJava __tiv(current); \ VM_ENTRY_BASE(result_type, header, current) \ DEBUG_ONLY(VMEntryWrapper __vew;) @@ -311,7 +311,7 @@ class VMNativeEntryWrapper { #define JRT_ENTRY_NO_ASYNC(result_type, header) \ result_type header { \ assert(current == JavaThread::current(), "Must be"); \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current)); \ ThreadInVMfromJava __tiv(current, false /* check asyncs */); \ VM_ENTRY_BASE(result_type, header, current) \ DEBUG_ONLY(VMEntryWrapper __vew;) @@ -321,7 +321,7 @@ class VMNativeEntryWrapper { #define JRT_BLOCK_ENTRY(result_type, header) \ result_type header { \ assert(current == JavaThread::current(), "Must be"); \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, current)); \ HandleMarkCleaner __hm(current); #define JRT_BLOCK \ @@ -358,7 +358,7 @@ extern "C" { \ result_type JNICALL header { \ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ assert(thread == Thread::current(), "JNIEnv is only valid in same thread"); \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ ThreadInVMfromNative __tiv(thread); \ DEBUG_ONLY(VMNativeEntryWrapper __vew;) \ VM_ENTRY_BASE(result_type, header, thread) @@ -383,7 +383,7 @@ extern "C" { \ extern "C" { \ result_type JNICALL header { \ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ ThreadInVMfromNative __tiv(thread); \ DEBUG_ONLY(VMNativeEntryWrapper __vew;) \ VM_ENTRY_BASE(result_type, header, thread) @@ -393,7 +393,7 @@ extern "C" { \ extern "C" { \ result_type JNICALL header { \ JavaThread* thread = JavaThread::current(); \ - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \ ThreadInVMfromNative __tiv(thread); \ DEBUG_ONLY(VMNativeEntryWrapper __vew;) \ VM_ENTRY_BASE(result_type, header, thread) diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp index 04bd08710739d..99bc8ce4f657c 100644 --- a/src/hotspot/share/runtime/javaCalls.cpp +++ b/src/hotspot/share/runtime/javaCalls.cpp @@ -94,14 +94,14 @@ 
JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei DEBUG_ONLY(_thread->inc_java_call_counter()); _thread->set_active_handles(new_handles); // install new handle block and reset Java frame linkage - MACOS_AARCH64_ONLY(_thread->enable_wx(WXExec)); + WX_OLD_ONLY(_thread->enable_wx(WXExec)); } JavaCallWrapper::~JavaCallWrapper() { assert(_thread == JavaThread::current(), "must still be the same thread"); - MACOS_AARCH64_ONLY(_thread->enable_wx(WXWrite)); + WX_OLD_ONLY(_thread->enable_wx(WXWrite)); // restore previous handle block & Java frame linkage JNIHandleBlock *_old_handles = _thread->active_handles(); @@ -316,6 +316,9 @@ Handle JavaCalls::construct_new_instance(InstanceKlass* klass, Symbol* construct void JavaCalls::call(JavaValue* result, const methodHandle& method, JavaCallArguments* args, TRAPS) { +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(THREAD); +#endif // Check if we need to wrap a potential OS exception handler around thread. // This is used for e.g. Win32 structured exception handlers. // Need to wrap each and every time, since there might be native code down the @@ -412,6 +415,9 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC #endif } } + + REQUIRE_THREAD_WX_MODE_EXEC + StubRoutines::call_stub()( (address)&link, // (intptr_t*)&(result->_value), // see NOTE above (compiler problem) diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 9f5dd07c4dcc8..e52824cb96cef 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -379,10 +379,10 @@ void JavaThread::check_possible_safepoint() { clear_unhandled_oops(); #endif // CHECK_UNHANDLED_OOPS +#if INCLUDE_WX_OLD // Macos/aarch64 should be in the right state for safepoint (e.g. // deoptimization needs WXWrite). Crashes caused by the wrong state rarely // happens in practice, making such issues hard to find and reproduce. -#if defined(__APPLE__) && defined(AARCH64) if (AssertWXAtThreadSync) { assert_wx_state(WXWrite); } @@ -1270,7 +1270,7 @@ void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) { thread->set_thread_state(_thread_in_vm); // Enable WXWrite: called directly from interpreter native wrapper. 
- MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, thread)); SafepointMechanism::process_if_requested_with_exit_check(thread, true /* check asyncs */); @@ -1446,6 +1446,9 @@ void JavaThread::oops_do_frames(OopClosure* f, NMethodClosure* cf) { if (!has_last_Java_frame()) { return; } +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif // Finish any pending lazy GC activity for the frames StackWatermarkSet::finish_processing(this, nullptr /* context */, StackWatermarkKind::gc); // Traverse the execution stack @@ -1463,7 +1466,8 @@ void JavaThread::verify_states_for_handshake() { void JavaThread::nmethods_do(NMethodClosure* cf) { DEBUG_ONLY(verify_frame_info();) - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());) + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());) + REQUIRE_THREAD_WX_MODE_WRITE if (has_last_Java_frame()) { // Traverse the execution stack diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index 8c62319f8dca5..62e29461006ea 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -610,6 +610,9 @@ class JavaThread: public Thread { inline ThreadSafepointState* safepoint_state() const; inline void set_safepoint_state(ThreadSafepointState* state); inline bool is_at_poll_safepoint(); +#if INCLUDE_WX_NEW + inline void check_wx(JavaThreadState s) const; +#endif // JavaThread termination and lifecycle support: void smr_delete(); diff --git a/src/hotspot/share/runtime/javaThread.inline.hpp b/src/hotspot/share/runtime/javaThread.inline.hpp index de492fda50bcd..4faf16229bff4 100644 --- a/src/hotspot/share/runtime/javaThread.inline.hpp +++ b/src/hotspot/share/runtime/javaThread.inline.hpp @@ -149,9 +149,37 @@ inline JavaThreadState JavaThread::thread_state() const { #endif } +#if INCLUDE_WX_NEW +inline void JavaThread::check_wx(JavaThreadState s) const { +#ifdef ASSERT + if (!AssertWXAtThreadSync) { + return; + } + if (s == _thread_blocked && _thread_state == _thread_in_vm) { + return; + } + if (s == _thread_in_vm && _thread_state == _thread_blocked) { + return; + } + if (is_Compiler_thread()) { + if (s == _thread_in_native || s == _thread_in_vm) { + return; + } + } + assert(!wx_state().is_lazy(), "thread state transition while in lazy mode"); + REQUIRE_THREAD_WX_MODE_EXEC +#endif +} +#endif + inline void JavaThread::set_thread_state(JavaThreadState s) { assert(current_or_null() == nullptr || current_or_null() == this, "state change should only be called by the current thread"); +#if INCLUDE_WX_NEW + if (AssertWXAtThreadSync) { + check_wx(s); + } +#endif #if defined(PPC64) || defined (AARCH64) || defined(RISCV64) // Use membars when accessing volatile _thread_state. See // Threads::create_vm() for size checks. diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index dde80806912f5..42d6797fe292c 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -1072,10 +1072,10 @@ class os: AllStatic { static char* build_agent_function_name(const char *sym, const char *cname, bool is_absolute_path); -#if defined(__APPLE__) && defined(AARCH64) +#if INCLUDE_WX // Enables write or execute access to writeable and executable pages. 
- static void current_thread_enable_wx(WXMode mode); -#endif // __APPLE__ && AARCH64 + static void current_thread_enable_wx(WXMode mode, bool use_new_code = false); +#endif protected: static volatile unsigned int _rand_seed; // seed for random number generator diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index ab896290007e0..65f37831e4816 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -605,7 +605,7 @@ void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) { thread->set_thread_state(_thread_in_vm); // Enable WXWrite: the function is called implicitly from java code. - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, thread)); if (log_is_enabled(Info, safepoint, stats)) { Atomic::inc(&_nof_threads_hit_polling_page); diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 733e8a78d903e..53eec4a42df70 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -72,6 +72,7 @@ #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.inline.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "runtime/timerTrace.hpp" #include "runtime/vframe.inline.hpp" #include "runtime/vframeArray.hpp" @@ -108,6 +109,9 @@ const char *SharedRuntime::_stub_names[] = { //----------------------------generate_stubs----------------------------------- void SharedRuntime::generate_initial_stubs() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif // Build this early so it's available for the interpreter. _throw_StackOverflowError_blob = generate_throw_exception(SharedStubId::throw_StackOverflowError_id, @@ -115,6 +119,9 @@ void SharedRuntime::generate_initial_stubs() { } void SharedRuntime::generate_stubs() { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif _wrong_method_blob = generate_resolve_blob(SharedStubId::wrong_method_id, CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method)); @@ -180,6 +187,9 @@ void SharedRuntime::generate_jfr_stubs() { const char* timer_msg = "SharedRuntime generate_jfr_stubs"; TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint(); _jfr_return_lease_blob = generate_jfr_return_lease(); } @@ -1415,12 +1425,17 @@ methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, T CompiledICLocker ml(caller_nm); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(current); +#endif if (is_virtual && !is_optimized) { CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); + REQUIRE_THREAD_WX_MODE_WRITE inline_cache->update(&call_info, receiver->klass()); } else { // Callsite is a direct call - set it to the destination method CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc()); + REQUIRE_THREAD_WX_MODE_WRITE callsite->set(callee_method); } @@ -1691,6 +1706,9 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) { // we will wind up in the interprter (thru a c2i with c2). 
// CompiledICLocker ml(caller_nm); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(current); +#endif address call_addr = caller_nm->call_instruction_address(pc); if (call_addr != nullptr) { @@ -1805,7 +1823,7 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal // write lock needed because we might patch call site by set_to_clean() // and is_unloading() can modify nmethod's state - MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current())); + WX_OLD_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current())); CodeBlob* cb = CodeCache::find_blob(caller_pc); if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) { @@ -1842,6 +1860,10 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal } CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc); + // write lock needed because we might patch call site by set_to_clean() +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif callsite->set_to_clean(); JRT_END @@ -2805,6 +2827,10 @@ bool AdapterHandlerLibrary::generate_adapter_code(AdapterBlob*& adapter_blob, ClassLoader::perf_method_adapters_count()->inc(); } +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif + BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache CodeBuffer buffer(buf); short buffer_locs[20]; @@ -3127,6 +3153,9 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { ResourceMark rm; BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache if (buf != nullptr) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif CodeBuffer buffer(buf); if (method->is_continuation_enter_intrinsic()) { diff --git a/src/hotspot/share/runtime/stackWatermarkSet.cpp b/src/hotspot/share/runtime/stackWatermarkSet.cpp index 528bf767be1a1..425fa593cf233 100644 --- a/src/hotspot/share/runtime/stackWatermarkSet.cpp +++ b/src/hotspot/share/runtime/stackWatermarkSet.cpp @@ -81,6 +81,9 @@ static void verify_processing_context() { } void StackWatermarkSet::before_unwind(JavaThread* jt) { +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(jt); +#endif verify_processing_context(); assert(jt->has_last_Java_frame(), "must have a Java frame"); for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { @@ -90,6 +93,9 @@ void StackWatermarkSet::before_unwind(JavaThread* jt) { } void StackWatermarkSet::after_unwind(JavaThread* jt) { +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(jt); +#endif verify_processing_context(); assert(jt->has_last_Java_frame(), "must have a Java frame"); for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { @@ -103,6 +109,9 @@ void StackWatermarkSet::on_iteration(JavaThread* jt, const frame& fr) { // Don't perform barrier when error reporting walks the stack. 
return; } +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif verify_processing_context(); for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { current->on_iteration(fr); @@ -114,6 +123,9 @@ void StackWatermarkSet::on_iteration(JavaThread* jt, const frame& fr) { void StackWatermarkSet::on_safepoint(JavaThread* jt) { StackWatermark* watermark = get(jt, StackWatermarkKind::gc); if (watermark != nullptr) { +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(jt); +#endif watermark->on_safepoint(); } } diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp index 358434938f29a..1840a8c7107bd 100644 --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ b/src/hotspot/share/runtime/stubRoutines.cpp @@ -34,6 +34,7 @@ #include "runtime/timerTrace.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" #ifdef COMPILER2 @@ -219,6 +220,9 @@ static BufferBlob* initialize_stubs(StubGenBlobId blob_id, const char* timer_msg, const char* buffer_name, const char* assert_msg) { +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(Thread::current()); +#endif ResourceMark rm; TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); // Add extra space for large CodeEntryAlignment diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp index 7548a97ced899..8553e6bccf850 100644 --- a/src/hotspot/share/runtime/stubRoutines.hpp +++ b/src/hotspot/share/runtime/stubRoutines.hpp @@ -376,13 +376,20 @@ class StubRoutines: AllStatic { static jshort f2hf(jfloat x) { assert(_f2hf != nullptr, "stub is not implemented on this platform"); - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache + WX_OLD_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(JavaThread::current()); // About to call into code cache +#endif + typedef jshort (*f2hf_stub_t)(jfloat x); return ((f2hf_stub_t)_f2hf)(x); } static jfloat hf2f(jshort x) { assert(_hf2f != nullptr, "stub is not implemented on this platform"); - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache + WX_OLD_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(JavaThread::current()); // About to call into code cache +#endif typedef jfloat (*hf2f_stub_t)(jshort x); return ((hf2f_stub_t)_hf2f)(x); } diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 400d69ad510a3..dacfd9aec8d35 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -38,6 +38,7 @@ #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/init.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/nonJavaThread.hpp" #include "runtime/orderAccess.hpp" @@ -137,8 +138,7 @@ Thread::Thread(MemTag mem_tag) { // If the main thread creates other threads before the barrier set that is an error. 
assert(Thread::current_or_null() == nullptr, "creating thread before barrier set"); } - - MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false)); + WX_OLD_ONLY(DEBUG_ONLY(_wx_init = false)); } #ifdef ASSERT @@ -211,7 +211,9 @@ void Thread::call_run() { // Perform common initialization actions - MACOS_AARCH64_ONLY(this->init_wx()); +#if INCLUDE_WX + this->init_wx(); +#endif register_thread_stack_with_NMT(); @@ -603,3 +605,43 @@ void Thread::SpinRelease(volatile int * adr) { // more than covers this on all platforms. *adr = 0; } + +#if INCLUDE_WX +void Thread::assert_can_change_wx_state(WXState wx_state) const { +#if 0 + // FIXME + guarantee((wx_mode(wx_state) == os::WXWrite) == ((wx_depth() % 2) == 1), "Strange W^X pattern"); +#endif + if (is_Worker_thread() || is_ConcurrentGC_thread()) { + // GC threads set WXWrite at startup + guarantee(wx_depth() <= 1, "Unexpected GC thread W^X depth"); + return; + } + if (is_VM_thread()) { + // X --> W, should never request W --> X + guarantee(wx_depth() <= 1, "Unexpected CompilerThread W^X depth"); + return; + } + guarantee(is_Java_thread(), "Not allowed to change W^X state"); + JavaThreadState state = JavaThread::cast(this)->thread_state(); + if (is_Compiler_thread()) { + // X --> W --> X (register_method --> gc_on_allocation) + guarantee(wx_depth() <= 2, "Unexpected CompilerThread W^X depth"); + guarantee(state == _thread_in_vm || state == _thread_in_native || + (can_call_java() && state == _thread_in_Java), + "CompilerThread W^X change from unexpected thread state %d", state); + return; + } + int max_in_init_depth = 3; // X --> W --> X --> W (universe_post_init) + int max_post_init_depth = 2; // X --> W --> X (AdapterBlob::create --> gc_on_allocation) + int max_depth = is_init_completed() ? max_post_init_depth : max_in_init_depth; + guarantee(wx_depth() <= max_depth, "Unexpected JavaThread W^X depth"); + guarantee(state == _thread_in_vm || (can_call_java() && state == _thread_in_Java), + "JavaThread W^X change from unexpected thread state %d", state); +} + +#if 0 +const Thread::WXEnable Thread::_wx_root_scope; +#endif + +#endif diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index 81307c4acab42..289a372482ebe 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -39,6 +39,9 @@ #include "runtime/threadStatisticalInfo.hpp" #include "runtime/unhandledOops.hpp" #include "utilities/globalDefinitions.hpp" +#if 1 +#include "utilities/vmError.hpp" +#endif #include "utilities/macros.hpp" #if INCLUDE_JFR #include "jfr/support/jfrThreadExtension.hpp" @@ -60,7 +63,6 @@ class ThreadsList; class ThreadsSMRSupport; class VMErrorCallback; - DEBUG_ONLY(class ResourceMark;) class WorkerThread; @@ -608,18 +610,127 @@ class Thread: public ThreadShadow { static void SpinAcquire(volatile int * Lock); static void SpinRelease(volatile int * Lock); -#if defined(__APPLE__) && defined(AARCH64) + // Lazy mode allows lazy transitions between Write and Exec. + class WXState { + WXMode _mode; + bool _lazy; + public: + WXState(WXMode mode, bool lazy = false) : _mode(mode), _lazy(lazy) {} + WXMode wx_mode() const { return _mode; } + void set_wx_mode(WXMode mode) { _mode = mode; } + bool is_lazy() const { return _lazy; } + const char* name() const { + return _lazy ? + _mode == WXWrite ? "WXWrite (lazy)" : "WXExec (lazy)" + : + _mode == WXWrite ? 
"WXWrite" : "WXExec"; + } + bool operator== (const WXState s) const { return _mode == s._mode && _lazy == s._lazy; } + bool operator!= (const WXState s) const { return !(*this == s); } + }; + +#if INCLUDE_WX +#if INCLUDE_WX_OLD private: DEBUG_ONLY(bool _wx_init); WXMode _wx_state; + inline void set_os_wx_mode_old(WXMode mode); public: - void init_wx(); WXMode enable_wx(WXMode new_state); void assert_wx_state(WXMode expected) { assert(_wx_state == expected, "wrong state"); } -#endif // __APPLE__ && AARCH64 +#endif +#if INCLUDE_WX_NEW + + static WXState wx_lazy_state(WXState s) { + return WXState(s.wx_mode(), true /* lazy */); + } + + class WXEnable; + + private: + friend class WXEnable; + + static const WXEnable _wx_root_scope; + + struct wx { + WXState _state; +#ifdef ASSERT + bool _init; + uint _writes_required; + uint _writes_required_at_last_x2w; + uint _depth; +#if 0 + const WXEnable* _scope; +#endif +#if INCLUDE_WX_OLD + uint _changes_old; +#endif +#if INCLUDE_WX_NEW + uint _changes_new; +#endif + const char* _last_change_file; + int _last_change_line; + void set_last_change_loc(const char* FILE, int LINE) { + _last_change_file = FILE; _last_change_line = LINE; + } + const char* last_change_file() const { return _last_change_file; } + int last_change_line() const { return _last_change_line; } +#else + void set_last_change_loc(const char* FILE, int LINE) {} + const char* last_change_file() const { return ""; } + int last_change_line() const { return -1; } +#endif + wx() : _state(WXExec) { +#ifdef ASSERT +#if 0 + _scope = &_wx_root_scope; +#endif + _depth = 0; + _init = false; + _writes_required = 0; + _writes_required_at_last_x2w = 0; + _last_change_file = __FILE__; + _last_change_line = __LINE__; +#if INCLUDE_WX_OLD + _changes_old = 0; +#endif + _changes_new = 0; +#endif + } + } _wx; + + inline void set_os_wx_mode_new(WXMode mode); + + WXState set_wx_state(WXState new_state, const char* FILE, int LINE); + + public: + void init_wx(); + WXState wx_state() const { return _wx._state; } + WXState wx_lazy_state() const { return wx_lazy_state(_wx._state); } +#if 0 + const WXEnable* wx_scope() const { return _wx._scope; } + void set_wx_scope(const WXEnable* s) { _wx._scope = s; } +#endif +#ifdef ASSERT + void set_last_wx_change_loc(const char* FILE, int LINE) { _wx.set_last_change_loc(FILE, LINE); } + const char* last_wx_change_file() const { return _wx.last_change_file(); } + int last_wx_change_line() const { return _wx.last_change_line(); } + int inc_wx_depth(int i) { return _wx._depth += i; } + int wx_depth() const { return _wx._depth; } + void inc_wx_writes_required() { _wx._writes_required += 1; } + uint wx_writes_required() const { return _wx._writes_required; } + uint wx_writes_required_at_last_x2w() const { return _wx._writes_required_at_last_x2w; } + void set_wx_writes_required_at_last_x2w() { _wx._writes_required_at_last_x2w = _wx._writes_required; } + void assert_can_change_wx_state(WXState new_state) const; + + inline void require_wx_mode(WXMode expected, const char* FILE, int LINE); +#endif + +#endif // INCLUDE_WX_NEW +#endif // INCLUDE_WX private: bool _in_asgct = false; diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp index 756a4702159d2..b4fdbd6b7497f 100644 --- a/src/hotspot/share/runtime/thread.inline.hpp +++ b/src/hotspot/share/runtime/thread.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. 
All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,6 +30,7 @@ #include "gc/shared/tlab_globals.hpp" #include "runtime/atomic.hpp" +#include "utilities/events.hpp" #if defined(__APPLE__) && defined(AARCH64) #include "runtime/os.hpp" @@ -70,13 +71,28 @@ inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) { Atomic::release_store_fence(&_threads_hazard_ptr, new_list); } -#if defined(__APPLE__) && defined(AARCH64) +#if INCLUDE_WX inline void Thread::init_wx() { assert(this == Thread::current(), "should only be called for current thread"); +#if INCLUDE_WX_OLD assert(!_wx_init, "second init"); _wx_state = WXWrite; - os::current_thread_enable_wx(_wx_state); + set_os_wx_mode_old(_wx_state); DEBUG_ONLY(_wx_init = true); +#endif +#if INCLUDE_WX_NEW + assert(!_wx._init, "second init"); + set_os_wx_mode_new(wx_state().wx_mode()); + DEBUG_ONLY(_wx._init = true;) +#endif +} + +#if INCLUDE_WX_OLD +inline void Thread::set_os_wx_mode_old(WXMode mode) { +#ifdef ASSERT + ++_wx._changes_old; +#endif + os::current_thread_enable_wx(mode, false); } inline WXMode Thread::enable_wx(WXMode new_state) { @@ -85,10 +101,49 @@ inline WXMode Thread::enable_wx(WXMode new_state) { WXMode old = _wx_state; if (_wx_state != new_state) { _wx_state = new_state; - os::current_thread_enable_wx(new_state); + set_os_wx_mode_old(new_state); } return old; } -#endif // __APPLE__ && AARCH64 +#endif // INCLUDE_WX_OLD + +#if INCLUDE_WX_NEW +inline void Thread::set_os_wx_mode_new(WXMode mode) { +#ifdef ASSERT + ++_wx._changes_new; +#endif +#if INCLUDE_WX_OLD + assert(_wx._changes_new <= _wx._changes_old + 2, "new code not better?"); +#endif + os::current_thread_enable_wx(mode, true); +} + +inline Thread::WXState Thread::set_wx_state(WXState new_state, const char* FILE, int LINE) { + assert(_wx._init, "should be inited"); + WXState old_state = _wx._state; + if (AssertWX) { + guarantee(this == Thread::current(), "should only be called for current thread"); +#ifdef ASSERT + if (new_state != old_state) { + assert_can_change_wx_state(new_state); + } +#endif + } + + if (new_state.wx_mode() != old_state.wx_mode()) { + set_os_wx_mode_new(new_state.wx_mode()); + } +#ifdef ASSERT + if (new_state != old_state) { + set_last_wx_change_loc(FILE, LINE); + } +#endif + _wx._state = new_state; + + return old_state; +} + +#endif // INCLUDE_WX_NEW +#endif // INCLUDE_WX #endif // SHARE_RUNTIME_THREAD_INLINE_HPP diff --git a/src/hotspot/share/runtime/threadWXSetters.inline.hpp b/src/hotspot/share/runtime/threadWXSetters.inline.hpp index 121584b81be85..3524cd55b982b 100644 --- a/src/hotspot/share/runtime/threadWXSetters.inline.hpp +++ b/src/hotspot/share/runtime/threadWXSetters.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -28,9 +28,12 @@ // No threadWXSetters.hpp -#if defined(__APPLE__) && defined(AARCH64) +#if INCLUDE_WX #include "runtime/thread.inline.hpp" +#include "utilities/events.hpp" + +#if INCLUDE_WX_OLD class ThreadWXEnable { Thread* _thread; @@ -46,7 +49,225 @@ class ThreadWXEnable { } } }; -#endif // __APPLE__ && AARCH64 +#endif + +#if INCLUDE_WX_NEW +class Thread::WXEnable : public StackObj { + Thread* _thread; + WXState _old_state; + WXState _new_state; + uint _wx_writes_required; + const WXEnable* _parent; +#ifdef ASSERT + const char* _old_file; + int _old_line; +#endif + + friend class Thread; + + WXEnable() : _old_state(WXExec), _new_state(WXExec) { + _thread = nullptr; + _wx_writes_required = 0; +#if 0 + _parent = nullptr; +#endif +#ifdef ASSERT + _old_file = __FILE__; + _old_line = __LINE__; +#endif + } + +public: + WXEnable(Thread* thread, WXState new_state, const char* FILE, int LINE, bool speculative = false) : + _thread(thread), _old_state(thread->wx_state()), _new_state(new_state) + { +#if 0 +// FIXME: use array instead, to avoid dangling pointer error. + WXScope* scope = new (&thread->_wx_scopes[_wx_scope_depth]) WXScope(_wx_scope_depth); +#endif +#if 0 + _parent = thread->wx_scope(); + thread->set_wx_scope(this); +#endif + WXState old_state = _old_state; +#if 1 + if (old_state == new_state) { + os::breakpoint(); + } else if (old_state.wx_mode() == new_state.wx_mode()) { + os::breakpoint(); + } +#endif + +#if 0 + assert(old_state.is_lazy() == _parent->_new_state.is_lazy(), + "lazy state (%s) does not match parent (%s)", + wx_state_name(old_state), + wx_state_name(_parent->_new_state)); +#endif + +#ifdef ASSERT + _old_file = thread->last_wx_change_file(); + _old_line = thread->last_wx_change_line(); +#endif + + // FIXME TODO use state transition table + asserts to verify? + assert(!new_state.is_lazy() || new_state.wx_mode() == old_state.wx_mode(), "lazy request changed mode"); + +#ifdef ASSERT + // Outermost scope? + if (AssertWX) { + _wx_writes_required = _thread->wx_writes_required(); + if (old_state == WXWrite && new_state == WXExec) { + guarantee(_thread->wx_writes_required() > _thread->wx_writes_required_at_last_x2w(), + "Unused outer write scope"); + } else if (new_state == WXWrite && old_state != WXWrite) { + _thread->set_wx_writes_required_at_last_x2w(); + } + } else { + _wx_writes_required = 0; + } + // TODO: also check for X(W,W) that should be Lazy(W,W) + // and check for W(X()) where the X is not required +#endif +#if 0 + // FIXME + if (old_state.wx_mode() != new_state.wx_mode() && !new_state.is_lazy() && !old_state.is_lazy()) { + thread->inc_wx_depth(1); + } +#endif + if (old_state != new_state) { + _thread->set_wx_state(new_state, FILE, LINE); + } + if (speculative) { + // Here we simulate a single write to make sure the write scope is marked as + // needed, which satisfies the debug check above for unneeded write scopes. 
+ assert(new_state == WXWrite, "unexpected state"); + // REQUIRE_THREAD_WX_MODE_WRITE + _thread->require_wx_mode(WXWrite, FILE, LINE); // inc_wx_writes_required(); + } + } + + ~WXEnable() { + Thread* thread = _thread; + if (thread == nullptr) { + // root scope + return; + } + WXState cur_state = thread->wx_state(); + WXState new_state = _new_state; + + assert(new_state == cur_state || (new_state.is_lazy() && cur_state.is_lazy()), + "state not restored by inner scope?"); + +#ifdef ASSERT + if (AssertWX) { + if (new_state == WXWrite) { + guarantee(thread->wx_writes_required() > _wx_writes_required, "no writes required, use lazy mode?"); + } + } +#endif + + WXState old_state = _old_state; + if (old_state.is_lazy()) { + old_state.set_wx_mode(cur_state.wx_mode()); + } + +#if 0 + // FIXME + if (old_state.wx_mode() != cur_state.wx_mode() && !old_state.is_lazy() && !cur_state.is_lazy()) { + thread->inc_wx_depth(-1); + } +#endif + + if (old_state != cur_state) { + thread->set_wx_state(old_state, __FILE__, __LINE__); + } +#ifdef ASSERT + thread->set_last_wx_change_loc(_old_file, _old_line); +#endif +#if 0 +// FIXME: use array instead, to avoid dangling pointer error. +#endif +#if 0 + thread->set_wx_scope(_parent); +#endif + +#if 0 + assert(old_state.is_lazy() == _parent->_new_state.is_lazy(), + "lazy state (%s) does not match parent (%s)", + old_state.name(), + _parent->_new_state.name()); +#endif + } +}; + +typedef Thread::WXEnable WXMark; + +inline WXMark WXLazyMark(Thread* t, const char* FILE, int LINE) { + return WXMark(t, t->wx_lazy_state(), FILE, LINE); +} + +inline WXMark WXConditionalWriteMark(Thread* t, bool cond, const char* FILE, int LINE) { + return WXMark(t, cond ? WXWrite : t->wx_state(), FILE, LINE); +} + +// This variant is used when we want to set write mode, expecting writes +// to happen, but we can't guarantee it. We might use this outside a loop +// when there are conditional writes inside the loop, and we don't want to +// slow down the loop with additional scopes. +inline WXMark WXSpeculativeWriteMark(Thread* t, bool cond, const char* FILE, int LINE) { + return WXMark(t, cond ? WXWrite : t->wx_state(), FILE, LINE, true /* speculative */); +} + +#define WXExecMark(t) WXMark(t, WXExec, __FILE__, __LINE__) +#define WXWriteMark(t) WXMark(t, WXWrite, __FILE__, __LINE__) +#define WXLazyMark(t) WXLazyMark(t, __FILE__, __LINE__) +#define WXConditionalWriteMark(t, cond) WXConditionalWriteMark(t, cond, __FILE__, __LINE__) +#define WXSpeculativeWriteMark(t, cond) WXSpeculativeWriteMark(t, cond, __FILE__, __LINE__) + +#ifdef ASSERT +inline void Thread::require_wx_mode(WXMode expected, const char* FILE, int LINE) { + assert(this == Thread::current(), "should only be called for current thread"); + if (AssertWX) { + if (wx_state().is_lazy()) { +#if 1 + if (VMError::is_error_reported_in_current_thread()) { + abort(); + } +#endif + guarantee(!wx_state().is_lazy(), "definite state required"); + } + if (expected == WXWrite) { + inc_wx_writes_required(); + } + guarantee(wx_state().wx_mode() == expected, + "unexpected state %s (expected %s) at %s:%d, last set at %s:%d", + wx_state().name(), + expected == WXExec ? 
"WXExec" : "WXWrite", + FILE, LINE, + last_wx_change_file(), last_wx_change_line()); + } +} +#endif + +#define require_wx_mode(mode) require_wx_mode((mode), __FILE__, __LINE__) + +#define REQUIRE_THREAD_WX_MODE_EXEC Thread::current()->require_wx_mode(WXExec); +#define REQUIRE_THREAD_WX_MODE_WRITE Thread::current()->require_wx_mode(WXWrite); + +#endif // INCLUDE_WX_NEW + +#else + +#define WXMark(t, m) StackObj() +#define WXExecMark(t) StackObj() +#define WXWriteMark(t) StackObj() +#define WXLazyMark(t) StackObj() + +#define REQUIRE_THREAD_WX_MODE_EXEC +#define REQUIRE_THREAD_WX_MODE_WRITE + +#endif // INCLUDE_WX #endif // SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp index 203062582a0e2..cfaa95bd82bfc 100644 --- a/src/hotspot/share/runtime/threads.cpp +++ b/src/hotspot/share/runtime/threads.cpp @@ -459,7 +459,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { // Initialize the os module os::init(); - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXWrite)); // Record VM creation timing statistics TraceVmCreationTime create_vm_timer; @@ -553,15 +553,17 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { // Attach the main thread to this os thread JavaThread* main_thread = new JavaThread(); - main_thread->set_thread_state(_thread_in_vm); main_thread->initialize_thread_current(); + main_thread->set_thread_state(_thread_in_vm); +#if INCLUDE_WX + main_thread->init_wx(); +#endif // Once mutexes and main_thread are ready, we can use NmtVirtualMemoryLocker. MemTracker::NmtVirtualMemoryLocker::set_safe_to_use(); // must do this before set_active_handles main_thread->record_stack_base_and_size(); main_thread->register_thread_stack_with_NMT(); main_thread->set_active_handles(JNIHandleBlock::allocate_block()); - MACOS_AARCH64_ONLY(main_thread->init_wx()); // Set the _monitor_owner_id now since we will run Java code before the Thread instance // is even created. The same value will be assigned to the Thread instance on init. @@ -585,6 +587,10 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { ObjectMonitor::Initialize(); ObjectSynchronizer::initialize(); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(main_thread); +#endif + // Initialize global modules jint status = init_globals(); if (status != JNI_OK) { diff --git a/src/hotspot/share/runtime/vmThread.cpp b/src/hotspot/share/runtime/vmThread.cpp index 0ff5e5d227b5b..d4fa7e41aabc8 100644 --- a/src/hotspot/share/runtime/vmThread.cpp +++ b/src/hotspot/share/runtime/vmThread.cpp @@ -484,6 +484,10 @@ void VMThread::loop() { no_op.set_calling_thread(_vm_thread); safepointALot_op.set_calling_thread(_vm_thread); +#if INCLUDE_WX_NEW + auto _wx = WXLazyMark(Thread::current()); +#endif + while (true) { if (should_terminate()) break; wait_for_operation(); diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp index a5caf316aa3b8..4dee9787fe49f 100644 --- a/src/hotspot/share/utilities/macros.hpp +++ b/src/hotspot/share/utilities/macros.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -643,4 +643,48 @@ #define INCLUDE_ASAN 0 #endif +#if defined(__APPLE__) && defined(AARCH64) +#define WX_HW 1 +#define WX_EMUL 0 +#else +#define WX_HW 0 +#ifdef ASSERT +#define WX_EMUL 1 +#define AssertWX 1 +#define AssertWXAtThreadSync 1 +#define WXLazyForceExec 0 +#define WXLazyNoRestore 1 +#else +#define WX_EMUL 0 +#endif +#endif + +#if WX_HW || WX_EMUL +#define INCLUDE_WX 1 +#define INCLUDE_WX_OLD 1 +#define INCLUDE_WX_NEW 1 +#else +#define INCLUDE_WX 0 +#define INCLUDE_WX_OLD 0 +#define INCLUDE_WX_NEW 0 +#endif + +#if INCLUDE_WX +#define WX_ONLY(x) x +#if INCLUDE_WX_OLD +#define WX_OLD_ONLY(x) x +#else +#define WX_OLD_ONLY(x) +#endif +#if INCLUDE_WX_NEW +#define WX_NEW_ONLY(x) x +#else +#define WX_NEW_ONLY(x) +#endif +#else +#define WX_ONLY(x) +#define WX_OLD_ONLY(x) +#define WX_NEW_ONLY(x) +#endif + #endif // SHARE_UTILITIES_MACROS_HPP diff --git a/src/hotspot/share/utilities/zipLibrary.cpp b/src/hotspot/share/utilities/zipLibrary.cpp index ae68fb9ef7703..8c5d0bf46d2c2 100644 --- a/src/hotspot/share/utilities/zipLibrary.cpp +++ b/src/hotspot/share/utilities/zipLibrary.cpp @@ -28,6 +28,7 @@ #include "runtime/os.inline.hpp" #include "runtime/semaphore.inline.hpp" #include "runtime/thread.inline.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/zipLibrary.hpp" // Entry points in zip.dll for loading zip/jar file entries @@ -149,6 +150,9 @@ static void initialize(bool vm_exit_on_failure = true) { if (is_loaded()) { return; } +#if INCLUDE_WX_NEW + auto _wx = WXExecMark(Thread::current()); +#endif ZipLibraryLoaderLock lock; if (not_loaded()) { load_zip_library(vm_exit_on_failure); diff --git a/test/hotspot/gtest/aarch64/test_assembler_aarch64.cpp b/test/hotspot/gtest/aarch64/test_assembler_aarch64.cpp index 686f1ed4f3a89..367785c963e74 100644 --- a/test/hotspot/gtest/aarch64/test_assembler_aarch64.cpp +++ b/test/hotspot/gtest/aarch64/test_assembler_aarch64.cpp @@ -31,6 +31,8 @@ #include "compiler/disassembler.hpp" #include "memory/resourceArea.hpp" #include "nativeInst_aarch64.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "unittest.hpp" #define __ _masm. 
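
The WX_HW configuration defined just above covers Apple silicon, where a MAP_JIT page is, per thread, either writable or executable but never both. HotSpot reaches the hardware switch through os::current_thread_enable_wx (its platform implementation is not part of this hunk); on macOS/AArch64 that toggle is conventionally built on pthread_jit_write_protect_np. The following self-contained sketch, independent of HotSpot, shows the basic write-then-execute cycle; the API usage and the hand-encoded instructions are illustrative only:

// Standalone macOS/AArch64 illustration of the hardware W^X toggle that the
// WX_HW configuration relies on. Not JDK code; error handling kept minimal.
#include <cstdio>
#if defined(__APPLE__) && defined(__aarch64__)
#include <libkern/OSCacheControl.h>   // sys_icache_invalidate
#include <pthread.h>                  // pthread_jit_write_protect_np
#include <sys/mman.h>
#include <cstdint>
#include <cstring>

int main() {
  // One RWX MAP_JIT page; each thread sees it as either writable or
  // executable, toggled by pthread_jit_write_protect_np.
  void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (page == MAP_FAILED) { perror("mmap"); return 1; }

  const uint32_t code[] = {
    0x52800540,   // movz w0, #42
    0xd65f03c0    // ret
  };

  pthread_jit_write_protect_np(0);              // "WXWrite": writable, not executable
  memcpy(page, code, sizeof(code));
  pthread_jit_write_protect_np(1);              // "WXExec": executable, not writable
  sys_icache_invalidate(page, sizeof(code));

  int result = reinterpret_cast<int (*)()>(page)();
  printf("generated stub returned %d\n", result);
  return result == 42 ? 0 : 1;
}
#else
int main() { printf("this sketch only applies to macOS/AArch64\n"); return 0; }
#endif
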
@@ -52,6 +54,12 @@ static void asm_check(const unsigned int *insns, const unsigned int *insns1, siz } TEST_VM(AssemblerAArch64, validate) { + JavaThread* THREAD = JavaThread::current(); + ThreadInVMfromNative invm(THREAD); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(THREAD); +#endif + // Smoke test for assembler BufferBlob* b = BufferBlob::create("aarch64Test", 500000); CodeBuffer code(b); diff --git a/test/hotspot/gtest/code/test_codestrings.cpp b/test/hotspot/gtest/code/test_codestrings.cpp index 46a8ff12f0894..b0781e75b083d 100644 --- a/test/hotspot/gtest/code/test_codestrings.cpp +++ b/test/hotspot/gtest/code/test_codestrings.cpp @@ -28,6 +28,8 @@ #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" #include "memory/resourceArea.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/vmassert_uninstall.hpp" @@ -272,6 +274,12 @@ TEST_VM(codestrings, DISABLED_validate) TEST_VM(codestrings, validate) #endif { + JavaThread* THREAD = JavaThread::current(); + ThreadInVMfromNative invm(THREAD); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(THREAD); +#endif + code_buffer_test(); buffer_blob_test(); } diff --git a/test/hotspot/gtest/code/test_vtableStub.cpp b/test/hotspot/gtest/code/test_vtableStub.cpp index 74b39d017e7ad..f21e82307db1d 100644 --- a/test/hotspot/gtest/code/test_vtableStub.cpp +++ b/test/hotspot/gtest/code/test_vtableStub.cpp @@ -24,6 +24,7 @@ #include "code/vtableStubs.hpp" #include "runtime/interfaceSupport.inline.hpp" +#include "runtime/threadWXSetters.inline.hpp" #include "unittest.hpp" #ifndef ZERO @@ -31,6 +32,9 @@ TEST_VM(code, vtableStubs) { // Should be in VM to use locks ThreadInVMfromNative ThreadInVMfromNative(JavaThread::current()); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(JavaThread::current()); +#endif VtableStubs::find_vtable_stub(0); // min vtable index for (int i = 0; i < 15; i++) { @@ -43,6 +47,10 @@ TEST_VM(code, vtableStubs) { TEST_VM(code, itableStubs) { // Should be in VM to use locks ThreadInVMfromNative ThreadInVMfromNative(JavaThread::current()); +#if INCLUDE_WX_NEW + auto _wx = WXWriteMark(JavaThread::current()); +#endif + VtableStubs::find_itable_stub(0); // min itable index for (int i = 0; i < 15; i++) { diff --git a/test/hotspot/gtest/gtestMain.cpp b/test/hotspot/gtest/gtestMain.cpp index c593f8dbb19ee..6defd83cf351e 100644 --- a/test/hotspot/gtest/gtestMain.cpp +++ b/test/hotspot/gtest/gtestMain.cpp @@ -97,7 +97,7 @@ static int init_jvm(int argc, char **argv, bool disable_error_handling, JavaVM** // CreateJavaVM leaves WXExec context, while gtests // calls internal functions assuming running in WXWwrite. // Switch to WXWrite once for all test cases. 
- MACOS_AARCH64_ONLY(Thread::current()->enable_wx(WXWrite)); + WX_OLD_ONLY(Thread::current()->enable_wx(WXWrite)); } return ret; } diff --git a/test/hotspot/gtest/runtime/test_stubRoutines.cpp b/test/hotspot/gtest/runtime/test_stubRoutines.cpp index 6d718a8209f55..db79c5679933d 100644 --- a/test/hotspot/gtest/runtime/test_stubRoutines.cpp +++ b/test/hotspot/gtest/runtime/test_stubRoutines.cpp @@ -61,7 +61,7 @@ static void test_arraycopy_func(address func, int alignment) { } TEST_VM(StubRoutines, array_copy_routine) { - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXExec)); #define TEST_ARRAYCOPY(type) \ test_arraycopy_func( StubRoutines::type##_arraycopy(), sizeof(type)); \ @@ -77,11 +77,11 @@ TEST_VM(StubRoutines, array_copy_routine) { #undef TEST_ARRAYCOPY - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXWrite)); } TEST_VM(StubRoutines, copy_routine) { - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXExec)); #define TEST_COPYRTN(type) \ test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic), sizeof(type)); \ @@ -102,11 +102,11 @@ TEST_VM(StubRoutines, copy_routine) { test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong)); test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong)); - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXWrite)); } TEST_VM(StubRoutines, array_fill_routine) { - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXExec)); #define TEST_FILL(type) \ if (StubRoutines::type##_fill() != nullptr) { \ @@ -148,5 +148,5 @@ TEST_VM(StubRoutines, array_fill_routine) { #undef TEST_FILL - MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite)); + WX_OLD_ONLY(os::current_thread_enable_wx(WXWrite)); } diff --git a/test/hotspot/gtest/runtime/test_threads.cpp b/test/hotspot/gtest/runtime/test_threads.cpp index 080ce64c4472f..77b44bd2e8b43 100644 --- a/test/hotspot/gtest/runtime/test_threads.cpp +++ b/test/hotspot/gtest/runtime/test_threads.cpp @@ -179,7 +179,7 @@ TEST_VM(ThreadsTest, claim_overflow) { TEST_VM(ThreadsTest, fast_jni_in_vm) { JavaThread* current = JavaThread::current(); JNIEnv* env = current->jni_environment(); - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current)); + WX_OLD_ONLY(ThreadWXEnable wx(WXWrite, current)); // DirectByteBuffer is an easy way to trigger GetIntField, // see JDK-8262896
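
The debug accounting introduced earlier in threadWXSetters.inline.hpp (wx_writes_required, the "Unused outer write scope" and "no writes required, use lazy mode?" guarantees, and WXSpeculativeWriteMark) boils down to counting how many write-mode checks fire inside each write scope. A standalone model of that bookkeeping, with invented names rather than the real HotSpot ones:

// Standalone model of the write-scope accounting behind the new asserts:
// every REQUIRE-style write check bumps a per-thread counter, and a write
// scope that ends without any bump is reported as one that should have been
// lazy (or dropped). Names are illustrative only.
#include <cstdio>

thread_local unsigned g_writes_required = 0;

inline void require_write_mode() {
  // stands in for REQUIRE_THREAD_WX_MODE_WRITE: assert the mode and record the need
  ++g_writes_required;
}

class WriteScopeSketch {
  unsigned _writes_on_entry;
public:
  // 'speculative' mirrors WXSpeculativeWriteMark: writes are expected but cannot
  // be proven, so the scope pre-registers one to satisfy the unused-scope check.
  explicit WriteScopeSketch(bool speculative = false)
      : _writes_on_entry(g_writes_required) {
    if (speculative) {
      require_write_mode();
    }
  }
  ~WriteScopeSketch() {
    if (g_writes_required == _writes_on_entry) {
      fprintf(stderr, "no writes required in this scope, use lazy mode?\n");
    }
  }
};

void patch_something() { require_write_mode(); /* ... patch code ... */ }

int main() {
  { WriteScopeSketch w; patch_something(); }   // fine: a write was required
  { WriteScopeSketch w; }                      // flagged: scope was unnecessary
  return 0;
}
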