diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml
index 3bb50a137ec8..49071de7771f 100644
--- a/.github/workflows/build-windows.yml
+++ b/.github/workflows/build-windows.yml
@@ -31,6 +31,9 @@ on:
platform:
required: true
type: string
+ runs-on:
+ required: true
+ type: string
extra-conf-options:
required: false
type: string
@@ -67,7 +70,7 @@ env:
jobs:
build-windows:
name: build
- runs-on: windows-2025
+ runs-on: ${{ inputs.runs-on }}
defaults:
run:
shell: bash
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 20be196b128a..94f011ff76c2 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -353,6 +353,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-x64
+ runs-on: windows-2022
msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'x86.x64'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
@@ -366,6 +367,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-aarch64
+ runs-on: windows-2022
msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'arm64'
make-target: 'hotspot'
@@ -446,6 +448,6 @@ jobs:
with:
platform: windows-x64
bootjdk-platform: windows-x64
- runs-on: windows-2025
+ runs-on: windows-2022
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
debug-suffix: -debug
diff --git a/make/RunTestsPrebuiltSpec.gmk b/make/RunTestsPrebuiltSpec.gmk
index 5fe559eafadb..568f69da5a51 100644
--- a/make/RunTestsPrebuiltSpec.gmk
+++ b/make/RunTestsPrebuiltSpec.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@ TEST_JOBS ?= 0
# Use hard-coded values for java flags (one size, fits all!)
JAVA_FLAGS := -Duser.language=en -Duser.country=US
JAVA_FLAGS_BIG := -Xms64M -Xmx2048M
-JAVA_FLAGS_SMALL := -XX:+UseSerialGC -Xms32M -Xmx512M -XX:TieredStopAtLevel=1
+JAVA_FLAGS_SMALL := -Xms32M -Xmx512M -XX:TieredStopAtLevel=1
BUILDJDK_JAVA_FLAGS_SMALL := -Xms32M -Xmx512M -XX:TieredStopAtLevel=1
BUILD_JAVA_FLAGS := $(JAVA_FLAGS_BIG)
diff --git a/make/autoconf/boot-jdk.m4 b/make/autoconf/boot-jdk.m4
index b3dbc2929191..4468a9acf279 100644
--- a/make/autoconf/boot-jdk.m4
+++ b/make/autoconf/boot-jdk.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -481,8 +481,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
AC_MSG_CHECKING([flags for boot jdk java command for small workloads])
- # Use serial gc for small short lived tools if possible
- UTIL_ADD_JVM_ARG_IF_OK([-XX:+UseSerialGC],boot_jdk_jvmargs_small,[$JAVA])
UTIL_ADD_JVM_ARG_IF_OK([-Xms32M],boot_jdk_jvmargs_small,[$JAVA])
UTIL_ADD_JVM_ARG_IF_OK([-Xmx512M],boot_jdk_jvmargs_small,[$JAVA])
UTIL_ADD_JVM_ARG_IF_OK([-XX:TieredStopAtLevel=1],boot_jdk_jvmargs_small,[$JAVA])
@@ -492,8 +490,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
AC_SUBST(JAVA_FLAGS_SMALL)
- # Don't presuppose SerialGC is present in the buildjdk. Also, we cannot test
- # the buildjdk, but on the other hand we know what it will support.
BUILD_JAVA_FLAGS_SMALL="-Xms32M -Xmx512M -XX:TieredStopAtLevel=1"
AC_SUBST(BUILD_JAVA_FLAGS_SMALL)
diff --git a/make/data/cldr/common/dtd/ldml.dtd b/make/data/cldr/common/dtd/ldml.dtd
index aebedd33a43c..b4247f2d9243 100644
--- a/make/data/cldr/common/dtd/ldml.dtd
+++ b/make/data/cldr/common/dtd/ldml.dtd
@@ -1,5 +1,5 @@
-
+
@@ -493,6 +493,16 @@ CLDR data files are interpreted according to the LDML specification (http://unic
+
+
+
+
+
+
+
+
+
+
diff --git a/make/data/cldr/common/main/aa.xml b/make/data/cldr/common/main/aa.xml
index 3ff6fb6dd066..791c30096585 100644
--- a/make/data/cldr/common/main/aa.xml
+++ b/make/data/cldr/common/main/aa.xml
@@ -1,6 +1,6 @@
-
@@ -1027,6 +1027,7 @@ For terms of use, see http://www.unicode.org/copyright.html
+
diff --git a/make/data/cldr/common/supplemental/likelySubtags.xml b/make/data/cldr/common/supplemental/likelySubtags.xml
index 76e215255fdc..a73b8a8c95bf 100644
--- a/make/data/cldr/common/supplemental/likelySubtags.xml
+++ b/make/data/cldr/common/supplemental/likelySubtags.xml
@@ -1,7 +1,7 @@
-
+
@@ -1343,7 +1343,7 @@ not be patched by hand, as any changes made in that fashion may be lost.
-
+
diff --git a/make/data/cldr/common/supplemental/metaZones.xml b/make/data/cldr/common/supplemental/metaZones.xml
index 710934fef81d..610921a8f6d2 100644
--- a/make/data/cldr/common/supplemental/metaZones.xml
+++ b/make/data/cldr/common/supplemental/metaZones.xml
@@ -735,7 +735,7 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
diff --git a/make/data/cldr/common/supplemental/supplementalData.xml b/make/data/cldr/common/supplemental/supplementalData.xml
index 25684d36c6e1..cbfe2c5e8751 100644
--- a/make/data/cldr/common/supplemental/supplementalData.xml
+++ b/make/data/cldr/common/supplemental/supplementalData.xml
@@ -1,7 +1,7 @@
@@ -57,7 +57,7 @@ For terms of use, see https://www.unicode.org/copyright.html
-
+
@@ -3147,7 +3147,7 @@ XXX Code for transations where no currency is involved
-
+
diff --git a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java
index 18ce0c334fb8..7b198f976793 100644
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java
@@ -815,6 +815,13 @@ private static Map extractZoneNames(Map map, Str
data = map.get(TIMEZONE_ID_PREFIX + tzLink);
}
+ String meta = handlerMetaZones.get(tzKey);
+ if (meta == null && tzLink != null) {
+ // Check for tzLink
+ meta = handlerMetaZones.get(tzLink);
+ }
+ String metaKey = meta != null ? METAZONE_ID_PREFIX + meta : null;
+
if (data instanceof String[] tznames) {
// Hack for UTC. UTC is an alias to Etc/UTC in CLDR
if (tzid.equals("Etc/UTC") && !map.containsKey(TIMEZONE_ID_PREFIX + "UTC")) {
@@ -826,24 +833,14 @@ private static Map extractZoneNames(Map map, Str
tznames = Arrays.copyOf(tznames, tznames.length);
fillTZDBShortNames(tzKey, tznames);
names.put(tzid, tznames);
+ if (meta != null && map.get(metaKey) instanceof String[] metaNames) {
+ recordMetazone(names, meta, tzKey, metaNames);
+ }
}
} else {
- String meta = handlerMetaZones.get(tzKey);
- if (meta == null && tzLink != null) {
- // Check for tzLink
- meta = handlerMetaZones.get(tzLink);
- }
if (meta != null) {
- String metaKey = METAZONE_ID_PREFIX + meta;
- data = map.get(metaKey);
- if (data instanceof String[] tznames) {
- if (isDefaultZone(meta, tzKey)) {
- // Record the metazone names only from the default
- // (001) zone, with short names filled from TZDB
- tznames = Arrays.copyOf(tznames, tznames.length);
- fillTZDBShortNames(tzKey, tznames);
- names.put(metaKey, tznames);
- }
+ if (map.get(metaKey) instanceof String[] metaNames) {
+ recordMetazone(names, meta, tzKey, metaNames);
names.put(tzid, meta);
if (tzLink != null && availableIds.contains(tzLink)) {
names.put(tzLink, meta);
@@ -1508,11 +1505,18 @@ private static void fillTZDBShortNames(String tzid, String[] names) {
}
}
- private static boolean isDefaultZone(String meta, String tzid) {
+ private static void recordMetazone(Map names, String meta, String tzid, String[] tznames) {
String zone001 = handlerMetaZones.zidMap().get(meta);
var tzLink = getTZDBLink(tzid);
- return canonicalTZMap.getOrDefault(tzid, tzid).equals(zone001) ||
- tzLink != null && canonicalTZMap.getOrDefault(tzLink, tzLink).equals(zone001);
+
+ // Record the metazone names only from the default
+ // (001) zone, with short names filled from TZDB
+ if (canonicalTZMap.getOrDefault(tzid, tzid).equals(zone001) ||
+ tzLink != null && canonicalTZMap.getOrDefault(tzLink, tzLink).equals(zone001)) {
+ tznames = Arrays.copyOf(tznames, tznames.length);
+ fillTZDBShortNames(tzid, tznames);
+ names.put(METAZONE_ID_PREFIX + meta, tznames);
+ }
}
private static String getTZDBLink(String tzid) {
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 4fbbfc9d1dca..7487de2d5775 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -3389,12 +3389,13 @@ encode %{
assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type");
// load fake address constants using a normal move
if (! __ is_valid_AArch64_address(con) ||
- con < (address)(uintptr_t)os::vm_page_size()) {
+ con < (address)(uintptr_t)os::vm_page_size() ||
+ rtype == relocInfo::none) {
__ mov(dst_reg, con);
} else {
- // no reloc so just use adrp and add
+ // use shorter adrp/add sequence for external_word relocation
uint64_t offset;
- __ adrp(dst_reg, con, offset);
+ __ adrp(dst_reg, Address(con, rtype), offset);
__ add(dst_reg, dst_reg, offset);
}
}
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
deleted file mode 100644
index e31a58243b5a..000000000000
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
- * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "compiler/compilerDefinitions.inline.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
-#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-
-#define __ masm->masm()->
-
-void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
- Register addr = _addr->as_register_lo();
- Register newval = _new_value->as_register();
- Register cmpval = _cmp_value->as_register();
- Register tmp1 = _tmp1->as_register();
- Register tmp2 = _tmp2->as_register();
- Register result = result_opr()->as_register();
-
- if (UseCompressedOops) {
- __ encode_heap_oop(tmp1, cmpval);
- cmpval = tmp1;
- __ encode_heap_oop(tmp2, newval);
- newval = tmp2;
- }
-
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);
-
- // The membar here is necessary to prevent reordering between the
- // release store in the CAS above and a subsequent volatile load.
- // See also: LIR_Assembler::casw, LIR_Assembler::casl.
- __ membar(__ AnyAny);
-}
-
-#undef __
-
-#ifdef ASSERT
-#define __ gen->lir(__FILE__, __LINE__)->
-#else
-#define __ gen->lir()->
-#endif
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
- BasicType bt = access.type();
- if (access.is_oop()) {
- LIRGenerator *gen = access.gen();
- if (ShenandoahSATBBarrier) {
- pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
- LIR_OprFact::illegalOpr /* pre_val */);
- }
- if (ShenandoahCASBarrier) {
- cmp_value.load_item();
- new_value.load_item();
-
- LIR_Opr t1 = gen->new_register(T_OBJECT);
- LIR_Opr t2 = gen->new_register(T_OBJECT);
- LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
- LIR_Opr result = gen->new_register(T_INT);
-
- __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result));
-
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), new_value.result());
- }
- return result;
- }
- }
- return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
-}
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
- LIRGenerator* gen = access.gen();
- BasicType type = access.type();
-
- LIR_Opr result = gen->new_register(type);
- value.load_item();
- LIR_Opr value_opr = value.result();
-
- assert(type == T_INT || is_reference_type(type) LP64_ONLY( || type == T_LONG ), "unexpected type");
- LIR_Opr tmp = gen->new_register(T_INT);
- __ xchg(access.resolved_addr(), value_opr, result, tmp);
-
- if (access.is_oop()) {
- result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), access.decorators());
- LIR_Opr tmp = gen->new_register(type);
- __ move(result, tmp);
- result = tmp;
- if (ShenandoahSATBBarrier) {
- pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
- result /* pre_val */);
- }
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), result);
- }
- }
-
- return result;
-}
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index fdb016acf31a..8f5eb7027143 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -6735,13 +6735,14 @@ void MacroAssembler::java_round_float(Register dst, FloatRegister src,
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
-// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
+// On Linux and Windows, aarch64_get_thread_helper() is implemented in
+// assembly and clobbers only r0, r1, and flags.
// On other systems, the helper is a usual C function.
//
void MacroAssembler::get_thread(Register dst) {
RegSet saved_regs =
- LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
- NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
+ BSD_ONLY(RegSet::range(r0, r17) + lr - dst)
+ NOT_BSD (RegSet::range(r0, r1) + lr - dst);
protect_return_address();
push(saved_regs, sp);
diff --git a/src/hotspot/cpu/arm/arm_32.ad b/src/hotspot/cpu/arm/arm_32.ad
index 9438e8da8b59..2af7e253a1a8 100644
--- a/src/hotspot/cpu/arm/arm_32.ad
+++ b/src/hotspot/cpu/arm/arm_32.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -501,7 +501,7 @@ operand immIRotn() %{
%}
operand immPRot() %{
- predicate(n->get_ptr() == 0 || (AsmOperand::is_rotated_imm(n->get_ptr()) && ((ConPNode*)n)->type()->reloc() == relocInfo::none));
+ predicate(n->get_ptr() == 0 || (AsmOperand::is_rotated_imm(n->get_ptr()) && ((ConPNode*)n)->type()->is_ptr()->reloc() == relocInfo::none));
match(ConP);
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp
index 378e01fc1ccd..f62c93e466cf 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp
@@ -461,10 +461,6 @@ class Assembler : public AbstractAssembler {
FRIN_OPCODE = (63u << OPCODE_SHIFT | 392u << 1),
FRIP_OPCODE = (63u << OPCODE_SHIFT | 456u << 1),
FRIM_OPCODE = (63u << OPCODE_SHIFT | 488u << 1),
- // These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
- // on Power7. Do not use.
- // MFFGPR_OPCODE = (31u << OPCODE_SHIFT | 607u << 1),
- // MFTGPR_OPCODE = (31u << OPCODE_SHIFT | 735u << 1),
CMPB_OPCODE = (31u << OPCODE_SHIFT | 508 << 1),
POPCNTB_OPCODE = (31u << OPCODE_SHIFT | 122 << 1),
POPCNTW_OPCODE = (31u << OPCODE_SHIFT | 378 << 1),
@@ -518,7 +514,6 @@ class Assembler : public AbstractAssembler {
FSQRT_OPCODE = (63u << OPCODE_SHIFT | 22u << 1), // A-FORM
FSQRTS_OPCODE = (59u << OPCODE_SHIFT | 22u << 1), // A-FORM
- // Vector instruction support for >= Power6
// Vector Storage Access
LVEBX_OPCODE = (31u << OPCODE_SHIFT | 7u << 1),
LVEHX_OPCODE = (31u << OPCODE_SHIFT | 39u << 1),
@@ -1236,7 +1231,7 @@ class Assembler : public AbstractAssembler {
static int u( int x) { return opp_u_field(x, 19, 16); }
static int ui( int x) { return opp_u_field(x, 31, 16); }
- // Support vector instructions for >= Power6.
+ // Support vector instructions.
static int vra( int x) { return opp_u_field(x, 15, 11); }
static int vrb( int x) { return opp_u_field(x, 20, 16); }
static int vrc( int x) { return opp_u_field(x, 25, 21); }
@@ -2036,7 +2031,7 @@ class Assembler : public AbstractAssembler {
inline void stqcx_( Register s, Register a, Register b);
// Instructions for adjusting thread priority for simultaneous
- // multithreading (SMT) on Power5.
+ // multithreading (SMT).
private:
inline void smt_prio_very_low();
inline void smt_prio_medium_high();
@@ -2204,7 +2199,7 @@ class Assembler : public AbstractAssembler {
inline void fsqrt( FloatRegister d, FloatRegister b);
inline void fsqrts(FloatRegister d, FloatRegister b);
- // Vector instructions for >= Power6.
+ // Vector instructions.
inline void lvebx( VectorRegister d, Register s1, Register s2);
inline void lvehx( VectorRegister d, Register s1, Register s2);
inline void lvewx( VectorRegister d, Register s1, Register s2);
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
index d349bbc6f872..22b9e268dcdf 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
@@ -642,7 +642,6 @@ inline void Assembler::crorc( ConditionRegister crdst, Condition cdst, Condition
crorc(dst_bit, src_bit, dst_bit);
}
-// Conditional move (>= Power7)
inline void Assembler::isel(Register d, ConditionRegister cr, Condition cc, bool inv, Register a, Register b) {
if (b == noreg) {
b = d; // Can be omitted if old value should be kept in "else" case.
@@ -689,7 +688,7 @@ inline void Assembler::elemental_membar(int e) { assert(0 < e && e < 16, "invali
// Wait instructions for polling.
inline void Assembler::wait() { emit_int32( WAIT_OPCODE); }
-inline void Assembler::waitrsv() { emit_int32( WAIT_OPCODE | 1<<(31-10)); } // WC=0b01 >=Power7
+inline void Assembler::waitrsv() { emit_int32( WAIT_OPCODE | 1<<(31-10)); } // WC=0b01
// atomics
// Use ra0mem to disallow R0 as base.
@@ -709,19 +708,16 @@ inline void Assembler::stwcx_(Register s, Register a, Register b)
inline void Assembler::stdcx_(Register s, Register a, Register b) { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::stqcx_(Register s, Register a, Register b) { emit_int32( STQCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
-// Instructions for adjusting thread priority
-// for simultaneous multithreading (SMT) on >= POWER5.
+// Instructions for adjusting thread priority for simultaneous multithreading (SMT).
inline void Assembler::smt_prio_very_low() { Assembler::or_unchecked(R31, R31, R31); }
inline void Assembler::smt_prio_low() { Assembler::or_unchecked(R1, R1, R1); }
inline void Assembler::smt_prio_medium_low() { Assembler::or_unchecked(R6, R6, R6); }
inline void Assembler::smt_prio_medium() { Assembler::or_unchecked(R2, R2, R2); }
inline void Assembler::smt_prio_medium_high() { Assembler::or_unchecked(R5, R5, R5); }
-inline void Assembler::smt_prio_high() { Assembler::or_unchecked(R3, R3, R3); }
-// >= Power7
+inline void Assembler::smt_prio_high() { Assembler::or_unchecked(R3, R3, R3); } // Restricted to supervisor state since Power9.
inline void Assembler::smt_yield() { Assembler::or_unchecked(R27, R27, R27); } // never actually implemented
-inline void Assembler::smt_mdoio() { Assembler::or_unchecked(R29, R29, R29); } // never actually implemetned
+inline void Assembler::smt_mdoio() { Assembler::or_unchecked(R29, R29, R29); } // never actually implemented
inline void Assembler::smt_mdoom() { Assembler::or_unchecked(R30, R30, R30); } // never actually implemented
-// Power8
inline void Assembler::smt_miso() { Assembler::or_unchecked(R26, R26, R26); } // never actually implemented
inline void Assembler::twi_0(Register a) { twi_unchecked(0, a, 0);}
@@ -766,10 +762,6 @@ inline void Assembler::frin( FloatRegister d, FloatRegister b) { emit_int32( FRI
inline void Assembler::frip( FloatRegister d, FloatRegister b) { emit_int32( FRIP_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::frim( FloatRegister d, FloatRegister b) { emit_int32( FRIM_OPCODE | frt(d) | frb(b) | rc(0)); }
-// These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
-// on Power7. Do not use.
-//inline void Assembler::mffgpr( FloatRegister d, Register b) { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
-//inline void Assembler::mftgpr( Register d, FloatRegister b) { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
// add cmpb and popcntb to detect ppc power version.
inline void Assembler::cmpb( Register a, Register s, Register b) { emit_int32( CMPB_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::popcntb(Register a, Register s) { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
@@ -837,7 +829,7 @@ inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegis
inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
-// Vector instructions for >= Power6.
+// Vector instructions.
inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvehx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEHX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvewx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEWX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 0b48653ae64c..777b41577bed 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -210,7 +210,7 @@ int LIR_Assembler::emit_unwind_handler() {
_masm->block_comment("Unwind handler");
int offset = code_offset();
- bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
+ bool preserve_exception = method()->is_synchronized();
const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;
// Fetch the exception from TLS and clear out exception related thread state.
@@ -232,10 +232,6 @@ int LIR_Assembler::emit_unwind_handler() {
__ bind(*stub->continuation());
}
- if (compilation()->env()->dtrace_method_probes()) {
- Unimplemented();
- }
-
// Dispatch to the unwind logic.
address unwind_stub = Runtime1::entry_for(StubId::c1_unwind_exception_id);
//__ load_const_optimized(R0, unwind_stub);
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 4d7af0e4a711..359c7cf22ad6 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -232,13 +232,6 @@ void C1_MacroAssembler::initialize_object(
initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
}
- if (CURRENT_ENV->dtrace_alloc_probes()) {
- Unimplemented();
-// assert(obj == O0, "must be");
-// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id)),
-// relocInfo::runtime_call_type);
- }
-
verify_oop(obj, FILE_AND_LINE);
}
@@ -308,13 +301,6 @@ void C1_MacroAssembler::allocate_array(
initialize_body(base, index);
}
- if (CURRENT_ENV->dtrace_alloc_probes()) {
- Unimplemented();
- //assert(obj == O0, "must be");
- //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id)),
- // relocInfo::runtime_call_type);
- }
-
verify_oop(obj, FILE_AND_LINE);
}
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp
deleted file mode 100644
index 5b24259103f5..000000000000
--- a/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "asm/macroAssembler.inline.hpp"
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
-#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-
-#define __ masm->masm()->
-
-void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler *masm) {
- __ block_comment("LIR_OpShenandoahCompareAndSwap (shenandaohgc) {");
-
- Register addr = _addr->as_register_lo();
- Register new_val = _new_value->as_register();
- Register cmp_val = _cmp_value->as_register();
- Register tmp1 = _tmp1->as_register();
- Register tmp2 = _tmp2->as_register();
- Register result = result_opr()->as_register();
-
- if (UseCompressedOops) {
- __ encode_heap_oop(cmp_val, cmp_val);
- __ encode_heap_oop(new_val, new_val);
- }
-
- // There might be a volatile load before this Unsafe CAS.
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ sync();
- } else {
- __ lwsync();
- }
-
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmp_val, new_val, tmp1, tmp2,
- false, result);
-
- if (UseCompressedOops) {
- __ decode_heap_oop(cmp_val);
- __ decode_heap_oop(new_val);
- }
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ isync();
- } else {
- __ sync();
- }
-
- __ block_comment("} LIR_OpShenandoahCompareAndSwap (shenandaohgc)");
-}
-
-#undef __
-
-#ifdef ASSERT
-#define __ gen->lir(__FILE__, __LINE__)->
-#else
-#define __ gen->lir()->
-#endif
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess &access, LIRItem &cmp_value, LIRItem &new_value) {
- BasicType bt = access.type();
-
- if (access.is_oop()) {
- LIRGenerator* gen = access.gen();
-
- if (ShenandoahSATBBarrier) {
- pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
- LIR_OprFact::illegalOpr);
- }
-
- if (ShenandoahCASBarrier) {
- cmp_value.load_item();
- new_value.load_item();
-
- LIR_Opr t1 = gen->new_register(T_OBJECT);
- LIR_Opr t2 = gen->new_register(T_OBJECT);
- LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
- LIR_Opr result = gen->new_register(T_INT);
-
- __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result));
-
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), new_value.result());
- }
-
- return result;
- }
- }
-
- return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
-}
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess &access, LIRItem &value) {
- LIRGenerator* gen = access.gen();
- BasicType type = access.type();
-
- LIR_Opr result = gen->new_register(type);
- value.load_item();
- LIR_Opr value_opr = value.result();
-
- assert(type == T_INT || is_reference_type(type) LP64_ONLY( || type == T_LONG ), "unexpected type");
- LIR_Opr tmp_xchg = gen->new_register(T_INT);
- __ xchg(access.resolved_addr(), value_opr, result, tmp_xchg);
-
- if (access.is_oop()) {
- result = load_reference_barrier_impl(access.gen(), result, LIR_OprFact::addressConst(0),
- access.decorators());
-
- LIR_Opr tmp_barrier = gen->new_register(type);
- __ move(result, tmp_barrier);
- result = tmp_barrier;
-
- if (ShenandoahSATBBarrier) {
- pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, result);
- }
-
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), result);
- }
- }
-
- return result;
-}
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
index 43fd54eb78a4..82f3bb38012d 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
@@ -893,13 +893,11 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
Register tmp2 = stub->tmp2()->as_register();
assert_different_registers(addr, res, tmp1, tmp2);
-#ifdef ASSERT
- // Ensure that 'res' is 'R3_ARG1' and contains the same value as 'obj' to reduce the number of required
- // copy instructions.
assert(R3_RET == res, "res must be r3");
- __ cmpd(CR0, res, obj);
- __ asm_assert_eq("result register must contain the reference stored in obj");
-#endif
+
+ if (res != obj) {
+ __ mr(res, obj);
+ }
DecoratorSet decorators = stub->decorators();
@@ -1034,7 +1032,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
__ save_volatile_gprs(R1_SP, -nbytes_save, true, false);
// Load arguments from stack.
- // No load required, as assured by assertions in 'ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub'.
+ // No load required, as caller has already loaded obj into R3.
Register R3_obj = R3_ARG1;
Register R4_load_addr = R4_ARG2;
__ ld(R4_load_addr, -8, R1_SP);
diff --git a/src/hotspot/cpu/ppc/matcher_ppc.hpp b/src/hotspot/cpu/ppc/matcher_ppc.hpp
index 88a189d670eb..a3ab382564c2 100644
--- a/src/hotspot/cpu/ppc/matcher_ppc.hpp
+++ b/src/hotspot/cpu/ppc/matcher_ppc.hpp
@@ -54,7 +54,7 @@
// PowerPC requires masked shift counts.
static const bool need_masked_shift_count = true;
- // Power6 requires postalloc expand (see block.cpp for description of postalloc expand).
+ // PPC64 requires postalloc expand (see block.cpp for description of postalloc expand).
static const bool require_postalloc_expand = true;
// No support for generic vector operands.
@@ -157,7 +157,7 @@
// true means we have fast l2f conversion
static constexpr bool convL2FSupported(void) {
- // fcfids can do the conversion (>= Power7).
+ // fcfids can do the conversion.
// fcfid + frsp showed rounding problem when result should be 0x3f800001.
return true;
}
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 37f780535b4d..252425fb1045 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -3852,13 +3852,6 @@ void TemplateTable::_new() {
__ store_klass(RallocatedObject, RinstanceKlass, Rscratch);
}
- // Check and trigger dtrace event.
- if (DTraceAllocProbes) {
- __ push(atos);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast(SharedRuntime::dtrace_object_alloc)));
- __ pop(atos);
- }
-
__ b(Ldone);
}
diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
index 5097d7ec58d2..fd78b429ee4f 100644
--- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
@@ -228,7 +228,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
Assembler::IncompressibleScope scope(masm); // Fixed length: see entry_barrier_offset()
- Label local_guard;
+ Label local_guard, skip_barrier;
NMethodPatchingType patching_type = nmethod_patching_type();
if (slow_path == nullptr) {
@@ -290,24 +290,26 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
ShouldNotReachHere();
}
+ Label& barrier_target = slow_path == nullptr ? skip_barrier : *slow_path;
if (slow_path == nullptr) {
- Label skip_barrier;
- __ beq(t0, t1, skip_barrier);
+ __ beq(t0, t1, barrier_target, true /* is_far */);
+ } else {
+ __ bne(t0, t1, barrier_target, true /* is_far */);
+ }
+ if (slow_path == nullptr) {
__ rt_call(StubRoutines::method_entry_barrier());
-
__ j(skip_barrier);
__ bind(local_guard);
MacroAssembler::assert_alignment(__ pc());
__ emit_int32(0); // nmethod guard value. Skipped over in common case.
- __ bind(skip_barrier);
} else {
- __ beq(t0, t1, *continuation);
- __ j(*slow_path);
__ bind(*continuation);
}
+
+ __ bind(skip_barrier);
}
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
index 5003b9584a31..9b318dbe5799 100644
--- a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -41,17 +41,16 @@
static int slow_path_size(nmethod* nm) {
// The slow path code is out of line with C2.
- // Leave a jal to the stub in the fast path.
- return nm->is_compiled_by_c2() ? 1 : 8;
+ return nm->is_compiled_by_c2() ? 0 : 4;
}
static int entry_barrier_offset(nmethod* nm) {
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
switch (bs_asm->nmethod_patching_type()) {
case NMethodPatchingType::stw_instruction_and_data_patch:
- return -4 * (4 + slow_path_size(nm));
+ return -4 * (5 + slow_path_size(nm));
case NMethodPatchingType::conc_instruction_and_data_patch:
- return -4 * (15 + slow_path_size(nm));
+ return -4 * ((UseZtso ? 14 : 16) + slow_path_size(nm));
}
ShouldNotReachHere();
return 0;
@@ -103,6 +102,10 @@ class NativeNMethodBarrier {
}
_guard_addr = reinterpret_cast(instruction_address() + local_guard_offset(nm));
}
+
+ // Perform the checking as verification.
+ err_msg msg("%s", "");
+ assert(check_barrier(msg), "%s", msg.buffer());
}
int get_value() {
@@ -128,10 +131,6 @@ class NativeNMethodBarrier {
}
bool check_barrier(err_msg& msg) const;
- void verify() const {
- err_msg msg("%s", "");
- assert(check_barrier(msg), "%s", msg.buffer());
- }
};
// Store the instruction bitmask, bits and name for checking the barrier.
@@ -142,8 +141,8 @@ struct CheckInsn {
};
static const struct CheckInsn barrierInsn[] = {
- { 0x00000fff, 0x00000297, "auipc t0, 0 "},
- { 0x000fffff, 0x0002e283, "lwu t0, guard_offset(t0) "},
+ { 0x00000fff, 0x00000297, "auipc t0, 0 " },
+ { 0x000fffff, 0x0002e283, "lwu t0, guard_offset(t0)" },
/* ...... */
/* ...... */
/* guard: */
@@ -155,10 +154,11 @@ static const struct CheckInsn barrierInsn[] = {
// register numbers and immediate values in the encoding.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
address addr = instruction_address();
- for(unsigned int i = 0; i < sizeof(barrierInsn)/sizeof(struct CheckInsn); i++ ) {
+ for (unsigned int i = 0; i < sizeof(barrierInsn) / sizeof(struct CheckInsn); i++) {
uint32_t inst = Assembler::ld_instr(addr);
if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) {
- msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x not an %s instruction", p2i(addr), inst, barrierInsn[i].name);
+ msg.print("Nmethod entry barrier did not start with auipc & lwu as expected. "
+ "Addr: " INTPTR_FORMAT " Code: 0x%x not an %s instruction.", p2i(addr), inst, barrierInsn[i].name);
return false;
}
addr += 4;
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/c1/shenandoahBarrierSetC1_riscv.cpp b/src/hotspot/cpu/riscv/gc/shenandoah/c1/shenandoahBarrierSetC1_riscv.cpp
deleted file mode 100644
index 11c4e5dc81b6..000000000000
--- a/src/hotspot/cpu/riscv/gc/shenandoah/c1/shenandoahBarrierSetC1_riscv.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
-#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-
-#define __ masm->masm()->
-
-void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
- Register addr = _addr->as_register_lo();
- Register newval = _new_value->as_register();
- Register cmpval = _cmp_value->as_register();
- Register tmp1 = _tmp1->as_register();
- Register tmp2 = _tmp2->as_register();
- Register result = result_opr()->as_register();
-
- if (UseCompressedOops) {
- __ encode_heap_oop(tmp1, cmpval);
- cmpval = tmp1;
- __ encode_heap_oop(tmp2, newval);
- newval = tmp2;
- }
-
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /* acquire */ Assembler::aq,
- /* release */ Assembler::rl, /* is_cae */ false, result);
-}
-
-#undef __
-
-#ifdef ASSERT
-#define __ gen->lir(__FILE__, __LINE__)->
-#else
-#define __ gen->lir()->
-#endif
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
- BasicType bt = access.type();
- if (access.is_oop()) {
- LIRGenerator *gen = access.gen();
- if (ShenandoahSATBBarrier) {
- pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
- LIR_OprFact::illegalOpr /* pre_val */);
- }
- if (ShenandoahCASBarrier) {
- cmp_value.load_item();
- new_value.load_item();
-
- LIR_Opr tmp1 = gen->new_register(T_OBJECT);
- LIR_Opr tmp2 = gen->new_register(T_OBJECT);
- LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
- LIR_Opr result = gen->new_register(T_INT);
-
- __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), tmp1, tmp2, result));
-
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), new_value.result());
- }
- return result;
- }
- }
-
- return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
-}
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
- LIRGenerator* gen = access.gen();
- BasicType type = access.type();
-
- LIR_Opr result = gen->new_register(type);
- value.load_item();
- LIR_Opr value_opr = value.result();
-
- assert(type == T_INT || is_reference_type(type) LP64_ONLY( || type == T_LONG ), "unexpected type");
- LIR_Opr tmp = gen->new_register(T_INT);
- __ xchg(access.resolved_addr(), value_opr, result, tmp);
-
- if (access.is_oop()) {
- result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), access.decorators());
- LIR_Opr tmp_opr = gen->new_register(type);
- __ move(result, tmp_opr);
- result = tmp_opr;
- if (ShenandoahSATBBarrier) {
- pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
- result /* pre_val */);
- }
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), result);
- }
- }
-
- return result;
-}
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index e1d8d062c238..38698370faa2 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -2884,10 +2884,23 @@ void LIR_Assembler::on_spin_wait() {
}
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
- assert(patch_code == lir_patch_none, "Patch code not supported");
+ assert(addr_opr->is_address(), "must be an address");
+ assert(dest->is_register(), "must be a register");
+
LIR_Address* addr = addr_opr->as_address_ptr();
+ Register reg = dest->as_pointer_register();
assert(addr->scale() == LIR_Address::times_1, "scaling unsupported");
- __ load_address(dest->as_pointer_register(), as_Address(addr));
+
+ if (addr->index()->is_illegal() && patch_code != lir_patch_none) {
+ PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
+
+ // TODO: Use load_const_32to64 here by extending NativeMovRegMem to support both instruction patterns.
+ __ load_const(Z_R0_scratch, (intptr_t)0);
+ __ z_agrk(reg, addr->base()->as_pointer_register(), Z_R0_scratch);
+ patching_epilog(patch, patch_code, addr->base()->as_register(), info);
+ } else {
+ __ load_address(reg, as_Address(addr));
+ }
}
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
diff --git a/src/hotspot/cpu/s390/registerMap_s390.cpp b/src/hotspot/cpu/s390/registerMap_s390.cpp
new file mode 100644
index 000000000000..85a49ff1d603
--- /dev/null
+++ b/src/hotspot/cpu/s390/registerMap_s390.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026 IBM Corp. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "runtime/registerMap.hpp"
+
+address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
+ if (base_reg->is_VectorRegister()) {
+ // Not all physical slots belonging to a VectorRegister have corresponding
+ // valid VMReg locations in the RegisterMap.
+ // (See RegisterSaver::save_live_registers.)
+ // However, the slots are always saved to the stack in a contiguous region
+ // of memory so we can calculate the address of the upper slots by
+ // offsetting from the base address.
+ assert(base_reg->is_concrete(), "must pass base reg");
+ address base_location = location(base_reg, nullptr);
+ if (base_location != nullptr) {
+ intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
+ return base_location + offset_in_bytes;
+ } else {
+ return nullptr;
+ }
+ } else {
+ return location(base_reg->next(slot_idx), nullptr);
+ }
+}
diff --git a/src/hotspot/cpu/s390/registerMap_s390.hpp b/src/hotspot/cpu/s390/registerMap_s390.hpp
index 827e3b44e046..9069fb1e31d3 100644
--- a/src/hotspot/cpu/s390/registerMap_s390.hpp
+++ b/src/hotspot/cpu/s390/registerMap_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,9 +36,7 @@
// Since there is none, we just return null.
address pd_location(VMReg reg) const {return nullptr;}
- address pd_location(VMReg base_reg, int slot_idx) const {
- return location(base_reg->next(slot_idx), nullptr);
- }
+ address pd_location(VMReg base_reg, int slot_idx) const;
// No PD state to clear or copy.
void pd_clear() {}
diff --git a/src/hotspot/cpu/s390/registerSaver_s390.hpp b/src/hotspot/cpu/s390/registerSaver_s390.hpp
index a049f8b581b2..2d3c35250ba8 100644
--- a/src/hotspot/cpu/s390/registerSaver_s390.hpp
+++ b/src/hotspot/cpu/s390/registerSaver_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -58,7 +58,7 @@ class RegisterSaver {
// During deoptimization only the result register need to be restored
// all the other values have already been extracted.
- static void restore_result_registers(MacroAssembler* masm);
+ static void restore_result_registers(MacroAssembler* masm, bool save_vectors);
// Constants and data structures:
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 00a830a80cd9..e5a27e66968b 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -402,9 +402,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg
break;
}
- // Second set_callee_saved is really a waste but we'll keep things as they were for now
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), live_regs[i].vmreg);
- map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size) >> 2), live_regs[i].vmreg->next());
}
assert(first != noreg, "Should spill at least one int reg.");
__ z_stmg(first, last, first_offset, Z_SP);
@@ -416,12 +414,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg
map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
RegisterSaver_LiveVRegs[i].vmreg);
- map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size ) >> 2),
- RegisterSaver_LiveVRegs[i].vmreg->next());
- map->set_callee_saved(VMRegImpl::stack2reg((offset + (half_reg_size * 2)) >> 2),
- RegisterSaver_LiveVRegs[i].vmreg->next(2));
- map->set_callee_saved(VMRegImpl::stack2reg((offset + (half_reg_size * 3)) >> 2),
- RegisterSaver_LiveVRegs[i].vmreg->next(3));
}
assert(offset == frame_size_in_bytes, "consistency check");
@@ -473,7 +465,6 @@ OopMap* RegisterSaver::generate_oop_map(MacroAssembler* masm, RegisterSet reg_se
for (int i = 0; i < regstosave_num; i++) {
if (live_regs[i].reg_type < RegisterSaver::excluded_reg) {
map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), live_regs[i].vmreg);
- map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2), live_regs[i].vmreg->next());
}
offset += reg_size;
}
@@ -580,10 +571,12 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, RegisterSet reg
// Pop the current frame and restore the registers that might be holding a result.
-void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
+void RegisterSaver::restore_result_registers(MacroAssembler* masm, bool save_vectors) {
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
sizeof(RegisterSaver::LiveRegType);
- const int register_save_offset = live_reg_frame_size(all_registers) - live_reg_save_size(all_registers);
+ const int vecregstosave_num = save_vectors ? calculate_vregstosave_num() : 0;
+ const int vreg_save_size = vecregstosave_num * v_reg_size;
+ const int register_save_offset = live_reg_frame_size(all_registers, save_vectors) - (live_reg_save_size(all_registers) + vreg_save_size);
// Restore all result registers (ints and floats).
int offset = register_save_offset;
@@ -609,7 +602,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
ShouldNotReachHere();
}
}
- assert(offset == live_reg_frame_size(all_registers), "consistency check");
+ assert(offset == live_reg_frame_size(all_registers, save_vectors) - (save_vectors ? vreg_save_size : 0) , "consistency check");
}
// ---------------------------------------------------------------------------
@@ -2557,7 +2550,7 @@ void SharedRuntime::generate_deopt_blob() {
// nmethod that was valid just before the nmethod was deoptimized.
// save R14 into the deoptee frame. the `fetch_unroll_info'
// procedure called below will read it from there.
- map = RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers);
+ map = RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers, Z_R14, /* save_vectors= */ SuperwordUseVX);
// note the entry point.
__ load_const_optimized(exec_mode_reg, Deoptimization::Unpack_deopt);
@@ -2573,7 +2566,7 @@ void SharedRuntime::generate_deopt_blob() {
int reexecute_offset = __ offset() - start_off;
// No need to update map as each call to save_live_registers will produce identical oopmap
- (void) RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers);
+ (void) RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers, Z_R14, /* save_vectors= */ SuperwordUseVX);
__ load_const_optimized(exec_mode_reg, Deoptimization::Unpack_reexecute);
__ z_bru(exec_mode_initialized);
@@ -2611,7 +2604,7 @@ void SharedRuntime::generate_deopt_blob() {
__ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::exception_pc_offset()));
// Save everything in sight.
- (void) RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers, Z_R1_scratch);
+ (void) RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers, Z_R1_scratch, /* save_vectors= */ SuperwordUseVX);
// Now it is safe to overwrite any register
@@ -2661,7 +2654,7 @@ void SharedRuntime::generate_deopt_blob() {
__ z_lgr(unroll_block_reg, Z_RET);
// restore the return registers that have been saved
// (among other registers) by save_live_registers(...).
- RegisterSaver::restore_result_registers(masm);
+ RegisterSaver::restore_result_registers(masm, /* save_vectors= */ SuperwordUseVX);
// reload the exec mode from the UnrollBlock (it might have changed)
__ z_llgf(exec_mode_reg, Address(unroll_block_reg, Deoptimization::UnrollBlock::unpack_kind_offset()));
@@ -2737,7 +2730,7 @@ void SharedRuntime::generate_deopt_blob() {
// Make sure all code is generated
masm->flush();
- _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers)/wordSize);
+ _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers, SuperwordUseVX)/wordSize);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
diff --git a/src/hotspot/cpu/s390/vm_version_s390.cpp b/src/hotspot/cpu/s390/vm_version_s390.cpp
index 7e9000991cae..c3f981f159a2 100644
--- a/src/hotspot/cpu/s390/vm_version_s390.cpp
+++ b/src/hotspot/cpu/s390/vm_version_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,8 +24,9 @@
*/
#include "asm/assembler.inline.hpp"
-#include "compiler/disassembler.hpp"
#include "code/compiledIC.hpp"
+#include "compiler/compilerDefinitions.inline.hpp"
+#include "compiler/disassembler.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
@@ -105,7 +106,7 @@ void VM_Version::initialize() {
int model_ix = get_model_index();
if ( model_ix >= 7 ) {
- if (FLAG_IS_DEFAULT(SuperwordUseVX)) {
+ if (FLAG_IS_DEFAULT(SuperwordUseVX) && CompilerConfig::is_c2_enabled()) {
FLAG_SET_ERGO(SuperwordUseVX, true);
}
if (model_ix > 7 && FLAG_IS_DEFAULT(UseSFPV) && SuperwordUseVX) {
diff --git a/src/hotspot/cpu/s390/vmreg_s390.hpp b/src/hotspot/cpu/s390/vmreg_s390.hpp
index 517fb8e2130b..5fb5b7b40b10 100644
--- a/src/hotspot/cpu/s390/vmreg_s390.hpp
+++ b/src/hotspot/cpu/s390/vmreg_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -59,7 +59,12 @@ inline VectorRegister as_VectorRegister() {
inline bool is_concrete() {
assert(is_reg(), "must be");
- return is_even(value());
+ if (is_Register() || is_FloatRegister()) return is_even(value());
+ if (is_VectorRegister()) {
+ int base = value() - ConcreteRegisterImpl::max_fpr;
+ return (base & 3) == 0;
+ }
+ return true;
}
#endif // CPU_S390_VMREG_S390_HPP
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index 0c8dd85b15d7..39e7f1734f11 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -15091,7 +15091,6 @@ void Assembler::cdqe() {
}
void Assembler::clflush(Address adr) {
- assert(VM_Version::supports_clflush(), "should do");
prefix(adr, true /* is_map1 */);
emit_int8((unsigned char)0xAE);
emit_operand(rdi, adr, 0);
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 3c4659934c60..69308bb2a7e8 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -483,7 +483,7 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
// Try to unlock. Transition lock bits 0b00 => 0b01
movptr(reg_rax, mark);
- andptr(reg_rax, ~(int32_t)markWord::lock_mask);
+ andptr(reg_rax, ~(int32_t)markWord::lock_mask_in_place);
orptr(mark, markWord::unlocked_value);
lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
jcc(Assembler::notEqual, push_and_slow_path);
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp
deleted file mode 100644
index 66fb4cbb8c78..000000000000
--- a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
- * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "c1/c1_LIRAssembler.hpp"
-#include "c1/c1_MacroAssembler.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
-#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-
-#define __ masm->masm()->
-
-void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
- Register addr = _addr->is_single_cpu() ? _addr->as_register() : _addr->as_register_lo();
- Register newval = _new_value->as_register();
- Register cmpval = _cmp_value->as_register();
- Register tmp1 = _tmp1->as_register();
- Register tmp2 = _tmp2->as_register();
- Register result = result_opr()->as_register();
- assert(cmpval == rax, "wrong register");
- assert(newval != noreg, "new val must be register");
- assert(cmpval != newval, "cmp and new values must be in different registers");
- assert(cmpval != addr, "cmp and addr must be in different registers");
- assert(newval != addr, "new value and addr must be in different registers");
-
- if (UseCompressedOops) {
- __ encode_heap_oop(cmpval);
- __ mov(rscratch1, newval);
- __ encode_heap_oop(rscratch1);
- newval = rscratch1;
- }
-
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), result, Address(addr, 0), cmpval, newval, false, tmp1, tmp2);
-}
-
-#undef __
-
-#ifdef ASSERT
-#define __ gen->lir(__FILE__, __LINE__)->
-#else
-#define __ gen->lir()->
-#endif
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
-
- if (access.is_oop()) {
- LIRGenerator* gen = access.gen();
- if (ShenandoahSATBBarrier) {
- pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
- LIR_OprFact::illegalOpr /* pre_val */);
- }
- if (ShenandoahCASBarrier) {
- cmp_value.load_item_force(FrameMap::rax_oop_opr);
- new_value.load_item();
-
- LIR_Opr t1 = gen->new_register(T_OBJECT);
- LIR_Opr t2 = gen->new_register(T_OBJECT);
- LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
- LIR_Opr result = gen->new_register(T_INT);
-
- __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result));
-
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), new_value.result());
- }
- return result;
- }
- }
- return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
-}
-
-LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
- LIRGenerator* gen = access.gen();
- BasicType type = access.type();
-
- LIR_Opr result = gen->new_register(type);
- value.load_item();
- LIR_Opr value_opr = value.result();
-
- // Because we want a 2-arg form of xchg and xadd
- __ move(value_opr, result);
-
- assert(type == T_INT || is_reference_type(type) || type == T_LONG, "unexpected type");
- __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);
-
- if (access.is_oop()) {
- result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), access.decorators());
- LIR_Opr tmp = gen->new_register(type);
- __ move(result, tmp);
- result = tmp;
- if (ShenandoahSATBBarrier) {
- pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
- result /* pre_val */);
- }
- if (ShenandoahCardBarrier) {
- post_barrier(access, access.resolved_addr(), result);
- }
- }
-
- return result;
-}
diff --git a/src/hotspot/cpu/x86/globals_x86.hpp b/src/hotspot/cpu/x86/globals_x86.hpp
index 6de467527906..c00cfba698fa 100644
--- a/src/hotspot/cpu/x86/globals_x86.hpp
+++ b/src/hotspot/cpu/x86/globals_x86.hpp
@@ -99,7 +99,7 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
\
product(int, UseSSE, 4, \
"Highest supported SSE instructions set on x86/x64") \
- range(0, 4) \
+ range(2, 4) \
\
product(int, UseAVX, 3, \
"Highest supported AVX instructions set on x86/x64") \
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index f64c4d3f086e..b250073be7cf 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -5355,12 +5355,10 @@ void MacroAssembler::print_CPU_state() {
void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed (with -Xcheck:jni flag).
- if (VM_Version::supports_sse()) {
- if (RestoreMXCSROnJNICalls) {
- ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
- } else if (CheckJNICalls) {
- call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
- }
+ if (RestoreMXCSROnJNICalls) {
+ ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
+ } else if (CheckJNICalls) {
+ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
// Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
vzeroupper();
@@ -9811,7 +9809,6 @@ void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
void MacroAssembler::cache_wb(Address line)
{
// 64 bit cpus always support clflush
- assert(VM_Version::supports_clflush(), "clflush should be available");
bool optimized = VM_Version::supports_clflushopt();
bool no_evict = VM_Version::supports_clwb();
@@ -9833,7 +9830,6 @@ void MacroAssembler::cache_wb(Address line)
void MacroAssembler::cache_wbsync(bool is_pre)
{
- assert(VM_Version::supports_clflush(), "clflush should be available");
bool optimized = VM_Version::supports_clflushopt();
bool no_evict = VM_Version::supports_clwb();
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index 80d88f2ecb8f..46d0a41641e3 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -33,6 +33,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals_extension.hpp"
+#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
@@ -80,20 +81,6 @@ static detect_virt_stub_t detect_virt_stub = nullptr;
static clear_apx_test_state_t clear_apx_test_state_stub = nullptr;
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;
-bool VM_Version::supports_clflush() {
- // clflush should always be available on x86_64
- // if not we are in real trouble because we rely on it
- // to flush the code cache.
- // Unfortunately, Assembler::clflush is currently called as part
- // of generation of the code cache flush routine. This happens
- // under Universe::init before the processor features are set
- // up. Assembler::flush calls this routine to check that clflush
- // is allowed. So, we give the caller a free pass if Universe init
- // is still in progress.
- assert ((!Universe::is_fully_initialized() || _features.supports_feature(CPU_FLUSH)), "clflush should be available");
- return true;
-}
-
#define CPUID_STANDARD_FN 0x0
#define CPUID_STANDARD_FN_1 0x1
#define CPUID_STANDARD_FN_4 0x4
@@ -511,7 +498,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// and check upper YMM/ZMM bits after it.
//
int saved_useavx = UseAVX;
- int saved_usesse = UseSSE;
// If UseAVX is uninitialized or is set by the user to include EVEX
if (use_evex) {
@@ -542,7 +528,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// EVEX setup: run in lowest evex mode
VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 3;
- UseSSE = 2;
#ifdef _WINDOWS
// xmm5-xmm15 are not preserved by caller on windows
// https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
@@ -569,7 +554,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// AVX setup
VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 1;
- UseSSE = 2;
#ifdef _WINDOWS
__ subptr(rsp, 32);
__ vmovdqu(Address(rsp, 0), xmm7);
@@ -623,7 +607,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// EVEX check: run in lowest evex mode
VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 3;
- UseSSE = 2;
__ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
__ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
__ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
@@ -641,7 +624,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
generate_vzeroupper(wrapup);
VM_Version::clean_cpuFeatures();
UseAVX = saved_useavx;
- UseSSE = saved_usesse;
__ jmp(wrapup);
}
@@ -649,7 +631,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// AVX check
VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 1;
- UseSSE = 2;
__ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
__ vmovdqu(Address(rsi, 0), xmm0);
__ vmovdqu(Address(rsi, 32), xmm7);
@@ -668,7 +649,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
generate_vzeroupper(wrapup);
VM_Version::clean_cpuFeatures();
UseAVX = saved_useavx;
- UseSSE = saved_usesse;
__ bind(wrapup);
__ popf();
@@ -905,25 +885,6 @@ void VM_Version::get_processor_features() {
_supports_atomic_getset8 = true;
_supports_atomic_getadd8 = true;
- // OS should support SSE for x64 and hardware should support at least SSE2.
- if (!VM_Version::supports_sse2()) {
- vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
- }
- // in 64 bit the use of SSE2 is the minimum
- if (UseSSE < 2) UseSSE = 2;
-
- // flush_icache_stub have to be generated first.
- // That is why Icache line size is hard coded in ICache class,
- // see icache_x86.hpp. It is also the reason why we can't use
- // clflush instruction in 32-bit VM since it could be running
- // on CPU which does not support it.
- //
- // The only thing we can do is to verify that flushed
- // ICache::line_size has correct value.
- guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
- // clflush_size is size in quadwords (8 bytes).
- guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
-
// assigning this field effectively enables Unsafe.writebackMemory()
// by initing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
// that is only implemented on x86_64 and only if the OS plays ball
@@ -952,12 +913,6 @@ void VM_Version::get_processor_features() {
clear_feature(CPU_SSE4A);
}
- if (UseSSE < 2)
- clear_feature(CPU_SSE2);
-
- if (UseSSE < 1)
- clear_feature(CPU_SSE);
-
// ZX cpus specific settings
if (is_zx() && FLAG_IS_DEFAULT(UseAVX)) {
if (cpu_family() == 7) {
@@ -972,21 +927,13 @@ void VM_Version::get_processor_features() {
}
// UseSSE is set to the smaller of what hardware supports and what
- // the command line requires. I.e., you cannot set UseSSE to 2 on
- // older Pentiums which do not support it.
- int use_sse_limit = 0;
- if (UseSSE > 0) {
- if (UseSSE > 3 && supports_sse4_1()) {
- use_sse_limit = 4;
- } else if (UseSSE > 2 && supports_sse3()) {
- use_sse_limit = 3;
- } else if (UseSSE > 1 && supports_sse2()) {
- use_sse_limit = 2;
- } else if (UseSSE > 0 && supports_sse()) {
- use_sse_limit = 1;
- } else {
- use_sse_limit = 0;
- }
+ // the command line requires. i.e., you cannot set UseSSE to 4 on
+ // older systems which do not support it.
+ int use_sse_limit = 2;
+ if (UseSSE > 3 && supports_sse4_1()) {
+ use_sse_limit = 4;
+ } else if (UseSSE > 2 && supports_sse3()) {
+ use_sse_limit = 3;
}
if (FLAG_IS_DEFAULT(UseSSE)) {
FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
@@ -1150,7 +1097,6 @@ void VM_Version::get_processor_features() {
_has_intel_jcc_erratum = IntelJccErratumMitigation;
}
- assert(supports_clflush(), "Always present");
if (X86ICacheSync == -1) {
// Auto-detect, choosing the best performant one that still flushes
// the cache. We could switch to CPUID/SERIALIZE ("4"/"5") going forward.
@@ -1535,7 +1481,7 @@ void VM_Version::get_processor_features() {
}
if (is_amd_family()) { // AMD cpus specific settings
- if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
+ if (FLAG_IS_DEFAULT(UseAddressNop)) {
// Use it on new AMD cpus starting from Opteron.
UseAddressNop = true;
}
@@ -1578,7 +1524,7 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
- if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+ if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
}
@@ -1594,7 +1540,7 @@ void VM_Version::get_processor_features() {
if (cpu_family() >= 0x17) {
// On family >=17h processors use XMM and UnalignedLoadStores
// for Array Copy
- if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+ if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
#ifdef COMPILER2
@@ -1796,8 +1742,6 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
- } else if (!supports_sse() && supports_3dnow_prefetch()) {
- FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
}
@@ -2889,29 +2833,23 @@ int64_t VM_Version::maximum_qualified_cpu_frequency(void) {
VM_Version::VM_Features VM_Version::CpuidInfo::feature_flags() const {
VM_Features vm_features;
+
+ // check the features that must be present
+ guarantee(std_cpuid1_edx.bits.sse2 != 0, "sse2 is not supported");
+ guarantee(std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
+ // clflush_size is size in quadwords (8 bytes).
+ guarantee(std_cpuid1_ebx.bits.clflush_size == ICache::line_size/8, "clflush size is not supported");
+
if (std_cpuid1_edx.bits.cmpxchg8 != 0)
vm_features.set_feature(CPU_CX8);
if (std_cpuid1_edx.bits.cmov != 0)
vm_features.set_feature(CPU_CMOV);
- if (std_cpuid1_edx.bits.clflush != 0)
- vm_features.set_feature(CPU_FLUSH);
- // clflush should always be available on x86_64
- // if not we are in real trouble because we rely on it
- // to flush the code cache.
- assert (vm_features.supports_feature(CPU_FLUSH), "clflush should be available");
if (std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
ext_cpuid1_edx.bits.fxsr != 0))
vm_features.set_feature(CPU_FXSR);
// HT flag is set for multi-core processors also.
if (threads_per_core() > 1)
vm_features.set_feature(CPU_HT);
- if (std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
- ext_cpuid1_edx.bits.mmx != 0))
- vm_features.set_feature(CPU_MMX);
- if (std_cpuid1_edx.bits.sse != 0)
- vm_features.set_feature(CPU_SSE);
- if (std_cpuid1_edx.bits.sse2 != 0)
- vm_features.set_feature(CPU_SSE2);
if (std_cpuid1_ecx.bits.sse3 != 0)
vm_features.set_feature(CPU_SSE3);
if (std_cpuid1_ecx.bits.ssse3 != 0)
@@ -3243,17 +3181,9 @@ int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) {
// It will be used only when AllocatePrefetchStyle > 0
if (is_amd_family()) { // AMD | Hygon
- if (supports_sse2()) {
- return 256; // Opteron
- } else {
- return 128; // Athlon
- }
+ return 256; // Opteron
} else if (is_zx()) {
- if (supports_sse2()) {
- return 256;
- } else {
- return 128;
- }
+ return 256;
} else { // Intel
if (supports_sse3() && is_intel_server_family()) {
if (is_intel_modern_cpu()) { // Nehalem based cpus
@@ -3262,14 +3192,10 @@ int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) {
return 384;
}
}
- if (supports_sse2()) {
- if (is_intel_server_family()) {
- return 256; // Pentium M, Core, Core2
- } else {
- return 512; // Pentium 4
- }
+ if (is_intel_server_family()) {
+ return 256; // Pentium M, Core, Core2
} else {
- return 128; // Pentium 3 (and all other old CPUs)
+ return 512; // Pentium 4
}
}
}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index fe6d424f50c2..a9bdeae41f1b 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -381,58 +381,43 @@ class VM_Version : public Abstract_VM_Version {
decl(CMOV, cmov ) \
decl(FXSR, fxsr ) \
decl(HT, ht ) \
- \
- decl(MMX, mmx ) \
decl(3DNOW_PREFETCH, 3dnowpref ) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
/* may not necessarily support other 3dnow instructions */ \
- decl(SSE, sse ) \
- decl(SSE2, sse2 ) \
- \
decl(SSE3, sse3 ) /* SSE3 comes from cpuid 1 (ECX) */ \
decl(SSSE3, ssse3 ) \
decl(SSE4A, sse4a ) \
decl(SSE4_1, sse4.1 ) \
- \
decl(SSE4_2, sse4.2 ) \
decl(POPCNT, popcnt ) \
decl(LZCNT, lzcnt ) \
decl(TSC, tsc ) \
- \
decl(TSCINV_BIT, tscinvbit ) \
decl(TSCINV, tscinv ) \
decl(AVX, avx ) \
decl(AVX2, avx2 ) \
- \
decl(AES, aes ) \
decl(ERMS, erms ) /* enhanced 'rep movsb/stosb' instructions */ \
decl(CLMUL, clmul ) /* carryless multiply for CRC */ \
decl(BMI1, bmi1 ) \
- \
decl(BMI2, bmi2 ) \
decl(RTM, rtm ) /* Restricted Transactional Memory instructions */ \
decl(ADX, adx ) \
decl(AVX512F, avx512f ) /* AVX 512bit foundation instructions */ \
- \
decl(AVX512DQ, avx512dq ) \
decl(AVX512PF, avx512pf ) \
decl(AVX512ER, avx512er ) \
decl(AVX512CD, avx512cd ) \
- \
decl(AVX512BW, avx512bw ) /* Byte and word vector instructions */ \
decl(AVX512VL, avx512vl ) /* EVEX instructions with smaller vector length */ \
decl(SHA, sha ) /* SHA instructions */ \
decl(FMA, fma ) /* FMA instructions */ \
- \
decl(VZEROUPPER, vzeroupper ) /* Vzeroupper instruction */ \
decl(AVX512_VPOPCNTDQ, avx512_vpopcntdq ) /* Vector popcount */ \
decl(AVX512_VPCLMULQDQ, avx512_vpclmulqdq ) /* Vector carryless multiplication */ \
decl(AVX512_VAES, avx512_vaes ) /* Vector AES instruction */ \
- \
decl(AVX512_VNNI, avx512_vnni ) /* Vector Neural Network Instructions */ \
- decl(FLUSH, clflush ) /* flush instruction */ \
decl(FLUSHOPT, clflushopt ) /* flusopth instruction */ \
decl(CLWB, clwb ) /* clwb instruction */ \
- \
decl(AVX512_VBMI2, avx512_vbmi2 ) /* VBMI2 shift left double instructions */ \
decl(AVX512_VBMI, avx512_vbmi ) /* Vector BMI instructions */ \
decl(HV, hv ) /* Hypervisor instructions */ \
@@ -790,16 +775,12 @@ class VM_Version : public Abstract_VM_Version {
VM_Version::clear_cpu_features();
}
static void set_avx_cpuFeatures() {
- _features.set_feature(CPU_SSE);
- _features.set_feature(CPU_SSE2);
_features.set_feature(CPU_AVX);
_features.set_feature(CPU_VZEROUPPER);
}
static void set_evex_cpuFeatures() {
_features.set_feature(CPU_AVX10_1);
_features.set_feature(CPU_AVX512F);
- _features.set_feature(CPU_SSE);
- _features.set_feature(CPU_SSE2);
_features.set_feature(CPU_VZEROUPPER);
}
static void set_apx_cpuFeatures() {
@@ -869,9 +850,6 @@ class VM_Version : public Abstract_VM_Version {
static bool supports_cmov() { return _features.supports_feature(CPU_CMOV); }
static bool supports_fxsr() { return _features.supports_feature(CPU_FXSR); }
static bool supports_ht() { return _features.supports_feature(CPU_HT); }
- static bool supports_mmx() { return _features.supports_feature(CPU_MMX); }
- static bool supports_sse() { return _features.supports_feature(CPU_SSE); }
- static bool supports_sse2() { return _features.supports_feature(CPU_SSE2); }
static bool supports_sse3() { return _features.supports_feature(CPU_SSE3); }
static bool supports_ssse3() { return _features.supports_feature(CPU_SSSE3); }
static bool supports_sse4_1() { return _features.supports_feature(CPU_SSE4_1); }
@@ -1010,10 +988,10 @@ class VM_Version : public Abstract_VM_Version {
static int allocate_prefetch_distance(bool use_watermark_prefetch);
- // SSE2 and later processors implement a 'pause' instruction
- // that can be used for efficient implementation of
- // the intrinsic for java.lang.Thread.onSpinWait()
- static bool supports_on_spin_wait() { return supports_sse2(); }
+ // All currently supported processors support PAUSE instruction
+ // that can be used for efficient implementation of intrinsic for
+ // java.lang.Thread.onSpinWait().
+ static bool supports_on_spin_wait() { return true; }
// x86_64 supports fast class initialization checks
static bool supports_fast_class_init_checks() {
@@ -1046,7 +1024,6 @@ class VM_Version : public Abstract_VM_Version {
// pending in-cache changes.
//
// 64 bit cpus always support clflush which writes back and evicts
- // on 32 bit cpus support is recorded via a feature flag
//
// clflushopt is optional and acts like clflush except it does
// not synchronize with other memory ops. it needs a preceding
@@ -1057,8 +1034,6 @@ class VM_Version : public Abstract_VM_Version {
// synchronize with other memory ops. so, it needs preceding
// and trailing StoreStore fences.
- static bool supports_clflush(); // Can't inline due to header file conflict
-
// Note: CPU_FLUSHOPT and CPU_CLWB bits should always be zero for 32-bit
static bool supports_clflushopt() { return (_features.supports_feature(CPU_FLUSHOPT)); }
static bool supports_clwb() { return (_features.supports_feature(CPU_CLWB)); }
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index f99d1ea9d48d..db87f81d6c41 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -4950,7 +4950,7 @@ operand immN0() %{
operand immP31()
%{
- predicate(n->as_Type()->type()->reloc() == relocInfo::none
+ predicate(n->as_Type()->type()->is_ptr()->reloc() == relocInfo::none
&& (n->get_ptr() >> 31) == 0);
match(ConP);
@@ -16337,7 +16337,7 @@ instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
// and raw pointers have no anti-dependencies.
instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
%{
- predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none &&
+ predicate(n->in(2)->in(2)->bottom_type()->isa_rawptr() != nullptr &&
n->in(2)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP op1 (LoadP op2)));
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index b46cc6443938..fc5b9952f78a 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1123,7 +1123,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
-void *os::Bsd::dlopen_helper(const char *filename, int mode, char *ebuf, int ebuflen) {
+static void *dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
bool ieee_handling = IEEE_subnormal_handling_OK();
if (!ieee_handling) {
Events::log_dll_message(nullptr, "IEEE subnormal handling check failed before loading %s", filename);
@@ -1207,7 +1207,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
log_info(os)("attempting shared library load of %s", filename);
- return os::Bsd::dlopen_helper(filename, RTLD_LAZY, ebuf, ebuflen);
+ return dlopen_helper(filename, ebuf, ebuflen);
}
#else
void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
@@ -1218,7 +1218,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
log_info(os)("attempting shared library load of %s", filename);
void* result;
- result = os::Bsd::dlopen_helper(filename, RTLD_LAZY, ebuf, ebuflen);
+ result = dlopen_helper(filename, ebuf, ebuflen);
if (result != nullptr) {
return result;
}
diff --git a/src/hotspot/os/bsd/os_bsd.hpp b/src/hotspot/os/bsd/os_bsd.hpp
index e87a680b2d2f..91fcb090f503 100644
--- a/src/hotspot/os/bsd/os_bsd.hpp
+++ b/src/hotspot/os/bsd/os_bsd.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,8 +76,6 @@ class os::Bsd {
// Real-time clock functions
static void clock_init(void);
- static void *dlopen_helper(const char *path, int mode, char *ebuf, int ebuflen);
-
// Stack repair handling
// none present
@@ -105,6 +103,7 @@ class os::Bsd {
static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
+
public:
static int sched_getcpu() { return _sched_getcpu != nullptr ? _sched_getcpu() : -1; }
static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
diff --git a/src/hotspot/os/windows/os_perf_windows.cpp b/src/hotspot/os/windows/os_perf_windows.cpp
index 59ea83b91488..d083c72c2e03 100644
--- a/src/hotspot/os/windows/os_perf_windows.cpp
+++ b/src/hotspot/os/windows/os_perf_windows.cpp
@@ -779,6 +779,114 @@ static OSReturn allocate_pdh_constants() {
return OS_OK;
}
+// Look up the PDH index by reading the English (locale 009) counter name
+// registry. See KB Q287159: Using PDH APIs Correctly in a Localized Language
+// for details.
+static OSReturn lookup_perf_index_by_english_name(const char* english_name,
+ DWORD* result) {
+ ResourceMark rm;
+
+ DWORD type = 0;
+ DWORD size = 0;
+
+ // Determine the required buffer size
+ if (RegQueryValueEx(HKEY_PERFORMANCE_DATA, "Counter 009",
+ nullptr, &type, nullptr, &size) != ERROR_SUCCESS) {
+ return OS_ERR;
+ }
+
+ // Since registry entries in `HKEY_PERFORMANCE_DATA` are generated on the fly,
+ // they could change between calls, so we can't rely just on the size returned
+ // by the first call. Instead, Microsoft's documentation suggests running
+ // these calls in a loop until the return code is no longer `ERROR_MORE_DATA`.
+
+ char* buffer;
+ do {
+ if (size == 0) {
+ return OS_ERR;
+ }
+
+ // When `RegQueryValueEx()` returns `ERROR_MORE_DATA`, the value in the
+ // callback argument is undefined, so we need to create a new variable whose
+ // address is passed as the callback size argument.
+ buffer = NEW_RESOURCE_ARRAY(char, size);
+
+ DWORD cb_size = size;
+ LSTATUS status = RegQueryValueEx(HKEY_PERFORMANCE_DATA, "Counter 009",
+ nullptr, &type, (LPBYTE)buffer,
+ &cb_size);
+ if (status == ERROR_MORE_DATA) {
+ // We need to increase the buffer size. Since we don't know _how much_ to
+ // increase it by, we use an estimate (4096) for the increment.
+ DWORD increment = 4096;
+ if (size > MAXDWORD - increment) {
+ return OS_ERR;
+ }
+ size += increment;
+ } else if (status == ERROR_SUCCESS) {
+ break;
+ } else {
+ // If there was some other problem fetching this registry entry, tell the
+ // caller that we couldn't lookup the index.
+ return OS_ERR;
+ }
+ } while (true);
+
+ if (type != REG_MULTI_SZ) {
+ return OS_ERR;
+ }
+
+ // The buffer contains indices and names in the form (\0\0)*, so
+ // iterate character by character to parse the name and if it matches the
+ // English name, then we return the integer value of the index.
+ for (const char* p = buffer; *p != '\0'; ) {
+ const char* idx_str = p;
+ p += strlen(p) + 1;
+ if (*p == '\0') {
+ break;
+ }
+
+ const char* name = p;
+ p += strlen(p) + 1;
+ if (strcmp(name, english_name) == 0) {
+ errno = 0;
+ char* end = nullptr;
+ unsigned long value = strtoul(idx_str, &end, 10);
+ if (errno == 0 && end != idx_str && value <= MAXDWORD) {
+ *result = (DWORD)value;
+ return OS_OK;
+ }
+ }
+ }
+
+ return OS_ERR;
+}
+
+// Return the counter index of the 'Processor Information' counter, if
+// available, or else the 'Processor' counter. The former is aware of the
+// possibility of multiple processor groups and thus provides a more accurate
+// processor count whereas the latter serves as fallback.
+static DWORD get_proc_counter() {
+ static DWORD pdh_idx = 0;
+ if (pdh_idx != 0) {
+ return pdh_idx;
+ }
+
+ // Some APIs accept English counter names whereas others accept counter names
+ // in the specific user's locale. We determine the locale-specific name using
+ // the counter index, but to find the counter index, we use the English name
+ // of the counter and look for it in a specific registry key.
+ DWORD info_idx;
+ if (lookup_perf_index_by_english_name("Processor Information",
+ &info_idx) != OS_OK) {
+ info_idx = PDH_PROCESSOR_IDX;
+ }
+
+ // Assign to the static variable so that the value persists across calls.
+ pdh_idx = info_idx;
+ return pdh_idx;
+}
+
/*
* Enuerate the Processor PDH object and returns a buffer containing the enumerated instances.
* Caller needs ResourceMark;
@@ -786,8 +894,11 @@ static OSReturn allocate_pdh_constants() {
* @return buffer if successful, null on failure.
*/
static const char* enumerate_cpu_instances() {
- char* processor; //'Processor' == PDH_PROCESSOR_IDX
- if (lookup_name_by_index(PDH_PROCESSOR_IDX, &processor) != OS_OK) {
+ // The `PdhEnumObjectItems()` function accepts a localized name of the perf
+ // counter. To obtain the name that is specific to the user's locale, we
+ // perform a reverse lookup from counter index to counter name.
+ char* processor;
+ if (lookup_name_by_index(get_proc_counter(), &processor) != OS_OK) {
return nullptr;
}
DWORD c_size = 0;
@@ -821,13 +932,17 @@ static const char* enumerate_cpu_instances() {
static int count_logical_cpus(const char* instances) {
assert(instances != nullptr, "invariant");
- // count logical instances.
- DWORD count;
- char* tmp;
- for (count = 0, tmp = const_cast<char*>(instances); *tmp != '\0'; tmp = &tmp[strlen(tmp) + 1], count++);
- // PDH reports an instance for each logical processor plus an instance for the total (_Total)
- assert(count == os::processor_count() + 1, "invalid enumeration!");
- return count - 1;
+ DWORD count = 0;
+ for (const char* tmp = instances; *tmp != '\0'; tmp += strlen(tmp) + 1) {
+ // In both the 'Processor' counter and the 'Processor Information' counter,
+ // the output contains totals for the processor group(s). We filter those
+ // out by looking for the `_Total` substring.
+ if (strstr(tmp, "_Total") == nullptr) {
+ count++;
+ }
+ }
+ assert(count >= 1, "invalid enumeration!");
+ return count;
}
static int number_of_logical_cpus() {
@@ -847,7 +962,16 @@ static double cpu_factor() {
static double cpuFactor = .0;
if (numCpus == 0) {
numCpus = number_of_logical_cpus();
- assert(os::processor_count() <= (int)numCpus, "invariant");
+
+ // If we are using the legacy 'Processor' counter, which counts processors
+ // only in the first processor group, then `numCpus` can undercount, in
+ // which case, `numCpus` will be likely smaller than `os_processor_count`.
+ // However, when we use the 'Processor Information' counter, we expect both
+ // `numCpus` and `os::processorCount` to be identical. In both cases, we
+ // expect to see at least one CPU.
+ assert(numCpus >= 1 && numCpus <= (DWORD)os::processor_count(),
+ "unexpected cpu count");
+
cpuFactor = numCpus * 100;
}
return cpuFactor;
@@ -861,8 +985,8 @@ static void log_error_message_on_no_PDH_artifact(const char* counter_path) {
static int initialize_cpu_query_counters(MultiCounterQueryP query, DWORD pdh_counter_idx) {
assert(query != nullptr, "invariant");
assert(query->counters != nullptr, "invariant");
- char* processor; //'Processor' == PDH_PROCESSOR_IDX
- if (lookup_name_by_index(PDH_PROCESSOR_IDX, &processor) != OS_OK) {
+ char* processor;
+ if (lookup_name_by_index(get_proc_counter(), &processor) != OS_OK) {
return OS_ERR;
}
char* counter_name = nullptr;
@@ -880,7 +1004,11 @@ static int initialize_cpu_query_counters(MultiCounterQueryP query, DWORD pdh_cou
counter_len += OBJECT_WITH_INSTANCES_COUNTER_FMT_LEN; // "\\%s(%s)\\%s"
const char* instances = enumerate_cpu_instances();
DWORD index = 0;
- for (char* tmp = const_cast<char*>(instances); *tmp != '\0'; tmp = &tmp[strlen(tmp) + 1], index++) {
+ for (char* tmp = const_cast<char*>(instances); *tmp != '\0'; tmp = &tmp[strlen(tmp) + 1]) {
+ // Skip totals for each processor group.
+ if (strstr(tmp, ",_Total") != nullptr) {
+ continue;
+ }
const size_t tmp_len = strlen(tmp);
char* counter_path = NEW_RESOURCE_ARRAY(char, counter_len + tmp_len + 1);
const size_t jio_snprintf_result = jio_snprintf(counter_path,
@@ -896,6 +1024,7 @@ static int initialize_cpu_query_counters(MultiCounterQueryP query, DWORD pdh_cou
// return OS_OK to have the system continue to run without the missing counter
return OS_OK;
}
+ index++;
}
// Query once to initialize the counters which require at least two samples
// (like the % CPU usage) to calculate correctly.
diff --git a/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.cpp
index 8f6f1ccd38ab..3f77a27f0519 100644
--- a/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.cpp
@@ -26,6 +26,12 @@
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
+// CRT-provided TLS slot for this module (jvm.dll), set by the OS loader.
+extern "C" unsigned long _tls_index;
+
+// TLS offset read by the assembly code in `aarch64_get_thread_helper()`.
+extern "C" ptrdiff_t _jvm_thr_current_tls_offset = JavaThread::get_thr_tls_offset();
+
frame JavaThread::pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
vmassert(_anchor.last_Java_pc() != nullptr, "not walkable");
@@ -87,3 +93,25 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
}
void JavaThread::cache_global_variables() { }
+
+ptrdiff_t JavaThread::get_thr_tls_offset() {
+ char* tebPointer = (char*)NtCurrentTeb();
+
+ // 0x58 is the offset of ThreadLocalStoragePointer within the TEB. This is
+ // a stable Windows ABI constant but is not exposed in the SDK's minimal
+ // _TEB struct.
+ void** tls_array = *(void***)(tebPointer + 0x58);
+ char* curr_ptr = (char*)&Thread::_thr_current;
+ char* tls_block = (char*)tls_array[_tls_index];
+
+ // Compute the offset of Thread::_thr_current within this module's TLS
+ // block. Unlike ELF, which provides `tlsdesc` relocations that lets
+ // assembly code resolve TLS variables symbolically at link/load time,
+ // Windows PE/COFF has no equivalent mechanism for armasm64. So we compute
+ // the offset here in C++ (where the compiler knows how to access
+ // __declspec(thread) variables) and store it in a plain global that the
+ // assembly can load directly. In subsequent calls to
+ // `aarch64_get_thread_helper()`, the assembly will read the TEB to find the
+ // TLS block and then add this offset to find `Thread::_thr_current`.
+ return curr_ptr - tls_block;
+}
diff --git a/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.hpp
index 7d6ed16e629c..349846078146 100644
--- a/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.hpp
+++ b/src/hotspot/os_cpu/windows_aarch64/javaThread_windows_aarch64.hpp
@@ -46,8 +46,11 @@
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
- static Thread *aarch64_get_thread_helper() {
- return Thread::current();
- }
+ static Thread *aarch64_get_thread_helper();
+
+ // Compute the offset of `Thread::_thr_current` in the thread-local storage
+ // This offset is then used by the assembly code implementation of
+ // `aarch64_get_thread_helper()`.
+ static ptrdiff_t get_thr_tls_offset();
#endif // OS_CPU_WINDOWS_AARCH64_JAVATHREAD_WINDOWS_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/windows_aarch64/threadLS_windows_aarch64.S b/src/hotspot/os_cpu/windows_aarch64/threadLS_windows_aarch64.S
new file mode 100644
index 000000000000..81749b9a3722
--- /dev/null
+++ b/src/hotspot/os_cpu/windows_aarch64/threadLS_windows_aarch64.S
@@ -0,0 +1,64 @@
+;
+; Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+; DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+;
+; This code is free software; you can redistribute it and/or modify it
+; under the terms of the GNU General Public License version 2 only, as
+; published by the Free Software Foundation.
+;
+; This code is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; version 2 for more details (a copy is included in the LICENSE file that
+; accompanied this code).
+;
+; You should have received a copy of the GNU General Public License version
+; 2 along with this work; if not, write to the Free Software Foundation,
+; Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+;
+; Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+; or visit www.oracle.com if you need additional information or have any
+; questions.
+;
+
+ ; JavaThread::aarch64_get_thread_helper()
+ ;
+ ; Optimized TLS access to `Thread::_thr_current` on Windows AArch64.
+ ; Returns the current thread pointer in x0, clobbers x1, while all other
+ ; registers are preserved.
+
+ IMPORT _tls_index
+ IMPORT _jvm_thr_current_tls_offset
+
+ AREA threadls_text, CODE, READONLY
+ ALIGN 4
+
+ ; MSVC-decorated name for: static Thread* JavaThread::aarch64_get_thread_helper()
+ EXPORT |?aarch64_get_thread_helper@JavaThread@@SAPEAVThread@@XZ|
+
+|?aarch64_get_thread_helper@JavaThread@@SAPEAVThread@@XZ| PROC
+
+ ; x18 holds the TEB, 0x58 is a well-defined offset into the TEB on 64-bit
+ ; systems, so the following line loads the thread-local storage pointer
+ ; inside the TEB
+ ldr x1, [x18, #0x58]
+
+ ; Load `_tls_index` and zero-extend it to 64 bits to occupy x0
+ adrp x0, _tls_index
+ ldr w0, [x0, _tls_index]
+
+ ; `x0` holds the index, `x1` holds the array base address (each entry is 64
+ ; bits long), so in the following line, x1 = array_base[_tls_index]
+ ldr x1, [x1, x0, lsl #3]
+
+ ; Load cached TLS offset of `Thread::_thr_current`
+ adrp x0, _jvm_thr_current_tls_offset
+ ldr x0, [x0, _jvm_thr_current_tls_offset]
+
+ ; Load `Thread::_thr_current` value
+ ldr x0, [x1, x0]
+
+ ret
+
+ ENDP
+ END
diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp
index 79779782c2a3..5802217c1c1c 100644
--- a/src/hotspot/share/adlc/formssel.cpp
+++ b/src/hotspot/share/adlc/formssel.cpp
@@ -453,6 +453,14 @@ Form::DataType InstructForm::is_ideal_store() const {
return _matrule->is_ideal_store();
}
+// Return 'true' if this instruction matches an ideal vector node
+bool InstructForm::is_vector() const {
+ if( _matrule == nullptr ) return false;
+
+ return _matrule->is_vector();
+}
+
+
// Return the input register that must match the output register
// If this is not required, return 0
uint InstructForm::two_address(FormDict &globals) {
@@ -759,6 +767,51 @@ int InstructForm::memory_operand(FormDict &globals) const {
return NO_MEMORY_OPERAND;
}
+// This instruction captures the machine-independent bottom_type
+// Expected use is for pointer vs oop determination for LoadP
+bool InstructForm::captures_bottom_type(FormDict &globals) const {
+ if (_matrule && _matrule->_rChild &&
+ (!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
+ !strcmp(_matrule->_rChild->_opType,"CastDD") ||
+ !strcmp(_matrule->_rChild->_opType,"CastFF") ||
+ !strcmp(_matrule->_rChild->_opType,"CastII") ||
+ !strcmp(_matrule->_rChild->_opType,"CastLL") ||
+ !strcmp(_matrule->_rChild->_opType,"CastVV") ||
+ !strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
+ !strcmp(_matrule->_rChild->_opType,"DecodeN") ||
+ !strcmp(_matrule->_rChild->_opType,"EncodeP") ||
+ !strcmp(_matrule->_rChild->_opType,"DecodeNKlass") ||
+ !strcmp(_matrule->_rChild->_opType,"EncodePKlass") ||
+ !strcmp(_matrule->_rChild->_opType,"LoadN") ||
+ !strcmp(_matrule->_rChild->_opType,"LoadNKlass") ||
+ !strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
+ !strcmp(_matrule->_rChild->_opType,"CheckCastPP") ||
+ !strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
+ !strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
+ !strcmp(_matrule->_rChild->_opType,"RotateLeft") ||
+ !strcmp(_matrule->_rChild->_opType,"RotateRight") ||
+#if INCLUDE_SHENANDOAHGC
+ !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
+ !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
+#endif
+ !strcmp(_matrule->_rChild->_opType,"StrInflatedCopy") ||
+ !strcmp(_matrule->_rChild->_opType,"VectorCmpMasked")||
+ !strcmp(_matrule->_rChild->_opType,"VectorMaskGen")||
+ !strcmp(_matrule->_rChild->_opType,"VerifyVectorAlignment")||
+ !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
+ !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN"))) return true;
+ else if ( is_ideal_load() == Form::idealP ) return true;
+ else if ( is_ideal_store() != Form::none ) return true;
+
+ if (needs_base_oop_edge(globals)) return true;
+
+ if (is_vector()) return true;
+ if (is_mach_constant()) return true;
+
+ return false;
+}
+
+
// Access instr_cost attribute or return null.
const char* InstructForm::cost() {
for (Attribute* cur = _attribs; cur != nullptr; cur = (Attribute*)cur->_next) {
@@ -1128,6 +1181,9 @@ const char *InstructForm::mach_base_class(FormDict &globals) const {
}
else if (is_mach_constant()) {
return "MachConstantNode";
+ }
+ else if (captures_bottom_type(globals)) {
+ return "MachTypeNode";
} else {
return "MachNode";
}
@@ -4281,6 +4337,58 @@ Form::DataType MatchRule::is_ideal_load() const {
return ideal_load;
}
+bool MatchRule::is_vector() const {
+ static const char *vector_list[] = {
+ "AddVB","AddVS","AddVI","AddVL","AddVHF","AddVF","AddVD",
+ "SubVB","SubVS","SubVI","SubVL","SubVHF","SubVF","SubVD",
+ "MulVB","MulVS","MulVI","MulVL","MulVHF","MulVF","MulVD",
+ "DivVHF","DivVF","DivVD",
+ "AbsVB","AbsVS","AbsVI","AbsVL","AbsVF","AbsVD",
+ "NegVF","NegVD","NegVI","NegVL",
+ "SqrtVD","SqrtVF","SqrtVHF",
+ "AndV" ,"XorV" ,"OrV",
+ "MaxV", "MinV", "MinVHF", "MaxVHF", "UMinV", "UMaxV",
+ "CompressV", "ExpandV", "CompressM", "CompressBitsV", "ExpandBitsV",
+ "AddReductionVI", "AddReductionVL",
+ "AddReductionVHF", "AddReductionVF", "AddReductionVD",
+ "MulReductionVI", "MulReductionVL",
+ "MulReductionVHF", "MulReductionVF", "MulReductionVD",
+ "MaxReductionV", "MinReductionV",
+ "AndReductionV", "OrReductionV", "XorReductionV",
+ "MulAddVS2VI", "MacroLogicV",
+ "LShiftCntV","RShiftCntV",
+ "LShiftVB","LShiftVS","LShiftVI","LShiftVL",
+ "RShiftVB","RShiftVS","RShiftVI","RShiftVL",
+ "URShiftVB","URShiftVS","URShiftVI","URShiftVL",
+ "Replicate","ReverseV","ReverseBytesV",
+ "RoundDoubleModeV","RotateLeftV" , "RotateRightV", "LoadVector","StoreVector",
+ "LoadVectorGather", "StoreVectorScatter", "LoadVectorGatherMasked", "StoreVectorScatterMasked",
+ "SelectFromTwoVector", "VectorTest", "VectorLoadMask", "VectorStoreMask", "VectorBlend", "VectorInsert",
+ "VectorRearrange", "VectorLoadShuffle", "VectorLoadConst",
+ "VectorCastB2X", "VectorCastS2X", "VectorCastI2X",
+ "VectorCastL2X", "VectorCastF2X", "VectorCastD2X", "VectorCastF2HF", "VectorCastHF2F",
+ "VectorUCastB2X", "VectorUCastS2X", "VectorUCastI2X",
+ "VectorMaskWrapper","VectorMaskCmp","VectorReinterpret","LoadVectorMasked","StoreVectorMasked",
+ "FmaVD", "FmaVF", "FmaVHF", "PopCountVI", "PopCountVL", "PopulateIndex", "VectorLongToMask",
+ "CountLeadingZerosV", "CountTrailingZerosV", "SignumVF", "SignumVD", "SaturatingAddV", "SaturatingSubV",
+ // Next are vector mask ops.
+ "MaskAll", "AndVMask", "OrVMask", "XorVMask", "VectorMaskCast",
+ "RoundVF", "RoundVD",
+ // Next are not supported currently.
+ "PackB","PackS","PackI","PackL","PackF","PackD","Pack2L","Pack2D",
+ "ExtractB","ExtractUB","ExtractC","ExtractS","ExtractI","ExtractL","ExtractF","ExtractD"
+ };
+ int cnt = sizeof(vector_list)/sizeof(char*);
+ if (_rChild) {
+ const char *opType = _rChild->_opType;
+  for (int i=0; i<cnt; i++)
+    if (strcmp(opType,vector_list[i]) == 0)
+      return true;
+  }
+  return false;
+}
diff --git a/src/hotspot/share/adlc/output_c.cpp b/src/hotspot/share/adlc/output_c.cpp
--- a/src/hotspot/share/adlc/output_c.cpp
+++ b/src/hotspot/share/adlc/output_c.cpp
      fprintf(fp, "  root->add_req(inst%d->in(%d));  // unmatched ideal edge\n",
inst_num, unmatched_edge);
}
- // Get bottom type from instruction whose result we are replacing
- fprintf(fp, " root->_bottom_type = inst%d->bottom_type();\n", inst_num);
+ // If new instruction captures bottom type
+ if( root_form->captures_bottom_type(globals) ) {
+ // Get bottom type from instruction whose result we are replacing
+ fprintf(fp, " root->_bottom_type = inst%d->bottom_type();\n", inst_num);
+ }
// Define result register and result operand
fprintf(fp, " ra_->set_oop (root, ra_->is_oop(inst%d));\n", inst_num);
fprintf(fp, " ra_->set_pair(root->_idx, ra_->get_reg_second(inst%d), ra_->get_reg_first(inst%d));\n", inst_num, inst_num);
@@ -1584,8 +1587,11 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
fprintf(fp, " ((MachIfNode*)n%d)->_fcnt = _fcnt;\n", cnt);
}
- // Fill in the bottom_type
- fprintf(fp, " n%d->_bottom_type = bottom_type();\n", cnt);
+ // Fill in the bottom_type where requested
+ if (node->captures_bottom_type(_globalNames) &&
+ new_inst->captures_bottom_type(_globalNames)) {
+ fprintf(fp, " ((MachTypeNode*)n%d)->_bottom_type = bottom_type();\n", cnt);
+ }
const char *resultOper = new_inst->reduce_result();
fprintf(fp," n%d->set_opnd_array(0, state->MachOperGenerator(%s));\n",
@@ -3959,15 +3965,13 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
}
}
- // Fill in the bottom_type
- if (inst->_matrule != nullptr && strcmp(inst->_matrule->_opType, "PrefetchAllocation") == 0) {
- // Special case, with AllocatePrefetchStyle == 3, this should be Type::MEMORY, but the graph
- // seems unsound, needs further investigation
- fprintf(fp_cpp, "%s node->_bottom_type = Type::ABIO;\n", indent);
- } else {
- fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
+ // Fill in the bottom_type where requested
+ if (inst->captures_bottom_type(_globalNames)) {
+ if (strncmp("MachCall", inst->mach_base_class(_globalNames), strlen("MachCall")) != 0
+ && strncmp("MachIf", inst->mach_base_class(_globalNames), strlen("MachIf")) != 0) {
+ fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
+ }
}
-
if( inst->is_ideal_if() ) {
fprintf(fp_cpp, "%s node->_prob = _leaf->as_If()->_prob;\n", indent);
fprintf(fp_cpp, "%s node->_fcnt = _leaf->as_If()->_fcnt;\n", indent);
@@ -4022,8 +4026,10 @@ void InstructForm::define_cisc_version(ArchDesc& AD, FILE* fp_cpp) {
fprintf(fp_cpp, "MachNode *%sNode::cisc_version(int offset) {\n", this->_ident);
// Create the MachNode object
fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name);
- // Fill in the bottom_type
- fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
+ // Fill in the bottom_type where requested
+ if ( this->captures_bottom_type(AD.globalNames()) ) {
+ fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
+ }
uint cur_num_opnds = num_opnds();
if (cur_num_opnds > 1 && cur_num_opnds != num_unique_opnds()) {
@@ -4069,9 +4075,10 @@ void InstructForm::define_short_branch_methods(ArchDesc& AD, FILE* fp_cpp) {
fprintf(fp_cpp, " node->_prob = _prob;\n");
fprintf(fp_cpp, " node->_fcnt = _fcnt;\n");
}
-
- // Fill in the bottom_type
- fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
+ // Fill in the bottom_type where requested
+ if ( this->captures_bottom_type(AD.globalNames()) ) {
+ fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
+ }
fprintf(fp_cpp, "\n");
// Short branch version must use same node index for access
diff --git a/src/hotspot/share/adlc/output_h.cpp b/src/hotspot/share/adlc/output_h.cpp
index f7389b5a1b1f..6cb82f4df7f7 100644
--- a/src/hotspot/share/adlc/output_h.cpp
+++ b/src/hotspot/share/adlc/output_h.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1841,6 +1841,112 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," virtual const Pipeline *pipeline() const;\n");
}
+ // Generate virtual function for MachNodeX::bottom_type when necessary
+ //
+ // Note on accuracy: Pointer-types of machine nodes need to be accurate,
+ // or else alias analysis on the matched graph may produce bad code.
+ // Moreover, the aliasing decisions made on machine-node graph must be
+ // no less accurate than those made on the ideal graph, or else the graph
+ // may fail to schedule. (Reason: Memory ops which are reordered in
+ // the ideal graph might look interdependent in the machine graph,
+ // thereby removing degrees of scheduling freedom that the optimizer
+ // assumed would be available.)
+ //
+ // %%% We should handle many of these cases with an explicit ADL clause:
+ // instruct foo() %{ ... bottom_type(TypeRawPtr::BOTTOM); ... %}
+ if( data_type != Form::none ) {
+ // A constant's bottom_type returns a Type containing its constant value
+
+ // !!!!!
+ // Convert all ints, floats, ... to machine-independent TypeXs
+ // as is done for pointers
+ //
+ // Construct appropriate constant type containing the constant value.
+ fprintf(fp," virtual const class Type *bottom_type() const {\n");
+ switch( data_type ) {
+ case Form::idealI:
+ fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n");
+ break;
+ case Form::idealP:
+ case Form::idealN:
+ case Form::idealNKlass:
+ fprintf(fp," return opnd_array(1)->type();\n");
+ break;
+ case Form::idealD:
+ fprintf(fp," return TypeD::make(opnd_array(1)->constantD());\n");
+ break;
+ case Form::idealH:
+ fprintf(fp," return TypeH::make(opnd_array(1)->constantH());\n");
+ break;
+ case Form::idealF:
+ fprintf(fp," return TypeF::make(opnd_array(1)->constantF());\n");
+ break;
+ case Form::idealL:
+ fprintf(fp," return TypeLong::make(opnd_array(1)->constantL());\n");
+ break;
+ default:
+ assert( false, "Unimplemented()" );
+ break;
+ }
+ fprintf(fp," };\n");
+ }
+/* else if ( instr->_matrule && instr->_matrule->_rChild &&
+ ( strcmp("ConvF2I",instr->_matrule->_rChild->_opType)==0
+ || strcmp("ConvD2I",instr->_matrule->_rChild->_opType)==0 ) ) {
+ // !!!!! !!!!!
+ // Provide explicit bottom type for conversions to int
+ // On Intel the result operand is a stackSlot, untyped.
+ fprintf(fp," virtual const class Type *bottom_type() const {");
+ fprintf(fp, " return TypeInt::INT;");
+ fprintf(fp, " };\n");
+ }*/
+ else if( instr->is_ideal_copy() &&
+ !strcmp(instr->_matrule->_lChild->_opType,"stackSlotP") ) {
+ // !!!!!
+ // Special hack for ideal Copy of pointer. Bottom type is oop or not depending on input.
+ fprintf(fp," const Type *bottom_type() const { return in(1)->bottom_type(); } // Copy?\n");
+ }
+ else if( instr->is_ideal_loadPC() ) {
+ // LoadPCNode provides the return address of a call to native code.
+ // Define its bottom type to be TypeRawPtr::BOTTOM instead of TypePtr::BOTTOM
+ // since it is a pointer to an internal VM location and must have a zero offset.
+ // Allocation detects derived pointers, in part, by their non-zero offsets.
+ fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // LoadPC?\n");
+ }
+ else if( instr->is_ideal_box() ) {
+ // BoxNode provides the address of a stack slot.
+ // Define its bottom type to be TypeRawPtr::BOTTOM instead of TypePtr::BOTTOM
+ // This prevents raise_above_anti_dependences from complaining. It will
+ // complain if it sees that the pointer base is TypePtr::BOTTOM since
+ // it doesn't understand what that might alias.
+ fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // Box?\n");
+ }
+ else if (instr->_matrule && instr->_matrule->_rChild &&
+ (!strcmp(instr->_matrule->_rChild->_opType,"CMoveP") || !strcmp(instr->_matrule->_rChild->_opType,"CMoveN")) ) {
+ int offset = 1;
+ // Special special hack to see if the Cmp? has been incorporated in the conditional move
+ MatchNode *rl = instr->_matrule->_rChild->_lChild;
+ if (rl && !strcmp(rl->_opType, "Binary") && rl->_rChild && strncmp(rl->_rChild->_opType, "Cmp", 3) == 0) {
+ offset = 2;
+ fprintf(fp," const Type *bottom_type() const { if (req() == 3) return in(2)->bottom_type();\n\tconst Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // %s\n",
+ offset, offset+1, offset+1, instr->_matrule->_rChild->_opType);
+ } else {
+ // Special hack for ideal CMove; ideal type depends on inputs
+ fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // %s\n",
+ offset, offset+1, offset+1, instr->_matrule->_rChild->_opType);
+ }
+ }
+ else if (instr->is_tls_instruction()) {
+ // Special hack for tlsLoadP
+ fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
+ }
+ else if ( instr->is_ideal_if() ) {
+ fprintf(fp," const Type *bottom_type() const { return TypeTuple::IFBOTH; } // matched IfNode\n");
+ }
+ else if ( instr->is_ideal_membar() ) {
+ fprintf(fp," const Type *bottom_type() const { return TypeTuple::MEMBAR; } // matched MemBar\n");
+ }
+
// Check where 'ideal_type' must be customized
/*
if ( instr->_matrule && instr->_matrule->_rChild &&
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index f910ecadc164..db55b8c5fa81 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3558,7 +3558,7 @@ const char* GraphBuilder::check_can_parse(ciMethod* callee) const {
// negative filter: should callee NOT be inlined? returns null, ok to inline, or rejection msg
const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
- if ( compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand";
+ if ( compilation()->directive()->should_not_inline(callee, compilation()->env()->comp_level())) return "disallowed by CompileCommand";
if ( callee->dont_inline()) return "don't inline by annotation";
return nullptr;
}
diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp
index b384ff47a895..8167697e84be 100644
--- a/src/hotspot/share/ci/ciEnv.hpp
+++ b/src/hotspot/share/ci/ciEnv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -236,6 +236,9 @@ class ciEnv : StackObj {
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
return _factory->get_unloaded_method(declared_holder, name, signature, accessor);
}
+ InstanceKlass::ClassState get_cached_init_state(uint id) {
+ return (InstanceKlass::ClassState)_factory->cached_init_state(id);
+ }
// Get a ciKlass representing an unloaded klass.
// Ensures uniqueness of the result.
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 6243258acd9b..293063d0b680 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,12 +136,14 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
// ------------------------------------------------------------------
-// ciInstanceKlass::compute_shared_is_initialized
-void ciInstanceKlass::compute_shared_init_state() {
- GUARDED_VM_ENTRY(
- InstanceKlass* ik = get_instanceKlass();
- _init_state = ik->init_state();
- )
+InstanceKlass::ClassState ciInstanceKlass::compute_init_state() {
+ if (_is_shared && is_loaded()) {
+ // Return cached init state of shared klass
+ ciEnv* env = CURRENT_ENV;
+ assert(env->task() != nullptr, "only calls from compilation are expected here");
+ return env->get_cached_init_state(ident());
+ }
+ return _init_state;
}
// ------------------------------------------------------------------
@@ -319,11 +321,11 @@ void ciInstanceKlass::print_impl(outputStream* st) {
bool_to_str(has_subklass()),
layout_helper());
- _flags.print_klass_flags();
+ _flags.print_klass_flags(st);
if (_super) {
st->print(" super=");
- _super->print_name();
+ _super->print_name_on(st);
}
if (_java_mirror) {
st->print(" mirror=PRESENT");
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index a84c63981c9b..221e6806b5ac 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -106,43 +106,36 @@ class ciInstanceKlass : public ciKlass {
bool is_shared() { return _is_shared; }
- void compute_shared_init_state();
+ InstanceKlass::ClassState compute_init_state();
bool compute_shared_has_subklass();
int compute_nonstatic_fields();
GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields);
bool compute_has_trusted_loader();
- // Update the init_state for shared klasses
- void update_if_shared(InstanceKlass::ClassState expected) {
- if (_is_shared && _init_state != expected) {
- if (is_loaded()) compute_shared_init_state();
- }
- }
-
public:
// Has this klass been initialized?
bool is_initialized() {
- update_if_shared(InstanceKlass::fully_initialized);
- return _init_state == InstanceKlass::fully_initialized;
+ InstanceKlass::ClassState state = compute_init_state();
+ return state == InstanceKlass::fully_initialized;
}
bool is_not_initialized() {
- update_if_shared(InstanceKlass::fully_initialized);
- return _init_state < InstanceKlass::being_initialized;
+ InstanceKlass::ClassState state = compute_init_state();
+ return state < InstanceKlass::being_initialized;
}
// Is this klass being initialized?
bool is_being_initialized() {
- update_if_shared(InstanceKlass::being_initialized);
- return _init_state == InstanceKlass::being_initialized;
+ InstanceKlass::ClassState state = compute_init_state();
+ return state == InstanceKlass::being_initialized;
}
// Has this klass been linked?
bool is_linked() {
- update_if_shared(InstanceKlass::linked);
- return _init_state >= InstanceKlass::linked;
+ InstanceKlass::ClassState state = compute_init_state();
+ return state >= InstanceKlass::linked;
}
// Is this klass in error state?
bool is_in_error_state() {
- update_if_shared(InstanceKlass::initialization_error);
- return _init_state == InstanceKlass::initialization_error;
+ InstanceKlass::ClassState state = compute_init_state();
+ return state == InstanceKlass::initialization_error;
}
// General klass information.
diff --git a/src/hotspot/share/ci/ciObjectFactory.cpp b/src/hotspot/share/ci/ciObjectFactory.cpp
index 2af5d812922f..d3bef01f8525 100644
--- a/src/hotspot/share/ci/ciObjectFactory.cpp
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,7 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
+#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
#include "runtime/handles.inline.hpp"
@@ -83,6 +84,7 @@ ciObjectFactory::ciObjectFactory(Arena* arena,
int expected_size)
: _arena(arena),
_ci_metadata(arena, expected_size, 0, nullptr),
+ _cached_init_state(arena, _shared_ident_limit, 0, (u1)0),
_unloaded_methods(arena, 4, 0, nullptr),
_unloaded_klasses(arena, 8, 0, nullptr),
_unloaded_instances(arena, 4, 0, nullptr),
@@ -97,6 +99,28 @@ ciObjectFactory::ciObjectFactory(Arena* arena,
// If the shared ci objects exist append them to this factory's objects
if (_shared_ci_metadata != nullptr) {
_ci_metadata.appendAll(_shared_ci_metadata);
+ // ciInstanceKlass for well-known class is shared by all
+ // compiler threads and can be updated concurrently by
+ // other compiler threads during compilation.
+ // Make local copy of class state to avoid state change
+ // during compilation.
+ int len = _ci_metadata.length();
+ for (int i = 0; i < len; i++) {
+ ciMetadata* obj = _ci_metadata.at(i);
+ if (obj->is_loaded() && obj->is_instance_klass()) {
+ ciInstanceKlass* cik = obj->as_instance_klass();
+ precond(cik->is_shared());
+ InstanceKlass::ClassState current_state = cik->_init_state;
+ InstanceKlass::ClassState state = InstanceKlass::fully_initialized;
+ if (current_state != state) {
+ GUARDED_VM_ENTRY( state = cik->get_instanceKlass()->init_state(); )
+ // Update state of shared ciInstanceKlass
+ cik->_init_state = state;
+ }
+ // Cache state for current compilation
+ _cached_init_state.at_put_grow(cik->ident(), (u1)state, 0);
+ }
+ }
}
}
diff --git a/src/hotspot/share/ci/ciObjectFactory.hpp b/src/hotspot/share/ci/ciObjectFactory.hpp
index fd7ca6bb8013..c578aecb5647 100644
--- a/src/hotspot/share/ci/ciObjectFactory.hpp
+++ b/src/hotspot/share/ci/ciObjectFactory.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ class ciObjectFactory : public ArenaObj {
Arena* _arena;
GrowableArray<ciMetadata*> _ci_metadata;
+ // Local copy of shared ciInstanceKlass init state for current compilation
+ GrowableArray<u1> _cached_init_state;
GrowableArray<ciMethod*> _unloaded_methods;
GrowableArray<ciKlass*> _unloaded_klasses;
GrowableArray<ciInstance*> _unloaded_instances;
@@ -103,6 +105,11 @@ class ciObjectFactory : public ArenaObj {
ciMetadata* cached_metadata(Metadata* key);
ciSymbol* get_symbol(Symbol* key);
+ // Get cached init state of shared ciInstanceKlass
+ u1 cached_init_state(uint id) {
+ return _cached_init_state.at(id);
+ }
+
// Get the ciSymbol corresponding to one of the vmSymbols.
static ciSymbol* vm_symbol_at(vmSymbolID index);
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 6d25e460688d..bf00185ffa9a 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -96,9 +96,18 @@ static JImageClose_t JImageClose = nullptr;
static JImageFindResource_t JImageFindResource = nullptr;
static JImageGetResource_t JImageGetResource = nullptr;
-// JimageFile pointer, or null if exploded JDK build.
+// JImageFile pointer, or null if exploded JDK build.
static JImageFile* JImage_file = nullptr;
+// PreviewMode status to control preview behaviour. JImage_file is unusable
+// for normal lookup until (Preview_mode != PREVIEW_MODE_UNINITIALIZED).
+enum PreviewMode {
+ PREVIEW_MODE_UNINITIALIZED = 0,
+ PREVIEW_MODE_DEFAULT = 1,
+ PREVIEW_MODE_ENABLE_PREVIEW = 2
+};
+static PreviewMode Preview_mode = PREVIEW_MODE_UNINITIALIZED;
+
// Globals
PerfCounter* ClassLoader::_perf_accumulated_time = nullptr;
@@ -154,7 +163,7 @@ void ClassLoader::print_counters(outputStream *st) {
GrowableArray<ModulePatchPath*>* ClassLoader::_patch_mod_entries = nullptr;
GrowableArray<ModuleClassPathList*>* ClassLoader::_exploded_entries = nullptr;
-ClassPathEntry* ClassLoader::_jrt_entry = nullptr;
+ClassPathImageEntry* ClassLoader::_jrt_entry = nullptr;
ClassPathEntry* volatile ClassLoader::_first_append_entry_list = nullptr;
ClassPathEntry* volatile ClassLoader::_last_append_entry = nullptr;
@@ -171,15 +180,6 @@ static bool string_starts_with(const char* str, const char* str_to_find) {
}
#endif
-static const char* get_jimage_version_string() {
- static char version_string[10] = "";
- if (version_string[0] == '\0') {
- jio_snprintf(version_string, sizeof(version_string), "%d.%d",
- VM_Version::vm_major_version(), VM_Version::vm_minor_version());
- }
- return (const char*)version_string;
-}
-
bool ClassLoader::string_ends_with(const char* str, const char* str_to_find) {
size_t str_len = strlen(str);
size_t str_to_find_len = strlen(str_to_find);
@@ -234,6 +234,69 @@ Symbol* ClassLoader::package_from_class_name(const Symbol* name, bool* bad_class
return SymbolTable::new_symbol(name, pointer_delta_as_int(start, base), pointer_delta_as_int(end, base));
}
+// --------------------------------
+// The following jimage_xxx static functions encapsulate all JImage_file and Preview_mode access.
+// This is done to make it easy to reason about the JImage file state (exists vs initialized etc.).
+
+// Opens the named JImage file and sets the JImage file reference.
+// Returns true if opening the JImage file was successful (see also jimage_is_open()).
+static bool jimage_open(const char* modules_path) {
+ // Currently 'error' is not set to anything useful, so ignore it here.
+ jint error;
+ JImage_file = (*JImageOpen)(modules_path, &error);
+ if (Arguments::has_jimage() && JImage_file == nullptr) {
+ // The modules file exists but is unreadable or corrupt
+ vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
+ }
+ return JImage_file != nullptr;
+}
+
+// Closes and clears the JImage file reference (this will only be called during shutdown).
+static void jimage_close() {
+ if (JImage_file != nullptr) {
+ (*JImageClose)(JImage_file);
+ JImage_file = nullptr;
+ }
+}
+
+// Returns whether a JImage file was opened (but NOT whether it was initialized yet).
+static bool jimage_is_open() {
+ return JImage_file != nullptr;
+}
+
+// Returns the JImage file reference (which may or may not be initialized).
+static JImageFile* jimage_non_null() {
+ assert(jimage_is_open(), "should have been opened by ClassLoader::lookup_vm_options "
+ "and remains open throughout normal JVM lifetime");
+ return JImage_file;
+}
+
+// Returns true if jimage_init() has been called. Once the JImage file is initialized,
+// is_preview_enabled() can be called to correctly determine the access mode.
+static bool jimage_is_initialized() {
+ return jimage_is_open() && Preview_mode != PREVIEW_MODE_UNINITIALIZED;
+}
+
+// Returns the access mode for an initialized JImage file (reflects --enable-preview).
+static bool is_preview_enabled() {
+ return Preview_mode == PREVIEW_MODE_ENABLE_PREVIEW;
+}
+
+// Looks up the location of a named JImage resource. This "raw" lookup function allows
+// the preview mode to be manually specified, so must not be accessible outside this
+// class. ClassPathImageEntry manages all calls for resources after startup is complete.
+static JImageLocationRef jimage_find_resource(const char* module_name,
+ const char* file_name,
+ bool is_preview,
+ jlong* size) {
+ return ((*JImageFindResource)(jimage_non_null(),
+ module_name,
+ file_name,
+ is_preview,
+ size));
+}
+// --------------------------------
+
// Given a fully qualified package name, find its defining package in the class loader's
// package entry table.
PackageEntry* ClassLoader::get_package_entry(Symbol* pkg_name, ClassLoaderData* loader_data) {
@@ -372,28 +435,15 @@ ClassFileStream* ClassPathZipEntry::open_stream(JavaThread* current, const char*
DEBUG_ONLY(ClassPathImageEntry* ClassPathImageEntry::_singleton = nullptr;)
-JImageFile* ClassPathImageEntry::jimage() const {
- return JImage_file;
-}
-
-JImageFile* ClassPathImageEntry::jimage_non_null() const {
- assert(ClassLoader::has_jrt_entry(), "must be");
- assert(jimage() != nullptr, "should have been opened by ClassLoader::lookup_vm_options "
- "and remained throughout normal JVM lifetime");
- return jimage();
-}
-
void ClassPathImageEntry::close_jimage() {
- if (jimage() != nullptr) {
- (*JImageClose)(jimage());
- JImage_file = nullptr;
- }
+ jimage_close();
}
-ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
+ClassPathImageEntry::ClassPathImageEntry(const char* name) :
ClassPathEntry() {
- guarantee(jimage != nullptr, "jimage file is null");
+ guarantee(jimage_is_initialized(), "jimage is not initialized");
guarantee(name != nullptr, "jimage file name is null");
+
assert(_singleton == nullptr, "VM supports only one jimage");
DEBUG_ONLY(_singleton = this);
size_t len = strlen(name) + 1;
@@ -412,6 +462,8 @@ ClassFileStream* ClassPathImageEntry::open_stream(JavaThread* current, const cha
// 2. A package is in at most one module in the jimage file.
//
ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current, const char* name, ClassLoaderData* loader_data) {
+ const bool is_preview = is_preview_enabled();
+
jlong size;
JImageLocationRef location = 0;
@@ -420,7 +472,7 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current
if (pkg_name != nullptr) {
if (!Universe::is_module_initialized()) {
- location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
+ location = jimage_find_resource(JAVA_BASE_NAME, name, is_preview, &size);
} else {
PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
if (package_entry != nullptr) {
@@ -431,7 +483,7 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current
assert(module->is_named(), "Boot classLoader package is in unnamed module");
const char* module_name = module->name()->as_C_string();
if (module_name != nullptr) {
- location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
+ location = jimage_find_resource(module_name, name, is_preview, &size);
}
}
}
@@ -444,7 +496,7 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current
char* data = NEW_RESOURCE_ARRAY(char, size);
(*JImageGetResource)(jimage_non_null(), location, data, size);
// Resource allocated
- assert(this == (ClassPathImageEntry*)ClassLoader::get_jrt_entry(), "must be");
+ assert(this == ClassLoader::get_jrt_entry(), "must be");
return new ClassFileStream((u1*)data,
checked_cast<int>(size),
_name,
@@ -454,16 +506,9 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current
return nullptr;
}
-JImageLocationRef ClassLoader::jimage_find_resource(JImageFile* jf,
- const char* module_name,
- const char* file_name,
- jlong &size) {
- return ((*JImageFindResource)(jf, module_name, get_jimage_version_string(), file_name, &size));
-}
-
bool ClassPathImageEntry::is_modules_image() const {
assert(this == _singleton, "VM supports a single jimage");
- assert(this == (ClassPathImageEntry*)ClassLoader::get_jrt_entry(), "must be used for jrt entry");
+ assert(this == ClassLoader::get_jrt_entry(), "must be used for jrt entry");
return true;
}
@@ -618,14 +663,15 @@ void ClassLoader::setup_bootstrap_search_path_impl(JavaThread* current, const ch
struct stat st;
if (os::stat(path, &st) == 0) {
// Directory found
- if (JImage_file != nullptr) {
+ if (jimage_is_open()) {
assert(Arguments::has_jimage(), "sanity check");
const char* canonical_path = get_canonical_path(path, current);
assert(canonical_path != nullptr, "canonical_path issue");
- _jrt_entry = new ClassPathImageEntry(JImage_file, canonical_path);
+ // Hand over lifecycle control of the JImage file to the _jrt_entry singleton
+ // (see ClassPathImageEntry::close_jimage). The image must be initialized by now.
+ _jrt_entry = new ClassPathImageEntry(canonical_path);
assert(_jrt_entry != nullptr && _jrt_entry->is_modules_image(), "No java runtime image present");
- assert(_jrt_entry->jimage() != nullptr, "No java runtime image");
} // else it's an exploded build.
} else {
// If path does not exist, exit
@@ -645,7 +691,7 @@ void ClassLoader::setup_bootstrap_search_path_impl(JavaThread* current, const ch
static const char* get_exploded_module_path(const char* module_name, bool c_heap) {
const char *home = Arguments::get_java_home();
const char file_sep = os::file_separator()[0];
- // 10 represents the length of "modules" + 2 file separators + \0
+ // 10 represents the length of "modules" (7) + 2 file separators + \0
size_t len = strlen(home) + strlen(module_name) + 10;
char *path = c_heap ? NEW_C_HEAP_ARRAY(char, len, mtModule) : NEW_RESOURCE_ARRAY(char, len);
jio_snprintf(path, len, "%s%cmodules%c%s", home, file_sep, file_sep, module_name);
@@ -1398,20 +1444,8 @@ void ClassLoader::initialize(TRAPS) {
setup_bootstrap_search_path(THREAD);
}
-static char* lookup_vm_resource(JImageFile *jimage, const char *jimage_version, const char *path) {
- jlong size;
- JImageLocationRef location = (*JImageFindResource)(jimage, "java.base", jimage_version, path, &size);
- if (location == 0)
- return nullptr;
- char *val = NEW_C_HEAP_ARRAY(char, size+1, mtClass);
- (*JImageGetResource)(jimage, location, val, size);
- val[size] = '\0';
- return val;
-}
-
// Lookup VM options embedded in the modules jimage file
char* ClassLoader::lookup_vm_options() {
- jint error;
char modules_path[JVM_MAXPATHLEN];
const char* fileSep = os::file_separator();
@@ -1419,32 +1453,41 @@ char* ClassLoader::lookup_vm_options() {
load_jimage_library();
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
- JImage_file =(*JImageOpen)(modules_path, &error);
- if (JImage_file == nullptr) {
- if (Arguments::has_jimage()) {
- // The modules file exists but is unreadable or corrupt
- vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
+ if (jimage_open(modules_path)) {
+ // Special case where we lookup the options string *before* set_preview_mode() is called.
+ // Since VM arguments have not been parsed, and the ClassPathImageEntry singleton
+ // has not been created yet, we access the JImage file directly in non-preview mode.
+ jlong size;
+ JImageLocationRef location =
+ jimage_find_resource(JAVA_BASE_NAME, "jdk/internal/vm/options", /* is_preview */ false, &size);
+ if (location != 0) {
+ char* options = NEW_C_HEAP_ARRAY(char, size+1, mtClass);
+ (*JImageGetResource)(jimage_non_null(), location, options, size);
+ options[size] = '\0';
+ return options;
}
- return nullptr;
}
+ return nullptr;
+}
- const char *jimage_version = get_jimage_version_string();
- char *options = lookup_vm_resource(JImage_file, jimage_version, "jdk/internal/vm/options");
- return options;
+// Finishes initializing the JImageFile (if present) by setting the access mode.
+void ClassLoader::set_preview_mode(bool enable_preview) {
+ assert(Preview_mode == PREVIEW_MODE_UNINITIALIZED, "set_preview_mode must not be called twice");
+ Preview_mode = enable_preview ? PREVIEW_MODE_ENABLE_PREVIEW : PREVIEW_MODE_DEFAULT;
}
bool ClassLoader::is_module_observable(const char* module_name) {
assert(JImageOpen != nullptr, "jimage library should have been opened");
- if (JImage_file == nullptr) {
+ if (!jimage_is_open()) {
struct stat st;
const char *path = get_exploded_module_path(module_name, true);
bool res = os::stat(path, &st) == 0;
FREE_C_HEAP_ARRAY(path);
return res;
}
+ // We don't expect preview mode (i.e. --enable-preview) to affect module visibility.
jlong size;
- const char *jimage_version = get_jimage_version_string();
- return (*JImageFindResource)(JImage_file, module_name, jimage_version, "module-info.class", &size) != 0;
+ return jimage_find_resource(module_name, "module-info.class", /* is_preview */ false, &size) != 0;
}
jlong ClassLoader::classloader_time_ms() {
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index a935d3027ac2..ff7e89996889 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,8 @@ class ClassPathZipEntry: public ClassPathEntry {
};
-// For java image files
+// A singleton path entry which takes ownership of the initialized JImageFile
+// reference. Not used for exploded builds.
class ClassPathImageEntry: public ClassPathEntry {
private:
const char* _name;
@@ -107,11 +108,12 @@ class ClassPathImageEntry: public ClassPathEntry {
public:
bool is_modules_image() const;
const char* name() const { return _name == nullptr ? "" : _name; }
- JImageFile* jimage() const;
- JImageFile* jimage_non_null() const;
+ // Called to close the JImage during os::abort (normally not called).
void close_jimage();
- ClassPathImageEntry(JImageFile* jimage, const char* name);
+ // Takes effective ownership of the static JImageFile pointer.
+ ClassPathImageEntry(const char* name);
virtual ~ClassPathImageEntry() { ShouldNotReachHere(); }
+
ClassFileStream* open_stream(JavaThread* current, const char* name);
ClassFileStream* open_stream_for_loader(JavaThread* current, const char* name, ClassLoaderData* loader_data);
};
@@ -201,10 +203,10 @@ class ClassLoader: AllStatic {
static GrowableArray<ModuleClassPathList*>* _patch_mod_entries;
// 2. the base piece
- // Contains the ClassPathEntry of the modular java runtime image.
+ // Contains the ClassPathImageEntry of the modular java runtime image.
// If no java runtime image is present, this indicates a
// build with exploded modules is being used instead.
- static ClassPathEntry* _jrt_entry;
+ static ClassPathImageEntry* _jrt_entry;
static GrowableArray<ModuleClassPathList*>* _exploded_entries;
enum { EXPLODED_ENTRY_SIZE = 80 }; // Initial number of exploded modules
@@ -354,15 +356,20 @@ class ClassLoader: AllStatic {
static void append_boot_classpath(ClassPathEntry* new_entry);
#endif
+ // Retrieves additional VM options prior to flags processing. Options held
+ // in the JImage file are retrieved without fully initializing it. (this is
+ // the only JImage lookup which can succeed before init_jimage() is called).
static char* lookup_vm_options();
+ // Called once, after all flags are processed, to finish initializing the
+ // JImage file. Until this is called, jimage_find_resource(), and any other
+ // JImage resource lookups or access will fail.
+ static void set_preview_mode(bool enable_preview);
+
// Determines if the named module is present in the
// modules jimage file or in the exploded modules directory.
static bool is_module_observable(const char* module_name);
- static JImageLocationRef jimage_find_resource(JImageFile* jf, const char* module_name,
- const char* file_name, jlong &size);
-
static void trace_class_path(const char* msg, const char* name = nullptr);
// VM monitoring and management support
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 2aaa061dca3a..ffa88a88b296 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -65,6 +65,7 @@
#include "sanitizers/leak.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
+#include "utilities/integerCast.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
@@ -228,9 +229,14 @@ void CodeCache::initialize_heaps() {
assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
- size_t compiler_buffer_size = 0;
- COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
- COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
+ uint64_t compiler_buffer_size_uint64 = 0;
+ COMPILER1_PRESENT(compiler_buffer_size_uint64 += (uint64_t)CompilationPolicy::c1_count() * Compiler::code_buffer_size());
+ COMPILER2_PRESENT(compiler_buffer_size_uint64 += (uint64_t)CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
+ if (compiler_buffer_size_uint64 > (uint64_t)CODE_CACHE_SIZE_LIMIT) {
+ err_msg msg("CICompilerCount is too large (%" PRIdPTR "): compiler buffer size exceeds the CodeCache size limit", CICompilerCount);
+ vm_exit_during_initialization(msg);
+ }
+ size_t compiler_buffer_size = integer_cast_permit_tautology(compiler_buffer_size_uint64);
if (!non_nmethod.set) {
non_nmethod.size += compiler_buffer_size;
diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp
index 1cc44602186b..e69480560aeb 100644
--- a/src/hotspot/share/compiler/compilationPolicy.cpp
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -814,23 +814,32 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThr
max_method = max_task->method();
}
- methodHandle max_method_h(THREAD, max_method);
+ if (max_task != nullptr && max_method != nullptr) {
+ methodHandle max_method_h(THREAD, max_method);
- if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
- max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
- max_task->set_comp_level(CompLevel_limited_profile);
+ if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
+ is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
- if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
- if (PrintTieredEvents) {
- print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
- }
- compile_queue->remove_and_mark_stale(max_task);
- max_method->clear_queued_for_compilation();
- return nullptr;
- }
+ CompilerDirectiveMatcher directive_matcher(max_method_h, CompLevel_limited_profile);
+ bool exclude_limited_profile = directive_matcher.directive_set()->ExcludeOption;
- if (PrintTieredEvents) {
- print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+ if (!exclude_limited_profile) {
+ max_task->set_comp_level(CompLevel_limited_profile);
+ max_task->transfer_directive(directive_matcher);
+
+ if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
+ if (PrintTieredEvents) {
+ print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+ }
+ compile_queue->remove_and_mark_stale(max_task);
+ max_method->clear_queued_for_compilation();
+ return nullptr;
+ }
+
+ if (PrintTieredEvents) {
+ print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+ }
+ }
}
}
return max_task;
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index c806d356d0cb..ddd3f8ae5f8c 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -1382,7 +1382,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
}
#endif
- CompilerDirectiveMatcher matcher(method, comp);
+ CompilerDirectiveMatcher matcher(method, comp_level);
// CompileBroker::compile_method can trap and can have pending async exception.
nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_count, compile_reason, matcher.directive_set(), THREAD);
return nm;
diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp
index 193770b66a0e..b22aa4466a47 100644
--- a/src/hotspot/share/compiler/compileTask.cpp
+++ b/src/hotspot/share/compiler/compileTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@ CompileTask::CompileTask(int compile_id,
_nm_insts_size(0),
_comp_level(comp_level),
_compiler(CompileBroker::compiler(comp_level)),
- _comp_directive_matcher(method, _compiler),
+ _comp_directive_matcher(method, static_cast<int>(comp_level)),
JVMCI_ONLY(_has_waiter(_compiler->is_jvmci()) COMMA)
JVMCI_ONLY(_blocking_jvmci_compile_state(nullptr) COMMA)
_num_inlined_bytecodes(0),
diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp
index 4b48ee63be50..b6174af72eb4 100644
--- a/src/hotspot/share/compiler/compileTask.hpp
+++ b/src/hotspot/share/compiler/compileTask.hpp
@@ -131,6 +131,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
bool is_blocking() const { return _is_blocking; }
bool is_success() const { return _is_success; }
DirectiveSet* directive() const { return _comp_directive_matcher.directive_set(); }
+ void transfer_directive(CompilerDirectiveMatcher& matcher) { _comp_directive_matcher.transfer_from(matcher); }
CompileReason compile_reason() const { return _compile_reason; }
CodeSection::csize_t nm_content_size() { return _nm_content_size; }
void set_nm_content_size(CodeSection::csize_t size) { _nm_content_size = size; }
diff --git a/src/hotspot/share/compiler/compilerDirectives.cpp b/src/hotspot/share/compiler/compilerDirectives.cpp
index d0042d0e16cd..f61aa111e654 100644
--- a/src/hotspot/share/compiler/compilerDirectives.cpp
+++ b/src/hotspot/share/compiler/compilerDirectives.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "ci/ciMethod.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "compiler/abstractCompiler.hpp"
+#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/compilerOracle.hpp"
@@ -378,7 +379,7 @@ class DirectiveSetPtr {
// - if some option is changed we need to copy directiveset since it no longer can be shared
// - Need to free copy after use
// - Requires a modified bit so we don't overwrite options that is set by directives
-DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle& method) {
+DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle& method, int comp_level) {
// Early bail out - checking all options is expensive - we rely on them not being used
// Only set a flag if it has not been modified and value changes.
// Only copy set if a flag needs to be set
@@ -397,7 +398,7 @@ DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle
// All CompileCommands are not equal so this gets a bit verbose
// When CompileCommands have been refactored less clutter will remain.
- if (CompilerOracle::should_break_at(method)) {
+ if (CompilerOracle::should_break_at(method, static_cast<CompLevel>(comp_level))) {
// If the directives didn't have 'BreakAtCompile' or 'BreakAtExecute',
// the sub-command 'Break' of the 'CompileCommand' would become effective.
if (!_modified[BreakAtCompileIndex]) {
@@ -414,13 +415,13 @@ DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle
}
}
- if (CompilerOracle::should_print(method)) {
+ if (CompilerOracle::should_print(method, static_cast<CompLevel>(comp_level))) {
if (!_modified[PrintAssemblyIndex]) {
set.cloned()->PrintAssemblyOption = true;
}
}
// Exclude as in should not compile == Enabled
- if (CompilerOracle::should_exclude(method)) {
+ if (CompilerOracle::should_exclude(method, static_cast<CompLevel>(comp_level))) {
if (!_modified[ExcludeIndex]) {
set.cloned()->ExcludeOption = true;
}
@@ -547,7 +548,7 @@ bool DirectiveSet::should_inline(ciMethod* inlinee) {
return false;
}
-bool DirectiveSet::should_not_inline(ciMethod* inlinee) {
+bool DirectiveSet::should_not_inline(ciMethod* inlinee, int comp_level) {
inlinee->check_is_loaded();
VM_ENTRY_MARK;
methodHandle mh(THREAD, inlinee->get_Method());
@@ -556,7 +557,7 @@ bool DirectiveSet::should_not_inline(ciMethod* inlinee) {
return matches_inline(mh, InlineMatcher::dont_inline);
}
if (!CompilerDirectivesIgnoreCompileCommandsOption) {
- return CompilerOracle::should_not_inline(mh);
+ return CompilerOracle::should_not_inline(mh, static_cast<CompLevel>(comp_level));
}
return false;
}
@@ -755,7 +756,7 @@ void DirectivesStack::release(DirectiveSet* set) {
assert(set != nullptr, "Never nullptr");
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
if (set->is_exclusive_copy()) {
- // Old CompilecCmmands forced us to create an exclusive copy
+ // Old CompileCommands forced us to create an exclusive copy
delete set;
} else {
assert(set->directive() != nullptr, "Never nullptr");
@@ -772,8 +773,9 @@ void DirectivesStack::release(CompilerDirectives* dir) {
}
}
-DirectiveSet* DirectivesStack::getMatchingDirective(const methodHandle& method, AbstractCompiler *comp) {
+DirectiveSet* DirectivesStack::getMatchingDirective(const methodHandle& method, int comp_level) {
assert(_depth > 0, "Must never be empty");
+ AbstractCompiler* comp = CompileBroker::compiler(comp_level);
DirectiveSet* match = nullptr;
{
@@ -798,5 +800,5 @@ DirectiveSet* DirectivesStack::getMatchingDirective(const methodHandle& method,
guarantee(match != nullptr, "There should always be a default directive that matches");
// Check for legacy compile commands update, without DirectivesStack_lock
- return match->compilecommand_compatibility_init(method);
+ return match->compilecommand_compatibility_init(method, comp_level);
}
diff --git a/src/hotspot/share/compiler/compilerDirectives.hpp b/src/hotspot/share/compiler/compilerDirectives.hpp
index 04873aab664b..ae814cdc491a 100644
--- a/src/hotspot/share/compiler/compilerDirectives.hpp
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,7 +115,7 @@ class DirectivesStack : AllStatic {
static int _depth;
static void pop_inner(); // no lock version of pop
- static DirectiveSet* getMatchingDirective(const methodHandle& mh, AbstractCompiler* comp);
+ static DirectiveSet* getMatchingDirective(const methodHandle& mh, int comp_level);
static DirectiveSet* getDefaultDirective(AbstractCompiler* comp);
static void release(DirectiveSet* set);
static void release(CompilerDirectives* dir);
@@ -145,10 +145,10 @@ class DirectiveSet : public CHeapObj<mtCompiler> {
bool parse_and_add_inline(char* str, const char*& error_msg);
void append_inline(InlineMatcher* m);
bool should_inline(ciMethod* inlinee);
- bool should_not_inline(ciMethod* inlinee);
+ bool should_not_inline(ciMethod* inlinee, int comp_level);
bool should_delay_inline(ciMethod* inlinee);
void print_inline(outputStream* st);
- DirectiveSet* compilecommand_compatibility_init(const methodHandle& method);
+ DirectiveSet* compilecommand_compatibility_init(const methodHandle& method, int comp_level);
bool is_exclusive_copy() { return _directive == nullptr; }
bool matches_inline(const methodHandle& method, int inline_action);
static DirectiveSet* clone(DirectiveSet const* src);
@@ -335,21 +335,35 @@ class CompilerDirectives : public CHeapObj<mtCompiler> {
class CompilerDirectiveMatcher {
private:
DirectiveSet* _match;
+
+ void release_match() {
+ if (_match != nullptr) {
+ DirectivesStack::release(_match);
+ _match = nullptr;
+ }
+ }
+
public:
// Use this constructor to get default directive
CompilerDirectiveMatcher(AbstractCompiler* comp) {
_match = DirectivesStack::getDefaultDirective(comp);
}
- CompilerDirectiveMatcher(const methodHandle& mh, AbstractCompiler* comp) {
- _match = DirectivesStack::getMatchingDirective(mh, comp);
+ CompilerDirectiveMatcher(const methodHandle& mh, int comp_level) {
+ _match = DirectivesStack::getMatchingDirective(mh, comp_level);
}
~CompilerDirectiveMatcher() {
- DirectivesStack::release(_match);
+ release_match();
}
DirectiveSet* directive_set() const { return _match; }
+
+ void transfer_from(CompilerDirectiveMatcher& src) {
+ release_match();
+ _match = src._match;
+ src._match = nullptr;
+ }
};
#endif // SHARE_COMPILER_COMPILERDIRECTIVES_HPP
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index 5bcd01a4d09e..241d27eb6be8 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,37 @@ static const char* const default_compile_commands[] = {
#endif
nullptr };
+// CompLevel | -XX:CompileCommand bitmask
+// ----------------------------------------------------
+// 0 (interpreter) | N/A
+// 1 (C1) | 1
+// 2 (C1 + counters) | 10
+// 3 (C1 + counters + mdo) | 100
+// 4 (C2/JVMCI) | 1000
+// All C1 levels | 111
+// All levels | 1111
+
+static const int comp_level_bitmask[CompLevel_count] = {0, 1, 10, 100, 1000};
+static const int comp_level_bitmask_all_levels = 1111;
+static const intx default_comp_level_argument = comp_level_bitmask_all_levels;
+
+inline bool bitmask_applies_to_comp_level(int bitmask, int comp_level) {
+ assert(comp_level > CompLevel_none && comp_level < CompLevel_count, "CompLevel out of bounds");
+ return (bitmask / comp_level_bitmask[comp_level]) % 10 == 1;
+}
+
+static bool is_valid_comp_level_bitmask(intx bitmask) {
+ if (bitmask < 0 || bitmask > comp_level_bitmask_all_levels) {
+ return false;
+ }
+ for (; bitmask != 0; bitmask /= 10) {
+ if (bitmask % 10 > 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
static const char* optiontype_names[] = {
#define enum_of_types(type, name) name,
OPTION_TYPES(enum_of_types)
@@ -456,36 +487,56 @@ template bool CompilerOracle::option_matches_type(CompileCommandEnum optio
template bool CompilerOracle::option_matches_type<ccstr>(CompileCommandEnum option, ccstr& value);
template bool CompilerOracle::option_matches_type<double>(CompileCommandEnum option, double& value);
+bool CompilerOracle::applies_to_comp_level(const methodHandle& method, CompileCommandEnum command, CompLevel current_level) {
+ if (current_level == CompLevel_none) {
+ return false;
+ }
+
+ intx bitmask = 0;
+ if (!has_option_value(method, command, bitmask)) {
+ return false;
+ }
+
+ // Since we don't have bitmask for interpreter level (0), but still need to call CompilerOracle::should_print()
+ // from collect_profiled_methods() in java.cpp, a special value of CompLevel_any produces a match with any bitmask, even 0
+ return current_level == CompLevel_any
+ || bitmask_applies_to_comp_level(bitmask, current_level);
+}
+
bool CompilerOracle::has_option(const methodHandle& method, CompileCommandEnum option) {
bool value = false;
has_option_value(method, option, value);
return value;
}
-bool CompilerOracle::should_exclude(const methodHandle& method) {
- if (check_predicate(CompileCommandEnum::Exclude, method)) {
+bool CompilerOracle::should_exclude(const methodHandle& method, const CompLevel level) {
+ if (has_exclude(method, level)) {
return true;
}
if (has_command(CompileCommandEnum::CompileOnly)) {
- return !check_predicate(CompileCommandEnum::CompileOnly, method);
+ return !applies_to_comp_level(method, CompileCommandEnum::CompileOnly, level);
}
return false;
}
+bool CompilerOracle::has_exclude(const methodHandle& method, const CompLevel level) {
+ return applies_to_comp_level(method, CompileCommandEnum::Exclude, level);
+}
+
bool CompilerOracle::should_inline(const methodHandle& method) {
return (check_predicate(CompileCommandEnum::Inline, method));
}
-bool CompilerOracle::should_not_inline(const methodHandle& method) {
- return check_predicate(CompileCommandEnum::DontInline, method) || check_predicate(CompileCommandEnum::Exclude, method);
+bool CompilerOracle::should_not_inline(const methodHandle& method, const CompLevel level) {
+ return check_predicate(CompileCommandEnum::DontInline, method) || has_exclude(method, level);
}
bool CompilerOracle::should_delay_inline(const methodHandle& method) {
return (check_predicate(CompileCommandEnum::DelayInline, method));
}
-bool CompilerOracle::should_print(const methodHandle& method) {
- return check_predicate(CompileCommandEnum::Print, method);
+bool CompilerOracle::should_print(const methodHandle& method, const CompLevel level) {
+ return applies_to_comp_level(method, CompileCommandEnum::Print, level);
}
bool CompilerOracle::should_print_methods() {
@@ -505,8 +556,8 @@ bool CompilerOracle::should_log(const methodHandle& method) {
return (check_predicate(CompileCommandEnum::Log, method));
}
-bool CompilerOracle::should_break_at(const methodHandle& method) {
- return check_predicate(CompileCommandEnum::Break, method);
+bool CompilerOracle::should_break_at(const methodHandle& method, const CompLevel level) {
+ return applies_to_comp_level(method, CompileCommandEnum::Break, level);
}
void CompilerOracle::tag_blackhole_if_possible(const methodHandle& method) {
@@ -678,6 +729,19 @@ static void usage() {
tty->print_cr("from inlining, whereas the 'compileonly' command only excludes methods from");
tty->print_cr("top-level compilations (i.e. they can still be inlined into other compilation units).");
tty->cr();
+ tty->print_cr("Compilation levels can be specified in the 'compileonly', 'exclude', 'print',");
+ tty->print_cr("and 'break' commands using a binary bitmask as an optional value:");
+ tty->print_cr(" -XX:CompileCommand=exclude,java/*.*,1011 -XX:CompileCommand=print,java/*.*,100");
+ tty->cr();
+ tty->print_cr("The bitmask is calculated by summing the desired compilation level values:");
+ tty->print_cr(" C1 without profiling = 1");
+ tty->print_cr(" C1 with limited profiling = 10");
+ tty->print_cr(" C1 with full profiling = 100");
+ tty->print_cr(" C2 = 1000");
+ tty->cr();
+ tty->print_cr("Note: Excluding specific compilation levels may disrupt normal state transitions");
+ tty->print_cr("between the levels, as the VM will not automatically work around the excluded ones.");
+ tty->cr();
};
static int skip_whitespace(char* &line) {
@@ -712,7 +776,7 @@ static bool parseMemLimit(const char* line, intx& value, int& bytes_read, char*
size_t s = 0;
char* end;
if (!parse_integer<size_t>(line, &end, &s)) {
- jio_snprintf(errorbuf, buf_size, "MemLimit: invalid value");
+ jio_snprintf(errorbuf, buf_size, ": invalid integer: '%.20s'", line);
return false;
}
bytes_read = (int)(end - line);
@@ -726,7 +790,7 @@ static bool parseMemLimit(const char* line, intx& value, int& bytes_read, char*
// ok, this is the default
bytes_read += 5;
} else {
- jio_snprintf(errorbuf, buf_size, "MemLimit: invalid option");
+ jio_snprintf(errorbuf, buf_size, ": invalid suffix: '%.6s'", end);
return false;
}
}
@@ -751,7 +815,7 @@ static bool parseMemStat(const char* line, uintx& value, int& bytes_read, char*
});
#undef IF_ENUM_STRING
- jio_snprintf(errorbuf, buf_size, "MemStat: invalid option");
+ jio_snprintf(errorbuf, buf_size, ": invalid option: '%.8s'", line);
return false;
}
@@ -763,21 +827,42 @@ static bool scan_value(enum OptionType type, char* line, int& total_bytes_read,
const char* type_str = optiontype2name(type);
int skipped = skip_whitespace(line);
total_bytes_read += skipped;
+ char parse_error_buf[80] = {};
+
if (type == OptionType::Intx) {
intx value;
bool success = false;
- if (option == CompileCommandEnum::MemLimit) {
- // Special parsing for MemLimit
- success = parseMemLimit(line, value, bytes_read, errorbuf, buf_size);
- } else {
- // Is it a raw number?
- success = sscanf(line, "%zd%n", &value, &bytes_read) == 1;
+ switch (option) {
+ case CompileCommandEnum::MemLimit:
+ // Special parsing for MemLimit
+ success = parseMemLimit(line, value, bytes_read, parse_error_buf, sizeof(parse_error_buf));
+ break;
+ case CompileCommandEnum::Break:
+ case CompileCommandEnum::CompileOnly:
+ case CompileCommandEnum::Exclude:
+ case CompileCommandEnum::Print:
+ // In the commands above the parameter used to be a boolean. Now it is an int (a compilation level mask).
+ // For compatibility with previous versions we keep it optional. If user did not specify the mask, assume default value
+ if (*line == '\0') {
+ value = default_comp_level_argument;
+ success = true;
+ } else {
+ success = sscanf(line, "%zd%n", &value, &bytes_read) == 1;
+ if (success && !is_valid_comp_level_bitmask(value)) {
+ jio_snprintf(parse_error_buf, sizeof(parse_error_buf), ": invalid compilation level bitmask '%.*s'", bytes_read, line);
+ success = false;
+ }
+ }
+ break;
+ default:
+ // Is it a raw number?
+ success = sscanf(line, "%zd%n", &value, &bytes_read) == 1;
}
if (success) {
total_bytes_read += bytes_read;
return register_command(matcher, option, errorbuf, buf_size, value);
} else {
- jio_snprintf(errorbuf, buf_size, "Value cannot be read for option '%s' of type '%s'", ccname, type_str);
+ jio_snprintf(errorbuf, buf_size, "Value cannot be read for option '%s' of type '%s'%s", ccname, type_str, parse_error_buf);
return false;
}
} else if (type == OptionType::Uintx) {
@@ -785,7 +870,7 @@ static bool scan_value(enum OptionType type, char* line, int& total_bytes_read,
bool success = false;
if (option == CompileCommandEnum::MemStat) {
// Special parsing for MemStat
- success = parseMemStat(line, value, bytes_read, errorbuf, buf_size);
+ success = parseMemStat(line, value, bytes_read, parse_error_buf, sizeof(parse_error_buf));
} else {
// parse as raw number
success = sscanf(line, "%zu%n", &value, &bytes_read) == 1;
@@ -794,7 +879,7 @@ static bool scan_value(enum OptionType type, char* line, int& total_bytes_read,
total_bytes_read += bytes_read;
return register_command(matcher, option, errorbuf, buf_size, value);
} else {
- jio_snprintf(errorbuf, buf_size, "Value cannot be read for option '%s' of type '%s'", ccname, type_str);
+ jio_snprintf(errorbuf, buf_size, "Value cannot be read for option '%s' of type '%s'%s", ccname, type_str, parse_error_buf);
return false;
}
} else if (type == OptionType::Ccstr) {
@@ -1089,17 +1174,25 @@ bool CompilerOracle::parse_from_line(char* line) {
return false;
}
return true;
- } else if (option == CompileCommandEnum::MemStat) {
- // MemStat default action is to collect data but to not print
- if (!register_command(matcher, option, error_buf, sizeof(error_buf), (uintx)MemStatAction::collect)) {
+ }
+
+ switch (option) {
+ case CompileCommandEnum::Break:
+ case CompileCommandEnum::CompileOnly:
+ case CompileCommandEnum::Exclude:
+ case CompileCommandEnum::Print:
+ break;
+ case CompileCommandEnum::MemStat:
+ // MemStat default action is to collect data but to not print
+ if (!register_command(matcher, option, error_buf, sizeof(error_buf), (uintx)MemStatAction::collect)) {
+ print_parse_error(error_buf, original.get());
+ return false;
+ }
+ return true;
+ default:
+ jio_snprintf(error_buf, sizeof(error_buf), " Option '%s' is not followed by a value", option2name(option));
print_parse_error(error_buf, original.get());
return false;
- }
- return true;
- } else {
- jio_snprintf(error_buf, sizeof(error_buf), " Option '%s' is not followed by a value", option2name(option));
- print_parse_error(error_buf, original.get());
- return false;
}
}
if (!scan_value(type, line, bytes_read, matcher, option, error_buf, sizeof(error_buf))) {
@@ -1209,7 +1302,7 @@ bool CompilerOracle::parse_compile_only(char* line) {
if (method_pattern != nullptr) {
TypedMethodOptionMatcher* matcher = TypedMethodOptionMatcher::parse_method_pattern(method_pattern, error_buf, sizeof(error_buf));
if (matcher != nullptr) {
- if (register_command(matcher, CompileCommandEnum::CompileOnly, error_buf, sizeof(error_buf), true)) {
+ if (register_command(matcher, CompileCommandEnum::CompileOnly, error_buf, sizeof(error_buf), default_comp_level_argument)) {
continue;
}
}
diff --git a/src/hotspot/share/compiler/compilerOracle.hpp b/src/hotspot/share/compiler/compilerOracle.hpp
index 5615a2cf1fcd..bfed52f12e7b 100644
--- a/src/hotspot/share/compiler/compilerOracle.hpp
+++ b/src/hotspot/share/compiler/compilerOracle.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_COMPILER_COMPILERORACLE_HPP
#define SHARE_COMPILER_COMPILERORACLE_HPP
+#include "compiler/compilerDirectives.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/istream.hpp"
@@ -49,14 +50,14 @@ class methodHandle;
option(Help, "help", Unknown) \
option(Quiet, "quiet", Unknown) \
option(Log, "log", Bool) \
- option(Print, "print", Bool) \
+ option(Print, "print", Intx) \
option(Inline, "inline", Bool) \
option(DelayInline, "delayinline", Bool) \
option(DontInline, "dontinline", Bool) \
option(Blackhole, "blackhole", Bool) \
- option(CompileOnly, "compileonly", Bool)\
- option(Exclude, "exclude", Bool) \
- option(Break, "break", Bool) \
+ option(CompileOnly, "compileonly", Intx) \
+ option(Exclude, "exclude", Intx) \
+ option(Break, "break", Intx) \
option(BreakAtExecute, "BreakAtExecute", Bool) \
option(BreakAtCompile, "BreakAtCompile", Bool) \
option(MemLimit, "MemLimit", Intx) \
@@ -135,6 +136,9 @@ class CompilerOracle : AllStatic {
static bool parse_from_input(inputStream::Input* input,
parse_from_line_fn_t* parse_from_line);
+ static bool has_exclude(const methodHandle& method, CompLevel level);
+ static bool applies_to_comp_level(const methodHandle& method, CompileCommandEnum command, CompLevel current_level);
+
public:
// True if the command file has been specified or is implicit
static bool has_command_file();
@@ -143,14 +147,15 @@ class CompilerOracle : AllStatic {
static bool parse_from_file();
// Tells whether we to exclude compilation of method
- static bool should_exclude(const methodHandle& method);
+ static bool should_exclude(const methodHandle & method, CompLevel level);
+
static bool be_quiet() { return _quiet; }
// Tells whether we want to inline this method
static bool should_inline(const methodHandle& method);
// Tells whether we want to disallow inlining of this method
- static bool should_not_inline(const methodHandle& method);
+ static bool should_not_inline(const methodHandle& method, CompLevel level);
// Tells whether we want to delay inlining of this method
static bool should_delay_inline(const methodHandle& method);
@@ -159,13 +164,14 @@ class CompilerOracle : AllStatic {
static bool changes_current_thread(const methodHandle& method);
// Tells whether we should print the assembly for this method
- static bool should_print(const methodHandle& method);
+ // If level == CompLevel_none or CompLevel_any, returns true if there is a print command with any mask
+ static bool should_print(const methodHandle& method, CompLevel level);
// Tells whether we should log the compilation data for this method
static bool should_log(const methodHandle& method);
// Tells whether to break when compiling method
- static bool should_break_at(const methodHandle& method);
+ static bool should_break_at(const methodHandle& method, CompLevel level);
// Tells whether there are any methods to print for print_method_statistics()
static bool should_print_methods();
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp
index 8cb7881e000a..64441ccac658 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp
@@ -32,7 +32,7 @@
// Total virtual time so far.
inline double G1ConcurrentMarkThread::total_mark_cpu_time_s() {
- return static_cast(os::thread_cpu_time(this)) + worker_threads_cpu_time_s();
+ return static_cast(os::thread_cpu_time(this)) / NANOSECS_PER_SEC + worker_threads_cpu_time_s();
}
// Marking virtual time so far
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
index 7da0066e2f16..4fa32b388bd6 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
@@ -25,18 +25,15 @@
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "runtime/timer.hpp"
-void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) {
- _sweep_duration.add_then_fetch(other->_sweep_duration.load_relaxed(), memory_order_relaxed);
- _yield_during_sweep_duration.add_then_fetch(other->yield_during_sweep_duration(), memory_order_relaxed);
+void G1ConcurrentRefineStats::add_atomic(const G1LocalRefineStats* other) {
+ _cards_scanned.add_then_fetch(other->_cards_scanned, memory_order_relaxed);
+ _cards_clean.add_then_fetch(other->_cards_clean, memory_order_relaxed);
+ _cards_not_parsable.add_then_fetch(other->_cards_not_parsable, memory_order_relaxed);
+ _cards_already_refer_to_cset.add_then_fetch(other->_cards_already_refer_to_cset, memory_order_relaxed);
+ _cards_refer_to_cset.add_then_fetch(other->_cards_refer_to_cset, memory_order_relaxed);
+ _cards_no_cross_region.add_then_fetch(other->_cards_no_cross_region, memory_order_relaxed);
- _cards_scanned.add_then_fetch(other->cards_scanned(), memory_order_relaxed);
- _cards_clean.add_then_fetch(other->cards_clean(), memory_order_relaxed);
- _cards_not_parsable.add_then_fetch(other->cards_not_parsable(), memory_order_relaxed);
- _cards_already_refer_to_cset.add_then_fetch(other->cards_already_refer_to_cset(), memory_order_relaxed);
- _cards_refer_to_cset.add_then_fetch(other->cards_refer_to_cset(), memory_order_relaxed);
- _cards_no_cross_region.add_then_fetch(other->cards_no_cross_region(), memory_order_relaxed);
-
- _refine_duration.add_then_fetch(other->refine_duration(), memory_order_relaxed);
+ _refine_duration.add_then_fetch(other->_refine_duration, memory_order_relaxed);
}
void G1ConcurrentRefineStats::reset() {
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
index a91ad0eb2e4f..6f4af71081b2 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
@@ -29,9 +29,27 @@
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
-// Collection of statistics for concurrent refinement processing.
-// Used for collecting per-thread statistics and for summaries over a
-// collection of threads.
+// Thread-local refinement statistics.
+struct G1LocalRefineStats {
+ size_t _cards_scanned;
+ size_t _cards_clean;
+ size_t _cards_not_parsable;
+ size_t _cards_already_refer_to_cset;
+ size_t _cards_refer_to_cset;
+ size_t _cards_no_cross_region;
+ jlong _refine_duration;
+
+ G1LocalRefineStats() :
+ _cards_scanned(0),
+ _cards_clean(0),
+ _cards_not_parsable(0),
+ _cards_already_refer_to_cset(0),
+ _cards_refer_to_cset(0),
+ _cards_no_cross_region(0),
+ _refine_duration(0) {}
+};
+
+// Global statistics for concurrent refinement processing.
class G1ConcurrentRefineStats : public CHeapObj {
Atomic _sweep_duration; // Time spent sweeping the table finding non-clean cards
// and refining them.
@@ -69,18 +87,10 @@ class G1ConcurrentRefineStats : public CHeapObj {
inline size_t cards_to_cset() const;
- inline void inc_sweep_time(jlong t);
- inline void inc_yield_during_sweep_duration(jlong t);
- inline void inc_refine_duration(jlong t);
-
- inline void inc_cards_scanned(size_t increment);
- inline void inc_cards_clean(size_t increment);
- inline void inc_cards_not_parsable();
- inline void inc_cards_already_refer_to_cset();
- inline void inc_cards_refer_to_cset();
- inline void inc_cards_no_cross_region();
+ void add_atomic(const G1LocalRefineStats* other);
- void add_atomic(G1ConcurrentRefineStats* other);
+ inline void inc_sweep_duration(jlong t);
+ inline void inc_yield_during_sweep_duration(jlong t);
void reset();
};
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
index e1a296c64948..2ef35caab087 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
@@ -79,40 +79,12 @@ inline size_t G1ConcurrentRefineStats::cards_to_cset() const {
return cards_already_refer_to_cset() + cards_refer_to_cset();
}
-inline void G1ConcurrentRefineStats::inc_sweep_time(jlong t) {
- _sweep_duration.store_relaxed(_sweep_duration.load_relaxed() + t);
+inline void G1ConcurrentRefineStats::inc_sweep_duration(jlong t) {
+ _sweep_duration.fetch_then_add(t, memory_order_relaxed);
}
inline void G1ConcurrentRefineStats::inc_yield_during_sweep_duration(jlong t) {
- _yield_during_sweep_duration.store_relaxed(yield_during_sweep_duration() + t);
-}
-
-inline void G1ConcurrentRefineStats::inc_refine_duration(jlong t) {
- _refine_duration.store_relaxed(refine_duration() + t);
-}
-
-inline void G1ConcurrentRefineStats::inc_cards_scanned(size_t increment) {
- _cards_scanned.store_relaxed(cards_scanned() + increment);
-}
-
-inline void G1ConcurrentRefineStats::inc_cards_clean(size_t increment) {
- _cards_clean.store_relaxed(cards_clean() + increment);
-}
-
-inline void G1ConcurrentRefineStats::inc_cards_not_parsable() {
- _cards_not_parsable.store_relaxed(cards_not_parsable() + 1);
-}
-
-inline void G1ConcurrentRefineStats::inc_cards_already_refer_to_cset() {
- _cards_already_refer_to_cset.store_relaxed(cards_already_refer_to_cset() + 1);
-}
-
-inline void G1ConcurrentRefineStats::inc_cards_refer_to_cset() {
- _cards_refer_to_cset.store_relaxed(cards_refer_to_cset() + 1);
-}
-
-inline void G1ConcurrentRefineStats::inc_cards_no_cross_region() {
- _cards_no_cross_region.store_relaxed(cards_no_cross_region() + 1);
+ _yield_during_sweep_duration.fetch_then_add(t, memory_order_relaxed);
}
#endif // SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
index e522163f9806..2f99611bb99d 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
@@ -60,22 +60,22 @@ class G1RefineRegionClosure : public G1HeapRegionClosure {
switch (res) {
case G1RemSet::HasRefToCSet: {
*dest_card = G1CardTable::g1_to_cset_card;
- _refine_stats.inc_cards_refer_to_cset();
+ _per_worker_refine_data._cards_refer_to_cset++;
break;
}
case G1RemSet::AlreadyToCSet: {
*dest_card = G1CardTable::g1_to_cset_card;
- _refine_stats.inc_cards_already_refer_to_cset();
+ _per_worker_refine_data._cards_already_refer_to_cset++;
break;
}
case G1RemSet::NoCrossRegion: {
- _refine_stats.inc_cards_no_cross_region();
+ _per_worker_refine_data._cards_no_cross_region++;
break;
}
case G1RemSet::CouldNotParse: {
// Could not refine - redirty with the original value.
*dest_card = *source_card;
- _refine_stats.inc_cards_not_parsable();
+ _per_worker_refine_data._cards_not_parsable++;
break;
}
case G1RemSet::HasRefToOld : break; // Nothing special to do.
@@ -92,7 +92,7 @@ class G1RefineRegionClosure : public G1HeapRegionClosure {
public:
bool _completed;
- G1ConcurrentRefineStats _refine_stats;
+ G1LocalRefineStats _per_worker_refine_data;
G1RefineRegionClosure(uint worker_id, G1CardTableClaimTable* scan_state) :
G1HeapRegionClosure(),
@@ -100,7 +100,7 @@ class G1RefineRegionClosure : public G1HeapRegionClosure {
_scan_state(scan_state),
_worker_id(worker_id),
_completed(true),
- _refine_stats() { }
+ _per_worker_refine_data() { }
bool do_heap_region(G1HeapRegion* r) override {
@@ -141,7 +141,7 @@ class G1RefineRegionClosure : public G1HeapRegionClosure {
do_claimed_block(dirty_l, dirty_r, dest_card + pointer_delta(dirty_l, start_card, sizeof(CardValue)));
num_dirty_cards += pointer_delta(dirty_r, dirty_l, sizeof(CardValue));
- _refine_stats.inc_refine_duration(os::elapsed_counter() - refine_start);
+ _per_worker_refine_data._refine_duration += os::elapsed_counter() - refine_start;
});
if (VerifyDuringGC) {
@@ -150,8 +150,8 @@ class G1RefineRegionClosure : public G1HeapRegionClosure {
}
}
- _refine_stats.inc_cards_scanned(claim.size());
- _refine_stats.inc_cards_clean(claim.size() - num_dirty_cards);
+ _per_worker_refine_data._cards_scanned += claim.size();
+ _per_worker_refine_data._cards_clean += claim.size() - num_dirty_cards;
if (SuspendibleThreadSet::should_yield()) {
_completed = false;
@@ -183,8 +183,8 @@ void G1ConcurrentRefineSweepTask::work(uint worker_id) {
_sweep_completed = false;
}
- sweep_cl._refine_stats.inc_sweep_time(os::elapsed_counter() - start);
- _stats->add_atomic(&sweep_cl._refine_stats);
+ _stats->inc_sweep_duration(os::elapsed_counter() - start);
+ _stats->add_atomic(&sweep_cl._per_worker_refine_data);
}
bool G1ConcurrentRefineSweepTask::sweep_completed() const { return _sweep_completed; }
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 769977c7dacf..01afb6a5c774 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -1006,7 +1006,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
const G1Predictions* predictor) {
- return new G1IHOPControl(InitiatingHeapOccupancyPercent,
+ return new G1IHOPControl(G1IHOP,
old_gen_alloc_tracker,
G1UseAdaptiveIHOP,
predictor,
diff --git a/src/hotspot/share/gc/g1/g1_globals.hpp b/src/hotspot/share/gc/g1/g1_globals.hpp
index baed70b7088b..14daac4800b1 100644
--- a/src/hotspot/share/gc/g1/g1_globals.hpp
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp
@@ -100,9 +100,8 @@
\
product(bool, G1UseAdaptiveIHOP, true, \
"Adaptively adjust the initiating heap occupancy from the " \
- "initial value of InitiatingHeapOccupancyPercent. The policy " \
- "attempts to start marking in time based on application " \
- "behavior.") \
+ "initial value of G1IHOP. The policy attempts to start marking " \
+ "in time based on application behavior.") \
\
product(size_t, G1AdaptiveIHOPNumInitialSamples, 3, EXPERIMENTAL, \
"How many completed time periods from concurrent start to first " \
@@ -110,6 +109,19 @@
"of the optimal occupancy to start marking.") \
range(1, max_intx) \
\
+ product(uint, G1IHOP, 45, \
+ "The Initiating Heap Occupancy Percentage (IHOP) for the " \
+ "concurrent cycle. G1IHOP sets the percentage of the current " \
+ "Java heap capacity occupied by the old generation at which G1 " \
+ "starts this process. If G1UseAdaptiveIHOP is enabled, this " \
+ "value is used as the initial threshold and may be adjusted " \
+ "ergonomically by G1. " \
+ "A value of 0 will result in as frequent as possible concurrent " \
+ "cycles. A value of 100 disables concurrent cycles. " \
+ "Fragmentation waste in the old generation is not considered " \
+ "free space in this calculation.") \
+ range(0, 100) \
+ \
product(uint, G1ConfidencePercent, 50, \
"Confidence level for MMU/pause predictions. A higher value " \
"means that G1 will use less safety margin for its predictions.") \
diff --git a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
index f7445ff254ff..381a9f65295e 100644
--- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@ Node* CardTableBarrierSetC2::byte_map_base_node(IdealKit* kit) const {
#endif
CardTable::CardValue* card_table_base = ci_card_table_address_const();
if (card_table_base != nullptr) {
- return kit->makecon(TypeRawPtr::make((address)card_table_base));
+ return kit->makecon(TypeRawPtr::make((address)card_table_base, relocInfo::none));
} else {
return kit->makecon(Type::get_zero_type(T_ADDRESS));
}
diff --git a/src/hotspot/share/gc/shared/referencePolicy.cpp b/src/hotspot/share/gc/shared/referencePolicy.cpp
index d1867291479c..6c5f459ebb41 100644
--- a/src/hotspot/share/gc/shared/referencePolicy.cpp
+++ b/src/hotspot/share/gc/shared/referencePolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,21 +28,17 @@
#include "gc/shared/referencePolicy.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
+#include "utilities/integerCast.hpp"
-LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() {
- setup();
-}
-
-// Capture state (of-the-VM) information needed to evaluate the policy
-void LRUCurrentHeapPolicy::setup() {
- _max_interval = (Universe::heap()->free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB;
- assert(_max_interval >= 0,"Sanity check");
+void AbstractLRUReferencePolicy::set_max_interval(jlong max_interval) {
+ assert(max_interval >= 0, "Sanity check");
+ _max_interval = max_interval;
}
// The oop passed in is the SoftReference object, and not
// the object the SoftReference points to.
-bool LRUCurrentHeapPolicy::should_clear_reference(oop p,
- jlong timestamp_clock) {
+bool AbstractLRUReferencePolicy::should_clear_reference(oop p, jlong timestamp_clock) {
+ assert(_max_interval >= 0, "Forgot to call setup");
jlong interval = timestamp_clock - java_lang_ref_SoftReference::timestamp(p);
assert(interval >= 0, "Sanity check");
@@ -54,10 +50,9 @@ bool LRUCurrentHeapPolicy::should_clear_reference(oop p,
return true;
}
-/////////////////////// MaxHeap //////////////////////
-
-LRUMaxHeapPolicy::LRUMaxHeapPolicy() {
- setup();
+// Capture state (of-the-VM) information needed to evaluate the policy
+void LRUCurrentHeapPolicy::setup() {
+ set_max_interval(integer_cast(Universe::heap()->free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB);
}
// Capture state (of-the-VM) information needed to evaluate the policy
@@ -66,21 +61,5 @@ void LRUMaxHeapPolicy::setup() {
max_heap -= Universe::heap()->used_at_last_gc();
max_heap /= M;
- _max_interval = max_heap * SoftRefLRUPolicyMSPerMB;
- assert(_max_interval >= 0,"Sanity check");
-}
-
-// The oop passed in is the SoftReference object, and not
-// the object the SoftReference points to.
-bool LRUMaxHeapPolicy::should_clear_reference(oop p,
- jlong timestamp_clock) {
- jlong interval = timestamp_clock - java_lang_ref_SoftReference::timestamp(p);
- assert(interval >= 0, "Sanity check");
-
- // The interval will be zero if the ref was accessed since the last scavenge/gc.
- if(interval <= _max_interval) {
- return false;
- }
-
- return true;
+ set_max_interval(integer_cast(max_heap) * SoftRefLRUPolicyMSPerMB);
}
diff --git a/src/hotspot/share/gc/shared/referencePolicy.hpp b/src/hotspot/share/gc/shared/referencePolicy.hpp
index cf0382b036b9..0fd918fa7235 100644
--- a/src/hotspot/share/gc/shared/referencePolicy.hpp
+++ b/src/hotspot/share/gc/shared/referencePolicy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,28 +56,28 @@ class AlwaysClearPolicy : public ReferencePolicy {
}
};
-class LRUCurrentHeapPolicy : public ReferencePolicy {
+class AbstractLRUReferencePolicy : public ReferencePolicy {
private:
- jlong _max_interval;
+ jlong _max_interval = -1;
+
+ protected:
+ void set_max_interval(jlong max_interval);
public:
- LRUCurrentHeapPolicy();
+ bool should_clear_reference(oop p, jlong timestamp_clock) final;
+ void setup() override = 0;
+};
+class LRUCurrentHeapPolicy : public AbstractLRUReferencePolicy {
+ public:
// Capture state (of-the-VM) information needed to evaluate the policy
- void setup();
- virtual bool should_clear_reference(oop p, jlong timestamp_clock);
+ void setup() final;
};
-class LRUMaxHeapPolicy : public ReferencePolicy {
- private:
- jlong _max_interval;
-
+class LRUMaxHeapPolicy : public AbstractLRUReferencePolicy {
public:
- LRUMaxHeapPolicy();
-
// Capture state (of-the-VM) information needed to evaluate the policy
- void setup();
- virtual bool should_clear_reference(oop p, jlong timestamp_clock);
+ void setup() final;
};
#endif // SHARE_GC_SHARED_REFERENCEPOLICY_HPP
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index ad1dca475036..637ed6e64074 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -133,7 +133,6 @@ LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, L
addr = ensure_in_register(gen, addr, T_ADDRESS);
assert(addr->is_register(), "must be a register at this point");
LIR_Opr result = gen->result_register_for(obj->value_type());
- __ move(obj, result);
LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
LIR_Opr tmp2 = gen->new_register(T_ADDRESS);
@@ -164,6 +163,11 @@ LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, L
CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, decorators);
__ branch(lir_cond_notEqual, slow);
+
+ // No barrier is needed, move obj to result now.
+ __ move(obj, result);
+
+ // Slow-path re-enters here with result set.
__ branch_destination(slow->continuation());
return result;
@@ -199,7 +203,7 @@ void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value)
bool precise = is_array || on_anonymous;
LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
- post_barrier(access, post_addr, value);
+ post_barrier(access, post_addr);
}
}
@@ -314,7 +318,7 @@ bool ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob)
return true;
}
-void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
+void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
DecoratorSet decorators = access.decorators();
@@ -368,3 +372,71 @@ void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_O
__ move(dirty, card_addr);
}
}
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ if (!access.is_oop()) {
+ return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+ }
+
+ LIRGenerator* gen = access.gen();
+
+ LIR_Opr tmp = gen->new_register(T_OBJECT);
+ LIR_Opr addr = access.resolved_addr();
+
+ // Handle the previous value through SATB, as we are about to perform the store.
+ __ load(addr->as_address_ptr(), tmp);
+ if (ShenandoahSATBBarrier) {
+ pre_barrier(gen, access.access_emit_info(), access.decorators(),
+ /* addr_opr (unused) = */ LIR_OprFact::illegalOpr,
+ /* pre_val = */ tmp);
+ }
+
+ // Perform LRB on location to fix it up for this and all following accesses.
+ // This guarantees there are no false negatives due to concurrent evacuation,
+ // and the value loaded later by CAS is sanitized by some LRB, or is null.
+ if (ShenandoahLoadRefBarrier) {
+ load_reference_barrier(gen, /* obj = */ tmp, /* addr = */ addr, access.decorators());
+ }
+
+ LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+
+ if (ShenandoahCardBarrier) {
+ post_barrier(access, /* addr = */ addr);
+ }
+
+ return result;
+}
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+ if (!access.is_oop()) {
+ return BarrierSetC1::atomic_xchg_at_resolved(access, value);
+ }
+
+ LIRGenerator* gen = access.gen();
+
+ LIR_Opr tmp = gen->new_register(T_OBJECT);
+ LIR_Opr addr = access.resolved_addr();
+
+ // Handle the previous value through SATB, as we are about to perform the store.
+ __ load(addr->as_address_ptr(), tmp);
+ if (ShenandoahSATBBarrier) {
+ pre_barrier(gen, access.access_emit_info(), access.decorators(),
+ /* addr_opr (unused) = */ LIR_OprFact::illegalOpr,
+ /* pre_val = */ tmp);
+ }
+
+ // Perform LRB on location to fix it up for this and all following accesses.
+ // This is purely opportunistic: we would not have any false negatives here.
+ // This guarantees the value loaded later by XCHG is sanitized by some LRB, or is null.
+ if (ShenandoahLoadRefBarrier) {
+ load_reference_barrier(gen, /* obj = */ tmp, /* addr = */ addr, access.decorators());
+ }
+
+ LIR_Opr result = BarrierSetC1::atomic_xchg_at_resolved(access, value);
+
+ if (ShenandoahCardBarrier) {
+ post_barrier(access, /* addr = */ addr);
+ }
+
+ return result;
+}
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
index 1b4f2c79bd25..413777a61ee6 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
@@ -127,6 +127,7 @@ class ShenandoahLoadReferenceBarrierStub: public CodeStub {
visitor->do_input(_addr);
visitor->do_temp(_addr);
visitor->do_temp(_result);
+ visitor->do_output(_result);
visitor->do_temp(_tmp1);
visitor->do_temp(_tmp2);
}
@@ -135,61 +136,6 @@ class ShenandoahLoadReferenceBarrierStub: public CodeStub {
#endif // PRODUCT
};
-class LIR_OpShenandoahCompareAndSwap : public LIR_Op {
- friend class LIR_OpVisitState;
-
-private:
- LIR_Opr _addr;
- LIR_Opr _cmp_value;
- LIR_Opr _new_value;
- LIR_Opr _tmp1;
- LIR_Opr _tmp2;
-
-public:
- LIR_OpShenandoahCompareAndSwap(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
- LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
- : LIR_Op(lir_none, result, nullptr) // no info
- , _addr(addr)
- , _cmp_value(cmp_value)
- , _new_value(new_value)
- , _tmp1(t1)
- , _tmp2(t2) { }
-
- LIR_Opr addr() const { return _addr; }
- LIR_Opr cmp_value() const { return _cmp_value; }
- LIR_Opr new_value() const { return _new_value; }
- LIR_Opr tmp1() const { return _tmp1; }
- LIR_Opr tmp2() const { return _tmp2; }
-
- virtual void visit(LIR_OpVisitState* state) {
- if (_info) state->do_info(_info);
- assert(_addr->is_valid(), "used"); state->do_input(_addr);
- state->do_temp(_addr);
- assert(_cmp_value->is_valid(), "used"); state->do_input(_cmp_value);
- state->do_temp(_cmp_value);
- assert(_new_value->is_valid(), "used"); state->do_input(_new_value);
- state->do_temp(_new_value);
- if (_tmp1->is_valid()) state->do_temp(_tmp1);
- if (_tmp2->is_valid()) state->do_temp(_tmp2);
- if (_result->is_valid()) state->do_output(_result);
- }
-
- virtual void emit_code(LIR_Assembler* masm);
-
- virtual void print_instr(outputStream* out) const {
- addr()->print(out); out->print(" ");
- cmp_value()->print(out); out->print(" ");
- new_value()->print(out); out->print(" ");
- tmp1()->print(out); out->print(" ");
- tmp2()->print(out); out->print(" ");
- }
-#ifndef PRODUCT
- virtual const char* name() const {
- return "shenandoah_cas_obj";
- }
-#endif // PRODUCT
-};
-
class ShenandoahBarrierSetC1 : public BarrierSetC1 {
private:
CodeBlob* _pre_barrier_c1_runtime_code_blob;
@@ -244,7 +190,7 @@ class ShenandoahBarrierSetC1 : public BarrierSetC1 {
virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
- void post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val);
+ void post_barrier(LIRAccess& access, LIR_Opr addr);
public:
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
index 4fcc90d7bde3..deab648a108e 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
@@ -990,19 +990,23 @@ void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node*
shenandoah_eliminate_wb_pre(node, ¯o->igvn());
}
if (ShenandoahCardBarrier && node->Opcode() == Op_CastP2X) {
- Node* shift = node->unique_out();
- Node* addp = shift->unique_out();
- for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
- Node* mem = addp->last_out(j);
- if (UseCondCardMark && mem->is_Load()) {
- assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
- // The load is checking if the card has been written so
- // replace it with zero to fold the test.
- macro->replace_node(mem, macro->intcon(0));
- continue;
+ for (DUIterator_Last imin, i = node->last_outs(imin); i >= imin; --i) {
+ Node* shift = node->last_out(i);
+ for (DUIterator_Last kmin, k = shift->last_outs(kmin); k >= kmin; --k) {
+ Node* addp = shift->last_out(k);
+ for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
+ Node* mem = addp->last_out(j);
+ if (UseCondCardMark && mem->is_Load()) {
+ assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
+ // The load is checking if the card has been written so
+ // replace it with zero to fold the test.
+ macro->replace_node(mem, macro->intcon(0));
+ continue;
+ }
+ assert(mem->is_Store(), "store required");
+ macro->replace_node(mem, mem->in(MemNode::Memory));
+ }
}
- assert(mem->is_Store(), "store required");
- macro->replace_node(mem, mem->in(MemNode::Memory));
}
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahObjArrayAllocator.cpp b/src/hotspot/share/gc/shenandoah/shenandoahObjArrayAllocator.cpp
index e2215ea58efe..dc9fef16cf69 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahObjArrayAllocator.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahObjArrayAllocator.cpp
@@ -71,16 +71,14 @@ oop ShenandoahObjArrayAllocator::initialize(HeapWord* mem) const {
// Always initialize the mem with primitive array first so GC won't look into the elements in the array.
// For obj array, the header will be corrected to object array after clearing the memory.
Klass* filling_klass = _klass;
- int filling_array_length = _length;
- const bool is_ref_type = is_reference_type(element_type, true);
+ const bool is_ref_type = is_reference_type(element_type);
if (is_ref_type) {
- const bool is_narrow_oop = element_type == T_NARROWOOP;
- size_t filling_element_byte_size = is_narrow_oop ? T_INT_aelem_bytes : T_LONG_aelem_bytes;
- filling_klass = is_narrow_oop ? Universe::intArrayKlass() : Universe::longArrayKlass();
- filling_array_length = (int) ((process_size << LogBytesPerWord) / filling_element_byte_size);
+ filling_klass = LP64_ONLY(UseCompressedOops ? Universe::intArrayKlass() : Universe::longArrayKlass()) NOT_LP64(Universe::intArrayKlass());
+ assert(type2aelembytes(ArrayKlass::cast(filling_klass)->element_type()) == type2aelembytes(element_type), "filling element size must match ref size");
}
- ObjArrayAllocator filling_array_allocator(filling_klass, _word_size, filling_array_length , /* do_zero */ false);
+ // Use _length directly: it matches the ref count, and the filling element size equals the ref size.
+ ObjArrayAllocator filling_array_allocator(filling_klass, _word_size, _length, /* do_zero */ false);
filling_array_allocator.initialize(mem);
// Invisible roots will be scanned and marked at the end of marking.
@@ -106,7 +104,6 @@ oop ShenandoahObjArrayAllocator::initialize(HeapWord* mem) const {
// reference array, header need to be overridden to its own.
if (is_ref_type) {
- arrayOopDesc::set_length(mem, _length);
finish(mem);
// zap paddings after setting correct klass
mem_zap_start_padding(mem);
diff --git a/src/hotspot/share/gc/z/zIterator.hpp b/src/hotspot/share/gc/z/zIterator.hpp
index e048002e52e9..0e9ef808dffa 100644
--- a/src/hotspot/share/gc/z/zIterator.hpp
+++ b/src/hotspot/share/gc/z/zIterator.hpp
@@ -31,11 +31,14 @@ class ZIterator : AllStatic {
private:
static bool is_invisible_object(oop obj);
static bool is_invisible_object_array(oop obj);
+ static bool is_invisible_object_array(oop obj, Klass* klass);
public:
// This iterator skips invisible roots
template <typename OopClosureT>
static void oop_iterate_safe(oop obj, OopClosureT* cl);
+ template <typename OopClosureT>
+ static void oop_iterate_safe(oop obj, Klass* klass, OopClosureT* cl);
template <typename OopClosureT>
static void oop_iterate(oop obj, OopClosureT* cl);
@@ -46,6 +49,8 @@ class ZIterator : AllStatic {
// This function skips invisible roots
template <typename Function>
static void basic_oop_iterate_safe(oop obj, Function function);
+ template <typename Function>
+ static void basic_oop_iterate_safe(oop obj, Klass* klass, Function function);
template <typename Function>
static void basic_oop_iterate(oop obj, Function function);
diff --git a/src/hotspot/share/gc/z/zIterator.inline.hpp b/src/hotspot/share/gc/z/zIterator.inline.hpp
index cbfe1a79aafe..6e51929c7b40 100644
--- a/src/hotspot/share/gc/z/zIterator.inline.hpp
+++ b/src/hotspot/share/gc/z/zIterator.inline.hpp
@@ -45,17 +45,27 @@ inline bool ZIterator::is_invisible_object(oop obj) {
}
inline bool ZIterator::is_invisible_object_array(oop obj) {
- return obj->klass()->is_objArray_klass() && is_invisible_object(obj);
+ return is_invisible_object_array(obj, obj->klass());
}
-// This iterator skips invisible object arrays
+inline bool ZIterator::is_invisible_object_array(oop obj, Klass* klass) {
+ return klass->is_objArray_klass() && is_invisible_object(obj);
+}
+
+// These iterators skip invisible object arrays
+
template <typename OopClosureT>
void ZIterator::oop_iterate_safe(oop obj, OopClosureT* cl) {
+ oop_iterate_safe(obj, obj->klass(), cl);
+}
+
+template <typename OopClosureT>
+void ZIterator::oop_iterate_safe(oop obj, Klass* klass, OopClosureT* cl) {
// Skip invisible object arrays - we only filter out *object* arrays,
// because that check is arguably faster than the is_invisible_object
// check, and primitive arrays are cheap to call oop_iterate on.
- if (!is_invisible_object_array(obj)) {
- obj->oop_iterate(cl);
+ if (!is_invisible_object_array(obj, klass)) {
+ OopIteratorClosureDispatch::oop_oop_iterate(cl, obj, klass);
}
}
@@ -89,11 +99,17 @@ class ZBasicOopIterateClosure : public BasicOopIterateClosure {
}
};
-// This function skips invisible roots
+// These functions skip invisible roots
+
template <typename Function>
void ZIterator::basic_oop_iterate_safe(oop obj, Function function) {
+ basic_oop_iterate_safe(obj, obj->klass(), function);
+}
+
+template <typename Function>
+void ZIterator::basic_oop_iterate_safe(oop obj, Klass* klass, Function function) {
ZBasicOopIterateClosure cl(function);
- oop_iterate_safe(obj, &cl);
+ oop_iterate_safe(obj, klass, &cl);
}
template <typename Function>
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index d51cf5abbaef..1c2a4078904a 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -1272,7 +1272,7 @@ class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
for (ZPage* page; _iter.next(&page);) {
page->object_iterate([&](oop obj) {
// Remap oops and add remset if needed
- ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
+ ZIterator::basic_oop_iterate_safe(obj, obj->klass(), remap_and_maybe_add_remset);
// String dedup
string_dedup_context.request(obj);
diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp
index dd183f36ea28..375cb402892e 100644
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp
@@ -1487,23 +1487,32 @@ JRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* current,
Method* method, address bcp))
Bytecodes::Code code = Bytecodes::code_at(method, bcp);
if (code != Bytecodes::_invokestatic) {
+ current->set_vm_result_oop(nullptr);
return;
}
+
ConstantPool* cpool = method->constants();
int cp_index = Bytes::get_native_u2(bcp + 1);
Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index, code));
Symbol* mname = cpool->name_ref_at(cp_index, code);
- if (MethodHandles::has_member_arg(cname, mname)) {
- oop member_name_oop = cast_to_oop(member_name);
- if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
- // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
- member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
- }
- current->set_vm_result_oop(member_name_oop);
- } else {
+ if (!MethodHandles::has_member_arg(cname, mname)) {
current->set_vm_result_oop(nullptr);
+ return;
+ }
+
+ oop member_name_oop = cast_to_oop(member_name);
+
+ guarantee(member_name_oop != nullptr, "member_name_oop should not be nullptr");
+ guarantee(oopDesc::is_oop(member_name_oop), "member_name_oop should be an oop");
+ guarantee(java_lang_invoke_MemberName::is_instance(member_name_oop) ||
+ java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop),
+ "member_name_oop is not MemberName or DMH");
+
+ if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
+ member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
}
+ current->set_vm_result_oop(member_name_oop);
JRT_END
#endif // INCLUDE_JVMTI
diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
index 969c9ca60c1d..92f406bd0950 100644
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
@@ -256,10 +256,7 @@ TRACE_REQUEST_FUNC(SystemProcess) {
// feature is implemented, write real event
while (processes != nullptr) {
SystemProcess* tmp = processes;
- const char* info = processes->command_line();
- if (info == nullptr) {
- info = processes->path();
- }
+ const char* info = processes->path();
if (info == nullptr) {
info = processes->name();
}
diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
index ad787886d7f1..6043b400e3bc 100644
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -818,7 +818,7 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler,
cb = nm;
if (compile_state == nullptr) {
// This compile didn't come through the CompileBroker so perform the printing here
- CompilerDirectiveMatcher matcher(method, compiler);
+ CompilerDirectiveMatcher matcher(method, CompLevel_full_optimization);
nm->maybe_print_nmethod(matcher.directive_set());
// Since this compilation didn't pass through the broker it wasn't logged yet.
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
index a01f95d8cd5e..5d0d2aedc62b 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
@@ -585,7 +585,7 @@ C2V_END
C2V_VMENTRY_0(jboolean, hasNeverInlineDirective,(JNIEnv* env, jobject, ARGUMENT_PAIR(method)))
methodHandle method (THREAD, UNPACK_PAIR(Method, method));
- return !Inline || CompilerOracle::should_not_inline(method) || method->dont_inline();
+ return !Inline || CompilerOracle::should_not_inline(method, CompLevel_full_optimization) || method->dont_inline();
C2V_END
C2V_VMENTRY_0(jboolean, shouldInlineMethod,(JNIEnv* env, jobject, ARGUMENT_PAIR(method)))
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index aecaa1cac22d..f83f1b1786f7 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -1348,7 +1348,7 @@ static void log_cpu_time() {
const double gc_string_dedup_cpu_time = (double) CPUTimeUsage::GC::stringdedup() / NANOSECS_PER_SEC;
const double gc_cpu_time = (double) gc_threads_cpu_time + gc_vm_thread_cpu_time + gc_string_dedup_cpu_time;
- const double elasped_time = os::elapsedTime();
+ const double elapsed_time = os::elapsedTime();
const bool has_error = CPUTimeUsage::Error::has_error();
if (gc_cpu_time < process_cpu_time) {
@@ -1359,13 +1359,13 @@ static void log_cpu_time() {
cpuLog.print(" CPUs");
cpuLog.print(" s %% utilized");
cpuLog.print(" Process");
- cpuLog.print(" Total %30.4f %6.2f %8.1f", process_cpu_time, 100.0, process_cpu_time / elasped_time);
- cpuLog.print(" Garbage Collection %30.4f %6.2f %8.1f", gc_cpu_time, percent_of(gc_cpu_time, process_cpu_time), gc_cpu_time / elasped_time);
- cpuLog.print(" GC Threads %30.4f %6.2f %8.1f", gc_threads_cpu_time, percent_of(gc_threads_cpu_time, process_cpu_time), gc_threads_cpu_time / elasped_time);
- cpuLog.print(" VM Thread %30.4f %6.2f %8.1f", gc_vm_thread_cpu_time, percent_of(gc_vm_thread_cpu_time, process_cpu_time), gc_vm_thread_cpu_time / elasped_time);
+ cpuLog.print(" Total %30.4f %6.2f %8.1f", process_cpu_time, 100.0, process_cpu_time / elapsed_time);
+ cpuLog.print(" Garbage Collection %30.4f %6.2f %8.1f", gc_cpu_time, percent_of(gc_cpu_time, process_cpu_time), gc_cpu_time / elapsed_time);
+ cpuLog.print(" GC Threads %30.4f %6.2f %8.1f", gc_threads_cpu_time, percent_of(gc_threads_cpu_time, process_cpu_time), gc_threads_cpu_time / elapsed_time);
+ cpuLog.print(" VM Thread %30.4f %6.2f %8.1f", gc_vm_thread_cpu_time, percent_of(gc_vm_thread_cpu_time, process_cpu_time), gc_vm_thread_cpu_time / elapsed_time);
if (UseStringDeduplication) {
- cpuLog.print(" String Deduplication %30.4f %6.2f %8.1f", gc_string_dedup_cpu_time, percent_of(gc_string_dedup_cpu_time, process_cpu_time), gc_string_dedup_cpu_time / elasped_time);
+ cpuLog.print(" String Deduplication %30.4f %6.2f %8.1f", gc_string_dedup_cpu_time, percent_of(gc_string_dedup_cpu_time, process_cpu_time), gc_string_dedup_cpu_time / elapsed_time);
}
cpuLog.print("=====================================================================================");
}
diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp
index 4583e6bd3a16..e67d632fdf65 100644
--- a/src/hotspot/share/oops/markWord.hpp
+++ b/src/hotspot/share/oops/markWord.hpp
@@ -30,6 +30,7 @@
#include "oops/compressedKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/globals.hpp"
+#include "utilities/powerOfTwo.hpp"
// The markWord describes the header of an object.
//
@@ -37,32 +38,44 @@
//
// 32 bits:
// --------
-// hash:25 ------------>| age:4 self-fwd:1 lock:2 (normal object)
+// hash:25 age:4 self-fwd:1 lock:2
//
-// 64 bits:
-// --------
-// unused:22 hash:31 -->| unused_gap:4 age:4 self-fwd:1 lock:2 (normal object)
+// 64 bits (without compact headers):
+// ----------------------------------
+// unused:22 hash:31 valhalla:4 age:4 self-fwd:1 lock:2
//
// 64 bits (with compact headers):
// -------------------------------
-// klass:22 hash:31 -->| unused_gap:4 age:4 self-fwd:1 lock:2 (normal object)
-//
-// - hash contains the identity hash value: largest value is
-// 31 bits, see os::random(). Also, 64-bit vm's require
-// a hash value no bigger than 32 bits because they will not
-// properly generate a mask larger than that: see library_call.cpp
+// klass:22 hash:31 valhalla:4 age:4 self-fwd:1 lock:2
//
-// - the two lock bits are used to describe three states: locked/unlocked and monitor.
+// - lock bits are used to describe lock states: locked/unlocked/monitor-locked
+// and to indicate that an object has been GC marked / forwarded.
//
// [header | 00] locked locked regular object header (fast-locking in use)
// [header | 01] unlocked regular object header
-// [ptr | 10] monitor inflated lock (header is swapped out, UseObjectMonitorTable == false)
// [header | 10] monitor inflated lock (UseObjectMonitorTable == true)
-// [ptr | 11] marked used to mark an object
+// [ptr | 10] monitor inflated lock (UseObjectMonitorTable == false, header is swapped out)
+// [ptr | 11] marked used to mark an object (header is swapped out)
+//
+// - self-fwd - used by some GCs to indicate in-place forwarding.
+//
+// Note the position of 'self-fwd' is not by accident. When forwarding an
+// object to a new heap position, HeapWord alignment guarantees the lower
+// bits, including 'self-fwd' are 0. "is_self_forwarded()" will be correctly
+// set to false. Otherwise encode_pointer_as_mark() may have 'self-fwd' set.
+//
+// - age - used by some GCs to track the age of objects.
+//
+// - valhalla - reserved for valhalla
+//
+// - hash - contains the identity hash value: largest value is 31 bits, see
+// os::random(). Also, 64-bit VMs require a hash value no bigger than 32
+// bits because they will not properly generate a mask larger than that:
+// see library_call.cpp
+//
+// - klass - klass identifier used when UseCompactObjectHeaders == true
-class BasicLock;
class ObjectMonitor;
-class JavaThread;
class outputStream;
class markWord {
@@ -97,33 +110,42 @@ class markWord {
// Conversion
uintptr_t value() const { return _value; }
- // Constants
- static const int age_bits = 4;
+ // Constants, in least significant bit order
+
+ // Number of bits
static const int lock_bits = 2;
static const int self_fwd_bits = 1;
- static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - self_fwd_bits;
+ static const int age_bits = 4;
+ static const int valhalla_reserved_bits = LP64_ONLY(4) NOT_LP64(0);
+ static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - self_fwd_bits - valhalla_reserved_bits;
static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits;
- static const int unused_gap_bits = LP64_ONLY(4) NOT_LP64(0); // Reserved for Valhalla.
+ // Shifts
static const int lock_shift = 0;
static const int self_fwd_shift = lock_shift + lock_bits;
static const int age_shift = self_fwd_shift + self_fwd_bits;
- static const int hash_shift = age_shift + age_bits + unused_gap_bits;
+ static const int valhalla_reserved_shift = age_shift + age_bits;
+ static const int hash_shift = valhalla_reserved_shift + valhalla_reserved_bits;
+
+ // Masks (in-place)
+ static const uintptr_t lock_mask_in_place = right_n_bits(lock_bits) << lock_shift;
+ static const uintptr_t self_fwd_bit_in_place = right_n_bits(self_fwd_bits) << self_fwd_shift;
+ static const uintptr_t age_mask_in_place = right_n_bits(age_bits) << age_shift;
+ static const uintptr_t hash_mask_in_place = right_n_bits(hash_bits) << hash_shift;
+
+ // Verify that _bit_in_place refers to constants with only one bit.
+ static_assert(is_power_of_2(self_fwd_bit_in_place));
- static const uintptr_t lock_mask = right_n_bits(lock_bits);
- static const uintptr_t lock_mask_in_place = lock_mask << lock_shift;
- static const uintptr_t self_fwd_mask = right_n_bits(self_fwd_bits);
- static const uintptr_t self_fwd_mask_in_place = self_fwd_mask << self_fwd_shift;
- static const uintptr_t age_mask = right_n_bits(age_bits);
- static const uintptr_t age_mask_in_place = age_mask << age_shift;
- static const uintptr_t hash_mask = right_n_bits(hash_bits);
- static const uintptr_t hash_mask_in_place = hash_mask << hash_shift;
+ // Masks (unshifted)
+ static const uintptr_t lock_mask = lock_mask_in_place >> lock_shift;
+ static const uintptr_t age_mask = age_mask_in_place >> age_shift;
+ static const uintptr_t hash_mask = hash_mask_in_place >> hash_shift;
#ifdef _LP64
// Used only with compact headers:
// We store the (narrow) Klass* in the bits 43 to 64.
- // These are for bit-precise extraction of the narrow Klass* from the 64-bit Markword
+ // These are for bit-precise extraction of the narrow Klass* from the 64-bit markWord
static constexpr int klass_offset_in_bytes = 4;
static constexpr int klass_shift = hash_shift + hash_bits;
static constexpr int klass_shift_at_offset = klass_shift - klass_offset_in_bytes * BitsPerByte;
@@ -132,7 +154,6 @@ class markWord {
static constexpr uintptr_t klass_mask_in_place = klass_mask << klass_shift;
#endif
-
static const uintptr_t locked_value = 0;
static const uintptr_t unlocked_value = 1;
static const uintptr_t monitor_value = 2;
@@ -157,17 +178,19 @@ class markWord {
bool is_marked() const {
return (mask_bits(value(), lock_mask_in_place) == marked_value);
}
- bool is_forwarded() const {
- // Returns true for normal forwarded (0b011) and self-forwarded (0b1xx).
- return mask_bits(value(), lock_mask_in_place | self_fwd_mask_in_place) >= static_cast<uintptr_t>(marked_value);
- }
+
bool is_neutral() const { // Not locked, or marked - a "clean" neutral state
return (mask_bits(value(), lock_mask_in_place) == unlocked_value);
}
+ bool is_forwarded() const {
+ // Returns true for normal forwarded (0b011) and self-forwarded (0b1xx).
+ return mask_bits(value(), lock_mask_in_place | self_fwd_bit_in_place) >= static_cast<uintptr_t>(marked_value);
+ }
+
// Should this header be preserved during GC?
bool must_be_preserved() const {
- return (!is_unlocked() || !has_no_hash());
+ return !is_unlocked() || !has_no_hash();
}
// WARNING: The following routines are used EXCLUSIVELY by
@@ -204,10 +227,14 @@ class markWord {
return markWord(tmp | monitor_value);
}
- bool has_displaced_mark_helper() const {
+ bool has_monitor_pointer() const {
intptr_t lockbits = value() & lock_mask_in_place;
return !UseObjectMonitorTable && lockbits == monitor_value;
}
+
+ bool has_displaced_mark_helper() const {
+ return has_monitor_pointer();
+ }
markWord displaced_mark_helper() const;
void set_displaced_mark_helper(markWord m) const;
@@ -248,7 +275,7 @@ class markWord {
// Prototype mark for initialization
static markWord prototype() {
- return markWord( no_hash_in_place | no_lock_in_place );
+ return markWord(unlocked_value);
}
// Debugging
@@ -261,15 +288,15 @@ class markWord {
inline void* decode_pointer() const { return (void*)clear_lock_bits().value(); }
inline bool is_self_forwarded() const {
- return mask_bits(value(), self_fwd_mask_in_place) != 0;
+ return mask_bits(value(), self_fwd_bit_in_place) != 0;
}
inline markWord set_self_forwarded() const {
- return markWord(value() | self_fwd_mask_in_place);
+ return markWord(value() | self_fwd_bit_in_place);
}
inline markWord unset_self_forwarded() const {
- return markWord(value() & ~self_fwd_mask_in_place);
+ return markWord(value() & ~self_fwd_bit_in_place);
}
inline oop forwardee() const {
diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp
index d6cc71a60d8b..daa192d51245 100644
--- a/src/hotspot/share/oops/oop.hpp
+++ b/src/hotspot/share/oops/oop.hpp
@@ -90,6 +90,7 @@ class oopDesc {
void set_narrow_klass(narrowKlass nk) NOT_CDS_JAVA_HEAP_RETURN;
inline narrowKlass narrow_klass() const;
+ inline narrowKlass narrow_klass_acquire() const;
inline void set_klass(Klass* k);
static inline void release_set_klass(HeapWord* mem, Klass* k);
diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp
index d5cb80e11220..066ba7a8e204 100644
--- a/src/hotspot/share/oops/oop.inline.hpp
+++ b/src/hotspot/share/oops/oop.inline.hpp
@@ -95,57 +95,38 @@ void oopDesc::init_mark() {
}
Klass* oopDesc::klass() const {
- switch (ObjLayout::klass_mode()) {
- case ObjLayout::Compact:
- return mark().klass();
- case ObjLayout::Compressed:
- return CompressedKlassPointers::decode_not_null(_compressed_klass);
- default:
- ShouldNotReachHere();
- }
+ return CompressedKlassPointers::decode_not_null(narrow_klass());
}
Klass* oopDesc::klass_or_null() const {
- switch (ObjLayout::klass_mode()) {
- case ObjLayout::Compact:
- return mark().klass_or_null();
- case ObjLayout::Compressed:
- return CompressedKlassPointers::decode(_compressed_klass);
- default:
- ShouldNotReachHere();
- }
+ return CompressedKlassPointers::decode(narrow_klass());
}
Klass* oopDesc::klass_or_null_acquire() const {
- switch (ObjLayout::klass_mode()) {
- case ObjLayout::Compact:
- return mark_acquire().klass();
- case ObjLayout::Compressed: {
- narrowKlass narrow_klass = AtomicAccess::load_acquire(&_compressed_klass);
- return CompressedKlassPointers::decode(narrow_klass);
- }
- default:
- ShouldNotReachHere();
- }
+ return CompressedKlassPointers::decode(narrow_klass_acquire());
}
Klass* oopDesc::klass_without_asserts() const {
+ return CompressedKlassPointers::decode_without_asserts(narrow_klass());
+}
+
+narrowKlass oopDesc::narrow_klass() const {
switch (ObjLayout::klass_mode()) {
case ObjLayout::Compact:
- return mark().klass_without_asserts();
+ return mark().narrow_klass();
case ObjLayout::Compressed:
- return CompressedKlassPointers::decode_without_asserts(_compressed_klass);
+ return _compressed_klass;
default:
ShouldNotReachHere();
}
}
-narrowKlass oopDesc::narrow_klass() const {
+narrowKlass oopDesc::narrow_klass_acquire() const {
switch (ObjLayout::klass_mode()) {
case ObjLayout::Compact:
- return mark().narrow_klass();
+ return mark_acquire().narrow_klass();
case ObjLayout::Compressed:
- return _compressed_klass;
+ return AtomicAccess::load_acquire(&_compressed_klass);
default:
ShouldNotReachHere();
}
diff --git a/src/hotspot/share/opto/bytecodeInfo.cpp b/src/hotspot/share/opto/bytecodeInfo.cpp
index 56c336dfc731..330a8688110b 100644
--- a/src/hotspot/share/opto/bytecodeInfo.cpp
+++ b/src/hotspot/share/opto/bytecodeInfo.cpp
@@ -234,7 +234,7 @@ bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_met
return false;
}
- if (C->directive()->should_not_inline(callee_method)) {
+ if (C->directive()->should_not_inline(callee_method, CompLevel_full_optimization)) {
set_msg("disallowed by CompileCommand");
return true;
}
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 788c4620b06c..25975334dcb6 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -3157,30 +3157,20 @@ void Compile::Code_Gen() {
}
//------------------------------Final_Reshape_Counts---------------------------
-// This class defines counters to help identify when a method
-// may/must be executed using hardware with only 24-bit precision.
+// This class defines counters and node lists collected during
+// the final graph reshaping.
struct Final_Reshape_Counts : public StackObj {
- int _call_count; // count non-inlined 'common' calls
- int _float_count; // count float ops requiring 24-bit precision
- int _double_count; // count double ops requiring more precision
int _java_call_count; // count non-inlined 'java' calls
int _inner_loop_count; // count loops which need alignment
VectorSet _visited; // Visitation flags
Node_List _tests; // Set of IfNodes & PCTableNodes
Final_Reshape_Counts() :
- _call_count(0), _float_count(0), _double_count(0),
_java_call_count(0), _inner_loop_count(0) { }
- void inc_call_count () { _call_count ++; }
- void inc_float_count () { _float_count ++; }
- void inc_double_count() { _double_count++; }
void inc_java_call_count() { _java_call_count++; }
void inc_inner_loop_count() { _inner_loop_count++; }
- int get_call_count () const { return _call_count ; }
- int get_float_count () const { return _float_count ; }
- int get_double_count() const { return _double_count; }
int get_java_call_count() const { return _java_call_count; }
int get_inner_loop_count() const { return _inner_loop_count; }
};
@@ -3243,7 +3233,6 @@ void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Uni
"unused CallLeafPureNode should have been removed before final graph reshaping");
}
#endif
- // Count FPU ops and common calls, implements item (3)
bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop, dead_nodes);
if (!gc_handled) {
final_graph_reshaping_main_switch(n, frc, nop, dead_nodes);
@@ -3288,50 +3277,6 @@ void Compile::handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned) {
void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes) {
switch( nop ) {
- // Count all float operations that may use FPU
- case Op_AddHF:
- case Op_MulHF:
- case Op_AddF:
- case Op_SubF:
- case Op_MulF:
- case Op_DivF:
- case Op_NegF:
- case Op_ModF:
- case Op_ConvI2F:
- case Op_ConF:
- case Op_CmpF:
- case Op_CmpF3:
- case Op_StoreF:
- case Op_LoadF:
- // case Op_ConvL2F: // longs are split into 32-bit halves
- frc.inc_float_count();
- break;
-
- case Op_ConvF2D:
- case Op_ConvD2F:
- frc.inc_float_count();
- frc.inc_double_count();
- break;
-
- // Count all double operations that may use FPU
- case Op_AddD:
- case Op_SubD:
- case Op_MulD:
- case Op_DivD:
- case Op_NegD:
- case Op_ModD:
- case Op_ConvI2D:
- case Op_ConvD2I:
- // case Op_ConvL2D: // handled by leaf call
- // case Op_ConvD2L: // handled by leaf call
- case Op_ConD:
- case Op_CmpD:
- case Op_CmpD3:
- case Op_StoreD:
- case Op_LoadD:
- case Op_LoadD_unaligned:
- frc.inc_double_count();
- break;
case Op_Opaque1: // Remove Opaque Nodes before matching
n->subsume_by(n->in(1), this);
break;
@@ -3351,7 +3296,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
n->subsume_by(new_call, this);
}
- frc.inc_call_count();
break;
}
case Op_CallStaticJava:
@@ -3364,13 +3308,8 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
case Op_CallLeafNoFP: {
assert (n->is_Call(), "");
CallNode *call = n->as_Call();
- // Count call sites where the FP mode bit would have to be flipped.
- // Do not count uncommon runtime calls:
- // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
- // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
- if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
- frc.inc_call_count(); // Count the call site
- } else { // See if uncommon argument is shared
+ // See if uncommon argument is shared
+ if (call->is_CallStaticJava() && call->as_CallStaticJava()->_name) {
Node *n = call->in(TypeFunc::Parms);
int nop = n->Opcode();
// Clone shared simple arguments to uncommon calls, item (1).
@@ -3388,6 +3327,13 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
break;
}
+
+ // Mem nodes need explicit cases to satisfy assert(!n->is_Mem()) in default.
+ case Op_StoreF:
+ case Op_LoadF:
+ case Op_StoreD:
+ case Op_LoadD:
+ case Op_LoadD_unaligned:
case Op_StoreB:
case Op_StoreC:
case Op_StoreI:
@@ -3435,6 +3381,12 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
case Op_LoadN:
case Op_LoadRange:
case Op_LoadS:
+ case Op_LoadVectorGather:
+ case Op_StoreVectorScatter:
+ case Op_LoadVectorGatherMasked:
+ case Op_StoreVectorScatterMasked:
+ case Op_LoadVectorMasked:
+ case Op_StoreVectorMasked:
break;
case Op_AddP: { // Assert sane base pointers
@@ -3785,35 +3737,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
#endif
break;
- case Op_LoadVectorGather:
- case Op_StoreVectorScatter:
- case Op_LoadVectorGatherMasked:
- case Op_StoreVectorScatterMasked:
- case Op_VectorCmpMasked:
- case Op_VectorMaskGen:
- case Op_LoadVectorMasked:
- case Op_StoreVectorMasked:
- break;
-
- case Op_AddReductionVI:
- case Op_AddReductionVL:
- case Op_AddReductionVHF:
- case Op_AddReductionVF:
- case Op_AddReductionVD:
- case Op_MulReductionVI:
- case Op_MulReductionVL:
- case Op_MulReductionVHF:
- case Op_MulReductionVF:
- case Op_MulReductionVD:
- case Op_MinReductionV:
- case Op_MaxReductionV:
- case Op_UMinReductionV:
- case Op_UMaxReductionV:
- case Op_AndReductionV:
- case Op_OrReductionV:
- case Op_XorReductionV:
- break;
-
case Op_PackB:
case Op_PackS:
case Op_PackI:
@@ -3889,8 +3812,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
break;
}
- case Op_Blackhole:
- break;
case Op_RangeCheck: {
RangeCheckNode* rc = n->as_RangeCheck();
Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
@@ -4059,20 +3980,13 @@ void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_R
// Intel update-in-place two-address operations and better register usage
// on RISCs. Must come after regular optimizations to avoid GVN Ideal
// calls canonicalizing them back.
-// (3) Count the number of double-precision FP ops, single-precision FP ops
-// and call sites. On Intel, we can get correct rounding either by
-// forcing singles to memory (requires extra stores and loads after each
-// FP bytecode) or we can set a rounding mode bit (requires setting and
-// clearing the mode bit around call sites). The mode bit is only used
-// if the relative frequency of single FP ops to calls is low enough.
-// This is a key transform for SPEC mpeg_audio.
-// (4) Detect infinite loops; blobs of code reachable from above but not
+// (3) Detect infinite loops; blobs of code reachable from above but not
// below. Several of the Code_Gen algorithms fail on such code shapes,
// so we simply bail out. Happens a lot in ZKM.jar, but also happens
// from time to time in other codes (such as -Xcomp finalizer loops, etc).
// Detection is by looking for IfNodes where only 1 projection is
// reachable from below or CatchNodes missing some targets.
-// (5) Assert for insane oop offsets in debug mode.
+// (4) Assert for insane oop offsets in debug mode.
bool Compile::final_graph_reshaping() {
// an infinite loop may have been eliminated by the optimizer,
diff --git a/src/hotspot/share/opto/machnode.cpp b/src/hotspot/share/opto/machnode.cpp
index c35861df7350..ec861865ff59 100644
--- a/src/hotspot/share/opto/machnode.cpp
+++ b/src/hotspot/share/opto/machnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -561,11 +561,7 @@ void MachNode::dump_spec(outputStream *st) const {
if (barrier_data() != 0) {
st->print(" barrier(");
BarrierSet::barrier_set()->barrier_set_c2()->dump_barrier_data(this, st);
- st->print(")");
- }
- if (_bottom_type != nullptr) {
- st->print(" ");
- _bottom_type->dump_on(st);
+ st->print(") ");
}
}
@@ -576,6 +572,19 @@ void MachNode::dump_format(PhaseRegAlloc *ra, outputStream *st) const {
}
#endif
+//=============================================================================
+#ifndef PRODUCT
+void MachTypeNode::dump_spec(outputStream *st) const {
+ MachNode::dump_spec(st);
+ if (_bottom_type != nullptr) {
+ _bottom_type->dump_on(st);
+ } else {
+ st->print(" null");
+ }
+}
+#endif
+
+
//=============================================================================
int MachConstantNode::constant_offset() {
// Bind the offset lazily.
diff --git a/src/hotspot/share/opto/machnode.hpp b/src/hotspot/share/opto/machnode.hpp
index daa2aad960ad..b60313b7f758 100644
--- a/src/hotspot/share/opto/machnode.hpp
+++ b/src/hotspot/share/opto/machnode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -220,7 +220,7 @@ class MachOper : public ResourceObj {
// ADLC inherit from this class.
class MachNode : public Node {
public:
- MachNode() : Node((uint)0), _bottom_type(nullptr), _barrier(0), _num_opnds(0), _opnds(nullptr) {
+ MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(nullptr) {
init_class_id(Class_Mach);
}
// Required boilerplate
@@ -282,9 +282,6 @@ class MachNode : public Node {
// output have choices - but they must use the same choice.
virtual uint two_adr( ) const { return 0; }
- // Capture the type of the matched ideal node
- const Type* _bottom_type;
-
// The GC might require some barrier metadata for machine code emission.
uint8_t _barrier;
@@ -334,17 +331,7 @@ class MachNode : public Node {
virtual MachNode *Expand( State *, Node_List &proj_list, Node* mem ) { return this; }
// Bottom_type call; value comes from operand0
- virtual const Type* bottom_type() const {
- if (_bottom_type != nullptr) {
- return _bottom_type;
- }
- const Type* res = _opnds[0]->type();
- // The type system around pointers is complex, do not rely on operand type then
- assert(res != nullptr, "must be not null");
- assert(is_MachTemp() || res->isa_ptr() == nullptr, "must not be a pointer");
- return res;
- }
-
+ virtual const class Type *bottom_type() const { return _opnds[0]->type(); }
virtual uint ideal_reg() const {
const Type *t = _opnds[0]->type();
if (t == TypeInt::CC) {
@@ -431,6 +418,20 @@ class MachIdealNode : public MachNode {
virtual const class Type *bottom_type() const { return _opnds == nullptr ? Type::CONTROL : MachNode::bottom_type(); }
};
+//------------------------------MachTypeNode----------------------------
+// Machine Nodes that need to retain a known Type.
+class MachTypeNode : public MachNode {
+ virtual uint size_of() const { return sizeof(*this); } // Size is bigger
+public:
+ MachTypeNode( ) {}
+ const Type *_bottom_type;
+
+ virtual const class Type *bottom_type() const { return _bottom_type; }
+#ifndef PRODUCT
+ virtual void dump_spec(outputStream *st) const;
+#endif
+};
+
//------------------------------MachBreakpointNode----------------------------
// Machine breakpoint or interrupt Node
class MachBreakpointNode : public MachIdealNode {
@@ -476,12 +477,12 @@ class MachConstantBaseNode : public MachIdealNode {
//------------------------------MachConstantNode-------------------------------
// Machine node that holds a constant which is stored in the constant table.
-class MachConstantNode : public MachNode {
+class MachConstantNode : public MachTypeNode {
protected:
ConstantTable::Constant _constant; // This node's constant.
public:
- MachConstantNode() : MachNode() {
+ MachConstantNode() : MachTypeNode() {
init_class_id(Class_MachConstant);
}
diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp
index 69098befa388..d2a9250b3ee0 100644
--- a/src/hotspot/share/opto/matcher.cpp
+++ b/src/hotspot/share/opto/matcher.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2550,7 +2550,7 @@ bool Matcher::gen_narrow_oop_implicit_null_checks() {
// Advice matcher to perform null checks on the narrow oop side.
// Implicit checks are not possible on the uncompressed oop side anyway
// (at least not for read accesses).
- // Performs significantly better (especially on Power 6).
+ // Performs significantly better.
if (!os::zero_page_read_protected()) {
return true;
}
diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp
index 31f4a7822477..a3d0f8e7d6c6 100644
--- a/src/hotspot/share/opto/matcher.hpp
+++ b/src/hotspot/share/opto/matcher.hpp
@@ -38,6 +38,7 @@
class Compile;
class Node;
class MachNode;
+class MachTypeNode;
class MachOper;
//---------------------------Matcher-------------------------------------------
diff --git a/src/hotspot/share/opto/subnode.hpp b/src/hotspot/share/opto/subnode.hpp
index 387c1c46ba9c..29ec25b41f82 100644
--- a/src/hotspot/share/opto/subnode.hpp
+++ b/src/hotspot/share/opto/subnode.hpp
@@ -520,7 +520,12 @@ class SqrtDNode : public Node {
public:
SqrtDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
init_flags(Flag_is_expensive);
- C->add_expensive_node(this);
+ // Treat node only as expensive if a control input is set because it might
+ // be created from SqrtVDNode in VectorNode::push_through_replicate which
+ // does not have control input.
+ if (c != nullptr) {
+ C->add_expensive_node(this);
+ }
}
virtual int Opcode() const;
const Type *bottom_type() const { return Type::DOUBLE; }
diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp
index 6dd0cc183e12..4d1d05c596d2 100644
--- a/src/hotspot/share/opto/type.cpp
+++ b/src/hotspot/share/opto/type.cpp
@@ -62,66 +62,66 @@ Dict* Type::_shared_type_dict = nullptr;
// Array which maps compiler types to Basic Types
const Type::TypeInfo Type::_type_info[Type::lastype] = {
- { Bad, T_ILLEGAL, "bad", false, Node::NotAMachineReg, relocInfo::none }, // Bad
- { Control, T_ILLEGAL, "control", false, 0, relocInfo::none }, // Control
- { Bottom, T_VOID, "top", false, 0, relocInfo::none }, // Top
- { Bad, T_INT, "int:", false, Op_RegI, relocInfo::none }, // Int
- { Bad, T_LONG, "long:", false, Op_RegL, relocInfo::none }, // Long
- { Half, T_VOID, "half", false, 0, relocInfo::none }, // Half
- { Bad, T_NARROWOOP, "narrowoop:", false, Op_RegN, relocInfo::none }, // NarrowOop
- { Bad, T_NARROWKLASS,"narrowklass:", false, Op_RegN, relocInfo::none }, // NarrowKlass
- { Bad, T_ILLEGAL, "tuple:", false, Node::NotAMachineReg, relocInfo::none }, // Tuple
- { Bad, T_ARRAY, "array:", false, Node::NotAMachineReg, relocInfo::none }, // Array
- { Bad, T_ARRAY, "interfaces:", false, Node::NotAMachineReg, relocInfo::none }, // Interfaces
+ { Bad, T_ILLEGAL, "bad", false, Node::NotAMachineReg}, // Bad
+ { Control, T_ILLEGAL, "control", false, 0 }, // Control
+ { Bottom, T_VOID, "top", false, 0 }, // Top
+ { Bad, T_INT, "int:", false, Op_RegI }, // Int
+ { Bad, T_LONG, "long:", false, Op_RegL }, // Long
+ { Half, T_VOID, "half", false, 0 }, // Half
+ { Bad, T_NARROWOOP, "narrowoop:", false, Op_RegN }, // NarrowOop
+ { Bad, T_NARROWKLASS,"narrowklass:", false, Op_RegN }, // NarrowKlass
+ { Bad, T_ILLEGAL, "tuple:", false, Node::NotAMachineReg}, // Tuple
+ { Bad, T_ARRAY, "array:", false, Node::NotAMachineReg}, // Array
+ { Bad, T_ARRAY, "interfaces:", false, Node::NotAMachineReg}, // Interfaces
#if defined(PPC64)
- { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask, relocInfo::none }, // VectorMask.
- { Bad, T_ILLEGAL, "vectora:", false, Op_VecA, relocInfo::none }, // VectorA.
- { Bad, T_ILLEGAL, "vectors:", false, 0, relocInfo::none }, // VectorS
- { Bad, T_ILLEGAL, "vectord:", false, Op_RegL, relocInfo::none }, // VectorD
- { Bad, T_ILLEGAL, "vectorx:", false, Op_VecX, relocInfo::none }, // VectorX
- { Bad, T_ILLEGAL, "vectory:", false, 0, relocInfo::none }, // VectorY
- { Bad, T_ILLEGAL, "vectorz:", false, 0, relocInfo::none }, // VectorZ
+ { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask }, // VectorMask.
+ { Bad, T_ILLEGAL, "vectora:", false, Op_VecA }, // VectorA.
+ { Bad, T_ILLEGAL, "vectors:", false, 0 }, // VectorS
+ { Bad, T_ILLEGAL, "vectord:", false, Op_RegL }, // VectorD
+ { Bad, T_ILLEGAL, "vectorx:", false, Op_VecX }, // VectorX
+ { Bad, T_ILLEGAL, "vectory:", false, 0 }, // VectorY
+ { Bad, T_ILLEGAL, "vectorz:", false, 0 }, // VectorZ
#elif defined(S390)
- { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask, relocInfo::none }, // VectorMask.
- { Bad, T_ILLEGAL, "vectora:", false, Op_VecA, relocInfo::none }, // VectorA.
- { Bad, T_ILLEGAL, "vectors:", false, 0, relocInfo::none }, // VectorS
- { Bad, T_ILLEGAL, "vectord:", false, Op_RegL, relocInfo::none }, // VectorD
- { Bad, T_ILLEGAL, "vectorx:", false, Op_VecX, relocInfo::none }, // VectorX
- { Bad, T_ILLEGAL, "vectory:", false, 0, relocInfo::none }, // VectorY
- { Bad, T_ILLEGAL, "vectorz:", false, 0, relocInfo::none }, // VectorZ
+ { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask }, // VectorMask.
+ { Bad, T_ILLEGAL, "vectora:", false, Op_VecA }, // VectorA.
+ { Bad, T_ILLEGAL, "vectors:", false, 0 }, // VectorS
+ { Bad, T_ILLEGAL, "vectord:", false, Op_RegL }, // VectorD
+ { Bad, T_ILLEGAL, "vectorx:", false, Op_VecX }, // VectorX
+ { Bad, T_ILLEGAL, "vectory:", false, 0 }, // VectorY
+ { Bad, T_ILLEGAL, "vectorz:", false, 0 }, // VectorZ
#else // all other
- { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask, relocInfo::none }, // VectorMask.
- { Bad, T_ILLEGAL, "vectora:", false, Op_VecA, relocInfo::none }, // VectorA.
- { Bad, T_ILLEGAL, "vectors:", false, Op_VecS, relocInfo::none }, // VectorS
- { Bad, T_ILLEGAL, "vectord:", false, Op_VecD, relocInfo::none }, // VectorD
- { Bad, T_ILLEGAL, "vectorx:", false, Op_VecX, relocInfo::none }, // VectorX
- { Bad, T_ILLEGAL, "vectory:", false, Op_VecY, relocInfo::none }, // VectorY
- { Bad, T_ILLEGAL, "vectorz:", false, Op_VecZ, relocInfo::none }, // VectorZ
+ { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask }, // VectorMask.
+ { Bad, T_ILLEGAL, "vectora:", false, Op_VecA }, // VectorA.
+ { Bad, T_ILLEGAL, "vectors:", false, Op_VecS }, // VectorS
+ { Bad, T_ILLEGAL, "vectord:", false, Op_VecD }, // VectorD
+ { Bad, T_ILLEGAL, "vectorx:", false, Op_VecX }, // VectorX
+ { Bad, T_ILLEGAL, "vectory:", false, Op_VecY }, // VectorY
+ { Bad, T_ILLEGAL, "vectorz:", false, Op_VecZ }, // VectorZ
#endif
- { Bad, T_ADDRESS, "anyptr:", false, Op_RegP, relocInfo::none }, // AnyPtr
- { Bad, T_ADDRESS, "rawptr:", false, Op_RegP, relocInfo::external_word_type }, // RawPtr
- { Bad, T_OBJECT, "oop:", true, Op_RegP, relocInfo::oop_type }, // OopPtr
- { Bad, T_OBJECT, "inst:", true, Op_RegP, relocInfo::oop_type }, // InstPtr
- { Bad, T_OBJECT, "ary:", true, Op_RegP, relocInfo::oop_type }, // AryPtr
- { Bad, T_METADATA, "metadata:", false, Op_RegP, relocInfo::metadata_type }, // MetadataPtr
- { Bad, T_METADATA, "klass:", false, Op_RegP, relocInfo::metadata_type }, // KlassPtr
- { Bad, T_METADATA, "instklass:", false, Op_RegP, relocInfo::metadata_type }, // InstKlassPtr
- { Bad, T_METADATA, "aryklass:", false, Op_RegP, relocInfo::metadata_type }, // AryKlassPtr
- { Bad, T_OBJECT, "func", false, 0, relocInfo::none }, // Function
- { Abio, T_ILLEGAL, "abIO", false, 0, relocInfo::none }, // Abio
- { Return_Address, T_ADDRESS, "return_address",false, Op_RegP, relocInfo::none }, // Return_Address
- { Memory, T_ILLEGAL, "memory", false, 0, relocInfo::none }, // Memory
- { HalfFloatBot, T_SHORT, "halffloat_top", false, Op_RegF, relocInfo::none }, // HalfFloatTop
- { HalfFloatCon, T_SHORT, "hfcon:", false, Op_RegF, relocInfo::none }, // HalfFloatCon
- { HalfFloatTop, T_SHORT, "short", false, Op_RegF, relocInfo::none }, // HalfFloatBot
- { FloatBot, T_FLOAT, "float_top", false, Op_RegF, relocInfo::none }, // FloatTop
- { FloatCon, T_FLOAT, "ftcon:", false, Op_RegF, relocInfo::none }, // FloatCon
- { FloatTop, T_FLOAT, "float", false, Op_RegF, relocInfo::none }, // FloatBot
- { DoubleBot, T_DOUBLE, "double_top", false, Op_RegD, relocInfo::none }, // DoubleTop
- { DoubleCon, T_DOUBLE, "dblcon:", false, Op_RegD, relocInfo::none }, // DoubleCon
- { DoubleTop, T_DOUBLE, "double", false, Op_RegD, relocInfo::none }, // DoubleBot
- { Top, T_ILLEGAL, "bottom", false, 0, relocInfo::none } // Bottom
+ { Bad, T_ADDRESS, "anyptr:", false, Op_RegP }, // AnyPtr
+ { Bad, T_ADDRESS, "rawptr:", false, Op_RegP }, // RawPtr
+ { Bad, T_OBJECT, "oop:", true, Op_RegP }, // OopPtr
+ { Bad, T_OBJECT, "inst:", true, Op_RegP }, // InstPtr
+ { Bad, T_OBJECT, "ary:", true, Op_RegP }, // AryPtr
+ { Bad, T_METADATA, "metadata:", false, Op_RegP }, // MetadataPtr
+ { Bad, T_METADATA, "klass:", false, Op_RegP }, // KlassPtr
+ { Bad, T_METADATA, "instklass:", false, Op_RegP }, // InstKlassPtr
+ { Bad, T_METADATA, "aryklass:", false, Op_RegP }, // AryKlassPtr
+ { Bad, T_OBJECT, "func", false, 0 }, // Function
+ { Abio, T_ILLEGAL, "abIO", false, 0 }, // Abio
+ { Return_Address, T_ADDRESS, "return_address",false, Op_RegP }, // Return_Address
+ { Memory, T_ILLEGAL, "memory", false, 0 }, // Memory
+ { HalfFloatBot, T_SHORT, "halffloat_top", false, Op_RegF }, // HalfFloatTop
+ { HalfFloatCon, T_SHORT, "hfcon:", false, Op_RegF }, // HalfFloatCon
+ { HalfFloatTop, T_SHORT, "short", false, Op_RegF }, // HalfFloatBot
+ { FloatBot, T_FLOAT, "float_top", false, Op_RegF }, // FloatTop
+ { FloatCon, T_FLOAT, "ftcon:", false, Op_RegF }, // FloatCon
+ { FloatTop, T_FLOAT, "float", false, Op_RegF }, // FloatBot
+ { DoubleBot, T_DOUBLE, "double_top", false, Op_RegD }, // DoubleTop
+ { DoubleCon, T_DOUBLE, "dblcon:", false, Op_RegD }, // DoubleCon
+ { DoubleTop, T_DOUBLE, "double", false, Op_RegD }, // DoubleBot
+ { Top, T_ILLEGAL, "bottom", false, 0 } // Bottom
};
// Map ideal registers (machine types) to ideal types
@@ -235,7 +235,7 @@ const Type* Type::get_typeflow_type(ciType* type) {
case T_ADDRESS:
assert(type->is_return_address(), "");
- return TypeRawPtr::make((address)(intptr_t)type->as_return_address()->bci());
+ return TypeRawPtr::make((address)(intptr_t)type->as_return_address()->bci(), relocInfo::none);
default:
// make sure we did not mix up the cases:
@@ -2369,7 +2369,7 @@ const TypePtr* TypePtr::with_inline_depth(int depth) const {
if (!UseInlineDepthForSpeculativeTypes) {
return this;
}
- return make(AnyPtr, _ptr, _offset, _speculative, depth);
+ return make(AnyPtr, _ptr, _offset, _speculative, depth, _reloc);
}
//------------------------------dump2------------------------------------------
@@ -2597,15 +2597,17 @@ const TypePtr::PTR TypePtr::ptr_meet[TypePtr::lastPTR][TypePtr::lastPTR] = {
};
//------------------------------make-------------------------------------------
-const TypePtr *TypePtr::make(TYPES t, enum PTR ptr, int offset, const TypePtr* speculative, int inline_depth) {
- return (TypePtr*)(new TypePtr(t,ptr,offset, speculative, inline_depth))->hashcons();
+const TypePtr* TypePtr::make(TYPES t, enum PTR ptr, int offset,
+ const TypePtr* speculative, int inline_depth,
+ relocInfo::relocType reloc) {
+ return (TypePtr*)(new TypePtr(t, ptr, offset, reloc, speculative, inline_depth))->hashcons();
}
//------------------------------cast_to_ptr_type-------------------------------
const TypePtr* TypePtr::cast_to_ptr_type(PTR ptr) const {
assert(_base == AnyPtr, "subclass must override cast_to_ptr_type");
if( ptr == _ptr ) return this;
- return make(_base, ptr, _offset, _speculative, _inline_depth);
+ return make(_base, ptr, _offset, _speculative, _inline_depth, _reloc);
}
//------------------------------get_con----------------------------------------
@@ -2707,7 +2709,7 @@ const TypePtr::PTR TypePtr::ptr_dual[TypePtr::lastPTR] = {
BotPTR, NotNull, Constant, Null, AnyNull, TopPTR
};
const Type *TypePtr::xdual() const {
- return new TypePtr(AnyPtr, dual_ptr(), dual_offset(), dual_speculative(), dual_inline_depth());
+ return new TypePtr(AnyPtr, dual_ptr(), dual_offset(), relocInfo::none, dual_speculative(), dual_inline_depth());
}
//------------------------------xadd_offset------------------------------------
@@ -2728,24 +2730,25 @@ int TypePtr::xadd_offset( intptr_t offset ) const {
//------------------------------add_offset-------------------------------------
const TypePtr *TypePtr::add_offset( intptr_t offset ) const {
- return make(AnyPtr, _ptr, xadd_offset(offset), _speculative, _inline_depth);
+ return make(AnyPtr, _ptr, xadd_offset(offset), _speculative, _inline_depth, _reloc);
}
const TypePtr *TypePtr::with_offset(intptr_t offset) const {
- return make(AnyPtr, _ptr, offset, _speculative, _inline_depth);
+ return make(AnyPtr, _ptr, offset, _speculative, _inline_depth, _reloc);
}
//------------------------------eq---------------------------------------------
// Structural equality check for Type representations
bool TypePtr::eq( const Type *t ) const {
const TypePtr *a = (const TypePtr*)t;
- return _ptr == a->ptr() && _offset == a->offset() && eq_speculative(a) && _inline_depth == a->_inline_depth;
+ return _ptr == a->ptr() && _offset == a->offset() && _reloc == a->reloc() &&
+ eq_speculative(a) && _inline_depth == a->_inline_depth;
}
//------------------------------hash-------------------------------------------
// Type-specific hashing function.
uint TypePtr::hash(void) const {
- return (uint)_ptr + (uint)_offset + (uint)hash_speculative() + (uint)_inline_depth;
+ return (uint)_ptr + (uint)_offset + (uint)_reloc + (uint)hash_speculative() + (uint)_inline_depth;
}
/**
@@ -2756,7 +2759,7 @@ const TypePtr* TypePtr::remove_speculative() const {
return this;
}
assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth");
- return make(AnyPtr, _ptr, _offset, nullptr, _inline_depth);
+ return make(AnyPtr, _ptr, _offset, nullptr, _inline_depth, _reloc);
}
/**
@@ -3071,12 +3074,12 @@ const TypeRawPtr *TypeRawPtr::NOTNULL;
const TypeRawPtr *TypeRawPtr::make( enum PTR ptr ) {
assert( ptr != Constant, "what is the constant?" );
assert( ptr != Null, "Use TypePtr for null" );
- return (TypeRawPtr*)(new TypeRawPtr(ptr,nullptr))->hashcons();
+ return (TypeRawPtr*)(new TypeRawPtr(ptr, nullptr, relocInfo::none))->hashcons();
}
-const TypeRawPtr *TypeRawPtr::make(address bits) {
+const TypeRawPtr* TypeRawPtr::make(address bits, relocInfo::relocType reloc) {
assert(bits != nullptr, "Use TypePtr for null");
- return (TypeRawPtr*)(new TypeRawPtr(Constant,bits))->hashcons();
+ return (TypeRawPtr*)(new TypeRawPtr(Constant, bits, reloc))->hashcons();
}
//------------------------------cast_to_ptr_type-------------------------------
@@ -3151,7 +3154,7 @@ const Type *TypeRawPtr::xmeet( const Type *t ) const {
//------------------------------xdual------------------------------------------
// Dual: compute field-by-field dual
const Type *TypeRawPtr::xdual() const {
- return new TypeRawPtr( dual_ptr(), _bits );
+ return new TypeRawPtr(dual_ptr(), _bits, _reloc);
}
//------------------------------add_offset-------------------------------------
@@ -3174,7 +3177,7 @@ const TypePtr* TypeRawPtr::add_offset(intptr_t offset) const {
} else if ( sum == 0 ) {
return TypePtr::NULL_PTR;
} else {
- return make( (address)sum );
+ return make((address)sum, _reloc);
}
}
default: ShouldNotReachHere();
@@ -3469,7 +3472,7 @@ bool TypeInterfaces::has_non_array_interface() const {
//------------------------------TypeOopPtr-------------------------------------
TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, bool xk, ciObject* o, int offset,
int instance_id, const TypePtr* speculative, int inline_depth)
- : TypePtr(t, ptr, offset, speculative, inline_depth),
+ : TypePtr(t, ptr, offset, relocInfo::oop_type, speculative, inline_depth),
_const_oop(o), _klass(k),
_interfaces(interfaces),
_klass_is_exact(xk),
@@ -5490,7 +5493,7 @@ void TypeMetadataPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
const TypeMetadataPtr *TypeMetadataPtr::BOTTOM;
TypeMetadataPtr::TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset):
- TypePtr(MetadataPtr, ptr, offset), _metadata(metadata) {
+ TypePtr(MetadataPtr, ptr, offset, relocInfo::metadata_type), _metadata(metadata) {
}
const TypeMetadataPtr* TypeMetadataPtr::make(ciMethod* m) {
@@ -5538,7 +5541,7 @@ const TypeKlassPtr* TypeKlassPtr::make(PTR ptr, ciKlass* klass, int offset, Inte
//------------------------------TypeKlassPtr-----------------------------------
TypeKlassPtr::TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const TypeInterfaces* interfaces, int offset)
- : TypePtr(t, ptr, offset), _klass(klass), _interfaces(interfaces) {
+ : TypePtr(t, ptr, offset, relocInfo::metadata_type), _klass(klass), _interfaces(interfaces) {
assert(klass == nullptr || !klass->is_loaded() || (klass->is_instance_klass() && !klass->is_interface()) ||
klass->is_type_array_klass() || !klass->as_obj_array_klass()->base_element_klass()->is_interface(), "no interface here");
}
diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp
index 79777c82cc75..728b7af9c244 100644
--- a/src/hotspot/share/opto/type.hpp
+++ b/src/hotspot/share/opto/type.hpp
@@ -157,7 +157,6 @@ class Type {
const char* msg;
bool isa_oop;
uint ideal_reg;
- relocInfo::relocType reloc;
} TypeInfo;
// Dictionary of types shared among compilations.
@@ -459,7 +458,6 @@ class Type {
uint ideal_reg() const { return _type_info[_base].ideal_reg; }
const char* msg() const { return _type_info[_base].msg; }
bool isa_oop_ptr() const { return _type_info[_base].isa_oop; }
- relocInfo::relocType reloc() const { return _type_info[_base].reloc; }
// Mapping from CI type system to compiler type:
static const Type* get_typeflow_type(ciType* type);
@@ -1176,10 +1174,11 @@ class TypePtr : public Type {
enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
protected:
TypePtr(TYPES t, PTR ptr, int offset,
+ relocInfo::relocType reloc,
const TypePtr* speculative = nullptr,
int inline_depth = InlineDepthBottom) :
Type(t), _speculative(speculative), _inline_depth(inline_depth), _offset(offset),
- _ptr(ptr) {}
+ _ptr(ptr), _reloc(reloc) {}
static const PTR ptr_meet[lastPTR][lastPTR];
static const PTR ptr_dual[lastPTR];
static const char * const ptr_msg[lastPTR];
@@ -1247,13 +1246,16 @@ class TypePtr : public Type {
public:
const int _offset; // Offset into oop, with TOP & BOT
const PTR _ptr; // Pointer equivalence class
+ const relocInfo::relocType _reloc;
int offset() const { return _offset; }
PTR ptr() const { return _ptr; }
+ relocInfo::relocType reloc() const { return _reloc; }
static const TypePtr *make(TYPES t, PTR ptr, int offset,
const TypePtr* speculative = nullptr,
- int inline_depth = InlineDepthBottom);
+ int inline_depth = InlineDepthBottom,
+ relocInfo::relocType reloc = relocInfo::none);
// Return a 'ptr' version of this type
virtual const TypePtr* cast_to_ptr_type(PTR ptr) const;
@@ -1316,15 +1318,15 @@ class TypePtr : public Type {
// include the stack pointer, top of heap, card-marking area, handles, etc.
class TypeRawPtr : public TypePtr {
protected:
- TypeRawPtr( PTR ptr, address bits ) : TypePtr(RawPtr,ptr,0), _bits(bits){}
+ TypeRawPtr(PTR ptr, address bits, relocInfo::relocType reloc) : TypePtr(RawPtr, ptr, 0, reloc), _bits(bits){}
public:
virtual bool eq( const Type *t ) const;
virtual uint hash() const; // Type specific hashing
const address _bits; // Constant value, if applicable
- static const TypeRawPtr *make( PTR ptr );
- static const TypeRawPtr *make( address bits );
+ static const TypeRawPtr* make(PTR ptr);
+ static const TypeRawPtr* make(address bits, relocInfo::relocType reloc = relocInfo::external_word_type);
// Return a 'ptr' version of this type
virtual const TypeRawPtr* cast_to_ptr_type(PTR ptr) const;
diff --git a/src/hotspot/share/opto/vectornode.cpp b/src/hotspot/share/opto/vectornode.cpp
index 651a27af9c79..a54fe6e3a733 100644
--- a/src/hotspot/share/opto/vectornode.cpp
+++ b/src/hotspot/share/opto/vectornode.cpp
@@ -22,10 +22,12 @@
*/
#include "memory/allocation.inline.hpp"
+#include "opto/addnode.hpp"
#include "opto/c2_globals.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
+#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
@@ -290,7 +292,146 @@ int VectorNode::opcode(int sopc, BasicType bt) {
assert(!VectorNode::is_convert_opcode(sopc),
"Convert node %s should be processed by VectorCastNode::opcode()",
NodeClassNames[sopc]);
- return 0; // Unimplemented
+ return 0; // not handled
+ }
+}
+
+// Return the scalar opcode for the specified vector opcode and basic type.
+// Returns 0 if not handled.
+int VectorNode::scalar_opcode(int vopc, BasicType bt) {
+ switch (vopc) {
+ case Op_AddVB:
+ case Op_AddVS:
+ case Op_AddVI:
+ return Op_AddI;
+ case Op_AddVL:
+ return Op_AddL;
+ case Op_AddVF:
+ return Op_AddF;
+ case Op_AddVD:
+ return Op_AddD;
+
+ case Op_SubVB:
+ case Op_SubVS:
+ case Op_SubVI:
+ return Op_SubI;
+ case Op_SubVL:
+ return Op_SubL;
+ case Op_SubVF:
+ return Op_SubF;
+ case Op_SubVD:
+ return Op_SubD;
+
+ case Op_MulVB:
+ case Op_MulVS:
+ case Op_MulVI:
+ return Op_MulI;
+ case Op_MulVL:
+ return Op_MulL;
+ case Op_MulVF:
+ return Op_MulF;
+ case Op_MulVD:
+ return Op_MulD;
+
+ case Op_DivVF:
+ return Op_DivF;
+ case Op_DivVD:
+ return Op_DivD;
+
+ case Op_AndV:
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR:
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ return Op_AndI;
+ case T_LONG:
+ return Op_AndL;
+ default:
+ return 0;
+ }
+
+ case Op_OrV:
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR:
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ return Op_OrI;
+ case T_LONG:
+ return Op_OrL;
+ default:
+ return 0;
+ }
+
+ case Op_XorV:
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR:
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ return Op_XorI;
+ case T_LONG:
+ return Op_XorL;
+ default:
+ return 0;
+ }
+
+ case Op_MinV:
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR:
+ // unsigned, not supported for Min
+ return 0;
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ return Op_MinI;
+ case T_LONG:
+ return Op_MinL;
+ case T_FLOAT:
+ return Op_MinF;
+ case T_DOUBLE:
+ return Op_MinD;
+ default:
+ return 0;
+ }
+
+ case Op_MaxV:
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR:
+ // unsigned, not supported for Max
+ return 0;
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ return Op_MaxI;
+ case T_LONG:
+ return Op_MaxL;
+ case T_FLOAT:
+ return Op_MaxF;
+ case T_DOUBLE:
+ return Op_MaxD;
+ default:
+ return 0;
+ }
+
+ case Op_SqrtVD:
+ return Op_SqrtD;
+ case Op_SqrtVF:
+ return Op_SqrtF;
+
+ case Op_FmaVF:
+ return Op_FmaF;
+ case Op_FmaVD:
+ return Op_FmaD;
+
+ default:
+ return 0; // not handled
}
}
@@ -984,17 +1125,9 @@ static Node* ideal_partial_operations(PhaseGVN* phase, Node* node, const TypeVec
}
}
-bool VectorNode::should_swap_inputs_to_help_global_value_numbering() {
- // Predicated vector operations are sensitive to ordering of inputs.
- // When the mask corresponding to a vector lane is false then
- // the result of the operation is corresponding lane of its first operand.
- // i.e. RES = VEC1.lanewise(OPER, VEC2, MASK) is semantically equivalent to
- // RES = BLEND(VEC1, VEC1.lanewise(OPER, VEC2), MASK)
- if (is_predicated_vector()) {
- return false;
- }
-
- switch(Opcode()) {
+// Check if the vector operation is commutative (assuming that it is not predicated/masked).
+static bool is_commutative_vector_operation(int opcode) {
+ switch(opcode) {
case Op_AddVB:
case Op_AddVS:
case Op_AddVI:
@@ -1022,18 +1155,228 @@ bool VectorNode::should_swap_inputs_to_help_global_value_numbering() {
case Op_XorVMask:
case Op_SaturatingAddV:
- assert(req() == 3, "Must be a binary operation");
- // For non-predicated commutative operations, sort the inputs in
- // increasing order of node indices.
- if (in(1)->_idx > in(2)->_idx) {
- return true;
- }
- // fallthrough
+ return true;
default:
return false;
}
}
+bool VectorNode::should_swap_inputs_to_help_global_value_numbering() {
+ // Predicated vector operations are sensitive to ordering of inputs.
+ // When the mask corresponding to a vector lane is false then
+ // the result of the operation is corresponding lane of its first operand.
+ // i.e. RES = VEC1.lanewise(OPER, VEC2, MASK) is semantically equivalent to
+ // RES = BLEND(VEC1, VEC1.lanewise(OPER, VEC2), MASK)
+ if (is_predicated_vector()) {
+ return false;
+ }
+
+ if (is_commutative_vector_operation(Opcode())) {
+ assert(req() == 3, "Must be a binary operation");
+ // For non-predicated commutative operations, sort the inputs in
+ // increasing order of node indices.
+ if (in(1)->_idx > in(2)->_idx) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Check whether we can push this vector op through replicate (all inputs are Replicate).
+bool VectorNode::can_push_through_replicate(BasicType bt) {
+ if (scalar_opcode(Opcode(), bt) == 0) {
+ return false;
+ }
+
+ // Skip over predicated vector operations for now, for masked lanes we preserve
+ // destination/first source vector contents.
+ if (is_predicated_vector()) {
+ return false;
+ }
+
+ for (uint i = 1; i < req(); i++) {
+ if (in(i)->Opcode() != Op_Replicate) {
+ return false;
+ }
+ }
+ return true;
+}
+
+Node* VectorNode::make_scalar(Compile* c, int vopc, BasicType bt, Node* control, Node* in1, Node* in2, Node* in3) {
+ int sopc = scalar_opcode(vopc, bt);
+ assert(sopc != 0, "unhandled vector opcode %s", NodeClassNames[vopc]);
+ assert(opcode(sopc, bt) == vopc, "scalar_opcode and opcode must agree for %s", NodeClassNames[vopc]);
+ switch (sopc) {
+ case Op_AddI:
+ return new AddINode(in1, in2);
+ case Op_AddL:
+ return new AddLNode(in1, in2);
+ case Op_AddF:
+ return new AddFNode(in1, in2);
+ case Op_AddD:
+ return new AddDNode(in1, in2);
+ case Op_MulI:
+ return new MulINode(in1, in2);
+ case Op_MulL:
+ return new MulLNode(in1, in2);
+ case Op_MulF:
+ return new MulFNode(in1, in2);
+ case Op_MulD:
+ return new MulDNode(in1, in2);
+ case Op_AndI:
+ return new AndINode(in1, in2);
+ case Op_AndL:
+ return new AndLNode(in1, in2);
+ case Op_DivF:
+ return new DivFNode(control, in1, in2);
+ case Op_DivD:
+ return new DivDNode(control, in1, in2);
+ case Op_OrI:
+ return new OrINode(in1, in2);
+ case Op_OrL:
+ return new OrLNode(in1, in2);
+ case Op_XorI:
+ return new XorINode(in1, in2);
+ case Op_XorL:
+ return new XorLNode(in1, in2);
+ case Op_SubI:
+ return new SubINode(in1, in2);
+ case Op_SubL:
+ return new SubLNode(in1, in2);
+ case Op_SubF:
+ return new SubFNode(in1, in2);
+ case Op_SubD:
+ return new SubDNode(in1, in2);
+ case Op_MinI:
+ return new MinINode(in1, in2);
+ case Op_MinL:
+ return new MinLNode(c, in1, in2);
+ case Op_MinF:
+ return new MinFNode(in1, in2);
+ case Op_MinD:
+ return new MinDNode(in1, in2);
+ case Op_MaxI:
+ return new MaxINode(in1, in2);
+ case Op_MaxL:
+ return new MaxLNode(c, in1, in2);
+ case Op_MaxF:
+ return new MaxFNode(in1, in2);
+ case Op_MaxD:
+ return new MaxDNode(in1, in2);
+ case Op_SqrtF:
+ return new SqrtFNode(c, control, in1);
+ case Op_SqrtD:
+ return new SqrtDNode(c, control, in1);
+ case Op_FmaF:
+ return new FmaFNode(in1, in2, in3);
+ case Op_FmaD:
+ return new FmaDNode(in1, in2, in3);
+ default:
+ assert(false, "unexpected scalar opcode");
+ return nullptr;
+ }
+}
+
+// Re-wires and creates a new ideal graph pattern with the following connectivity:
+// parent(child(cinput1, cinput2), pinput2)
+Node* VectorNode::create_reassociated_node(Node* parent, Node* child, Node* cinput1, Node* cinput2,
+ Node* pinput2, PhaseGVN* phase) {
+ Node* cloned_child = child->clone();
+ cloned_child->set_req(1, cinput1);
+ cloned_child->set_req(2, cinput2);
+ cloned_child = phase->transform(cloned_child);
+ Node* cloned_parent = parent->clone();
+ cloned_parent->set_req(1, cloned_child);
+ cloned_parent->set_req(2, pinput2);
+ return cloned_parent;
+}
+
+// Try to reassociate commutative vector operations using the following ideal transformation.
+// This will facilitate strength-reducing a vector operation with all replicated inputs to
+// a scalar operation.
+//
+// VectorOp (Replicate INP1) (VectorOp (Replicate INP2) INP3) =>
+// VectorOp (VectorOp (Replicate INP1) (Replicate INP2)) INP3
+//
+Node* VectorNode::reassociate_vector_operation(PhaseGVN* phase) {
+ // Enable re-association for integral vector operations.
+ if (!is_integral_type(vect_type()->element_basic_type())) {
+ return nullptr;
+ }
+
+ // Enable re-association for commutative vector operations.
+ if (!is_commutative_vector_operation(Opcode())) {
+ return nullptr;
+ }
+
+ Node* in1 = in(1);
+ Node* in2 = in(2);
+ if (in2->Opcode() == Op_Replicate && in1->Opcode() == Opcode()) {
+ swap(in1, in2);
+ }
+
+ if (in1->Opcode() != Op_Replicate || in2->Opcode() != Opcode()) {
+ return nullptr;
+ }
+
+ // Skip predicated vector operations, mask semantics prevent reassociation.
+ if (is_predicated_vector() || in2->as_Vector()->is_predicated_vector()) {
+ return nullptr;
+ }
+
+ Node* in2_1 = in2->in(1);
+ Node* in2_2 = in2->in(2);
+ if (in2_1->Opcode() == Op_Replicate) {
+ return create_reassociated_node(this, in2, in1, in2_1, in2_2, phase);
+ } else if (in2_2->Opcode() == Op_Replicate) {
+ return create_reassociated_node(this, in2, in1, in2_2, in2_1, phase);
+ }
+
+ return nullptr;
+}
+
+// Convert a vector operation with all Replicate inputs to a scalar operation using the
+// following ideal transformation.
+//
+// VectorOp (Replicate INP1, Replicate INP2) =>
+// Replicate (ScalarOp INP1, INP2)
+//
+Node* VectorNode::push_through_replicate(PhaseGVN* phase) {
+ BasicType bt = vect_type()->element_basic_type();
+ if (!can_push_through_replicate(bt)) {
+ return nullptr;
+ }
+
+ assert(req() >= 2 && req() <= 4, "unexpected req() %u for %s", req(), NodeClassNames[Opcode()]);
+
+ Node* sinp1 = nullptr;
+ Node* sinp2 = nullptr;
+ Node* sinp3 = nullptr;
+
+ assert(in(1)->Opcode() == Op_Replicate, "");
+ sinp1 = in(1)->in(1);
+
+ if (req() > 2) {
+ assert(in(2)->Opcode() == Op_Replicate, "");
+ sinp2 = in(2)->in(1);
+ }
+
+ if (req() > 3) {
+ assert(in(3)->Opcode() == Op_Replicate, "");
+ sinp3 = in(3)->in(1);
+ }
+
+ Node* sop = make_scalar(phase->C, Opcode(), bt, in(0), sinp1, sinp2, sinp3);
+ if (sop == nullptr) {
+ return nullptr;
+ }
+
+ sop = phase->transform(sop);
+
+ return new ReplicateNode(sop, vect_type());
+}
+
Node* VectorNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* n = ideal_partial_operations(phase, this, vect_type());
if (n != nullptr) {
@@ -1044,7 +1387,13 @@ Node* VectorNode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (should_swap_inputs_to_help_global_value_numbering()) {
swap_edges(1, 2);
}
- return nullptr;
+
+ n = push_through_replicate(phase);
+ if (n != nullptr) {
+ return n;
+ }
+
+ return reassociate_vector_operation(phase);
}
// Traverses a chain of VectorMaskCast and returns the first non VectorMaskCast node.
@@ -2094,7 +2443,7 @@ Node* FmaVNode::Ideal(PhaseGVN* phase, bool can_reshape) {
swap_edges(1, 2);
return this;
}
- return nullptr;
+ return VectorNode::Ideal(phase, can_reshape);
}
// Generate other vector nodes to implement the masked/non-masked vector negation.
diff --git a/src/hotspot/share/opto/vectornode.hpp b/src/hotspot/share/opto/vectornode.hpp
index 897cedd6a1bf..6bcb7702d13a 100644
--- a/src/hotspot/share/opto/vectornode.hpp
+++ b/src/hotspot/share/opto/vectornode.hpp
@@ -146,12 +146,20 @@ class VectorNode : public TypeNode {
static bool is_minmax_opcode(int opc);
bool should_swap_inputs_to_help_global_value_numbering();
+ Node* reassociate_vector_operation(PhaseGVN* phase);
+ static Node* create_reassociated_node(Node* parent, Node* child, Node* cinput1, Node* cinput2,
+ Node* pinput2, PhaseGVN* phase);
static bool is_vshift_cnt_opcode(int opc);
static bool is_rotate_opcode(int opc);
static int opcode(int sopc, BasicType bt); // scalar_opc -> vector_opc
+ static int scalar_opcode(int vopc, BasicType bt); // vector_opc -> scalar_opc, 0 if not handled
+ static Node* make_scalar(Compile* c, int vopc, BasicType bt, Node* control, Node* in1, Node* in2, Node* in3);
+
+ bool can_push_through_replicate(BasicType bt);
+ Node* push_through_replicate(PhaseGVN* phase);
static int shift_count_opcode(int opc);
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index d6a435d34d1a..318f40374a6f 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -2835,6 +2835,10 @@ JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboole
Handle a(thread, JNIHandles::resolve_non_null(array));
assert(a->is_typeArray(), "just checking");
+ // We must defer JVM TI suspension while we have access to a Java object
+ // as it could surprise the debugger if we mutate it concurrently whilst
+ // logically suspended.
+ thread->enter_jni_deferred_suspension();
// Pin object
Universe::heap()->pin_object(thread, a());
@@ -2852,6 +2856,7 @@ JNI_ENTRY(void, jni_ReleasePrimitiveArrayCritical(JNIEnv *env, jarray array, voi
HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
// Unpin object
Universe::heap()->unpin_object(thread, JNIHandles::resolve_non_null(array));
+ thread->exit_jni_deferred_suspension();
HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
JNI_END
@@ -2859,6 +2864,13 @@ JNI_END
JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
oop s = JNIHandles::resolve_non_null(string);
+
+ // We must defer JVM TI suspension while we have access to a Java object.
+ // Even if we are taking a private copy we must not be considered
+ // suspended as the debugger could be mutating the string we are about
+ // to copy.
+ thread->enter_jni_deferred_suspension();
+
jchar* ret;
if (!java_lang_String::is_latin1(s)) {
typeArrayHandle s_value(thread, java_lang_String::value(s));
@@ -2879,6 +2891,10 @@ JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jbool
ret[i] = ((jchar) s_value->byte_at(i)) & 0xff;
}
ret[s_len] = 0;
+ } else {
+ // If we return null there should not be a paired release operation
+ // so we have to cancel suspension deferral here.
+ thread->exit_jni_deferred_suspension();
}
if (isCopy != nullptr) *isCopy = JNI_TRUE;
}
@@ -2904,6 +2920,7 @@ JNI_ENTRY(void, jni_ReleaseStringCritical(JNIEnv *env, jstring str, const jchar
// Unpin value array
Universe::heap()->unpin_object(thread, value);
}
+ thread->exit_jni_deferred_suspension();
HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
JNI_END
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 6d3449098148..4bbaa9eda3cc 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -872,11 +872,11 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j
return !code->is_marked_for_deoptimization();
WB_END
-static bool is_excluded_for_compiler(AbstractCompiler* comp, methodHandle& mh) {
+static bool is_excluded_for_compiler(AbstractCompiler* comp, int comp_level, methodHandle& mh) {
if (comp == nullptr) {
return true;
}
- CompilerDirectiveMatcher matcher(mh, comp);
+ CompilerDirectiveMatcher matcher(mh, comp_level);
return matcher.directive_set()->ExcludeOption;
}
@@ -902,8 +902,10 @@ WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method,
// to exclude a compilation of 'method'.
if (comp_level == CompLevel_any) {
// Both compilers could have ExcludeOption set. Check all combinations.
- bool excluded_c1 = is_excluded_for_compiler(CompileBroker::compiler1(), mh);
- bool excluded_c2 = is_excluded_for_compiler(CompileBroker::compiler2(), mh);
+ bool excluded_c1 = is_excluded_for_compiler(CompileBroker::compiler1(), CompLevel_simple, mh)
+ && is_excluded_for_compiler(CompileBroker::compiler1(), CompLevel_limited_profile, mh)
+ && is_excluded_for_compiler(CompileBroker::compiler1(), CompLevel_full_profile, mh);
+ bool excluded_c2 = is_excluded_for_compiler(CompileBroker::compiler2(), CompLevel_full_optimization, mh);
if (excluded_c1 && excluded_c2) {
// Compilation of 'method' excluded by both compilers.
return false;
@@ -914,9 +916,11 @@ WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method,
return can_be_compiled_at_level(mh, is_osr, CompLevel_full_optimization);
} else if (excluded_c2) {
// C2 only has ExcludeOption set: Check if compilable with C1.
- return can_be_compiled_at_level(mh, is_osr, CompLevel_simple);
+ return can_be_compiled_at_level(mh, is_osr, CompLevel_simple)
+ || can_be_compiled_at_level(mh, is_osr, CompLevel_limited_profile)
+ || can_be_compiled_at_level(mh, is_osr, CompLevel_full_profile);
}
- } else if (comp_level > CompLevel_none && is_excluded_for_compiler(CompileBroker::compiler((int)comp_level), mh)) {
+ } else if (comp_level > CompLevel_none && is_excluded_for_compiler(CompileBroker::compiler((int)comp_level), comp_level, mh)) {
// Compilation of 'method' excluded by compiler used for 'comp_level'.
return false;
}
@@ -952,7 +956,7 @@ WB_ENTRY(jboolean, WB_IsIntrinsicAvailable(JNIEnv* env, jobject o, jobject metho
compilation_context_id = reflected_method_to_jmid(thread, env, compilation_context);
CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
methodHandle cch(THREAD, Method::checked_resolve_jmethod_id(compilation_context_id));
- CompilerDirectiveMatcher matcher(cch, comp);
+ CompilerDirectiveMatcher matcher(cch, compLevel);
return comp->is_intrinsic_available(mh, matcher.directive_set());
} else {
// Calling with null matches default directive
@@ -1132,7 +1136,7 @@ bool WhiteBox::compile_method(Method* method, int comp_level, int bci, JavaThrea
// Check if compilation is blocking
methodHandle mh(THREAD, method);
- CompilerDirectiveMatcher matcher(mh, comp);
+ CompilerDirectiveMatcher matcher(mh, comp_level);
bool is_blocking = !matcher.directive_set()->BackgroundCompilationOption;
// Compile method and check result
@@ -1151,7 +1155,7 @@ bool WhiteBox::compile_method(Method* method, int comp_level, int bci, JavaThrea
} else if (mh->lookup_osr_nmethod_for(bci, comp_level, false) != nullptr) {
return true;
}
- tty->print("WB error: failed to %s compile at level %d method ", is_blocking ? "blocking" : "", comp_level);
+ tty->print("WB error: failed to%s compile at level %d method ", is_blocking ? " blocking" : "", comp_level);
mh->print_short_name(tty);
tty->cr();
if (is_blocking && is_queued) {
@@ -1184,7 +1188,7 @@ WB_ENTRY(jboolean, WB_ShouldPrintAssembly(JNIEnv* env, jobject o, jobject method
CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- CompilerDirectiveMatcher matcher(mh, CompileBroker::compiler(comp_level));
+ CompilerDirectiveMatcher matcher(mh, comp_level);
return matcher.directive_set()->PrintAssemblyOption;
WB_END
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index fe5222df345d..341e0b80a5fa 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -534,6 +534,7 @@ static SpecialFlag const special_jvm_flags[] = {
{ "UseSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "CreateMinidumpOnCrash", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
+ { "InitiatingHeapOccupancyPercent", JDK_Version::jdk(27), JDK_Version::jdk(28), JDK_Version::jdk(29) },
// -------------- Obsolete Flags - sorted by expired_in --------------
@@ -582,6 +583,7 @@ typedef struct {
static AliasedFlag const aliased_jvm_flags[] = {
{ "CreateMinidumpOnCrash", "CreateCoredumpOnCrash" },
+ G1GC_ONLY({"InitiatingHeapOccupancyPercent" COMMA "G1IHOP" } COMMA)
{ nullptr, nullptr}
};
@@ -2717,6 +2719,10 @@ jint Arguments::finalize_vm_init_args() {
return JNI_ERR;
}
+ // Called after ClassLoader::lookup_vm_options() but before class loading begins.
+ // TODO: Obtain and pass correct preview mode flag value here.
+ ClassLoader::set_preview_mode(false);
+
if (!check_vm_args_consistency()) {
return JNI_ERR;
}
diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp
index 36eece6f013f..ebcf6b4e14e3 100644
--- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp
@@ -61,9 +61,21 @@ JVMFlag::Error CICompilerCountConstraintFunc(intx value, bool verbose) {
"at least %d \n",
value, min_number_of_compiler_threads);
return JVMFlag::VIOLATES_CONSTRAINT;
- } else {
- return JVMFlag::SUCCESS;
}
+
+ // Limit CICompilerCount to a reasonable value in product builds.
+#ifndef ASSERT
+ int active_processor_count = os::active_processor_count();
+ // On a single-CPU machine we still can run C1 and C2 compiler threads, so allow up to 2x for tiered.
+ int reasonable_threads_num = CompilerConfig::is_tiered() ? active_processor_count * 2 : active_processor_count;
+ if (value > reasonable_threads_num) {
+ JVMFlag::printError(verbose, "CICompilerCount is too large (%" PRIdPTR ") for current active processor count %d \n",
+ CICompilerCount, active_processor_count);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
}
JVMFlag::Error AllocatePrefetchStepSizeConstraintFunc(int value, bool verbose) {
diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp
index b54068d65d6f..d61be7631cbb 100644
--- a/src/hotspot/share/runtime/handshake.cpp
+++ b/src/hotspot/share/runtime/handshake.cpp
@@ -83,8 +83,10 @@ class HandshakeOperation : public CHeapObj<mtThread> {
int32_t pending_threads() { return AtomicAccess::load(&_pending_threads); }
const char* name() { return _handshake_cl->name(); }
bool is_async() { return _handshake_cl->is_async(); }
- bool is_suspend() { return _handshake_cl->is_suspend(); }
+ bool is_self_suspend() { return _handshake_cl->is_self_suspend(); }
+ bool is_suspend_request() { return _handshake_cl->is_suspend_request(); }
bool is_async_exception() { return _handshake_cl->is_async_exception(); }
+ bool is_enabled() { return _handshake_cl->is_enabled(_target); }
};
class AsyncHandshakeOperation : public HandshakeOperation {
@@ -472,18 +474,24 @@ void Handshake::execute(AsyncHandshakeClosure* hs_cl, JavaThread* target) {
}
// Filters
+
+// op is enabled and can be executed by the current thread rather than the target.
static bool non_self_executable_filter(HandshakeOperation* op) {
- return !op->is_async();
+ return !op->is_async() && op->is_enabled();
}
+// op is not an async-exception op
static bool no_async_exception_filter(HandshakeOperation* op) {
return !op->is_async_exception();
}
+// op is an async-exception op
static bool async_exception_filter(HandshakeOperation* op) {
return op->is_async_exception();
}
+// op is not any kind of suspend op, nor an async-exception op
static bool no_suspend_no_async_exception_filter(HandshakeOperation* op) {
- return !op->is_suspend() && !op->is_async_exception();
+ return !op->is_self_suspend() && !op->is_suspend_request() && !op->is_async_exception();
}
+// All ops
static bool all_ops_filter(HandshakeOperation* op) {
return true;
}
@@ -521,8 +529,12 @@ HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend, bool che
assert(_lock.owned_by_self(), "Lock must be held");
assert(allow_suspend || !check_async_exception, "invalid case");
#if INCLUDE_JVMTI
- if (allow_suspend && (_handshakee->is_disable_suspend() || _handshakee->is_vthread_transition_disabler())) {
- // filter out suspend operations while JavaThread can not be suspended
+ // Filter out suspend operations while JavaThread can not be suspended.
+ // Potentially this could be folded into the `is_enabled` state of the operation
+ // and filtered directly through _queue.peek, but the incoming `allow_suspend`
+ // complicates that so we just maintain the explicit checks for now.
+ if (allow_suspend && (_handshakee->is_disable_suspend() || _handshakee->is_vthread_transition_disabler() ||
+ _handshakee->jni_deferred_suspension())) {
allow_suspend = false;
}
#endif
@@ -565,12 +577,16 @@ void HandshakeState::clean_async_exception_operation() {
}
}
+// Returns true if there is an enabled op that the current thread can execute
+// on behalf of the handshakee.
bool HandshakeState::have_non_self_executable_operation() {
assert(_handshakee != Thread::current(), "Must not be called by self");
assert(_lock.owned_by_self(), "Lock must be held");
return _queue.contains(non_self_executable_filter);
}
+// Returns an enabled op that the current thread can execute
+// on behalf of the handshakee.
HandshakeOperation* HandshakeState::get_op() {
assert(_handshakee != Thread::current(), "Must not be called by self");
assert(_lock.owned_by_self(), "Lock must be held");
@@ -683,7 +699,7 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
return HandshakeState::_not_safe;
}
- // Claim the mutex if there still an operation to be executed.
+ // Claim the mutex if there is still an enabled operation to be executed.
if (!claim_handshake()) {
return HandshakeState::_claim_failed;
}
@@ -699,8 +715,14 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
Thread* current_thread = Thread::current();
HandshakeOperation* op = get_op();
+ // It is possible that since we claimed the handshake the op has
+ // transitioned to a disabled state and so won't be returned by get_op.
+ if (op == nullptr) {
+ _lock.unlock();
+ return HandshakeState::_no_operation;
+ }
- assert(op != nullptr, "Must have an op");
+ assert(op->is_enabled(), "Should not reach here with a disabled op");
assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be");
assert(op->_target == nullptr || _handshakee == op->_target, "Wrong thread");
diff --git a/src/hotspot/share/runtime/handshake.hpp b/src/hotspot/share/runtime/handshake.hpp
index c764bbcfcd23..cfdaf2e82df1 100644
--- a/src/hotspot/share/runtime/handshake.hpp
+++ b/src/hotspot/share/runtime/handshake.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,10 @@ class HandshakeClosure : public ThreadClosure, public CHeapObj<mtThread> {
virtual ~HandshakeClosure() {}
const char* name() const { return _name; }
virtual bool is_async() { return false; }
- virtual bool is_suspend() { return false; }
+ virtual bool is_self_suspend() { return false; }
+ virtual bool is_suspend_request() { return false; }
virtual bool is_async_exception() { return false; }
+ virtual bool is_enabled(Thread* target) { return true; }
virtual void do_thread(Thread* thread) = 0;
};
@@ -99,7 +101,6 @@ class HandshakeState {
Monitor _lock;
// Set to the thread executing the handshake operation.
Thread* volatile _active_handshaker;
-
bool claim_handshake();
bool possibly_can_process_handshake();
bool can_process_handshake();
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index 758051a73515..e501930b3a04 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -29,6 +29,7 @@
#include "cds/dynamicArchive.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
+#include "classfile/classPrinter.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
@@ -114,11 +115,16 @@ static int compare_methods(Method** a, Method** b) {
return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
}
+inline CompLevel method_code_comp_level(const Method* m) {
+ const nmethod* code = m->code();
+ return code != nullptr ? static_cast<CompLevel>(code->comp_level()) : CompLevel_any;
+}
+
static void collect_profiled_methods(Method* m) {
Thread* thread = Thread::current();
methodHandle mh(thread, m);
if ((m->method_data() != nullptr) &&
- (PrintMethodData || CompilerOracle::should_print(mh))) {
+ (PrintMethodData || CompilerOracle::should_print(mh, method_code_comp_level(m)))) {
collected_profiled_methods->push(m);
}
}
@@ -152,7 +158,7 @@ static void print_method_profiling_data() {
m->method_data()->parameters_type_data()->print_data_on(&ss);
}
// Buffering to a stringStream, disable internal buffering so it's not done twice.
- m->print_codes_on(&ss, 0, false);
+ m->print_codes_on(&ss, ClassPrinter::PRINT_METHOD_DATA, false);
tty->print("%s", ss.as_string()); // print all at once
total_size += m->method_data()->size_in_bytes();
}
diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp
index 5a29a668a549..ec1c8f64619f 100644
--- a/src/hotspot/share/runtime/javaThread.cpp
+++ b/src/hotspot/share/runtime/javaThread.cpp
@@ -469,6 +469,7 @@ JavaThread::JavaThread(MemTag mem_tag) :
_exception_handler_pc(nullptr),
_jni_active_critical(0),
+ _jni_deferred_suspension_count(0),
_pending_jni_exception_check_fn(nullptr),
_depth_first_number(0),
diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp
index bc6c0a3a4fda..5dd84fcc01d9 100644
--- a/src/hotspot/share/runtime/javaThread.hpp
+++ b/src/hotspot/share/runtime/javaThread.hpp
@@ -458,9 +458,12 @@ class JavaThread: public Thread {
volatile address _exception_handler_pc; // PC for handler of exception
private:
- // support for JNI critical regions
+ // support for JNI critical regions interaction with GC
jint _jni_active_critical; // count of entries into JNI critical region
+ // support for JNI critical regions deferral of JVM TI suspension
+ jint _jni_deferred_suspension_count;
+
// Checked JNI: function name requires exception check
char* _pending_jni_exception_check_fn;
@@ -980,10 +983,19 @@ class JavaThread: public Thread {
_jni_active_critical--;
assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
}
-
// Atomic version; invoked by a thread other than the owning thread.
bool in_critical_atomic() { return AtomicAccess::load(&_jni_active_critical) > 0; }
+ bool jni_deferred_suspension() { return AtomicAccess::load(&_jni_deferred_suspension_count); }
+ inline void enter_jni_deferred_suspension();
+ void exit_jni_deferred_suspension() {
+ precond(Thread::current() == this);
+ int sc = AtomicAccess::load(&_jni_deferred_suspension_count);
+ AtomicAccess::store(&_jni_deferred_suspension_count, sc - 1);
+ assert(_jni_deferred_suspension_count >= 0,
+ "JNI deferred suspension nesting problem?");
+ }
+
// Checked JNI: is the programmer required to check for exceptions, if so specify
// which function name. Returning to a Java frame should implicitly clear the
// pending check, this is done for Native->Java transitions (i.e. user JNI code).
diff --git a/src/hotspot/share/runtime/javaThread.inline.hpp b/src/hotspot/share/runtime/javaThread.inline.hpp
index 2c2ed2da2fae..5b4556b56668 100644
--- a/src/hotspot/share/runtime/javaThread.inline.hpp
+++ b/src/hotspot/share/runtime/javaThread.inline.hpp
@@ -196,6 +196,14 @@ void JavaThread::enter_critical() {
_jni_active_critical++;
}
+void JavaThread::enter_jni_deferred_suspension() {
+ precond(JavaThread::current() == this);
+ assert(_thread_state != _thread_in_native && _thread_state != _thread_blocked,
+ "Must not defer suspension when handshake-safe");
+ int sc = AtomicAccess::load(&_jni_deferred_suspension_count);
+ AtomicAccess::store(&_jni_deferred_suspension_count, sc + 1);
+}
+
inline void JavaThread::set_done_attaching_via_jni() {
_jni_attach_state = _attached_via_jni;
OrderAccess::fence();
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index e39ecb7c6c38..3d0ed5d2a23e 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -3194,7 +3194,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
}
}
- CompilerDirectiveMatcher matcher(method, CompileBroker::compiler(CompLevel_simple));
+ CompilerDirectiveMatcher matcher(method, CompLevel_simple);
if (matcher.directive_set()->PrintAssemblyOption) {
nm->print_code();
}
diff --git a/src/hotspot/share/runtime/stubCodeGenerator.cpp b/src/hotspot/share/runtime/stubCodeGenerator.cpp
index 94825ba2d0bd..252f90e1bde3 100644
--- a/src/hotspot/share/runtime/stubCodeGenerator.cpp
+++ b/src/hotspot/share/runtime/stubCodeGenerator.cpp
@@ -255,8 +255,19 @@ void StubCodeGenerator::print_statistics_on(outputStream* st) {
}
#ifdef ASSERT
-void StubCodeGenerator::verify_stub(StubId stub_id) {
- assert(StubRoutines::stub_to_blob(stub_id) == blob_id(), "wrong blob %s for generation of stub %s", StubRoutines::get_blob_name(blob_id()), StubRoutines::get_stub_name(stub_id));
+void StubCodeGenerator::verify_stub(const char* name, StubId stub_id) {
+ if (_stub_data != nullptr) {
+ // we are collecting stub data for the current blob so the stub
+ // code should have been marked using a stub id
+ assert(stub_id != StubId::NO_STUBID, "StubCodeMark for stub %s must be declared with stub id as argument", name);
+ }
+ if (stub_id != StubId::NO_STUBID) {
+ // we may not always be collecting stub data but any stub id that
+ // is provided needs to belong to the current blob id and its name
+ // ought to have been retrieved via a call to StubInfo::name
+ assert(StubRoutines::stub_to_blob(stub_id) == blob_id(), "wrong blob %s for generation of stub %s", StubRoutines::get_blob_name(blob_id()), StubRoutines::get_stub_name(stub_id));
+ assert(name == StubInfo::name(stub_id), "name %s does not match declared stub name %s", name, StubInfo::name(stub_id));
+ }
}
#endif
@@ -268,15 +279,15 @@ StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, const char* group, const cha
_cgen->stub_prolog(_cdesc);
// define the stub's beginning (= entry point) to be after the prolog:
_cdesc->set_begin(_cgen->assembler()->pc());
+#ifdef ASSERT
+ cgen->verify_stub(name, stub_id);
+#endif
}
StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name) : StubCodeMark(cgen, group, name, StubId::NO_STUBID) {
}
StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, StubId stub_id) : StubCodeMark(cgen, "StubRoutines", StubRoutines::get_stub_name(stub_id), stub_id) {
-#ifdef ASSERT
- cgen->verify_stub(stub_id);
-#endif
}
StubCodeMark::~StubCodeMark() {
diff --git a/src/hotspot/share/runtime/stubCodeGenerator.hpp b/src/hotspot/share/runtime/stubCodeGenerator.hpp
index 9b94e24fd9b1..9f796b43fbae 100644
--- a/src/hotspot/share/runtime/stubCodeGenerator.hpp
+++ b/src/hotspot/share/runtime/stubCodeGenerator.hpp
@@ -188,7 +188,7 @@ class StubCodeGenerator: public StackObj {
address load_archive_data(StubId stub_id, GrowableArray *entries = nullptr, GrowableArray* extras = nullptr);
void store_archive_data(StubId stub_id, address start, address end, GrowableArray *entries = nullptr, GrowableArray* extras = nullptr);
#ifdef ASSERT
- void verify_stub(StubId stub_id);
+ void verify_stub(const char* name, StubId stub_id);
#endif
};
diff --git a/src/hotspot/share/runtime/suspendResumeManager.cpp b/src/hotspot/share/runtime/suspendResumeManager.cpp
index 3408d763e573..3181c0f78aac 100644
--- a/src/hotspot/share/runtime/suspendResumeManager.cpp
+++ b/src/hotspot/share/runtime/suspendResumeManager.cpp
@@ -48,7 +48,7 @@ class ThreadSelfSuspensionHandshakeClosure : public AsyncHandshakeClosure {
current->set_thread_state(jts);
current->suspend_resume_manager()->set_async_suspend_handshake(false);
}
- virtual bool is_suspend() { return true; }
+ virtual bool is_self_suspend() { return true; }
};
// This is the closure that synchronously honors the suspend request.
@@ -64,6 +64,16 @@ class SuspendThreadHandshakeClosure : public HandshakeClosure {
_did_suspend = target->suspend_resume_manager()->suspend_with_handshake(_register_vthread_SR);
}
bool did_suspend() { return _did_suspend; }
+ virtual bool is_suspend_request() { return true; }
+
+ // If the target is in a JNI deferred suspension region, then we cannot
+ // process this operation. This must be checked with the HandshakeState lock
+ // held, which together with the fact the target only enters a deferred
+ // region from a handshake-unsafe state, means we cannot race with the
+ // target entering that region.
+ virtual bool is_enabled(Thread* target) {
+ return !JavaThread::cast(target)->jni_deferred_suspension();
+ }
};
void SuspendResumeManager::set_suspended(bool is_suspend, bool register_vthread_SR) {
diff --git a/src/hotspot/share/utilities/unsigned5.hpp b/src/hotspot/share/utilities/unsigned5.hpp
index 8e3724a0012a..1fc6aeaffb60 100644
--- a/src/hotspot/share/utilities/unsigned5.hpp
+++ b/src/hotspot/share/utilities/unsigned5.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -280,6 +280,7 @@ class UNSIGNED5 : AllStatic {
int len = next_length(); // 0 or length in [1..5]
if (len == 0) break;
_position += len;
+ ++actual;
}
return actual;
}
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/ML_KEM.java b/src/java.base/share/classes/com/sun/crypto/provider/ML_KEM.java
index 56a119893a79..6564f40545a3 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/ML_KEM.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/ML_KEM.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -527,6 +527,8 @@ protected ML_KEM_KeyPair generateKemKeyPair(byte[] kem_d_z) {
} catch (DigestException e) {
// This should never happen.
throw new RuntimeException(e);
+ } finally {
+ mlKemH.reset();
}
// The 2nd 32-byte `z` is copied into decapsKey
System.arraycopy(kem_d_z, 32, decapsKey,
@@ -562,7 +564,6 @@ protected ML_KEM_EncapsulateResult encapsulate(
var randomCoins = Arrays.copyOfRange(kHatAndRandomCoins, 32, 64);
var cipherText = kPkeEncrypt(new K_PKE_EncryptionKey(encapsulationKey.keyBytes),
randomMessage, randomCoins);
- Arrays.fill(randomCoins, (byte) 0);
byte[] sharedSecret = Arrays.copyOfRange(kHatAndRandomCoins, 0, 32);
Arrays.fill(kHatAndRandomCoins, (byte) 0);
@@ -613,6 +614,7 @@ protected byte[] decapsulate(ML_KEM_DecapsulationKey decapsulationKey,
var fakeResult = mlKemJ.digest();
var computedCipherText = kPkeEncrypt(
new K_PKE_EncryptionKey(encapsKeyBytes), mCandidate, coins);
+ Arrays.fill(mCandidate, (byte)0);
// The rest of this method implements the following in constant time
//
@@ -648,9 +650,11 @@ private K_PKE_KeyPair generateK_PkeKeyPair(byte[] seed) {
MessageDigest mlKemG;
SHAKE256 mlKemJ;
+ int cbdInputLen = 64 * mlKem_eta1;
+ byte[] cbdInput = new byte[cbdInputLen];
try {
mlKemG = MessageDigest.getInstance(HASH_G_NAME);
- mlKemJ = new SHAKE256(64 * mlKem_eta1);
+ mlKemJ = new SHAKE256(cbdInputLen);
} catch (NoSuchAlgorithmException e) {
// This should never happen.
throw new RuntimeException(e);
@@ -671,22 +675,26 @@ private K_PKE_KeyPair generateK_PkeKeyPair(byte[] seed) {
int keyGenN = 0;
byte[] prfSeed = new byte[sigma.length + 1];
System.arraycopy(sigma, 0, prfSeed, 0, sigma.length);
- byte[] cbdInput;
short[][] keyGenS = new short[mlKem_k][];
short[][] keyGenE = new short[mlKem_k][];
- for (int i = 0; i < mlKem_k; i++) {
- prfSeed[sigma.length] = (byte) (keyGenN++);
- mlKemJ.update(prfSeed);
- cbdInput = mlKemJ.digest();
- keyGenS[i] = centeredBinomialDistribution(mlKem_eta1, cbdInput);
- }
- for (int i = 0; i < mlKem_k; i++) {
- prfSeed[sigma.length] = (byte) (keyGenN++);
- mlKemJ.update(prfSeed);
- cbdInput = mlKemJ.digest();
- keyGenE[i] = centeredBinomialDistribution(mlKem_eta1, cbdInput);
+ try {
+ for (int i = 0; i < mlKem_k; i++) {
+ prfSeed[sigma.length] = (byte) (keyGenN++);
+ mlKemJ.update(prfSeed);
+ mlKemJ.digest(cbdInput, 0, cbdInputLen);
+ keyGenS[i] = centeredBinomialDistribution(mlKem_eta1, cbdInput);
+ }
+ for (int i = 0; i < mlKem_k; i++) {
+ prfSeed[sigma.length] = (byte) (keyGenN++);
+ mlKemJ.update(prfSeed);
+ mlKemJ.digest(cbdInput, 0, cbdInputLen);
+ keyGenE[i] = centeredBinomialDistribution(mlKem_eta1, cbdInput);
+ }
+ } catch (DigestException e) {
+ throw new ProviderException("Internal error", e);
}
Arrays.fill(sigma, (byte)0);
+ Arrays.fill(cbdInput, (byte)0);
short[][] keyGenSHat = mlKemVectorNTT(keyGenS);
mlKemVectorReduce(keyGenSHat);
@@ -700,7 +708,6 @@ private K_PKE_KeyPair generateK_PkeKeyPair(byte[] seed) {
for (int i = 0; i < mlKem_k; i++) {
encodePoly12(keyGenTHat[i], pkEncoded, i * ((ML_KEM_N * 12) / 8));
encodePoly12(keyGenSHat[i], skEncoded, i * ((ML_KEM_N * 12) / 8));
- Arrays.fill(keyGenEHat[i], (short) 0);
Arrays.fill(keyGenSHat[i], (short) 0);
}
System.arraycopy(rho, 0,
@@ -723,39 +730,61 @@ private K_PKE_CipherText kPkeEncrypt(
var encryptA = generateA(rho, true);
short[][] encryptR = new short[mlKem_k][];
short[][] encryptE1 = new short[mlKem_k][];
+ short[] encryptE2;
int encryptN = 0;
byte[] prfSeed = new byte[sigma.length + 1];
System.arraycopy(sigma, 0, prfSeed, 0, sigma.length);
+ Arrays.fill(sigma, (byte)0);
- var kPkePRFeta1 = new SHAKE256(64 * mlKem_eta1);
- var kPkePRFeta2 = new SHAKE256(64 * mlKem_eta2);
- for (int i = 0; i < mlKem_k; i++) {
- prfSeed[sigma.length] = (byte) (encryptN++);
- kPkePRFeta1.update(prfSeed);
- byte[] cbdInput = kPkePRFeta1.digest();
- encryptR[i] = centeredBinomialDistribution(mlKem_eta1, cbdInput);
- }
- for (int i = 0; i < mlKem_k; i++) {
- prfSeed[sigma.length] = (byte) (encryptN++);
+ int cbdInput1Len = 64 * mlKem_eta1;
+ var kPkePRFeta1 = new SHAKE256(cbdInput1Len);
+ byte[] cbdInput1 = new byte[cbdInput1Len];
+ int cbdInput2Len = 64 * mlKem_eta2;
+ var kPkePRFeta2 = new SHAKE256(cbdInput2Len);
+ byte[] cbdInput2 = new byte[cbdInput2Len];
+ try {
+ for (int i = 0; i < mlKem_k; i++) {
+ prfSeed[sigma.length] = (byte) (encryptN++);
+ kPkePRFeta1.update(prfSeed);
+ kPkePRFeta1.digest(cbdInput1, 0, cbdInput1Len);
+ encryptR[i] = centeredBinomialDistribution(mlKem_eta1, cbdInput1);
+ }
+ for (int i = 0; i < mlKem_k; i++) {
+ prfSeed[sigma.length] = (byte) (encryptN++);
+ kPkePRFeta2.update(prfSeed);
+ kPkePRFeta2.digest(cbdInput2, 0, cbdInput2Len);
+ encryptE1[i] = centeredBinomialDistribution(mlKem_eta2, cbdInput2);
+ }
+ prfSeed[sigma.length] = (byte) encryptN;
kPkePRFeta2.update(prfSeed);
- byte[] cbdInput = kPkePRFeta2.digest();
- encryptE1[i] = centeredBinomialDistribution(mlKem_eta2, cbdInput);
+ kPkePRFeta2.digest(cbdInput2, 0, cbdInput2Len);
+ encryptE2 = centeredBinomialDistribution(mlKem_eta2, cbdInput2);
+ } catch (DigestException e) {
+ throw new ProviderException("Internal error", e);
+ } finally {
+ kPkePRFeta1.reset();
+ kPkePRFeta2.reset();
+ Arrays.fill(prfSeed, (byte)0);
+ Arrays.fill(cbdInput1, (byte)0);
+ Arrays.fill(cbdInput2, (byte)0);
}
- prfSeed[sigma.length] = (byte) encryptN;
- kPkePRFeta2.reset();
- kPkePRFeta2.update(prfSeed);
- byte[] cbdInput = kPkePRFeta2.digest();
- var encryptE2 = centeredBinomialDistribution(mlKem_eta2, cbdInput);
var encryptRHat = mlKemVectorNTT(encryptR);
var encryptUHat = mlKemMatrixVectorMuladd(encryptA, encryptRHat, zeroes);
var encryptU = mlKemVectorInverseNTT(encryptUHat);
encryptU = mlKemAddVec(encryptU, encryptE1);
+
+ for (int i = 0; i < mlKem_k; i++) {
+ Arrays.fill(encryptE1[i], (short)0);
+ }
+
var encryptVHat = mlKemVectorScalarMult(encryptTHat, encryptRHat);
var encryptV = mlKemInverseNTT(encryptVHat);
encryptV = mlKemAddPoly(encryptV, encryptE2, decompressDecode(message));
var encryptC1 = encodeVector(mlKem_du, compressVector10_11(encryptU, mlKem_du));
var encryptC2 = encodePoly(mlKem_dv, compressPoly4_5(encryptV, mlKem_dv));
+ Arrays.fill(encryptE2, (short)0);
+ Arrays.fill(encryptV, (short)0);
byte[] result = new byte[encryptC1.length + encryptC2.length];
System.arraycopy(encryptC1, 0,
@@ -783,9 +812,11 @@ private byte[] kPkeDecrypt(K_PKE_DecryptionKey privateKey,
Arrays.fill(decryptSHat[i], (short) 0);
}
decryptV = mlKemSubtractPoly(decryptV, decryptSU);
+ var result = encodeCompress(decryptV);
Arrays.fill(decryptSU, (short) 0);
+ Arrays.fill(decryptV, (short) 0);
- return encodeCompress(decryptV);
+ return result;
}
/*
diff --git a/src/java.base/share/classes/java/lang/LazyConstant.java b/src/java.base/share/classes/java/lang/LazyConstant.java
index 85f9d0e82fd8..77d36bb2f52e 100644
--- a/src/java.base/share/classes/java/lang/LazyConstant.java
+++ b/src/java.base/share/classes/java/lang/LazyConstant.java
@@ -29,28 +29,35 @@
import jdk.internal.lang.LazyConstantImpl;
import java.io.Serializable;
-import java.util.*;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.Supplier;
/**
- * A lazy constant is a holder of contents that can be set at most once.
+ * A lazy constant is a holder of content that can be initialized at most once.
*
* A lazy constant is created using the factory method
* {@linkplain LazyConstant#of(Supplier) LazyConstant.of({@code })}.
*
- * When created, the lazy constant is not initialized, meaning it has no contents.
+ * When created, the lazy constant is not initialized, meaning it has no content.
*
* The lazy constant (of type {@code T}) can then be initialized
- * (and its contents retrieved) by calling {@linkplain #get() get()}. The first time
+ * (and its content retrieved) by calling {@linkplain #get() get()}. The first time
* {@linkplain #get() get()} is called, the underlying computing function
* (provided at construction) will be invoked and the result will be used to initialize
* the constant.
*
- * Once a lazy constant is initialized, its contents can never change
- * and will be retrieved over and over again upon subsequent {@linkplain #get() get()}
- * invocations.
+ * Once a lazy constant is initialized, its content can never change
+ * and will always be returned by subsequent {@linkplain #get() get()} invocations.
*
* Consider the following example where a lazy constant field "{@code logger}" holds
* an object of type {@code Logger}:
@@ -83,12 +90,25 @@
* may result in storage resources being prepared.
*
*
Exception handling
- * If the computing function returns {@code null}, a {@linkplain NullPointerException}
- * is thrown. Hence, a lazy constant can never hold a {@code null} value. Clients who
- * want to use a nullable constant can wrap the value into an {@linkplain Optional} holder.
+ * If evaluation of the computing function throws an unchecked exception (i.e., a runtime
+ * exception or an error), the lazy constant is not initialized but instead transitions to
+ * an error state whereafter a {@linkplain NoSuchElementException} is thrown with the
+ * unchecked exception as a cause. Subsequent {@linkplain #get() get()} calls throw
+ * {@linkplain NoSuchElementException} (without ever invoking the computing function
+ * again) with no cause and with a message that includes the name of the original
+ * unchecked exception's class.
+ *
+ * All failures are handled in this way. There are two special cases that cause unchecked
+ * exceptions to be thrown:
*
- * If the computing function recursively invokes itself via the lazy constant, an
- * {@linkplain IllegalStateException} is thrown, and the lazy constant is not initialized.
+ * If the computing function returns {@code null}, a {@linkplain NoSuchElementException}
+ * (with a {@linkplain NullPointerException} as a cause) will be thrown. Hence, a
+ * lazy constant can never hold a {@code null} value. Clients who want to use a nullable
+ * constant can wrap the value into an {@linkplain Optional} holder.
+ *
+ * If the computing function recursively invokes itself via the lazy constant, a
+ * {@linkplain NoSuchElementException} (with an {@linkplain IllegalStateException} as a
+ * cause) will be thrown.
*
*
Composing lazy constants
* A lazy constant can depend on other lazy constants, forming a dependency graph
@@ -98,31 +118,25 @@
* which are held by lazy constants:
*
* {@snippet lang = java:
- * public final class DependencyUtil {
- *
- * private DependencyUtil() {}
+ * public static class Foo {
+ * // ...
+ * }
*
- * public static class Foo {
+ * public static class Bar {
+ * public Bar(Foo foo) {
* // ...
- * }
- *
- * public static class Bar {
- * public Bar(Foo foo) {
- * // ...
- * }
* }
+ * }
*
- * private static final LazyConstant FOO = LazyConstant.of( Foo::new );
- * private static final LazyConstant BAR = LazyConstant.of( () -> new Bar(FOO.get()) );
- *
- * public static Foo foo() {
- * return FOO.get();
- * }
+ * static final LazyConstant FOO = LazyConstant.of( Foo::new );
+ * static final LazyConstant BAR = LazyConstant.of( () -> new Bar(FOO.get()) );
*
- * public static Bar bar() {
- * return BAR.get();
- * }
+ * public static Foo foo() {
+ * return FOO.get();
+ * }
*
+ * public static Bar bar() {
+ * return BAR.get();
* }
* }
* Calling {@code BAR.get()} will create the {@code Bar} singleton if it is not already
@@ -134,13 +148,17 @@
* competing threads are racing to initialize a lazy constant, only one updating thread
* runs the computing function (which runs on the caller's thread and is hereafter denoted
* the computing thread), while the other threads are blocked until the constant
- * is initialized, after which the other threads observe the lazy constant is initialized
- * and leave the constant unchanged and will never invoke any computation.
+ * is initialized (or computation fails), after which the other threads observe the lazy
 * constant is initialized (or has transitioned to an error state) and leave the constant
+ * unchanged and will never invoke any computation.
*
* The invocation of the computing function and the resulting initialization of
* the constant {@linkplain java.util.concurrent##MemoryVisibility happens-before}
* the initialized constant's content is read. Hence, the initialized constant's content,
* including any {@code final} fields of any newly created objects, is safely published.
+ * As subsequent retrieval of the content might be elided, there are no other memory
+ * ordering or visibility guarantees provided as a consequence of calling
+ * {@linkplain #get()} again.
*
* Thread interruption does not cancel the initialization of a lazy constant. In other
* words, if the computing thread is interrupted, {@code LazyConstant::get} doesn't clear
@@ -150,9 +168,9 @@
* lazy constant may block indefinitely; no timeouts or cancellations are provided.
*
*
Performance
- * The contents of a lazy constant can never change after the lazy constant has been
+ * The content of a lazy constant can never change after the lazy constant has been
* initialized. Therefore, a JVM implementation may, for an initialized lazy constant,
- * elide all future reads of that lazy constant's contents and instead use the contents
+ * elide all future reads of that lazy constant's content and instead use the content
* that has been previously observed. We call this optimization constant folding.
* This is only possible if there is a direct reference from a {@code static final} field
* to a lazy constant or if there is a chain from a {@code static final} field -- via one
@@ -160,15 +178,10 @@
* {@linkplain Record record} fields, or final instance fields in hidden classes) --
* to a lazy constant.
*
- *
Miscellaneous
- * Except for {@linkplain Object#equals(Object) equals(obj)} and
- * {@linkplain #orElse(Object) orElse(other)} parameters, all method parameters
- * must be non-null, or a {@link NullPointerException} will be thrown.
- *
- * @apiNote Once a lazy constant is initialized, its contents cannot ever be removed.
+ * @apiNote Once a lazy constant is initialized, its content can't be removed.
* This can be a source of an unintended memory leak. More specifically,
* a lazy constant {@linkplain java.lang.ref##reachability strongly references}
- * it contents. Hence, the contents of a lazy constant will be reachable as long
+ * its content. Hence, the content of a lazy constant will be reachable as long
* as the lazy constant itself is reachable.
*
* While it's possible to store an array inside a lazy constant, doing so will
@@ -185,7 +198,7 @@
* @implNote
* A lazy constant is free to synchronize on itself. Hence, care must be
* taken when directly or indirectly synchronizing on a lazy constant.
- * A lazy constant is unmodifiable but its contents may or may not be
+ * A lazy constant is unmodifiable but its content may or may not be
* immutable (e.g., it may hold an {@linkplain ArrayList}).
*
* @param type of the constant
@@ -205,39 +218,24 @@ public sealed interface LazyConstant
permits LazyConstantImpl {
/**
- * {@return the contents of this lazy constant if initialized, otherwise,
- * returns {@code other}}
+ * {@return the initialized content of this constant, computing it if necessary}
*
- * This method never triggers initialization of this lazy constant and will observe
- * initialization by other threads atomically (i.e., it returns the contents
- * if and only if the initialization has already completed).
- *
- * @param other value to return if the content is not initialized
- * (can be {@code null})
- */
- T orElse(T other);
-
- /**
- * {@return the contents of this initialized constant. If not initialized, first
- * computes and initializes this constant using the computing function}
+ * If this constant is not initialized, first computes and initializes it
+ * using the computing function.
*
* After this method returns successfully, the constant is guaranteed to be
* initialized.
*
- * If the computing function throws, the throwable is relayed to the caller and
- * the lazy constant remains uninitialized; a subsequent call to get() may then
- * attempt the computation again.
+ * If an unchecked exception is thrown when evaluating the computing function or if
+ * the computing function returns {@code null}, this lazy constant is not initialized
+ * but transitions to an error state whereafter a {@linkplain NoSuchElementException}
+ * is thrown as described in the {@linkplain ##exception-handling Exception handling}
+ * section.
+ *
+ * @throws NoSuchElementException if this lazy constant is in an error state
*/
T get();
- /**
- * {@return {@code true} if the constant is initialized, {@code false} otherwise}
- *
- * This method never triggers initialization of this lazy constant and will observe
- * changes in the initialization state made by other threads atomically.
- */
- boolean isInitialized();
-
// Object methods
/**
@@ -245,7 +243,7 @@ public sealed interface LazyConstant
* the provided {@code obj}, otherwise {@code false}}
*
* In other words, equals compares the identity of this lazy constant and {@code obj}
- * to determine equality. Hence, two distinct lazy constants with the same contents are
+ * to determine equality. Hence, two distinct lazy constants with the same content are
* not equal.
*
* This method never triggers initialization of this lazy constant.
@@ -267,11 +265,11 @@ public sealed interface LazyConstant
*
* This method never triggers initialization of this lazy constant and will observe
* initialization by other threads atomically (i.e., it observes the
- * contents if and only if the initialization has already completed).
+ * content if and only if the initialization has already completed).
*
* If this lazy constant is initialized, an implementation-dependent string
* containing the {@linkplain Object#toString()} of the
- * contents will be returned; otherwise, an implementation-dependent string is
+ * content will be returned; otherwise, an implementation-dependent string is
* returned that indicates this lazy constant is not yet initialized.
*/
@Override
@@ -280,31 +278,41 @@ public sealed interface LazyConstant
// Factory
/**
- * {@return a lazy constant whose contents is to be computed later via the provided
- * {@code computingFunction}}
+ * {@return a new lazy constant whose content is to be computed later via the
+ * provided {@code computingFunction}}
*
* The returned lazy constant strongly references the provided
- * {@code computingFunction} at least until initialization completes successfully.
+ * {@code computingFunction} until computation completes (successfully or with
+ * failure).
*
- * If the provided computing function is already an instance of
- * {@code LazyConstant}, the method is free to return the provided computing function
- * directly.
+ * By design, the method always returns a new lazy constant even if the provided
+ * computing function is already an instance of {@code LazyConstant}. Clients that
+ * want to elide creation under this condition can write a utility method similar
+ * to the one in the snippet below and create lazy constants via this method rather
+ * than calling the built-in factory {@linkplain #of(Supplier)} directly:
+ *
+ * {@snippet lang = java:
+ * static LazyConstant ofFlattened(Supplier extends T> computingFunction) {
+ * return (computingFunction instanceof LazyConstant extends T> lc)
+ * ? (LazyConstant) lc // unchecked cast is safe under normal generic usage
+ * : LazyConstant.of(computingFunction);
+ * }
+ * }
*
- * @implNote after initialization completes successfully, the computing function is
- * no longer strongly referenced and becomes eligible for
- * garbage collection.
+ * @implNote after the computing function completes (regardless of whether it
+ * succeeds or throws an unchecked exception), the computing function is no
+ * longer strongly referenced and becomes eligible for garbage collection.
*
* @param computingFunction in the form of a {@linkplain Supplier} to be used
* to initialize the constant
* @param type of the constant
+ * @throws NullPointerException if the provided {@code computingFunction} is
+ * {@code null}
*
*/
@SuppressWarnings("unchecked")
static LazyConstant of(Supplier extends T> computingFunction) {
Objects.requireNonNull(computingFunction);
- if (computingFunction instanceof LazyConstant extends T> lc) {
- return (LazyConstant) lc;
- }
return LazyConstantImpl.ofLazy(computingFunction);
}
diff --git a/src/java.base/share/classes/java/math/BigDecimal.java b/src/java.base/share/classes/java/math/BigDecimal.java
index 6e651b4fde28..3cfe18e84c30 100644
--- a/src/java.base/share/classes/java/math/BigDecimal.java
+++ b/src/java.base/share/classes/java/math/BigDecimal.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -155,6 +155,7 @@
*
Multiply
multiplier.scale() + multiplicand.scale()
*
Divide
dividend.scale() - divisor.scale()
*
Square root
ceil(radicand.scale()/2.0)
+ *
nth root
ceil((double) radicand.scale()/n)
*
*
*
@@ -2142,149 +2143,224 @@ public BigDecimal[] divideAndRemainder(BigDecimal divisor, MathContext mc) {
* @since 9
*/
public BigDecimal sqrt(MathContext mc) {
+ return rootn(2, mc);
+ }
+
+ /**
+ * Returns an approximation to the {@code n}th root of {@code this}
+ * with rounding according to the context settings.
+ *
+ *
The preferred scale of the returned result is equal to
+ * {@code Math.ceilDiv(this.scale(), n)}. The value of the returned result is
+ * always within one ulp of the exact decimal value for the
+ * precision in question. If the rounding mode is {@link
+ * RoundingMode#HALF_UP HALF_UP}, {@link RoundingMode#HALF_DOWN
+ * HALF_DOWN}, or {@link RoundingMode#HALF_EVEN HALF_EVEN}, the
+ * result is within one half an ulp of the exact decimal value.
+ *
+ *
Special case:
+ *
+ *
The {@code n}th root of a number numerically equal to {@code
+ * ZERO} is numerically equal to {@code ZERO} with a preferred
+ * scale according to the general rule above. In particular, for
+ * {@code ZERO}, {@code ZERO.rootn(n, mc).equals(ZERO)} is true with
+ * any {@code MathContext} as an argument.
+ *
+ *
+ * @param n the root degree
+ * @param mc the context to use.
+ * @return the {@code n}th root of {@code this}.
+ * @throws ArithmeticException if {@code n == 0 || n == Integer.MIN_VALUE}.
+ * @throws ArithmeticException if {@code n} is even and {@code this} is negative.
+ * @throws ArithmeticException if {@code n} is negative and {@code this} is zero.
+ * @throws ArithmeticException if an exact result is requested
+ * ({@code mc.getPrecision() == 0}) and there is no finite decimal
+ * expansion of the exact result
+ * @throws ArithmeticException if
+ * {@code (mc.getRoundingMode() == RoundingMode.UNNECESSARY}) and
+ * the exact result cannot fit in {@code mc.getPrecision()} digits.
+ * @see #sqrt(MathContext)
+ * @see BigInteger#rootn(int)
+ * @since 27
+ * @apiNote Note that calling {@code rootn(2, mc)} is equivalent to calling {@code sqrt(mc)}.
+ */
+ public BigDecimal rootn(int n, MathContext mc) {
+ // Special cases
+ if (n == 0)
+ throw new ArithmeticException("Zero root degree");
+
final int signum = signum();
- if (signum != 1) {
- switch (signum) {
- case -1 -> throw new ArithmeticException("Attempted square root of negative BigDecimal");
- case 0 -> {
- BigDecimal result = valueOf(0L, scale/2);
- assert squareRootResultAssertions(result, mc);
- return result;
- }
- default -> throw new AssertionError("Bad value from signum");
- }
+ if (signum < 0 && (n & 1) == 0)
+ throw new ArithmeticException("Negative radicand with even root degree");
+
+ final int preferredScale = saturateLong(Math.ceilDiv((long) this.scale, n));
+ if (signum == 0) {
+ if (n < 0)
+ throw new ArithmeticException("Zero radicand with negative root degree");
+
+ return zeroValueOf(preferredScale);
}
+
/*
* The main steps of the algorithm below are as follows,
* first argument reduce the value to an integer
- * using the following relations:
+ * using the following relations, assuming n > 0:
*
* x = y * 10 ^ exp
- * sqrt(x) = sqrt(y) * 10^(exp / 2) if exp is even
- * sqrt(x) = sqrt(y*10) * 10^((exp-1)/2) is exp is odd
+ * rootn(x, n) = rootn(y, n) * 10^(exp / n) if exp mod n == 0
+ * rootn(x, n) = rootn(y*10^(exp mod n), n) * 10^((exp - (exp mod n))/n) otherwise
*
- * Then use BigInteger.sqrt() on the reduced value to compute
+ * where exp mod n == Math.floorMod(exp, n).
+ *
+ * Then use BigInteger.rootn() on the reduced value to compute
* the numerical digits of the desired result.
*
* Finally, scale back to the desired exponent range and
* perform any adjustment to get the preferred scale in the
* representation.
*/
-
- // The code below favors relative simplicity over checking
- // for special cases that could run faster.
- final int preferredScale = Math.ceilDiv(this.scale, 2);
-
+ final int nAbs = Math.absExact(n);
BigDecimal result;
if (mc.roundingMode == RoundingMode.UNNECESSARY || mc.precision == 0) { // Exact result requested
// To avoid trailing zeros in the result, strip trailing zeros.
final BigDecimal stripped = this.stripTrailingZeros();
- final int strippedScale = stripped.scale;
-
- if ((strippedScale & 1) != 0) // 10*stripped.unscaledValue() can't be an exact square
- throw new ArithmeticException("Computed square root not exact.");
- // Check for even powers of 10. Numerically sqrt(10^2N) = 10^N
- if (stripped.isPowerOfTen()) {
- result = valueOf(1L, strippedScale >> 1);
- // Adjust to requested precision and preferred
- // scale as appropriate.
- return result.adjustToPreferredScale(preferredScale, mc.precision);
- }
+ // if stripped.scale is not a multiple of n,
+ // 10^((-stripped.scale) mod n)*stripped.unscaledValue() can't be an exact power
+ if (stripped.scale % n != 0)
+ throw new ArithmeticException("Computed root not exact.");
// After stripTrailingZeros, the representation is normalized as
//
// unscaledValue * 10^(-scale)
//
// where unscaledValue is an integer with the minimum
- // precision for the cohort of the numerical value and the scale is even.
- BigInteger[] sqrtRem = stripped.unscaledValue().sqrtAndRemainder();
- result = new BigDecimal(sqrtRem[0], strippedScale >> 1);
-
- // If result*result != this numerically or requires too high precision,
- // the square root isn't exact
- if (sqrtRem[1].signum != 0 || mc.precision != 0 && result.precision() > mc.precision)
- throw new ArithmeticException("Computed square root not exact.");
-
- // Test numerical properties at full precision before any
- // scale adjustments.
- assert squareRootResultAssertions(result, mc);
- // Adjust to requested precision and preferred
- // scale as appropriate.
+ // precision for the cohort of the numerical value and the scale is a multiple of n.
+ BigInteger[] rootRem = stripped.unscaledValue().rootnAndRemainder(nAbs);
+ result = new BigDecimal(rootRem[0], stripped.scale / nAbs);
+ // If result^nAbs != this numerically, the root isn't exact
+ if (rootRem[1].signum != 0)
+ throw new ArithmeticException("Computed root not exact.");
+
+ if (n > 0) {
+ // If result requires too high precision, the root isn't exact
+ if (mc.precision != 0 && result.precision() > mc.precision)
+ throw new ArithmeticException("Computed root not exact.");
+ } else {
+ try {
+ result = ONE.divide(result, mc);
+ } catch (ArithmeticException e) {
+ // The exact result requires too high precision,
+ // including non-terminating decimal expansions
+ throw new ArithmeticException("Computed root not exact.");
+ }
+ }
+ // Test numerical properties at full precision before any scale adjustments.
+ assert rootnResultAssertions(result, mc, n);
+ // Adjust to requested precision and preferred scale as appropriate.
return result.adjustToPreferredScale(preferredScale, mc.precision);
}
- // To allow BigInteger.sqrt() to be used to get the square
- // root, it is necessary to normalize the input so that
- // its integer part is sufficient to get the square root
+
+ // Handle negative radicands
+ BigDecimal x = this;
+ if (signum < 0) {
+ x = x.negate();
+ if (mc.roundingMode == RoundingMode.FLOOR) {
+ mc = new MathContext(mc.precision, RoundingMode.UP);
+ } else if (mc.roundingMode == RoundingMode.CEILING) {
+ mc = new MathContext(mc.precision, RoundingMode.DOWN);
+ }
+ }
+
+ // To allow BigInteger.rootn() to be used to get the root,
+ // it is necessary to normalize the input so that
+ // its integer part is sufficient to get the root
// with the desired precision.
final boolean halfWay = isHalfWay(mc.roundingMode);
- // To obtain a square root with N digits,
- // the radicand must have at least 2*(N-1)+1 == 2*N-1 digits.
- final long minWorkingPrec = ((mc.precision + (halfWay ? 1L : 0L)) << 1) - 1L;
- // normScale is the number of digits to take from the fraction of the input
- long normScale = minWorkingPrec - this.precision() + this.scale;
- normScale += normScale & 1L; // the scale for normalizing must be even
-
- final long workingScale = this.scale - normScale;
- if (workingScale != (int) workingScale)
- throw new ArithmeticException("Overflow");
-
- BigDecimal working = new BigDecimal(this.intVal, this.intCompact, (int) workingScale, this.precision);
- BigInteger workingInt = working.toBigInteger();
-
- BigInteger sqrt;
- long resultScale = normScale >> 1;
- // Round sqrt with the specified settings
+ final long rootDigits = mc.precision + (halfWay ? 1L : 0L);
+ // To obtain an n-th root with k digits,
+ // the radicand must have at least n*(k-1)+1 digits.
+ final long minWorkingPrec = nAbs * (rootDigits - 1L) + 1L;
+
+ long normScale; // the number of digits to take from the fraction of the input
+ BigDecimal working = null, xInv = null;
+ BigInteger workingInt;
+ if (n > 0) {
+ normScale = minWorkingPrec - x.precision() + x.scale;
+ int mod = Math.floorMod(normScale, n);
+ if (mod != 0) // the scale for normalizing must be a multiple of n
+ normScale += n - mod;
+
+ working = new BigDecimal(x.intVal, x.intCompact, checkScaleNonZero(x.scale - normScale), x.precision);
+ workingInt = working.toBigInteger();
+ } else { // Handle negative degrees
+ /* Computing the n-th root of x is equivalent
+ * to computing the (-n)-th root of 1/x.
+ */
+ // Compute the scale for xInv, in order to ensure
+ // that xInv's precision is at least minWorkingPrec
+ final int fracZeros = x.precision() - 1 - (x.isPowerOfTen() ? 1 : 0);
+ normScale = minWorkingPrec + fracZeros - x.scale;
+ int mod = Math.floorMod(normScale, nAbs);
+ if (mod != 0)
+ normScale += nAbs - mod;
+
+ xInv = ONE.divide(x, checkScaleNonZero(normScale), RoundingMode.DOWN);
+ workingInt = xInv.unscaledValue();
+ }
+
+ // Compute and round the root with the specified settings
+ BigInteger root;
+ long resultScale = normScale / nAbs;
+ boolean increment = false;
if (halfWay) { // half-way rounding
- BigInteger workingSqrt = workingInt.sqrt();
+ BigInteger[] rootRem = workingInt.rootnAndRemainder(nAbs);
// remove the one-tenth digit
- BigInteger[] quotRem10 = workingSqrt.divideAndRemainder(BigInteger.TEN);
- sqrt = quotRem10[0];
+ BigInteger[] quotRem10 = rootRem[0].divideAndRemainder(BigInteger.TEN);
+ root = quotRem10[0];
resultScale--;
- boolean increment = false;
int digit = quotRem10[1].intValue();
if (digit > 5) {
increment = true;
} else if (digit == 5) {
if (mc.roundingMode == RoundingMode.HALF_UP
- || mc.roundingMode == RoundingMode.HALF_EVEN && sqrt.testBit(0)
+ || mc.roundingMode == RoundingMode.HALF_EVEN && root.testBit(0)
// Check if remainder is non-zero
- || !workingInt.equals(workingSqrt.multiply(workingSqrt))
- || !working.isInteger()) {
+ || rootRem[1].signum != 0
+ || (n > 0 ? !working.isInteger() : xInv.multiply(x).compareMagnitude(ONE) != 0)) {
increment = true;
}
}
-
- if (increment)
- sqrt = sqrt.add(1L);
} else {
switch (mc.roundingMode) {
- case DOWN, FLOOR -> sqrt = workingInt.sqrt(); // No need to round
+ case DOWN, FLOOR -> root = workingInt.rootn(nAbs); // No need to round
case UP, CEILING -> {
- BigInteger[] sqrtRem = workingInt.sqrtAndRemainder();
- sqrt = sqrtRem[0];
+ BigInteger[] rootRem = workingInt.rootnAndRemainder(nAbs);
+ root = rootRem[0];
// Check if remainder is non-zero
- if (sqrtRem[1].signum != 0 || !working.isInteger())
- sqrt = sqrt.add(1L);
+ if (rootRem[1].signum != 0
+ || (n > 0 ? !working.isInteger() : xInv.multiply(x).compareMagnitude(ONE) != 0))
+ increment = true;
}
default -> throw new AssertionError("Unexpected value for RoundingMode: " + mc.roundingMode);
}
}
+ if (increment) {
+ root = root.add(1L);
+ }
- result = new BigDecimal(sqrt, checkScale(sqrt, resultScale), mc); // mc ensures no increase of precision
- // Test numerical properties at full precision before any
- // scale adjustments.
- assert squareRootResultAssertions(result, mc);
- // Adjust to requested precision and preferred
- // scale as appropriate.
- if (result.scale > preferredScale) // else can't increase the result's precision to fit the preferred scale
+ result = new BigDecimal(root, checkScale(root, resultScale), mc); // mc ensures no increase of precision
+ // Test numerical properties at full precision before any scale adjustments.
+ assert rootnResultAssertions(result, mc, n);
+ // Adjust to requested precision and preferred scale as appropriate.
+ if (result.scale > preferredScale) // else can't increase result's precision to fit the preferred scale
result = stripZerosToMatchScale(result.intVal, result.intCompact, result.scale, preferredScale);
- return result;
+ return signum > 0 ? result : result.negate();
}
/**
@@ -2315,12 +2391,8 @@ private static boolean isHalfWay(RoundingMode m) {
};
}
- private BigDecimal square() {
- return this.multiply(this);
- }
-
private boolean isPowerOfTen() {
- return BigInteger.ONE.equals(this.unscaledValue());
+ return this.stripTrailingZeros().unscaledValue().equals(BigInteger.ONE);
}
/**
@@ -2331,92 +2403,102 @@ private boolean isPowerOfTen() {
*
*
*
- *
For DOWN and FLOOR, result^2 must be {@code <=} the input
- * and (result+ulp)^2 must be {@code >} the input.
+ *
For DOWN and FLOOR if input > 0 and CEIL if input < 0,
+ * |result|^n must be {@code <=} |input|
+ * and (|result|+ulp)^n must be {@code >} |input|.
*
- *
Conversely, for UP and CEIL, result^2 must be {@code >=}
- * the input and (result-ulp)^2 must be {@code <} the input.
+ *
Conversely, for UP and FLOOR if input < 0 and CEIL if input > 0,
+ * |result|^n must be {@code >=} |input|
+ * and (|result|-ulp)^n must be {@code <} |input|.
*
*/
- private boolean squareRootResultAssertions(BigDecimal result, MathContext mc) {
- if (result.signum() == 0) {
- return squareRootZeroResultAssertions(result, mc);
- } else {
- RoundingMode rm = mc.getRoundingMode();
- BigDecimal ulp = result.ulp();
- BigDecimal neighborUp = result.add(ulp);
- // Make neighbor down accurate even for powers of ten
- if (result.isPowerOfTen()) {
- ulp = ulp.divide(TEN);
+ private boolean rootnResultAssertions(BigDecimal result, MathContext mc, int n) {
+ BigDecimal rad = this.abs(), resAbs = result.abs();
+ RoundingMode rm = mc.roundingMode;
+ if (this.signum() < 0) {
+ if (rm == RoundingMode.FLOOR) {
+ rm = RoundingMode.UP;
+ } else if (rm == RoundingMode.CEILING) {
+ rm = RoundingMode.DOWN;
}
- BigDecimal neighborDown = result.subtract(ulp);
-
- // Both the starting value and result should be nonzero and positive.
- assert (result.signum() == 1 &&
- this.signum() == 1) :
- "Bad signum of this and/or its sqrt.";
-
- switch (rm) {
- case DOWN:
- case FLOOR:
- assert
- result.square().compareTo(this) <= 0 &&
- neighborUp.square().compareTo(this) > 0:
- "Square of result out for bounds rounding " + rm;
- return true;
+ }
- case UP:
- case CEILING:
- assert
- result.square().compareTo(this) >= 0 &&
- neighborDown.square().compareTo(this) < 0:
- "Square of result out for bounds rounding " + rm;
- return true;
+ int nAbs = Math.abs(n);
+ BigDecimal ulp = resAbs.ulp();
+ BigDecimal neighborUp = resAbs.add(ulp);
+ // Make neighbor down accurate even for powers of ten
+ if (resAbs.isPowerOfTen()) {
+ ulp = ulp.scaleByPowerOfTen(-1);
+ }
+ BigDecimal neighborDown = resAbs.subtract(ulp);
+
+ switch (rm) {
+ case DOWN:
+ case FLOOR:
+ assert
+ (n > 0 ? resAbs.pow(nAbs).compareTo(rad) <= 0 &&
+ neighborUp.pow(nAbs).compareTo(rad) > 0
+ : resAbs.pow(nAbs).multiply(rad).compareTo(ONE) <= 0 &&
+ neighborUp.pow(nAbs).multiply(rad).compareTo(ONE) > 0)
+ : "Power of result out for bounds rounding " + rm;
+ return true;
+ case UP:
+ case CEILING:
+ assert
+ (n > 0 ? resAbs.pow(nAbs).compareTo(rad) >= 0 &&
+ neighborDown.pow(nAbs).compareTo(rad) < 0
+ : resAbs.pow(nAbs).multiply(rad).compareTo(ONE) >= 0 &&
+ neighborDown.pow(nAbs).multiply(rad).compareTo(ONE) < 0)
+ : "Power of result out for bounds rounding " + rm;
+ return true;
- case HALF_DOWN:
- case HALF_EVEN:
- case HALF_UP:
- BigDecimal err = result.square().subtract(this).abs();
- BigDecimal errUp = neighborUp.square().subtract(this);
- BigDecimal errDown = this.subtract(neighborDown.square());
- // All error values should be positive so don't need to
- // compare absolute values.
-
- int err_comp_errUp = err.compareTo(errUp);
- int err_comp_errDown = err.compareTo(errDown);
-
- assert
- errUp.signum() == 1 &&
- errDown.signum() == 1 :
- "Errors of neighbors squared don't have correct signs";
-
- // For breaking a half-way tie, the return value may
- // have a larger error than one of the neighbors. For
- // example, the square root of 2.25 to a precision of
- // 1 digit is either 1 or 2 depending on how the exact
- // value of 1.5 is rounded. If 2 is returned, it will
- // have a larger rounding error than its neighbor 1.
- assert
- err_comp_errUp <= 0 ||
- err_comp_errDown <= 0 :
- "Computed square root has larger error than neighbors for " + rm;
-
- assert
- ((err_comp_errUp == 0 ) ? err_comp_errDown < 0 : true) &&
- ((err_comp_errDown == 0 ) ? err_comp_errUp < 0 : true) :
- "Incorrect error relationships";
- // && could check for digit conditions for ties too
- return true;
- default: // Definition of UNNECESSARY already verified.
- return true;
+ case HALF_DOWN:
+ case HALF_EVEN:
+ case HALF_UP:
+ BigDecimal err, errUp, errDown;
+ if (n > 0) {
+ err = resAbs.pow(nAbs).subtract(rad).abs();
+ errUp = neighborUp.pow(nAbs).subtract(rad);
+ errDown = rad.subtract(neighborDown.pow(nAbs));
+ } else {
+ err = resAbs.pow(nAbs).multiply(rad).subtract(ONE).abs();
+ errUp = neighborUp.pow(nAbs).multiply(rad).subtract(ONE);
+ errDown = ONE.subtract(neighborDown.pow(nAbs).multiply(rad));
}
- }
- }
- private boolean squareRootZeroResultAssertions(BigDecimal result, MathContext mc) {
- return this.compareTo(ZERO) == 0;
+ // All error values should be positive
+ // so don't need to compare absolute values.
+ int err_comp_errUp = err.compareTo(errUp);
+ int err_comp_errDown = err.compareTo(errDown);
+
+ assert
+ errUp.signum() == 1 &&
+ errDown.signum() == 1
+ : "Errors of neighbors powered don't have correct signs";
+
+ // For breaking a half-way tie, the return value may
+ // have a larger error than one of the neighbors. For
+ // example, the square root of 2.25 to a precision of
+ // 1 digit is either 1 or 2 depending on how the exact
+ // value of 1.5 is rounded. If 2 is returned, it will
+ // have a larger rounding error than its neighbor 1.
+ assert
+ err_comp_errUp <= 0 ||
+ err_comp_errDown <= 0 :
+ "Computed root has larger error than neighbors for " + rm;
+
+ assert
+ ((err_comp_errUp == 0 ) ? err_comp_errDown < 0 : true) &&
+ ((err_comp_errDown == 0 ) ? err_comp_errUp < 0 : true) :
+ "Incorrect error relationships";
+ // && could check for digit conditions for ties too
+ return true;
+
+ default: // Definition of UNNECESSARY already verified.
+ return true;
+ }
}
/**
diff --git a/src/java.base/share/classes/java/net/Socket.java b/src/java.base/share/classes/java/net/Socket.java
index 42ca5314b78e..4c91a6ffce61 100644
--- a/src/java.base/share/classes/java/net/Socket.java
+++ b/src/java.base/share/classes/java/net/Socket.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1632,16 +1632,15 @@ public void close() throws IOException {
}
/**
- * Places the input stream for this socket at "end of stream".
- * Any data sent to the input stream side of the socket is acknowledged
- * and then silently discarded.
+ * Shuts down the connection for reading without closing the socket.
*
- * If you read from a socket input stream after invoking this method on the
- * socket, the stream's {@code available} method will return 0, and its
- * {@code read} methods will return {@code -1} (end of stream).
+ * If you read from a {@linkplain Socket#getInputStream() socket input stream}
+ * after invoking this method, the stream's {@code available} method will
+ * return {@code 0}, and its {@code read} methods will return {@code -1} (end of stream).
*
* @throws IOException if an I/O error occurs when shutting down this socket, the
- * socket is not connected or the socket is closed.
+ * socket is not connected, the socket is already shut down for reading,
+ * or the socket is closed.
*
* @since 1.3
* @see java.net.Socket#shutdownOutput()
@@ -1662,16 +1661,14 @@ public void shutdownInput() throws IOException {
}
/**
- * Disables the output stream for this socket.
- * For a TCP socket, any previously written data will be sent
- * followed by TCP's normal connection termination sequence.
- *
- * If you write to a socket output stream after invoking
- * shutdownOutput() on the socket, the stream will throw
- * an IOException.
+ * Shuts down the connection for writing without closing the socket.
+ *
+ * If you write to a {@linkplain Socket#getOutputStream() socket output stream}
+ * after invoking this method, the stream will throw an {@code IOException}.
*
- * @throws IOException if an I/O error occurs when shutting down this socket, the socket
- * is not connected or the socket is closed.
+ * @throws IOException if an I/O error occurs when shutting down this socket, the
+ * socket is not connected, the socket is already shut down for writing,
+ * or the socket is closed.
*
* @since 1.3
* @see java.net.Socket#shutdownInput()
@@ -1710,10 +1707,9 @@ public String toString() {
/**
* Returns the connection state of the socket.
*
- * Note: Closing a socket doesn't clear its connection state, which means
+ * {@linkplain #close() Closing} a socket doesn't clear its connection state, which means
* this method will return {@code true} for a closed socket
- * (see {@link #isClosed()}) if it was successfully connected prior
- * to being closed.
+ * if it was successfully connected prior to being closed.
*
* @return true if the socket was successfully connected to a server
* @since 1.4
@@ -1725,10 +1721,9 @@ public boolean isConnected() {
/**
* Returns the binding state of the socket.
*
- * Note: Closing a socket doesn't clear its binding state, which means
+ * {@linkplain #close() Closing} a socket doesn't clear its binding state, which means
* this method will return {@code true} for a closed socket
- * (see {@link #isClosed()}) if it was successfully bound prior
- * to being closed.
+ * if it was successfully bound prior to being closed.
*
* @return true if the socket was successfully bound to an address
* @since 1.4
@@ -1750,22 +1745,22 @@ public boolean isClosed() {
}
/**
- * Returns whether the read-half of the socket connection is closed.
+ * Returns {@code true} if the socket was shut down for reading.
*
- * @return true if the input of the socket has been shutdown
+ * @return true only if a prior call to {@link #shutdownInput()} completed successfully,
+ * false otherwise
* @since 1.4
- * @see #shutdownInput
*/
public boolean isInputShutdown() {
return isInputShutdown(state);
}
/**
- * Returns whether the write-half of the socket connection is closed.
+ * Returns {@code true} if the socket was shut down for writing.
*
- * @return true if the output of the socket has been shutdown
+ * @return true only if a prior call to {@link #shutdownOutput()} completed successfully,
+ * false otherwise
* @since 1.4
- * @see #shutdownOutput
*/
public boolean isOutputShutdown() {
return isOutputShutdown(state);
diff --git a/src/java.base/share/classes/java/text/AttributedString.java b/src/java.base/share/classes/java/text/AttributedString.java
index 0333efde81b5..52e5b5dba3c9 100644
--- a/src/java.base/share/classes/java/text/AttributedString.java
+++ b/src/java.base/share/classes/java/text/AttributedString.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,6 @@
* @see Annotation
* @since 1.2
*/
-
public class AttributedString {
// field holding the text
String text;
@@ -54,12 +53,11 @@ public class AttributedString {
// Fields holding run attribute information.
// Run attributes are organized by run.
// Arrays are always of equal lengths (the current capacity).
- // Since there are no vectors of int, we have to use arrays.
+ // Since there are not yet Lists of unboxed int, we use arrays.
private static final int INITIAL_CAPACITY = 10;
- int runCount; // actual number of runs, <= current capacity
- int[] runStarts; // start index for each run
- Vector[] runAttributes; // vector of attribute keys for each run
- Vector