author     amurillo <none@none>  2013-04-04 21:06:38 -0700
committer  amurillo <none@none>  2013-04-04 21:06:38 -0700
commit     e09d5361888c08c50c8cfe6df6861fe56ba2c945 (patch)
tree       0c2704f84892806ffa57f4baf1f4a6a6e89b2b1a
parent     594de0cbbad1478792fc9bd1f78fbd0ddc2f10e7 (diff)
parent     c776620f14c15f640d1e65efb2907b48a3ab812f (diff)
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java  9
-rw-r--r--  make/bsd/makefiles/buildtree.make  14
-rw-r--r--  make/build.sh (renamed from make/bsd/build.sh)  77
-rw-r--r--  make/hotspot_version  2
-rw-r--r--  make/linux/build.sh  98
-rw-r--r--  make/linux/makefiles/buildtree.make  16
-rw-r--r--  make/solaris/build.sh  127
-rw-r--r--  make/solaris/makefiles/buildtree.make  15
-rw-r--r--  src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp  38
-rw-r--r--  src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp  39
-rw-r--r--  src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp  11
-rw-r--r--  src/cpu/sparc/vm/c1_Runtime1_sparc.cpp  19
-rw-r--r--  src/cpu/x86/vm/c1_CodeStubs_x86.cpp  33
-rw-r--r--  src/cpu/x86/vm/c1_LIRAssembler_x86.cpp  38
-rw-r--r--  src/cpu/x86/vm/c1_LIRGenerator_x86.cpp  12
-rw-r--r--  src/cpu/x86/vm/c1_LinearScan_x86.cpp  3
-rw-r--r--  src/cpu/x86/vm/c1_Runtime1_x86.cpp  18
-rw-r--r--  src/cpu/x86/vm/cppInterpreter_x86.cpp  21
-rw-r--r--  src/cpu/x86/vm/macroAssembler_x86.cpp  44
-rw-r--r--  src/cpu/x86/vm/macroAssembler_x86.hpp  3
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_32.cpp  3
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_64.cpp  12
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_32.cpp  5
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_64.cpp  8
-rw-r--r--  src/cpu/x86/vm/templateInterpreter_x86_32.cpp  18
-rw-r--r--  src/cpu/x86/vm/templateInterpreter_x86_64.cpp  11
-rw-r--r--  src/cpu/x86/vm/x86_32.ad  90
-rw-r--r--  src/cpu/x86/vm/x86_64.ad  62
-rw-r--r--  src/os/bsd/vm/os_bsd.cpp  14
-rw-r--r--  src/os/linux/vm/os_linux.cpp  14
-rw-r--r--  src/os/posix/launcher/launcher.script  2
-rw-r--r--  src/os/posix/vm/os_posix.cpp  62
-rw-r--r--  src/os/solaris/vm/os_solaris.cpp  18
-rw-r--r--  src/os/windows/vm/os_windows.cpp  51
-rw-r--r--  src/os_cpu/bsd_x86/vm/bsd_x86_64.ad  18
-rw-r--r--  src/os_cpu/linux_x86/vm/linux_x86_64.ad  18
-rw-r--r--  src/os_cpu/solaris_x86/vm/solaris_x86_64.ad  29
-rw-r--r--  src/os_cpu/windows_x86/vm/windows_x86_64.ad  23
-rw-r--r--  src/share/vm/adlc/archDesc.cpp  1
-rw-r--r--  src/share/vm/adlc/dfa.cpp  41
-rw-r--r--  src/share/vm/c1/c1_Canonicalizer.cpp  2
-rw-r--r--  src/share/vm/c1/c1_Canonicalizer.hpp  2
-rw-r--r--  src/share/vm/c1/c1_CodeStubs.hpp  16
-rw-r--r--  src/share/vm/c1/c1_Compilation.cpp  54
-rw-r--r--  src/share/vm/c1/c1_Compilation.hpp  13
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp  28
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.hpp  2
-rw-r--r--  src/share/vm/c1/c1_IR.cpp  94
-rw-r--r--  src/share/vm/c1/c1_IR.hpp  7
-rw-r--r--  src/share/vm/c1/c1_Instruction.cpp  96
-rw-r--r--  src/share/vm/c1/c1_Instruction.hpp  145
-rw-r--r--  src/share/vm/c1/c1_InstructionPrinter.cpp  34
-rw-r--r--  src/share/vm/c1/c1_InstructionPrinter.hpp  2
-rw-r--r--  src/share/vm/c1/c1_LIR.cpp  15
-rw-r--r--  src/share/vm/c1/c1_LIR.hpp  36
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.hpp  3
-rw-r--r--  src/share/vm/c1/c1_LIRGenerator.cpp  135
-rw-r--r--  src/share/vm/c1/c1_LIRGenerator.hpp  4
-rw-r--r--  src/share/vm/c1/c1_LinearScan.cpp  37
-rw-r--r--  src/share/vm/c1/c1_Optimizer.cpp  11
-rw-r--r--  src/share/vm/c1/c1_RangeCheckElimination.cpp  1517
-rw-r--r--  src/share/vm/c1/c1_RangeCheckElimination.hpp  241
-rw-r--r--  src/share/vm/c1/c1_Runtime1.cpp  44
-rw-r--r--  src/share/vm/c1/c1_Runtime1.hpp  3
-rw-r--r--  src/share/vm/c1/c1_ValueMap.cpp  210
-rw-r--r--  src/share/vm/c1/c1_ValueMap.hpp  10
-rw-r--r--  src/share/vm/c1/c1_globals.hpp  18
-rw-r--r--  src/share/vm/ci/ciMethod.cpp  11
-rw-r--r--  src/share/vm/ci/ciMethod.hpp  8
-rw-r--r--  src/share/vm/classfile/classFileParser.cpp  11
-rw-r--r--  src/share/vm/classfile/classFileParser.hpp  1
-rw-r--r--  src/share/vm/classfile/classLoaderData.cpp  7
-rw-r--r--  src/share/vm/classfile/classLoaderData.hpp  1
-rw-r--r--  src/share/vm/classfile/javaClasses.hpp  15
-rw-r--r--  src/share/vm/classfile/symbolTable.cpp  11
-rw-r--r--  src/share/vm/classfile/symbolTable.hpp  1
-rw-r--r--  src/share/vm/classfile/systemDictionary.cpp  41
-rw-r--r--  src/share/vm/classfile/systemDictionary.hpp  13
-rw-r--r--  src/share/vm/classfile/vmSymbols.hpp  8
-rw-r--r--  src/share/vm/code/codeCache.hpp  5
-rw-r--r--  src/share/vm/code/nmethod.cpp  54
-rw-r--r--  src/share/vm/compiler/compileBroker.cpp  3
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  37
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  9
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  7
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp  6
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp  2
-rw-r--r--  src/share/vm/interpreter/linkResolver.cpp  39
-rw-r--r--  src/share/vm/memory/collectorPolicy.cpp  7
-rw-r--r--  src/share/vm/memory/universe.cpp  9
-rw-r--r--  src/share/vm/oops/constMethod.cpp  20
-rw-r--r--  src/share/vm/oops/constMethod.hpp  3
-rw-r--r--  src/share/vm/oops/instanceKlass.cpp  11
-rw-r--r--  src/share/vm/oops/klassVtable.cpp  10
-rw-r--r--  src/share/vm/oops/method.cpp  34
-rw-r--r--  src/share/vm/oops/method.hpp  33
-rw-r--r--  src/share/vm/oops/methodData.cpp  8
-rw-r--r--  src/share/vm/oops/symbol.cpp  2
-rw-r--r--  src/share/vm/opto/graphKit.cpp  1
-rw-r--r--  src/share/vm/opto/idealKit.cpp  28
-rw-r--r--  src/share/vm/opto/idealKit.hpp  13
-rw-r--r--  src/share/vm/opto/ifg.cpp  44
-rw-r--r--  src/share/vm/opto/library_call.cpp  156
-rw-r--r--  src/share/vm/opto/loopnode.cpp  12
-rw-r--r--  src/share/vm/opto/output.cpp  34
-rw-r--r--  src/share/vm/opto/parse2.cpp  4
-rw-r--r--  src/share/vm/opto/phaseX.cpp  59
-rw-r--r--  src/share/vm/prims/jvm.cpp  67
-rw-r--r--  src/share/vm/prims/jvmtiEventController.cpp  7
-rw-r--r--  src/share/vm/prims/jvmtiRedefineClassesTrace.hpp  77
-rw-r--r--  src/share/vm/prims/methodHandles.cpp  25
-rw-r--r--  src/share/vm/prims/unsafe.cpp  261
-rw-r--r--  src/share/vm/prims/whitebox.cpp  20
-rw-r--r--  src/share/vm/runtime/arguments.cpp  28
-rw-r--r--  src/share/vm/runtime/arguments.hpp  3
-rw-r--r--  src/share/vm/runtime/globals.hpp  13
-rw-r--r--  src/share/vm/runtime/init.cpp  9
-rw-r--r--  src/share/vm/runtime/os.hpp  4
-rw-r--r--  src/share/vm/runtime/thread.cpp  11
-rw-r--r--  src/share/vm/runtime/vframe.cpp  49
-rw-r--r--  src/share/vm/runtime/vframe.hpp  1
-rw-r--r--  src/share/vm/services/memTracker.hpp  8
-rw-r--r--  src/share/vm/utilities/utf8.cpp  8
-rw-r--r--  src/share/vm/utilities/utf8.hpp  4
-rw-r--r--  test/compiler/8009761/Test8009761.java  2
-rw-r--r--  test/gc/TestVerifyBeforeGCDuringStartup.java  45
-rw-r--r--  test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java  6
-rw-r--r--  test/runtime/7116786/Test7116786.java  9
-rw-r--r--  test/runtime/interned/SanityTest.java  59
-rw-r--r--  test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java  114
-rw-r--r--  test/testlibrary/whitebox/sun/hotspot/WhiteBox.java  6
131 files changed, 4242 insertions, 1288 deletions
diff --git a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java
index fe7559d05..22957a27d 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java
@@ -572,9 +572,14 @@ public class WindbgDebuggerLocal extends DebuggerBase implements WindbgDebugger
DTFWHome = sysRoot + File.separator + ".." + File.separator +
"Program Files" + File.separator + "Debugging Tools For Windows";
searchList.add(DTFWHome);
- searchList.add(DTFWHome + " (x86)");
- searchList.add(DTFWHome + " (x64)");
+ // Only add the search path for the current CPU architecture:
+ String cpu = PlatformInfo.getCPU();
+ if (cpu.equals("x86")) {
+ searchList.add(DTFWHome + " (x86)");
+ } else if (cpu.equals("amd64")) {
+ searchList.add(DTFWHome + " (x64)");
+ }
// The last place to search is the system directory:
searchList.add(sysRoot + File.separator + "system32");
}
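
Note: the change above narrows the Debugging Tools For Windows search list to the variant matching the running CPU. A minimal standalone C++ sketch of the same arch-dispatch idea (function and directory names are illustrative, not the Serviceability Agent API):

#include <string>
#include <vector>
#include <iostream>

static std::vector<std::string> build_search_list(const std::string& sys_root,
                                                  const std::string& cpu) {
  const std::string dtfw =
      sys_root + "\\..\\Program Files\\Debugging Tools For Windows";
  std::vector<std::string> search_list;
  search_list.push_back(dtfw);
  // Only add the variant for the architecture we are actually running on.
  if (cpu == "x86") {
    search_list.push_back(dtfw + " (x86)");
  } else if (cpu == "amd64") {
    search_list.push_back(dtfw + " (x64)");
  }
  // The system directory is searched last.
  search_list.push_back(sys_root + "\\system32");
  return search_list;
}

int main() {
  for (const std::string& dir : build_search_list("C:\\Windows", "amd64"))
    std::cout << dir << '\n';
}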
diff --git a/make/bsd/makefiles/buildtree.make b/make/bsd/makefiles/buildtree.make
index 71bb04b98..752e0febb 100644
--- a/make/bsd/makefiles/buildtree.make
+++ b/make/bsd/makefiles/buildtree.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,17 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/i486 = 32
+DATA_MODE/amd64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
@@ -212,6 +223,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \
+ echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \
echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
diff --git a/make/bsd/build.sh b/make/build.sh
index ddb07e541..d05ce4474 100644
--- a/make/bsd/build.sh
+++ b/make/build.sh
@@ -1,6 +1,6 @@
#! /bin/sh
#
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -28,44 +28,38 @@
set -u
-if [ $# != 2 ]; then
- echo "Usage : $0 Build_Options Location"
- echo "Build Options : debug or optimized or basicdebug or basic or clean"
- echo "Location : specify any workspace which has gamma sources"
+if [ $# -lt 1 ]; then
+ echo "Usage : $0 BuildTarget [LP64=1] [BuildOptions]"
+ echo " Server VM | Client VM"
+ echo "BuildTarget : debug | debug1"
+ echo " fastdebug | fastdebug1"
+ echo " jvmg | jvmg1"
+ echo " optimized | optimized1"
+ echo " profiled | profiled1"
+ echo " product | product1"
exit 1
fi
-# Just in case:
-case ${JAVA_HOME} in
-/*) true;;
-?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
-esac
-
-case `uname -m` in
- i386|i486|i586|i686)
- mach=i386
- ;;
- *)
- echo "Unsupported machine: " `uname -m`
- exit 1
- ;;
-esac
-
-if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
+if [ "${JAVA_HOME-}" = "" -o ! -d "${JAVA_HOME-}" -o ! -d ${JAVA_HOME-}/jre/lib/ ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
- echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
- echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
+ echo "JAVA_HOME: ${JAVA_HOME-}"
exit 1
fi
+# Just in case:
+JAVA_HOME=`( cd $JAVA_HOME; pwd )`
-LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
-${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
+if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then
+ ALT_BOOTDIR=${JAVA_HOME}
+fi
-# This is necessary as long as we are using the old launcher
-# with the new distribution format:
-CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
+# build in current directory by default
+if [ "${ALT_OUTPUTDIR-}" = "" -o ! -d "${ALT_OUTPUTDIR-}" ]; then
+ ALT_OUTPUTDIR=`(pwd)`
+fi
+HOTSPOT_SRC=`(dirname $0)`/..
+HOTSPOT_SRC=`(cd ${HOTSPOT_SRC}; pwd)`
for gm in gmake gnumake
do
@@ -74,22 +68,25 @@ do
done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
+# quiet build by default
+Quiet="MAKE_VERBOSE="
+
+# no debug info by default
+NoDebugInfo="ENABLE_FULL_DEBUG_SYMBOLS="
+
+LANG=C
echo "### ENVIRONMENT SETTINGS:"
+export HOTSPOT_SRC ; echo "HOTSPOT_SRC=$HOTSPOT_SRC"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
-export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
+export ALT_BOOTDIR ; echo "ALT_BOOTDIR=$ALT_BOOTDIR"
+export ALT_OUTPUTDIR ; echo "ALT_OUTPUTDIR=$ALT_OUTPUTDIR"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
+export LANG ; echo "LANG=$LANG"
echo "###"
-Build_Options=$1
-Location=$2
-
-case ${Location} in
-/*) true;;
-?*) Location=`(cd ${Location}; pwd)`;;
-esac
+BuildOptions="$Quiet $NoDebugInfo $*"
echo \
-${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location}
-${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location}
+${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
+${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
diff --git a/make/hotspot_version b/make/hotspot_version
index c6bad83df..03616715c 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=25
+HS_BUILD_NUMBER=26
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
diff --git a/make/linux/build.sh b/make/linux/build.sh
deleted file mode 100644
index 79844c51e..000000000
--- a/make/linux/build.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#! /bin/sh
-#
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# Make sure the variable JAVA_HOME is set before running this script.
-
-set -u
-
-
-if [ $# != 2 ]; then
- echo "Usage : $0 Build_Options Location"
- echo "Build Options : debug or optimized or basicdebug or basic or clean"
- echo "Location : specify any workspace which has gamma sources"
- exit 1
-fi
-
-# Just in case:
-case ${JAVA_HOME} in
-/*) true;;
-?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
-esac
-
-case `uname -m` in
- i386|i486|i586|i686)
- mach=i386
- ;;
- x86_64)
- mach=amd64
- ;;
- *)
- echo "Unsupported machine: " `uname -m`
- exit 1
- ;;
-esac
-
-if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
- echo "JAVA_HOME needs to be set to a valid JDK path"
- echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
- echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
- exit 1
-fi
-
-
-LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
-${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
-
-# This is necessary as long as we are using the old launcher
-# with the new distribution format:
-CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
-
-
-for gm in gmake gnumake
-do
- if [ "${GNUMAKE-}" != "" ]; then break; fi
- ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
-done
-: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
-
-
-echo "### ENVIRONMENT SETTINGS:"
-export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
-export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
-export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
-echo "###"
-
-Build_Options=$1
-Location=$2
-
-case ${Location} in
-/*) true;;
-?*) Location=`(cd ${Location}; pwd)`;;
-esac
-
-echo \
-${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}
-${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}
diff --git a/make/linux/makefiles/buildtree.make b/make/linux/makefiles/buildtree.make
index b75b4d578..f980dcdaf 100644
--- a/make/linux/makefiles/buildtree.make
+++ b/make/linux/makefiles/buildtree.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -183,6 +183,19 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/i486 = 32
+DATA_MODE/sparc = 32
+DATA_MODE/sparcv9 = 64
+DATA_MODE/amd64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
@@ -205,6 +218,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \
+ echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \
echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
diff --git a/make/solaris/build.sh b/make/solaris/build.sh
deleted file mode 100644
index 9a8326ac6..000000000
--- a/make/solaris/build.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#! /bin/sh
-#
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# Make sure the variable JAVA_HOME is set before running this script.
-
-set -u
-
-
-usage() {
- (
- echo "Usage : $0 [-sb | -sbfast] config ws_path"
- echo ""
- echo "Where:"
- echo " -sb ::= enable source browser info generation for"
- echo " all configs during compilation"
- echo ""
- echo " -sbfast ::= enable source browser info generation for"
- echo " all configs without compilation"
- echo ""
- echo " config ::= debug | debug1 | debugcore"
- echo " fastdebug | fastdebug1 | fastdebugcore"
- echo " jvmg | jvmg1 | jvmgcore"
- echo " optimized | optimized1 | optimizedcore"
- echo " profiled | profiled1 | profiledcore"
- echo " product | product1 | productcore"
- echo ""
- echo " ws_path ::= path to HotSpot workspace"
- ) >&2
- exit 1
-}
-
-# extract possible options
-options=""
-if [ $# -gt 2 ]; then
- case "$1" in
- -sb)
- options="CFLAGS_BROWSE=-xsb"
- shift
- ;;
- -sbfast)
- options="CFLAGS_BROWSE=-xsbfast"
- shift
- ;;
- *)
- echo "Unknown option: '$1'" >&2
- usage
- ;;
- esac
-fi
-
-# should be just two args left at this point
-if [ $# != 2 ]; then
- usage
-fi
-
-# Just in case:
-case ${JAVA_HOME} in
-/*) true;;
-?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
-esac
-
-if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/`uname -p` ]; then
- echo "JAVA_HOME needs to be set to a valid JDK path"
- echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
- echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
- exit 1
-fi
-
-
-LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
-${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
-
-# This is necessary as long as we are using the old launcher
-# with the new distribution format:
-CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
-
-
-for gm in gmake gnumake
-do
- if [ "${GNUMAKE-}" != "" ]; then break; fi
- ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
-done
-: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
-
-
-echo "### ENVIRONMENT SETTINGS:"
-export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
-export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
-export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
-echo "###"
-
-config=$1
-ws_path=$2
-
-case ${ws_path} in
-/*) true;;
-?*) ws_path=`(cd ${ws_path}; pwd)`;;
-esac
-
-echo \
-${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
- $config GAMMADIR=${ws_path} $options
-${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
- $config GAMMADIR=${ws_path} $options
diff --git a/make/solaris/makefiles/buildtree.make b/make/solaris/makefiles/buildtree.make
index 707d5f36a..a3ab0b5e5 100644
--- a/make/solaris/makefiles/buildtree.make
+++ b/make/solaris/makefiles/buildtree.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -176,6 +176,19 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/i486 = 32
+DATA_MODE/sparc = 32
+DATA_MODE/sparcv9 = 64
+DATA_MODE/amd64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
diff --git a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
index 6d936b376..113665220 100644
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
@@ -51,6 +51,16 @@ RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
+ if (_info->deoptimize_on_exception()) {
+ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ __ call(a, relocInfo::runtime_call_type);
+ __ delayed()->nop();
+ ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
+ debug_only(__ should_not_reach_here());
+ return;
+ }
+
if (_index->is_register()) {
__ mov(_index->as_register(), G4);
} else {
@@ -64,11 +74,22 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
-#ifdef ASSERT
- __ should_not_reach_here();
-#endif
+ debug_only(__ should_not_reach_here());
}
+PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
+ _info = new CodeEmitInfo(info);
+}
+
+void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
+ __ bind(_entry);
+ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ __ call(a, relocInfo::runtime_call_type);
+ __ delayed()->nop();
+ ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
+ debug_only(__ should_not_reach_here());
+}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
@@ -99,10 +120,17 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
+ address a;
+ if (_info->deoptimize_on_exception()) {
+ // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
+ a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ } else {
+ a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+ }
+
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
- __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
- relocInfo::runtime_call_type);
+ __ call(a, relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
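
Note: the deoptimize_on_exception path above supports the new C1 range check elimination (see the added c1_RangeCheckElimination.{cpp,hpp}): once a check is hoisted out of a loop as a predicate, a failure no longer sits at the bytecode that would throw, so the stub deoptimizes via predicate_failed_trap and reexecutes in the interpreter instead of throwing. A schematic C++ illustration of the transformation (not HotSpot code; a single hoisted predicate guards the loop, and the unlikely failure falls back to the fully checked version, the analogue of the deopt path):

#include <stdexcept>
#include <cstddef>

static void store_checked(int* a, std::size_t len, std::size_t i, int v) {
  if (i >= len) throw std::out_of_range("index");  // per-element bounds check
  a[i] = v;
}

static void fill(int* a, std::size_t len, std::size_t n) {
  if (n <= len) {                                   // hoisted predicate covers all iterations
    for (std::size_t i = 0; i < n; i++) a[i] = 0;   // fast loop, no per-iteration check
  } else {                                          // "deopt" path: slow but fully checked
    for (std::size_t i = 0; i < n; i++) store_checked(a, len, i, 0);
  }
}

int main() {
  int buf[8];
  fill(buf, 8, 8);
}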
diff --git a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
index 27bf44244..7c4c54ea3 100644
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
@@ -3361,6 +3361,45 @@ void LIR_Assembler::get_thread(LIR_Opr result_reg) {
__ mov(G2_thread, result_reg->as_register());
}
+#ifdef ASSERT
+// emit run-time assertion
+void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
+ assert(op->code() == lir_assert, "must be");
+
+ if (op->in_opr1()->is_valid()) {
+ assert(op->in_opr2()->is_valid(), "both operands must be valid");
+ comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
+ } else {
+ assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
+ assert(op->condition() == lir_cond_always, "no other conditions allowed");
+ }
+
+ Label ok;
+ if (op->condition() != lir_cond_always) {
+ Assembler::Condition acond;
+ switch (op->condition()) {
+ case lir_cond_equal: acond = Assembler::equal; break;
+ case lir_cond_notEqual: acond = Assembler::notEqual; break;
+ case lir_cond_less: acond = Assembler::less; break;
+ case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
+ case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
+ case lir_cond_greater: acond = Assembler::greater; break;
+ case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
+ case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
+ default: ShouldNotReachHere();
+ };
+ __ br(acond, false, Assembler::pt, ok);
+ __ delayed()->nop();
+ }
+ if (op->halt()) {
+ const char* str = __ code_string(op->msg());
+ __ stop(str);
+ } else {
+ breakpoint();
+ }
+ __ bind(ok);
+}
+#endif
void LIR_Assembler::peephole(LIR_List* lir) {
LIR_OpList* inst = lir->instructions_list();
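
Note: emit_assert above is debug-only (guarded by ASSERT) and backs the new lir_assert op, which range check elimination uses to verify eliminated checks at run time: compare the operands, branch over the failure path when the condition holds, otherwise stop with the message. The SPARC mapping routes aboveEqual/belowEqual to the unsigned condition codes. A compact C++ sketch of that dispatch, with an enum standing in for LIR conditions and a predicate standing in for the emitted compare-and-branch (names illustrative only):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

enum class Cond { equal, notEqual, less, lessEqual, greaterEqual, greater,
                  aboveEqual, belowEqual };   // last two compare unsigned

static bool holds(Cond c, int32_t l, int32_t r) {
  uint32_t ul = static_cast<uint32_t>(l), ur = static_cast<uint32_t>(r);
  switch (c) {
    case Cond::equal:        return l == r;
    case Cond::notEqual:     return l != r;
    case Cond::less:         return l <  r;
    case Cond::lessEqual:    return l <= r;
    case Cond::greaterEqual: return l >= r;
    case Cond::greater:      return l >  r;
    case Cond::aboveEqual:   return ul >= ur;  // like greaterEqualUnsigned
    case Cond::belowEqual:   return ul <= ur;  // like lessEqualUnsigned
  }
  return false;
}

static void emit_assert(Cond c, int32_t l, int32_t r, const char* msg) {
  if (!holds(c, l, r)) { std::fprintf(stderr, "%s\n", msg); std::abort(); }  // __ stop(str)
}

int main() {
  emit_assert(Cond::aboveEqual, -1, 1, "unsigned -1 is UINT_MAX, so this holds");
}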
diff --git a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
index 2d4b3a2f1..82cc696e8 100644
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
@@ -324,7 +324,7 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
- bool needs_range_check = true;
+ bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@@ -339,12 +339,9 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array.load_item();
index.load_nonconstant();
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
+ if (use_length && needs_range_check) {
+ length.set_instruction(x->length());
+ length.load_item();
}
if (needs_store_check) {
value.load_item();
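
Note: the do_StoreIndexed refactor above computes needs_range_check once via compute_needs_range_check(), so the array length is loaded only when a check will actually be emitted and statically in-bounds stores skip it entirely. A condensed illustrative C++ rendering of the control flow (not the LIRGenerator API; the in-bounds test stands in for the real flow-based analysis):

#include <cstdio>

struct StoreIndexed { bool has_length; bool statically_in_bounds; };

static bool compute_needs_range_check(const StoreIndexed& x) {
  return !x.statically_in_bounds;  // stand-in for the real analysis
}

static void do_store_indexed(const StoreIndexed& x) {
  bool needs_range_check = compute_needs_range_check(x);  // was hard-coded true
  bool use_length = x.has_length;
  if (use_length && needs_range_check) {
    std::puts("load length operand");  // only when the emitted code consumes it
  }
}

int main() {
  do_store_indexed(StoreIndexed{true, true});   // no length load, no check
  do_store_indexed(StoreIndexed{true, false});  // length loaded, check emitted
}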
diff --git a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
index b8c838b16..6723ef2c3 100644
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
@@ -987,6 +987,25 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
#endif // INCLUDE_ALL_GCS
+ case predicate_failed_trap_id:
+ {
+ __ set_info("predicate_failed_trap", dont_gc_arguments);
+ OopMap* oop_map = save_live_registers(sasm);
+
+ int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+
+ oop_maps = new OopMapSet();
+ oop_maps->add_gc_map(call_offset, oop_map);
+
+ DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+ assert(deopt_blob != NULL, "deoptimization blob must have been created");
+ restore_live_registers(sasm);
+ __ restore();
+ __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
+ __ delayed()->nop();
+ }
+ break;
+
default:
{ __ set_info("unimplemented entry", dont_gc_arguments);
__ save_frame(0);
diff --git a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
index 806bce01b..cef3cdbbe 100644
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
@@ -101,6 +101,15 @@ RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
+ if (_info->deoptimize_on_exception()) {
+ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ __ call(RuntimeAddress(a));
+ ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
+ debug_only(__ should_not_reach_here());
+ return;
+ }
+
// pass the array index on stack because all registers must be preserved
if (_index->is_cpu_register()) {
ce->store_parameter(_index->as_register(), 0);
@@ -115,9 +124,22 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
}
__ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
+PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
+ _info = new CodeEmitInfo(info);
+}
+
+void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
+ __ bind(_entry);
+ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ __ call(RuntimeAddress(a));
+ ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
+ debug_only(__ should_not_reach_here());
+}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) {
@@ -414,10 +436,19 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
+ address a;
+ if (_info->deoptimize_on_exception()) {
+ // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
+ a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ } else {
+ a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+ }
+
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
- __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
+ __ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index 83146761c..a99d79393 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -3755,6 +3755,44 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
}
}
+#ifdef ASSERT
+// emit run-time assertion
+void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
+ assert(op->code() == lir_assert, "must be");
+
+ if (op->in_opr1()->is_valid()) {
+ assert(op->in_opr2()->is_valid(), "both operands must be valid");
+ comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
+ } else {
+ assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
+ assert(op->condition() == lir_cond_always, "no other conditions allowed");
+ }
+
+ Label ok;
+ if (op->condition() != lir_cond_always) {
+ Assembler::Condition acond = Assembler::zero;
+ switch (op->condition()) {
+ case lir_cond_equal: acond = Assembler::equal; break;
+ case lir_cond_notEqual: acond = Assembler::notEqual; break;
+ case lir_cond_less: acond = Assembler::less; break;
+ case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
+ case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
+ case lir_cond_greater: acond = Assembler::greater; break;
+ case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
+ case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
+ default: ShouldNotReachHere();
+ }
+ __ jcc(acond, ok);
+ }
+ if (op->halt()) {
+ const char* str = __ code_string(op->msg());
+ __ stop(str);
+ } else {
+ breakpoint();
+ }
+ __ bind(ok);
+}
+#endif
void LIR_Assembler::membar() {
// QQQ sparc TSO uses this,
diff --git a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index 5ef148619..6810ae542 100644
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -263,7 +263,7 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
- bool needs_range_check = true;
+ bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@@ -278,12 +278,10 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array.load_item();
index.load_nonconstant();
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
+ if (use_length && needs_range_check) {
+ length.set_instruction(x->length());
+ length.load_item();
+
}
if (needs_store_check) {
value.load_item();
diff --git a/src/cpu/x86/vm/c1_LinearScan_x86.cpp b/src/cpu/x86/vm/c1_LinearScan_x86.cpp
index 7956a6af5..baecb9df9 100644
--- a/src/cpu/x86/vm/c1_LinearScan_x86.cpp
+++ b/src/cpu/x86/vm/c1_LinearScan_x86.cpp
@@ -675,7 +675,8 @@ void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
switch (op2->code()) {
case lir_cmp:
case lir_cmp_fd2i:
- case lir_ucmp_fd2i: {
+ case lir_ucmp_fd2i:
+ case lir_assert: {
assert(left->is_fpu_register(), "invalid LIR");
assert(right->is_fpu_register(), "invalid LIR");
diff --git a/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index d3ac75e40..ff9c11d86 100644
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -1807,6 +1807,24 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
#endif // INCLUDE_ALL_GCS
+ case predicate_failed_trap_id:
+ {
+ StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
+
+ OopMap* map = save_live_registers(sasm, 1);
+
+ int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+ oop_maps = new OopMapSet();
+ oop_maps->add_gc_map(call_offset, map);
+ restore_live_registers(sasm);
+ __ leave();
+ DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+ assert(deopt_blob != NULL, "deoptimization blob must have been created");
+
+ __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+ }
+ break;
+
default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
__ movptr(rax, (int)id);
diff --git a/src/cpu/x86/vm/cppInterpreter_x86.cpp b/src/cpu/x86/vm/cppInterpreter_x86.cpp
index c568c6f4d..24e669408 100644
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp
@@ -1299,25 +1299,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ push(rdx);
#endif // _LP64
- // Either restore the MXCSR register after returning from the JNI Call
- // or verify that it wasn't changed.
- if (VM_Version::supports_sse()) {
- if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
- }
- else if (CheckJNICalls ) {
- __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
- }
- }
-
-#ifndef _LP64
- // Either restore the x87 floating pointer control word after returning
- // from the JNI call or verify that it wasn't changed.
- if (CheckJNICalls) {
- __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
- }
-#endif // _LP64
-
+ // Verify or restore cpu control state after JNI call
+ __ restore_cpu_control_state_after_jni();
// change thread state
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
diff --git a/src/cpu/x86/vm/macroAssembler_x86.cpp b/src/cpu/x86/vm/macroAssembler_x86.cpp
index b64518a7b..98c93f99a 100644
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp
@@ -4765,6 +4765,31 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
pop_CPU_state();
}
+void MacroAssembler::restore_cpu_control_state_after_jni() {
+ // Either restore the MXCSR register after returning from the JNI Call
+ // or verify that it wasn't changed (with -Xcheck:jni flag).
+ if (VM_Version::supports_sse()) {
+ if (RestoreMXCSROnJNICalls) {
+ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
+ } else if (CheckJNICalls) {
+ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
+ }
+ }
+ if (VM_Version::supports_avx()) {
+ // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
+ vzeroupper();
+ }
+
+#ifndef _LP64
+ // Either restore the x87 floating pointer control word after returning
+ // from the JNI call or verify that it wasn't changed.
+ if (CheckJNICalls) {
+ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
+ }
+#endif // _LP64
+}
+
+
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
if (UseCompressedKlassPointers) {
@@ -5759,6 +5784,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
addptr(result, stride2);
subl(cnt2, stride2);
jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
+ // clean upper bits of YMM registers
+ vzeroupper();
// compare wide vectors tail
bind(COMPARE_WIDE_TAIL);
@@ -5772,6 +5799,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
bind(VECTOR_NOT_EQUAL);
+ // clean upper bits of YMM registers
+ vzeroupper();
lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale));
jmp(COMPARE_16_CHARS);
@@ -6028,6 +6057,10 @@ void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Regist
// That's it
bind(DONE);
+ if (UseAVX >= 2) {
+ // clean upper bits of YMM registers
+ vzeroupper();
+ }
}
void MacroAssembler::generate_fill(BasicType t, bool aligned,
@@ -6157,6 +6190,10 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vmovdqu(Address(to, 0), xtmp);
addptr(to, 32);
subl(count, 8 << shift);
+
+ BIND(L_check_fill_8_bytes);
+ // clean upper bits of YMM registers
+ vzeroupper();
} else {
// Fill 32-byte chunks
pshufd(xtmp, xtmp, 0);
@@ -6180,8 +6217,9 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
addptr(to, 32);
subl(count, 8 << shift);
jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
+
+ BIND(L_check_fill_8_bytes);
}
- BIND(L_check_fill_8_bytes);
addl(count, 8 << shift);
jccb(Assembler::zero, L_exit);
jmpb(L_fill_8_bytes);
@@ -6316,6 +6354,10 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
jccb(Assembler::lessEqual, L_copy_16_chars);
bind(L_copy_16_chars_exit);
+ if (UseAVX >= 2) {
+ // clean upper bits of YMM registers
+ vzeroupper();
+ }
subptr(len, 8);
jccb(Assembler::greater, L_copy_8_chars_exit);
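
Note: restore_cpu_control_state_after_jni above folds the per-call-site MXCSR/x87 sequences into one helper and adds vzeroupper so code mixing 256-bit AVX with legacy SSE avoids the state-transition penalty. A user-level C++ sketch of the same two operations using compiler intrinsics rather than the MacroAssembler API (compile with AVX enabled, e.g. -mavx):

#include <immintrin.h>
#include <cstdio>

int main() {
  unsigned int saved = _mm_getcsr();   // MXCSR: SSE rounding/exception control

  // ... a native callee could change the rounding mode or exception masks here ...

  _mm_setcsr(saved);        // restore it, as RestoreMXCSROnJNICalls does with
                            // the standard control word
  _mm256_zeroupper();       // clear dirty upper YMM halves so subsequent SSE
                            // code avoids the AVX <-> SSE transition penalty
  std::printf("MXCSR = 0x%x\n", saved);
}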
diff --git a/src/cpu/x86/vm/macroAssembler_x86.hpp b/src/cpu/x86/vm/macroAssembler_x86.hpp
index 9500f3164..e9f409dc5 100644
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp
@@ -582,6 +582,9 @@ class MacroAssembler: public Assembler {
// only if +VerifyFPU
void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
+ // Verify or restore cpu control state after JNI call
+ void restore_cpu_control_state_after_jni();
+
// prints msg, dumps registers and stops execution
void stop(const char* msg);
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
index dc705421c..0fce7952a 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -2065,6 +2065,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(RuntimeAddress(native_func));
+ // Verify or restore cpu control state after JNI call
+ __ restore_cpu_control_state_after_jni();
+
// WARNING - on Windows Java Natives use pascal calling convention and pop the
// arguments off of the stack. We could just re-adjust the stack pointer here
// and continue to do SP relative addressing but we instead switch to FP
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index 50255eeef..db20c1f23 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -2315,16 +2315,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(RuntimeAddress(native_func));
- // Either restore the MXCSR register after returning from the JNI Call
- // or verify that it wasn't changed.
- if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
-
- }
- else if (CheckJNICalls ) {
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
- }
-
+ // Verify or restore cpu control state after JNI call
+ __ restore_cpu_control_state_after_jni();
// Unpack native results.
switch (ret_type) {
diff --git a/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index e56bb2266..f3a91d03c 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -835,6 +835,11 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_64_bytes);
__ subl(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
+
+ if (UseUnalignedLoadStores && (UseAVX >= 2)) {
+ // clean upper bits of YMM registers
+ __ vzeroupper();
+ }
__ addl(qword_count, 8);
__ jccb(Assembler::zero, L_exit);
//
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index c6b94e243..ace545383 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -1331,6 +1331,10 @@ class StubGenerator: public StubCodeGenerator {
}
__ addptr(qword_count, 4);
__ BIND(L_end);
+ if (UseAVX >= 2) {
+ // clean upper bits of YMM registers
+ __ vzeroupper();
+ }
} else {
// Copy 32-bytes per iteration
__ BIND(L_loop);
@@ -1404,6 +1408,10 @@ class StubGenerator: public StubCodeGenerator {
}
__ subptr(qword_count, 4);
__ BIND(L_end);
+ if (UseAVX >= 2) {
+ // clean upper bits of YMM registers
+ __ vzeroupper();
+ }
} else {
// Copy 32-bytes per iteration
__ BIND(L_loop);
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
index 5df98394c..fb13a4404 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -1080,22 +1080,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result potentially in rdx:rax or ST0
- // Either restore the MXCSR register after returning from the JNI Call
- // or verify that it wasn't changed.
- if (VM_Version::supports_sse()) {
- if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
- }
- else if (CheckJNICalls ) {
- __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
- }
- }
-
- // Either restore the x87 floating pointer control word after returning
- // from the JNI call or verify that it wasn't changed.
- if (CheckJNICalls) {
- __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
- }
+ // Verify or restore cpu control state after JNI call
+ __ restore_cpu_control_state_after_jni();
// save potential result in ST(0) & rdx:rax
// (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
index c446cb3c9..6b3f7b6ba 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
@@ -1079,15 +1079,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ call(rax);
// result potentially in rax or xmm0
- // Depending on runtime options, either restore the MXCSR
- // register after returning from the JNI Call or verify that
- // it wasn't changed during -Xcheck:jni.
- if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
- }
- else if (CheckJNICalls) {
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
- }
+ // Verify or restore cpu control state after JNI call
+ __ restore_cpu_control_state_after_jni();
// NOTE: The order of these pushes is known to frame::interpreter_frame_result
// in order to extract the result of a method call. If the order of these
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index 6ceb50cee..67f33d3ba 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -228,10 +228,16 @@ static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CON
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
// Offset hacking within calls.
-static int pre_call_FPU_size() {
- if (Compile::current()->in_24_bit_fp_mode())
- return 6; // fldcw
- return 0;
+static int pre_call_resets_size() {
+ int size = 0;
+ Compile* C = Compile::current();
+ if (C->in_24_bit_fp_mode()) {
+ size += 6; // fldcw
+ }
+ if (C->max_vector_size() > 16) {
+ size += 3; // vzeroupper
+ }
+ return size;
}
static int preserve_SP_size() {
@@ -242,21 +248,21 @@ static int preserve_SP_size() {
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
- int offset = 5 + pre_call_FPU_size(); // 5 bytes from start of call to where return address points
+ int offset = 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
if (_method_handle_invoke)
offset += preserve_SP_size();
return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
- return 10 + pre_call_FPU_size(); // 10 bytes from start of call to where return address points
+ return 10 + pre_call_resets_size(); // 10 bytes from start of call to where return address points
}
static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
- return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
+ return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
}
// Indicate if the safepoint node needs the polling page as an input.
@@ -272,7 +278,7 @@ bool SafePointNode::needs_polling_address_input() {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
- current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@@ -280,7 +286,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
- current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@@ -289,7 +295,7 @@ int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
- current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@@ -583,16 +589,20 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
// Remove two words for return addr and rbp,
framesize -= 2*wordSize;
- if( C->in_24_bit_fp_mode() ) {
+ if (C->max_vector_size() > 16) {
+ st->print("VZEROUPPER");
+ st->cr(); st->print("\t");
+ }
+ if (C->in_24_bit_fp_mode()) {
st->print("FLDCW standard control word");
st->cr(); st->print("\t");
}
- if( framesize ) {
+ if (framesize) {
st->print("ADD ESP,%d\t# Destroy frame",framesize);
st->cr(); st->print("\t");
}
st->print_cr("POPL EBP"); st->print("\t");
- if( do_polling() && C->is_method_compilation() ) {
+ if (do_polling() && C->is_method_compilation()) {
st->print("TEST PollPage,EAX\t! Poll Safepoint");
st->cr(); st->print("\t");
}
@@ -602,8 +612,14 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
+ if (C->max_vector_size() > 16) {
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ MacroAssembler masm(&cbuf);
+ masm.vzeroupper();
+ }
// If method set FPU control word, restore to standard control word
- if( C->in_24_bit_fp_mode() ) {
+ if (C->in_24_bit_fp_mode()) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
@@ -615,12 +631,11 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
- if( framesize >= 128 ) {
+ if (framesize >= 128) {
emit_opcode(cbuf, 0x81); // add SP, #framesize
emit_rm(cbuf, 0x3, 0x00, ESP_enc);
emit_d32(cbuf, framesize);
- }
- else if( framesize ) {
+ } else if (framesize) {
emit_opcode(cbuf, 0x83); // add SP, #framesize
emit_rm(cbuf, 0x3, 0x00, ESP_enc);
emit_d8(cbuf, framesize);
@@ -628,7 +643,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_opcode(cbuf, 0x58 | EBP_enc);
- if( do_polling() && C->is_method_compilation() ) {
+ if (do_polling() && C->is_method_compilation()) {
cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
emit_opcode(cbuf,0x85);
emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
@@ -640,7 +655,8 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
// If method set FPU control word, restore to standard control word
int size = C->in_24_bit_fp_mode() ? 6 : 0;
- if( do_polling() && C->is_method_compilation() ) size += 6;
+ if (C->max_vector_size() > 16) size += 3; // vzeroupper
+ if (do_polling() && C->is_method_compilation()) size += 6;
int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
@@ -649,7 +665,7 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
size++; // popl rbp,
- if( framesize >= 128 ) {
+ if (framesize >= 128) {
size += 6;
} else {
size += framesize ? 3 : 0;
@@ -1853,20 +1869,26 @@ encode %{
%}
- enc_class pre_call_FPU %{
+ enc_class pre_call_resets %{
// If method sets FPU control word restore it here
debug_only(int off0 = cbuf.insts_size());
- if( Compile::current()->in_24_bit_fp_mode() ) {
- MacroAssembler masm(&cbuf);
- masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+ if (ra_->C->in_24_bit_fp_mode()) {
+ MacroAssembler _masm(&cbuf);
+ __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+ }
+ if (ra_->C->max_vector_size() > 16) {
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ MacroAssembler _masm(&cbuf);
+ __ vzeroupper();
}
debug_only(int off1 = cbuf.insts_size());
- assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
+ assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
// If method sets FPU control word do it here also
- if( Compile::current()->in_24_bit_fp_mode() ) {
+ if (Compile::current()->in_24_bit_fp_mode()) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}
@@ -1877,17 +1899,17 @@ encode %{
// who we intended to call.
cbuf.set_insts_mark();
$$$emit8$primary;
- if ( !_method ) {
+ if (!_method) {
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
runtime_call_Relocation::spec(), RELOC_IMM32 );
- } else if(_optimized_virtual) {
+ } else if (_optimized_virtual) {
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
} else {
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
static_call_Relocation::spec(), RELOC_IMM32 );
}
- if( _method ) { // Emit stub for static call
+ if (_method) { // Emit stub for static call
emit_java_to_interp(cbuf);
}
%}
@@ -12828,7 +12850,7 @@ instruct CallStaticJavaDirect(method meth) %{
ins_cost(300);
format %{ "CALL,static " %}
opcode(0xE8); /* E8 cd */
- ins_encode( pre_call_FPU,
+ ins_encode( pre_call_resets,
Java_Static_Call( meth ),
call_epilog,
post_call_FPU );
@@ -12849,7 +12871,7 @@ instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
ins_cost(300);
format %{ "CALL,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */
- ins_encode( pre_call_FPU,
+ ins_encode( pre_call_resets,
preserve_SP,
Java_Static_Call( meth ),
restore_SP,
@@ -12870,7 +12892,7 @@ instruct CallDynamicJavaDirect(method meth) %{
format %{ "MOV EAX,(oop)-1\n\t"
"CALL,dynamic" %}
opcode(0xE8); /* E8 cd */
- ins_encode( pre_call_FPU,
+ ins_encode( pre_call_resets,
Java_Dynamic_Call( meth ),
call_epilog,
post_call_FPU );
@@ -12887,7 +12909,7 @@ instruct CallRuntimeDirect(method meth) %{
format %{ "CALL,runtime " %}
opcode(0xE8); /* E8 cd */
// Use FFREEs to clear entries in float stack
- ins_encode( pre_call_FPU,
+ ins_encode( pre_call_resets,
FFree_Float_Stack_All,
Java_To_Runtime( meth ),
post_call_FPU );
@@ -12902,7 +12924,7 @@ instruct CallLeafDirect(method meth) %{
ins_cost(300);
format %{ "CALL_LEAF,runtime " %}
opcode(0xE8); /* E8 cd */
- ins_encode( pre_call_FPU,
+ ins_encode( pre_call_resets,
FFree_Float_Stack_All,
Java_To_Runtime( meth ),
Verify_FPU_For_Leaf, post_call_FPU );
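
Note: renaming pre_call_FPU to pre_call_resets matters for the byte accounting in the hunks above — the return-address offsets and call-site padding must know exactly how many bytes precede the 5-byte call (E8 + rel32): 6 for fldcw with a 32-bit absolute operand, 3 for vzeroupper. A tiny C++ mirror of that arithmetic (struct and values illustrative, taken from the comments in the patch):

#include <cstdio>

struct CompileState { bool in_24_bit_fp_mode; int max_vector_size; };

static int pre_call_resets_size(const CompileState& c) {
  int size = 0;
  if (c.in_24_bit_fp_mode)    size += 6;  // fldcw m16, absolute address
  if (c.max_vector_size > 16) size += 3;  // vzeroupper (VEX: C5 F8 77)
  return size;
}

int main() {
  CompileState c = {true, 32};
  // The return address sits this many bytes from the start of the sequence:
  std::printf("ret_addr_offset = %d\n", 5 + pre_call_resets_size(c));
}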
diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad
index 7c902c4e3..77dc5b011 100644
--- a/src/cpu/x86/vm/x86_64.ad
+++ b/src/cpu/x86/vm/x86_64.ad
@@ -399,6 +399,9 @@ source %{
static int preserve_SP_size() {
return 3; // rex.w, op, rm(reg/reg)
}
+static int clear_avx_size() {
+ return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
+}
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
@@ -406,6 +409,7 @@ static int preserve_SP_size() {
int MachCallStaticJavaNode::ret_addr_offset()
{
int offset = 5; // 5 bytes from start of call to where return address points
+ offset += clear_avx_size();
if (_method_handle_invoke)
offset += preserve_SP_size();
return offset;
@@ -413,11 +417,16 @@ int MachCallStaticJavaNode::ret_addr_offset()
int MachCallDynamicJavaNode::ret_addr_offset()
{
- return 15; // 15 bytes from start of call to where return address points
+ int offset = 15; // 15 bytes from start of call to where return address points
+ offset += clear_avx_size();
+ return offset;
}
-// In os_cpu .ad file
-// int MachCallRuntimeNode::ret_addr_offset()
+int MachCallRuntimeNode::ret_addr_offset() {
+ int offset = 13; // movq r10,#addr; callq (r10)
+ offset += clear_avx_size();
+ return offset;
+}
// Indicate if the safepoint node needs the polling page as an input,
// it does if the polling page is more than disp32 away.
@@ -434,6 +443,7 @@ bool SafePointNode::needs_polling_address_input()
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
+ current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@@ -443,6 +453,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
int CallStaticJavaHandleNode::compute_padding(int current_offset) const
{
current_offset += preserve_SP_size(); // skip mov rbp, rsp
+ current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@@ -451,6 +462,7 @@ int CallStaticJavaHandleNode::compute_padding(int current_offset) const
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
+ current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 11; // skip movq instruction + call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@@ -764,6 +776,11 @@ int MachPrologNode::reloc() const
void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
Compile* C = ra_->C;
+ if (C->max_vector_size() > 16) {
+ st->print("vzeroupper");
+ st->cr(); st->print("\t");
+ }
+
int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed
@@ -793,6 +810,13 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
Compile* C = ra_->C;
+ if (C->max_vector_size() > 16) {
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ MacroAssembler _masm(&cbuf);
+ __ vzeroupper();
+ }
+
int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed
@@ -2008,6 +2032,25 @@ encode %{
__ bind(miss);
%}
+ enc_class clear_avx %{
+ debug_only(int off0 = cbuf.insts_size());
+ if (ra_->C->max_vector_size() > 16) {
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ MacroAssembler _masm(&cbuf);
+ __ vzeroupper();
+ }
+ debug_only(int off1 = cbuf.insts_size());
+ assert(off1 - off0 == clear_avx_size(), "correct size prediction");
+ %}
+
+ enc_class Java_To_Runtime(method meth) %{
+ // No relocation needed
+ MacroAssembler _masm(&cbuf);
+ __ mov64(r10, (int64_t) $meth$$method);
+ __ call(r10);
+ %}
+
enc_class Java_To_Interpreter(method meth)
%{
// CALL Java_To_Interpreter
@@ -11366,7 +11409,7 @@ instruct CallStaticJavaDirect(method meth) %{
ins_cost(300);
format %{ "call,static " %}
opcode(0xE8); /* E8 cd */
- ins_encode(Java_Static_Call(meth), call_epilog);
+ ins_encode(clear_avx, Java_Static_Call(meth), call_epilog);
ins_pipe(pipe_slow);
ins_alignment(4);
%}
@@ -11384,7 +11427,7 @@ instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
ins_cost(300);
format %{ "call,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */
- ins_encode(preserve_SP,
+ ins_encode(clear_avx, preserve_SP,
Java_Static_Call(meth),
restore_SP,
call_epilog);
@@ -11403,7 +11446,7 @@ instruct CallDynamicJavaDirect(method meth)
ins_cost(300);
format %{ "movq rax, #Universe::non_oop_word()\n\t"
"call,dynamic " %}
- ins_encode(Java_Dynamic_Call(meth), call_epilog);
+ ins_encode(clear_avx, Java_Dynamic_Call(meth), call_epilog);
ins_pipe(pipe_slow);
ins_alignment(4);
%}
@@ -11416,8 +11459,7 @@ instruct CallRuntimeDirect(method meth)
ins_cost(300);
format %{ "call,runtime " %}
- opcode(0xE8); /* E8 cd */
- ins_encode(Java_To_Runtime(meth));
+ ins_encode(clear_avx, Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}
@@ -11429,8 +11471,7 @@ instruct CallLeafDirect(method meth)
ins_cost(300);
format %{ "call_leaf,runtime " %}
- opcode(0xE8); /* E8 cd */
- ins_encode(Java_To_Runtime(meth));
+ ins_encode(clear_avx, Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}
@@ -11442,7 +11483,6 @@ instruct CallLeafNoFPDirect(method meth)
ins_cost(300);
format %{ "call_leaf_nofp,runtime " %}
- opcode(0xE8); /* E8 cd */
ins_encode(Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}
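
The x86_64.ad changes above all serve one idea, spelled out in the emit comments: code compiled with 256-bit (YMM) vectors must execute vzeroupper before calling out or returning, or the hardware charges an AVX <-> SSE transition penalty, and every call-site offset and padding computation must then account for the extra bytes. A minimal standalone sketch of that bookkeeping, with max_vector_size as a hypothetical stand-in for Compile::current()->max_vector_size():

#include <cassert>

// Hypothetical stand-in for Compile::current()->max_vector_size().
static int max_vector_size = 32; // compiled code uses 256-bit vectors

// Mirrors clear_avx_size() above: vzeroupper encodes as 3 bytes (C5 F8 77)
// and is only emitted when wide vectors are in use.
static int clear_avx_size() {
  return (max_vector_size > 16) ? 3 : 0;
}

// Mirrors MachCallStaticJavaNode::ret_addr_offset(): the return address
// moves 3 bytes further from the start of the call sequence whenever a
// vzeroupper is emitted in front of the call instruction.
static int static_call_ret_addr_offset() {
  int offset = 5;             // E8 + 4-byte displacement
  offset += clear_avx_size(); // optional vzeroupper prefix
  return offset;
}

int main() {
  assert(static_call_ret_addr_offset() == 8); // 3 (vzeroupper) + 5 (call)
  return 0;
}
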
diff --git a/src/os/bsd/vm/os_bsd.cpp b/src/os/bsd/vm/os_bsd.cpp
index a09db3728..d062b048a 100644
--- a/src/os/bsd/vm/os_bsd.cpp
+++ b/src/os/bsd/vm/os_bsd.cpp
@@ -167,20 +167,6 @@ julong os::physical_memory() {
return Bsd::physical_memory();
}
-julong os::allocatable_physical_memory(julong size) {
-#ifdef _LP64
- return size;
-#else
- julong result = MIN2(size, (julong)3800*M);
- if (!is_allocatable(result)) {
- // See comments under solaris for alignment considerations
- julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
- result = MIN2(size, reasonable_size);
- }
- return result;
-#endif // _LP64
-}
-
////////////////////////////////////////////////////////////////////////////////
// environment support
diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp
index 29842b223..df6af0076 100644
--- a/src/os/linux/vm/os_linux.cpp
+++ b/src/os/linux/vm/os_linux.cpp
@@ -194,20 +194,6 @@ julong os::physical_memory() {
return Linux::physical_memory();
}
-julong os::allocatable_physical_memory(julong size) {
-#ifdef _LP64
- return size;
-#else
- julong result = MIN2(size, (julong)3800*M);
- if (!is_allocatable(result)) {
- // See comments under solaris for alignment considerations
- julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
- result = MIN2(size, reasonable_size);
- }
- return result;
-#endif // _LP64
-}
-
////////////////////////////////////////////////////////////////////////////////
// environment support
diff --git a/src/os/posix/launcher/launcher.script b/src/os/posix/launcher/launcher.script
index 0a2ae5f4d..e8d428180 100644
--- a/src/os/posix/launcher/launcher.script
+++ b/src/os/posix/launcher/launcher.script
@@ -199,7 +199,7 @@ case "$MODE" in
rm -f $GDBSCR
;;
dbx)
- $DBX -s $MYDIR/.dbxrc $LAUNCHER $JPARAMS
+ $DBX -s $HOME/.dbxrc $LAUNCHER $JPARMS
;;
valgrind)
echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap
diff --git a/src/os/posix/vm/os_posix.cpp b/src/os/posix/vm/os_posix.cpp
index af75b3b36..033027836 100644
--- a/src/os/posix/vm/os_posix.cpp
+++ b/src/os/posix/vm/os_posix.cpp
@@ -188,4 +188,66 @@ void os::Posix::print_uname_info(outputStream* st) {
st->cr();
}
+bool os::has_allocatable_memory_limit(julong* limit) {
+ struct rlimit rlim;
+ int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
+ // if there was an error when calling getrlimit, assume that there is no limitation
+ // on virtual memory.
+ bool result;
+ if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
+ result = false;
+ } else {
+ *limit = (julong)rlim.rlim_cur;
+ result = true;
+ }
+#ifdef _LP64
+ return result;
+#else
+ // Arbitrary virtual space limit for 32-bit Unices, found by testing. If
+ // getrlimit above returned a limit, bound it by this value; otherwise
+ // use this value directly.
+ const julong max_virtual_limit = (julong)3800*M;
+ if (result) {
+ *limit = MIN2(*limit, max_virtual_limit);
+ } else {
+ *limit = max_virtual_limit;
+ }
+ // bound by actually allocatable memory. The algorithm uses two bounds, an
+ // upper and a lower limit. The upper limit is the smallest amount of
+ // memory known to be unallocatable, the lower limit the largest amount
+ // known to be allocatable.
+ // The algorithm iteratively refines the result by halving the difference
+ // between these limits, updating either the upper limit (if that value could
+ // not be allocated) or the lower limit (if that value could be allocated),
+ // until the difference between these limits is "small".
+
+ // the minimum amount of memory we care about allocating.
+ const julong min_allocation_size = M;
+
+ julong upper_limit = *limit;
+
+ // first check a few trivial cases
+ if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
+ *limit = upper_limit;
+ } else if (!is_allocatable(min_allocation_size)) {
+ // we found that not even min_allocation_size is allocatable. Return it
+ // anyway. There is no point in searching for a better value any more.
+ *limit = min_allocation_size;
+ } else {
+ // perform the binary search.
+ julong lower_limit = min_allocation_size;
+ while ((upper_limit - lower_limit) > min_allocation_size) {
+ julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
+ temp_limit = align_size_down_(temp_limit, min_allocation_size);
+ if (is_allocatable(temp_limit)) {
+ lower_limit = temp_limit;
+ } else {
+ upper_limit = temp_limit;
+ }
+ }
+ *limit = lower_limit;
+ }
+ return true;
+#endif
+}
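
The comment block above describes the 32-bit fallback precisely: bisect between the largest amount known to be allocatable and the smallest amount known to fail. A self-contained sketch of the same bisection, with is_allocatable replaced by a hypothetical predicate so it runs anywhere (in the VM the probe actually attempts a reservation):

#include <cassert>

typedef unsigned long long julong;
static const julong M = 1024 * 1024;

// Hypothetical stand-in for os::is_allocatable(): pretend only ~1400 MB of
// address space can actually be reserved.
static bool is_allocatable(julong bytes) { return bytes <= 1400 * M; }

static julong align_down(julong v, julong alignment) {
  return v - (v % alignment);
}

// Same bisection as above: upper_limit is the smallest amount known to fail,
// lower_limit the largest known to succeed; halve the gap until it is no
// wider than the granularity we care about (1 MB).
static julong probe_limit(julong upper_limit) {
  const julong min_allocation_size = M;
  if (is_allocatable(upper_limit) || upper_limit <= min_allocation_size) {
    return upper_limit;
  }
  if (!is_allocatable(min_allocation_size)) {
    return min_allocation_size; // nothing better to find
  }
  julong lower_limit = min_allocation_size;
  while ((upper_limit - lower_limit) > min_allocation_size) {
    julong mid = align_down(lower_limit + (upper_limit - lower_limit) / 2,
                            min_allocation_size);
    if (is_allocatable(mid)) {
      lower_limit = mid;
    } else {
      upper_limit = mid;
    }
  }
  return lower_limit;
}

int main() {
  julong limit = probe_limit(3800 * M);
  assert(is_allocatable(limit) && !is_allocatable(limit + M));
  return 0;
}
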
diff --git a/src/os/solaris/vm/os_solaris.cpp b/src/os/solaris/vm/os_solaris.cpp
index bdd78ad02..5570195bb 100644
--- a/src/os/solaris/vm/os_solaris.cpp
+++ b/src/os/solaris/vm/os_solaris.cpp
@@ -476,24 +476,6 @@ julong os::physical_memory() {
return Solaris::physical_memory();
}
-julong os::allocatable_physical_memory(julong size) {
-#ifdef _LP64
- return size;
-#else
- julong result = MIN2(size, (julong)3835*M);
- if (!is_allocatable(result)) {
- // Memory allocations will be aligned but the alignment
- // is not known at this point. Alignments will
- // be at most to LargePageSizeInBytes. Protect
- // allocations from alignments up to illegal
- // values. If at this point 2G is illegal.
- julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
- result = MIN2(size, reasonable_size);
- }
- return result;
-#endif
-}
-
static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;
const int LOCK_BUSY = 1;
diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp
index 4a99a1b39..0cb394ade 100644
--- a/src/os/windows/vm/os_windows.cpp
+++ b/src/os/windows/vm/os_windows.cpp
@@ -686,12 +686,17 @@ julong os::physical_memory() {
return win32::physical_memory();
}
-julong os::allocatable_physical_memory(julong size) {
+bool os::has_allocatable_memory_limit(julong* limit) {
+ MEMORYSTATUSEX ms;
+ ms.dwLength = sizeof(ms);
+ GlobalMemoryStatusEx(&ms);
#ifdef _LP64
- return size;
+ *limit = (julong)ms.ullAvailVirtual;
+ return true;
#else
// Limit to 1400m because of the 2gb address space wall
- return MIN2(size, (julong)1400*M);
+ *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
+ return true;
#endif
}
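
For reference, the Windows side of the same query is a single GlobalMemoryStatusEx call; a minimal Windows-only sketch (note that dwLength must be set before the call or the API fails):

#include <windows.h>
#include <cstdio>

int main() {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms); // required before the call
  if (GlobalMemoryStatusEx(&ms)) {
    std::printf("available virtual: %llu bytes\n",
                (unsigned long long)ms.ullAvailVirtual);
  }
  return 0;
}
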
@@ -3768,6 +3773,8 @@ extern "C" {
}
}
+static jint initSock();
+
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling
@@ -3898,6 +3905,10 @@ jint os::init_2(void) {
if (!success) UseNUMAInterleaving = false;
}
+ if (initSock() != JNI_OK) {
+ return JNI_ERR;
+ }
+
return JNI_OK;
}
@@ -4894,42 +4905,24 @@ LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }
-
-typedef CRITICAL_SECTION mutex_t;
-#define mutexInit(m) InitializeCriticalSection(m)
-#define mutexDestroy(m) DeleteCriticalSection(m)
-#define mutexLock(m) EnterCriticalSection(m)
-#define mutexUnlock(m) LeaveCriticalSection(m)
-
-static bool sock_initialized = FALSE;
-static mutex_t sockFnTableMutex;
-
-static void initSock() {
+static jint initSock() {
WSADATA wsadata;
if (!os::WinSock2Dll::WinSock2Available()) {
- jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
+ jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
::GetLastError());
- return;
+ return JNI_ERR;
}
- if (sock_initialized == TRUE) return;
- ::mutexInit(&sockFnTableMutex);
- ::mutexLock(&sockFnTableMutex);
- if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) {
- jio_fprintf(stderr, "Could not initialize Winsock\n");
+ if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
+ jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
+ ::GetLastError());
+ return JNI_ERR;
}
- sock_initialized = TRUE;
- ::mutexUnlock(&sockFnTableMutex);
+ return JNI_OK;
}
struct hostent* os::get_host_by_name(char* name) {
- if (!sock_initialized) {
- initSock();
- }
- if (!os::WinSock2Dll::WinSock2Available()) {
- return NULL;
- }
return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}
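
The rewrite above makes Winsock initialization fail-fast: it now runs once during os::init_2, requests version 2.2 instead of 1.1, and turns failure into JNI_ERR rather than continuing silently, which is why get_host_by_name no longer needs the lazy-init mutex dance. A minimal standalone sketch of the same startup pattern (Windows-only; link with ws2_32.lib):

#include <winsock2.h>
#include <cstdio>

static int init_sock() {
  WSADATA wsadata;
  if (WSAStartup(MAKEWORD(2, 2), &wsadata) != 0) {
    std::fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                 WSAGetLastError());
    return -1; // the VM equivalent: return JNI_ERR and abort startup
  }
  return 0;
}

int main() { return init_sock(); }
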
diff --git a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad b/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad
index f4dc25d34..254328e09 100644
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad
@@ -55,20 +55,6 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
- enc_class Java_To_Runtime(method meth) %{
- // No relocation needed
-
- // movq r10, <meth>
- emit_opcode(cbuf, Assembler::REX_WB);
- emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
- emit_d64(cbuf, (int64_t) $meth$$method);
-
- // call (r10)
- emit_opcode(cbuf, Assembler::REX_B);
- emit_opcode(cbuf, 0xFF);
- emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
- %}
-
%}
@@ -76,8 +62,4 @@ encode %{
source %{
-int MachCallRuntimeNode::ret_addr_offset() {
- return 13; // movq r10,#addr; callq (r10)
-}
-
%}
diff --git a/src/os_cpu/linux_x86/vm/linux_x86_64.ad b/src/os_cpu/linux_x86/vm/linux_x86_64.ad
index cf9adf40e..3b3ac007c 100644
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad
+++ b/src/os_cpu/linux_x86/vm/linux_x86_64.ad
@@ -55,20 +55,6 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
- enc_class Java_To_Runtime(method meth) %{
- // No relocation needed
-
- // movq r10, <meth>
- emit_opcode(cbuf, Assembler::REX_WB);
- emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
- emit_d64(cbuf, (int64_t) $meth$$method);
-
- // call (r10)
- emit_opcode(cbuf, Assembler::REX_B);
- emit_opcode(cbuf, 0xFF);
- emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
- %}
-
%}
@@ -76,8 +62,4 @@ encode %{
source %{
-int MachCallRuntimeNode::ret_addr_offset() {
- return 13; // movq r10,#addr; callq (r10)
-}
-
%}
diff --git a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad b/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad
index fdce355ab..f3334952f 100644
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad
@@ -54,39 +54,10 @@ encode %{
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
-
- enc_class Java_To_Runtime(method meth) %{
- // No relocation needed
-
- // movq r10, <meth>
- emit_opcode(cbuf, Assembler::REX_WB);
- emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
- emit_d64(cbuf, (int64_t) $meth$$method);
-
- // call (r10)
- emit_opcode(cbuf, Assembler::REX_B);
- emit_opcode(cbuf, 0xFF);
- emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
- %}
-
- enc_class post_call_verify_mxcsr %{
- MacroAssembler _masm(&cbuf);
- if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
- }
- else if (CheckJNICalls) {
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
- }
- %}
%}
// Platform dependent source
source %{
-
-int MachCallRuntimeNode::ret_addr_offset() {
- return 13; // movq r10,#addr; callq (r10)
-}
-
%}
diff --git a/src/os_cpu/windows_x86/vm/windows_x86_64.ad b/src/os_cpu/windows_x86/vm/windows_x86_64.ad
index e251b2b0c..54e183a0b 100644
--- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad
+++ b/src/os_cpu/windows_x86/vm/windows_x86_64.ad
@@ -53,30 +53,11 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
- enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
- // No relocation needed
-
- // movq r10, <meth>
- emit_opcode(cbuf, Assembler::REX_WB);
- emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
- emit_d64(cbuf, (int64_t) $meth$$method);
-
- // call (r10)
- emit_opcode(cbuf, Assembler::REX_B);
- emit_opcode(cbuf, 0xFF);
- emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
- %}
-
%}
-//
+
// Platform dependent source
-//
-source %{
-int MachCallRuntimeNode::ret_addr_offset()
-{
- return 13; // movq r10,#addr; callq (r10)
-}
+source %{
%}
diff --git a/src/share/vm/adlc/archDesc.cpp b/src/share/vm/adlc/archDesc.cpp
index a8983ebae..7e272e4d0 100644
--- a/src/share/vm/adlc/archDesc.cpp
+++ b/src/share/vm/adlc/archDesc.cpp
@@ -832,6 +832,7 @@ static const char *getRegMask(const char *reg_class_name) {
int length = (int)strlen(rc_name) + (int)strlen(mask) + 5;
char *regMask = new char[length];
sprintf(regMask,"%s%s()", rc_name, mask);
+ delete[] rc_name;
return regMask;
}
}
diff --git a/src/share/vm/adlc/dfa.cpp b/src/share/vm/adlc/dfa.cpp
index 5333c152c..6b15911a0 100644
--- a/src/share/vm/adlc/dfa.cpp
+++ b/src/share/vm/adlc/dfa.cpp
@@ -191,12 +191,19 @@ static void cost_check(FILE *fp, const char *spaces,
// Macro equivalent to: _kids[0]->valid(FOO) && _kids[1]->valid(BAR)
//
static void child_test(FILE *fp, MatchList &mList) {
- if( mList._lchild ) // If left child, check it
- fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", ArchDesc::getMachOperEnum(mList._lchild));
- if( mList._lchild && mList._rchild ) // If both, add the "&&"
- fprintf(fp, " && " );
- if( mList._rchild ) // If right child, check it
- fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", ArchDesc::getMachOperEnum(mList._rchild));
+ if (mList._lchild) { // If left child, check it
+ const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
+ fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", lchild_to_upper);
+ delete[] lchild_to_upper;
+ }
+ if (mList._lchild && mList._rchild) { // If both, add the "&&"
+ fprintf(fp, " && ");
+ }
+ if (mList._rchild) { // If right child, check it
+ const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
+ fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", rchild_to_upper);
+ delete[] rchild_to_upper;
+ }
}
//---------------------------calc_cost-----------------------------------------
@@ -206,13 +213,17 @@ static void child_test(FILE *fp, MatchList &mList) {
Expr *ArchDesc::calc_cost(FILE *fp, const char *spaces, MatchList &mList, ProductionState &status) {
fprintf(fp, "%sunsigned int c = ", spaces);
Expr *c = new Expr("0");
- if (mList._lchild ) { // If left child, add it in
- sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", ArchDesc::getMachOperEnum(mList._lchild));
+ if (mList._lchild) { // If left child, add it in
+ const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
+ sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", lchild_to_upper);
c->add(Expr::buffer());
+ delete[] lchild_to_upper;
}
- if (mList._rchild) { // If right child, add it in
- sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", ArchDesc::getMachOperEnum(mList._rchild));
+ if (mList._rchild) { // If right child, add it in
+ const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
+ sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", rchild_to_upper);
c->add(Expr::buffer());
+ delete[] rchild_to_upper;
}
// Add in cost of this rule
const char *mList_cost = mList.get_cost();
@@ -232,15 +243,17 @@ void ArchDesc::gen_match(FILE *fp, MatchList &mList, ProductionState &status, Di
fprintf(fp, "%s", spaces4);
// Only generate child tests if this is not a leaf node
bool has_child_constraints = mList._lchild || mList._rchild;
- const char *predicate_test = mList.get_pred();
- if( has_child_constraints || predicate_test ) {
+ const char *predicate_test = mList.get_pred();
+ if (has_child_constraints || predicate_test) {
// Open the child-and-predicate-test braces
fprintf(fp, "if( ");
status.set_constraint(hasConstraint);
child_test(fp, mList);
// Only generate predicate test if one exists for this match
- if( predicate_test ) {
- if( has_child_constraints ) { fprintf(fp," &&\n"); }
+ if (predicate_test) {
+ if (has_child_constraints) {
+ fprintf(fp," &&\n");
+ }
fprintf(fp, "%s %s", spaces6, predicate_test);
}
// End of outer tests
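
All three dfa.cpp hunks fix the same leak: ArchDesc::getMachOperEnum returns a freshly allocated upper-cased copy of the operand name, so callers now capture the pointer and delete[] it instead of dropping it inside the fprintf argument list. A reduced sketch of that caller-frees contract, with to_upper_copy as a hypothetical stand-in for getMachOperEnum:

#include <cctype>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for ArchDesc::getMachOperEnum(): returns a heap
// allocated, upper-cased copy that the CALLER must delete[].
static const char* to_upper_copy(const char* s) {
  std::size_t len = std::strlen(s);
  char* result = new char[len + 1];
  for (std::size_t i = 0; i <= len; i++) { // <= copies the terminating NUL too
    result[i] = (char)std::toupper((unsigned char)s[i]);
  }
  return result;
}

int main() {
  // Old style leaked: the pointer vanished inside the fprintf argument list.
  // New style: capture, use, delete.
  const char* enum_name = to_upper_copy("regF");
  std::printf("STATE__VALID_CHILD(_kids[0], %s)\n", enum_name);
  delete[] enum_name;
  return 0;
}
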
diff --git a/src/share/vm/c1/c1_Canonicalizer.cpp b/src/share/vm/c1/c1_Canonicalizer.cpp
index 40ecd6490..a4cda5f90 100644
--- a/src/share/vm/c1/c1_Canonicalizer.cpp
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp
@@ -937,4 +937,6 @@ void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
+void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
+void Canonicalizer::do_Assert(Assert* x) {}
void Canonicalizer::do_MemBar(MemBar* x) {}
diff --git a/src/share/vm/c1/c1_Canonicalizer.hpp b/src/share/vm/c1/c1_Canonicalizer.hpp
index d1eb55b07..b8bcfd7e6 100644
--- a/src/share/vm/c1/c1_Canonicalizer.hpp
+++ b/src/share/vm/c1/c1_Canonicalizer.hpp
@@ -107,6 +107,8 @@ class Canonicalizer: InstructionVisitor {
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
+ virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
+ virtual void do_Assert (Assert* x);
};
#endif // SHARE_VM_C1_C1_CANONICALIZER_HPP
diff --git a/src/share/vm/c1/c1_CodeStubs.hpp b/src/share/vm/c1/c1_CodeStubs.hpp
index 9fbeb29b5..7235cd6c3 100644
--- a/src/share/vm/c1/c1_CodeStubs.hpp
+++ b/src/share/vm/c1/c1_CodeStubs.hpp
@@ -166,6 +166,22 @@ class RangeCheckStub: public CodeStub {
#endif // PRODUCT
};
+// stub used when predicate fails and deoptimization is needed
+class PredicateFailedStub: public CodeStub {
+ private:
+ CodeEmitInfo* _info;
+
+ public:
+ PredicateFailedStub(CodeEmitInfo* info);
+ virtual void emit_code(LIR_Assembler* e);
+ virtual CodeEmitInfo* info() const { return _info; }
+ virtual void visit(LIR_OpVisitState* visitor) {
+ visitor->do_slow_case(_info);
+ }
+#ifndef PRODUCT
+ virtual void print_name(outputStream* out) const { out->print("PredicateFailedStub"); }
+#endif // PRODUCT
+};
class DivByZeroStub: public CodeStub {
private:
diff --git a/src/share/vm/c1/c1_Compilation.cpp b/src/share/vm/c1/c1_Compilation.cpp
index cc268ef14..a8effa4bc 100644
--- a/src/share/vm/c1/c1_Compilation.cpp
+++ b/src/share/vm/c1/c1_Compilation.cpp
@@ -33,13 +33,16 @@
#include "c1/c1_ValueStack.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compileLog.hpp"
+#include "c1/c1_RangeCheckElimination.hpp"
typedef enum {
_t_compile,
_t_setup,
- _t_optimizeIR,
_t_buildIR,
+ _t_optimize_blocks,
+ _t_optimize_null_checks,
+ _t_rangeCheckElimination,
_t_emit_lir,
_t_linearScan,
_t_lirGeneration,
@@ -52,8 +55,10 @@ typedef enum {
static const char * timer_name[] = {
"compile",
"setup",
- "optimizeIR",
"buildIR",
+ "optimize_blocks",
+ "optimize_null_checks",
+ "rangeCheckElimination",
"emit_lir",
"linearScan",
"lirGeneration",
@@ -159,9 +164,9 @@ void Compilation::build_hir() {
if (UseC1Optimizations) {
NEEDS_CLEANUP
// optimization
- PhaseTraceTime timeit(_t_optimizeIR);
+ PhaseTraceTime timeit(_t_optimize_blocks);
- _hir->optimize();
+ _hir->optimize_blocks();
}
_hir->verify();
@@ -180,13 +185,47 @@ void Compilation::build_hir() {
_hir->compute_code();
if (UseGlobalValueNumbering) {
- ResourceMark rm;
+ // No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects.
int instructions = Instruction::number_of_instructions();
GlobalValueNumbering gvn(_hir);
assert(instructions == Instruction::number_of_instructions(),
"shouldn't have created an instructions");
}
+ _hir->verify();
+
+#ifndef PRODUCT
+ if (PrintCFGToFile) {
+ CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
+ }
+#endif
+
+ if (RangeCheckElimination) {
+ if (_hir->osr_entry() == NULL) {
+ PhaseTraceTime timeit(_t_rangeCheckElimination);
+ RangeCheckElimination::eliminate(_hir);
+ }
+ }
+
+#ifndef PRODUCT
+ if (PrintCFGToFile) {
+ CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
+ }
+#endif
+
+ if (UseC1Optimizations) {
+ // Loop invariant code motion reorders instructions and range
+ // check elimination adds new instructions, so null check
+ // elimination must run afterwards.
+ NEEDS_CLEANUP
+ // optimization
+ PhaseTraceTime timeit(_t_optimize_null_checks);
+
+ _hir->eliminate_null_checks();
+ }
+
+ _hir->verify();
+
// compute use counts after global value numbering
_hir->compute_use_counts();
@@ -502,6 +541,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _next_id(0)
, _next_block_id(0)
, _code(buffer_blob)
+, _has_access_indexed(false)
, _current_instruction(NULL)
#ifndef PRODUCT
, _last_instruction_printed(NULL)
@@ -567,7 +607,9 @@ void Compilation::print_timers() {
tty->print_cr(" Detailed C1 Timings");
tty->print_cr(" Setup time: %6.3f s (%4.1f%%)", timers[_t_setup].seconds(), (timers[_t_setup].seconds() / total) * 100.0);
tty->print_cr(" Build IR: %6.3f s (%4.1f%%)", timers[_t_buildIR].seconds(), (timers[_t_buildIR].seconds() / total) * 100.0);
- tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", timers[_t_optimizeIR].seconds(), (timers[_t_optimizeIR].seconds() / total) * 100.0);
+ float t_optimizeIR = timers[_t_optimize_blocks].seconds() + timers[_t_optimize_null_checks].seconds();
+ tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", t_optimizeIR, (t_optimizeIR / total) * 100.0);
+ tty->print_cr(" RCE: %6.3f s (%4.1f%%)", timers[_t_rangeCheckElimination].seconds(), (timers[_t_rangeCheckElimination].seconds() / total) * 100.0);
tty->print_cr(" Emit LIR: %6.3f s (%4.1f%%)", timers[_t_emit_lir].seconds(), (timers[_t_emit_lir].seconds() / total) * 100.0);
tty->print_cr(" LIR Gen: %6.3f s (%4.1f%%)", timers[_t_lirGeneration].seconds(), (timers[_t_lirGeneration].seconds() / total) * 100.0);
tty->print_cr(" Linear Scan: %6.3f s (%4.1f%%)", timers[_t_linearScan].seconds(), (timers[_t_linearScan].seconds() / total) * 100.0);
diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp
index 0a7373da8..897da9762 100644
--- a/src/share/vm/c1/c1_Compilation.hpp
+++ b/src/share/vm/c1/c1_Compilation.hpp
@@ -26,8 +26,10 @@
#define SHARE_VM_C1_C1_COMPILATION_HPP
#include "ci/ciEnv.hpp"
+#include "ci/ciMethodData.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/deoptimization.hpp"
class CompilationResourceObj;
class XHandlers;
@@ -85,6 +87,7 @@ class Compilation: public StackObj {
LinearScan* _allocator;
CodeOffsets _offsets;
CodeBuffer _code;
+ bool _has_access_indexed;
// compilation helpers
void initialize();
@@ -140,6 +143,7 @@ class Compilation: public StackObj {
C1_MacroAssembler* masm() const { return _masm; }
CodeOffsets* offsets() { return &_offsets; }
Arena* arena() { return _arena; }
+ bool has_access_indexed() { return _has_access_indexed; }
// Instruction ids
int get_next_id() { return _next_id++; }
@@ -154,6 +158,7 @@ class Compilation: public StackObj {
void set_has_fpu_code(bool f) { _has_fpu_code = f; }
void set_has_unsafe_access(bool f) { _has_unsafe_access = f; }
void set_would_profile(bool f) { _would_profile = f; }
+ void set_has_access_indexed(bool f) { _has_access_indexed = f; }
// Add a set of exception handlers covering the given PC offset
void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
// Statistics gathering
@@ -233,6 +238,14 @@ class Compilation: public StackObj {
return env()->comp_level() == CompLevel_full_profile &&
C1UpdateMethodData && C1ProfileCheckcasts;
}
+
+ // will compilation make optimistic assumptions that might lead to
+ // deoptimization and that the runtime will account for?
+ bool is_optimistic() const {
+ return !TieredCompilation &&
+ (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
+ method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
+ }
};
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index 9491607ad..8d7619eed 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -947,7 +947,9 @@ void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
void GraphBuilder::load_indexed(BasicType type) {
- ValueStack* state_before = copy_state_for_exception();
+ // In case of in-block code motion by range check elimination
+ ValueStack* state_before = copy_state_indexed_access();
+ compilation()->set_has_access_indexed(true);
Value index = ipop();
Value array = apop();
Value length = NULL;
@@ -961,7 +963,9 @@ void GraphBuilder::load_indexed(BasicType type) {
void GraphBuilder::store_indexed(BasicType type) {
- ValueStack* state_before = copy_state_for_exception();
+ // In case of in-block code motion by range check elimination
+ ValueStack* state_before = copy_state_indexed_access();
+ compilation()->set_has_access_indexed(true);
Value value = pop(as_ValueType(type));
Value index = ipop();
Value array = apop();
@@ -1179,7 +1183,9 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta
BlockBegin* tsux = block_at(stream()->get_dest());
BlockBegin* fsux = block_at(stream()->next_bci());
bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
- Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
+ // In case of loop invariant code motion or predicate insertion
+ // before the body of a loop, the state is needed
+ Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));
assert(i->as_Goto() == NULL ||
(i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
@@ -1294,7 +1300,9 @@ void GraphBuilder::table_switch() {
BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
- ValueStack* state_before = is_bb ? copy_state_before() : NULL;
+ // In case of loop invariant code motion or predicate insertion
+ // before the body of a loop, the state is needed
+ ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else {
// collect successors
@@ -1308,7 +1316,9 @@ void GraphBuilder::table_switch() {
// add default successor
if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset()));
- ValueStack* state_before = has_bb ? copy_state_before() : NULL;
+ // In case of loop invariant code motion or predicate insertion
+ // before the body of a loop, the state is needed
+ ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT
if (res->as_Goto()) {
@@ -1336,7 +1346,9 @@ void GraphBuilder::lookup_switch() {
BlockBegin* tsux = block_at(bci() + pair.offset());
BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
- ValueStack* state_before = is_bb ? copy_state_before() : NULL;
+ // In case of loop invariant code motion or predicate insertion
+ // before the body of a loop, the state is needed
+ ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else {
// collect successors & keys
@@ -1353,7 +1365,9 @@ void GraphBuilder::lookup_switch() {
// add default successor
if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset()));
- ValueStack* state_before = has_bb ? copy_state_before() : NULL;
+ // In case of loop invariant code motion or predicate insertion
+ // before the body of a loop, the state is needed
+ ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
#ifdef ASSERT
if (res->as_Goto()) {
diff --git a/src/share/vm/c1/c1_GraphBuilder.hpp b/src/share/vm/c1/c1_GraphBuilder.hpp
index 1eca297cd..ae5afd4e0 100644
--- a/src/share/vm/c1/c1_GraphBuilder.hpp
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp
@@ -301,6 +301,8 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
ValueStack* copy_state_exhandling();
ValueStack* copy_state_for_exception_with_bci(int bci);
ValueStack* copy_state_for_exception();
+ ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
+ ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }
//
// Inlining support
diff --git a/src/share/vm/c1/c1_IR.cpp b/src/share/vm/c1/c1_IR.cpp
index 015874ac0..e9e73db0c 100644
--- a/src/share/vm/c1/c1_IR.cpp
+++ b/src/share/vm/c1/c1_IR.cpp
@@ -182,13 +182,14 @@ bool IRScopeDebugInfo::should_reexecute() {
// Implementation of CodeEmitInfo
// Stack must be NON-null
-CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers)
+CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
: _scope(stack->scope())
, _scope_debug_info(NULL)
, _oop_map(NULL)
, _stack(stack)
, _exception_handlers(exception_handlers)
- , _is_method_handle_invoke(false) {
+ , _is_method_handle_invoke(false)
+ , _deoptimize_on_exception(deoptimize_on_exception) {
assert(_stack != NULL, "must be non null");
}
@@ -199,7 +200,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
, _scope_debug_info(NULL)
, _oop_map(NULL)
, _stack(stack == NULL ? info->_stack : stack)
- , _is_method_handle_invoke(info->_is_method_handle_invoke) {
+ , _is_method_handle_invoke(info->_is_method_handle_invoke)
+ , _deoptimize_on_exception(info->_deoptimize_on_exception) {
// deep copy of exception handlers
if (info->_exception_handlers != NULL) {
@@ -239,7 +241,7 @@ IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
}
-void IR::optimize() {
+void IR::optimize_blocks() {
Optimizer opt(this);
if (!compilation()->profile_branches()) {
if (DoCEE) {
@@ -257,6 +259,10 @@ void IR::optimize() {
#endif
}
}
+}
+
+void IR::eliminate_null_checks() {
+ Optimizer opt(this);
if (EliminateNullChecks) {
opt.eliminate_null_checks();
#ifndef PRODUCT
@@ -429,6 +435,7 @@ class ComputeLinearScanOrder : public StackObj {
BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges
BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list; // temporary list (used in mark_loops and compute_order)
+ BlockList _loop_headers;
Compilation* _compilation;
@@ -594,6 +601,7 @@ void ComputeLinearScanOrder::count_edges(BlockBegin* cur, BlockBegin* parent) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));
cur->set_loop_index(_num_loops);
+ _loop_headers.append(cur);
_num_loops++;
}
@@ -656,6 +664,16 @@ void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
// -> this is not a natural loop, so ignore it
TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));
+ BlockBegin *loop_header = _loop_headers.at(i);
+ assert(loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Must be loop header");
+
+ for (int j = 0; j < loop_header->number_of_preds(); j++) {
+ BlockBegin *pred = loop_header->pred_at(j);
+ pred->clear(BlockBegin::linear_scan_loop_end_flag);
+ }
+
+ loop_header->clear(BlockBegin::linear_scan_loop_header_flag);
+
for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
clear_block_in_loop(i, block_id);
}
@@ -729,9 +747,20 @@ void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* pare
} else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
- assert(cur->number_of_preds() > 1, "");
+ // Does not hold for exception blocks
+ assert(cur->number_of_preds() > 1 || cur->is_set(BlockBegin::exception_entry_flag), "");
cur->set_dominator(common_dominator(cur->dominator(), parent));
}
+
+ // Additional edge to the exception handlers of all our successors:
+ // range check elimination requires that the state at the end of a
+ // block be valid in every block it dominates, so cur must dominate
+ // the exception handlers of its successors.
+ int num_cur_xhandler = cur->number_of_exception_handlers();
+ for (int j = 0; j < num_cur_xhandler; j++) {
+ BlockBegin* xhandler = cur->exception_handler_at(j);
+ compute_dominator(xhandler, parent);
+ }
}
@@ -898,7 +927,6 @@ void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
num_sux = cur->number_of_exception_handlers();
for (i = 0; i < num_sux; i++) {
BlockBegin* sux = cur->exception_handler_at(i);
- compute_dominator(sux, cur);
if (ready_for_processing(sux)) {
sort_into_work_list(sux);
}
@@ -918,8 +946,23 @@ bool ComputeLinearScanOrder::compute_dominators_iter() {
BlockBegin* dominator = block->pred_at(0);
int num_preds = block->number_of_preds();
- for (int i = 1; i < num_preds; i++) {
- dominator = common_dominator(dominator, block->pred_at(i));
+
+ TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: Processing B%d", block->block_id()));
+
+ for (int j = 0; j < num_preds; j++) {
+
+ BlockBegin *pred = block->pred_at(j);
+ TRACE_LINEAR_SCAN(4, tty->print_cr(" DOM: Subprocessing B%d", pred->block_id()));
+
+ if (block->is_set(BlockBegin::exception_entry_flag)) {
+ dominator = common_dominator(dominator, pred);
+ int num_pred_preds = pred->number_of_preds();
+ for (int k = 0; k < num_pred_preds; k++) {
+ dominator = common_dominator(dominator, pred->pred_at(k));
+ }
+ } else {
+ dominator = common_dominator(dominator, pred);
+ }
}
if (dominator != block->dominator()) {
@@ -946,6 +989,21 @@ void ComputeLinearScanOrder::compute_dominators() {
// check that dominators are correct
assert(!compute_dominators_iter(), "fix point not reached");
+
+ // Add each block to the dominates list of its dominator
+ int num_blocks = _linear_scan_order->length();
+ for (int i = 0; i < num_blocks; i++) {
+ BlockBegin* block = _linear_scan_order->at(i);
+
+ BlockBegin *dom = block->dominator();
+ if (dom) {
+ assert(dom->dominator_depth() != -1, "Dominator must have been visited before");
+ dom->dominates()->append(block);
+ block->set_dominator_depth(dom->dominator_depth() + 1);
+ } else {
+ block->set_dominator_depth(0);
+ }
+ }
}
@@ -1032,7 +1090,7 @@ void ComputeLinearScanOrder::verify() {
BlockBegin* sux = cur->sux_at(j);
assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
- if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
+ if (!sux->is_set(BlockBegin::backward_branch_target_flag)) {
assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
}
if (cur->loop_depth() == sux->loop_depth()) {
@@ -1044,7 +1102,7 @@ void ComputeLinearScanOrder::verify() {
BlockBegin* pred = cur->pred_at(j);
assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
- if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
+ if (!cur->is_set(BlockBegin::backward_branch_target_flag)) {
assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
}
if (cur->loop_depth() == pred->loop_depth()) {
@@ -1060,7 +1118,8 @@ void ComputeLinearScanOrder::verify() {
} else {
assert(cur->dominator() != NULL, "all but first block must have dominator");
}
- assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator");
+ // Assertion does not hold for exception handlers
+ assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0) || cur->is_set(BlockBegin::exception_entry_flag), "Single predecessor must also be dominator");
}
// check that all loops are continuous
@@ -1249,9 +1308,22 @@ class PredecessorValidator : public BlockClosure {
}
};
+class VerifyBlockBeginField : public BlockClosure {
+
+public:
+
+ virtual void block_do(BlockBegin *block) {
+ for (Instruction* cur = block; cur != NULL; cur = cur->next()) {
+ assert(cur->block() == block, "Block begin is not correct");
+ }
+ }
+};
+
void IR::verify() {
#ifdef ASSERT
PredecessorValidator pv(this);
+ VerifyBlockBeginField verifier;
+ this->iterate_postorder(&verifier);
#endif
}
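
The block appended to compute_dominators() builds the explicit dominator tree the new range check elimination pass walks: since linear-scan order visits every dominator before the blocks it dominates, each block's depth is simply its dominator's depth plus one. A tiny standalone sketch of that invariant (Block here is a hypothetical reduction, not the VM's BlockBegin):

#include <cassert>
#include <vector>

// Hypothetical reduced Block: just a dominator link and the computed fields.
struct Block {
  Block* dominator = nullptr;
  int dominator_depth = -1;
  std::vector<Block*> dominates; // filled in below
};

// Mirrors the loop appended to compute_dominators(): blocks arrive in an
// order where every dominator precedes the blocks it dominates.
static void assign_dominator_depths(const std::vector<Block*>& order) {
  for (Block* b : order) {
    if (b->dominator != nullptr) {
      assert(b->dominator->dominator_depth != -1); // visited earlier
      b->dominator->dominates.push_back(b);
      b->dominator_depth = b->dominator->dominator_depth + 1;
    } else {
      b->dominator_depth = 0; // entry block
    }
  }
}

int main() {
  Block entry, a, c;
  a.dominator = &entry;
  c.dominator = &a;
  std::vector<Block*> order = { &entry, &a, &c };
  assign_dominator_depths(order);
  assert(c.dominator_depth == 2 && a.dominates.size() == 1);
  return 0;
}
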
diff --git a/src/share/vm/c1/c1_IR.hpp b/src/share/vm/c1/c1_IR.hpp
index e1f4c15eb..bc57300c1 100644
--- a/src/share/vm/c1/c1_IR.hpp
+++ b/src/share/vm/c1/c1_IR.hpp
@@ -254,6 +254,7 @@ class CodeEmitInfo: public CompilationResourceObj {
OopMap* _oop_map;
ValueStack* _stack; // used by deoptimization (contains also monitors
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
+ bool _deoptimize_on_exception;
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); }
@@ -261,7 +262,7 @@ class CodeEmitInfo: public CompilationResourceObj {
public:
// use scope from ValueStack
- CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers);
+ CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception = false);
// make a copy
CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL);
@@ -272,6 +273,7 @@ class CodeEmitInfo: public CompilationResourceObj {
IRScope* scope() const { return _scope; }
XHandlers* exception_handlers() const { return _exception_handlers; }
ValueStack* stack() const { return _stack; }
+ bool deoptimize_on_exception() const { return _deoptimize_on_exception; }
void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
@@ -309,7 +311,8 @@ class IR: public CompilationResourceObj {
int max_stack() const { return top_scope()->max_stack(); } // expensive
// ir manipulation
- void optimize();
+ void optimize_blocks();
+ void eliminate_null_checks();
void compute_predecessors();
void split_critical_edges();
void compute_code();
diff --git a/src/share/vm/c1/c1_Instruction.cpp b/src/share/vm/c1/c1_Instruction.cpp
index 985cb098e..a026b4cbe 100644
--- a/src/share/vm/c1/c1_Instruction.cpp
+++ b/src/share/vm/c1/c1_Instruction.cpp
@@ -34,6 +34,15 @@
// Implementation of Instruction
+int Instruction::dominator_depth() {
+ int result = -1;
+ if (block()) {
+ result = block()->dominator_depth();
+ }
+ assert(result != -1 || this->as_Local(), "Only locals have dominator depth -1");
+ return result;
+}
+
Instruction::Condition Instruction::mirror(Condition cond) {
switch (cond) {
case eql: return eql;
@@ -42,6 +51,8 @@ Instruction::Condition Instruction::mirror(Condition cond) {
case leq: return geq;
case gtr: return lss;
case geq: return leq;
+ case aeq: return beq;
+ case beq: return aeq;
}
ShouldNotReachHere();
return eql;
@@ -56,6 +67,8 @@ Instruction::Condition Instruction::negate(Condition cond) {
case leq: return gtr;
case gtr: return leq;
case geq: return lss;
+ case aeq: assert(false, "Above equal cannot be negated");
+ case beq: assert(false, "Below equal cannot be negated");
}
ShouldNotReachHere();
return eql;
@@ -70,10 +83,10 @@ void Instruction::update_exception_state(ValueStack* state) {
}
}
-
-Instruction* Instruction::prev(BlockBegin* block) {
+// prev() without needing the BlockBegin to be passed in
+Instruction* Instruction::prev() {
Instruction* p = NULL;
- Instruction* q = block;
+ Instruction* q = block();
while (q != this) {
assert(q != NULL, "this is not in the block's instruction list");
p = q; q = q->next();
@@ -122,15 +135,24 @@ void Instruction::print(InstructionPrinter& ip) {
// perform constant and interval tests on index value
bool AccessIndexed::compute_needs_range_check() {
- Constant* clength = length()->as_Constant();
- Constant* cindex = index()->as_Constant();
- if (clength && cindex) {
- IntConstant* l = clength->type()->as_IntConstant();
- IntConstant* i = cindex->type()->as_IntConstant();
- if (l && i && i->value() < l->value() && i->value() >= 0) {
- return false;
+
+ if (length()) {
+
+ Constant* clength = length()->as_Constant();
+ Constant* cindex = index()->as_Constant();
+ if (clength && cindex) {
+ IntConstant* l = clength->type()->as_IntConstant();
+ IntConstant* i = cindex->type()->as_IntConstant();
+ if (l && i && i->value() < l->value() && i->value() >= 0) {
+ return false;
+ }
}
}
+
+ if (!this->check_flag(NeedsRangeCheckFlag)) {
+ return false;
+ }
+
return true;
}
@@ -631,19 +653,25 @@ void BlockBegin::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
// of the inserted block, without recomputing the values of the other blocks
// in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
- BlockBegin* new_sux = new BlockBegin(end()->state()->bci());
+ int bci = sux->bci();
+ // critical edge splitting may introduce a goto after an if, and array
+ // bound check elimination may insert a predicate between the if and the
+ // goto. The bci of the goto can't be that of the if; otherwise
+ // the state and bci are inconsistent and a deoptimization triggered
+ // by the predicate would lead to incorrect execution or a crash.
+ BlockBegin* new_sux = new BlockBegin(bci);
// mark this block (special treatment when block order is computed)
new_sux->set(critical_edge_split_flag);
// This goto is not a safepoint.
Goto* e = new Goto(sux, false);
- new_sux->set_next(e, end()->state()->bci());
+ new_sux->set_next(e, bci);
new_sux->set_end(e);
// setup states
ValueStack* s = end()->state();
- new_sux->set_state(s->copy());
- e->set_state(s->copy());
+ new_sux->set_state(s->copy(s->kind(), bci));
+ e->set_state(s->copy(s->kind(), bci));
assert(new_sux->state()->locals_size() == s->locals_size(), "local size mismatch!");
assert(new_sux->state()->stack_size() == s->stack_size(), "stack size mismatch!");
assert(new_sux->state()->locks_size() == s->locks_size(), "locks size mismatch!");
@@ -960,15 +988,14 @@ void BlockEnd::set_begin(BlockBegin* begin) {
BlockList* sux = NULL;
if (begin != NULL) {
sux = begin->successors();
- } else if (_begin != NULL) {
+ } else if (this->begin() != NULL) {
// copy our sux list
- BlockList* sux = new BlockList(_begin->number_of_sux());
- for (int i = 0; i < _begin->number_of_sux(); i++) {
- sux->append(_begin->sux_at(i));
+ sux = new BlockList(this->begin()->number_of_sux());
+ for (int i = 0; i < this->begin()->number_of_sux(); i++) {
+ sux->append(this->begin()->sux_at(i));
}
}
_sux = sux;
- _begin = begin;
}
@@ -1008,7 +1035,38 @@ int Phi::operand_count() const {
}
}
+#ifdef ASSERT
+// Constructor of Assert
+Assert::Assert(Value x, Condition cond, bool unordered_is_true, Value y) : Instruction(illegalType)
+ , _x(x)
+ , _cond(cond)
+ , _y(y)
+{
+ set_flag(UnorderedIsTrueFlag, unordered_is_true);
+ assert(x->type()->tag() == y->type()->tag(), "types must match");
+ pin();
+
+ stringStream strStream;
+ Compilation::current()->method()->print_name(&strStream);
+
+ stringStream strStream1;
+ InstructionPrinter ip1(1, &strStream1);
+ ip1.print_instr(x);
+
+ stringStream strStream2;
+ InstructionPrinter ip2(1, &strStream2);
+ ip2.print_instr(y);
+ stringStream ss;
+ ss.print("Assertion %s %s %s in method %s", strStream1.as_string(), ip2.cond_name(cond), strStream2.as_string(), strStream.as_string());
+
+ _message = ss.as_string();
+}
+#endif
+
+void RangeCheckPredicate::check_state() {
+ assert(state()->kind() != ValueStack::EmptyExceptionState && state()->kind() != ValueStack::ExceptionState, "will deopt with empty state");
+}
void ProfileInvoke::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
diff --git a/src/share/vm/c1/c1_Instruction.hpp b/src/share/vm/c1/c1_Instruction.hpp
index 4fff026e0..b93525bf5 100644
--- a/src/share/vm/c1/c1_Instruction.hpp
+++ b/src/share/vm/c1/c1_Instruction.hpp
@@ -110,6 +110,8 @@ class ProfileCall;
class ProfileInvoke;
class RuntimeCall;
class MemBar;
+class RangeCheckPredicate;
+class Assert;
// A Value is a reference to the instruction creating the value
typedef Instruction* Value;
@@ -210,6 +212,10 @@ class InstructionVisitor: public StackObj {
virtual void do_ProfileInvoke (ProfileInvoke* x) = 0;
virtual void do_RuntimeCall (RuntimeCall* x) = 0;
virtual void do_MemBar (MemBar* x) = 0;
+ virtual void do_RangeCheckPredicate(RangeCheckPredicate* x) = 0;
+#ifdef ASSERT
+ virtual void do_Assert (Assert* x) = 0;
+#endif
};
@@ -306,8 +312,9 @@ class Instruction: public CompilationResourceObj {
void update_exception_state(ValueStack* state);
- //protected:
- public:
+ protected:
+ BlockBegin* _block; // Block that contains this instruction
+
void set_type(ValueType* type) {
assert(type != NULL, "type must exist");
_type = type;
@@ -342,6 +349,9 @@ class Instruction: public CompilationResourceObj {
ThrowIncompatibleClassChangeErrorFlag,
ProfileMDOFlag,
IsLinkedInBlockFlag,
+ NeedsRangeCheckFlag,
+ InWorkListFlag,
+ DeoptimizeOnException,
InstructionLastFlag
};
@@ -351,7 +361,7 @@ class Instruction: public CompilationResourceObj {
// 'globally' used condition values
enum Condition {
- eql, neq, lss, leq, gtr, geq
+ eql, neq, lss, leq, gtr, geq, aeq, beq
};
// Instructions may be pinned for many reasons and under certain conditions
@@ -381,6 +391,7 @@ class Instruction: public CompilationResourceObj {
, _pin_state(0)
, _type(type)
, _next(NULL)
+ , _block(NULL)
, _subst(NULL)
, _flags(0)
, _operand(LIR_OprFact::illegalOpr)
@@ -399,11 +410,13 @@ class Instruction: public CompilationResourceObj {
int printable_bci() const { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
void set_printable_bci(int bci) { _printable_bci = bci; }
#endif
+ int dominator_depth();
int use_count() const { return _use_count; }
int pin_state() const { return _pin_state; }
bool is_pinned() const { return _pin_state != 0 || PinAllInstructions; }
ValueType* type() const { return _type; }
- Instruction* prev(BlockBegin* block); // use carefully, expensive operation
+ BlockBegin *block() const { return _block; }
+ Instruction* prev(); // use carefully, expensive operation
Instruction* next() const { return _next; }
bool has_subst() const { return _subst != NULL; }
Instruction* subst() { return _subst == NULL ? this : _subst->subst(); }
@@ -432,6 +445,9 @@ class Instruction: public CompilationResourceObj {
assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
assert(next->can_be_linked(), "shouldn't link these instructions into list");
+ BlockBegin *block = this->block();
+ next->_block = block;
+
next->set_flag(Instruction::IsLinkedInBlockFlag, true);
_next = next;
return next;
@@ -444,6 +460,29 @@ class Instruction: public CompilationResourceObj {
return set_next(next);
}
+ // Fix the block pointers of trailing instructions when blocks are merged
+ void fixup_block_pointers() {
+ Instruction *cur = next()->next(); // next()'s block is set in set_next
+ while (cur && cur->_block != block()) {
+ cur->_block = block();
+ cur = cur->next();
+ }
+ }
+
+ Instruction *insert_after(Instruction *i) {
+ Instruction* n = _next;
+ set_next(i);
+ i->set_next(n);
+ return _next;
+ }
+
+ Instruction *insert_after_same_bci(Instruction *i) {
+#ifndef PRODUCT
+ i->set_printable_bci(printable_bci());
+#endif
+ return insert_after(i);
+ }
+
void set_subst(Instruction* subst) {
assert(subst == NULL ||
type()->base() == subst->type()->base() ||
@@ -452,6 +491,7 @@ class Instruction: public CompilationResourceObj {
}
void set_exception_handlers(XHandlers *xhandlers) { _exception_handlers = xhandlers; }
void set_exception_state(ValueStack* s) { check_state(s); _exception_state = s; }
+ void set_state_before(ValueStack* s) { check_state(s); _state_before = s; }
// machine-specifics
void set_operand(LIR_Opr operand) { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; }
@@ -509,6 +549,11 @@ class Instruction: public CompilationResourceObj {
virtual ExceptionObject* as_ExceptionObject() { return NULL; }
virtual UnsafeOp* as_UnsafeOp() { return NULL; }
virtual ProfileInvoke* as_ProfileInvoke() { return NULL; }
+ virtual RangeCheckPredicate* as_RangeCheckPredicate() { return NULL; }
+
+#ifdef ASSERT
+ virtual Assert* as_Assert() { return NULL; }
+#endif
virtual void visit(InstructionVisitor* v) = 0;
@@ -570,7 +615,6 @@ class AssertValues: public ValueVisitor {
LEAF(Phi, Instruction)
private:
- BlockBegin* _block; // the block to which the phi function belongs
int _pf_flags; // the flags of the phi function
int _index; // to value on operand stack (index < 0) or to local
public:
@@ -578,9 +622,9 @@ LEAF(Phi, Instruction)
Phi(ValueType* type, BlockBegin* b, int index)
: Instruction(type->base())
, _pf_flags(0)
- , _block(b)
, _index(index)
{
+ _block = b;
NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
if (type->is_illegal()) {
make_illegal();
@@ -603,8 +647,6 @@ LEAF(Phi, Instruction)
Value operand_at(int i) const;
int operand_count() const;
- BlockBegin* block() const { return _block; }
-
void set(Flag f) { _pf_flags |= f; }
void clear(Flag f) { _pf_flags &= ~f; }
bool is_set(Flag f) const { return (_pf_flags & f) != 0; }
@@ -670,6 +712,7 @@ LEAF(Constant, Instruction)
pin();
}
+ // generic
virtual bool can_trap() const { return state_before() != NULL; }
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
@@ -852,6 +895,7 @@ BASE(AccessIndexed, AccessArray)
, _length(length)
, _elt_type(elt_type)
{
+ set_flag(Instruction::NeedsRangeCheckFlag, true);
ASSERT_VALUES
}
@@ -860,6 +904,7 @@ BASE(AccessIndexed, AccessArray)
Value length() const { return _length; }
BasicType elt_type() const { return _elt_type; }
+ void clear_length() { _length = NULL; }
// perform elimination of range checks involving constants
bool compute_needs_range_check();
@@ -1524,6 +1569,7 @@ LEAF(BlockBegin, StateSplit)
int _bci; // start-bci of block
int _depth_first_number; // number of this block in a depth-first ordering
int _linear_scan_number; // number of this block in linear-scan ordering
+ int _dominator_depth;
int _loop_depth; // the loop nesting level of this block
int _loop_index; // number of the innermost loop of this block
int _flags; // the flags associated with this block
@@ -1535,6 +1581,7 @@ LEAF(BlockBegin, StateSplit)
// SSA specific fields: (factor out later)
BlockList _successors; // the successors of this block
BlockList _predecessors; // the predecessors of this block
+ BlockList _dominates; // list of blocks that are dominated by this block
BlockBegin* _dominator; // the dominator of this block
// SSA specific ends
BlockEnd* _end; // the last instruction of this block
@@ -1583,10 +1630,12 @@ LEAF(BlockBegin, StateSplit)
, _linear_scan_number(-1)
, _loop_depth(0)
, _flags(0)
+ , _dominator_depth(-1)
, _dominator(NULL)
, _end(NULL)
, _predecessors(2)
, _successors(2)
+ , _dominates(2)
, _exception_handlers(1)
, _exception_states(NULL)
, _exception_handler_pco(-1)
@@ -1603,6 +1652,7 @@ LEAF(BlockBegin, StateSplit)
, _total_preds(0)
, _stores_to_locals()
{
+ _block = this;
#ifndef PRODUCT
set_printable_bci(bci);
#endif
@@ -1612,8 +1662,10 @@ LEAF(BlockBegin, StateSplit)
int block_id() const { return _block_id; }
int bci() const { return _bci; }
BlockList* successors() { return &_successors; }
+ BlockList* dominates() { return &_dominates; }
BlockBegin* dominator() const { return _dominator; }
int loop_depth() const { return _loop_depth; }
+ int dominator_depth() const { return _dominator_depth; }
int depth_first_number() const { return _depth_first_number; }
int linear_scan_number() const { return _linear_scan_number; }
BlockEnd* end() const { return _end; }
@@ -1634,6 +1686,7 @@ LEAF(BlockBegin, StateSplit)
// manipulation
void set_dominator(BlockBegin* dom) { _dominator = dom; }
void set_loop_depth(int d) { _loop_depth = d; }
+ void set_dominator_depth(int d) { _dominator_depth = d; }
void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; }
void set_end(BlockEnd* end);
@@ -1695,7 +1748,8 @@ LEAF(BlockBegin, StateSplit)
parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand
critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split
linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan
- linear_scan_loop_end_flag = 1 << 10 // set during loop-detection for LinearScan
+ linear_scan_loop_end_flag = 1 << 10, // set during loop-detection for LinearScan
+ donot_eliminate_range_checks = 1 << 11 // set when range check elimination should skip this block
};
void set(Flag f) { _flags |= f; }
@@ -1728,7 +1782,6 @@ LEAF(BlockBegin, StateSplit)
BASE(BlockEnd, StateSplit)
private:
- BlockBegin* _begin;
BlockList* _sux;
protected:
@@ -1746,7 +1799,6 @@ BASE(BlockEnd, StateSplit)
// creation
BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint)
: StateSplit(type, state_before)
- , _begin(NULL)
, _sux(NULL)
{
set_flag(IsSafepointFlag, is_safepoint);
@@ -1754,7 +1806,8 @@ BASE(BlockEnd, StateSplit)
// accessors
bool is_safepoint() const { return check_flag(IsSafepointFlag); }
- BlockBegin* begin() const { return _begin; }
+ // Kept for compatibility with old code; new code should use block()
+ BlockBegin* begin() const { return _block; }
// manipulation
void set_begin(BlockBegin* begin);
@@ -1811,6 +1864,74 @@ LEAF(Goto, BlockEnd)
void set_direction(Direction d) { _direction = d; }
};
+#ifdef ASSERT
+LEAF(Assert, Instruction)
+ private:
+ Value _x;
+ Condition _cond;
+ Value _y;
+ char *_message;
+
+ public:
+ // creation
+ // unordered_is_true is valid for float/double compares only
+ Assert(Value x, Condition cond, bool unordered_is_true, Value y);
+
+ // accessors
+ Value x() const { return _x; }
+ Condition cond() const { return _cond; }
+ bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
+ Value y() const { return _y; }
+ const char *message() const { return _message; }
+
+ // generic
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); f->visit(&_y); }
+};
+#endif
+
+LEAF(RangeCheckPredicate, StateSplit)
+ private:
+ Value _x;
+ Condition _cond;
+ Value _y;
+
+ void check_state();
+
+ public:
+ // creation
+ // unordered_is_true is valid for float/double compares only
+ RangeCheckPredicate(Value x, Condition cond, bool unordered_is_true, Value y, ValueStack* state) : StateSplit(illegalType)
+ , _x(x)
+ , _cond(cond)
+ , _y(y)
+ {
+ ASSERT_VALUES
+ set_flag(UnorderedIsTrueFlag, unordered_is_true);
+ assert(x->type()->tag() == y->type()->tag(), "types must match");
+ this->set_state(state);
+ check_state();
+ }
+
+ // Always deoptimize
+ RangeCheckPredicate(ValueStack* state) : StateSplit(illegalType)
+ {
+ this->set_state(state);
+ _x = _y = NULL;
+ check_state();
+ }
+
+ // accessors
+ Value x() const { return _x; }
+ Condition cond() const { return _cond; }
+ bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
+ Value y() const { return _y; }
+
+ void always_fail() { _x = _y = NULL; }
+
+ // generic
+ virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_x); f->visit(&_y); }
+ HASHING3(RangeCheckPredicate, true, x()->subst(), y()->subst(), cond())
+};
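+
+// A RangeCheckPredicate reads as "if (x cond y) deoptimize". As a hedged,
+// illustrative sketch (names are made up), range check elimination may guard a
+// loop with predicates such as:
+//
+//   if i <= -1 then deoptimize!              // lower bound predicate
+//   if i_max |>=| a.length then deoptimize!  // upper bound, unsigned compare
+//
+// so that array accesses dominated by these predicates can omit their
+// per-access range checks.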
LEAF(If, BlockEnd)
private:
diff --git a/src/share/vm/c1/c1_InstructionPrinter.cpp b/src/share/vm/c1/c1_InstructionPrinter.cpp
index 68e0feb5b..4c88e50cb 100644
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp
@@ -57,6 +57,8 @@ const char* InstructionPrinter::cond_name(If::Condition cond) {
case If::leq: return "<=";
case If::gtr: return ">";
case If::geq: return ">=";
+ case If::aeq: return "|>=|"; // unsigned >=
+ case If::beq: return "|<=|"; // unsigned <=
}
ShouldNotReachHere();
return NULL;
@@ -181,6 +183,11 @@ void InstructionPrinter::print_indexed(AccessIndexed* indexed) {
output()->put('[');
print_value(indexed->index());
output()->put(']');
+ if (indexed->length() != NULL) {
+ output()->put('(');
+ print_value(indexed->length());
+ output()->put(')');
+ }
}
@@ -373,6 +380,7 @@ void InstructionPrinter::do_Constant(Constant* x) {
void InstructionPrinter::do_LoadField(LoadField* x) {
print_field(x);
output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
+ output()->print(" %s", x->field()->name()->as_utf8());
}
@@ -381,6 +389,7 @@ void InstructionPrinter::do_StoreField(StoreField* x) {
output()->print(" := ");
print_value(x->value());
output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
+ output()->print(" %s", x->field()->name()->as_utf8());
}
@@ -393,6 +402,9 @@ void InstructionPrinter::do_ArrayLength(ArrayLength* x) {
void InstructionPrinter::do_LoadIndexed(LoadIndexed* x) {
print_indexed(x);
output()->print(" (%c)", type2char(x->elt_type()));
+ if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
+ output()->print(" [rc]");
+ }
}
@@ -401,6 +413,9 @@ void InstructionPrinter::do_StoreIndexed(StoreIndexed* x) {
output()->print(" := ");
print_value(x->value());
output()->print(" (%c)", type2char(x->elt_type()));
+ if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
+ output()->print(" [rc]");
+ }
}
void InstructionPrinter::do_NegateOp(NegateOp* x) {
@@ -843,6 +858,25 @@ void InstructionPrinter::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
output()->put(')');
}
+void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) {
+
+ if (x->x() != NULL && x->y() != NULL) {
+ output()->print("if ");
+ print_value(x->x());
+ output()->print(" %s ", cond_name(x->cond()));
+ print_value(x->y());
+ output()->print(" then deoptimize!");
+ } else {
+ output()->print("always deoptimize!");
+ }
+}
+
+void InstructionPrinter::do_Assert(Assert* x) {
+ output()->print("assert ");
+ print_value(x->x());
+ output()->print(" %s ", cond_name(x->cond()));
+ print_value(x->y());
+}
void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
print_unsafe_object_op(x, "UnsafePrefetchWrite");
diff --git a/src/share/vm/c1/c1_InstructionPrinter.hpp b/src/share/vm/c1/c1_InstructionPrinter.hpp
index d1d99cc21..d8d6502eb 100644
--- a/src/share/vm/c1/c1_InstructionPrinter.hpp
+++ b/src/share/vm/c1/c1_InstructionPrinter.hpp
@@ -135,6 +135,8 @@ class InstructionPrinter: public InstructionVisitor {
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
+ virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
+ virtual void do_Assert (Assert* x);
};
#endif // PRODUCT
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index 2082b5268..df0828ee5 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -633,6 +633,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
case lir_ushr:
case lir_xadd:
case lir_xchg:
+ case lir_assert:
{
assert(op->as_Op2() != NULL, "must be");
LIR_Op2* op2 = (LIR_Op2*)op;
@@ -1112,6 +1113,11 @@ void LIR_OpLock::emit_code(LIR_Assembler* masm) {
}
}
+#ifdef ASSERT
+void LIR_OpAssert::emit_code(LIR_Assembler* masm) {
+ masm->emit_assert(this);
+}
+#endif
void LIR_OpDelay::emit_code(LIR_Assembler* masm) {
masm->emit_delay(this);
@@ -1771,6 +1777,8 @@ const char * LIR_Op::name() const {
case lir_cas_int: s = "cas_int"; break;
// LIR_OpProfileCall
case lir_profile_call: s = "profile_call"; break;
+ // LIR_OpAssert
+ case lir_assert: s = "assert"; break;
case lir_none: ShouldNotReachHere();break;
default: s = "illegal_op"; break;
}
@@ -2017,6 +2025,13 @@ void LIR_OpLock::print_instr(outputStream* out) const {
out->print("[lbl:0x%x]", stub()->entry());
}
+void LIR_OpAssert::print_instr(outputStream* out) const {
+ print_condition(out, condition()); out->print(" ");
+ in_opr1()->print(out); out->print(" ");
+ in_opr2()->print(out); out->print(", \"");
+ out->print("%s", msg()); out->print("\"");
+}
+
void LIR_OpDelay::print_instr(outputStream* out) const {
_op->print_on(out);
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index 72051f19f..5bd0e57d6 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -881,6 +881,7 @@ class LIR_OpLock;
class LIR_OpTypeCheck;
class LIR_OpCompareAndSwap;
class LIR_OpProfileCall;
+class LIR_OpAssert;
// LIR operation codes
@@ -1000,6 +1001,9 @@ enum LIR_Code {
, begin_opMDOProfile
, lir_profile_call
, end_opMDOProfile
+ , begin_opAssert
+ , lir_assert
+ , end_opAssert
};
@@ -1135,6 +1139,7 @@ class LIR_Op: public CompilationResourceObj {
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
+ virtual LIR_OpAssert* as_OpAssert() { return NULL; }
virtual void verify() const {}
};
@@ -1623,7 +1628,7 @@ class LIR_Op2: public LIR_Op {
, _tmp3(LIR_OprFact::illegalOpr)
, _tmp4(LIR_OprFact::illegalOpr)
, _tmp5(LIR_OprFact::illegalOpr) {
- assert(code == lir_cmp, "code check");
+ assert(code == lir_cmp || code == lir_assert, "code check");
}
LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
@@ -1683,7 +1688,7 @@ class LIR_Op2: public LIR_Op {
LIR_Opr tmp4_opr() const { return _tmp4; }
LIR_Opr tmp5_opr() const { return _tmp5; }
LIR_Condition condition() const {
- assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
+ assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
}
void set_condition(LIR_Condition condition) {
assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition;
@@ -1823,6 +1828,30 @@ class LIR_OpDelay: public LIR_Op {
CodeEmitInfo* call_info() const { return info(); }
};
+#ifdef ASSERT
+// LIR_OpAssert
+class LIR_OpAssert : public LIR_Op2 {
+ friend class LIR_OpVisitState;
+
+ private:
+ const char* _msg;
+ bool _halt;
+
+ public:
+ LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
+ : LIR_Op2(lir_assert, condition, opr1, opr2)
+ , _msg(msg)
+ , _halt(halt) {
+ }
+
+ const char* msg() const { return _msg; }
+ bool halt() const { return _halt; }
+
+ virtual void emit_code(LIR_Assembler* masm);
+ virtual LIR_OpAssert* as_OpAssert() { return this; }
+ virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
+#endif
// LIR_OpCompareAndSwap
class LIR_OpCompareAndSwap : public LIR_Op {
@@ -2196,6 +2225,9 @@ class LIR_List: public CompilationResourceObj {
void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
+#ifdef ASSERT
+ void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
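+ // Hedged usage sketch (debug builds only; operand names are illustrative),
+ // mirroring the call in LIRGenerator::do_Assert:
+ //   __ lir_assert(lir_cond_less, index_opr, length_opr, "index below length expected", true);
+ // This emits a LIR_OpAssert that reports (and, with halt == true, stops the
+ // VM) when the asserted condition does not hold at runtime.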
+#endif
};
void print_LIR(BlockList* blocks);
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 5cce9d0b7..87dd8dbae 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -210,6 +210,9 @@ class LIR_Assembler: public CompilationResourceObj {
void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
void arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info);
void intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op);
+#ifdef ASSERT
+ void emit_assert(LIR_OpAssert* op);
+#endif
void logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index b46acec14..84402a9ef 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -403,6 +403,10 @@ void LIRGenerator::walk(Value instr) {
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
assert(state != NULL, "state must be defined");
+#ifndef PRODUCT
+ state->verify();
+#endif
+
ValueStack* s = state;
for_each_state(s) {
if (s->kind() == ValueStack::EmptyExceptionState) {
@@ -453,7 +457,7 @@ CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ig
}
}
- return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
+ return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}
@@ -1792,11 +1796,18 @@ void LIRGenerator::do_LoadField(LoadField* x) {
}
#endif
+ bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
if (x->needs_null_check() &&
(needs_patching ||
- MacroAssembler::needs_explicit_null_check(x->offset()))) {
+ MacroAssembler::needs_explicit_null_check(x->offset()) ||
+ stress_deopt)) {
+ LIR_Opr obj = object.result();
+ if (stress_deopt) {
+ obj = new_register(T_OBJECT);
+ __ move(LIR_OprFact::oopConst(NULL), obj);
+ }
// emit an explicit null check because the offset is too large
- __ null_check(object.result(), new CodeEmitInfo(info));
+ __ null_check(obj, new CodeEmitInfo(info));
}
LIR_Opr reg = rlock_result(x, field_type);
@@ -1873,6 +1884,11 @@ void LIRGenerator::do_ArrayLength(ArrayLength* x) {
} else {
info = state_for(nc);
}
+ if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
+ LIR_Opr obj = new_register(T_OBJECT);
+ __ move(LIR_OprFact::oopConst(NULL), obj);
+ __ null_check(obj, new CodeEmitInfo(info));
+ }
}
__ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}
@@ -1883,14 +1899,11 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem length(this);
- bool needs_range_check = true;
+ bool needs_range_check = x->compute_needs_range_check();
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
+ if (use_length && needs_range_check) {
+ length.set_instruction(x->length());
+ length.load_item();
}
array.load_item();
@@ -1910,13 +1923,20 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
} else {
null_check_info = range_check_info;
}
+ if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
+ LIR_Opr obj = new_register(T_OBJECT);
+ __ move(LIR_OprFact::oopConst(NULL), obj);
+ __ null_check(obj, new CodeEmitInfo(null_check_info));
+ }
}
// emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
+ if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
+ __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
+ } else if (use_length) {
// TODO: use a (modified) version of array_range_check that does not require a
// constant length to be loaded to a register
__ cmp(lir_cond_belowEqual, length.result(), index.result());
@@ -2634,7 +2654,7 @@ void LIRGenerator::do_Base(Base* x) {
LIR_Opr lock = new_register(T_INT);
__ load_stack_address_monitor(0, lock);
- CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
+ CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
// receiver is guaranteed non-NULL so don't need CodeEmitInfo
@@ -2644,7 +2664,7 @@ void LIRGenerator::do_Base(Base* x) {
// increment invocation counters if needed
if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
- CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
+ CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
increment_invocation_counter(info);
}
@@ -3102,6 +3122,95 @@ void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
}
}
+void LIRGenerator::do_Assert(Assert *x) {
+#ifdef ASSERT
+ ValueTag tag = x->x()->type()->tag();
+ If::Condition cond = x->cond();
+
+ LIRItem xitem(x->x(), this);
+ LIRItem yitem(x->y(), this);
+ LIRItem* xin = &xitem;
+ LIRItem* yin = &yitem;
+
+ assert(tag == intTag, "Only integer assertions are valid!");
+
+ xin->load_item();
+ yin->dont_load_item();
+
+ set_no_result(x);
+
+ LIR_Opr left = xin->result();
+ LIR_Opr right = yin->result();
+
+ __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
+#endif
+}
+
+
+void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
+
+ Instruction *a = x->x();
+ Instruction *b = x->y();
+ if (!a || StressRangeCheckElimination) {
+ assert(!b || StressRangeCheckElimination, "B must also be null");
+
+ CodeEmitInfo *info = state_for(x, x->state());
+ CodeStub* stub = new PredicateFailedStub(info);
+
+ __ jump(stub);
+ } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
+ int a_int = a->type()->as_IntConstant()->value();
+ int b_int = b->type()->as_IntConstant()->value();
+
+ bool ok = false;
+
+ switch(x->cond()) {
+ case Instruction::eql: ok = (a_int == b_int); break;
+ case Instruction::neq: ok = (a_int != b_int); break;
+ case Instruction::lss: ok = (a_int < b_int); break;
+ case Instruction::leq: ok = (a_int <= b_int); break;
+ case Instruction::gtr: ok = (a_int > b_int); break;
+ case Instruction::geq: ok = (a_int >= b_int); break;
+ case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
+ case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
+ default: ShouldNotReachHere();
+ }
+
+ if (ok) {
+
+ CodeEmitInfo *info = state_for(x, x->state());
+ CodeStub* stub = new PredicateFailedStub(info);
+
+ __ jump(stub);
+ }
+ } else {
+
+ ValueTag tag = x->x()->type()->tag();
+ If::Condition cond = x->cond();
+ LIRItem xitem(x->x(), this);
+ LIRItem yitem(x->y(), this);
+ LIRItem* xin = &xitem;
+ LIRItem* yin = &yitem;
+
+ assert(tag == intTag, "Only integer deoptimizations are valid!");
+
+ xin->load_item();
+ yin->dont_load_item();
+ set_no_result(x);
+
+ LIR_Opr left = xin->result();
+ LIR_Opr right = yin->result();
+
+ CodeEmitInfo *info = state_for(x, x->state());
+ CodeStub* stub = new PredicateFailedStub(info);
+
+ __ cmp(lir_cond(cond), left, right);
+ __ branch(lir_cond(cond), right->type(), stub);
+ }
+}
+
+
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1);
LIRItem value(arg1, this);
diff --git a/src/share/vm/c1/c1_LIRGenerator.hpp b/src/share/vm/c1/c1_LIRGenerator.hpp
index aedd6a69c..4c70a9f64 100644
--- a/src/share/vm/c1/c1_LIRGenerator.hpp
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp
@@ -412,6 +412,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
case If::leq: l = lir_cond_lessEqual; break;
case If::geq: l = lir_cond_greaterEqual; break;
case If::gtr: l = lir_cond_greater; break;
+ case If::aeq: l = lir_cond_aboveEqual; break;
+ case If::beq: l = lir_cond_belowEqual; break;
};
return l;
}
@@ -534,6 +536,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
+ virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
+ virtual void do_Assert (Assert* x);
};
diff --git a/src/share/vm/c1/c1_LinearScan.cpp b/src/share/vm/c1/c1_LinearScan.cpp
index 1db08a857..65d4c60b6 100644
--- a/src/share/vm/c1/c1_LinearScan.cpp
+++ b/src/share/vm/c1/c1_LinearScan.cpp
@@ -6231,26 +6231,29 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
- LIR_Op2* prev_cmp = NULL;
-
- for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
- prev_op = instructions->at(j);
- if(prev_op->code() == lir_cmp) {
- assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
- prev_cmp = (LIR_Op2*)prev_op;
- assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
+ if (prev_branch->stub() == NULL) {
+
+ LIR_Op2* prev_cmp = NULL;
+
+ for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
+ prev_op = instructions->at(j);
+ if (prev_op->code() == lir_cmp) {
+ assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
+ prev_cmp = (LIR_Op2*)prev_op;
+ assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
+ }
}
- }
- assert(prev_cmp != NULL, "should have found comp instruction for branch");
- if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
+ assert(prev_cmp != NULL, "should have found comp instruction for branch");
+ if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
- TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
+ TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
- // eliminate a conditional branch to the immediate successor
- prev_branch->change_block(last_branch->block());
- prev_branch->negate_cond();
- prev_cmp->set_condition(prev_branch->cond());
- instructions->truncate(instructions->length() - 1);
+ // eliminate a conditional branch to the immediate successor
+ prev_branch->change_block(last_branch->block());
+ prev_branch->negate_cond();
+ prev_cmp->set_condition(prev_branch->cond());
+ instructions->truncate(instructions->length() - 1);
+ }
}
}
}
diff --git a/src/share/vm/c1/c1_Optimizer.cpp b/src/share/vm/c1/c1_Optimizer.cpp
index 7d24a126b..74e9d2240 100644
--- a/src/share/vm/c1/c1_Optimizer.cpp
+++ b/src/share/vm/c1/c1_Optimizer.cpp
@@ -178,7 +178,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
// 2) substitute conditional expression
// with an IfOp followed by a Goto
// cut if_ away and get node before
- Instruction* cur_end = if_->prev(block);
+ Instruction* cur_end = if_->prev();
// append constants of true- and false-block if necessary
// clone constants because original block must not be destroyed
@@ -202,7 +202,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
}
// append Goto to successor
- ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL;
+ ValueStack* state_before = if_->state_before();
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
@@ -367,10 +367,11 @@ class BlockMerger: public BlockClosure {
#endif
// find instruction before end & append first instruction of sux block
- Instruction* prev = end->prev(block);
+ Instruction* prev = end->prev();
Instruction* next = sux->next();
assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
prev->set_next(next);
+ prev->fixup_block_pointers();
sux->disconnect_from_graph();
block->set_end(sux->end());
// add exception handlers of deleted block, if any
@@ -533,6 +534,8 @@ public:
void do_ProfileInvoke (ProfileInvoke* x);
void do_RuntimeCall (RuntimeCall* x);
void do_MemBar (MemBar* x);
+ void do_RangeCheckPredicate(RangeCheckPredicate* x);
+ void do_Assert (Assert* x);
};
@@ -714,6 +717,8 @@ void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_las
void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {}
void NullCheckVisitor::do_MemBar (MemBar* x) {}
+void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
+void NullCheckVisitor::do_Assert (Assert* x) {}
void NullCheckEliminator::visit(Value* p) {
diff --git a/src/share/vm/c1/c1_RangeCheckElimination.cpp b/src/share/vm/c1/c1_RangeCheckElimination.cpp
new file mode 100644
index 000000000..40c448a39
--- /dev/null
+++ b/src/share/vm/c1/c1_RangeCheckElimination.cpp
@@ -0,0 +1,1517 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_ValueStack.hpp"
+#include "c1/c1_RangeCheckElimination.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_Canonicalizer.hpp"
+#include "c1/c1_ValueMap.hpp"
+#include "ci/ciMethodData.hpp"
+#include "runtime/deoptimization.hpp"
+
+// Macros for the Trace and the Assertion flag
+#ifdef ASSERT
+#define TRACE_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination) { code; }
+#define ASSERT_RANGE_CHECK_ELIMINATION(code) if (AssertRangeCheckElimination) { code; }
+#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination || AssertRangeCheckElimination) { code; }
+#else
+#define TRACE_RANGE_CHECK_ELIMINATION(code)
+#define ASSERT_RANGE_CHECK_ELIMINATION(code)
+#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code)
+#endif
+
+// Entry point for the optimization
+void RangeCheckElimination::eliminate(IR *ir) {
+ bool do_elimination = ir->compilation()->has_access_indexed();
+ ASSERT_RANGE_CHECK_ELIMINATION(do_elimination = true);
+ if (do_elimination) {
+ RangeCheckEliminator rce(ir);
+ }
+}
+
+// Constructor
+RangeCheckEliminator::RangeCheckEliminator(IR *ir) :
+ _bounds(Instruction::number_of_instructions(), NULL),
+ _access_indexed_info(Instruction::number_of_instructions(), NULL)
+{
+ _visitor.set_range_check_eliminator(this);
+ _ir = ir;
+ _number_of_instructions = Instruction::number_of_instructions();
+ _optimistic = ir->compilation()->is_optimistic();
+
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->print_cr("");
+ tty->print_cr("Range check elimination");
+ ir->method()->print_name(tty);
+ tty->print_cr("");
+ );
+
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->print_cr("optimistic=%d", (int)_optimistic);
+ );
+
+#ifdef ASSERT
+ // Verifies several conditions that must hold for the IR input. Used for debugging purposes only.
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->print_cr("Verification of IR . . .");
+ );
+ Verification verification(ir);
+#endif
+
+ // Set process block flags
+ // Optimization: a block is only processed if it contains an AccessIndexed
+ // instruction or if one of its children in the dominator tree contains one.
+ set_process_block_flags(ir->start());
+
+ // Pass over instructions in the dominator tree
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->print_cr("Starting pass over dominator tree . . .")
+ );
+ calc_bounds(ir->start(), NULL);
+
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->print_cr("Finished!")
+ );
+}
+
+// Instruction specific work for some instructions
+// Constant
+void RangeCheckEliminator::Visitor::do_Constant(Constant *c) {
+ IntConstant *ic = c->type()->as_IntConstant();
+ if (ic != NULL) {
+ int value = ic->value();
+ _bound = new Bound(value, NULL, value, NULL);
+ }
+}
+
+// LogicOp
+void RangeCheckEliminator::Visitor::do_LogicOp(LogicOp *lo) {
+ if (lo->type()->as_IntType() && lo->op() == Bytecodes::_iand && (lo->x()->as_Constant() || lo->y()->as_Constant())) {
+ int constant = 0;
+ Constant *c = lo->x()->as_Constant();
+ if (c != NULL) {
+ constant = c->type()->as_IntConstant()->value();
+ } else {
+ constant = lo->y()->as_Constant()->type()->as_IntConstant()->value();
+ }
+ if (constant >= 0) {
+ _bound = new Bound(0, NULL, constant, NULL);
+ }
+ }
+}
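+
+// Illustration (hedged Java-level sketch):
+//   int j = i & 0x7;   // iand with a non-negative constant -> bound [0, 7] for j
+//   a[j] = 0;          // upper check removable once a.length > 7 is proven
+// Only masks with a non-negative constant produce a bound here.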
+
+// Phi
+void RangeCheckEliminator::Visitor::do_Phi(Phi *phi) {
+ if (!phi->type()->as_IntType() && !phi->type()->as_ObjectType()) return;
+
+ BlockBegin *block = phi->block();
+ int op_count = phi->operand_count();
+ bool has_upper = true;
+ bool has_lower = true;
+ assert(phi, "Phi must not be null");
+ Bound *bound = NULL;
+
+ // TODO: support more difficult phis
+ for (int i=0; i<op_count; i++) {
+ Value v = phi->operand_at(i);
+
+ if (v == phi) continue;
+
+ // Check if instruction is connected with phi itself
+ Op2 *op2 = v->as_Op2();
+ if (op2 != NULL) {
+ Value x = op2->x();
+ Value y = op2->y();
+ if ((x == phi || y == phi)) {
+ Value other = x;
+ if (other == phi) {
+ other = y;
+ }
+ ArithmeticOp *ao = v->as_ArithmeticOp();
+ if (ao != NULL && ao->op() == Bytecodes::_iadd) {
+ assert(ao->op() == Bytecodes::_iadd, "Has to be add!");
+ if (ao->type()->as_IntType()) {
+ Constant *c = other->as_Constant();
+ if (c != NULL) {
+ assert(c->type()->as_IntConstant(), "Constant has to be of type integer");
+ int value = c->type()->as_IntConstant()->value();
+ if (value == 1) {
+ has_upper = false;
+ } else if (value > 1) {
+ // Overflow not guaranteed
+ has_upper = false;
+ has_lower = false;
+ } else if (value < 0) {
+ has_lower = false;
+ }
+ continue;
+ }
+ }
+ }
+ }
+ }
+
+ // No connection -> new bound
+ Bound *v_bound = _rce->get_bound(v);
+ Bound *cur_bound;
+ int cur_constant = 0;
+ Value cur_value = v;
+
+ if (v->type()->as_IntConstant()) {
+ cur_constant = v->type()->as_IntConstant()->value();
+ cur_value = NULL;
+ }
+ if (!v_bound->has_upper() || !v_bound->has_lower()) {
+ cur_bound = new Bound(cur_constant, cur_value, cur_constant, cur_value);
+ } else {
+ cur_bound = v_bound;
+ }
+ if (cur_bound) {
+ if (!bound) {
+ bound = cur_bound->copy();
+ } else {
+ bound->or_op(cur_bound);
+ }
+ } else {
+ // No bound!
+ bound = NULL;
+ break;
+ }
+ }
+
+ if (bound) {
+ if (!has_upper) {
+ bound->remove_upper();
+ }
+ if (!has_lower) {
+ bound->remove_lower();
+ }
+ _bound = bound;
+ } else {
+ _bound = new Bound();
+ }
+}
+
+
+// ArithmeticOp
+void RangeCheckEliminator::Visitor::do_ArithmeticOp(ArithmeticOp *ao) {
+ Value x = ao->x();
+ Value y = ao->y();
+
+ if (ao->op() == Bytecodes::_irem) {
+ Bound* x_bound = _rce->get_bound(x);
+ Bound* y_bound = _rce->get_bound(y);
+ if (x_bound->lower() >= 0 && x_bound->lower_instr() == NULL && y->as_ArrayLength() != NULL) {
+ _bound = new Bound(0, NULL, -1, y);
+ } else {
+ _bound = new Bound();
+ }
+ } else if (!x->as_Constant() || !y->as_Constant()) {
+ assert(!x->as_Constant() || !y->as_Constant(), "One of the operands must be non-constant!");
+ if (((x->as_Constant() || y->as_Constant()) && (ao->op() == Bytecodes::_iadd)) || (y->as_Constant() && ao->op() == Bytecodes::_isub)) {
+ assert(ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub, "Operand must be iadd or isub");
+
+ if (y->as_Constant()) {
+ Value tmp = x;
+ x = y;
+ y = tmp;
+ }
+ assert(x->as_Constant()->type()->as_IntConstant(), "Constant must be int constant!");
+
+ // Constant now in x
+ int const_value = x->as_Constant()->type()->as_IntConstant()->value();
+ if (ao->op() == Bytecodes::_iadd || const_value != min_jint) {
+ if (ao->op() == Bytecodes::_isub) {
+ const_value = -const_value;
+ }
+
+ Bound * bound = _rce->get_bound(y);
+ if (bound->has_upper() && bound->has_lower()) {
+ int new_lower = bound->lower() + const_value;
+ jlong new_lowerl = ((jlong)bound->lower()) + const_value;
+ int new_upper = bound->upper() + const_value;
+ jlong new_upperl = ((jlong)bound->upper()) + const_value;
+
+ if (((jlong)new_lower) == new_lowerl && ((jlong)new_upper == new_upperl)) {
+ Bound *newBound = new Bound(new_lower, bound->lower_instr(), new_upper, bound->upper_instr());
+ _bound = newBound;
+ } else {
+ // overflow
+ _bound = new Bound();
+ }
+ } else {
+ _bound = new Bound();
+ }
+ } else {
+ _bound = new Bound();
+ }
+ } else {
+ Bound *bound = _rce->get_bound(x);
+ if (ao->op() == Bytecodes::_isub) {
+ if (bound->lower_instr() == y) {
+ _bound = new Bound(Instruction::geq, NULL, bound->lower());
+ } else {
+ _bound = new Bound();
+ }
+ } else {
+ _bound = new Bound();
+ }
+ }
+ }
+}
+
+// IfOp
+void RangeCheckEliminator::Visitor::do_IfOp(IfOp *ifOp)
+{
+ if (ifOp->tval()->type()->as_IntConstant() && ifOp->fval()->type()->as_IntConstant()) {
+ int min = ifOp->tval()->type()->as_IntConstant()->value();
+ int max = ifOp->fval()->type()->as_IntConstant()->value();
+ if (min > max) {
+ // min ^= max ^= min ^= max;
+ int tmp = min;
+ min = max;
+ max = tmp;
+ }
+ _bound = new Bound(min, NULL, max, NULL);
+ }
+}
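+
+// Illustration (hedged Java-level sketch): for a conditional move such as
+//   int j = flag ? 2 : 5;   // IfOp with two int constants
+// the visitor records the bound [2, 5] for j, swapping min and max if needed.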
+
+// Get bound. Returns the current bound on Value v. Normally this is the topmost element on the bound stack.
+RangeCheckEliminator::Bound *RangeCheckEliminator::get_bound(Value v) {
+ // Wrong type or NULL -> No bound
+ if (!v || (!v->type()->as_IntType() && !v->type()->as_ObjectType())) return NULL;
+
+ if (!_bounds[v->id()]) {
+ // First (default) bound is calculated
+ // Create BoundStack
+ _bounds[v->id()] = new BoundStack();
+ _visitor.clear_bound();
+ Value visit_value = v;
+ visit_value->visit(&_visitor);
+ Bound *bound = _visitor.bound();
+ if (bound) {
+ _bounds[v->id()]->push(bound);
+ }
+ if (_bounds[v->id()]->length() == 0) {
+ assert(!(v->as_Constant() && v->type()->as_IntConstant()), "constants not handled here");
+ _bounds[v->id()]->push(new Bound());
+ }
+ } else if (_bounds[v->id()]->length() == 0) {
+ // To avoid endless loops, bound is currently in calculation -> nothing known about it
+ return new Bound();
+ }
+
+ // Return bound
+ return _bounds[v->id()]->top();
+}
+
+// Update bound
+void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant) {
+ if (cond == Instruction::gtr) {
+ cond = Instruction::geq;
+ constant++;
+ } else if (cond == Instruction::lss) {
+ cond = Instruction::leq;
+ constant--;
+ }
+ Bound *bound = new Bound(cond, value, constant);
+ update_bound(pushed, v, bound);
+}
+
+// Checks for loop invariance. Returns true if the instruction is defined outside of the loop identified by loop_header.
+bool RangeCheckEliminator::loop_invariant(BlockBegin *loop_header, Instruction *instruction) {
+ assert(loop_header, "Loop header must not be null!");
+ if (!instruction) return true;
+ return instruction->dominator_depth() < loop_header->dominator_depth();
+}
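+
+// Example (illustrative): in
+//   for (int i = 0; i < a.length; i++) { a[i] = 0; }
+// the definition of a dominates the loop header, so its dominator depth is
+// smaller than the header's and a is treated as loop invariant.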
+
+// Update bound. Pushes a new bound onto the stack. Tries to do a conjunction with the current bound.
+void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Bound *bound) {
+ if (v->as_Constant()) {
+ // No bound update for constants
+ return;
+ }
+ if (!_bounds[v->id()]) {
+ get_bound(v);
+ assert(_bounds[v->id()], "Now Stack must exist");
+ }
+ Bound *top = NULL;
+ if (_bounds[v->id()]->length() > 0) {
+ top = _bounds[v->id()]->top();
+ }
+ if (top) {
+ bound->and_op(top);
+ }
+ _bounds[v->id()]->push(bound);
+ pushed.append(v->id());
+}
+
+// Add instruction + idx for in-block motion
+void RangeCheckEliminator::add_access_indexed_info(InstructionList &indices, int idx, Value instruction, AccessIndexed *ai) {
+ int id = instruction->id();
+ AccessIndexedInfo *aii = _access_indexed_info[id];
+ if (aii == NULL) {
+ aii = new AccessIndexedInfo();
+ _access_indexed_info[id] = aii;
+ indices.append(instruction);
+ aii->_min = idx;
+ aii->_max = idx;
+ aii->_list = new AccessIndexedList();
+ } else if (idx >= aii->_min && idx <= aii->_max) {
+ remove_range_check(ai);
+ return;
+ }
+ aii->_min = MIN2(aii->_min, idx);
+ aii->_max = MAX2(aii->_max, idx);
+ aii->_list->append(ai);
+}
+
+// In-block motion. Tries to reorder checks in order to eliminate some of them.
+// Example:
+//   a[i] = 0;
+//   a[i+2] = 0;
+//   a[i+1] = 0;
+// Here the check for a[i+1] is recognized as unnecessary: its index lies between
+// the already-covered indices i and i+2. Afterwards i is checked only once, for
+// i >= 0 and i+2 < a.length, before the first array access; if that combined
+// check fails, the code deoptimizes.
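+//
+// A hedged sketch of the transformed code for the example above (predicate
+// syntax as printed by InstructionPrinter; the exact IR shape may differ):
+//   if i+2 |>=| a.length then deoptimize!   // unsigned compare, covers max offset
+//   if i <= -1 then deoptimize!             // covers min offset
+//   a[i] = 0; a[i+2] = 0; a[i+1] = 0;       // all three accesses run unchecked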
+void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays) {
+ InstructionList indices;
+
+ // Now iterate over all arrays
+ for (int i=0; i<arrays.length(); i++) {
+ int max_constant = -1;
+ AccessIndexedList list_constant;
+ Value array = arrays.at(i);
+
+ // For all AccessIndexed-instructions in this block concerning the current array.
+ for(int j=0; j<accessIndexed.length(); j++) {
+ AccessIndexed *ai = accessIndexed.at(j);
+ if (ai->array() != array || !ai->check_flag(Instruction::NeedsRangeCheckFlag)) continue;
+
+ Value index = ai->index();
+ Constant *c = index->as_Constant();
+ if (c != NULL) {
+ int constant_value = c->type()->as_IntConstant()->value();
+ if (constant_value >= 0) {
+ if (constant_value <= max_constant) {
+ // No range check needed for this
+ remove_range_check(ai);
+ } else {
+ max_constant = constant_value;
+ list_constant.append(ai);
+ }
+ }
+ } else {
+ int last_integer = 0;
+ Instruction *last_instruction = index;
+ int base = 0;
+ ArithmeticOp *ao = index->as_ArithmeticOp();
+
+ while (ao != NULL && (ao->x()->as_Constant() || ao->y()->as_Constant()) && (ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub)) {
+ c = ao->y()->as_Constant();
+ Instruction *other = ao->x();
+ if (!c && ao->op() == Bytecodes::_iadd) {
+ c = ao->x()->as_Constant();
+ other = ao->y();
+ }
+
+ if (c) {
+ int value = c->type()->as_IntConstant()->value();
+ if (value != min_jint) {
+ if (ao->op() == Bytecodes::_isub) {
+ value = -value;
+ }
+ base += value;
+ last_integer = base;
+ last_instruction = other;
+ }
+ index = other;
+ } else {
+ break;
+ }
+ ao = index->as_ArithmeticOp();
+ }
+ add_access_indexed_info(indices, last_integer, last_instruction, ai);
+ }
+ }
+
+ // Iterate over all different indices
+ if (_optimistic) {
+ for (int i=0; i<indices.length(); i++) {
+ Instruction *index_instruction = indices.at(i);
+ AccessIndexedInfo *info = _access_indexed_info[index_instruction->id()];
+ assert(info != NULL, "Info must not be null");
+
+ // If idx < 0 and max > 0, max + idx may fall between 0 and
+ // length-1; and if min < 0, min + idx may overflow and become >=
+ // 0. The predicate would not trigger, yet some accesses could
+ // still use a negative index. This test guarantees that, for the
+ // min and max values that are kept, the predicate cannot let
+ // any incorrect access happen.
+ bool range_cond = (info->_max < 0 || info->_max + min_jint <= info->_min);
+
+ // Generate code only if more than 2 range checks can be eliminated this way,
+ // since the predicate itself costs at least 2 comparisons.
+ if (info->_list->length() > 2 && range_cond) {
+ AccessIndexed *first = info->_list->at(0);
+ Instruction *insert_position = first->prev();
+ assert(insert_position->next() == first, "prev was calculated");
+ ValueStack *state = first->state_before();
+
+ // Load min Constant
+ Constant *min_constant = NULL;
+ if (info->_min != 0) {
+ min_constant = new Constant(new IntConstant(info->_min));
+ NOT_PRODUCT(min_constant->set_printable_bci(first->printable_bci()));
+ insert_position = insert_position->insert_after(min_constant);
+ }
+
+ // Load max Constant
+ Constant *max_constant = NULL;
+ if (info->_max != 0) {
+ max_constant = new Constant(new IntConstant(info->_max));
+ NOT_PRODUCT(max_constant->set_printable_bci(first->printable_bci()));
+ insert_position = insert_position->insert_after(max_constant);
+ }
+
+ // Load array length
+ Value length_instr = first->length();
+ if (!length_instr) {
+ ArrayLength *length = new ArrayLength(array, first->state_before()->copy());
+ length->set_exception_state(length->state_before());
+ length->set_flag(Instruction::DeoptimizeOnException, true);
+ insert_position = insert_position->insert_after_same_bci(length);
+ length_instr = length;
+ }
+
+ // Calculate lower bound
+ Instruction *lower_compare = index_instruction;
+ if (min_constant) {
+ ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, min_constant, lower_compare, false, NULL);
+ insert_position = insert_position->insert_after_same_bci(ao);
+ lower_compare = ao;
+ }
+
+ // Calculate upper bound
+ Instruction *upper_compare = index_instruction;
+ if (max_constant) {
+ ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, max_constant, upper_compare, false, NULL);
+ insert_position = insert_position->insert_after_same_bci(ao);
+ upper_compare = ao;
+ }
+
+ // Unsigned-compare trick: upper |>=| length also traps negative (signed) values
+ int bci = NOT_PRODUCT(first->printable_bci()) PRODUCT_ONLY(-1);
+ insert_position = predicate(upper_compare, Instruction::aeq, length_instr, state, insert_position, bci);
+ insert_position = predicate_cmp_with_const(lower_compare, Instruction::leq, -1, state, insert_position);
+ for (int j = 0; j<info->_list->length(); j++) {
+ AccessIndexed *ai = info->_list->at(j);
+ remove_range_check(ai);
+ }
+ }
+ _access_indexed_info[index_instruction->id()] = NULL;
+ }
+ indices.clear();
+
+ if (list_constant.length() > 1) {
+ AccessIndexed *first = list_constant.at(0);
+ Instruction *insert_position = first->prev();
+ ValueStack *state = first->state_before();
+ // Load max Constant
+ Constant *constant = new Constant(new IntConstant(max_constant));
+ NOT_PRODUCT(constant->set_printable_bci(first->printable_bci()));
+ insert_position = insert_position->insert_after(constant);
+ Instruction *compare_instr = constant;
+ Value length_instr = first->length();
+ if (!length_instr) {
+ ArrayLength *length = new ArrayLength(array, state->copy());
+ length->set_exception_state(length->state_before());
+ length->set_flag(Instruction::DeoptimizeOnException, true);
+ insert_position = insert_position->insert_after_same_bci(length);
+ length_instr = length;
+ }
+ // Compare for greater than or equal to array length
+ insert_position = predicate(compare_instr, Instruction::geq, length_instr, state, insert_position);
+ for (int j = 0; j<list_constant.length(); j++) {
+ AccessIndexed *ai = list_constant.at(j);
+ remove_range_check(ai);
+ }
+ }
+ }
+ }
+}
+
+bool RangeCheckEliminator::set_process_block_flags(BlockBegin *block) {
+ Instruction *cur = block;
+ bool process = false;
+
+ while (cur) {
+ process |= (cur->as_AccessIndexed() != NULL);
+ cur = cur->next();
+ }
+
+ BlockList *dominates = block->dominates();
+ for (int i=0; i<dominates->length(); i++) {
+ BlockBegin *next = dominates->at(i);
+ process |= set_process_block_flags(next);
+ }
+
+ if (!process) {
+ block->set(BlockBegin::donot_eliminate_range_checks);
+ }
+ return process;
+}
+
+bool RangeCheckEliminator::is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper) {
+ bool upper_check = true;
+ assert(lower_instr || lower >= 0, "If no lower_instr is present, lower must be non-negative");
+ assert(!lower_instr || lower_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+ assert(!upper_instr || upper_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+ assert(array_instr, "Array instruction must exist");
+ assert(array_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+ assert(!length_instr || length_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+
+ if (upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr) {
+ // static check
+ if (upper >= 0) return false; // would always trigger a deopt:
+ // array_length + x >= array_length, x >= 0 is always true
+ upper_check = false;
+ }
+ if (lower_instr && lower_instr->as_ArrayLength() && lower_instr->as_ArrayLength()->array() == array_instr) {
+ if (lower > 0) return false;
+ }
+ // No upper check required -> skip
+ if (upper_check && upper_instr && upper_instr->type()->as_ObjectType() && upper_instr == array_instr) {
+ // upper_instr is object means that the upper bound is the length
+ // of the upper_instr.
+ return false;
+ }
+ return true;
+}
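+
+// Example of the static rejection above (illustrative): for an access like
+//   a[a.length + 1] = 0;   // upper_instr == a.length, upper == 1 >= 0
+// the predicate "a.length + 1 |>=| a.length" would hold on every execution,
+// so inserting it would deoptimize unconditionally; such cases return false.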
+
+Instruction* RangeCheckEliminator::insert_after(Instruction* insert_position, Instruction* instr, int bci) {
+ if (bci != -1) {
+ NOT_PRODUCT(instr->set_printable_bci(bci));
+ return insert_position->insert_after(instr);
+ } else {
+ return insert_position->insert_after_same_bci(instr);
+ }
+}
+
+Instruction* RangeCheckEliminator::predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
+ RangeCheckPredicate *deoptimize = new RangeCheckPredicate(left, cond, true, right, state->copy());
+ return insert_after(insert_position, deoptimize, bci);
+}
+
+Instruction* RangeCheckEliminator::predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
+ Constant *const_instr = new Constant(new IntConstant(constant));
+ insert_position = insert_after(insert_position, const_instr, bci);
+ return predicate(instr, cond, const_instr, state, insert_position);
+}
+
+Instruction* RangeCheckEliminator::predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
+ Constant *constant = new Constant(new IntConstant(left_const));
+ insert_position = insert_after(insert_position, constant, bci);
+ ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, left, false, NULL);
+ insert_position = insert_position->insert_after_same_bci(ao);
+ return predicate(ao, cond, right, state, insert_position);
+}
+
+Instruction* RangeCheckEliminator::predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
+ Constant *const_instr = new Constant(new IntConstant(constant));
+ insert_position = insert_after(insert_position, const_instr, bci);
+ return predicate_add(left, left_const, cond, const_instr, state, insert_position);
+}
+
+// Insert deoptimization
+void RangeCheckEliminator::insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper, AccessIndexed *ai) {
+ assert(is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, lower, upper_instr, upper), "should have been tested before");
+ bool upper_check = !(upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr);
+
+ int bci = NOT_PRODUCT(ai->printable_bci()) PRODUCT_ONLY(-1);
+ if (lower_instr) {
+ assert(!lower_instr->type()->as_ObjectType(), "Must not be object type");
+ if (lower == 0) {
+ // Compare for less than 0
+ insert_position = predicate_cmp_with_const(lower_instr, Instruction::lss, 0, state, insert_position, bci);
+ } else if (lower > 0) {
+ // Add lower, then compare for less than 0
+ insert_position = predicate_add_cmp_with_const(lower_instr, lower, Instruction::lss, 0, state, insert_position, bci);
+ } else {
+ assert(lower < 0, "");
+ // lower_instr + lower < 0 is equivalent to lower_instr <= -(lower + 1)
+ lower++;
+ lower = -lower;
+ // Compare for smaller than or equal to the adjusted constant
+ insert_position = predicate_cmp_with_const(lower_instr, Instruction::leq, lower, state, insert_position, bci);
+ }
+ }
+
+ // No upper check required -> skip
+ if (!upper_check) return;
+
+ // We need to know length of array
+ if (!length_instr) {
+ // Load length if necessary
+ ArrayLength *length = new ArrayLength(array_instr, state->copy());
+ NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
+ length->set_exception_state(length->state_before());
+ length->set_flag(Instruction::DeoptimizeOnException, true);
+ insert_position = insert_position->insert_after(length);
+ length_instr = length;
+ }
+
+ if (!upper_instr) {
+ // Compare for geq array.length
+ insert_position = predicate_cmp_with_const(length_instr, Instruction::leq, upper, state, insert_position, bci);
+ } else {
+ if (upper_instr->type()->as_ObjectType()) {
+ assert(state, "must not be null");
+ assert(upper_instr != array_instr, "should be");
+ ArrayLength *length = new ArrayLength(upper_instr, state->copy());
+ NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
+ length->set_flag(Instruction::DeoptimizeOnException, true);
+ length->set_exception_state(length->state_before());
+ insert_position = insert_position->insert_after(length);
+ upper_instr = length;
+ }
+ assert(upper_instr->type()->as_IntType(), "Must not be object type!");
+
+ if (upper == 0) {
+ // Compare for geq array.length
+ insert_position = predicate(upper_instr, Instruction::geq, length_instr, state, insert_position, bci);
+ } else if (upper < 0) {
+ // Compare for geq array.length
+ insert_position = predicate_add(upper_instr, upper, Instruction::geq, length_instr, state, insert_position, bci);
+ } else {
+ assert(upper > 0, "");
+ upper = -upper;
+ // Compare for geq array.length
+ insert_position = predicate_add(length_instr, upper, Instruction::leq, upper_instr, state, insert_position, bci);
+ }
+ }
+}
+
+// Add if condition
+void RangeCheckEliminator::add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition) {
+ if (y->as_Constant()) return;
+
+ int const_value = 0;
+ Value instr_value = x;
+ Constant *c = x->as_Constant();
+ ArithmeticOp *ao = x->as_ArithmeticOp();
+
+ if (c != NULL) {
+ const_value = c->type()->as_IntConstant()->value();
+ instr_value = NULL;
+ } else if (ao != NULL && (!ao->x()->as_Constant() || !ao->y()->as_Constant()) && ((ao->op() == Bytecodes::_isub && ao->y()->as_Constant()) || ao->op() == Bytecodes::_iadd)) {
+ assert(!ao->x()->as_Constant() || !ao->y()->as_Constant(), "At least one operand must be non-constant!");
+ assert(ao->op() == Bytecodes::_isub || ao->op() == Bytecodes::_iadd, "Operation has to be add or sub!");
+ c = ao->x()->as_Constant();
+ if (c != NULL) {
+ const_value = c->type()->as_IntConstant()->value();
+ instr_value = ao->y();
+ } else {
+ c = ao->y()->as_Constant();
+ if (c != NULL) {
+ const_value = c->type()->as_IntConstant()->value();
+ instr_value = ao->x();
+ }
+ }
+ if (ao->op() == Bytecodes::_isub) {
+ assert(ao->y()->as_Constant(), "1 - x not supported, only x - 1 is valid!");
+ if (const_value > min_jint) {
+ const_value = -const_value;
+ } else {
+ const_value = 0;
+ instr_value = x;
+ }
+ }
+ }
+
+ update_bound(pushed, y, condition, instr_value, const_value);
+}
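+
+// Example (illustrative): for "if (i < limit - 1)" taken on its true edge,
+// x is the ArithmeticOp "limit - 1", so instr_value becomes limit and
+// const_value becomes -1; update_bound then tightens the strict "lss" into
+// "leq" by decrementing the constant, recording i <= limit - 2.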
+
+// Process If
+void RangeCheckEliminator::process_if(IntegerStack &pushed, BlockBegin *block, If *cond) {
+ // Only process if this block is exactly one of the true/false successors, not both (both can occur)
+ if ((cond->tsux() == block || cond->fsux() == block) && cond->tsux() != cond->fsux()) {
+ Instruction::Condition condition = cond->cond();
+ if (cond->fsux() == block) {
+ condition = Instruction::negate(condition);
+ }
+ Value x = cond->x();
+ Value y = cond->y();
+ if (x->type()->as_IntType() && y->type()->as_IntType()) {
+ add_if_condition(pushed, y, x, condition);
+ add_if_condition(pushed, x, y, Instruction::mirror(condition));
+ }
+ }
+}
+
+// Process access indexed
+void RangeCheckEliminator::process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2)
+ );
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->print_cr("Access indexed: index=%d length=%d", ai->index()->id(), (ai->length() != NULL ? ai->length()->id() :-1 ))
+ );
+
+ if (ai->check_flag(Instruction::NeedsRangeCheckFlag)) {
+ Bound *index_bound = get_bound(ai->index());
+ if (!index_bound->has_lower() || !index_bound->has_upper()) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Index instruction %d has no lower and/or no upper bound!", ai->index()->id())
+ );
+ return;
+ }
+
+ Bound *array_bound;
+ if (ai->length()) {
+ array_bound = get_bound(ai->length());
+ } else {
+ array_bound = get_bound(ai->array());
+ }
+
+ if (in_array_bound(index_bound, ai->array()) ||
+ (index_bound && array_bound && index_bound->is_smaller(array_bound) && !index_bound->lower_instr() && index_bound->lower() >= 0)) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Bounds check for instruction %d in block B%d can be fully eliminated!", ai->id(), ai->block()->block_id())
+ );
+
+ remove_range_check(ai);
+ } else if (_optimistic && loop_header) {
+ assert(ai->array(), "Array must not be null!");
+ assert(ai->index(), "Index must not be null!");
+
+ // Array instruction
+ Instruction *array_instr = ai->array();
+ if (!loop_invariant(loop_header, array_instr)) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Array %d is not loop invariant to header B%d", ai->array()->id(), loop_header->block_id())
+ );
+ return;
+ }
+
+ // Lower instruction
+ Value index_instr = ai->index();
+ Value lower_instr = index_bound->lower_instr();
+ if (!loop_invariant(loop_header, lower_instr)) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Lower instruction %d not loop invariant!", lower_instr->id())
+ );
+ return;
+ }
+ if (!lower_instr && index_bound->lower() < 0) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Lower bound smaller than 0 (%d)!", index_bound->lower())
+ );
+ return;
+ }
+
+ // Upper instruction
+ Value upper_instr = index_bound->upper_instr();
+ if (!loop_invariant(loop_header, upper_instr)) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Upper instruction %d not loop invariant!", upper_instr->id())
+ );
+ return;
+ }
+
+ // Length instruction
+ Value length_instr = ai->length();
+ if (!loop_invariant(loop_header, length_instr)) {
+ // Not loop invariant; a fresh length instruction is generated below
+ length_instr = NULL;
+ }
+
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("LOOP INVARIANT access indexed %d found in block B%d!", ai->id(), ai->block()->block_id())
+ );
+
+ BlockBegin *pred_block = loop_header->dominator();
+ assert(pred_block != NULL, "Every loop header has a dominator!");
+ BlockEnd *pred_block_end = pred_block->end();
+ Instruction *insert_position = pred_block_end->prev();
+ ValueStack *state = pred_block_end->state_before();
+ if (pred_block_end->as_Goto() && state == NULL) state = pred_block_end->state();
+ assert(state, "State must not be null");
+
+ // Add deoptimization to dominator of loop header
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Inserting deopt at bci %d in block B%d!", state->bci(), insert_position->block()->block_id())
+ );
+
+ if (!is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper())) {
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Could not eliminate because of static analysis!")
+ );
+ return;
+ }
+
+ insert_deoptimization(state, insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper(), ai);
+
+ // Finally remove the range check!
+ remove_range_check(ai);
+ }
+ }
+}
+
+void RangeCheckEliminator::remove_range_check(AccessIndexed *ai) {
+ ai->set_flag(Instruction::NeedsRangeCheckFlag, false);
+ // no range check, no need for the length instruction anymore
+ ai->clear_length();
+
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(ai->dominator_depth()*2);
+ tty->print_cr("Range check for instruction %d eliminated!", ai->id());
+ );
+
+ ASSERT_RANGE_CHECK_ELIMINATION(
+ Value array_length = ai->length();
+ if (!array_length) {
+ array_length = ai->array();
+ assert(array_length->type()->as_ObjectType(), "Has to be object type!");
+ }
+ int cur_constant = -1;
+ Value cur_value = array_length;
+ if (cur_value->type()->as_IntConstant()) {
+ cur_constant += cur_value->type()->as_IntConstant()->value();
+ cur_value = NULL;
+ }
+ Bound *new_index_bound = new Bound(0, NULL, cur_constant, cur_value);
+ add_assertions(new_index_bound, ai->index(), ai);
+ );
+}
+
+// Calculate bounds for instruction in this block and children blocks in the dominator tree
+void RangeCheckEliminator::calc_bounds(BlockBegin *block, BlockBegin *loop_header) {
+ // Ensure a valid loop_header
+ assert(!loop_header || loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Loop header has to be real!");
+
+ // Tracing output
+ TRACE_RANGE_CHECK_ELIMINATION(
+ tty->fill_to(block->dominator_depth()*2);
+ tty->print_cr("Block B%d", block->block_id());
+ );
+
+ // Pushed stack for conditions
+ IntegerStack pushed;
+ // Process If
+ BlockBegin *parent = block->dominator();
+ if (parent != NULL) {
+ If *cond = parent->end()->as_If();
+ if (cond != NULL) {
+ process_if(pushed, block, cond);
+ }
+ }
+
+  // Iterate over current block
+ InstructionList arrays;
+ AccessIndexedList accessIndexed;
+ Instruction *cur = block;
+
+ while (cur) {
+ // Ensure cur wasn't inserted during the elimination
+ if (cur->id() < this->_bounds.length()) {
+ // Process only if it is an access indexed instruction
+ AccessIndexed *ai = cur->as_AccessIndexed();
+ if (ai != NULL) {
+ process_access_indexed(loop_header, block, ai);
+ accessIndexed.append(ai);
+ if (!arrays.contains(ai->array())) {
+ arrays.append(ai->array());
+ }
+ Bound *b = get_bound(ai->index());
+ if (!b->lower_instr()) {
+ // Lower bound is constant
+ update_bound(pushed, ai->index(), Instruction::geq, NULL, 0);
+ }
+ if (!b->has_upper()) {
+ if (ai->length() && ai->length()->type()->as_IntConstant()) {
+ int value = ai->length()->type()->as_IntConstant()->value();
+ update_bound(pushed, ai->index(), Instruction::lss, NULL, value);
+ } else {
+ // Has no upper bound
+ Instruction *instr = ai->length();
+ if (instr != NULL) instr = ai->array();
+ update_bound(pushed, ai->index(), Instruction::lss, instr, 0);
+ }
+ }
+ }
+ }
+ cur = cur->next();
+ }
+
+ // Output current condition stack
+ TRACE_RANGE_CHECK_ELIMINATION(dump_condition_stack(block));
+
+ // Do in block motion of range checks
+ in_block_motion(block, accessIndexed, arrays);
+
+ // Call all dominated blocks
+ for (int i=0; i<block->dominates()->length(); i++) {
+ BlockBegin *next = block->dominates()->at(i);
+ if (!next->is_set(BlockBegin::donot_eliminate_range_checks)) {
+ // if current block is a loop header and:
+ // - next block belongs to the same loop
+ // or
+ // - next block belongs to an inner loop
+ // then current block is the loop header for next block
+ if (block->is_set(BlockBegin::linear_scan_loop_header_flag) && (block->loop_index() == next->loop_index() || next->loop_depth() > block->loop_depth())) {
+ calc_bounds(next, block);
+ } else {
+ calc_bounds(next, loop_header);
+ }
+ }
+ }
+
+ // Reset stack
+ for (int i=0; i<pushed.length(); i++) {
+ _bounds[pushed[i]]->pop();
+ }
+}
+
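calc_bounds records every bound it learns in the `pushed` list and pops those stacks again after recursing, so a condition from a dominating If is only visible in the subtree it actually dominates. A stripped-down sketch of that push/pop discipline, using plain ints in place of Bound objects and value ids in place of Values:

#include <cstdio>
#include <vector>

int main() {
  std::vector<std::vector<int>> bounds(4);      // per-value stack of bounds
  std::vector<int> pushed;                      // values given a bound in this scope

  bounds[2].push_back(10);                      // e.g. "v2 < 10" from a dominating If
  pushed.push_back(2);

  // ... recurse into dominated blocks; bounds[2].back() == 10 is visible there ...

  for (int id : pushed) bounds[id].pop_back();  // reset stacks on the way back up
  std::printf("v2 stack depth after pop: %zu\n", bounds[2].size());
  return 0;
}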
+#ifndef PRODUCT
+// Dump condition stack
+void RangeCheckEliminator::dump_condition_stack(BlockBegin *block) {
+ for (int i=0; i<_ir->linear_scan_order()->length(); i++) {
+ BlockBegin *cur_block = _ir->linear_scan_order()->at(i);
+ Instruction *instr = cur_block;
+ for_each_phi_fun(cur_block, phi,
+ BoundStack *bound_stack = _bounds.at(phi->id());
+ if (bound_stack && bound_stack->length() > 0) {
+ Bound *bound = bound_stack->top();
+ if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != phi || bound->upper_instr() != phi || bound->lower() != 0 || bound->upper() != 0)) {
+ TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());
+ tty->print("i%d", phi->id());
+ tty->print(": ");
+ bound->print();
+ tty->print_cr("");
+ );
+ }
+ });
+
+ while (!instr->as_BlockEnd()) {
+ if (instr->id() < _bounds.length()) {
+ BoundStack *bound_stack = _bounds.at(instr->id());
+ if (bound_stack && bound_stack->length() > 0) {
+ Bound *bound = bound_stack->top();
+ if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != instr || bound->upper_instr() != instr || bound->lower() != 0 || bound->upper() != 0)) {
+ TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());
+ tty->print("i%d", instr->id());
+ tty->print(": ");
+ bound->print();
+ tty->print_cr("");
+ );
+ }
+ }
+ }
+ instr = instr->next();
+ }
+ }
+}
+#endif
+
+// Verification of the IR
+RangeCheckEliminator::Verification::Verification(IR *ir) : _used(BlockBegin::number_of_blocks(), false) {
+ this->_ir = ir;
+ ir->iterate_linear_scan_order(this);
+}
+
+// Verify this block
+void RangeCheckEliminator::Verification::block_do(BlockBegin *block) {
+ If *cond = block->end()->as_If();
+ // Watch out: tsux and fsux can be the same!
+ if (block->number_of_sux() > 1) {
+ for (int i=0; i<block->number_of_sux(); i++) {
+ BlockBegin *sux = block->sux_at(i);
+ BlockBegin *pred = NULL;
+ for (int j=0; j<sux->number_of_preds(); j++) {
+ BlockBegin *cur = sux->pred_at(j);
+ assert(cur != NULL, "Predecessor must not be null");
+ if (!pred) {
+ pred = cur;
+ }
+ assert(cur == pred, "Block must not have more than one predecessor if its predecessor has more than one successor");
+ }
+ assert(sux->number_of_preds() >= 1, "Block must have at least one predecessor");
+ assert(sux->pred_at(0) == block, "Wrong successor");
+ }
+ }
+
+ BlockBegin *dominator = block->dominator();
+ if (dominator) {
+ assert(block != _ir->start(), "Start block must not have a dominator!");
+    assert(can_reach(dominator, block), "Dominator can't reach its block!");
+    assert(can_reach(_ir->start(), dominator), "Dominator is unreachable!");
+    assert(!can_reach(_ir->start(), block, dominator), "Wrong dominator! Block can be reached anyway!");
+ BlockList *all_blocks = _ir->linear_scan_order();
+ for (int i=0; i<all_blocks->length(); i++) {
+ BlockBegin *cur = all_blocks->at(i);
+ if (cur != dominator && cur != block) {
+ assert(can_reach(dominator, block, cur), "There has to be another dominator!");
+ }
+ }
+ } else {
+ assert(block == _ir->start(), "Only start block must not have a dominator");
+ }
+
+ if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
+ int loop_index = block->loop_index();
+ BlockList *all_blocks = _ir->linear_scan_order();
+ assert(block->number_of_preds() >= 1, "Block must have at least one predecessor");
+ assert(!block->is_set(BlockBegin::exception_entry_flag), "Loop header must not be exception handler!");
+ // Sometimes, the backbranch comes from an exception handler. In
+ // this case, loop indexes/loop depths may not appear correct.
+ bool loop_through_xhandler = false;
+ for (int i = 0; i < block->number_of_exception_handlers(); i++) {
+ BlockBegin *xhandler = block->exception_handler_at(i);
+ for (int j = 0; j < block->number_of_preds(); j++) {
+ if (dominates(xhandler, block->pred_at(j)) || xhandler == block->pred_at(j)) {
+ loop_through_xhandler = true;
+ }
+ }
+ }
+
+ for (int i=0; i<block->number_of_sux(); i++) {
+ BlockBegin *sux = block->sux_at(i);
+ assert(sux->loop_depth() != block->loop_depth() || sux->loop_index() == block->loop_index() || loop_through_xhandler, "Loop index has to be same");
+ assert(sux->loop_depth() == block->loop_depth() || sux->loop_index() != block->loop_index(), "Loop index has to be different");
+ }
+
+ for (int i=0; i<all_blocks->length(); i++) {
+ BlockBegin *cur = all_blocks->at(i);
+ if (cur->loop_index() == loop_index && cur != block) {
+ assert(dominates(block->dominator(), cur), "Dominator of loop header must dominate all loop blocks");
+ }
+ }
+ }
+
+ Instruction *cur = block;
+ while (cur) {
+ assert(cur->block() == block, "Block begin has to be set correctly!");
+ cur = cur->next();
+ }
+}
+
+// Loop header must dominate all loop blocks
+bool RangeCheckEliminator::Verification::dominates(BlockBegin *dominator, BlockBegin *block) {
+ BlockBegin *cur = block->dominator();
+ while (cur && cur != dominator) {
+ cur = cur->dominator();
+ }
+ return cur == dominator;
+}
+
+// Try to reach block end from block start without passing through block dont_use
+bool RangeCheckEliminator::Verification::can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use /* = NULL */) {
+ if (start == end) return start != dont_use;
+  // Simple BFS from start to end
+ // BlockBeginList _current;
+ for (int i=0; i<_used.length(); i++) {
+ _used[i] = false;
+ }
+ _current.truncate(0);
+ _successors.truncate(0);
+ if (start != dont_use) {
+ _current.push(start);
+ _used[start->block_id()] = true;
+ }
+
+ // BlockBeginList _successors;
+ while (_current.length() > 0) {
+ BlockBegin *cur = _current.pop();
+ // Add exception handlers to list
+ for (int i=0; i<cur->number_of_exception_handlers(); i++) {
+ BlockBegin *xhandler = cur->exception_handler_at(i);
+ _successors.push(xhandler);
+ // Add exception handlers of _successors to list
+ for (int j=0; j<xhandler->number_of_exception_handlers(); j++) {
+ BlockBegin *sux_xhandler = xhandler->exception_handler_at(j);
+ _successors.push(sux_xhandler);
+ }
+ }
+ // Add normal _successors to list
+ for (int i=0; i<cur->number_of_sux(); i++) {
+ BlockBegin *sux = cur->sux_at(i);
+ _successors.push(sux);
+ // Add exception handlers of _successors to list
+ for (int j=0; j<sux->number_of_exception_handlers(); j++) {
+ BlockBegin *xhandler = sux->exception_handler_at(j);
+ _successors.push(xhandler);
+ }
+ }
+ for (int i=0; i<_successors.length(); i++) {
+ BlockBegin *sux = _successors[i];
+ assert(sux != NULL, "Successor must not be NULL!");
+ if (sux == end) {
+ return true;
+ }
+ if (sux != dont_use && !_used[sux->block_id()]) {
+ _used[sux->block_id()] = true;
+ _current.push(sux);
+ }
+ }
+ _successors.truncate(0);
+ }
+
+ return false;
+}
+
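The reachability query above is a plain worklist search that also follows exception-handler edges. A minimal sketch of the same query on an assumed adjacency-list graph, with the exception edges omitted:

#include <cstdio>
#include <queue>
#include <vector>

// Can `end` be reached from `start` without passing through `dont_use`?
static bool reachable(const std::vector<std::vector<int>>& succ,
                      int start, int end, int dont_use = -1) {
  if (start == end) return start != dont_use;
  std::vector<bool> used(succ.size(), false);
  std::queue<int> work;
  if (start != dont_use) { work.push(start); used[start] = true; }
  while (!work.empty()) {
    int cur = work.front(); work.pop();
    for (int s : succ[cur]) {
      if (s == end) return true;
      if (s != dont_use && !used[s]) { used[s] = true; work.push(s); }
    }
  }
  return false;
}

int main() {
  std::vector<std::vector<int>> succ = {{1}, {2}, {}};       // 0 -> 1 -> 2
  std::printf("%d %d\n", reachable(succ, 0, 2), reachable(succ, 0, 2, 1));
  return 0;
}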
+// Bound
+RangeCheckEliminator::Bound::~Bound() {
+}
+
+// Bound constructor
+RangeCheckEliminator::Bound::Bound() {
+ init();
+ this->_lower = min_jint;
+ this->_upper = max_jint;
+ this->_lower_instr = NULL;
+ this->_upper_instr = NULL;
+}
+
+// Bound constructor
+RangeCheckEliminator::Bound::Bound(int lower, Value lower_instr, int upper, Value upper_instr) {
+ init();
+ assert(!lower_instr || !lower_instr->as_Constant() || !lower_instr->type()->as_IntConstant(), "Must not be constant!");
+ assert(!upper_instr || !upper_instr->as_Constant() || !upper_instr->type()->as_IntConstant(), "Must not be constant!");
+ this->_lower = lower;
+ this->_upper = upper;
+ this->_lower_instr = lower_instr;
+ this->_upper_instr = upper_instr;
+}
+
+// Bound constructor
+RangeCheckEliminator::Bound::Bound(Instruction::Condition cond, Value v, int constant) {
+ assert(!v || (v->type() && (v->type()->as_IntType() || v->type()->as_ObjectType())), "Type must be array or integer!");
+ assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
+
+ init();
+ if (cond == Instruction::eql) {
+ _lower = constant;
+ _lower_instr = v;
+ _upper = constant;
+ _upper_instr = v;
+ } else if (cond == Instruction::neq) {
+ _lower = min_jint;
+ _upper = max_jint;
+ _lower_instr = NULL;
+ _upper_instr = NULL;
+ if (v == NULL) {
+ if (constant == min_jint) {
+ _lower++;
+ }
+ if (constant == max_jint) {
+ _upper--;
+ }
+ }
+ } else if (cond == Instruction::geq) {
+ _lower = constant;
+ _lower_instr = v;
+ _upper = max_jint;
+ _upper_instr = NULL;
+ } else if (cond == Instruction::leq) {
+ _lower = min_jint;
+ _lower_instr = NULL;
+ _upper = constant;
+ _upper_instr = v;
+ } else {
+ ShouldNotReachHere();
+ }
+}
+
+// Set lower
+void RangeCheckEliminator::Bound::set_lower(int value, Value v) {
+ assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
+ this->_lower = value;
+ this->_lower_instr = v;
+}
+
+// Set upper
+void RangeCheckEliminator::Bound::set_upper(int value, Value v) {
+ assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
+ this->_upper = value;
+ this->_upper_instr = v;
+}
+
+// Add constant -> no overflow may occur
+void RangeCheckEliminator::Bound::add_constant(int value) {
+ this->_lower += value;
+ this->_upper += value;
+}
+
+// Init
+void RangeCheckEliminator::Bound::init() {
+}
+
+// or
+void RangeCheckEliminator::Bound::or_op(Bound *b) {
+ // Watch out, bound is not guaranteed not to overflow!
+ // Update lower bound
+ if (_lower_instr != b->_lower_instr || (_lower_instr && _lower != b->_lower)) {
+ _lower_instr = NULL;
+ _lower = min_jint;
+ } else {
+ _lower = MIN2(_lower, b->_lower);
+ }
+ // Update upper bound
+ if (_upper_instr != b->_upper_instr || (_upper_instr && _upper != b->_upper)) {
+ _upper_instr = NULL;
+ _upper = max_jint;
+ } else {
+ _upper = MAX2(_upper, b->_upper);
+ }
+}
+
+// and
+void RangeCheckEliminator::Bound::and_op(Bound *b) {
+ // Update lower bound
+ if (_lower_instr == b->_lower_instr) {
+ _lower = MAX2(_lower, b->_lower);
+ }
+ if (b->has_lower()) {
+ bool set = true;
+ if (_lower_instr != NULL && b->_lower_instr != NULL) {
+ set = (_lower_instr->dominator_depth() > b->_lower_instr->dominator_depth());
+ }
+ if (set) {
+ _lower = b->_lower;
+ _lower_instr = b->_lower_instr;
+ }
+ }
+ // Update upper bound
+ if (_upper_instr == b->_upper_instr) {
+ _upper = MIN2(_upper, b->_upper);
+ }
+ if (b->has_upper()) {
+ bool set = true;
+ if (_upper_instr != NULL && b->_upper_instr != NULL) {
+ set = (_upper_instr->dominator_depth() > b->_upper_instr->dominator_depth());
+ }
+ if (set) {
+ _upper = b->_upper;
+ _upper_instr = b->_upper_instr;
+ }
+ }
+}
+
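or_op and and_op implement interval widening and narrowing: or_op merges the bounds of two control-flow paths, and_op combines two conditions that hold on the same value at the same time. A sketch of the constant-only case; the real Bound additionally carries an optional instruction per side, which is why mismatched instructions above force the bound back to min_jint/max_jint:

#include <algorithm>
#include <cstdio>

struct Interval {
  int lo, hi;
  void or_op(const Interval& b)  { lo = std::min(lo, b.lo); hi = std::max(hi, b.hi); }
  void and_op(const Interval& b) { lo = std::max(lo, b.lo); hi = std::min(hi, b.hi); }
};

int main() {
  Interval x = {0, 10}, y = {5, 20};
  Interval m = x; m.or_op(y);    // merge of two paths: [0, 20]
  Interval n = x; n.and_op(y);   // both conditions hold: [5, 10]
  std::printf("[%d,%d] [%d,%d]\n", m.lo, m.hi, n.lo, n.hi);
  return 0;
}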
+// has_upper
+bool RangeCheckEliminator::Bound::has_upper() {
+ return _upper_instr != NULL || _upper < max_jint;
+}
+
+// is_smaller
+bool RangeCheckEliminator::Bound::is_smaller(Bound *b) {
+ if (b->_lower_instr != _upper_instr) {
+ return false;
+ }
+ return _upper < b->_lower;
+}
+
+// has_lower
+bool RangeCheckEliminator::Bound::has_lower() {
+ return _lower_instr != NULL || _lower > min_jint;
+}
+
+// in_array_bound
+bool RangeCheckEliminator::in_array_bound(Bound *bound, Value array){
+ if (!bound) return false;
+ assert(array != NULL, "Must not be null!");
+ assert(bound != NULL, "Must not be null!");
+ if (bound->lower() >=0 && bound->lower_instr() == NULL && bound->upper() < 0 && bound->upper_instr() != NULL) {
+ ArrayLength *len = bound->upper_instr()->as_ArrayLength();
+ if (bound->upper_instr() == array || (len != NULL && len->array() == array)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// remove_lower
+void RangeCheckEliminator::Bound::remove_lower() {
+ _lower = min_jint;
+ _lower_instr = NULL;
+}
+
+// remove_upper
+void RangeCheckEliminator::Bound::remove_upper() {
+ _upper = max_jint;
+ _upper_instr = NULL;
+}
+
+// upper
+int RangeCheckEliminator::Bound::upper() {
+ return _upper;
+}
+
+// lower
+int RangeCheckEliminator::Bound::lower() {
+ return _lower;
+}
+
+// upper_instr
+Value RangeCheckEliminator::Bound::upper_instr() {
+ return _upper_instr;
+}
+
+// lower_instr
+Value RangeCheckEliminator::Bound::lower_instr() {
+ return _lower_instr;
+}
+
+// print
+void RangeCheckEliminator::Bound::print() {
+ tty->print("");
+ if (this->_lower_instr || this->_lower != min_jint) {
+ if (this->_lower_instr) {
+ tty->print("i%d", this->_lower_instr->id());
+ if (this->_lower > 0) {
+ tty->print("+%d", _lower);
+ }
+ if (this->_lower < 0) {
+ tty->print("%d", _lower);
+ }
+ } else {
+ tty->print("%d", _lower);
+ }
+ tty->print(" <= ");
+ }
+ tty->print("x");
+ if (this->_upper_instr || this->_upper != max_jint) {
+ tty->print(" <= ");
+ if (this->_upper_instr) {
+ tty->print("i%d", this->_upper_instr->id());
+ if (this->_upper > 0) {
+ tty->print("+%d", _upper);
+ }
+ if (this->_upper < 0) {
+ tty->print("%d", _upper);
+ }
+ } else {
+ tty->print("%d", _upper);
+ }
+ }
+}
+
+// Copy
+RangeCheckEliminator::Bound *RangeCheckEliminator::Bound::copy() {
+ Bound *b = new Bound();
+ b->_lower = _lower;
+ b->_lower_instr = _lower_instr;
+ b->_upper = _upper;
+ b->_upper_instr = _upper_instr;
+ return b;
+}
+
+#ifdef ASSERT
+// Add assertion
+void RangeCheckEliminator::Bound::add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond) {
+ Instruction *result = position;
+ Instruction *compare_with = NULL;
+ ValueStack *state = position->state_before();
+ if (position->as_BlockEnd() && !position->as_Goto()) {
+ state = position->as_BlockEnd()->state_before();
+ }
+ Instruction *instruction_before = position->prev();
+ if (position->as_Return() && Compilation::current()->method()->is_synchronized() && instruction_before->as_MonitorExit()) {
+ instruction_before = instruction_before->prev();
+ }
+ result = instruction_before;
+ // Load constant only if needed
+ Constant *constant = NULL;
+ if (i != 0 || !instr) {
+ constant = new Constant(new IntConstant(i));
+ NOT_PRODUCT(constant->set_printable_bci(position->printable_bci()));
+ result = result->insert_after(constant);
+ compare_with = constant;
+ }
+
+ if (instr) {
+ assert(instr->type()->as_ObjectType() || instr->type()->as_IntType(), "Type must be array or integer!");
+ compare_with = instr;
+ // Load array length if necessary
+ Instruction *op = instr;
+ if (instr->type()->as_ObjectType()) {
+ assert(state, "must not be null");
+ ArrayLength *length = new ArrayLength(instr, state->copy());
+ NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
+ length->set_exception_state(length->state_before());
+ result = result->insert_after(length);
+ op = length;
+ compare_with = length;
+ }
+ // Add operation only if necessary
+ if (constant) {
+ ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, op, false, NULL);
+ NOT_PRODUCT(ao->set_printable_bci(position->printable_bci()));
+ result = result->insert_after(ao);
+ compare_with = ao;
+ // TODO: Check that add operation does not overflow!
+ }
+ }
+ assert(compare_with != NULL, "You have to compare with something!");
+ assert(instruction != NULL, "Instruction must not be null!");
+
+ if (instruction->type()->as_ObjectType()) {
+ // Load array length if necessary
+ Instruction *op = instruction;
+ assert(state, "must not be null");
+ ArrayLength *length = new ArrayLength(instruction, state->copy());
+ length->set_exception_state(length->state_before());
+ NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
+ result = result->insert_after(length);
+ instruction = length;
+ }
+
+ Assert *assert = new Assert(instruction, cond, false, compare_with);
+ NOT_PRODUCT(assert->set_printable_bci(position->printable_bci()));
+ result->insert_after(assert);
+}
+
+// Add assertions
+void RangeCheckEliminator::add_assertions(Bound *bound, Instruction *instruction, Instruction *position) {
+ // Add lower bound assertion
+ if (bound->has_lower()) {
+ bound->add_assertion(instruction, position, bound->lower(), bound->lower_instr(), Instruction::geq);
+ }
+ // Add upper bound assertion
+ if (bound->has_upper()) {
+ bound->add_assertion(instruction, position, bound->upper(), bound->upper_instr(), Instruction::leq);
+ }
+}
+#endif
+
diff --git a/src/share/vm/c1/c1_RangeCheckElimination.hpp b/src/share/vm/c1/c1_RangeCheckElimination.hpp
new file mode 100644
index 000000000..af6d9d948
--- /dev/null
+++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
+#define SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
+
+#include "c1/c1_Instruction.hpp"
+
+// Base class for range check elimination
+class RangeCheckElimination : AllStatic {
+public:
+ static void eliminate(IR *ir);
+};
+
+// Implementation
+class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC {
+private:
+ int _number_of_instructions;
+ bool _optimistic; // Insert predicates and deoptimize when they fail
+ IR *_ir;
+
+ define_array(BlockBeginArray, BlockBegin*)
+ define_stack(BlockBeginList, BlockBeginArray)
+ define_stack(IntegerStack, intArray)
+ define_array(IntegerMap, IntegerStack*)
+
+ class Verification : public _ValueObj /*VALUE_OBJ_CLASS_SPEC*/, public BlockClosure {
+ private:
+ IR *_ir;
+ boolArray _used;
+ BlockBeginList _current;
+ BlockBeginList _successors;
+
+ public:
+ Verification(IR *ir);
+ virtual void block_do(BlockBegin *block);
+ bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
+ bool dominates(BlockBegin *dominator, BlockBegin *block);
+ };
+
+public:
+  // Bounds for an instruction in the form x + c, where c is an integer
+  // constant and x is another instruction
+ class Bound : public CompilationResourceObj {
+ private:
+ int _upper;
+ Value _upper_instr;
+ int _lower;
+ Value _lower_instr;
+
+ public:
+ Bound();
+ Bound(Value v);
+ Bound(Instruction::Condition cond, Value v, int constant = 0);
+ Bound(int lower, Value lower_instr, int upper, Value upper_instr);
+ ~Bound();
+
+#ifdef ASSERT
+ void add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond);
+#endif
+ int upper();
+ Value upper_instr();
+ int lower();
+ Value lower_instr();
+ void print();
+ bool check_no_overflow(int const_value);
+ void or_op(Bound *b);
+ void and_op(Bound *b);
+ bool has_upper();
+ bool has_lower();
+ void set_upper(int upper, Value upper_instr);
+ void set_lower(int lower, Value lower_instr);
+ bool is_smaller(Bound *b);
+ void remove_upper();
+ void remove_lower();
+ void add_constant(int value);
+ Bound *copy();
+
+ private:
+ void init();
+ };
+
+
+ class Visitor : public InstructionVisitor {
+ private:
+ Bound *_bound;
+ RangeCheckEliminator *_rce;
+
+ public:
+ void set_range_check_eliminator(RangeCheckEliminator *rce) { _rce = rce; }
+ Bound *bound() const { return _bound; }
+ void clear_bound() { _bound = NULL; }
+
+ protected:
+ // visitor functions
+ void do_Constant (Constant* x);
+ void do_IfOp (IfOp* x);
+ void do_LogicOp (LogicOp* x);
+ void do_ArithmeticOp (ArithmeticOp* x);
+ void do_Phi (Phi* x);
+
+ void do_StoreField (StoreField* x) { /* nothing to do */ };
+ void do_StoreIndexed (StoreIndexed* x) { /* nothing to do */ };
+ void do_MonitorEnter (MonitorEnter* x) { /* nothing to do */ };
+ void do_MonitorExit (MonitorExit* x) { /* nothing to do */ };
+ void do_Invoke (Invoke* x) { /* nothing to do */ };
+ void do_UnsafePutRaw (UnsafePutRaw* x) { /* nothing to do */ };
+ void do_UnsafePutObject(UnsafePutObject* x) { /* nothing to do */ };
+ void do_Intrinsic (Intrinsic* x) { /* nothing to do */ };
+ void do_Local (Local* x) { /* nothing to do */ };
+ void do_LoadField (LoadField* x) { /* nothing to do */ };
+ void do_ArrayLength (ArrayLength* x) { /* nothing to do */ };
+ void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ };
+ void do_NegateOp (NegateOp* x) { /* nothing to do */ };
+ void do_ShiftOp (ShiftOp* x) { /* nothing to do */ };
+ void do_CompareOp (CompareOp* x) { /* nothing to do */ };
+ void do_Convert (Convert* x) { /* nothing to do */ };
+ void do_NullCheck (NullCheck* x) { /* nothing to do */ };
+ void do_TypeCast (TypeCast* x) { /* nothing to do */ };
+ void do_NewInstance (NewInstance* x) { /* nothing to do */ };
+ void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ };
+ void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ };
+ void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ };
+ void do_CheckCast (CheckCast* x) { /* nothing to do */ };
+ void do_InstanceOf (InstanceOf* x) { /* nothing to do */ };
+ void do_BlockBegin (BlockBegin* x) { /* nothing to do */ };
+ void do_Goto (Goto* x) { /* nothing to do */ };
+ void do_If (If* x) { /* nothing to do */ };
+ void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ };
+ void do_TableSwitch (TableSwitch* x) { /* nothing to do */ };
+ void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ };
+ void do_Return (Return* x) { /* nothing to do */ };
+ void do_Throw (Throw* x) { /* nothing to do */ };
+ void do_Base (Base* x) { /* nothing to do */ };
+ void do_OsrEntry (OsrEntry* x) { /* nothing to do */ };
+ void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
+ void do_RoundFP (RoundFP* x) { /* nothing to do */ };
+ void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ };
+ void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
+ void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { /* nothing to do */ };
+ void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ };
+ void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
+ void do_ProfileCall (ProfileCall* x) { /* nothing to do */ };
+ void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
+ void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
+ void do_MemBar (MemBar* x) { /* nothing to do */ };
+ void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
+ void do_Assert (Assert* x) { /* nothing to do */ };
+ };
+
+#ifdef ASSERT
+ void add_assertions(Bound *bound, Instruction *instruction, Instruction *position);
+#endif
+
+ define_array(BoundArray, Bound *)
+ define_stack(BoundStack, BoundArray)
+ define_array(BoundMap, BoundStack *)
+ define_array(AccessIndexedArray, AccessIndexed *)
+ define_stack(AccessIndexedList, AccessIndexedArray)
+ define_array(InstructionArray, Instruction *)
+ define_stack(InstructionList, InstructionArray)
+
+ class AccessIndexedInfo : public CompilationResourceObj {
+ public:
+ AccessIndexedList *_list;
+ int _min;
+ int _max;
+ };
+
+ define_array(AccessIndexedInfoArray, AccessIndexedInfo *)
+ BoundMap _bounds; // Mapping from Instruction's id to current bound
+ AccessIndexedInfoArray _access_indexed_info; // Mapping from Instruction's id to AccessIndexedInfo for in block motion
+ Visitor _visitor;
+
+public:
+ RangeCheckEliminator(IR *ir);
+
+ IR *ir() const { return _ir; }
+
+  // Pass over the dominator tree to identify blocks where there's an opportunity for optimization
+ bool set_process_block_flags(BlockBegin *block);
+ // The core of the optimization work: pass over the dominator tree
+ // to propagate bound information, insert predicate out of loops,
+ // eliminate bound checks when possible and perform in block motion
+ void calc_bounds(BlockBegin *block, BlockBegin *loop_header);
+ // reorder bound checks within a block in order to eliminate some of them
+ void in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays);
+
+ // update/access current bound
+ void update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant);
+ void update_bound(IntegerStack &pushed, Value v, Bound *bound);
+ Bound *get_bound(Value v);
+
+ bool loop_invariant(BlockBegin *loop_header, Instruction *instruction); // check for loop invariance
+ void add_access_indexed_info(InstructionList &indices, int i, Value instruction, AccessIndexed *ai); // record indexed access for in block motion
+  void remove_range_check(AccessIndexed *ai);                                // Mark this instruction as not needing a range check
+ void add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition); // Update bound for an If
+ bool in_array_bound(Bound *bound, Value array); // Check whether bound is known to fall within array
+
+ // helper functions to work with predicates
+ Instruction* insert_after(Instruction* insert_position, Instruction* instr, int bci);
+ Instruction* predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
+  Instruction* predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
+ Instruction* predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
+ Instruction* predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
+
+ void insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, // Add predicate
+ Instruction *length_instruction, Instruction *lower_instr, int lower,
+ Instruction *upper_instr, int upper, AccessIndexed *ai);
+ bool is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, // Can we safely add a predicate?
+ Instruction *length_instr, Instruction *lower_instr,
+ int lower, Instruction *upper_instr, int upper);
+ void process_if(IntegerStack &pushed, BlockBegin *block, If *cond); // process If Instruction
+ void process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai); // process indexed access
+
+ void dump_condition_stack(BlockBegin *cur_block);
+ static void print_statistics();
+};
+
+#endif // SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index 3002c1304..e274076d0 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -1330,6 +1330,50 @@ JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END
+JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
+ ResourceMark rm;
+
+ assert(!TieredCompilation, "incompatible with tiered compilation");
+
+ RegisterMap reg_map(thread, false);
+ frame runtime_frame = thread->last_frame();
+ frame caller_frame = runtime_frame.sender(&reg_map);
+
+ nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+ assert (nm != NULL, "no more nmethod?");
+ nm->make_not_entrant();
+
+ methodHandle m(nm->method());
+ MethodData* mdo = m->method_data();
+
+ if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
+ // Build an MDO. Ignore errors like OutOfMemory;
+ // that simply means we won't have an MDO to update.
+ Method::build_interpreter_method_data(m, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
+ CLEAR_PENDING_EXCEPTION;
+ }
+ mdo = m->method_data();
+ }
+
+ if (mdo != NULL) {
+ mdo->inc_trap_count(Deoptimization::Reason_none);
+ }
+
+ if (TracePredicateFailedTraps) {
+ stringStream ss1, ss2;
+ vframeStream vfst(thread);
+ methodHandle inlinee = methodHandle(vfst.method());
+ inlinee->print_short_name(&ss1);
+ m->print_short_name(&ss2);
+ tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc %x", ss1.as_string(), vfst.bci(), ss2.as_string(), caller_frame.pc());
+ }
+
+
+ Deoptimization::deoptimize_frame(thread, caller_frame.id());
+
+JRT_END
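The stub above makes the nmethod not entrant, bumps a trap counter in the MDO, and deoptimizes the caller frame. A toy model of that bookkeeping, under the assumption (not taken from this patch) that a recompilation policy would use the counter to stop being optimistic for methods whose predicates keep failing:

#include <cstdio>

struct MethodProfile {
  int predicate_traps = 0;                     // inc_trap_count analogue
  bool optimistic() const { return predicate_traps < 2; }  // threshold is assumed
};

int main() {
  MethodProfile m;
  m.predicate_traps++;                         // first deopt: stay optimistic
  std::printf("optimistic: %d\n", m.optimistic());
  m.predicate_traps++;                         // repeated deopts: give up
  std::printf("optimistic: %d\n", m.optimistic());
  return 0;
}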
#ifndef PRODUCT
void Runtime1::print_statistics() {
diff --git a/src/share/vm/c1/c1_Runtime1.hpp b/src/share/vm/c1/c1_Runtime1.hpp
index 06ef147a9..9b12d2622 100644
--- a/src/share/vm/c1/c1_Runtime1.hpp
+++ b/src/share/vm/c1/c1_Runtime1.hpp
@@ -71,6 +71,7 @@ class StubAssembler;
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
stub(counter_overflow) \
+ stub(predicate_failed_trap) \
last_entry(number_of_ids)
#define DECLARE_STUB_ID(x) x ## _id ,
@@ -190,6 +191,8 @@ class Runtime1: public AllStatic {
static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
static int is_instance_of(oopDesc* mirror, oopDesc* obj);
+ static void predicate_failed_trap(JavaThread* thread);
+
static void print_statistics() PRODUCT_RETURN;
};
diff --git a/src/share/vm/c1/c1_ValueMap.cpp b/src/share/vm/c1/c1_ValueMap.cpp
index a75985410..fb9b931e3 100644
--- a/src/share/vm/c1/c1_ValueMap.cpp
+++ b/src/share/vm/c1/c1_ValueMap.cpp
@@ -26,9 +26,9 @@
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_ValueMap.hpp"
+#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp"
-
#ifndef PRODUCT
int ValueMap::_number_of_finds = 0;
@@ -192,10 +192,6 @@ Value ValueMap::find_insert(Value x) {
&& lf->field()->holder() == field->holder() \
&& (all_offsets || lf->field()->offset() == field->offset());
-#define MUST_KILL_EXCEPTION(must_kill, entry, value) \
- assert(entry->nesting() < nesting(), "must not find bigger nesting than current"); \
- bool must_kill = (entry->nesting() == nesting() - 1);
-
void ValueMap::kill_memory() {
GENERIC_KILL_VALUE(MUST_KILL_MEMORY);
@@ -209,11 +205,6 @@ void ValueMap::kill_field(ciField* field, bool all_offsets) {
GENERIC_KILL_VALUE(MUST_KILL_FIELD);
}
-void ValueMap::kill_exception() {
- GENERIC_KILL_VALUE(MUST_KILL_EXCEPTION);
-}
-
-
void ValueMap::kill_map(ValueMap* map) {
assert(is_global_value_numbering(), "only for global value numbering");
_killed_values.set_union(&map->_killed_values);
@@ -274,6 +265,8 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
GlobalValueNumbering* _gvn;
BlockList _loop_blocks;
bool _too_complicated_loop;
+ bool _has_field_store[T_ARRAY + 1];
+ bool _has_indexed_store[T_ARRAY + 1];
// simplified access to methods of GlobalValueNumbering
ValueMap* current_map() { return _gvn->current_map(); }
@@ -281,8 +274,16 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { _too_complicated_loop = true; }
- void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); };
- void kill_array(ValueType* type) { current_map()->kill_array(type); };
+ void kill_field(ciField* field, bool all_offsets) {
+ current_map()->kill_field(field, all_offsets);
+ assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
+ _has_field_store[field->type()->basic_type()] = true;
+ }
+ void kill_array(ValueType* type) {
+ current_map()->kill_array(type);
+ BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
+ _has_indexed_store[basic_type] = true;
+ }
public:
ShortLoopOptimizer(GlobalValueNumbering* gvn)
@@ -290,11 +291,141 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
, _loop_blocks(ValueMapMaxLoopSize)
, _too_complicated_loop(false)
{
+ for (int i=0; i<= T_ARRAY; i++){
+ _has_field_store[i] = false;
+ _has_indexed_store[i] = false;
+ }
+ }
+
+ bool has_field_store(BasicType type) {
+ assert(type >= 0 && type <= T_ARRAY, "Invalid type");
+ return _has_field_store[type];
+ }
+
+ bool has_indexed_store(BasicType type) {
+ assert(type >= 0 && type <= T_ARRAY, "Invalid type");
+ return _has_indexed_store[type];
}
bool process(BlockBegin* loop_header);
};
+class LoopInvariantCodeMotion : public StackObj {
+ private:
+ GlobalValueNumbering* _gvn;
+ ShortLoopOptimizer* _short_loop_optimizer;
+ Instruction* _insertion_point;
+ ValueStack * _state;
+
+ void set_invariant(Value v) const { _gvn->set_processed(v); }
+ bool is_invariant(Value v) const { return _gvn->is_processed(v); }
+
+ void process_block(BlockBegin* block);
+
+ public:
+ LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks);
+};
+
+LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks)
+ : _gvn(gvn), _short_loop_optimizer(slo) {
+
+ TRACE_VALUE_NUMBERING(tty->print_cr("using loop invariant code motion loop_header = %d", loop_header->block_id()));
+ TRACE_VALUE_NUMBERING(tty->print_cr("** loop invariant code motion for short loop B%d", loop_header->block_id()));
+
+ BlockBegin* insertion_block = loop_header->dominator();
+ if (insertion_block->number_of_preds() == 0) {
+ return; // only the entry block does not have a predecessor
+ }
+
+ assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
+ _insertion_point = insertion_block->end()->prev();
+
+ BlockEnd *block_end = insertion_block->end();
+ _state = block_end->state_before();
+
+ if (!_state) {
+ // If, TableSwitch and LookupSwitch always have state_before when
+    // loop invariant code motion happens.
+ assert(block_end->as_Goto(), "Block has to be goto");
+ _state = block_end->state();
+ }
+
+ // the loop_blocks are filled by going backward from the loop header, so this processing order is best
+ assert(loop_blocks->at(0) == loop_header, "loop header must be first loop block");
+ process_block(loop_header);
+ for (int i = loop_blocks->length() - 1; i >= 1; i--) {
+ process_block(loop_blocks->at(i));
+ }
+}
+
+void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
+ TRACE_VALUE_NUMBERING(tty->print_cr("processing block B%d", block->block_id()));
+
+ Instruction* prev = block;
+ Instruction* cur = block->next();
+
+ while (cur != NULL) {
+
+ // determine if cur instruction is loop invariant
+ // only selected instruction types are processed here
+ bool cur_invariant = false;
+
+ if (cur->as_Constant() != NULL) {
+ cur_invariant = !cur->can_trap();
+ } else if (cur->as_ArithmeticOp() != NULL || cur->as_LogicOp() != NULL || cur->as_ShiftOp() != NULL) {
+ assert(cur->as_Op2() != NULL, "must be Op2");
+ Op2* op2 = (Op2*)cur;
+ cur_invariant = !op2->can_trap() && is_invariant(op2->x()) && is_invariant(op2->y());
+ } else if (cur->as_LoadField() != NULL) {
+ LoadField* lf = (LoadField*)cur;
+ // deoptimizes on NullPointerException
+ cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj());
+ } else if (cur->as_ArrayLength() != NULL) {
+ ArrayLength *length = cur->as_ArrayLength();
+ cur_invariant = is_invariant(length->array());
+ } else if (cur->as_LoadIndexed() != NULL) {
+ LoadIndexed *li = (LoadIndexed *)cur->as_LoadIndexed();
+ cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index());
+ }
+
+ if (cur_invariant) {
+ // perform value numbering and mark instruction as loop-invariant
+ _gvn->substitute(cur);
+
+ if (cur->as_Constant() == NULL) {
+ // ensure that code for non-constant instructions is always generated
+ cur->pin();
+ }
+
+ // remove cur instruction from loop block and append it to block before loop
+ Instruction* next = cur->next();
+ Instruction* in = _insertion_point->next();
+ _insertion_point = _insertion_point->set_next(cur);
+ cur->set_next(in);
+
+ // Deoptimize on exception
+ cur->set_flag(Instruction::DeoptimizeOnException, true);
+
+ // Clear exception handlers
+ cur->set_exception_handlers(NULL);
+
+ TRACE_VALUE_NUMBERING(tty->print_cr("Instruction %c%d is loop invariant", cur->type()->tchar(), cur->id()));
+
+ if (cur->state_before() != NULL) {
+ cur->set_state_before(_state->copy());
+ }
+ if (cur->exception_state() != NULL) {
+ cur->set_exception_state(_state->copy());
+ }
+
+ cur = prev->set_next(next);
+
+ } else {
+ prev = cur;
+ cur = cur->next();
+ }
+ }
+}
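process_block unlinks each invariant instruction from the loop body and splices it in at _insertion_point in the loop header's dominator. What that buys, shown on plain C++ for illustration only; the real pass works on the HIR instruction list and re-wires states as above:

#include <cstdio>

static int sum_naive(const int* a, int n, int base) {
  int s = 0;
  for (int i = 0; i < n; i++) {
    int scaled = base * 2;        // recomputed every iteration
    s += a[i] + scaled;
  }
  return s;
}

static int sum_hoisted(const int* a, int n, int base) {
  int scaled = base * 2;          // hoisted: both operands are loop invariant
  int s = 0;
  for (int i = 0; i < n; i++) {
    s += a[i] + scaled;
  }
  return s;
}

int main() {
  int a[4] = {1, 2, 3, 4};
  std::printf("%d %d\n", sum_naive(a, 4, 5), sum_hoisted(a, 4, 5));
  return 0;
}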
bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block"));
@@ -316,6 +447,10 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
for (int j = block->number_of_preds() - 1; j >= 0; j--) {
BlockBegin* pred = block->pred_at(j);
+ if (pred->is_set(BlockBegin::osr_entry_flag)) {
+ return false;
+ }
+
ValueMap* pred_map = value_map_of(pred);
if (pred_map != NULL) {
current_map()->kill_map(pred_map);
@@ -336,6 +471,12 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
}
}
+ bool optimistic = this->_gvn->compilation()->is_optimistic();
+
+ if (UseLoopInvariantCodeMotion && optimistic) {
+ LoopInvariantCodeMotion code_motion(this, _gvn, loop_header, &_loop_blocks);
+ }
+
TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized"));
return true;
}
@@ -344,11 +485,11 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
GlobalValueNumbering::GlobalValueNumbering(IR* ir)
: _current_map(NULL)
, _value_maps(ir->linear_scan_order()->length(), NULL)
+ , _compilation(ir->compilation())
{
TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering"));
ShortLoopOptimizer short_loop_optimizer(this);
- int subst_count = 0;
BlockList* blocks = ir->linear_scan_order();
int num_blocks = blocks->length();
@@ -357,6 +498,12 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
assert(start_block == ir->start() && start_block->number_of_preds() == 0 && start_block->dominator() == NULL, "must be start block");
assert(start_block->next()->as_Base() != NULL && start_block->next()->next() == NULL, "start block must not have instructions");
+  // method parameters are not linked in the instructions list, so process them separately
+ for_each_state_value(start_block->state(), value,
+ assert(value->as_Local() != NULL, "only method parameters allowed");
+ set_processed(value);
+ );
+
// initial, empty value map with nesting 0
set_value_map_of(start_block, new ValueMap());
@@ -374,7 +521,7 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
// create new value map with increased nesting
_current_map = new ValueMap(value_map_of(dominator));
- if (num_preds == 1) {
+ if (num_preds == 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
assert(dominator == block->pred_at(0), "dominator must be equal to predecessor");
// nothing to do here
@@ -403,36 +550,41 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
}
}
- if (block->is_set(BlockBegin::exception_entry_flag)) {
- current_map()->kill_exception();
- }
+    // phi functions are not linked in the instructions list, so process them separately
+ for_each_phi_fun(block, phi,
+ set_processed(phi);
+ );
TRACE_VALUE_NUMBERING(tty->print("value map before processing block: "); current_map()->print());
// visit all instructions of this block
for (Value instr = block->next(); instr != NULL; instr = instr->next()) {
- assert(!instr->has_subst(), "substitution already set");
-
// check if instruction kills any values
instr->visit(this);
-
- if (instr->hash() != 0) {
- Value f = current_map()->find_insert(instr);
- if (f != instr) {
- assert(!f->has_subst(), "can't have a substitution");
- instr->set_subst(f);
- subst_count++;
- }
- }
+ // perform actual value numbering
+ substitute(instr);
}
// remember value map for successors
set_value_map_of(block, current_map());
}
- if (subst_count != 0) {
+ if (_has_substitutions) {
SubstitutionResolver resolver(ir);
}
TRACE_VALUE_NUMBERING(tty->print("****** end of global value numbering. "); ValueMap::print_statistics());
}
+
+void GlobalValueNumbering::substitute(Instruction* instr) {
+ assert(!instr->has_subst(), "substitution already set");
+ Value subst = current_map()->find_insert(instr);
+ if (subst != instr) {
+ assert(!subst->has_subst(), "can't have a substitution");
+
+ TRACE_VALUE_NUMBERING(tty->print_cr("substitution for %d set to %d", instr->id(), subst->id()));
+ instr->set_subst(subst);
+ _has_substitutions = true;
+ }
+ set_processed(instr);
+}
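substitute() is the heart of value numbering: look the instruction up in the current value map and, if an equivalent value was already computed, record a substitution for the SubstitutionResolver to apply afterwards. A toy sketch of that lookup, with strings standing in for hashed instructions:

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> value_map;            // expression -> value id
  auto find_insert = [&](const std::string& key, int id) {
    auto it = value_map.find(key);
    if (it != value_map.end()) return it->second;  // reuse the earlier value
    value_map[key] = id;
    return id;
  };
  int a = find_insert("x+y", 1);   // first occurrence: keeps id 1
  int b = find_insert("x+y", 2);   // duplicate: substituted by id 1
  std::printf("%d %d\n", a, b);    // prints "1 1"
  return 0;
}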
diff --git a/src/share/vm/c1/c1_ValueMap.hpp b/src/share/vm/c1/c1_ValueMap.hpp
index 07dd9ddfb..c76ef46be 100644
--- a/src/share/vm/c1/c1_ValueMap.hpp
+++ b/src/share/vm/c1/c1_ValueMap.hpp
@@ -206,6 +206,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };
+ void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
+ void do_Assert (Assert* x) { /* nothing to do */ };
};
@@ -225,15 +227,22 @@ class ValueNumberingEffects: public ValueNumberingVisitor {
class GlobalValueNumbering: public ValueNumberingVisitor {
private:
+ Compilation* _compilation; // compilation data
ValueMap* _current_map; // value map of current block
ValueMapArray _value_maps; // list of value maps for all blocks
+ ValueSet _processed_values; // marker for instructions that were already processed
+ bool _has_substitutions; // set to true when substitutions must be resolved
public:
// accessors
+ Compilation* compilation() const { return _compilation; }
ValueMap* current_map() { return _current_map; }
ValueMap* value_map_of(BlockBegin* block) { return _value_maps.at(block->linear_scan_number()); }
void set_value_map_of(BlockBegin* block, ValueMap* map) { assert(value_map_of(block) == NULL, ""); _value_maps.at_put(block->linear_scan_number(), map); }
+ bool is_processed(Value v) { return _processed_values.contains(v); }
+ void set_processed(Value v) { _processed_values.put(v); }
+
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { current_map()->kill_memory(); }
void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); }
@@ -241,6 +250,7 @@ class GlobalValueNumbering: public ValueNumberingVisitor {
// main entry point that performs global value numbering
GlobalValueNumbering(IR* ir);
+ void substitute(Instruction* instr); // substitute instruction if it is contained in current value map
};
#endif // SHARE_VM_C1_C1_VALUEMAP_HPP
diff --git a/src/share/vm/c1/c1_globals.hpp b/src/share/vm/c1/c1_globals.hpp
index 16451f6d5..844880be2 100644
--- a/src/share/vm/c1/c1_globals.hpp
+++ b/src/share/vm/c1/c1_globals.hpp
@@ -119,6 +119,24 @@
develop(bool, UseGlobalValueNumbering, true, \
"Use Global Value Numbering (separate phase)") \
\
+ product(bool, UseLoopInvariantCodeMotion, true, \
+ "Simple loop invariant code motion for short loops during GVN") \
+ \
+ develop(bool, TracePredicateFailedTraps, false, \
+ "trace runtime traps caused by predicate failure") \
+ \
+ develop(bool, StressLoopInvariantCodeMotion, false, \
+ "stress loop invariant code motion") \
+ \
+ develop(bool, TraceRangeCheckElimination, false, \
+ "Trace Range Check Elimination") \
+ \
+ develop(bool, AssertRangeCheckElimination, false, \
+ "Assert Range Check Elimination") \
+ \
+ develop(bool, StressRangeCheckElimination, false, \
+ "stress Range Check Elimination") \
+ \
develop(bool, PrintValueNumbering, false, \
"Print Value Numbering") \
\
diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp
index 0fa11470c..646de740f 100644
--- a/src/share/vm/ci/ciMethod.cpp
+++ b/src/share/vm/ci/ciMethod.cpp
@@ -790,6 +790,17 @@ int ciMethod::scale_count(int count, float prof_factor) {
return count;
}
+
+// ------------------------------------------------------------------
+// ciMethod::is_ignored_by_security_stack_walk
+//
+bool ciMethod::is_ignored_by_security_stack_walk() const {
+ check_is_loaded();
+ VM_ENTRY_MARK;
+ return get_Method()->is_ignored_by_security_stack_walk();
+}
+
+
// ------------------------------------------------------------------
// invokedynamic support
diff --git a/src/share/vm/ci/ciMethod.hpp b/src/share/vm/ci/ciMethod.hpp
index c98f2c0dc..cbef39b3c 100644
--- a/src/share/vm/ci/ciMethod.hpp
+++ b/src/share/vm/ci/ciMethod.hpp
@@ -166,8 +166,9 @@ class ciMethod : public ciMetadata {
// Code size for inlining decisions.
int code_size_for_inlining();
- bool force_inline() { return get_Method()->force_inline(); }
- bool dont_inline() { return get_Method()->dont_inline(); }
+ bool caller_sensitive() { return get_Method()->caller_sensitive(); }
+ bool force_inline() { return get_Method()->force_inline(); }
+ bool dont_inline() { return get_Method()->dont_inline(); }
int comp_level();
int highest_osr_comp_level();
@@ -264,6 +265,9 @@ class ciMethod : public ciMetadata {
int instructions_size();
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC
+ // Stack walking support
+ bool is_ignored_by_security_stack_walk() const;
+
// JSR 292 support
bool is_method_handle_intrinsic() const;
bool is_compiled_lambda_form() const;
diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
index 47692ac63..c18e112c0 100644
--- a/src/share/vm/classfile/classFileParser.cpp
+++ b/src/share/vm/classfile/classFileParser.cpp
@@ -1735,9 +1735,14 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
Symbol* name) {
vmSymbols::SID sid = vmSymbols::find_sid(name);
// Privileged code can use all annotations. Other code silently drops some.
- bool privileged = loader_data->is_the_null_class_loader_data() ||
- loader_data->is_anonymous();
+ const bool privileged = loader_data->is_the_null_class_loader_data() ||
+ loader_data->is_ext_class_loader_data() ||
+ loader_data->is_anonymous();
switch (sid) {
+ case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature):
+ if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
+ return _method_CallerSensitive;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
@@ -1775,6 +1780,8 @@ ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
}
void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
+ if (has_annotation(_method_CallerSensitive))
+ m->set_caller_sensitive(true);
if (has_annotation(_method_ForceInline))
m->set_force_inline(true);
if (has_annotation(_method_DontInline))
diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp
index 0fd883046..3d6046935 100644
--- a/src/share/vm/classfile/classFileParser.hpp
+++ b/src/share/vm/classfile/classFileParser.hpp
@@ -119,6 +119,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
enum Location { _in_field, _in_method, _in_class };
enum ID {
_unknown = 0,
+ _method_CallerSensitive,
_method_ForceInline,
_method_DontInline,
_method_LambdaForm_Compiled,
diff --git a/src/share/vm/classfile/classLoaderData.cpp b/src/share/vm/classfile/classLoaderData.cpp
index f74f8f1ce..b680574d9 100644
--- a/src/share/vm/classfile/classLoaderData.cpp
+++ b/src/share/vm/classfile/classLoaderData.cpp
@@ -321,6 +321,13 @@ ClassLoaderData::~ClassLoaderData() {
}
}
+/**
+ * Returns true if this class loader data is for the extension class loader.
+ */
+bool ClassLoaderData::is_ext_class_loader_data() const {
+ return SystemDictionary::is_ext_class_loader(class_loader());
+}
+
Metaspace* ClassLoaderData::metaspace_non_null() {
assert(!DumpSharedSpaces, "wrong metaspace!");
// If the metaspace has not been allocated, create a new one. Might want
diff --git a/src/share/vm/classfile/classLoaderData.hpp b/src/share/vm/classfile/classLoaderData.hpp
index 8fbf454fe..6c8906b26 100644
--- a/src/share/vm/classfile/classLoaderData.hpp
+++ b/src/share/vm/classfile/classLoaderData.hpp
@@ -191,6 +191,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool is_the_null_class_loader_data() const {
return this == _the_null_class_loader_data;
}
+ bool is_ext_class_loader_data() const;
// The Metaspace is created lazily so may be NULL. This
// method will allocate a Metaspace if needed.
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 478509d5c..ac0f15e2c 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -1050,15 +1050,16 @@ class java_lang_invoke_MemberName: AllStatic {
// Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
enum {
- MN_IS_METHOD = 0x00010000, // method (not constructor)
- MN_IS_CONSTRUCTOR = 0x00020000, // constructor
- MN_IS_FIELD = 0x00040000, // field
- MN_IS_TYPE = 0x00080000, // nested type
+ MN_IS_METHOD = 0x00010000, // method (not constructor)
+ MN_IS_CONSTRUCTOR = 0x00020000, // constructor
+ MN_IS_FIELD = 0x00040000, // field
+ MN_IS_TYPE = 0x00080000, // nested type
+ MN_CALLER_SENSITIVE = 0x00100000, // @CallerSensitive annotation detected
MN_REFERENCE_KIND_SHIFT = 24, // refKind
- MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
+ MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
// The SEARCH_* bits are not for MN.flags but for the matchFlags argument of MHN.getMembers:
- MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes
- MN_SEARCH_INTERFACES = 0x00200000 // walk implemented interfaces
+ MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes
+ MN_SEARCH_INTERFACES = 0x00200000 // walk implemented interfaces
};
// Accessors for code generation:
diff --git a/src/share/vm/classfile/symbolTable.cpp b/src/share/vm/classfile/symbolTable.cpp
index fc19dd555..0f8da2d89 100644
--- a/src/share/vm/classfile/symbolTable.cpp
+++ b/src/share/vm/classfile/symbolTable.cpp
@@ -677,9 +677,14 @@ oop StringTable::lookup(Symbol* symbol) {
ResourceMark rm;
int length;
jchar* chars = symbol->as_unicode(length);
- unsigned int hashValue = hash_string(chars, length);
- int index = the_table()->hash_to_index(hashValue);
- return the_table()->lookup(index, chars, length, hashValue);
+ return lookup(chars, length);
+}
+
+
+oop StringTable::lookup(jchar* name, int len) {
+ unsigned int hash = hash_string(name, len);
+ int index = the_table()->hash_to_index(hash);
+ return the_table()->lookup(index, name, len, hash);
}
diff --git a/src/share/vm/classfile/symbolTable.hpp b/src/share/vm/classfile/symbolTable.hpp
index 3eee99ddb..a2896382f 100644
--- a/src/share/vm/classfile/symbolTable.hpp
+++ b/src/share/vm/classfile/symbolTable.hpp
@@ -287,6 +287,7 @@ public:
// Probing
static oop lookup(Symbol* symbol);
+ static oop lookup(jchar* chars, int length);
// Interning
static oop intern(Symbol* symbol, TRAPS);
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index edd310706..42571b68a 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -146,6 +146,17 @@ bool SystemDictionary::is_parallelDefine(Handle class_loader) {
}
return false;
}
+
+/**
+ * Returns true if the passed class loader is the extension class loader.
+ */
+bool SystemDictionary::is_ext_class_loader(Handle class_loader) {
+ if (class_loader.is_null()) {
+ return false;
+ }
+ return (class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_ExtClassLoader());
+}
+
// ----------------------------------------------------------------------------
// Resolving of classes
@@ -816,13 +827,28 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// We didn't go as far as Klass::restore_unshareable_info(),
// so nothing to clean up.
} else {
- MutexLocker mu(SystemDictionary_lock, THREAD);
- Klass* kk = find_class(name, ik->class_loader_data());
+ Klass *kk;
+ {
+ MutexLocker mu(SystemDictionary_lock, THREAD);
+ kk = find_class(name, ik->class_loader_data());
+ }
if (kk != NULL) {
// No clean up is needed if the shared class has been entered
// into system dictionary, as load_shared_class() won't be called
// again.
} else {
+ // This must be done outside of the SystemDictionary_lock to
+ // avoid deadlock.
+ //
+ // Note that Klass::restore_unshareable_info (called via
+ // load_instance_class above) is also called outside
+ // of SystemDictionary_lock. Other threads are blocked from
+ // loading this class because they are waiting on the
+ // SystemDictionary_lock until this thread removes
+ // the placeholder below.
+ //
+      // This needs to be re-thought when parallel-capable non-boot
+ // classloaders are supported by CDS (today they're not).
clean_up_shared_class(ik, class_loader, THREAD);
}
}
@@ -2185,10 +2211,9 @@ Symbol* SystemDictionary::find_resolution_error(constantPoolHandle pool, int whi
// Make sure all class components (including arrays) in the given
// signature will be resolved to the same class in both loaders.
// Returns the name of the type that failed a loader constraint check, or
-// NULL if no constraint failed. The returned C string needs cleaning up
-// with a ResourceMark in the caller. No exception except OOME is thrown.
+// NULL if no constraint failed. No exception except OOME is thrown.
// Arrays are not added to the loader constraint table, their elements are.
-char* SystemDictionary::check_signature_loaders(Symbol* signature,
+Symbol* SystemDictionary::check_signature_loaders(Symbol* signature,
Handle loader1, Handle loader2,
bool is_method, TRAPS) {
// Nothing to do if loaders are the same.
@@ -2196,14 +2221,12 @@ char* SystemDictionary::check_signature_loaders(Symbol* signature,
return NULL;
}
- ResourceMark rm(THREAD);
SignatureStream sig_strm(signature, is_method);
while (!sig_strm.is_done()) {
if (sig_strm.is_object()) {
- Symbol* s = sig_strm.as_symbol(CHECK_NULL);
- Symbol* sig = s;
+ Symbol* sig = sig_strm.as_symbol(CHECK_NULL);
if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
- return sig->as_C_string();
+ return sig;
}
}
sig_strm.next();
diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
index d282fedfb..f1ac0b4e6 100644
--- a/src/share/vm/classfile/systemDictionary.hpp
+++ b/src/share/vm/classfile/systemDictionary.hpp
@@ -106,6 +106,7 @@ class SymbolPropertyTable;
do_klass(ThreadDeath_klass, java_lang_ThreadDeath, Pre ) \
do_klass(Exception_klass, java_lang_Exception, Pre ) \
do_klass(RuntimeException_klass, java_lang_RuntimeException, Pre ) \
+ do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \
do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \
do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \
do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \
@@ -138,13 +139,14 @@ class SymbolPropertyTable;
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
- do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \
+ do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \
do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \
do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt ) \
do_klass(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15 ) \
do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15 ) \
+ do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
@@ -483,8 +485,8 @@ public:
// Check class loader constraints
static bool add_loader_constraint(Symbol* name, Handle loader1,
Handle loader2, TRAPS);
- static char* check_signature_loaders(Symbol* signature, Handle loader1,
- Handle loader2, bool is_method, TRAPS);
+ static Symbol* check_signature_loaders(Symbol* signature, Handle loader1,
+ Handle loader2, bool is_method, TRAPS);
// JSR 292
// find a java.lang.invoke.MethodHandle.invoke* method for a given signature
@@ -628,12 +630,15 @@ private:
static bool is_parallelCapable(Handle class_loader);
static bool is_parallelDefine(Handle class_loader);
+public:
+ static bool is_ext_class_loader(Handle class_loader);
+
+private:
static Klass* find_shared_class(Symbol* class_name);
// Setup link to hierarchy
static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
-private:
// We pass in the hashtable index so we can calculate it outside of
// the SystemDictionary_lock.
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index f28f6d488..1e66346ee 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -91,6 +91,7 @@
template(java_lang_StringBuffer, "java/lang/StringBuffer") \
template(java_lang_StringBuilder, "java/lang/StringBuilder") \
template(java_lang_CharSequence, "java/lang/CharSequence") \
+ template(java_lang_SecurityManager, "java/lang/SecurityManager") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
template(java_io_OutputStream, "java/io/OutputStream") \
@@ -211,6 +212,8 @@
template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \
template(sun_reflect_DelegatingClassLoader, "sun/reflect/DelegatingClassLoader") \
template(sun_reflect_Reflection, "sun/reflect/Reflection") \
+ template(sun_reflect_CallerSensitive, "sun/reflect/CallerSensitive") \
+ template(sun_reflect_CallerSensitive_signature, "Lsun/reflect/CallerSensitive;") \
template(checkedExceptions_name, "checkedExceptions") \
template(clazz_name, "clazz") \
template(exceptionTypes_name, "exceptionTypes") \
@@ -343,6 +346,7 @@
template(contextClassLoader_name, "contextClassLoader") \
template(inheritedAccessControlContext_name, "inheritedAccessControlContext") \
template(isPrivileged_name, "isPrivileged") \
+ template(getClassContext_name, "getClassContext") \
template(wait_name, "wait") \
template(checkPackageAccess_name, "checkPackageAccess") \
template(stackSize_name, "stackSize") \
@@ -463,6 +467,7 @@
template(void_classloader_signature, "()Ljava/lang/ClassLoader;") \
template(void_object_signature, "()Ljava/lang/Object;") \
template(void_class_signature, "()Ljava/lang/Class;") \
+ template(void_class_array_signature, "()[Ljava/lang/Class;") \
template(void_string_signature, "()Ljava/lang/String;") \
template(object_array_object_signature, "([Ljava/lang/Object;)Ljava/lang/Object;") \
template(object_object_array_object_signature, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
@@ -705,9 +710,8 @@
do_intrinsic(_getLength, java_lang_reflect_Array, getLength_name, object_int_signature, F_SN) \
do_name( getLength_name, "getLength") \
\
- do_intrinsic(_getCallerClass, sun_reflect_Reflection, getCallerClass_name, getCallerClass_signature, F_SN) \
+ do_intrinsic(_getCallerClass, sun_reflect_Reflection, getCallerClass_name, void_class_signature, F_SN) \
do_name( getCallerClass_name, "getCallerClass") \
- do_signature(getCallerClass_signature, "(I)Ljava/lang/Class;") \
\
do_intrinsic(_newArray, java_lang_reflect_Array, newArray_name, newArray_signature, F_SN) \
do_name( newArray_name, "newArray") \
diff --git a/src/share/vm/code/codeCache.hpp b/src/share/vm/code/codeCache.hpp
index 92ce241b9..e19aec61b 100644
--- a/src/share/vm/code/codeCache.hpp
+++ b/src/share/vm/code/codeCache.hpp
@@ -156,6 +156,11 @@ class CodeCache : AllStatic {
static address low_bound() { return (address) _heap->low_boundary(); }
static address high_bound() { return (address) _heap->high_boundary(); }
+ static bool has_space(int size) {
+ // Always leave some room in the CodeCache for I2C/C2I adapters
+ return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
+ }
+
// Profiling
static address first_address(); // first address used for CodeBlobs
static address last_address(); // last address used for CodeBlobs
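has_space() turns the "always keep CodeCacheMinimumFreeSpace free for I2C/C2I adapters" policy into an explicit precondition that callers can test before constructing anything. A self-contained sketch of the same headroom rule over a plain bump allocator (all names are illustrative stand-ins, not HotSpot APIs):

    #include <cstddef>
    #include <cstdlib>

    class ArenaSketch {
      static const size_t kMinimumFree = 4096; // analogue of CodeCacheMinimumFreeSpace
      char*  _base;
      size_t _capacity;
      size_t _used;
     public:
      explicit ArenaSketch(size_t capacity)
        : _base(static_cast<char*>(std::malloc(capacity))),
          _capacity(capacity), _used(0) {}
      ~ArenaSketch() { std::free(_base); }

      size_t largest_free_block() const { return _capacity - _used; }

      // Mirror of has_space(): strictly greater-than, so a successful
      // allocation still leaves more than kMinimumFree bytes behind.
      bool has_space(size_t size) const {
        return largest_free_block() > kMinimumFree + size;
      }

      void* allocate(size_t size) {  // callers are expected to check first
        void* p = _base + _used;
        _used += size;
        return p;
      }
    };

The nmethod factory methods in the next file adopt exactly this shape: they test has_space() up front, which in turn lets operator new treat a failed CodeCache::allocate() as a broken invariant (guarantee) rather than a policy outcome.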
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index 157136edc..55a2c05f5 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -486,7 +486,6 @@ void nmethod::init_defaults() {
#endif // def HAVE_DTRACE_H
}
-
nmethod* nmethod::new_native_nmethod(methodHandle method,
int compile_id,
CodeBuffer *code_buffer,
@@ -502,17 +501,19 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
- CodeOffsets offsets;
- offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
- offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
- nm = new (native_nmethod_size)
- nmethod(method(), native_nmethod_size, compile_id, &offsets,
- code_buffer, frame_size,
- basic_lock_owner_sp_offset, basic_lock_sp_offset,
- oop_maps);
- NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
- if (PrintAssembly && nm != NULL)
- Disassembler::decode(nm);
+ if (CodeCache::has_space(native_nmethod_size)) {
+ CodeOffsets offsets;
+ offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+ offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+ nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+ compile_id, &offsets,
+ code_buffer, frame_size,
+ basic_lock_owner_sp_offset,
+ basic_lock_sp_offset, oop_maps);
+ NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
+ if (PrintAssembly && nm != NULL)
+ Disassembler::decode(nm);
+ }
}
// verify nmethod
debug_only(if (nm) nm->verify();) // might block
@@ -537,16 +538,19 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
- CodeOffsets offsets;
- offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
- offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
- offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+ if (CodeCache::has_space(nmethod_size)) {
+ CodeOffsets offsets;
+ offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+ offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
+ offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
- nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);
+ nm = new (nmethod_size) nmethod(method(), nmethod_size,
+ &offsets, code_buffer, frame_size);
- NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
- if (PrintAssembly && nm != NULL)
- Disassembler::decode(nm);
+ NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
+ if (PrintAssembly && nm != NULL)
+ Disassembler::decode(nm);
+ }
}
// verify nmethod
debug_only(if (nm) nm->verify();) // might block
@@ -587,7 +591,8 @@ nmethod* nmethod::new_nmethod(methodHandle method,
+ round_to(handler_table->size_in_bytes(), oopSize)
+ round_to(nul_chk_table->size_in_bytes(), oopSize)
+ round_to(debug_info->data_size() , oopSize);
- nm = new (nmethod_size)
+ if (CodeCache::has_space(nmethod_size)) {
+ nm = new (nmethod_size)
nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
oop_maps,
@@ -595,6 +600,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
nul_chk_table,
compiler,
comp_level);
+ }
if (nm != NULL) {
// To make dependency checking during class loading fast, record
// the nmethod dependencies in the classes it is dependent on.
@@ -793,9 +799,9 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size) {
- // Always leave some room in the CodeCache for I2C/C2I adapters
- if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
- return CodeCache::allocate(nmethod_size);
+ void* alloc = CodeCache::allocate(nmethod_size);
+ guarantee(alloc != NULL, "CodeCache should have enough space");
+ return alloc;
}
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index 1b1e2d1cd..63207d082 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -2166,6 +2166,9 @@ void CompileBroker::print_times() {
comp->print_timers();
}
tty->cr();
+ tty->print_cr(" Total compiled methods : %6d methods", CompileBroker::_total_compile_count);
+ tty->print_cr(" Standard compilation : %6d methods", CompileBroker::_total_standard_compile_count);
+ tty->print_cr(" On stack replacement : %6d methods", CompileBroker::_total_osr_compile_count);
int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb);
tty->print_cr(" Standard compilation : %6d bytes", CompileBroker::_sum_standard_bytes_compiled);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 646479790..610942585 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -854,7 +854,8 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
assert(!isHumongous(word_size), "we do not allow humongous TLABs");
unsigned int dummy_gc_count_before;
- return attempt_allocation(word_size, &dummy_gc_count_before);
+ int dummy_gclocker_retry_count = 0;
+ return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}
HeapWord*
@@ -863,14 +864,14 @@ G1CollectedHeap::mem_allocate(size_t word_size,
assert_heap_not_locked_and_not_at_safepoint();
// Loop until the allocation is satisfied, or unsatisfied after GC.
- for (int try_count = 1; /* we'll return */; try_count += 1) {
+ for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
unsigned int gc_count_before;
HeapWord* result = NULL;
if (!isHumongous(word_size)) {
- result = attempt_allocation(word_size, &gc_count_before);
+ result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
} else {
- result = attempt_allocation_humongous(word_size, &gc_count_before);
+ result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
}
if (result != NULL) {
return result;
@@ -894,6 +895,9 @@ G1CollectedHeap::mem_allocate(size_t word_size,
}
return result;
} else {
+ if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+ return NULL;
+ }
assert(op.result() == NULL,
"the result should be NULL if the VM op did not succeed");
}
@@ -910,7 +914,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
}
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
- unsigned int *gc_count_before_ret) {
+ unsigned int *gc_count_before_ret,
+ int* gclocker_retry_count_ret) {
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint();
@@ -986,10 +991,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
return NULL;
}
} else {
+ if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+ MutexLockerEx x(Heap_lock);
+ *gc_count_before_ret = total_collections();
+ return NULL;
+ }
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GC_locker::stall_until_clear();
+ (*gclocker_retry_count_ret) += 1;
}
// We can reach here if we were unsuccessful in scheduling a
@@ -1019,7 +1030,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
- unsigned int * gc_count_before_ret) {
+ unsigned int * gc_count_before_ret,
+ int* gclocker_retry_count_ret) {
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
@@ -1104,10 +1116,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
return NULL;
}
} else {
+ if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+ MutexLockerEx x(Heap_lock);
+ *gc_count_before_ret = total_collections();
+ return NULL;
+ }
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GC_locker::stall_until_clear();
+ (*gclocker_retry_count_ret) += 1;
}
// We can reach here if we were unsuccessful in scheduling a
@@ -3270,12 +3288,12 @@ void G1CollectedHeap::verify(bool silent) {
void G1CollectedHeap::verify(bool silent,
VerifyOption vo) {
- if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
+ if (SafepointSynchronize::is_at_safepoint()) {
if (!silent) { gclog_or_tty->print("Roots "); }
VerifyRootsClosure rootsCl(vo);
assert(Thread::current()->is_VM_thread(),
- "Expected to be executed serially by the VM thread at this point");
+ "Expected to be executed serially by the VM thread at this point");
CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
VerifyKlassClosure klassCl(this, &rootsCl);
@@ -3360,7 +3378,8 @@ void G1CollectedHeap::verify(bool silent,
}
guarantee(!failures, "there should not have been any failures");
} else {
- if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
+ if (!silent)
+ gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
}
}
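The allocation hunks above (and the parallel-scavenge and generational paths further down) all gain the same bounded-retry rule: count each stall on the GCLocker and give up once the count passes GCLockerRetryAllocationCount, so a thread cannot stall forever behind back-to-back JNI critical sections. Returning NULL ultimately surfaces as an OutOfMemoryError. A condensed, self-contained sketch of the control flow (the helpers and the constant's value are trivial stand-ins for the heap's fast path, the GCLocker, and the VM flag):

    #include <cstddef>

    static const int GCLockerRetryAllocationCount = 2; // stand-in for the VM flag

    // Trivial stand-ins so the sketch is self-contained.
    static void* try_allocate(size_t) { return nullptr; }
    static bool  gc_locker_active_and_needs_gc() { return true; }
    static void  stall_until_gc_locker_clear() {}

    static void* mem_allocate_sketch(size_t word_size) {
      for (int try_count = 1, gclocker_retry_count = 0; ; try_count++) {
        void* result = try_allocate(word_size);
        if (result != nullptr) {
          return result;
        }
        if (gclocker_retry_count > GCLockerRetryAllocationCount) {
          return nullptr;                 // bounded: caller raises the OOME
        }
        if (gc_locker_active_and_needs_gc()) {
          stall_until_gc_locker_clear();  // wait out the JNI critical sections...
          gclocker_retry_count += 1;      // ...but only a bounded number of times
        }
      }
    }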
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 7dc5bd830..557daf85e 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -559,18 +559,21 @@ protected:
// the mutator alloc region without taking the Heap_lock. This
// should only be used for non-humongous allocations.
inline HeapWord* attempt_allocation(size_t word_size,
- unsigned int* gc_count_before_ret);
+ unsigned int* gc_count_before_ret,
+ int* gclocker_retry_count_ret);
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
- unsigned int* gc_count_before_ret);
+ unsigned int* gc_count_before_ret,
+ int* gclocker_retry_count_ret);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
HeapWord* attempt_allocation_humongous(size_t word_size,
- unsigned int* gc_count_before_ret);
+ unsigned int* gc_count_before_ret,
+ int* gclocker_retry_count_ret);
// Allocation attempt that should be called during safepoints (e.g.,
// at the end of a successful GC). expect_null_mutator_alloc_region
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
index 4f9c77262..20eb1693c 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -60,7 +60,8 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
- unsigned int* gc_count_before_ret) {
+ unsigned int* gc_count_before_ret,
+ int* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
@@ -68,7 +69,9 @@ G1CollectedHeap::attempt_allocation(size_t word_size,
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
- result = attempt_allocation_slow(word_size, gc_count_before_ret);
+ result = attempt_allocation_slow(word_size,
+ gc_count_before_ret,
+ gclocker_retry_count_ret);
}
assert_heap_not_locked();
if (result != NULL) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index 0c39e69e1..5d77e31b6 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -326,6 +326,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
uint loop_count = 0;
uint gc_count = 0;
+ int gclocker_stalled_count = 0;
while (result == NULL) {
// We don't want to have multiple collections for a single filled generation.
@@ -354,6 +355,10 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
return result;
}
+ if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+ return NULL;
+ }
+
// Failed to allocate without a gc.
if (GC_locker::is_active_and_needs_gc()) {
// If this thread is not in a jni critical section, we stall
@@ -366,6 +371,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock);
GC_locker::stall_until_clear();
+ gclocker_stalled_count += 1;
continue;
} else {
if (CheckJNICalls) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
index 48e3ebb45..2cb3b35e0 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
@@ -256,7 +256,7 @@ void PSOldGen::expand(size_t bytes) {
}
if (PrintGC && Verbose) {
- if (success && GC_locker::is_active()) {
+ if (success && GC_locker::is_active_and_needs_gc()) {
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
}
}
diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp
index f5639a55b..f4b67eeed 100644
--- a/src/share/vm/interpreter/linkResolver.cpp
+++ b/src/share/vm/interpreter/linkResolver.cpp
@@ -458,25 +458,27 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
{
ResourceMark rm(THREAD);
- char* failed_type_name =
+ Symbol* failed_type_symbol =
SystemDictionary::check_signature_loaders(method_signature, loader,
class_loader, true, CHECK);
- if (failed_type_name != NULL) {
+ if (failed_type_symbol != NULL) {
const char* msg = "loader constraint violation: when resolving method"
" \"%s\" the class loader (instance of %s) of the current class, %s,"
- " and the class loader (instance of %s) for resolved class, %s, have"
+ " and the class loader (instance of %s) for the method's defining class, %s, have"
" different Class objects for the type %s used in the signature";
char* sig = Method::name_and_sig_as_C_string(resolved_klass(),method_name,method_signature);
const char* loader1 = SystemDictionary::loader_name(loader());
char* current = InstanceKlass::cast(current_klass())->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(class_loader());
- char* resolved = InstanceKlass::cast(resolved_klass())->name()->as_C_string();
+ char* target = InstanceKlass::cast(resolved_method->method_holder())
+ ->name()->as_C_string();
+ char* failed_type_name = failed_type_symbol->as_C_string();
size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
- strlen(current) + strlen(loader2) + strlen(resolved) +
- strlen(failed_type_name);
+ strlen(current) + strlen(loader2) + strlen(target) +
+ strlen(failed_type_name) + 1;
char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
- resolved, failed_type_name);
+ target, failed_type_name);
THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
}
}
@@ -520,26 +522,28 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
{
ResourceMark rm(THREAD);
- char* failed_type_name =
+ Symbol* failed_type_symbol =
SystemDictionary::check_signature_loaders(method_signature, loader,
class_loader, true, CHECK);
- if (failed_type_name != NULL) {
+ if (failed_type_symbol != NULL) {
const char* msg = "loader constraint violation: when resolving "
"interface method \"%s\" the class loader (instance of %s) of the "
"current class, %s, and the class loader (instance of %s) for "
- "resolved class, %s, have different Class objects for the type %s "
+ "the method's defining class, %s, have different Class objects for the type %s "
"used in the signature";
char* sig = Method::name_and_sig_as_C_string(resolved_klass(),method_name,method_signature);
const char* loader1 = SystemDictionary::loader_name(loader());
char* current = InstanceKlass::cast(current_klass())->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(class_loader());
- char* resolved = InstanceKlass::cast(resolved_klass())->name()->as_C_string();
+ char* target = InstanceKlass::cast(resolved_method->method_holder())
+ ->name()->as_C_string();
+ char* failed_type_name = failed_type_symbol->as_C_string();
size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
- strlen(current) + strlen(loader2) + strlen(resolved) +
- strlen(failed_type_name);
+ strlen(current) + strlen(loader2) + strlen(target) +
+ strlen(failed_type_name) + 1;
char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
- resolved, failed_type_name);
+ target, failed_type_name);
THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
}
}
@@ -642,12 +646,12 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
Symbol* signature_ref = pool->signature_ref_at(index);
{
ResourceMark rm(THREAD);
- char* failed_type_name =
+ Symbol* failed_type_symbol =
SystemDictionary::check_signature_loaders(signature_ref,
ref_loader, sel_loader,
false,
CHECK);
- if (failed_type_name != NULL) {
+ if (failed_type_symbol != NULL) {
const char* msg = "loader constraint violation: when resolving field"
" \"%s\" the class loader (instance of %s) of the referring class, "
"%s, and the class loader (instance of %s) for the field's resolved "
@@ -656,8 +660,9 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
const char* loader1 = SystemDictionary::loader_name(ref_loader());
char* sel = InstanceKlass::cast(sel_klass())->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(sel_loader());
+ char* failed_type_name = failed_type_symbol->as_C_string();
size_t buflen = strlen(msg) + strlen(field_name) + strlen(loader1) +
- strlen(sel) + strlen(loader2) + strlen(failed_type_name);
+ strlen(sel) + strlen(loader2) + strlen(failed_type_name) + 1;
char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
jio_snprintf(buf, buflen, msg, field_name, loader1, sel, loader2,
failed_type_name);
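Alongside the switch to Symbol*, each error-message site now sizes its buffer with an explicit "+ 1", making the NUL terminator part of the length computation instead of an accident of the %s directives in the format string. A minimal standalone illustration (plain snprintf stands in for jio_snprintf; the message text is invented for the example):

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* msg   = "loader constraint violation for type %s seen in %s";
      const char* type  = "java/lang/String";
      const char* where = "Example.run";
      // Upper bound on the formatted length, plus one byte for the NUL.
      size_t buflen = strlen(msg) + strlen(type) + strlen(where) + 1;
      char* buf = new char[buflen];
      std::snprintf(buf, buflen, msg, type, where);
      std::puts(buf);
      delete[] buf;
      return 0;
    }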
diff --git a/src/share/vm/memory/collectorPolicy.cpp b/src/share/vm/memory/collectorPolicy.cpp
index d5eb387a0..a2049597a 100644
--- a/src/share/vm/memory/collectorPolicy.cpp
+++ b/src/share/vm/memory/collectorPolicy.cpp
@@ -532,7 +532,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
// Loop until the allocation is satisfied,
// or unsatisfied after GC.
- for (int try_count = 1; /* return or throw */; try_count += 1) {
+ for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
HandleMark hm; // discard any handles allocated in each iteration
// First allocation attempt is lock-free.
@@ -576,6 +576,10 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
}
}
+ if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+ return NULL; // we didn't get to do a GC and we didn't get any memory
+ }
+
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
@@ -587,6 +591,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
MutexUnlocker mul(Heap_lock);
// Wait for JNI critical section to be exited
GC_locker::stall_until_clear();
+ gclocker_stalled_count += 1;
continue;
} else {
if (CheckJNICalls) {
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
index 79e092a3b..d2bbf0175 100644
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -953,15 +953,6 @@ void Universe::update_heap_info_at_gc() {
void universe2_init() {
EXCEPTION_MARK;
Universe::genesis(CATCH);
- // Although we'd like to verify here that the state of the heap
- // is good, we can't because the main thread has not yet added
- // itself to the threads list (so, using current interfaces
- // we can't "fill" its TLAB), unless TLABs are disabled.
- if (VerifyBeforeGC && !UseTLAB &&
- Universe::heap()->total_collections() >= VerifyGCStartAt) {
- Universe::heap()->prepare_for_verify();
- Universe::verify(); // make sure we're starting with a clean slate
- }
}
diff --git a/src/share/vm/oops/constMethod.cpp b/src/share/vm/oops/constMethod.cpp
index 98a29a2e1..1d0376a0b 100644
--- a/src/share/vm/oops/constMethod.cpp
+++ b/src/share/vm/oops/constMethod.cpp
@@ -363,6 +363,26 @@ AnnotationArray** ConstMethod::default_annotations_addr() const {
return (AnnotationArray**)constMethod_end() - offset;
}
+// copy annotations from 'cm' to 'this'
+void ConstMethod::copy_annotations_from(ConstMethod* cm) {
+ if (cm->has_method_annotations()) {
+ assert(has_method_annotations(), "should be allocated already");
+ set_method_annotations(cm->method_annotations());
+ }
+ if (cm->has_parameter_annotations()) {
+ assert(has_parameter_annotations(), "should be allocated already");
+ set_parameter_annotations(cm->parameter_annotations());
+ }
+ if (cm->has_type_annotations()) {
+ assert(has_type_annotations(), "should be allocated already");
+ set_type_annotations(cm->type_annotations());
+ }
+ if (cm->has_default_annotations()) {
+ assert(has_default_annotations(), "should be allocated already");
+ set_default_annotations(cm->default_annotations());
+ }
+}
+
// Printing
void ConstMethod::print_on(outputStream* st) const {
diff --git a/src/share/vm/oops/constMethod.hpp b/src/share/vm/oops/constMethod.hpp
index 0c4212564..21df75bde 100644
--- a/src/share/vm/oops/constMethod.hpp
+++ b/src/share/vm/oops/constMethod.hpp
@@ -441,6 +441,9 @@ public:
return has_default_annotations() ? default_annotations()->length() : 0;
}
+ // Copy annotations from other ConstMethod
+ void copy_annotations_from(ConstMethod* cm);
+
// byte codes
void set_code(address code) {
if (code_size() > 0) {
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index e296c501c..78d158a86 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -2228,8 +2228,6 @@ void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
}
void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
-#ifdef COMPILER2
- // Currently only used by C2.
for (int m = 0; m < methods()->length(); m++) {
MethodData* mdo = methods()->at(m)->method_data();
if (mdo != NULL) {
@@ -2240,15 +2238,6 @@ void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
}
}
}
-#else
-#ifdef ASSERT
- // Verify that we haven't started to use MDOs for C1.
- for (int m = 0; m < methods()->length(); m++) {
- MethodData* mdo = methods()->at(m)->method_data();
- assert(mdo == NULL, "Didn't expect C1 to use MDOs");
- }
-#endif // ASSERT
-#endif // !COMPILER2
}
diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp
index 43036e754..3f8609532 100644
--- a/src/share/vm/oops/klassVtable.cpp
+++ b/src/share/vm/oops/klassVtable.cpp
@@ -327,11 +327,11 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
if (target_loader() != super_loader()) {
ResourceMark rm(THREAD);
- char* failed_type_name =
+ Symbol* failed_type_symbol =
SystemDictionary::check_signature_loaders(signature, target_loader,
super_loader, true,
CHECK_(false));
- if (failed_type_name != NULL) {
+ if (failed_type_symbol != NULL) {
const char* msg = "loader constraint violation: when resolving "
"overridden method \"%s\" the class loader (instance"
" of %s) of the current class, %s, and its superclass loader "
@@ -341,6 +341,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
const char* loader1 = SystemDictionary::loader_name(target_loader());
char* current = _klass->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(super_loader());
+ char* failed_type_name = failed_type_symbol->as_C_string();
size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
strlen(current) + strlen(loader2) + strlen(failed_type_name);
char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
@@ -787,12 +788,12 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
if (method_holder_loader() != interface_loader()) {
ResourceMark rm(THREAD);
- char* failed_type_name =
+ Symbol* failed_type_symbol =
SystemDictionary::check_signature_loaders(method_signature,
method_holder_loader,
interface_loader,
true, CHECK);
- if (failed_type_name != NULL) {
+ if (failed_type_symbol != NULL) {
const char* msg = "loader constraint violation in interface "
"itable initialization: when resolving method \"%s\" the class"
" loader (instance of %s) of the current class, %s, "
@@ -804,6 +805,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
char* current = klass->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(interface_loader());
char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string();
+ char* failed_type_name = failed_type_symbol->as_C_string();
size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
strlen(current) + strlen(loader2) + strlen(iface) +
strlen(failed_type_name);
diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp
index 11ddd21f1..1e74a2148 100644
--- a/src/share/vm/oops/method.cpp
+++ b/src/share/vm/oops/method.cpp
@@ -967,6 +967,32 @@ bool Method::should_not_be_cached() const {
return false;
}
+
+/**
+ * Returns true if this is one of the specially treated methods for
+ * security-related stack walks (like Reflection.getCallerClass).
+ */
+bool Method::is_ignored_by_security_stack_walk() const {
+ const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
+
+ assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
+ if (intrinsic_id() == vmIntrinsics::_invoke) {
+ // This is Method.invoke() -- ignore it
+ return true;
+ }
+ if (use_new_reflection &&
+ method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
+ // This is an auxiliary frame -- ignore it
+ return true;
+ }
+ if (is_method_handle_intrinsic() || is_compiled_lambda_form()) {
+ // This is an internal adapter frame for method handles -- ignore it
+ return true;
+ }
+ return false;
+}
+
+
// Constant pool structure for invoke methods:
enum {
_imcp_invoke_name = 1, // utf8: 'invokeExact', etc.
@@ -1170,6 +1196,8 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n
newm->set_stackmap_data(stackmap_data);
}
+ // copy annotations over to new method
+ newcm->copy_annotations_from(cm);
return newm;
}
@@ -1178,13 +1206,13 @@ vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) {
// because we are not loading from core libraries
// exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
// which does not use the class default class loader so we check for its loader here
- if ((InstanceKlass::cast(holder)->class_loader() != NULL) &&
- InstanceKlass::cast(holder)->class_loader()->klass()->name() != vmSymbols::sun_misc_Launcher_ExtClassLoader()) {
+ InstanceKlass* ik = InstanceKlass::cast(holder);
+ if ((ik->class_loader() != NULL) && !SystemDictionary::is_ext_class_loader(ik->class_loader())) {
return vmSymbols::NO_SID; // regardless of name, no intrinsics here
}
// see if the klass name is well-known:
- Symbol* klass_name = InstanceKlass::cast(holder)->name();
+ Symbol* klass_name = ik->name();
return vmSymbols::find_sid(klass_name);
}
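is_ignored_by_security_stack_walk() gives the runtime walker (vframeStream/JVM_GetCallerClass) and the C2 intrinsic one shared definition of a frame that does not count: Method.invoke itself, the generated MethodAccessorImpl helpers, and method-handle adapter frames. A schematic walk using that predicate shape, over a simplified stand-in stack (frame 0 is getCallerClass itself, frame 1 the @CallerSensitive API method):

    #include <vector>

    // Simplified stand-in for Method: just the two facts the walk needs.
    struct MethodSketch {
      bool        ignored_by_security_walk;  // invoke/reflection/MH adapter frame
      const char* holder_name;
    };

    // Return the holder of the first frame that counts, skipping frames 0 and 1.
    static const char* get_caller_class(const std::vector<MethodSketch>& stack) {
      for (size_t n = 2; n < stack.size(); n++) {
        if (!stack[n].ignored_by_security_walk) {
          return stack[n].holder_name;       // the caller to report
        }
      }
      return nullptr;                        // walked off the stack
    }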
diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp
index 0c6203fc6..ea92383d1 100644
--- a/src/share/vm/oops/method.hpp
+++ b/src/share/vm/oops/method.hpp
@@ -118,11 +118,12 @@ class Method : public Metadata {
#endif
u2 _method_size; // size of this object
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
- u1 _jfr_towrite : 1, // Flags
- _force_inline : 1,
- _hidden : 1,
- _dont_inline : 1,
- : 4;
+ u1 _jfr_towrite : 1, // Flags
+ _caller_sensitive : 1,
+ _force_inline : 1,
+ _hidden : 1,
+ _dont_inline : 1,
+ : 3;
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
u2 _number_of_breakpoints; // fullspeed debugging support
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
@@ -618,6 +619,9 @@ class Method : public Metadata {
// Reflection support
bool is_overridden_in(Klass* k) const;
+ // Stack walking support
+ bool is_ignored_by_security_stack_walk() const;
+
// JSR 292 support
bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
@@ -705,15 +709,16 @@ class Method : public Metadata {
void init_intrinsic_id(); // updates from _none if a match
static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
- bool jfr_towrite() { return _jfr_towrite; }
- void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
-
- bool force_inline() { return _force_inline; }
- void set_force_inline(bool x) { _force_inline = x; }
- bool dont_inline() { return _dont_inline; }
- void set_dont_inline(bool x) { _dont_inline = x; }
- bool is_hidden() { return _hidden; }
- void set_hidden(bool x) { _hidden = x; }
+ bool jfr_towrite() { return _jfr_towrite; }
+ void set_jfr_towrite(bool x) { _jfr_towrite = x; }
+ bool caller_sensitive() { return _caller_sensitive; }
+ void set_caller_sensitive(bool x) { _caller_sensitive = x; }
+ bool force_inline() { return _force_inline; }
+ void set_force_inline(bool x) { _force_inline = x; }
+ bool dont_inline() { return _dont_inline; }
+ void set_dont_inline(bool x) { _dont_inline = x; }
+ bool is_hidden() { return _hidden; }
+ void set_hidden(bool x) { _hidden = x; }
ConstMethod::MethodType method_type() const {
return _constMethod->method_type();
}
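The new _caller_sensitive bit joins the other one-bit flags, and the anonymous padding shrinks from 4 bits to 3, so the whole set still occupies a single u1. A freestanding sketch of that packing (u1 spelled as uint8_t here):

    #include <cstdint>

    typedef uint8_t u1;

    struct MethodFlagsSketch {
      u1 _jfr_towrite      : 1,
         _caller_sensitive : 1,
         _force_inline     : 1,
         _hidden           : 1,
         _dont_inline      : 1,
                           : 3;  // 5 named bits + 3 spare = 8

      bool caller_sensitive() const     { return _caller_sensitive; }
      void set_caller_sensitive(bool x) { _caller_sensitive = x; }
    };

    static_assert(sizeof(MethodFlagsSketch) == 1, "flags must fit in one byte");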
diff --git a/src/share/vm/oops/methodData.cpp b/src/share/vm/oops/methodData.cpp
index c9687fb32..e43b93baf 100644
--- a/src/share/vm/oops/methodData.cpp
+++ b/src/share/vm/oops/methodData.cpp
@@ -392,6 +392,9 @@ MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle meth
}
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
+#if defined(COMPILER1) && !defined(COMPILER2)
+ return no_profile_data;
+#else
switch (code) {
case Bytecodes::_checkcast:
case Bytecodes::_instanceof:
@@ -438,6 +441,7 @@ int MethodData::bytecode_cell_count(Bytecodes::Code code) {
return variable_cell_count;
}
return no_profile_data;
+#endif
}
// Compute the size of the profiling information corresponding to
@@ -509,6 +513,9 @@ int MethodData::compute_allocation_size_in_words(methodHandle method) {
// the segment in bytes.
int MethodData::initialize_data(BytecodeStream* stream,
int data_index) {
+#if defined(COMPILER1) && !defined(COMPILER2)
+ return 0;
+#else
int cell_count = -1;
int tag = DataLayout::no_tag;
DataLayout* data_layout = data_layout_at(data_index);
@@ -587,6 +594,7 @@ int MethodData::initialize_data(BytecodeStream* stream,
assert(!bytecode_has_profile(c), "agree w/ !BHP");
return 0;
}
+#endif
}
// Get the data at an arbitrary (sort of) data index.
diff --git a/src/share/vm/oops/symbol.cpp b/src/share/vm/oops/symbol.cpp
index b3c71813b..253d0df88 100644
--- a/src/share/vm/oops/symbol.cpp
+++ b/src/share/vm/oops/symbol.cpp
@@ -162,7 +162,7 @@ char* Symbol::as_quoted_ascii() const {
const char *ptr = (const char *)&_body[0];
int quoted_length = UTF8::quoted_ascii_length(ptr, utf8_length());
char* result = NEW_RESOURCE_ARRAY(char, quoted_length + 1);
- UTF8::as_quoted_ascii(ptr, result, quoted_length + 1);
+ UTF8::as_quoted_ascii(ptr, utf8_length(), result, quoted_length + 1);
return result;
}
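The extra utf8_length() argument matters because a Symbol's body is length-delimited, not NUL-terminated: the converter has to be told how many input bytes to read instead of scanning for a terminator. A minimal standalone analogue of a length-bounded quoting routine (the escaping scheme here is illustrative, not UTF8::as_quoted_ascii's exact one):

    #include <cstdio>
    #include <string>

    static std::string quoted_ascii(const char* ptr, int length) {
      std::string out;
      for (int i = 0; i < length; i++) {   // bounded by length, never by '\0'
        unsigned char c = static_cast<unsigned char>(ptr[i]);
        if (c >= 0x20 && c < 0x7f) {
          out.push_back(static_cast<char>(c));
        } else {
          char buf[8];
          std::snprintf(buf, sizeof(buf), "\\u%04x",
                        static_cast<unsigned>(c)); // escape non-printables
          out.append(buf);
        }
      }
      return out;
    }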
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index cd329a211..028020f51 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -3445,7 +3445,6 @@ void GraphKit::sync_kit(IdealKit& ideal) {
void GraphKit::final_sync(IdealKit& ideal) {
// Final sync IdealKit and graphKit.
- __ drain_delay_transform();
sync_kit(ideal);
}
diff --git a/src/share/vm/opto/idealKit.cpp b/src/share/vm/opto/idealKit.cpp
index 986f2e178..90eff2bb5 100644
--- a/src/share/vm/opto/idealKit.cpp
+++ b/src/share/vm/opto/idealKit.cpp
@@ -48,9 +48,9 @@ IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarati
_cvstate = NULL;
// We can go memory state free or else we need the entire memory state
assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
+ assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase");
int init_size = 5;
_pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
- _delay_transform = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
if (!has_declarations) {
declarations_done();
@@ -296,19 +296,16 @@ Node* IdealKit::transform(Node* n) {
return delay_transform(n);
} else {
n = gvn().transform(n);
- if (!gvn().is_IterGVN()) {
- C->record_for_igvn(n);
- }
+ C->record_for_igvn(n);
return n;
}
}
//-----------------------------delay_transform-----------------------------------
Node* IdealKit::delay_transform(Node* n) {
- if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
- gvn().set_type(n, n->bottom_type());
- }
- _delay_transform->push(n);
+ // Delay transform until IterativeGVN
+ gvn().set_type(n, n->bottom_type());
+ C->record_for_igvn(n);
return n;
}
@@ -332,17 +329,6 @@ void IdealKit::clear(Node* m) {
for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL);
}
-//-----------------------------drain_delay_transform----------------------------
-void IdealKit::drain_delay_transform() {
- while (_delay_transform->length() > 0) {
- Node* n = _delay_transform->pop();
- gvn().transform(n);
- if (!gvn().is_IterGVN()) {
- C->record_for_igvn(n);
- }
- }
-}
-
//-----------------------------IdealVariable----------------------------
IdealVariable::IdealVariable(IdealKit &k) {
k.declare(this);
@@ -351,9 +337,7 @@ IdealVariable::IdealVariable(IdealKit &k) {
Node* IdealKit::memory(uint alias_idx) {
MergeMemNode* mem = merged_memory();
Node* p = mem->memory_at(alias_idx);
- if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
- _gvn.set_type(p, Type::MEMORY); // must be mapped
- }
+ _gvn.set_type(p, Type::MEMORY); // must be mapped
return p;
}
diff --git a/src/share/vm/opto/idealKit.hpp b/src/share/vm/opto/idealKit.hpp
index 15e4274db..16833c0cb 100644
--- a/src/share/vm/opto/idealKit.hpp
+++ b/src/share/vm/opto/idealKit.hpp
@@ -102,7 +102,6 @@ class IdealKit: public StackObj {
Compile * const C;
PhaseGVN &_gvn;
GrowableArray<Node*>* _pending_cvstates; // stack of cvstates
- GrowableArray<Node*>* _delay_transform; // delay invoking gvn.transform until drain
Node* _cvstate; // current cvstate (control, memory and variables)
uint _var_ct; // number of variables
bool _delay_all_transforms; // flag forcing all transforms to be delayed
@@ -121,7 +120,7 @@ class IdealKit: public StackObj {
void clear(Node* m); // clear a cvstate
void stop() { clear(_cvstate); } // clear current cvstate
Node* delay_transform(Node* n);
- Node* transform(Node* n); // gvn.transform or push node on delay list
+ Node* transform(Node* n); // gvn.transform or skip it
Node* promote_to_phi(Node* n, Node* reg);// Promote "n" to a phi on region "reg"
bool was_promoted_to_phi(Node* n, Node* reg) {
return (n->is_Phi() && n->in(0) == reg);
@@ -146,7 +145,6 @@ class IdealKit: public StackObj {
IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false);
~IdealKit() {
stop();
- drain_delay_transform();
}
void sync_kit(GraphKit* gkit);
@@ -173,7 +171,6 @@ class IdealKit: public StackObj {
void bind(Node* lab);
void goto_(Node* lab, bool bind = false);
void declarations_done();
- void drain_delay_transform();
Node* IfTrue(IfNode* iff) { return transform(new (C) IfTrueNode(iff)); }
Node* IfFalse(IfNode* iff) { return transform(new (C) IfFalseNode(iff)); }
@@ -198,7 +195,11 @@ class IdealKit: public StackObj {
Node* thread() { return gvn().transform(new (C) ThreadLocalNode()); }
// Pointers
- Node* AddP(Node *base, Node *ptr, Node *off) { return transform(new (C) AddPNode(base, ptr, off)); }
+
+ // Raw addresses should be transformed regardless of the 'delay_transform' flag
+ // to produce canonical form CastX2P(offset).
+ Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new (C) AddPNode(base, ptr, off)); }
+
Node* CmpP(Node* l, Node* r) { return transform(new (C) CmpPNode(l, r)); }
#ifdef _LP64
Node* XorX(Node* l, Node* r) { return transform(new (C) XorLNode(l, r)); }
@@ -208,8 +209,6 @@ class IdealKit: public StackObj {
Node* URShiftX(Node* l, Node* r) { return transform(new (C) URShiftXNode(l, r)); }
Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
Node* CastPX(Node* ctl, Node* p) { return transform(new (C) CastP2XNode(ctl, p)); }
- // Add a fixed offset to a pointer
- Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset);
// Memory operations
diff --git a/src/share/vm/opto/ifg.cpp b/src/share/vm/opto/ifg.cpp
index 44828fa6a..c40265214 100644
--- a/src/share/vm/opto/ifg.cpp
+++ b/src/share/vm/opto/ifg.cpp
@@ -37,8 +37,6 @@
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
-#define EXACT_PRESSURE 1
-
//=============================================================================
//------------------------------IFG--------------------------------------------
PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
@@ -445,23 +443,15 @@ static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint
pressure[1] -= lrg->reg_pressure();
if( pressure[1] == (uint)FLOATPRESSURE ) {
hrp_index[1] = where;
-#ifdef EXACT_PRESSURE
- if( pressure[1] > b->_freg_pressure )
- b->_freg_pressure = pressure[1]+1;
-#else
- b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
+ if( pressure[1] > b->_freg_pressure )
+ b->_freg_pressure = pressure[1]+1;
}
} else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
pressure[0] -= lrg->reg_pressure();
if( pressure[0] == (uint)INTPRESSURE ) {
hrp_index[0] = where;
-#ifdef EXACT_PRESSURE
- if( pressure[0] > b->_reg_pressure )
- b->_reg_pressure = pressure[0]+1;
-#else
- b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
+ if( pressure[0] > b->_reg_pressure )
+ b->_reg_pressure = pressure[0]+1;
}
}
}
@@ -526,17 +516,13 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (lrg.mask().is_UP() && lrg.mask_size()) {
if (lrg._is_float || lrg._is_vector) { // Count float pressure
pressure[1] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
if( pressure[1] > b->_freg_pressure )
b->_freg_pressure = pressure[1];
-#endif
// Count int pressure, but do not count the SP, flags
} else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
pressure[0] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
if( pressure[0] > b->_reg_pressure )
b->_reg_pressure = pressure[0];
-#endif
}
}
}
@@ -589,30 +575,20 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
RegMask itmp = lrgs(r).mask();
itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
int iregs = itmp.Size();
-#ifdef EXACT_PRESSURE
if( pressure[0]+iregs > b->_reg_pressure )
b->_reg_pressure = pressure[0]+iregs;
-#endif
if( pressure[0] <= (uint)INTPRESSURE &&
pressure[0]+iregs > (uint)INTPRESSURE ) {
-#ifndef EXACT_PRESSURE
- b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
hrp_index[0] = j-1;
}
// Count the float-only registers
RegMask ftmp = lrgs(r).mask();
ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
int fregs = ftmp.Size();
-#ifdef EXACT_PRESSURE
if( pressure[1]+fregs > b->_freg_pressure )
b->_freg_pressure = pressure[1]+fregs;
-#endif
if( pressure[1] <= (uint)FLOATPRESSURE &&
pressure[1]+fregs > (uint)FLOATPRESSURE ) {
-#ifndef EXACT_PRESSURE
- b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
hrp_index[1] = j-1;
}
}
@@ -769,16 +745,12 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (lrg.mask().is_UP() && lrg.mask_size()) {
if (lrg._is_float || lrg._is_vector) {
pressure[1] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
if( pressure[1] > b->_freg_pressure )
b->_freg_pressure = pressure[1];
-#endif
} else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
pressure[0] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
if( pressure[0] > b->_reg_pressure )
b->_reg_pressure = pressure[0];
-#endif
}
}
assert( pressure[0] == count_int_pressure (&liveout), "" );
@@ -794,21 +766,13 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// the whole block is high pressure.
if( pressure[0] > (uint)INTPRESSURE ) {
hrp_index[0] = 0;
-#ifdef EXACT_PRESSURE
if( pressure[0] > b->_reg_pressure )
b->_reg_pressure = pressure[0];
-#else
- b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
}
if( pressure[1] > (uint)FLOATPRESSURE ) {
hrp_index[1] = 0;
-#ifdef EXACT_PRESSURE
if( pressure[1] > b->_freg_pressure )
b->_freg_pressure = pressure[1];
-#else
- b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
}
// Compute high pressure indices; avoid landing in the middle of projnodes
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index cdcb94177..1f4b58ebb 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -231,7 +231,6 @@ class LibraryCallKit : public GraphKit {
void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
bool inline_native_clone(bool is_virtual);
bool inline_native_Reflection_getCallerClass();
- bool is_method_invoke_or_aux_frame(JVMState* jvms);
// Helper function for inlining native object hash method
bool inline_native_hashcode(bool is_virtual, bool is_static);
bool inline_native_getClass();
@@ -393,7 +392,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
case vmIntrinsics::_getCallerClass:
if (!UseNewReflection) return NULL;
if (!InlineReflectionGetCallerClass) return NULL;
- if (!JDK_Version::is_gte_jdk14x_version()) return NULL;
+ if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) return NULL;
break;
case vmIntrinsics::_bitCount_i:
@@ -3872,13 +3871,13 @@ bool LibraryCallKit::inline_native_getClass() {
}
//-----------------inline_native_Reflection_getCallerClass---------------------
-// public static native Class<?> sun.reflect.Reflection.getCallerClass(int realFramesToSkip);
+// public static native Class<?> sun.reflect.Reflection.getCallerClass();
//
// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
//
-// NOTE that this code must perform the same logic as
-// vframeStream::security_get_caller_frame in that it must skip
-// Method.invoke() and auxiliary frames.
+// NOTE: This code must perform the same logic as JVM_GetCallerClass
+// in that it must skip particular security frames and checks for
+// caller-sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
@@ -3886,35 +3885,6 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
}
#endif
- Node* caller_depth_node = argument(0);
-
- // The depth value must be a constant in order for the runtime call
- // to be eliminated.
- const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
- if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
-#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
- tty->print_cr(" Bailing out because caller depth was not a constant");
- }
-#endif
- return false;
- }
- // Note that the JVM state at this point does not include the
- // getCallerClass() frame which we are trying to inline. The
- // semantics of getCallerClass(), however, are that the "first"
- // frame is the getCallerClass() frame, so we subtract one from the
- // requested depth before continuing. We don't inline requests of
- // getCallerClass(0).
- int caller_depth = caller_depth_type->get_con() - 1;
- if (caller_depth < 0) {
-#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
- tty->print_cr(" Bailing out because caller depth was %d", caller_depth);
- }
-#endif
- return false;
- }
-
if (!jvms()->has_method()) {
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
@@ -3923,95 +3893,67 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#endif
return false;
}
- int _depth = jvms()->depth(); // cache call chain depth
// Walk back up the JVM state to find the caller at the required
- // depth. NOTE that this code must perform the same logic as
- // vframeStream::security_get_caller_frame in that it must skip
- // Method.invoke() and auxiliary frames. Note also that depth is
- // 1-based (1 is the bottom of the inlining).
- int inlining_depth = _depth;
- JVMState* caller_jvms = NULL;
-
- if (inlining_depth > 0) {
- caller_jvms = jvms();
- assert(caller_jvms = jvms()->of_depth(inlining_depth), "inlining_depth == our depth");
- do {
- // The following if-tests should be performed in this order
- if (is_method_invoke_or_aux_frame(caller_jvms)) {
- // Skip a Method.invoke() or auxiliary frame
- } else if (caller_depth > 0) {
- // Skip real frame
- --caller_depth;
- } else {
- // We're done: reached desired caller after skipping.
- break;
+ // depth.
+ JVMState* caller_jvms = jvms();
+
+ // Cf. JVM_GetCallerClass
+ // NOTE: Start the loop at depth 1 because the current JVM state does
+ // not include the Reflection.getCallerClass() frame.
+ for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
+ ciMethod* m = caller_jvms->method();
+ switch (n) {
+ case 0:
+ fatal("current JVM state does not include the Reflection.getCallerClass frame");
+ break;
+ case 1:
+ // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
+ if (!m->caller_sensitive()) {
+#ifndef PRODUCT
+ if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
+ }
+#endif
+ return false; // bail-out; let JVM_GetCallerClass do the work
}
- caller_jvms = caller_jvms->caller();
- --inlining_depth;
- } while (inlining_depth > 0);
- }
+ break;
+ default:
+ if (!m->is_ignored_by_security_stack_walk()) {
+ // We have reached the desired frame; return the holder class.
+ // Acquire method holder as java.lang.Class and push as constant.
+ ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
+ ciInstance* caller_mirror = caller_klass->java_mirror();
+ set_result(makecon(TypeInstPtr::make(caller_mirror)));
- if (inlining_depth == 0) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
- tty->print_cr(" Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
- tty->print_cr(" JVM state at this point:");
- for (int i = _depth; i >= 1; i--) {
- ciMethod* m = jvms()->of_depth(i)->method();
- tty->print_cr(" %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+ if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
+ tty->print_cr(" JVM state at this point:");
+ for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
+ ciMethod* m = jvms()->of_depth(i)->method();
+ tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+ }
+ }
+#endif
+ return true;
}
+ break;
}
-#endif
- return false; // Reached end of inlining
}
- // Acquire method holder as java.lang.Class
- ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
- ciInstance* caller_mirror = caller_klass->java_mirror();
-
- // Push this as a constant
- set_result(makecon(TypeInstPtr::make(caller_mirror)));
-
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
- tty->print_cr(" Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
+ tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
tty->print_cr(" JVM state at this point:");
- for (int i = _depth; i >= 1; i--) {
+ for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
ciMethod* m = jvms()->of_depth(i)->method();
- tty->print_cr(" %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+ tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
}
}
#endif
- return true;
-}
-
-// Helper routine for above
-bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
- ciMethod* method = jvms->method();
-
- // Is this the Method.invoke method itself?
- if (method->intrinsic_id() == vmIntrinsics::_invoke)
- return true;
-
- // Is this a helper, defined somewhere underneath MethodAccessorImpl.
- ciKlass* k = method->holder();
- if (k->is_instance_klass()) {
- ciInstanceKlass* ik = k->as_instance_klass();
- for (; ik != NULL; ik = ik->super()) {
- if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() &&
- ik == env()->find_system_klass(ik->name())) {
- return true;
- }
- }
- }
- else if (method->is_method_handle_intrinsic() ||
- method->is_compiled_lambda_form()) {
- // This is an internal adapter frame from the MethodHandleCompiler -- skip it
- return true;
- }
- return false;
+ return false; // bail-out; let JVM_GetCallerClass do the work
}
bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
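
The rewritten intrinsic replaces the old depth-counting scheme with a handshake: frame 0 must be Reflection.getCallerClass itself, frame 1 must carry @CallerSensitive, and the first later frame not ignored by the security stack walk is the caller. A minimal standalone sketch of that classification (hypothetical Frame type and field names, not the HotSpot API):

    #include <cstddef>
    #include <vector>

    struct Frame {
      bool is_get_caller_class;  // frame 0 sentinel
      bool caller_sensitive;     // @CallerSensitive present
      bool ignored_by_walk;      // reflection/method-handle machinery
      const char* holder;        // declaring class name
    };

    // Returns the holder of the caller's frame, or NULL to signal bail-out
    // (the interpreter's JVM_GetCallerClass then does the work instead).
    static const char* find_caller(const std::vector<Frame>& stack) {
      for (size_t n = 0; n < stack.size(); n++) {
        const Frame& f = stack[n];
        switch (n) {
        case 0:
          if (!f.is_get_caller_class) return NULL;  // wrong entry point
          // fall-through: frame 0 must be caller sensitive too
        case 1:
          if (!f.caller_sensitive) return NULL;     // annotation missing
          break;
        default:
          if (!f.ignored_by_walk) return f.holder;  // the desired frame
          break;
        }
      }
      return NULL;  // ran out of frames
    }
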
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index 6812c62fa..c323d02f8 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -2251,6 +2251,11 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
return;
}
+ // clear out the dead code after build_loop_late
+ while (_deadlist.size()) {
+ _igvn.remove_globally_dead_node(_deadlist.pop());
+ }
+
if (stop_early) {
assert(do_expensive_nodes, "why are we here?");
if (process_expensive_nodes()) {
@@ -2260,9 +2265,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
// nodes again.
C->set_major_progress();
}
-
_igvn.optimize();
-
return;
}
@@ -2273,11 +2276,6 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
eliminate_useless_predicates();
}
- // clear out the dead code
- while(_deadlist.size()) {
- _igvn.remove_globally_dead_node(_deadlist.pop());
- }
-
#ifndef PRODUCT
C->verify_graph_edges();
if (_verify_me) { // Nested verify pass?
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index c77b9f60c..178f3b717 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -449,6 +449,17 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
if (max_loop_pad > 0) {
assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
+ // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
+ // If either is the last instruction in this block, bump by
+ // max_loop_pad in lock-step with blk_size, so sizing
+ // calculations in subsequent blocks can still conservatively
+ // detect that it may be the last instruction in this block.
+ if (last_call_adr == blk_starts[i]+blk_size) {
+ last_call_adr += max_loop_pad;
+ }
+ if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
+ last_avoid_back_to_back_adr += max_loop_pad;
+ }
blk_size += max_loop_pad;
}
}
@@ -1193,8 +1204,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int last_call_offset = -1;
int last_avoid_back_to_back_offset = -1;
#ifdef ASSERT
- int block_alignment_padding = 0;
-
uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
uint* jmp_size = NEW_RESOURCE_ARRAY(uint,nblocks);
@@ -1228,8 +1237,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
Node *delay_slot = NULL;
for (uint i=0; i < nblocks; i++) {
- guarantee(blk_starts[i] >= (uint)cb->insts_size(),"should not increase size");
-
Block *b = _cfg->_blocks[i];
Node *head = b->head();
@@ -1250,14 +1257,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
jmp_offset[i] = 0;
jmp_size[i] = 0;
jmp_rule[i] = 0;
-
- // Maximum alignment padding for loop block was used
- // during first round of branches shortening, as result
- // padding for nodes (sfpt after call) was not added.
- // Take this into account for block's size change check
- // and allow increase block's size by the difference
- // of maximum and actual alignment paddings.
- int orig_blk_size = blk_starts[i+1] - blk_starts[i] + block_alignment_padding;
#endif
int blk_offset = current_offset;
@@ -1557,8 +1556,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
} // End for all instructions in block
- assert((uint)blk_offset <= blk_starts[i], "shouldn't increase distance");
- blk_starts[i] = blk_offset;
// If the next block is the top of a loop, pad this block out to align
// the loop top a little. Helps prevent pipe stalls at loop back branches.
@@ -1572,16 +1569,13 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
nop->emit(*cb, _regalloc);
current_offset = cb->insts_size();
}
-#ifdef ASSERT
- int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
- block_alignment_padding = (max_loop_pad - padding);
- assert(block_alignment_padding >= 0, "sanity");
-#endif
}
// Verify that the distances computed earlier for forward
// short branches are still valid.
- assert(orig_blk_size >= (current_offset - blk_offset), "shouldn't increase block size");
+ guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
+ // Save new block start offset
+ blk_starts[i] = blk_offset;
} // End of for all blocks
blk_starts[nblocks] = current_offset;
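
When a call (or an instruction that must not be back-to-back with another) is the last instruction of a block that then receives loop-alignment padding, the recorded address has to move with the block end, or later blocks would underestimate their distance to it. A sketch of the invariant with plain integers (names mirror the variables above):

    #include <cassert>

    int main() {
      int blk_start = 128, blk_size = 60, max_loop_pad = 12;
      int last_call_adr = blk_start + blk_size;  // call ends flush with block
      if (last_call_adr == blk_start + blk_size) {
        last_call_adr += max_loop_pad;           // bump in lock-step
      }
      blk_size += max_loop_pad;
      assert(last_call_adr == blk_start + blk_size);  // still flush with the
      return 0;                                       // block end: distances
    }                                                 // stay conservative
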
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index 9a14c9a58..73be6aae5 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -104,7 +104,8 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
}
- if (!arytype->klass()->is_loaded()) {
+ ciKlass* arytype_klass = arytype->klass();
+ if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
// Only fails for some -Xcomp runs
// The class is unloaded. We have to run this bytecode in the interpreter.
uncommon_trap(Deoptimization::Reason_unloaded,
@@ -1385,6 +1386,7 @@ void Parse::do_one_bytecode() {
if (TraceOptoParse) {
tty->print(" @");
dump_bci(bci());
+ tty->cr();
}
#endif
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index 85714c4e3..a8c979662 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -1166,31 +1166,30 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
if (progress_state == PROCESS_INPUTS) {
// After following inputs, continue to outputs
_stack.set_index(PROCESS_OUTPUTS);
- // Remove from iterative worklist
- _worklist.remove(dead);
if (!dead->is_Con()) { // Don't kill cons but uses
bool recurse = false;
// Remove from hash table
_table.hash_delete( dead );
// Smash all inputs to 'dead', isolating him completely
- for( uint i = 0; i < dead->req(); i++ ) {
+ for (uint i = 0; i < dead->req(); i++) {
Node *in = dead->in(i);
- if( in ) { // Points to something?
- dead->set_req(i,NULL); // Kill the edge
- if (in->outcnt() == 0 && in != C->top()) {// Made input go dead?
+ if (in != NULL && in != C->top()) { // Points to something?
+ int nrep = dead->replace_edge(in, NULL); // Kill edges
+ assert((nrep > 0), "sanity");
+ if (in->outcnt() == 0) { // Made input go dead?
_stack.push(in, PROCESS_INPUTS); // Recursively remove
recurse = true;
} else if (in->outcnt() == 1 &&
in->has_special_unique_user()) {
_worklist.push(in->unique_out());
} else if (in->outcnt() <= 2 && dead->is_Phi()) {
- if( in->Opcode() == Op_Region )
+ if (in->Opcode() == Op_Region) {
_worklist.push(in);
- else if( in->is_Store() ) {
+ } else if (in->is_Store()) {
DUIterator_Fast imax, i = in->fast_outs(imax);
_worklist.push(in->fast_out(i));
i++;
- if(in->outcnt() == 2) {
+ if (in->outcnt() == 2) {
_worklist.push(in->fast_out(i));
i++;
}
@@ -1209,38 +1208,42 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
}
}
}
- }
- }
- C->record_dead_node(dead->_idx);
- if (dead->is_macro()) {
- C->remove_macro_node(dead);
- }
- if (dead->is_expensive()) {
- C->remove_expensive_node(dead);
- }
-
+ } // if (in != NULL && in != C->top())
+ } // for (uint i = 0; i < dead->req(); i++)
if (recurse) {
continue;
}
- }
- // Constant node that has no out-edges and has only one in-edge from
- // root is usually dead. However, sometimes reshaping walk makes
- // it reachable by adding use edges. So, we will NOT count Con nodes
- // as dead to be conservative about the dead node count at any
- // given time.
- }
+ } // if (!dead->is_Con())
+ } // if (progress_state == PROCESS_INPUTS)
// Aggressively kill globally dead uses
// (Rather than pushing all the outs at once, we push one at a time,
// plus the parent to resume later, because of the indefinite number
// of edge deletions per loop trip.)
if (dead->outcnt() > 0) {
- // Recursively remove
+ // Recursively remove output edges
_stack.push(dead->raw_out(0), PROCESS_INPUTS);
} else {
+ // Finished disconnecting all input and output edges.
_stack.pop();
+ // Remove dead node from iterative worklist
+ _worklist.remove(dead);
+ // Constant node that has no out-edges and has only one in-edge from
+ // root is usually dead. However, sometimes reshaping walk makes
+ // it reachable by adding use edges. So, we will NOT count Con nodes
+ // as dead to be conservative about the dead node count at any
+ // given time.
+ if (!dead->is_Con()) {
+ C->record_dead_node(dead->_idx);
+ }
+ if (dead->is_macro()) {
+ C->remove_macro_node(dead);
+ }
+ if (dead->is_expensive()) {
+ C->remove_expensive_node(dead);
+ }
}
- }
+ } // while (_stack.is_nonempty())
}
//------------------------------subsume_node-----------------------------------
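
remove_globally_dead_node() stays iterative: an explicit stack visits each node in two phases, first smashing its inputs (possibly recursing into inputs that just went dead), then killing its uses one at a time; only when a node is popped fully disconnected does the bookkeeping run, which is why record_dead_node and the worklist removal moved to the pop site above. A simplified sketch (hypothetical Node type; no hash table, worklist, or special Phi/Region cases):

    #include <algorithm>
    #include <utility>
    #include <vector>

    struct Node {
      std::vector<Node*> in;    // use-def edges
      std::vector<Node*> out;   // def-use edges
      bool recorded_dead;
      Node() : recorded_dead(false) {}
    };

    enum Phase { PROCESS_INPUTS, PROCESS_OUTPUTS };

    static void kill_edge(Node* from, Node* to) {
      from->in.erase(std::remove(from->in.begin(), from->in.end(), to), from->in.end());
      to->out.erase(std::remove(to->out.begin(), to->out.end(), from), to->out.end());
    }

    static void remove_dead(Node* dead) {
      std::vector<std::pair<Node*, Phase> > stack;
      stack.push_back(std::make_pair(dead, PROCESS_INPUTS));
      while (!stack.empty()) {
        Node* d = stack.back().first;
        if (stack.back().second == PROCESS_INPUTS) {
          stack.back().second = PROCESS_OUTPUTS;  // resume with outputs later
          bool recurse = false;
          while (!d->in.empty()) {                // smash all inputs
            Node* in = d->in.back();
            kill_edge(d, in);
            if (in->out.empty()) {                // input just went dead too
              stack.push_back(std::make_pair(in, PROCESS_INPUTS));
              recurse = true;
            }
          }
          if (recurse) continue;                  // disconnect the inputs first
        }
        if (!d->out.empty()) {
          // aggressively kill dead uses, one at a time
          stack.push_back(std::make_pair(d->out.back(), PROCESS_INPUTS));
        } else {
          stack.pop_back();
          d->recorded_dead = true;  // bookkeeping only once fully isolated
        }
      }
    }
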
diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
index 4e599d8dd..5c31ea1e5 100644
--- a/src/share/vm/prims/jvm.cpp
+++ b/src/share/vm/prims/jvm.cpp
@@ -30,6 +30,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/bytecode.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp"
#include "oops/fieldStreams.hpp"
@@ -665,8 +666,51 @@ JVM_END
JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
JVMWrapper("JVM_GetCallerClass");
- Klass* k = thread->security_get_caller_class(depth);
- return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
+
+ // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
+ if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
+ Klass* k = thread->security_get_caller_class(depth);
+ return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
+ } else {
+ // Basic handshaking with Java_sun_reflect_Reflection_getCallerClass
+ assert(depth == -1, "wrong handshake depth");
+ }
+
+ // Getting the class of the caller frame.
+ //
+ // The call stack at this point looks something like this:
+ //
+ // [0] [ @CallerSensitive public sun.reflect.Reflection.getCallerClass ]
+ // [1] [ @CallerSensitive API.method ]
+ // [.] [ (skipped intermediate frames) ]
+ // [n] [ caller ]
+ vframeStream vfst(thread);
+ // Cf. LibraryCallKit::inline_native_Reflection_getCallerClass
+ for (int n = 0; !vfst.at_end(); vfst.security_next(), n++) {
+ Method* m = vfst.method();
+ assert(m != NULL, "sanity");
+ switch (n) {
+ case 0:
+ // This must only be called from Reflection.getCallerClass
+ if (m->intrinsic_id() != vmIntrinsics::_getCallerClass) {
+ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "JVM_GetCallerClass must only be called from Reflection.getCallerClass");
+ }
+ // fall-through
+ case 1:
+ // Frame 0 and 1 must be caller sensitive.
+ if (!m->caller_sensitive()) {
+ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), err_msg("CallerSensitive annotation expected at frame %d", n));
+ }
+ break;
+ default:
+ if (!m->is_ignored_by_security_stack_walk()) {
+ // We have reached the desired frame; return the holder class.
+ return (jclass) JNIHandles::make_local(env, m->method_holder()->java_mirror());
+ }
+ break;
+ }
+ }
+ return NULL;
JVM_END
@@ -3208,11 +3252,24 @@ JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
KlassLink* first = NULL;
KlassLink* last = NULL;
int depth = 0;
+ vframeStream vfst(thread);
- for(vframeStream vfst(thread); !vfst.at_end(); vfst.security_get_caller_frame(1)) {
+ if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {
+ // This must only be called from SecurityManager.getClassContext
+ Method* m = vfst.method();
+ if (!(m->method_holder() == SystemDictionary::SecurityManager_klass() &&
+ m->name() == vmSymbols::getClassContext_name() &&
+ m->signature() == vmSymbols::void_class_array_signature())) {
+ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "JVM_GetClassContext must only be called from SecurityManager.getClassContext");
+ }
+ }
+
+ // Collect method holders
+ for (; !vfst.at_end(); vfst.security_next()) {
+ Method* m = vfst.method();
// Native frames are not returned
- if (!vfst.method()->is_native()) {
- Klass* holder = vfst.method()->method_holder();
+ if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
+ Klass* holder = m->method_holder();
assert(holder->is_klass(), "just checking");
depth++;
KlassLink* l = new KlassLink(KlassHandle(thread, holder));
diff --git a/src/share/vm/prims/jvmtiEventController.cpp b/src/share/vm/prims/jvmtiEventController.cpp
index 6b7b72b7a..cdec7d71f 100644
--- a/src/share/vm/prims/jvmtiEventController.cpp
+++ b/src/share/vm/prims/jvmtiEventController.cpp
@@ -39,7 +39,12 @@
#include "runtime/vm_operations.hpp"
#ifdef JVMTI_TRACE
-#define EC_TRACE(out) if (JvmtiTrace::trace_event_controller()) { SafeResourceMark rm; tty->print_cr out; } while (0)
+#define EC_TRACE(out) do { \
+ if (JvmtiTrace::trace_event_controller()) { \
+ SafeResourceMark rm; \
+ tty->print_cr out; \
+ } \
+} while (0)
#else
#define EC_TRACE(out)
#endif /*JVMTI_TRACE */
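
The old EC_TRACE expanded to an if-statement followed by a stray "while (0);" loop, so the macro was really two statements and would not compose with a surrounding if/else. The do { ... } while (0) form makes the whole body a single statement that still takes a trailing semicolon. A small illustration of the bug class:

    #include <cstdio>

    static bool tracing = false;

    // if-based macro: the body's 'if' captures a following 'else'
    #define BAD_TRACE(msg)  if (tracing) { std::printf("%s\n", msg); }
    // statement-like macro: safe in every context that takes a statement
    #define GOOD_TRACE(msg) do { if (tracing) { std::printf("%s\n", msg); } } while (0)

    void demo(bool ok) {
      if (ok)
        GOOD_TRACE("succeeded");
      else
        std::printf("failed\n");
      // With BAD_TRACE instead, the 'else' would bind to the macro's hidden
      // 'if': "failed" would print when ok is true and tracing is off, and
      // never when ok is false.
    }
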
diff --git a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp
index 878d300f5..43174e494 100644
--- a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp
+++ b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp
@@ -72,36 +72,6 @@
// 0x20000000 | 536870912 - unused
// 0x40000000 | 1073741824 - unused
// 0x80000000 | 2147483648 - unused
-//
-// Note: The ResourceMark is to cleanup resource allocated args.
-// The "while (0)" is so we can use semi-colon at end of RC_TRACE().
-#define RC_TRACE(level, args) \
- if ((TraceRedefineClasses & level) != 0) { \
- ResourceMark rm; \
- tty->print("RedefineClasses-0x%x: ", level); \
- tty->print_cr args; \
- } while (0)
-
-#define RC_TRACE_NO_CR(level, args) \
- if ((TraceRedefineClasses & level) != 0) { \
- ResourceMark rm; \
- tty->print("RedefineClasses-0x%x: ", level); \
- tty->print args; \
- } while (0)
-
-#define RC_TRACE_WITH_THREAD(level, thread, args) \
- if ((TraceRedefineClasses & level) != 0) { \
- ResourceMark rm(thread); \
- tty->print("RedefineClasses-0x%x: ", level); \
- tty->print_cr args; \
- } while (0)
-
-#define RC_TRACE_MESG(args) \
- { \
- ResourceMark rm; \
- tty->print("RedefineClasses: "); \
- tty->print_cr args; \
- } while (0)
// Macro for checking if TraceRedefineClasses has a specific bit
// enabled. Returns true if the bit specified by level is set.
@@ -120,16 +90,49 @@
#define RC_TRACE_IN_RANGE(low, high) \
(((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0)
-// Timer support macros. Only do timer operations if timer tracing
-// is enabled. The "while (0)" is so we can use semi-colon at end of
-// the macro.
-#define RC_TIMER_START(t) \
+// Note: The ResourceMark is to cleanup resource allocated args.
+// The "do {...} while (0)" is so we can use semi-colon at end of RC_TRACE().
+#define RC_TRACE(level, args) do { \
+ if (RC_TRACE_ENABLED(level)) { \
+ ResourceMark rm; \
+ tty->print("RedefineClasses-0x%x: ", level); \
+ tty->print_cr args; \
+ } \
+} while (0)
+
+#define RC_TRACE_NO_CR(level, args) do { \
+ if (RC_TRACE_ENABLED(level)) { \
+ ResourceMark rm; \
+ tty->print("RedefineClasses-0x%x: ", level); \
+ tty->print args; \
+ } \
+} while (0)
+
+#define RC_TRACE_WITH_THREAD(level, thread, args) do { \
+ if (RC_TRACE_ENABLED(level)) { \
+ ResourceMark rm(thread); \
+ tty->print("RedefineClasses-0x%x: ", level); \
+ tty->print_cr args; \
+ } \
+} while (0)
+
+#define RC_TRACE_MESG(args) do { \
+ ResourceMark rm; \
+ tty->print("RedefineClasses: "); \
+ tty->print_cr args; \
+} while (0)
+
+// Timer support macros. Only do timer operations if timer tracing is enabled.
+// The "do {...} while (0)" is so we can use semi-colon at end of the macro.
+#define RC_TIMER_START(t) do { \
if (RC_TRACE_ENABLED(0x00000004)) { \
t.start(); \
- } while (0)
-#define RC_TIMER_STOP(t) \
+ } \
+} while (0)
+#define RC_TIMER_STOP(t) do { \
if (RC_TRACE_ENABLED(0x00000004)) { \
t.stop(); \
- } while (0)
+ } \
+} while (0)
#endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP
diff --git a/src/share/vm/prims/methodHandles.cpp b/src/share/vm/prims/methodHandles.cpp
index c3b587967..54881388b 100644
--- a/src/share/vm/prims/methodHandles.cpp
+++ b/src/share/vm/prims/methodHandles.cpp
@@ -105,14 +105,15 @@ void MethodHandles::set_enabled(bool z) {
// import java_lang_invoke_MemberName.*
enum {
- IS_METHOD = java_lang_invoke_MemberName::MN_IS_METHOD,
- IS_CONSTRUCTOR = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR,
- IS_FIELD = java_lang_invoke_MemberName::MN_IS_FIELD,
- IS_TYPE = java_lang_invoke_MemberName::MN_IS_TYPE,
+ IS_METHOD = java_lang_invoke_MemberName::MN_IS_METHOD,
+ IS_CONSTRUCTOR = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR,
+ IS_FIELD = java_lang_invoke_MemberName::MN_IS_FIELD,
+ IS_TYPE = java_lang_invoke_MemberName::MN_IS_TYPE,
+ CALLER_SENSITIVE = java_lang_invoke_MemberName::MN_CALLER_SENSITIVE,
REFERENCE_KIND_SHIFT = java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT,
REFERENCE_KIND_MASK = java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,
- SEARCH_SUPERCLASSES = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES,
- SEARCH_INTERFACES = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES,
+ SEARCH_SUPERCLASSES = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES,
+ SEARCH_INTERFACES = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES,
ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE
};
@@ -207,10 +208,15 @@ oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_disp
vmindex = m->vtable_index();
}
- java_lang_invoke_MemberName::set_flags(mname_oop, flags);
+ // @CallerSensitive annotation detected
+ if (m->caller_sensitive()) {
+ flags |= CALLER_SENSITIVE;
+ }
+
+ java_lang_invoke_MemberName::set_flags( mname_oop, flags);
java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
- java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); // vtable/itable index
- java_lang_invoke_MemberName::set_clazz(mname_oop, receiver_limit->java_mirror());
+ java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex); // vtable/itable index
+ java_lang_invoke_MemberName::set_clazz( mname_oop, receiver_limit->java_mirror());
// Note: name and type can be lazily computed by resolve_MemberName,
// if Java code needs them as resolved String and MethodType objects.
// The clazz must be eagerly stored, because it provides a GC
@@ -940,6 +946,7 @@ JVM_END
template(java_lang_invoke_MemberName,MN_IS_CONSTRUCTOR) \
template(java_lang_invoke_MemberName,MN_IS_FIELD) \
template(java_lang_invoke_MemberName,MN_IS_TYPE) \
+ template(java_lang_invoke_MemberName,MN_CALLER_SENSITIVE) \
template(java_lang_invoke_MemberName,MN_SEARCH_SUPERCLASSES) \
template(java_lang_invoke_MemberName,MN_SEARCH_INTERFACES) \
template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_SHIFT) \
diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
index 18252c154..243570b18 100644
--- a/src/share/vm/prims/unsafe.cpp
+++ b/src/share/vm/prims/unsafe.cpp
@@ -868,7 +868,7 @@ static inline void throw_new(JNIEnv *env, const char *ename) {
env->ThrowNew(cls, msg);
}
-static jclass Unsafe_DefineClass(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
+static jclass Unsafe_DefineClass_impl(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
{
// Code lifted from JDK 1.3 ClassLoader.c
@@ -939,30 +939,30 @@ static jclass Unsafe_DefineClass(JNIEnv *env, jstring name, jbyteArray data, int
}
-UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
+UNSAFE_ENTRY(jclass, Unsafe_DefineClass(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
UnsafeWrapper("Unsafe_DefineClass");
{
ThreadToNativeFromVM ttnfv(thread);
-
- int depthFromDefineClass0 = 1;
- jclass caller = JVM_GetCallerClass(env, depthFromDefineClass0);
- jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller);
- jobject pd = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller);
-
- return Unsafe_DefineClass(env, name, data, offset, length, loader, pd);
+ return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
}
UNSAFE_END
-UNSAFE_ENTRY(jclass, Unsafe_DefineClass1(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
+UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
UnsafeWrapper("Unsafe_DefineClass");
{
ThreadToNativeFromVM ttnfv(thread);
- return Unsafe_DefineClass(env, name, data, offset, length, loader, pd);
+ int depthFromDefineClass0 = 1;
+ jclass caller = JVM_GetCallerClass(env, depthFromDefineClass0);
+ jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller);
+ jobject pd = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller);
+
+ return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
}
UNSAFE_END
+
#define DAC_Args CLS"[B["OBJ
// define a class but do not make it known to the class loader or system dictionary
// - host_class: supplies context for linkage, access control, protection domain, and class loader
@@ -1323,7 +1323,7 @@ UNSAFE_END
#define THR LANG"Throwable;"
#define DC0_Args LANG"String;[BII"
-#define DC1_Args DC0_Args LANG"ClassLoader;" "Ljava/security/ProtectionDomain;"
+#define DC_Args DC0_Args LANG"ClassLoader;" "Ljava/security/ProtectionDomain;"
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
@@ -1352,10 +1352,8 @@ UNSAFE_END
-// %%% These are temporarily supported until the SDK sources
-// contain the necessarily updated Unsafe.java.
+// These are the methods for 1.4.0
static JNINativeMethod methods_140[] = {
-
{CC"getObject", CC"("OBJ"I)"OBJ"", FN_PTR(Unsafe_GetObject140)},
{CC"putObject", CC"("OBJ"I"OBJ")V", FN_PTR(Unsafe_SetObject140)},
@@ -1381,12 +1379,10 @@ static JNINativeMethod methods_140[] = {
{CC"allocateMemory", CC"(J)"ADR, FN_PTR(Unsafe_AllocateMemory)},
{CC"reallocateMemory", CC"("ADR"J)"ADR, FN_PTR(Unsafe_ReallocateMemory)},
-// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
-// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
{CC"freeMemory", CC"("ADR")V", FN_PTR(Unsafe_FreeMemory)},
- {CC"fieldOffset", CC"("FLD")I", FN_PTR(Unsafe_FieldOffset)}, //deprecated
- {CC"staticFieldBase", CC"("CLS")"OBJ, FN_PTR(Unsafe_StaticFieldBaseFromClass)}, //deprecated
+ {CC"fieldOffset", CC"("FLD")I", FN_PTR(Unsafe_FieldOffset)},
+ {CC"staticFieldBase", CC"("CLS")"OBJ, FN_PTR(Unsafe_StaticFieldBaseFromClass)},
{CC"ensureClassInitialized",CC"("CLS")V", FN_PTR(Unsafe_EnsureClassInitialized)},
{CC"arrayBaseOffset", CC"("CLS")I", FN_PTR(Unsafe_ArrayBaseOffset)},
{CC"arrayIndexScale", CC"("CLS")I", FN_PTR(Unsafe_ArrayIndexScale)},
@@ -1394,16 +1390,15 @@ static JNINativeMethod methods_140[] = {
{CC"pageSize", CC"()I", FN_PTR(Unsafe_PageSize)},
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
- {CC"defineClass", CC"("DC1_Args")"CLS, FN_PTR(Unsafe_DefineClass1)},
+ {CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
{CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
{CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
{CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)}
};
-// These are the old methods prior to the JSR 166 changes in 1.5.0
+// These are the methods prior to the JSR 166 changes in 1.5.0
static JNINativeMethod methods_141[] = {
-
{CC"getObject", CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObject)},
{CC"putObject", CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetObject)},
@@ -1429,8 +1424,6 @@ static JNINativeMethod methods_141[] = {
{CC"allocateMemory", CC"(J)"ADR, FN_PTR(Unsafe_AllocateMemory)},
{CC"reallocateMemory", CC"("ADR"J)"ADR, FN_PTR(Unsafe_ReallocateMemory)},
-// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
-// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
{CC"freeMemory", CC"("ADR")V", FN_PTR(Unsafe_FreeMemory)},
{CC"objectFieldOffset", CC"("FLD")J", FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1443,7 +1436,7 @@ static JNINativeMethod methods_141[] = {
{CC"pageSize", CC"()I", FN_PTR(Unsafe_PageSize)},
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
- {CC"defineClass", CC"("DC1_Args")"CLS, FN_PTR(Unsafe_DefineClass1)},
+ {CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
{CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
{CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
@@ -1451,9 +1444,8 @@ static JNINativeMethod methods_141[] = {
};
-// These are the old methods prior to the JSR 166 changes in 1.6.0
+// These are the methods prior to the JSR 166 changes in 1.6.0
static JNINativeMethod methods_15[] = {
-
{CC"getObject", CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObject)},
{CC"putObject", CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetObject)},
{CC"getObjectVolatile",CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObjectVolatile)},
@@ -1482,8 +1474,6 @@ static JNINativeMethod methods_15[] = {
{CC"allocateMemory", CC"(J)"ADR, FN_PTR(Unsafe_AllocateMemory)},
{CC"reallocateMemory", CC"("ADR"J)"ADR, FN_PTR(Unsafe_ReallocateMemory)},
-// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
-// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
{CC"freeMemory", CC"("ADR")V", FN_PTR(Unsafe_FreeMemory)},
{CC"objectFieldOffset", CC"("FLD")J", FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1496,7 +1486,7 @@ static JNINativeMethod methods_15[] = {
{CC"pageSize", CC"()I", FN_PTR(Unsafe_PageSize)},
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
- {CC"defineClass", CC"("DC1_Args")"CLS, FN_PTR(Unsafe_DefineClass1)},
+ {CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
{CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
{CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
@@ -1509,15 +1499,13 @@ static JNINativeMethod methods_15[] = {
};
-// These are the correct methods, moving forward:
-static JNINativeMethod methods[] = {
-
+// These are the methods for 1.6.0 and 1.7.0
+static JNINativeMethod methods_16[] = {
{CC"getObject", CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObject)},
{CC"putObject", CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetObject)},
{CC"getObjectVolatile",CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObjectVolatile)},
{CC"putObjectVolatile",CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetObjectVolatile)},
-
DECLARE_GETSETOOP(Boolean, Z),
DECLARE_GETSETOOP(Byte, B),
DECLARE_GETSETOOP(Short, S),
@@ -1540,8 +1528,6 @@ static JNINativeMethod methods[] = {
{CC"allocateMemory", CC"(J)"ADR, FN_PTR(Unsafe_AllocateMemory)},
{CC"reallocateMemory", CC"("ADR"J)"ADR, FN_PTR(Unsafe_ReallocateMemory)},
-// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
-// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
{CC"freeMemory", CC"("ADR")V", FN_PTR(Unsafe_FreeMemory)},
{CC"objectFieldOffset", CC"("FLD")J", FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1554,7 +1540,7 @@ static JNINativeMethod methods[] = {
{CC"pageSize", CC"()I", FN_PTR(Unsafe_PageSize)},
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
- {CC"defineClass", CC"("DC1_Args")"CLS, FN_PTR(Unsafe_DefineClass1)},
+ {CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
{CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
{CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
@@ -1566,23 +1552,68 @@ static JNINativeMethod methods[] = {
{CC"putOrderedObject", CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetOrderedObject)},
{CC"putOrderedInt", CC"("OBJ"JI)V", FN_PTR(Unsafe_SetOrderedInt)},
{CC"putOrderedLong", CC"("OBJ"JJ)V", FN_PTR(Unsafe_SetOrderedLong)},
- {CC"loadFence", CC"()V", FN_PTR(Unsafe_LoadFence)},
- {CC"storeFence", CC"()V", FN_PTR(Unsafe_StoreFence)},
- {CC"fullFence", CC"()V", FN_PTR(Unsafe_FullFence)},
{CC"park", CC"(ZJ)V", FN_PTR(Unsafe_Park)},
{CC"unpark", CC"("OBJ")V", FN_PTR(Unsafe_Unpark)}
+};
-// {CC"getLoadAverage", CC"([DI)I", FN_PTR(Unsafe_Loadavg)},
+// These are the methods for 1.8.0
+static JNINativeMethod methods_18[] = {
+ {CC"getObject", CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObject)},
+ {CC"putObject", CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetObject)},
+ {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"", FN_PTR(Unsafe_GetObjectVolatile)},
+ {CC"putObjectVolatile",CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetObjectVolatile)},
-// {CC"prefetchRead", CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchRead)},
-// {CC"prefetchWrite", CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchWrite)}
-// {CC"prefetchReadStatic", CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchRead)},
-// {CC"prefetchWriteStatic",CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchWrite)}
+ DECLARE_GETSETOOP(Boolean, Z),
+ DECLARE_GETSETOOP(Byte, B),
+ DECLARE_GETSETOOP(Short, S),
+ DECLARE_GETSETOOP(Char, C),
+ DECLARE_GETSETOOP(Int, I),
+ DECLARE_GETSETOOP(Long, J),
+ DECLARE_GETSETOOP(Float, F),
+ DECLARE_GETSETOOP(Double, D),
+
+ DECLARE_GETSETNATIVE(Byte, B),
+ DECLARE_GETSETNATIVE(Short, S),
+ DECLARE_GETSETNATIVE(Char, C),
+ DECLARE_GETSETNATIVE(Int, I),
+ DECLARE_GETSETNATIVE(Long, J),
+ DECLARE_GETSETNATIVE(Float, F),
+ DECLARE_GETSETNATIVE(Double, D),
+
+ {CC"getAddress", CC"("ADR")"ADR, FN_PTR(Unsafe_GetNativeAddress)},
+ {CC"putAddress", CC"("ADR""ADR")V", FN_PTR(Unsafe_SetNativeAddress)},
+
+ {CC"allocateMemory", CC"(J)"ADR, FN_PTR(Unsafe_AllocateMemory)},
+ {CC"reallocateMemory", CC"("ADR"J)"ADR, FN_PTR(Unsafe_ReallocateMemory)},
+ {CC"freeMemory", CC"("ADR")V", FN_PTR(Unsafe_FreeMemory)},
+
+ {CC"objectFieldOffset", CC"("FLD")J", FN_PTR(Unsafe_ObjectFieldOffset)},
+ {CC"staticFieldOffset", CC"("FLD")J", FN_PTR(Unsafe_StaticFieldOffset)},
+ {CC"staticFieldBase", CC"("FLD")"OBJ, FN_PTR(Unsafe_StaticFieldBaseFromField)},
+ {CC"ensureClassInitialized",CC"("CLS")V", FN_PTR(Unsafe_EnsureClassInitialized)},
+ {CC"arrayBaseOffset", CC"("CLS")I", FN_PTR(Unsafe_ArrayBaseOffset)},
+ {CC"arrayIndexScale", CC"("CLS")I", FN_PTR(Unsafe_ArrayIndexScale)},
+ {CC"addressSize", CC"()I", FN_PTR(Unsafe_AddressSize)},
+ {CC"pageSize", CC"()I", FN_PTR(Unsafe_PageSize)},
+ {CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
+ {CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
+ {CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
+ {CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
+ {CC"tryMonitorEnter", CC"("OBJ")Z", FN_PTR(Unsafe_TryMonitorEnter)},
+ {CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)},
+ {CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
+ {CC"compareAndSwapInt", CC"("OBJ"J""I""I"")Z", FN_PTR(Unsafe_CompareAndSwapInt)},
+ {CC"compareAndSwapLong", CC"("OBJ"J""J""J"")Z", FN_PTR(Unsafe_CompareAndSwapLong)},
+ {CC"putOrderedObject", CC"("OBJ"J"OBJ")V", FN_PTR(Unsafe_SetOrderedObject)},
+ {CC"putOrderedInt", CC"("OBJ"JI)V", FN_PTR(Unsafe_SetOrderedInt)},
+ {CC"putOrderedLong", CC"("OBJ"JJ)V", FN_PTR(Unsafe_SetOrderedLong)},
+ {CC"park", CC"(ZJ)V", FN_PTR(Unsafe_Park)},
+ {CC"unpark", CC"("OBJ")V", FN_PTR(Unsafe_Unpark)}
};
JNINativeMethod loadavg_method[] = {
- {CC"getLoadAverage", CC"([DI)I", FN_PTR(Unsafe_Loadavg)}
+ {CC"getLoadAverage", CC"([DI)I", FN_PTR(Unsafe_Loadavg)}
};
JNINativeMethod prefetch_methods[] = {
@@ -1592,7 +1623,7 @@ JNINativeMethod prefetch_methods[] = {
{CC"prefetchWriteStatic",CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchWrite)}
};
-JNINativeMethod memcopy_methods[] = {
+JNINativeMethod memcopy_methods_17[] = {
{CC"copyMemory", CC"("OBJ"J"OBJ"JJ)V", FN_PTR(Unsafe_CopyMemory2)},
{CC"setMemory", CC"("OBJ"JJB)V", FN_PTR(Unsafe_SetMemory2)}
};
@@ -1610,6 +1641,12 @@ JNINativeMethod lform_methods[] = {
{CC"shouldBeInitialized",CC"("CLS")Z", FN_PTR(Unsafe_ShouldBeInitialized)},
};
+JNINativeMethod fence_methods[] = {
+ {CC"loadFence", CC"()V", FN_PTR(Unsafe_LoadFence)},
+ {CC"storeFence", CC"()V", FN_PTR(Unsafe_StoreFence)},
+ {CC"fullFence", CC"()V", FN_PTR(Unsafe_FullFence)},
+};
+
#undef CC
#undef FN_PTR
@@ -1622,12 +1659,32 @@ JNINativeMethod lform_methods[] = {
#undef MTH
#undef THR
#undef DC0_Args
-#undef DC1_Args
+#undef DC_Args
#undef DECLARE_GETSETOOP
#undef DECLARE_GETSETNATIVE
+/**
+ * Helper method to register native methods.
+ */
+static bool register_natives(const char* message, JNIEnv* env, jclass clazz, const JNINativeMethod* methods, jint nMethods) {
+ int status = env->RegisterNatives(clazz, methods, nMethods);
+ if (status < 0 || env->ExceptionOccurred()) {
+ if (PrintMiscellaneous && (Verbose || WizardMode)) {
+ tty->print_cr("Unsafe: failed registering %s", message);
+ }
+ env->ExceptionClear();
+ return false;
+ } else {
+ if (PrintMiscellaneous && (Verbose || WizardMode)) {
+ tty->print_cr("Unsafe: successfully registered %s", message);
+ }
+ return true;
+ }
+}
+
+
// This one function is exported, used by NativeLookup.
// The Unsafe_xxx functions above are called only from the interpreter.
// The optimizer looks at names and signatures to recognize
@@ -1637,83 +1694,57 @@ JVM_ENTRY(void, JVM_RegisterUnsafeMethods(JNIEnv *env, jclass unsafecls))
UnsafeWrapper("JVM_RegisterUnsafeMethods");
{
ThreadToNativeFromVM ttnfv(thread);
+
+ // Unsafe methods
{
- env->RegisterNatives(unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.6 Unsafe.loadavg not found.");
- }
- env->ExceptionClear();
+ bool success = false;
+ // We need to register the 1.6 methods first because the 1.8 methods would register fine on 1.7 and 1.6
+ if (!success) {
+ success = register_natives("1.6 methods", env, unsafecls, methods_16, sizeof(methods_16)/sizeof(JNINativeMethod));
}
- }
- {
- env->RegisterNatives(unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.6 Unsafe.prefetchRead/Write not found.");
- }
- env->ExceptionClear();
+ if (!success) {
+ success = register_natives("1.8 methods", env, unsafecls, methods_18, sizeof(methods_18)/sizeof(JNINativeMethod));
}
- }
- {
- env->RegisterNatives(unsafecls, memcopy_methods, sizeof(memcopy_methods)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.7 Unsafe.copyMemory not found.");
- }
- env->ExceptionClear();
- env->RegisterNatives(unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.5 Unsafe.copyMemory not found.");
- }
- env->ExceptionClear();
- }
+ if (!success) {
+ success = register_natives("1.5 methods", env, unsafecls, methods_15, sizeof(methods_15)/sizeof(JNINativeMethod));
}
- }
- if (EnableInvokeDynamic) {
- env->RegisterNatives(unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.7 Unsafe.defineClass (anonymous version) not found.");
- }
- env->ExceptionClear();
+ if (!success) {
+ success = register_natives("1.4.1 methods", env, unsafecls, methods_141, sizeof(methods_141)/sizeof(JNINativeMethod));
}
- }
- if (EnableInvokeDynamic) {
- env->RegisterNatives(unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.7 LambdaForm support in Unsafe not found.");
- }
- env->ExceptionClear();
+ if (!success) {
+ success = register_natives("1.4.0 methods", env, unsafecls, methods_140, sizeof(methods_140)/sizeof(JNINativeMethod));
}
+ guarantee(success, "register unsafe natives");
}
- int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.6 version of Unsafe not found.");
+
+ // Unsafe.getLoadAverage
+ register_natives("1.6 loadavg method", env, unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
+
+ // Prefetch methods
+ register_natives("1.6 prefetch methods", env, unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
+
+ // Memory copy methods
+ {
+ bool success = false;
+ if (!success) {
+ success = register_natives("1.7 memory copy methods", env, unsafecls, memcopy_methods_17, sizeof(memcopy_methods_17)/sizeof(JNINativeMethod));
}
- env->ExceptionClear();
- // %%% For now, be backward compatible with an older class:
- status = env->RegisterNatives(unsafecls, methods_15, sizeof(methods_15)/sizeof(JNINativeMethod));
- }
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.5 version of Unsafe not found.");
+ if (!success) {
+ success = register_natives("1.5 memory copy methods", env, unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
}
- env->ExceptionClear();
- // %%% For now, be backward compatible with an older class:
- status = env->RegisterNatives(unsafecls, methods_141, sizeof(methods_141)/sizeof(JNINativeMethod));
}
- if (env->ExceptionOccurred()) {
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("Warning: SDK 1.4.1 version of Unsafe not found.");
- }
- env->ExceptionClear();
- // %%% For now, be backward compatible with an older class:
- status = env->RegisterNatives(unsafecls, methods_140, sizeof(methods_140)/sizeof(JNINativeMethod));
+
+ // Unsafe.defineAnonymousClass
+ if (EnableInvokeDynamic) {
+ register_natives("1.7 define anonymous class method", env, unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
}
- guarantee(status == 0, "register unsafe natives");
+
+ // Unsafe.shouldBeInitialized
+ if (EnableInvokeDynamic) {
+ register_natives("1.7 LambdaForm support", env, unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
+ }
+
+ // Fence methods
+ register_natives("1.8 fence methods", env, unsafecls, fence_methods, sizeof(fence_methods)/sizeof(JNINativeMethod));
}
JVM_END
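
Registration is now a single helper plus an ordered fallback chain: each candidate method table is tried with RegisterNatives, and a failure (some method name missing from the loaded Unsafe class) simply selects the next, older table. A standalone sketch of the pattern (register_in_vm is a stub standing in for JNIEnv::RegisterNatives; names are illustrative):

    #include <cstdio>

    struct NativeMethod { const char* name; const char* sig; void* fn; };

    static int call_no = 0;
    static int register_in_vm(const NativeMethod*, int) {
      return (call_no++ == 0) ? -1 : 0;  // stub: first table fails, next fits
    }

    static bool register_natives(const char* label, const NativeMethod* m, int n) {
      if (register_in_vm(m, n) < 0) {
        std::printf("Unsafe: failed registering %s\n", label);
        return false;  // caller falls through to the next candidate table
      }
      std::printf("Unsafe: successfully registered %s\n", label);
      return true;
    }

    // Most specific table first; the first table whose methods all exist in
    // the loaded class wins (1.6 before 1.8 above, because the 1.8 table
    // would also register fine against a 1.6 or 1.7 class).
    bool register_unsafe(const NativeMethod* v16, int n16,
                         const NativeMethod* v18, int n18) {
      return register_natives("1.6 methods", v16, n16)
          || register_natives("1.8 methods", v18, n18);
    }
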
diff --git a/src/share/vm/prims/whitebox.cpp b/src/share/vm/prims/whitebox.cpp
index 4758210ee..9b3cd297f 100644
--- a/src/share/vm/prims/whitebox.cpp
+++ b/src/share/vm/prims/whitebox.cpp
@@ -254,6 +254,24 @@ WB_ENTRY(jint, WB_GetCompileQueuesSize(JNIEnv* env, jobject o))
CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
WB_END
+WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
+ ResourceMark rm(THREAD);
+ int len;
+ jchar* name = java_lang_String::as_unicode_string(JNIHandles::resolve(javaString), len);
+ oop found_string = StringTable::the_table()->lookup(name, len);
+ if (found_string == NULL) {
+ return false;
+ }
+ return true;
+WB_END
+
+
+WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
+ Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
+ Universe::heap()->collect(GCCause::_last_ditch_collection);
+WB_END
+
+
//Some convenience methods to deal with objects from java
int WhiteBox::offset_for_field(const char* field_name, oop object,
Symbol* signature_symbol) {
@@ -343,6 +361,8 @@ static JNINativeMethod methods[] = {
CC"(Ljava/lang/reflect/Method;)I", (void*)&WB_GetMethodCompilationLevel},
{CC"getCompileQueuesSize",
CC"()I", (void*)&WB_GetCompileQueuesSize},
+ {CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable },
+ {CC"fullGC", CC"()V", (void*)&WB_FullGC },
};
#undef CC
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 8bb1a9ef3..c66c73ed2 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1553,6 +1553,15 @@ void Arguments::set_g1_gc_flags() {
}
}
+julong Arguments::limit_by_allocatable_memory(julong limit) {
+ julong max_allocatable;
+ julong result = limit;
+ if (os::has_allocatable_memory_limit(&max_allocatable)) {
+ result = MIN2(result, max_allocatable / MaxVirtMemFraction);
+ }
+ return result;
+}
+
void Arguments::set_heap_size() {
if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
// Deprecated flag
@@ -1591,12 +1600,12 @@ void Arguments::set_heap_size() {
}
reasonable_max = MIN2(reasonable_max, max_coop_heap);
}
- reasonable_max = os::allocatable_physical_memory(reasonable_max);
+ reasonable_max = limit_by_allocatable_memory(reasonable_max);
if (!FLAG_IS_DEFAULT(InitialHeapSize)) {
// An initial heap size was specified on the command line,
// so be sure that the maximum size is consistent. Done
- // after call to allocatable_physical_memory because that
+ // after call to limit_by_allocatable_memory because that
// method might reduce the allocation size.
reasonable_max = MAX2(reasonable_max, (julong)InitialHeapSize);
}
@@ -1616,14 +1625,14 @@ void Arguments::set_heap_size() {
reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize);
- reasonable_minimum = os::allocatable_physical_memory(reasonable_minimum);
+ reasonable_minimum = limit_by_allocatable_memory(reasonable_minimum);
julong reasonable_initial = phys_mem / InitialRAMFraction;
reasonable_initial = MAX2(reasonable_initial, reasonable_minimum);
reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
- reasonable_initial = os::allocatable_physical_memory(reasonable_initial);
+ reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet.
@@ -2609,9 +2618,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
initHeapSize = MIN2(total_memory / (julong)2,
total_memory - (julong)160*M);
- // Make sure that if we have a lot of memory we cap the 32 bit
- // process space. The 64bit VM version of this function is a nop.
- initHeapSize = os::allocatable_physical_memory(initHeapSize);
+ initHeapSize = limit_by_allocatable_memory(initHeapSize);
if (FLAG_IS_DEFAULT(MaxHeapSize)) {
FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
@@ -3320,6 +3327,13 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
}
check_deprecated_gcs();
check_deprecated_gc_flags();
+ if (AssumeMP && !UseSerialGC) {
+ if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+ warning("If the number of processors is expected to increase from one, then"
+ " you should configure the number of parallel GC threads appropriately"
+ " using -XX:ParallelGCThreads=N");
+ }
+ }
#else // INCLUDE_ALL_GCS
assert(verify_serial_gc_flags(), "SerialGC unset");
#endif // INCLUDE_ALL_GCS
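
limit_by_allocatable_memory() above replaces the per-platform os::allocatable_physical_memory() with a query plus a division: if the OS reports a reservable-memory ceiling (say, a 32-bit address space or an address-space rlimit), only 1/MaxVirtMemFraction of it (1/2 by default) is offered to the heap, leaving the rest of the address space for code, stacks, and native allocations. A sketch under those assumptions (julong_t and the probe are stand-ins):

    #include <algorithm>
    #include <stdint.h>

    typedef uint64_t julong_t;  // stand-in for HotSpot's julong

    // Hypothetical probe mirroring os::has_allocatable_memory_limit():
    // pretend we run in a 4 GB address space.
    static bool has_allocatable_memory_limit(julong_t* limit) {
      *limit = julong_t(4) << 30;
      return true;
    }

    static julong_t limit_by_allocatable_memory(julong_t limit,
                                                julong_t max_virt_mem_fraction) {
      julong_t max_allocatable;
      if (has_allocatable_memory_limit(&max_allocatable)) {
        limit = std::min(limit, max_allocatable / max_virt_mem_fraction);
      }
      return limit;  // e.g. min(request, 4 GB / 2) == at most 2 GB
    }
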
diff --git a/src/share/vm/runtime/arguments.hpp b/src/share/vm/runtime/arguments.hpp
index d75fc9a82..0a4350ee3 100644
--- a/src/share/vm/runtime/arguments.hpp
+++ b/src/share/vm/runtime/arguments.hpp
@@ -312,6 +312,9 @@ class Arguments : AllStatic {
static void set_use_compressed_oops();
static void set_ergonomics_flags();
static void set_shared_spaces_flags();
+ // limits the given memory size by the maximum amount of memory this process is
+ // currently allowed to allocate or reserve.
+ static julong limit_by_allocatable_memory(julong size);
// Setup heap size
static void set_heap_size();
// Based on automatic selection criteria, should the
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index e73e5089b..5f1e0c646 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -457,6 +457,9 @@ class CommandLineFlags {
lp64_product(intx, ObjectAlignmentInBytes, 8, \
"Default object alignment in bytes, 8 is minimum") \
\
+ product(bool, AssumeMP, false, \
+ "Instruct the VM to assume multiple processors are available") \
+ \
/* UseMembar is theoretically a temp flag used for memory barrier \
* removal testing. It was supposed to be removed before FCS but has \
* been re-added (see 6401008) */ \
@@ -1404,6 +1407,10 @@ class CommandLineFlags {
"How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \
\
+ diagnostic(intx, GCLockerRetryAllocationCount, 2, \
+ "Number of times to retry allocations when" \
+ " blocked by the GC locker") \
+ \
develop(bool, UseCMSAdaptiveFreeLists, true, \
"Use Adaptive Free Lists in the CMS generation") \
\
@@ -1960,6 +1967,10 @@ class CommandLineFlags {
product(uintx, InitialRAMFraction, 64, \
"Fraction (1/n) of real memory used for initial heap size") \
\
+ develop(uintx, MaxVirtMemFraction, 2, \
+ "Maximum fraction (1/n) of virtual memory used for ergonomically" \
+ "determining maximum heap size") \
+ \
product(bool, UseAutoGCSelectPolicy, false, \
"Use automatic collection selection policy") \
\
@@ -2517,7 +2528,7 @@ class CommandLineFlags {
"disable locking assertions (for speed)") \
\
product(bool, RangeCheckElimination, true, \
- "Split loop iterations to eliminate range checks") \
+ "Eliminate range checks") \
\
develop_pd(bool, UncommonNullCast, \
"track occurrences of null in casts; adjust compiler tactics") \
diff --git a/src/share/vm/runtime/init.cpp b/src/share/vm/runtime/init.cpp
index 7ef17204b..62f295c7e 100644
--- a/src/share/vm/runtime/init.cpp
+++ b/src/share/vm/runtime/init.cpp
@@ -132,15 +132,6 @@ jint init_globals() {
javaClasses_init(); // must happen after vtable initialization
stubRoutines_init2(); // note: StubRoutines need 2-phase init
- // Although we'd like to, we can't easily do a heap verify
- // here because the main thread isn't yet a JavaThread, so
- // its TLAB may not be made parseable from the usual interfaces.
- if (VerifyBeforeGC && !UseTLAB &&
- Universe::heap()->total_collections() >= VerifyGCStartAt) {
- Universe::heap()->prepare_for_verify();
- Universe::verify(); // make sure we're starting with a clean slate
- }
-
// All the flags that get adjusted by VM_Version_init and os::init_2
// have been set so dump the flags now.
if (PrintFlagsFinal) {
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
index d061a0848..82f45df60 100644
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -180,11 +180,11 @@ class os: AllStatic {
// Interface for detecting multiprocessor system
static inline bool is_MP() {
assert(_processor_count > 0, "invalid processor count");
- return _processor_count > 1;
+ return _processor_count > 1 || AssumeMP;
}
static julong available_memory();
static julong physical_memory();
- static julong allocatable_physical_memory(julong size);
+ static bool has_allocatable_memory_limit(julong* limit);
static bool is_server_class_machine();
// number of CPUs
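
With -XX:+AssumeMP the VM no longer trusts a startup census of one processor: is_MP() stays true, so uniprocessor shortcuts (such as omitting lock prefixes and memory barriers) remain disabled on systems where CPUs may come online later. A minimal sketch of the new predicate with stand-in globals:

    static int  processor_count = 1;  // e.g. a one-CPU container at startup
    static bool AssumeMP = true;      // -XX:+AssumeMP

    static bool is_MP() {
      // The flag overrides the census, keeping multiprocessor-safe code paths.
      return processor_count > 1 || AssumeMP;
    }
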
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 322597fe4..577901a28 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -3423,12 +3423,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// real raw monitor. VM is setup enough here for raw monitor enter.
JvmtiExport::transition_pending_onload_raw_monitors();
- if (VerifyBeforeGC &&
- Universe::heap()->total_collections() >= VerifyGCStartAt) {
- Universe::heap()->prepare_for_verify();
- Universe::verify(); // make sure we're starting with a clean slate
- }
-
// Fully start NMT
MemTracker::start();
@@ -3452,6 +3446,11 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
assert (Universe::is_fully_initialized(), "not initialized");
+ if (VerifyBeforeGC && VerifyGCStartAt == 0) {
+ Universe::heap()->prepare_for_verify();
+ Universe::verify(); // make sure we're starting with a clean slate
+ }
+
EXCEPTION_MARK;
// At this point, the Universe is initialized, but we have not executed
diff --git a/src/share/vm/runtime/vframe.cpp b/src/share/vm/runtime/vframe.cpp
index bc9ca0a41..1b05aaa79 100644
--- a/src/share/vm/runtime/vframe.cpp
+++ b/src/share/vm/runtime/vframe.cpp
@@ -391,40 +391,27 @@ vframeStream::vframeStream(JavaThread* thread, frame top_frame,
// Step back n frames, skip any pseudo frames in between.
// This function is used in Class.forName, Class.newInstance, Method.Invoke,
// AccessController.doPrivileged.
-//
-// NOTE that in JDK 1.4 this has been exposed to Java as
-// sun.reflect.Reflection.getCallerClass(), which can be inlined.
-// Inlined versions must match this routine's logic.
-// Native method prefixing logic does not need to match since
-// the method names don't match and inlining will not occur.
-// See, for example,
-// Parse::inline_native_Reflection_getCallerClass in
-// opto/library_call.cpp.
void vframeStreamCommon::security_get_caller_frame(int depth) {
- bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
-
- while (!at_end()) {
- if (Universe::reflect_invoke_cache()->is_same_method(method())) {
- // This is Method.invoke() -- skip it
- } else if (use_new_reflection &&
- method()->method_holder()
- ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
- // This is an auxilary frame -- skip it
- } else if (method()->is_method_handle_intrinsic() ||
- method()->is_compiled_lambda_form()) {
- // This is an internal adapter frame for method handles -- skip it
- } else {
- // This is non-excluded frame, we need to count it against the depth
- if (depth-- <= 0) {
- // we have reached the desired depth, we are done
- break;
+ assert(depth >= 0, err_msg("invalid depth: %d", depth));
+ for (int n = 0; !at_end(); security_next()) {
+ if (!method()->is_ignored_by_security_stack_walk()) {
+ if (n == depth) {
+ // We have reached the desired depth; return.
+ return;
}
+ n++; // this is a non-skipped frame; count it against the depth
}
- if (method()->is_prefixed_native()) {
- skip_prefixed_method_and_wrappers();
- } else {
- next();
- }
+ }
+ // NOTE: At this point there were not enough frames on the stack
+ // to reach the requested depth. Callers of this method have to check for at_end.
+}
+
+
+void vframeStreamCommon::security_next() {
+ if (method()->is_prefixed_native()) {
+ skip_prefixed_method_and_wrappers(); // calls next()
+ } else {
+ next();
}
}
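
security_get_caller_frame() now shares its skip logic with the other walkers: security_next() advances past prefixed native wrappers, and only frames for which is_ignored_by_security_stack_walk() returns false are counted against the requested depth. A simplified sketch over a plain frame vector (hypothetical JFrame type):

    #include <vector>

    struct JFrame { bool ignored_by_security_stack_walk; };

    // Returns the index of the frame at the requested logical depth, or -1 if
    // the stack is exhausted first (callers must check, as with at_end()).
    static int security_get_caller_frame(const std::vector<JFrame>& stack,
                                         int depth) {
      int n = 0;
      for (size_t i = 0; i < stack.size(); i++) {  // security_next() analogue
        if (!stack[i].ignored_by_security_stack_walk) {
          if (n == depth) return (int)i;           // reached the desired depth
          n++;                                     // count this real frame
        }
      }
      return -1;
    }
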
diff --git a/src/share/vm/runtime/vframe.hpp b/src/share/vm/runtime/vframe.hpp
index b6bf34fc9..2e7191c04 100644
--- a/src/share/vm/runtime/vframe.hpp
+++ b/src/share/vm/runtime/vframe.hpp
@@ -336,6 +336,7 @@ class vframeStreamCommon : StackObj {
_frame = _frame.sender(&_reg_map);
} while (!fill_from_frame());
}
+ void security_next();
bool at_end() const { return _mode == at_end_mode; }
diff --git a/src/share/vm/services/memTracker.hpp b/src/share/vm/services/memTracker.hpp
index 934daf06a..ebcc41500 100644
--- a/src/share/vm/services/memTracker.hpp
+++ b/src/share/vm/services/memTracker.hpp
@@ -86,13 +86,13 @@ class MemTracker : AllStatic {
static inline void set_autoShutdown(bool value) { }
static void shutdown(ShutdownReason reason) { }
- static inline bool shutdown_in_progress() { }
+ static inline bool shutdown_in_progress() { return false; }
static bool print_memory_usage(BaselineOutputer& out, size_t unit,
- bool summary_only = true) { }
+ bool summary_only = true) { return false; }
static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
- bool summary_only = true) { }
+ bool summary_only = true) { return false; }
- static bool wbtest_wait_for_data_merge() { }
+ static bool wbtest_wait_for_data_merge() { return false; }
static inline void sync() { }
static inline void thread_exiting(JavaThread* thread) { }
diff --git a/src/share/vm/utilities/utf8.cpp b/src/share/vm/utilities/utf8.cpp
index da470b18c..8c013c9b3 100644
--- a/src/share/vm/utilities/utf8.cpp
+++ b/src/share/vm/utilities/utf8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -180,11 +180,12 @@ int UTF8::quoted_ascii_length(const char* utf8_str, int utf8_length) {
}
// converts a utf8 string to quoted ascii
-void UTF8::as_quoted_ascii(const char* utf8_str, char* buf, int buflen) {
+void UTF8::as_quoted_ascii(const char* utf8_str, int utf8_length, char* buf, int buflen) {
const char *ptr = utf8_str;
+ const char *utf8_end = ptr + utf8_length;
char* p = buf;
char* end = buf + buflen;
- while (*ptr != '\0') {
+ while (ptr < utf8_end) {
jchar c;
ptr = UTF8::next(ptr, &c);
if (c >= 32 && c < 127) {
@@ -196,6 +197,7 @@ void UTF8::as_quoted_ascii(const char* utf8_str, char* buf, int buflen) {
p += 6;
}
}
+ assert(p < end, "sanity");
*p = '\0';
}
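
as_quoted_ascii() now takes an explicit length and stops at utf8_str + utf8_length rather than at a NUL byte, since the UTF-8 data in the VM is length-delimited and need not be NUL-terminated. A simplified, self-contained variant restricted to single-byte input (the real code decodes full UTF-8 sequences via UTF8::next; the buffer-size checks here are added for safety):

    #include <cstdio>

    static void as_quoted_ascii(const char* utf8_str, int utf8_length,
                                char* buf, int buflen) {
      const char* end = utf8_str + utf8_length;  // was: while (*ptr != '\0')
      char* p = buf;
      char* limit = buf + buflen;
      for (const char* ptr = utf8_str; ptr < end; ptr++) {
        unsigned char c = (unsigned char)*ptr;
        if (c >= 32 && c < 127) {
          if (p + 1 >= limit) break;             // char + final NUL must fit
          *p++ = (char)c;
        } else {
          if (p + 6 >= limit) break;             // 6-char escape + NUL must fit
          std::sprintf(p, "\\u%04x", c);         // e.g. '\n' -> \u000a
          p += 6;
        }
      }
      *p = '\0';                                 // assumes buflen >= 1
    }
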
diff --git a/src/share/vm/utilities/utf8.hpp b/src/share/vm/utilities/utf8.hpp
index 69710fcce..354941e6d 100644
--- a/src/share/vm/utilities/utf8.hpp
+++ b/src/share/vm/utilities/utf8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@ class UTF8 : AllStatic {
static int quoted_ascii_length(const char* utf8_str, int utf8_length);
// converts a utf8 string to quoted ascii
- static void as_quoted_ascii(const char* utf8_str, char* buf, int buflen);
+ static void as_quoted_ascii(const char* utf8_str, int utf8_length, char* buf, int buflen);
// converts a quoted ascii string to utf8 string. returns the original
// string unchanged if nothing needs to be done.
diff --git a/test/compiler/8009761/Test8009761.java b/test/compiler/8009761/Test8009761.java
index c897ab400..f588b82cd 100644
--- a/test/compiler/8009761/Test8009761.java
+++ b/test/compiler/8009761/Test8009761.java
@@ -25,7 +25,7 @@
* @test
* @bug 8009761
* @summary Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
+ * @run main/othervm -Xmixed -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
*
*/
diff --git a/test/gc/TestVerifyBeforeGCDuringStartup.java b/test/gc/TestVerifyBeforeGCDuringStartup.java
new file mode 100644
index 000000000..109e45e4b
--- /dev/null
+++ b/test/gc/TestVerifyBeforeGCDuringStartup.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestVerifyBeforeGCDuringStartup.java
+ * @key gc
+ * @bug 8010463
+ * @summary Simple test run with -XX:+VerifyBeforeGC -XX:-UseTLAB to verify 8010463
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class TestVerifyBeforeGCDuringStartup {
+ public static void main(String args[]) throws Exception {
+ ProcessBuilder pb =
+ ProcessTools.createJavaProcessBuilder(System.getProperty("test.vm.opts"),
+ "-XX:-UseTLAB",
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+VerifyBeforeGC", "-version");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldContain("[Verifying");
+ output.shouldHaveExitValue(0);
+ }
+}
diff --git a/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java b/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java
index 37a5f3a4c..b3258466a 100644
--- a/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java
+++ b/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java
@@ -39,8 +39,10 @@ public class ClassMetaspaceSizeInJmapHeap {
public static void main(String[] args) throws Exception {
String pid = Integer.toString(ProcessTools.getProcessId());
- ProcessBuilder pb = new ProcessBuilder();
- pb.command(JDKToolFinder.getJDKTool("jmap"), "-heap", pid);
+ JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+ .addToolArg("-heap")
+ .addToolArg(pid);
+ ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt");
pb.redirectOutput(out);
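
Taken together, the launcher-based pattern this hunk adopts reads, in a minimal self-contained sketch (the class name is illustrative, and it assumes the jtreg test library is on the classpath):

    import java.io.File;
    import com.oracle.java.testlibrary.JDKToolLauncher;
    import com.oracle.java.testlibrary.ProcessTools;

    public class JmapHeapSketch {
        public static void main(String[] args) throws Exception {
            // Point jmap at the current test process.
            String pid = Integer.toString(ProcessTools.getProcessId());
            JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
                                                  .addToolArg("-heap")
                                                  .addToolArg(pid);
            ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
            pb.redirectOutput(new File("jmap.stdout.txt")); // capture the tool's output
            pb.start().waitFor();
        }
    }
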
diff --git a/test/runtime/7116786/Test7116786.java b/test/runtime/7116786/Test7116786.java
index 8c137ecdc..b91401925 100644
--- a/test/runtime/7116786/Test7116786.java
+++ b/test/runtime/7116786/Test7116786.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -338,9 +338,12 @@ class VerifyErrorCases {
"invalid constant pool index in ldc",
"Invalid index in ldc"),
- new Case("case58", "verifier.cpp", true, "verify_switch",
+ /* No longer a valid test case for bytecode version >= 51. Nonzero
+ * padding bytes are permitted with the lookupswitch and tableswitch
+ * bytecodes as of the JVMS, 3rd edition. */
+ new Case("case58", "verifier.cpp", false, "verify_switch",
"bad switch padding",
- "Nonzero padding byte in lookswitch or tableswitch"),
+ "Nonzero padding byte in lookupswitch or tableswitch"),
new Case("case59", "verifier.cpp", true, "verify_switch",
"tableswitch low is greater than high",
diff --git a/test/runtime/interned/SanityTest.java b/test/runtime/interned/SanityTest.java
new file mode 100644
index 000000000..779d3fc78
--- /dev/null
+++ b/test/runtime/interned/SanityTest.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SanityTest
+ * @summary Sanity check of String.intern() & GC
+ * @library /testlibrary /testlibrary/whitebox
+ * @build SanityTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SanityTest
+ */
+
+import java.util.*;
+import sun.hotspot.WhiteBox;
+
+
+public class SanityTest {
+ public static Object tmp;
+ public static void main(String... args) {
+
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ StringBuilder sb = new StringBuilder();
+ sb.append("1234x"); sb.append("x56789");
+ String str = sb.toString();
+
+ if (wb.isInStringTable(str)) {
+ throw new RuntimeException("String " + str + " is already interned");
+ }
+ str.intern();
+ if (!wb.isInStringTable(str)) {
+ throw new RuntimeException("String " + str + " is not interned");
+ }
+ str = sb.toString();
+ wb.fullGC();
+ if (wb.isInStringTable(str)) {
+ throw new RuntimeException("String " + str + " is in StringTable even after GC");
+ }
+ }
+}
diff --git a/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java b/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java
new file mode 100644
index 000000000..0f0c0a49d
--- /dev/null
+++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.java.testlibrary;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import com.oracle.java.testlibrary.JDKToolFinder;
+import com.oracle.java.testlibrary.ProcessTools;
+
+/**
+ * A utility for constructing command lines for starting JDK tool processes.
+ *
+ * A JDKToolLauncher is typically combined with a java.lang.ProcessBuilder
+ * to run a JDK tool. For example, the following code runs {@code jmap -heap}
+ * against a process, with GC logging turned on for the {@code jmap} process:
+ *
+ * <pre>
+ * {@code
+ * JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+ * .addVMArg("-XX:+PrintGC")
+ * .addVMArg("-XX:+PrintGCDetails")
+ * .addToolArg("-heap")
+ * .addToolArg(pid);
+ * ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
+ * Process p = pb.start();
+ * }
+ * </pre>
+ */
+public class JDKToolLauncher {
+ private final String executable;
+ private final List<String> vmArgs = new ArrayList<String>();
+ private final List<String> toolArgs = new ArrayList<String>();
+
+ private JDKToolLauncher(String tool) {
+ executable = JDKToolFinder.getJDKTool(tool);
+ vmArgs.addAll(Arrays.asList(ProcessTools.getPlatformSpecificVMArgs()));
+ }
+
+ /**
+ * Creates a new JDKToolLauncher for the specified tool.
+ *
+ * @param tool The name of the tool
+ * @return A new JDKToolLauncher
+ */
+ public static JDKToolLauncher create(String tool) {
+ return new JDKToolLauncher(tool);
+ }
+
+ /**
+ * Adds an argument to the JVM running the tool.
+ *
+ * The argument is passed to the JVM that runs the tool rather than to
+ * the tool itself; it is automatically prepended with "-J".
+ *
+ * Any platform-specific arguments required for running the tool are
+ * automatically added.
+ *
+ * @param arg The argument to the JVM running the tool
+ * @return The JDKToolLauncher instance
+ */
+ public JDKToolLauncher addVMArg(String arg) {
+ vmArgs.add("-J" + arg);
+ return this;
+ }
+
+ /**
+ * Adds an argument to the tool.
+ *
+ * @param arg The argument to the tool
+ * @return The JDKToolLauncher instance
+ */
+ public JDKToolLauncher addToolArg(String arg) {
+ toolArgs.add(arg);
+ return this;
+ }
+
+ /**
+ * Returns the command that can be used for running the tool.
+ *
+ * @return An array whose elements are the arguments of the command.
+ */
+ public String[] getCommand() {
+ List<String> command = new ArrayList<String>();
+ command.add(executable);
+ command.addAll(vmArgs);
+ command.addAll(toolArgs);
+ return command.toArray(new String[command.size()]);
+ }
+}
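
The practical effect of addVMArg is the "-J" routing: JDK command-line tools hand flags prefixed with "-J" to the JVM that hosts the tool instead of parsing them themselves. A rough sketch of what getCommand() assembles (the tool choice and pid here are hypothetical):

    import com.oracle.java.testlibrary.JDKToolLauncher;

    public class LauncherArgSketch {
        public static void main(String[] args) {
            String pid = "12345"; // hypothetical target process id
            JDKToolLauncher jstack = JDKToolLauncher.create("jstack")
                                                    .addVMArg("-Xmx64m") // emitted as "-J-Xmx64m"
                                                    .addToolArg(pid);
            // Prints roughly: <jdk>/bin/jstack <platform VM args> -J-Xmx64m 12345
            for (String part : jstack.getCommand()) {
                System.out.println(part);
            }
        }
    }
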
diff --git a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
index c9d23ef5f..d5d3ab525 100644
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
@@ -94,4 +94,10 @@ public class WhiteBox {
public native int getMethodCompilationLevel(Method method);
public native boolean setDontInlineMethod(Method method, boolean value);
public native int getCompileQueuesSize();
+
+ // Interned strings
+ public native boolean isInStringTable(String str);
+
+ // Force a full GC
+ public native void fullGC();
}
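
A minimal sketch of driving the two new hooks outside the jtreg harness (it assumes the same setup as SanityTest above: the whitebox classes on the boot class path plus -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI; the class name is illustrative):

    import sun.hotspot.WhiteBox;

    public class InternProbe {
        public static void main(String... args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            // Build the string at run time so no compile-time literal is pre-interned.
            String s = new StringBuilder("probe").append("-42").toString();
            s.intern();
            System.out.println("in table after intern: " + wb.isInStringTable(s));
            s = new StringBuilder("probe").append("-42").toString(); // drop the interned instance
            wb.fullGC(); // dead entries are unlinked during a full GC, as SanityTest relies on
            System.out.println("in table after GC:     " + wb.isInStringTable(s));
        }
    }
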