author     amurillo <none@none>  2012-02-03 18:04:09 -0800
committer  amurillo <none@none>  2012-02-03 18:04:09 -0800
commit     f3162ff0b8278787df6103466ae0a122b60b9d66 (patch)
tree       e8400c2d241156b2dbc79ac53fa99b9eaed6629d
parent     f2b000b6ea9e4b0450fbc5e039595041fb983b06 (diff)
parent     f321affcec02fc34cbd39f1812a62b869b9b7990 (diff)
Diffstat:
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java | 11
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java | 8
-rw-r--r--  make/Makefile | 80
-rw-r--r--  make/bsd/makefiles/defs.make | 37
-rw-r--r--  make/bsd/makefiles/universal.gmk | 109
-rw-r--r--  make/defs.make | 20
-rw-r--r--  make/hotspot_version | 2
-rw-r--r--  src/cpu/sparc/vm/assembler_sparc.hpp | 5
-rw-r--r--  src/cpu/sparc/vm/assembler_sparc.inline.hpp | 13
-rw-r--r--  src/cpu/sparc/vm/c2_globals_sparc.hpp | 4
-rw-r--r--  src/cpu/sparc/vm/frame_sparc.cpp | 1
-rw-r--r--  src/cpu/sparc/vm/sharedRuntime_sparc.cpp | 395
-rw-r--r--  src/cpu/x86/vm/c2_globals_x86.hpp | 4
-rw-r--r--  src/cpu/x86/vm/frame_x86.cpp | 1
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.cpp | 68
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_32.cpp | 437
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_64.cpp | 453
-rw-r--r--  src/os/windows/vm/os_windows.cpp | 3
-rw-r--r--  src/share/tools/ProjectCreator/BuildConfig.java | 3
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp | 131
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.hpp | 4
-rw-r--r--  src/share/vm/c1/c1_Runtime1.cpp | 8
-rw-r--r--  src/share/vm/c1/c1_ValueMap.cpp | 1
-rw-r--r--  src/share/vm/ci/bcEscapeAnalyzer.cpp | 6
-rw-r--r--  src/share/vm/ci/ciEnv.hpp | 16
-rw-r--r--  src/share/vm/classfile/dictionary.cpp | 5
-rw-r--r--  src/share/vm/classfile/systemDictionary.cpp | 28
-rw-r--r--  src/share/vm/classfile/systemDictionary.hpp | 14
-rw-r--r--  src/share/vm/code/compiledIC.cpp | 3
-rw-r--r--  src/share/vm/code/nmethod.cpp | 14
-rw-r--r--  src/share/vm/code/nmethod.hpp | 8
-rw-r--r--  src/share/vm/compiler/compileBroker.cpp | 84
-rw-r--r--  src/share/vm/compiler/compileBroker.hpp | 12
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp | 5
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 16
-rw-r--r--  src/share/vm/gc_implementation/g1/g1MarkSweep.cpp | 6
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp | 13
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp | 16
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp | 10
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.cpp | 33
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.hpp | 49
-rw-r--r--  src/share/vm/memory/gcLocker.cpp | 89
-rw-r--r--  src/share/vm/memory/gcLocker.hpp | 105
-rw-r--r--  src/share/vm/memory/gcLocker.inline.hpp | 35
-rw-r--r--  src/share/vm/memory/genCollectedHeap.cpp | 14
-rw-r--r--  src/share/vm/memory/genMarkSweep.cpp | 6
-rw-r--r--  src/share/vm/oops/arrayOop.cpp | 3
-rw-r--r--  src/share/vm/oops/constantPoolOop.cpp | 4
-rw-r--r--  src/share/vm/oops/instanceKlass.hpp | 4
-rw-r--r--  src/share/vm/oops/klass.cpp | 2
-rw-r--r--  src/share/vm/oops/klass.hpp | 4
-rw-r--r--  src/share/vm/oops/methodOop.cpp | 7
-rw-r--r--  src/share/vm/oops/methodOop.hpp | 4
-rw-r--r--  src/share/vm/opto/loopnode.cpp | 4
-rw-r--r--  src/share/vm/prims/jvm.cpp | 6
-rw-r--r--  src/share/vm/prims/nativeLookup.cpp | 95
-rw-r--r--  src/share/vm/prims/nativeLookup.hpp | 5
-rw-r--r--  src/share/vm/runtime/arguments.cpp | 7
-rw-r--r--  src/share/vm/runtime/deoptimization.cpp | 14
-rw-r--r--  src/share/vm/runtime/frame.cpp | 2
-rw-r--r--  src/share/vm/runtime/globals.hpp | 37
-rw-r--r--  src/share/vm/runtime/init.cpp | 5
-rw-r--r--  src/share/vm/runtime/mutex.cpp | 10
-rw-r--r--  src/share/vm/runtime/safepoint.cpp | 61
-rw-r--r--  src/share/vm/runtime/safepoint.hpp | 11
-rw-r--r--  src/share/vm/runtime/sharedRuntime.cpp | 21
-rw-r--r--  src/share/vm/runtime/sharedRuntime.hpp | 5
-rw-r--r--  src/share/vm/runtime/thread.cpp | 29
-rw-r--r--  src/share/vm/runtime/thread.hpp | 25
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp | 9
-rw-r--r--  src/share/vm/trace/traceMacros.hpp | 4
-rw-r--r--  src/share/vm/utilities/debug.cpp | 14
-rw-r--r--  src/share/vm/utilities/debug.hpp | 33
-rw-r--r--  src/share/vm/utilities/events.cpp | 231
-rw-r--r--  src/share/vm/utilities/events.hpp | 245
-rw-r--r--  src/share/vm/utilities/exceptions.cpp | 4
-rw-r--r--  src/share/vm/utilities/hashtable.hpp | 4
-rw-r--r--  src/share/vm/utilities/vmError.cpp | 12
-rw-r--r--  test/compiler/7090976/Test7090976.java | 82
-rw-r--r--  test/compiler/7141637/SpreadNullArg.java | 62

80 files changed, 2624 insertions(+), 826 deletions(-)
diff --git a/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java b/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java
index 582789ac7..14d6c566b 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,15 +42,6 @@ public class LoaderConstraintTable extends TwoOopHashtable {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("LoaderConstraintTable");
- nofBuckets = db.lookupIntConstant("LoaderConstraintTable::_nof_buckets").intValue();
- }
-
- // Fields
- private static int nofBuckets;
-
- // Accessors
- public static int getNumOfBuckets() {
- return nofBuckets;
}
public LoaderConstraintTable(Address addr) {
diff --git a/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java b/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java
index c999ff1a8..11adb3cf1 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,6 @@ public class SystemDictionary {
private static AddressField placeholdersField;
private static AddressField loaderConstraintTableField;
private static sun.jvm.hotspot.types.OopField javaSystemLoaderField;
- private static int nofBuckets;
private static sun.jvm.hotspot.types.OopField objectKlassField;
private static sun.jvm.hotspot.types.OopField classLoaderKlassField;
@@ -62,7 +61,6 @@ public class SystemDictionary {
placeholdersField = type.getAddressField("_placeholders");
loaderConstraintTableField = type.getAddressField("_loader_constraints");
javaSystemLoaderField = type.getOopField("_java_system_loader");
- nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
@@ -142,10 +140,6 @@ public class SystemDictionary {
return newOop(javaSystemLoaderField.getValue());
}
- public static int getNumOfBuckets() {
- return nofBuckets;
- }
-
private static Oop newOop(OopHandle handle) {
return VM.getVM().getObjectHeap().newOop(handle);
}
diff --git a/make/Makefile b/make/Makefile
index 1d6af55a3..afbe68ee0 100644
--- a/make/Makefile
+++ b/make/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -89,19 +89,31 @@ KERNEL_VM_TARGETS=productkernel fastdebugkernel optimizedkernel jvmgkernel
ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero
SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark
+COMMON_VM_PRODUCT_TARGETS=product product1 productkernel docs export_product
+COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
+COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 jvmgkernel docs export_debug
+
# JDK directory list
JDK_DIRS=bin include jre lib demo
all: all_product all_fastdebug
-ifndef BUILD_CLIENT_ONLY
-all_product: product product1 productkernel docs export_product
-all_fastdebug: fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
-all_debug: jvmg jvmg1 jvmgkernel docs export_debug
-else
+
+ifdef BUILD_CLIENT_ONLY
all_product: product1 docs export_product
all_fastdebug: fastdebug1 docs export_fastdebug
all_debug: jvmg1 docs export_debug
+else
+ifeq ($(MACOSX_UNIVERSAL),true)
+all_product: universal_product
+all_fastdebug: universal_fastdebug
+all_debug: universal_debug
+else
+all_product: $(COMMON_VM_PRODUCT_TARGETS)
+all_fastdebug: $(COMMON_VM_FASTDEBUG_TARGETS)
+all_debug: $(COMMON_VM_DEBUG_TARGETS)
endif
+endif
+
all_optimized: optimized optimized1 optimizedkernel docs export_optimized
allzero: all_productzero all_fastdebugzero
@@ -232,20 +244,19 @@ export_debug:
$(MAKE) VM_SUBDIR=${VM_DEBUG} EXPORT_SUBDIR=/debug generic_export
export_optimized:
$(MAKE) VM_SUBDIR=optimized EXPORT_SUBDIR=/optimized generic_export
-export_product_jdk:
+export_product_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
VM_SUBDIR=product generic_export
-export_optimized_jdk:
+export_optimized_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
VM_SUBDIR=optimized generic_export
-export_fastdebug_jdk:
+export_fastdebug_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/fastdebug \
VM_SUBDIR=fastdebug generic_export
-export_debug_jdk:
+export_debug_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/debug \
VM_SUBDIR=${VM_DEBUG} generic_export
-
# Export file copy rules
XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt
DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
@@ -444,14 +455,14 @@ test_jdk:
endif
$(JDK_IMAGE_DIR)/bin/java -server -version
-copy_product_jdk:
+copy_product_jdk::
$(RM) -r $(JDK_IMAGE_DIR)
$(MKDIR) -p $(JDK_IMAGE_DIR)
($(CD) $(JDK_IMPORT_PATH) && \
$(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
-copy_fastdebug_jdk:
+copy_fastdebug_jdk::
$(RM) -r $(JDK_IMAGE_DIR)/fastdebug
$(MKDIR) -p $(JDK_IMAGE_DIR)/fastdebug
if [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
@@ -464,7 +475,7 @@ copy_fastdebug_jdk:
($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
fi
-copy_debug_jdk:
+copy_debug_jdk::
$(RM) -r $(JDK_IMAGE_DIR)/debug
$(MKDIR) -p $(JDK_IMAGE_DIR)/debug
if [ -d $(JDK_IMPORT_PATH)/debug ] ; then \
@@ -481,36 +492,6 @@ copy_debug_jdk:
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
fi
-# macosx universal builds
-
-ifeq ($(MACOSX_UNIVERSAL), true)
-$(UNIVERSAL_LIPO_LIST):
- lipo -create -output $@ $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@)
-
-$(UNIVERSAL_COPY_LIST):
- $(CP) $(EXPORT_JRE_LIB_DIR)/i386/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) $@
-
-universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
-endif
-
-universal_product:
- $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_product
- $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_product
- $(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
- $(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
-
-universal_fastdebug:
- $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_fastdebug
- $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_fastdebug
- $(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
- $(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
-
-universal_debug:
- $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_debug
- $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_debug
- $(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
- $(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
-
#
# Check target
#
@@ -630,6 +611,13 @@ examples_help:
@$(ECHO) \
" $(MAKE) ALT_JDK_IMPORT_PATH=/opt/java/jdk$(JDK_VERSION)"
+# Universal build support
+ifeq ($(OS_VENDOR), Darwin)
+ifeq ($(MACOSX_UNIVERSAL),true)
+include $(GAMMADIR)/make/$(OSNAME)/makefiles/universal.gmk
+endif
+endif
+
# JPRT rule to build this workspace
include $(GAMMADIR)/make/jprt.gmk
@@ -639,6 +627,4 @@ include $(GAMMADIR)/make/jprt.gmk
export_product export_fastdebug export_debug export_optimized \
export_jdk_product export_jdk_fastdebug export_jdk_debug \
create_jdk copy_jdk update_jdk test_jdk \
- copy_product_jdk copy_fastdebug_jdk copy_debug_jdk universalize \
- universal_product
-
+ copy_product_jdk copy_fastdebug_jdk copy_debug_jdk
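
(The single-colon to double-colon conversion on the export_*_jdk and copy_*_jdk targets above relies on a standard GNU make feature: a double-colon target may be defined several times, each definition contributing an independent recipe, whereas multiple single-colon rules with recipes for one target are an error. That is what allows make/bsd/makefiles/universal.gmk, included near the bottom of this Makefile when MACOSX_UNIVERSAL is true, to append its export_universal/copy_universal post-processing steps to these same targets.)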
diff --git a/make/bsd/makefiles/defs.make b/make/bsd/makefiles/defs.make
index 2b160fefc..098664a8f 100644
--- a/make/bsd/makefiles/defs.make
+++ b/make/bsd/makefiles/defs.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -171,10 +171,33 @@ ADD_SA_BINARIES/zero =
EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
-UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
-UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
-UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
+# Universal build settings
+ifeq ($(OS_VENDOR), Darwin)
+ # Build universal binaries by default on Mac OS X
+ MACOSX_UNIVERSAL = true
+ ifneq ($(ALT_MACOSX_UNIVERSAL),)
+ MACOSX_UNIVERSAL = $(ALT_MACOSX_UNIVERSAL)
+ endif
+ MAKE_ARGS += MACOSX_UNIVERSAL=$(MACOSX_UNIVERSAL)
+
+ # Universal settings
+ ifeq ($(MACOSX_UNIVERSAL), true)
+
+ # Set universal export path but avoid using ARCH or PLATFORM subdirs
+ EXPORT_PATH=$(OUTPUTDIR)/export-universal$(EXPORT_SUBDIR)
+
+ # Set universal image dir
+ JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-universal$(EXPORT_SUBDIR)
-UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
-UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
-UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
+ # Binaries to 'universalize' if built
+ UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
+
+ # Files to simply copy in place
+ UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
+ UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
+
+ endif
+endif
diff --git a/make/bsd/makefiles/universal.gmk b/make/bsd/makefiles/universal.gmk
new file mode 100644
index 000000000..0b14e37b1
--- /dev/null
+++ b/make/bsd/makefiles/universal.gmk
@@ -0,0 +1,109 @@
+#
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# macosx universal builds
+universal_product:
+ $(MAKE) MACOSX_UNIVERSAL=true all_product_universal
+universal_fastdebug:
+ $(MAKE) MACOSX_UNIVERSAL=true all_fastdebug_universal
+universal_debug:
+ $(MAKE) MACOSX_UNIVERSAL=true all_debug_universal
+
+
+# Universal builds include 1 or more architectures in a single binary
+all_product_universal:
+# $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
+ $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
+ $(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
+all_fastdebug_universal:
+# $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
+ $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
+ $(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
+all_debug_universal:
+# $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
+ $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
+ $(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
+
+
+# Consolidate architecture builds into a single Universal binary
+universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
+ $(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
+
+
+# Package built libraries in a universal binary
+$(UNIVERSAL_LIPO_LIST):
+ BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
+ if [ -n "$${BUILT_LIPO_FILES}" ]; then \
+ $(MKDIR) -p $(shell dirname $@); \
+ lipo -create -output $@ $${BUILT_LIPO_FILES}; \
+ fi
+
+
+# Copy built non-universal binaries in place
+$(UNIVERSAL_COPY_LIST):
+ BUILT_COPY_FILE="$(EXPORT_JRE_LIB_DIR)/i386/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@)"; \
+ if [ -f $${BUILT_COPY_FILE} ]; then \
+ $(MKDIR) -p $(shell dirname $@); \
+ $(CP) $${BUILT_COPY_FILE} $@; \
+ fi
+
+
+# Replace arch specific binaries with universal binaries
+export_universal:
+ $(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
+ $(RM) -r $(JDK_IMAGE_DIR)/jre/lib/{i386,amd64}
+ $(RM) $(JDK_IMAGE_DIR)/jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
+ ($(CD) $(EXPORT_PATH) && \
+ $(TAR) -cf - *) | \
+ ($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xpf -)
+
+
+# Overlay universal binaries
+copy_universal:
+ $(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{i386,amd64}
+ $(RM) $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
+ ($(CD) $(EXPORT_PATH)$(COPY_SUBDIR) && \
+ $(TAR) -cf - *) | \
+ ($(CD) $(JDK_IMAGE_DIR)$(COPY_SUBDIR) && $(TAR) -xpf -)
+
+
+# Additional processing for universal builds
+export_product_jdk::
+ $(MAKE) EXPORT_SUBDIR= export_universal
+export_optimized_jdk::
+ $(MAKE) EXPORT_SUBDIR= export_universal
+export_fastdebug_jdk::
+ $(MAKE) EXPORT_SUBDIR=/fastdebug export_universal
+export_debug_jdk::
+ $(MAKE) EXPORT_SUBDIR=/debug export_universal
+copy_product_jdk::
+ $(MAKE) COPY_SUBDIR= copy_universal
+copy_fastdebug_jdk::
+ $(MAKE) COPY_SUBDIR=/fastdebug copy_universal
+copy_debug_jdk::
+ $(MAKE) COPY_SUBDIR=/debug copy_universal
+
+.PHONY: universal_product universal_fastdebug universal_debug \
+ all_product_universal all_fastdebug_universal all_debug_universal \
+ universalize export_universal copy_universal
diff --git a/make/defs.make b/make/defs.make
index 3a355c769..af6d7be19 100644
--- a/make/defs.make
+++ b/make/defs.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -185,6 +185,15 @@ ifneq ($(ALT_BOOTDIR),)
BOOTDIR=$(ALT_BOOTDIR)
endif
+# Select name of the export directory and honor ALT overrides
+EXPORT_PATH=$(OUTPUTDIR)/export-$(PLATFORM)$(EXPORT_SUBDIR)
+ifneq ($(ALT_EXPORT_PATH),)
+ EXPORT_PATH=$(ALT_EXPORT_PATH)
+endif
+
+# Default jdk image if one is created for you with create_jdk
+JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
+
# The platform dependent defs.make defines platform specific variable such
# as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make
@@ -263,15 +272,6 @@ MAKE_ARGS += JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
# includes this make/defs.make file.
MAKE_ARGS += HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION)
-# Select name of export directory
-EXPORT_PATH=$(OUTPUTDIR)/export-$(PLATFORM)$(EXPORT_SUBDIR)
-ifneq ($(ALT_EXPORT_PATH),)
- EXPORT_PATH=$(ALT_EXPORT_PATH)
-endif
-
-# Default jdk image if one is created for you with create_jdk
-JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
-
# Various export sub directories
EXPORT_INCLUDE_DIR = $(EXPORT_PATH)/include
EXPORT_DOCS_DIR = $(EXPORT_PATH)/docs
diff --git a/make/hotspot_version b/make/hotspot_version
index 5a1698988..b4d41c561 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=23
HS_MINOR_VER=0
-HS_BUILD_NUMBER=12
+HS_BUILD_NUMBER=13
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
diff --git a/src/cpu/sparc/vm/assembler_sparc.hpp b/src/cpu/sparc/vm/assembler_sparc.hpp
index 44713a005..04f8a9810 100644
--- a/src/cpu/sparc/vm/assembler_sparc.hpp
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2134,6 +2134,7 @@ public:
// address pseudos: make these names unlike instruction names to avoid confusion
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+ inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
@@ -2249,7 +2250,7 @@ public:
// this platform we assume byte size
inline void stbool(Register d, const Address& a) { stb(d, a); }
- inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
+ inline void ldbool(const Address& a, Register d) { ldub(a, d); }
inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
// klass oop manipulations if compressed
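
(The ldsb-to-ldub change above switches ldbool from a sign-extending to a zero-extending byte load. A C++ bool occupies a single byte, and this changeset reads one from compiled code via the new load_bool_contents, which also uses ldub. A minimal standalone illustration of the difference, not HotSpot code; it assumes a two's-complement target:)

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t flag = 0x80;                    // any non-zero byte means "true"
      int sign_extended = (int)(int8_t)flag;  // what ldsb produces: -128
      int zero_extended = (int)flag;          // what ldub produces:  128
      std::printf("%d %d\n", sign_extended, zero_extended);
      // A compare-against-zero test works either way, but code that compares
      // the loaded value against 1, or stores it back, would misbehave with
      // the sign-extended form.
      return 0;
    }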
diff --git a/src/cpu/sparc/vm/assembler_sparc.inline.hpp b/src/cpu/sparc/vm/assembler_sparc.inline.hpp
index d9b1aa549..fce5c377a 100644
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -692,6 +692,17 @@ inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Registe
}
+inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
+ assert_not_delayed();
+ if (ForceUnreachable) {
+ patchable_sethi(addrlit, d);
+ } else {
+ sethi(addrlit, d);
+ }
+ ldub(d, addrlit.low10() + offset, d);
+}
+
+
inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
diff --git a/src/cpu/sparc/vm/c2_globals_sparc.hpp b/src/cpu/sparc/vm/c2_globals_sparc.hpp
index f9d6684d1..68ecfefd5 100644
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@ define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
-define_pd_global(bool, TieredCompilation, true);
+define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 140000);
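
(trueInTiered, substituted above for the hard-coded true, and likewise in the x86 twin of this file further down, ties the TieredCompilation default to the build flavor rather than forcing it on. A sketch of the idiom, assuming the conventional HotSpot definition that historically lives in src/share/vm/runtime/globals.hpp:)

    // Sketch only: trueInTiered is true in builds compiled with -DTIERED and
    // false elsewhere, so define_pd_global(bool, TieredCompilation, trueInTiered)
    // enables tiered compilation by default only where the tiered
    // infrastructure is actually compiled in.
    #ifdef TIERED
    #define trueInTiered true
    #else
    #define trueInTiered false
    #endif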
diff --git a/src/cpu/sparc/vm/frame_sparc.cpp b/src/cpu/sparc/vm/frame_sparc.cpp
index f7bccc84a..1acc45748 100644
--- a/src/cpu/sparc/vm/frame_sparc.cpp
+++ b/src/cpu/sparc/vm/frame_sparc.cpp
@@ -28,6 +28,7 @@
#include "oops/markOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
diff --git a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
index 560ced690..23f00a674 100644
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -321,6 +321,16 @@ static int reg2offset(VMReg r) {
return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
+static VMRegPair reg64_to_VMRegPair(Register r) {
+ VMRegPair ret;
+ if (wordSize == 8) {
+ ret.set2(r->as_VMReg());
+ } else {
+ ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
+ }
+ return ret;
+}
+
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
@@ -1444,6 +1454,25 @@ static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+ if (src.first()->is_stack()) {
+ if (dst.first()->is_stack()) {
+ // stack to stack
+ __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
+ __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
+ } else {
+ // stack to reg
+ __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
+ }
+ } else if (dst.first()->is_stack()) {
+ // reg to stack
+ __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
+ } else {
+ __ mov(src.first()->as_Register(), dst.first()->as_Register());
+ }
+}
+
+
// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
OopMap* map,
@@ -1748,6 +1777,166 @@ static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
}
}
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+ const int stack_slots,
+ const int total_in_args,
+ const int arg_save_area,
+ OopMap* map,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) {
+ // if map is non-NULL then the code should store the values,
+ // otherwise it should load them.
+ if (map != NULL) {
+ // Fill in the map
+ for (int i = 0; i < total_in_args; i++) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ if (in_regs[i].first()->is_stack()) {
+ int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+ map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+ } else if (in_regs[i].first()->is_Register()) {
+ map->set_oop(in_regs[i].first());
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+ }
+ }
+
+ // Save or restore double word values
+ int handle_index = 0;
+ for (int i = 0; i < total_in_args; i++) {
+ int slot = handle_index + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ if (reg->is_global()) {
+ handle_index += 2;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ stx(reg, SP, offset + STACK_BIAS);
+ } else {
+ __ ldx(SP, offset + STACK_BIAS, reg);
+ }
+ }
+ } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
+ handle_index += 2;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
+ } else {
+ __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
+ }
+ }
+ }
+ // Save floats
+ for (int i = 0; i < total_in_args; i++) {
+ int slot = handle_index + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
+ handle_index++;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
+ } else {
+ __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
+ }
+ }
+ }
+
+}
+
+
+// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// keeps a new JNI critical region from starting until a GC has been
+// forced. Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+ const int stack_slots,
+ const int total_in_args,
+ const int arg_save_area,
+ OopMapSet* oop_maps,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) {
+ __ block_comment("check GC_locker::needs_gc");
+ Label cont;
+ AddressLiteral sync_state(GC_locker::needs_gc_address());
+ __ load_bool_contents(sync_state, G3_scratch);
+ __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
+ __ delayed()->nop();
+
+ // Save down any values that are live in registers and call into the
+ // runtime to halt for a GC
+ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, map, in_regs, in_sig_bt);
+
+ __ mov(G2_thread, L7_thread_cache);
+
+ __ set_last_Java_frame(SP, noreg);
+
+ __ block_comment("block_for_jni_critical");
+ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
+ __ delayed()->mov(L7_thread_cache, O0);
+ oop_maps->add_gc_map( __ offset(), map);
+
+ __ restore_thread(L7_thread_cache); // restore G2_thread
+ __ reset_last_Java_frame();
+
+ // Reload all the register arguments
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, NULL, in_regs, in_sig_bt);
+
+ __ bind(cont);
+#ifdef ASSERT
+ if (StressCriticalJNINatives) {
+ // Stress register saving
+ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, map, in_regs, in_sig_bt);
+ // Destroy argument registers
+ for (int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ if (reg->is_global()) {
+ __ mov(G0, reg);
+ }
+ } else if (in_regs[i].first()->is_FloatRegister()) {
+ __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
+ }
+ }
+
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, NULL, in_regs, in_sig_bt);
+ }
+#endif
+}
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+ // Pass the length, ptr pair
+ Label is_null, done;
+ if (reg.first()->is_stack()) {
+ VMRegPair tmp = reg64_to_VMRegPair(L2);
+ // Load the arg up from the stack
+ move_ptr(masm, reg, tmp);
+ reg = tmp;
+ }
+ __ cmp(reg.first()->as_Register(), G0);
+ __ brx(Assembler::equal, false, Assembler::pt, is_null);
+ __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
+ move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
+ __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
+ move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
+ __ ba_short(done);
+ __ bind(is_null);
+ // Pass zeros
+ move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
+ move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
+ __ bind(done);
+}
+
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
@@ -1762,6 +1951,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
+ bool is_critical_native = true;
+ address native_func = method->critical_native_function();
+ if (native_func == NULL) {
+ native_func = method->native_function();
+ is_critical_native = false;
+ }
+ assert(native_func != NULL, "must have function");
// Native nmethod wrappers never take possession of the oop arguments.
// So the caller will gc the arguments. The only thing we need an
@@ -1841,22 +2037,70 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
- int total_c_args = total_in_args + 1;
- if (method->is_static()) {
- total_c_args++;
+ int total_c_args = total_in_args;
+ int total_save_slots = 6 * VMRegImpl::slots_per_word;
+ if (!is_critical_native) {
+ total_c_args += 1;
+ if (method->is_static()) {
+ total_c_args++;
+ }
+ } else {
+ for (int i = 0; i < total_in_args; i++) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ // These have to be saved and restored across the safepoint
+ total_c_args++;
+ }
+ }
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
- VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+ VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+ BasicType* in_elem_bt = NULL;
int argc = 0;
- out_sig_bt[argc++] = T_ADDRESS;
- if (method->is_static()) {
- out_sig_bt[argc++] = T_OBJECT;
- }
+ if (!is_critical_native) {
+ out_sig_bt[argc++] = T_ADDRESS;
+ if (method->is_static()) {
+ out_sig_bt[argc++] = T_OBJECT;
+ }
- for (int i = 0; i < total_in_args ; i++ ) {
- out_sig_bt[argc++] = in_sig_bt[i];
+ for (int i = 0; i < total_in_args ; i++ ) {
+ out_sig_bt[argc++] = in_sig_bt[i];
+ }
+ } else {
+ Thread* THREAD = Thread::current();
+ in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+ SignatureStream ss(method->signature());
+ for (int i = 0; i < total_in_args ; i++ ) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ // Arrays are passed as int, elem* pair
+ out_sig_bt[argc++] = T_INT;
+ out_sig_bt[argc++] = T_ADDRESS;
+ Symbol* atype = ss.as_symbol(CHECK_NULL);
+ const char* at = atype->as_C_string();
+ if (strlen(at) == 2) {
+ assert(at[0] == '[', "must be");
+ switch (at[1]) {
+ case 'B': in_elem_bt[i] = T_BYTE; break;
+ case 'C': in_elem_bt[i] = T_CHAR; break;
+ case 'D': in_elem_bt[i] = T_DOUBLE; break;
+ case 'F': in_elem_bt[i] = T_FLOAT; break;
+ case 'I': in_elem_bt[i] = T_INT; break;
+ case 'J': in_elem_bt[i] = T_LONG; break;
+ case 'S': in_elem_bt[i] = T_SHORT; break;
+ case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
+ default: ShouldNotReachHere();
+ }
+ }
+ } else {
+ out_sig_bt[argc++] = in_sig_bt[i];
+ in_elem_bt[i] = T_VOID;
+ }
+ if (in_sig_bt[i] != T_VOID) {
+ assert(in_sig_bt[i] == ss.type(), "must match");
+ ss.next();
+ }
+ }
}
// Now figure out where the args must be stored and how much stack space
@@ -1866,6 +2110,35 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+ if (is_critical_native) {
+ // Critical natives may have to call out so they need a save area
+ // for register arguments.
+ int double_slots = 0;
+ int single_slots = 0;
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ switch (in_sig_bt[i]) {
+ case T_ARRAY:
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_SHORT:
+ case T_CHAR:
+ case T_INT: assert(reg->is_in(), "don't need to save these"); break;
+ case T_LONG: if (reg->is_global()) double_slots++; break;
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_FloatRegister()) {
+ switch (in_sig_bt[i]) {
+ case T_FLOAT: single_slots++; break;
+ case T_DOUBLE: double_slots++; break;
+ default: ShouldNotReachHere();
+ }
+ }
+ }
+ total_save_slots = double_slots * 2 + single_slots;
+ }
+
// Compute framesize for the wrapper. We need to handlize all oops in
// registers. We must create space for them here that is disjoint from
// the windowed save area because we have no control over when we might
@@ -1885,12 +2158,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now the space for the inbound oop handle area
- int oop_handle_offset = stack_slots;
- stack_slots += 6*VMRegImpl::slots_per_word;
+ int oop_handle_offset = round_to(stack_slots, 2);
+ stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
- int oop_temp_slot_offset = 0;
int klass_slot_offset = 0;
int klass_offset = -1;
int lock_slot_offset = 0;
@@ -1954,6 +2226,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ verify_thread();
+ if (is_critical_native) {
+ check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
+ oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+ }
//
// We immediately shuffle the arguments so that any vm call we have to
@@ -1982,7 +2258,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// caller.
//
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
- int c_arg = total_c_args - 1;
// Record sp-based slot for receiver on stack for non-static methods
int receiver_offset = -1;
@@ -2002,7 +2277,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#endif /* ASSERT */
- for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
+ for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
#ifdef ASSERT
if (in_regs[i].first()->is_Register()) {
@@ -2019,7 +2294,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
switch (in_sig_bt[i]) {
case T_ARRAY:
+ if (is_critical_native) {
+ unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
+ c_arg--;
+ break;
+ }
case T_OBJECT:
+ assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
@@ -2029,7 +2310,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
case T_FLOAT:
float_move(masm, in_regs[i], out_regs[c_arg]);
- break;
+ break;
case T_DOUBLE:
assert( i + 1 < total_in_args &&
@@ -2051,7 +2332,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Pre-load a static method's oop into O1. Used both by locking code and
// the normal JNI call code.
- if (method->is_static()) {
+ if (method->is_static() && !is_critical_native) {
__ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
// Now handlize the static class mirror in O1. It's known not-null.
@@ -2064,13 +2345,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register L6_handle = L6;
if (method->is_synchronized()) {
+ assert(!is_critical_native, "unhandled");
__ mov(O1, L6_handle);
}
// We have all of the arguments setup at this point. We MUST NOT touch any Oregs
// except O6/O7. So if we must call out we must push a new frame. We immediately
// push a new frame and flush the windows.
-
#ifdef _LP64
intptr_t thepc = (intptr_t) __ pc();
{
@@ -2202,32 +2483,28 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
// get JNIEnv* which is first argument to native
-
- __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
+ if (!is_critical_native) {
+ __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
+ }
// Use that pc we placed in O7 a while back as the current frame anchor
-
__ set_last_Java_frame(SP, O7);
- // Transition from _thread_in_Java to _thread_in_native.
- __ set(_thread_in_native, G3_scratch);
- __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-
- // We flushed the windows ages ago now mark them as flushed
-
- // mark windows as flushed
+ // We flushed the windows ages ago now mark them as flushed before transitioning.
__ set(JavaFrameAnchor::flushed, G3_scratch);
+ __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
- Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
+ // Transition from _thread_in_Java to _thread_in_native.
+ __ set(_thread_in_native, G3_scratch);
#ifdef _LP64
- AddressLiteral dest(method->native_function());
+ AddressLiteral dest(native_func);
__ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7, O7);
#else
- __ call(method->native_function(), relocInfo::runtime_call_type);
+ __ call(native_func, relocInfo::runtime_call_type);
#endif
- __ delayed()->st(G3_scratch, flags);
+ __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
__ restore_thread(L7_thread_cache); // restore G2_thread
@@ -2259,6 +2536,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
ShouldNotReachHere();
}
+ Label after_transition;
// must we block?
// Block, if necessary, before resuming in _thread_in_Java state.
@@ -2303,22 +2581,34 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// a distinct one for this pc
//
save_native_result(masm, ret_type, stack_slots);
- __ call_VM_leaf(L7_thread_cache,
- CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
- G2_thread);
+ if (!is_critical_native) {
+ __ call_VM_leaf(L7_thread_cache,
+ CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+ G2_thread);
+ } else {
+ __ call_VM_leaf(L7_thread_cache,
+ CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
+ G2_thread);
+ }
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
+
+ if (is_critical_native) {
+ // The call above performed the transition to thread_in_Java so
+ // skip the transition logic below.
+ __ ba(after_transition);
+ __ delayed()->nop();
+ }
+
__ bind(no_block);
}
// thread state is thread_in_native_trans. Any safepoint blocking has already
// happened so we can now change state to _thread_in_Java.
-
-
__ set(_thread_in_Java, G3_scratch);
__ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-
+ __ bind(after_transition);
Label no_reguard;
__ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
@@ -2416,12 +2706,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ verify_oop(I0);
}
- // reset handle block
- __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
- __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
+ if (!is_critical_native) {
+ // reset handle block
+ __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
+ __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
- __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
- check_forward_pending_exception(masm, G3_scratch);
+ __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
+ check_forward_pending_exception(masm, G3_scratch);
+ }
// Return
@@ -2450,6 +2742,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_offset),
oop_maps);
+
+ if (is_critical_native) {
+ nm->set_lazy_critical_native(true);
+ }
return nm;
}
@@ -2473,17 +2769,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
static bool offsets_initialized = false;
-static VMRegPair reg64_to_VMRegPair(Register r) {
- VMRegPair ret;
- if (wordSize == 8) {
- ret.set2(r->as_VMReg());
- } else {
- ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
- }
- return ret;
-}
-
-
nmethod *SharedRuntime::generate_dtrace_nmethod(
MacroAssembler *masm, methodHandle method) {
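
(The wrapper changes above add the "critical native" fast path: when methodOop::critical_native_function() is set, the generated wrapper calls that entry instead of the regular JNI function, passing no JNIEnv and no receiver/class handle, refusing synchronized methods, and expanding every array argument into an (int length, elem* body) pair via unpack_array_argument. A hedged sketch of what such a function pair could look like; the demo class and method names are invented, and the JavaCritical_ prefix is the lookup convention the nativeLookup.cpp changes in this changeset's file list are understood to add:)

    #include <jni.h>

    // Regular JNI entry point for a hypothetical
    //   package demo;  class Sum { static native int sum(int[] a); }
    extern "C" JNIEXPORT jint JNICALL
    Java_demo_Sum_sum(JNIEnv* env, jclass, jintArray arr) {
      jint n = env->GetArrayLength(arr);
      jint* body = env->GetIntArrayElements(arr, NULL);
      jint s = 0;
      for (jint i = 0; i < n; i++) s += body[i];
      env->ReleaseIntArrayElements(arr, body, JNI_ABORT);  // no writes made
      return s;
    }

    // Critical variant the wrapper can invoke directly: no JNIEnv, no jclass,
    // and the int[] argument arrives pre-unpacked as a (length, body) pair,
    // in that order. Such a function must not block, allocate, or call back
    // into the VM.
    extern "C" JNIEXPORT jint JNICALL
    JavaCritical_demo_Sum_sum(jint n, jint* body) {
      jint s = 0;
      for (jint i = 0; i < n; i++) s += body[i];
      return s;
    }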
diff --git a/src/cpu/x86/vm/c2_globals_x86.hpp b/src/cpu/x86/vm/c2_globals_x86.hpp
index da72d84e3..749c48f5e 100644
--- a/src/cpu/x86/vm/c2_globals_x86.hpp
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@ define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
-define_pd_global(bool, TieredCompilation, true);
+define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 100000);
diff --git a/src/cpu/x86/vm/frame_x86.cpp b/src/cpu/x86/vm/frame_x86.cpp
index 4255be664..4e87936a9 100644
--- a/src/cpu/x86/vm/frame_x86.cpp
+++ b/src/cpu/x86/vm/frame_x86.cpp
@@ -28,6 +28,7 @@
#include "oops/markOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
diff --git a/src/cpu/x86/vm/methodHandles_x86.cpp b/src/cpu/x86/vm/methodHandles_x86.cpp
index a6ccfb830..55f0eb0ee 100644
--- a/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -2364,23 +2364,19 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// grab another temp
Register rsi_temp = rsi;
- { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
- // (preceding push must be done after argslot address is taken!)
-#define UNPUSH_RSI \
- { if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
// arx_argslot points both to the array and to the first output arg
vmarg = Address(rax_argslot, 0);
// Get the array value.
- Register rsi_array = rsi_temp;
+ Register rdi_array = rdi_temp;
Register rdx_array_klass = rdx_temp;
BasicType elem_type = ek_adapter_opt_spread_type(ek);
int elem_slots = type2size[elem_type]; // 1 or 2
int array_slots = 1; // array is always a T_OBJECT
int length_offset = arrayOopDesc::length_offset_in_bytes();
int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
- __ movptr(rsi_array, vmarg);
+ __ movptr(rdi_array, vmarg);
Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
if (length_can_be_zero) {
@@ -2391,12 +2387,30 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ testl(rbx_temp, rbx_temp);
__ jcc(Assembler::notZero, L_skip);
}
- __ testptr(rsi_array, rsi_array);
- __ jcc(Assembler::zero, L_array_is_empty);
+ __ testptr(rdi_array, rdi_array);
+ __ jcc(Assembler::notZero, L_skip);
+
+ // If 'rsi' contains the 'saved_last_sp' (this is only the
+ // case in a 32-bit version of the VM) we have to save 'rsi'
+ // on the stack because later on (at 'L_array_is_empty') 'rsi'
+ // will be overwritten.
+ { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
+ // Also prepare a handy macro which restores 'rsi' if required.
+#define UNPUSH_RSI \
+ { if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
+
+ __ jmp(L_array_is_empty);
__ bind(L_skip);
}
- __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
- __ load_klass(rdx_array_klass, rsi_array);
+ __ null_check(rdi_array, oopDesc::klass_offset_in_bytes());
+ __ load_klass(rdx_array_klass, rdi_array);
+
+ // Save 'rsi' if required (see comment above). Do this only
+ // after the null check such that the exception handler which is
+ // called in the case of a null pointer exception will not be
+ // confused by the extra value on the stack (it expects the
+ // return pointer on top of the stack)
+ { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
// Check the array type.
Register rbx_klass = rbx_temp;
@@ -2404,18 +2418,18 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
load_klass_from_Class(_masm, rbx_klass);
Label ok_array_klass, bad_array_klass, bad_array_length;
- __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass);
+ __ check_klass_subtype(rdx_array_klass, rbx_klass, rsi_temp, ok_array_klass);
// If we get here, the type check failed!
__ jmp(bad_array_klass);
__ BIND(ok_array_klass);
// Check length.
if (length_constant >= 0) {
- __ cmpl(Address(rsi_array, length_offset), length_constant);
+ __ cmpl(Address(rdi_array, length_offset), length_constant);
} else {
Register rbx_vminfo = rbx_temp;
load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
- __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
+ __ cmpl(rbx_vminfo, Address(rdi_array, length_offset));
}
__ jcc(Assembler::notEqual, bad_array_length);
@@ -2427,9 +2441,9 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
// 'stack_move' is negative number of words to insert
// This number already accounts for elem_slots.
- Register rdi_stack_move = rdi_temp;
- load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
- __ cmpptr(rdi_stack_move, 0);
+ Register rsi_stack_move = rsi_temp;
+ load_stack_move(_masm, rsi_stack_move, rcx_recv, true);
+ __ cmpptr(rsi_stack_move, 0);
assert(stack_move_unit() < 0, "else change this comparison");
__ jcc(Assembler::less, L_insert_arg_space);
__ jcc(Assembler::equal, L_copy_args);
@@ -2440,12 +2454,12 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ jmp(L_args_done); // no spreading to do
__ BIND(L_insert_arg_space);
// come here in the usual case, stack_move < 0 (2 or more spread arguments)
- Register rsi_temp = rsi_array; // spill this
- insert_arg_slots(_masm, rdi_stack_move,
- rax_argslot, rbx_temp, rsi_temp);
+ Register rdi_temp = rdi_array; // spill this
+ insert_arg_slots(_masm, rsi_stack_move,
+ rax_argslot, rbx_temp, rdi_temp);
// reload the array since rsi was killed
// reload from rdx_argslot_limit since rax_argslot is now decremented
- __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
+ __ movptr(rdi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
} else if (length_constant >= 1) {
int new_slots = (length_constant * elem_slots) - array_slots;
insert_arg_slots(_masm, new_slots * stack_move_unit(),
@@ -2468,16 +2482,16 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (length_constant == -1) {
// [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
// Array element [0] goes at rdx_argslot_limit[-wordSize].
- Register rsi_source = rsi_array;
- __ lea(rsi_source, Address(rsi_array, elem0_offset));
+ Register rdi_source = rdi_array;
+ __ lea(rdi_source, Address(rdi_array, elem0_offset));
Register rdx_fill_ptr = rdx_argslot_limit;
Label loop;
__ BIND(loop);
__ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
move_typed_arg(_masm, elem_type, true,
- Address(rdx_fill_ptr, 0), Address(rsi_source, 0),
- rbx_temp, rdi_temp);
- __ addptr(rsi_source, type2aelembytes(elem_type));
+ Address(rdx_fill_ptr, 0), Address(rdi_source, 0),
+ rbx_temp, rsi_temp);
+ __ addptr(rdi_source, type2aelembytes(elem_type));
__ cmpptr(rdx_fill_ptr, rax_argslot);
__ jcc(Assembler::above, loop);
} else if (length_constant == 0) {
@@ -2488,8 +2502,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
for (int index = 0; index < length_constant; index++) {
slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward
move_typed_arg(_masm, elem_type, true,
- Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset),
- rbx_temp, rdi_temp);
+ Address(rax_argslot, slot_offset), Address(rdi_array, elem_offset),
+ rbx_temp, rsi_temp);
elem_offset += type2aelembytes(elem_type);
}
}
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
index 9b43aba25..c80f7c8af 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1091,12 +1091,238 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
}
}
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+ const int stack_slots,
+ const int total_in_args,
+ const int arg_save_area,
+ OopMap* map,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) {
+ // if map is non-NULL then the code should store the values,
+ // otherwise it should load them.
+ int handle_index = 0;
+ // Save down double word first
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
+ int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ handle_index += 2;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+ } else {
+ __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+ }
+ }
+ if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
+ int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ handle_index += 2;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
+ if (in_regs[i].second()->is_Register()) {
+ __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
+ }
+ } else {
+ __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+ if (in_regs[i].second()->is_Register()) {
+ __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
+ }
+ }
+ }
+ }
+ // Save or restore single word registers
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ assert(handle_index <= stack_slots, "overflow");
+ if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+ map->set_oop(VMRegImpl::stack2reg(slot));
+ }
+
+ // Value is in an input register; we must flush it to the stack
+ const Register reg = in_regs[i].first()->as_Register();
+ switch (in_sig_bt[i]) {
+ case T_ARRAY:
+ if (map != NULL) {
+ __ movptr(Address(rsp, offset), reg);
+ } else {
+ __ movptr(reg, Address(rsp, offset));
+ }
+ break;
+ case T_BOOLEAN:
+ case T_CHAR:
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ if (map != NULL) {
+ __ movl(Address(rsp, offset), reg);
+ } else {
+ __ movl(reg, Address(rsp, offset));
+ }
+ break;
+ case T_OBJECT:
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_XMMRegister()) {
+ if (in_sig_bt[i] == T_FLOAT) {
+ int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+ } else {
+ __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+ }
+ }
+ } else if (in_regs[i].first()->is_stack()) {
+ if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+ int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+ map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+ }
+ }
+ }
+}
+
+// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// keeps a new JNI critical region from starting until a GC has been
+// forced. Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+ Register thread,
+ int stack_slots,
+ int total_c_args,
+ int total_in_args,
+ int arg_save_area,
+ OopMapSet* oop_maps,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) {
+ __ block_comment("check GC_locker::needs_gc");
+ Label cont;
+ __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+ __ jcc(Assembler::equal, cont);
+
+ // Save down any incoming oops and call into the runtime to halt for a GC
+
+ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, map, in_regs, in_sig_bt);
+
+ address the_pc = __ pc();
+ oop_maps->add_gc_map( __ offset(), map);
+ __ set_last_Java_frame(thread, rsp, noreg, the_pc);
+
+ __ block_comment("block_for_jni_critical");
+ __ push(thread);
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
+ __ increment(rsp, wordSize);
+
+ __ get_thread(thread);
+ __ reset_last_Java_frame(thread, false, true);
+
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, NULL, in_regs, in_sig_bt);
+
+ __ bind(cont);
+#ifdef ASSERT
+ if (StressCriticalJNINatives) {
+ // Stress register saving
+ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, map, in_regs, in_sig_bt);
+ // Destroy argument registers
+ for (int i = 0; i < total_in_args - 1; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ __ xorptr(reg, reg);
+ } else if (in_regs[i].first()->is_XMMRegister()) {
+ __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
+ } else if (in_regs[i].first()->is_FloatRegister()) {
+ ShouldNotReachHere();
+ } else if (in_regs[i].first()->is_stack()) {
+ // Nothing to do
+ } else {
+ ShouldNotReachHere();
+ }
+ if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
+ i++;
+ }
+ }
+
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, NULL, in_regs, in_sig_bt);
+ }
+#endif
+}
+
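The calling pattern here is worth spelling out: the first save_or_restore_arguments call (map != NULL) stores the argument registers and records any oops in the OopMap, while the second (map == NULL) reloads them once block_for_jni_critical returns. The ASSERT-only StressCriticalJNINatives block runs the same save/clobber/restore sequence on every entry in debug builds, so bugs in the save area layout surface even when no GC is pending.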
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+ Register tmp_reg = rax;
+ assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
+ "possible collision");
+ assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
+ "possible collision");
+
+ // Pass the length, ptr pair
+ Label is_null, done;
+ VMRegPair tmp(tmp_reg->as_VMReg());
+ if (reg.first()->is_stack()) {
+ // Load the arg up from the stack
+ simple_move32(masm, reg, tmp);
+ reg = tmp;
+ }
+ __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+ __ jccb(Assembler::equal, is_null);
+ __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+ simple_move32(masm, tmp, body_arg);
+ // load the length relative to the body.
+ __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
+ arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+ simple_move32(masm, tmp, length_arg);
+ __ jmpb(done);
+ __ bind(is_null);
+ // Pass zeros
+ __ xorptr(tmp_reg, tmp_reg);
+ simple_move32(masm, tmp, body_arg);
+ simple_move32(masm, tmp, length_arg);
+ __ bind(done);
+}
+
+
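The lea/movl pair above computes the body pointer first and then reloads the length relative to it; a standalone C++ mirror of that address arithmetic, with a made-up header layout standing in for arrayOopDesc, looks like:

    #include <cstdio>
    #include <cstddef>

    // Sketch only: mimics unpack_array_argument's address arithmetic.
    // The header layout is hypothetical; real offsets come from arrayOopDesc.
    struct FakeArrayHeader { void* klass; int length; };

    int main() {
      const int length_offset = offsetof(FakeArrayHeader, length);
      const int base_offset   = sizeof(FakeArrayHeader); // elements follow header
      alignas(FakeArrayHeader) char raw[sizeof(FakeArrayHeader) + 4 * sizeof(int)] = {};
      ((FakeArrayHeader*)raw)->length = 4;

      char* body = raw + base_offset;             // lea(tmp, Address(array, base_offset))
      int length = *(int*)(body + length_offset   // movl(tmp, Address(tmp, length_offset
                           - base_offset);        //      - base_offset))
      printf("length = %d, body = array + %d\n", length, base_offset);
      return 0;
    }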
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
+//
+// Critical native functions are a shorthand for the use of
+// GetPrimitiveArrayCritical and disallow the use of any other JNI
+// functions. The wrapper is expected to unpack the arguments before
+// passing them to the callee and to perform checks before and after the
+// native call to ensure that the GC_locker
+// lock_critical/unlock_critical semantics are followed. Some other
+// parts of JNI setup are skipped, like the tear-down of the JNI handle
+// block and the check for pending exceptions, since it's impossible for
+// them to be thrown. (A hedged sketch of such a native's C-level
+// signature follows the end of this wrapper.)
+//
+// They are roughly structured like this:
+// if (GC_locker::needs_gc())
+// SharedRuntime::block_for_jni_critical();
+// transition to thread_in_native
+// unpack array arguments and call native entry point
+// check for safepoint in progress
+// check if any thread suspend flags are set
+// call into the JVM and possibly unlock the JNI critical region
+// if a GC was suppressed while in the critical native.
+// transition back to thread_in_Java
+// return to caller
+//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
int compile_id,
@@ -1105,6 +1331,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
+ bool is_critical_native = true;
+ address native_func = method->critical_native_function();
+ if (native_func == NULL) {
+ native_func = method->native_function();
+ is_critical_native = false;
+ }
+ assert(native_func != NULL, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
@@ -1115,30 +1348,72 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
- int total_c_args = total_in_args + 1;
- if (method->is_static()) {
- total_c_args++;
+ int total_c_args = total_in_args;
+ if (!is_critical_native) {
+ total_c_args += 1;
+ if (method->is_static()) {
+ total_c_args++;
+ }
+ } else {
+ for (int i = 0; i < total_in_args; i++) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ total_c_args++;
+ }
+ }
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
- VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+ VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+ BasicType* in_elem_bt = NULL;
int argc = 0;
- out_sig_bt[argc++] = T_ADDRESS;
- if (method->is_static()) {
- out_sig_bt[argc++] = T_OBJECT;
- }
+ if (!is_critical_native) {
+ out_sig_bt[argc++] = T_ADDRESS;
+ if (method->is_static()) {
+ out_sig_bt[argc++] = T_OBJECT;
+ }
- int i;
- for (i = 0; i < total_in_args ; i++ ) {
- out_sig_bt[argc++] = in_sig_bt[i];
+ for (int i = 0; i < total_in_args ; i++ ) {
+ out_sig_bt[argc++] = in_sig_bt[i];
+ }
+ } else {
+ Thread* THREAD = Thread::current();
+ in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+ SignatureStream ss(method->signature());
+ for (int i = 0; i < total_in_args ; i++ ) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ // Arrays are passed as int, elem* pair
+ out_sig_bt[argc++] = T_INT;
+ out_sig_bt[argc++] = T_ADDRESS;
+ Symbol* atype = ss.as_symbol(CHECK_NULL);
+ const char* at = atype->as_C_string();
+ if (strlen(at) == 2) {
+ assert(at[0] == '[', "must be");
+ switch (at[1]) {
+ case 'B': in_elem_bt[i] = T_BYTE; break;
+ case 'C': in_elem_bt[i] = T_CHAR; break;
+ case 'D': in_elem_bt[i] = T_DOUBLE; break;
+ case 'F': in_elem_bt[i] = T_FLOAT; break;
+ case 'I': in_elem_bt[i] = T_INT; break;
+ case 'J': in_elem_bt[i] = T_LONG; break;
+ case 'S': in_elem_bt[i] = T_SHORT; break;
+ case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
+ default: ShouldNotReachHere();
+ }
+ }
+ } else {
+ out_sig_bt[argc++] = in_sig_bt[i];
+ in_elem_bt[i] = T_VOID;
+ }
+ if (in_sig_bt[i] != T_VOID) {
+ assert(in_sig_bt[i] == ss.type(), "must match");
+ ss.next();
+ }
+ }
}
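The two-character check above means only one-dimensional primitive arrays get an element type recorded; a small standalone mirror of that descriptor mapping (names are illustrative, not VM types):

    #include <cstdio>
    #include <cstring>

    // Sketch only: the field-descriptor mapping used for critical natives.
    // "[B" -> byte, ..., while "[[I" or "[Ljava/lang/String;" fail the
    // two-character test and record no element type.
    static const char* element_of(const char* descriptor) {
      if (strlen(descriptor) != 2 || descriptor[0] != '[') return "(none)";
      switch (descriptor[1]) {
        case 'B': return "byte";   case 'C': return "char";
        case 'D': return "double"; case 'F': return "float";
        case 'I': return "int";    case 'J': return "long";
        case 'S': return "short";  case 'Z': return "boolean";
        default:  return "(none)";
      }
    }

    int main() {
      printf("[B -> %s, [[I -> %s\n", element_of("[B"), element_of("[[I"));
      return 0;
    }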
-
// Now figure out where the args must be stored and how much stack space
- // they require (neglecting out_preserve_stack_slots but space for storing
- // the 1st six register arguments). It's weird see int_stk_helper.
- //
+ // they require.
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
@@ -1151,9 +1426,44 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Now the space for the inbound oop handle area
+ int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
+ if (is_critical_native) {
+ // Critical natives may have to call out so they need a save area
+ // for register arguments.
+ int double_slots = 0;
+ int single_slots = 0;
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ switch (in_sig_bt[i]) {
+ case T_ARRAY:
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_SHORT:
+ case T_CHAR:
+ case T_INT: single_slots++; break;
+ case T_LONG: double_slots++; break;
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_XMMRegister()) {
+ switch (in_sig_bt[i]) {
+ case T_FLOAT: single_slots++; break;
+ case T_DOUBLE: double_slots++; break;
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_FloatRegister()) {
+ ShouldNotReachHere();
+ }
+ }
+ total_save_slots = double_slots * 2 + single_slots;
+ // align the save area
+ if (double_slots != 0) {
+ stack_slots = round_to(stack_slots, 2);
+ }
+ }
int oop_handle_offset = stack_slots;
- stack_slots += 2*VMRegImpl::slots_per_word;
+ stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
@@ -1161,7 +1471,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int klass_offset = -1;
int lock_slot_offset = 0;
bool is_static = false;
- int oop_temp_slot_offset = 0;
if (method->is_static()) {
klass_slot_offset = stack_slots;
@@ -1221,7 +1530,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// First thing make an ic check to see if we should even be here
// We are free to use all registers as temps without saving them and
- // restoring them except rbp,. rbp, is the only callee save register
+ // restoring them except rbp. rbp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
@@ -1230,7 +1539,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label hit;
Label exception_pending;
-
__ verify_oop(receiver);
__ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::equal, hit);
@@ -1292,11 +1600,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Generate a new frame for the wrapper.
__ enter();
- // -2 because return address is already present and so is saved rbp,
+ // -2 because return address is already present and so is saved rbp
__ subptr(rsp, stack_size - 2*wordSize);
- // Frame is now completed as far a size and linkage.
-
+ // Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
// Calculate the difference between rsp and rbp,. We need to know it
@@ -1319,7 +1626,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Compute the rbp, offset for any slots used after the jni call
int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
- int oop_temp_slot_rbp_offset = (oop_temp_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
// We use rdi as a thread pointer because it is callee save and
// if we load it once it is usable thru the entire wrapper
@@ -1332,6 +1638,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ get_thread(thread);
+ if (is_critical_native) {
+ check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
+ oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+ }
//
// We immediately shuffle the arguments so that any vm call we have to
@@ -1353,7 +1663,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// vectors we have in our possession. We simply walk the java vector to
// get the source locations and the c vector to get the destinations.
- int c_arg = method->is_static() ? 2 : 1 ;
+ int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
// Record rsp-based slot for receiver on stack for non-static methods
int receiver_offset = -1;
@@ -1373,10 +1683,16 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Are free to temporaries if we have to do stack to steck moves.
// All inbound args are referenced based on rbp, and all outbound args via rsp.
- for (i = 0; i < total_in_args ; i++, c_arg++ ) {
+ for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
switch (in_sig_bt[i]) {
case T_ARRAY:
+ if (is_critical_native) {
+ unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+ c_arg++;
+ break;
+ }
case T_OBJECT:
+ assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
@@ -1408,7 +1724,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Pre-load a static method's oop into rsi. Used both by locking code and
// the normal JNI call code.
- if (method->is_static()) {
+ if (method->is_static() && !is_critical_native) {
// load opp into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
@@ -1463,6 +1779,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Lock a synchronized method
if (method->is_synchronized()) {
+ assert(!is_critical_native, "unhandled");
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
@@ -1529,14 +1846,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get JNIEnv* which is first argument to native
-
- __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
- __ movptr(Address(rsp, 0), rdx);
+ if (!is_critical_native) {
+ __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
+ __ movptr(Address(rsp, 0), rdx);
+ }
// Now set thread in native
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
- __ call(RuntimeAddress(method->native_function()));
+ __ call(RuntimeAddress(native_func));
// WARNING - on Windows Java Natives use pascal calling convention and pop the
// arguments off of the stack. We could just re-adjust the stack pointer here
@@ -1591,6 +1909,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
+ Label after_transition;
+
// check for safepoint operation in progress and/or pending suspend requests
{ Label Continue;
@@ -1611,17 +1931,29 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//
save_native_result(masm, ret_type, stack_slots);
__ push(thread);
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
- JavaThread::check_special_condition_for_native_trans)));
+ if (!is_critical_native) {
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+ JavaThread::check_special_condition_for_native_trans)));
+ } else {
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+ JavaThread::check_special_condition_for_native_trans_and_transition)));
+ }
__ increment(rsp, wordSize);
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
+ if (is_critical_native) {
+ // The call above performed the transition to thread_in_Java so
+ // skip the transition logic below.
+ __ jmpb(after_transition);
+ }
+
__ bind(Continue);
}
// change thread state
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
+ __ bind(after_transition);
Label reguard;
Label reguard_done;
@@ -1710,15 +2042,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ verify_oop(rax);
}
- // reset handle block
- __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
-
- __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
-
- // Any exception pending?
- __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
- __ jcc(Assembler::notEqual, exception_pending);
+ if (!is_critical_native) {
+ // reset handle block
+ __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
+ // Any exception pending?
+ __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+ __ jcc(Assembler::notEqual, exception_pending);
+ }
// no exception, we're almost done
@@ -1829,16 +2161,18 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// BEGIN EXCEPTION PROCESSING
- // Forward the exception
- __ bind(exception_pending);
+ if (!is_critical_native) {
+ // Forward the exception
+ __ bind(exception_pending);
- // remove possible return value from FPU register stack
- __ empty_FPU_stack();
+ // remove possible return value from FPU register stack
+ __ empty_FPU_stack();
- // pop our frame
- __ leave();
- // and forward the exception
- __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ // pop our frame
+ __ leave();
+ // and forward the exception
+ __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ }
__ flush();
@@ -1851,6 +2185,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
+
+ if (is_critical_native) {
+ nm->set_lazy_critical_native(true);
+ }
+
return nm;
}
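That completes the 32-bit wrapper. For orientation, here is a hedged sketch of the callee side of a critical native for `static native int sum(byte[] data);` in a class p.C: there is no JNIEnv*/jclass, and each primitive array arrives as an (int length, elem*) pair, with zeros for a null array. The exact name-mangling and registration convention is an assumption of this sketch, not something shown in this hunk.

    #include <jni.h>

    // Hedged sketch of a critical native entry point. The JavaCritical_
    // prefix and the parameter order (length before body, matching the
    // out_regs[c_arg]/out_regs[c_arg + 1] pairing above) are assumptions.
    extern "C" jint JavaCritical_p_C_sum(jint length, jbyte* data) {
      jint sum = 0;
      for (jint i = 0; i < length; i++) {  // data may be NULL only when length == 0
        sum += data[i];
      }
      return sum;
    }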
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index b03eb92d0..b7af4544d 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -938,6 +938,25 @@ static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
}
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+ if (src.first()->is_stack()) {
+ if (dst.first()->is_stack()) {
+ // stack to stack
+ __ movq(rax, Address(rbp, reg2offset_in(src.first())));
+ __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
+ } else {
+ // stack to reg
+ __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
+ }
+ } else if (dst.first()->is_stack()) {
+ // reg to stack
+ __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
+ } else {
+ if (dst.first() != src.first()) {
+ __ movq(dst.first()->as_Register(), src.first()->as_Register());
+ }
+ }
+}
// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
@@ -1152,6 +1171,203 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
}
}
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+ const int stack_slots,
+ const int total_in_args,
+ const int arg_save_area,
+ OopMap* map,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) {
+ // if map is non-NULL then the code should store the values,
+ // otherwise it should load them.
+ int handle_index = 0;
+ // Save or restore double word registers first
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
+ int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ handle_index += 2;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+ } else {
+ __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+ }
+ }
+ if (in_regs[i].first()->is_Register() &&
+ (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
+ int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ handle_index += 2;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
+ if (in_sig_bt[i] == T_ARRAY) {
+ map->set_oop(VMRegImpl::stack2reg(slot));
+ }
+ } else {
+ __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
+ }
+ }
+ }
+ // Save or restore single word registers
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ assert(handle_index <= stack_slots, "overflow");
+
+ // Value is in an input register; we must flush it to the stack
+ const Register reg = in_regs[i].first()->as_Register();
+ switch (in_sig_bt[i]) {
+ case T_BOOLEAN:
+ case T_CHAR:
+ case T_BYTE:
+ case T_SHORT:
+ case T_INT:
+ if (map != NULL) {
+ __ movl(Address(rsp, offset), reg);
+ } else {
+ __ movl(reg, Address(rsp, offset));
+ }
+ break;
+ case T_ARRAY:
+ case T_LONG:
+ // handled above
+ break;
+ case T_OBJECT:
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_XMMRegister()) {
+ if (in_sig_bt[i] == T_FLOAT) {
+ int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+ int offset = slot * VMRegImpl::stack_slot_size;
+ assert(handle_index <= stack_slots, "overflow");
+ if (map != NULL) {
+ __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+ } else {
+ __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+ }
+ }
+ } else if (in_regs[i].first()->is_stack()) {
+ if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+ int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+ map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+ }
+ }
+ }
+}
+
+
+// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// keeps a new JNI critical region from starting until a GC has been
+// forced. Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+ int stack_slots,
+ int total_c_args,
+ int total_in_args,
+ int arg_save_area,
+ OopMapSet* oop_maps,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) {
+ __ block_comment("check GC_locker::needs_gc");
+ Label cont;
+ __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+ __ jcc(Assembler::equal, cont);
+
+ // Save down any incoming oops and call into the runtime to halt for a GC
+
+ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, map, in_regs, in_sig_bt);
+
+ address the_pc = __ pc();
+ oop_maps->add_gc_map( __ offset(), map);
+ __ set_last_Java_frame(rsp, noreg, the_pc);
+
+ __ block_comment("block_for_jni_critical");
+ __ movptr(c_rarg0, r15_thread);
+ __ mov(r12, rsp); // remember sp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
+ __ mov(rsp, r12); // restore sp
+ __ reinit_heapbase();
+
+ __ reset_last_Java_frame(false, true);
+
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, NULL, in_regs, in_sig_bt);
+
+ __ bind(cont);
+#ifdef ASSERT
+ if (StressCriticalJNINatives) {
+ // Stress register saving
+ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, map, in_regs, in_sig_bt);
+ // Destroy argument registers
+ for (int i = 0; i < total_in_args - 1; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ __ xorptr(reg, reg);
+ } else if (in_regs[i].first()->is_XMMRegister()) {
+ __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
+ } else if (in_regs[i].first()->is_FloatRegister()) {
+ ShouldNotReachHere();
+ } else if (in_regs[i].first()->is_stack()) {
+ // Nothing to do
+ } else {
+ ShouldNotReachHere();
+ }
+ if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
+ i++;
+ }
+ }
+
+ save_or_restore_arguments(masm, stack_slots, total_in_args,
+ arg_save_area, NULL, in_regs, in_sig_bt);
+ }
+#endif
+}
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+ Register tmp_reg = rax;
+ assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
+ "possible collision");
+ assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
+ "possible collision");
+
+ // Pass the length, ptr pair
+ Label is_null, done;
+ VMRegPair tmp;
+ tmp.set_ptr(tmp_reg->as_VMReg());
+ if (reg.first()->is_stack()) {
+ // Load the arg up from the stack
+ move_ptr(masm, reg, tmp);
+ reg = tmp;
+ }
+ __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+ __ jccb(Assembler::equal, is_null);
+ __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+ move_ptr(masm, tmp, body_arg);
+ // load the length relative to the body.
+ __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
+ arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+ move32_64(masm, tmp, length_arg);
+ __ jmpb(done);
+ __ bind(is_null);
+ // Pass zeros
+ __ xorptr(tmp_reg, tmp_reg);
+ move_ptr(masm, tmp, body_arg);
+ move32_64(masm, tmp, length_arg);
+ __ bind(done);
+}
+
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
@@ -1166,10 +1382,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
- // Native nmethod wrappers never take possesion of the oop arguments.
- // So the caller will gc the arguments. The only thing we need an
- // oopMap for is if the call is static
- //
+ bool is_critical_native = true;
+ address native_func = method->critical_native_function();
+ if (native_func == NULL) {
+ native_func = method->native_function();
+ is_critical_native = false;
+ }
+ assert(native_func != NULL, "must have function");
+
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
intptr_t start = (intptr_t)__ pc();
@@ -1180,27 +1400,72 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
- int total_c_args = total_in_args + 1;
- if (method->is_static()) {
- total_c_args++;
+ int total_c_args = total_in_args;
+ if (!is_critical_native) {
+ total_c_args += 1;
+ if (method->is_static()) {
+ total_c_args++;
+ }
+ } else {
+ for (int i = 0; i < total_in_args; i++) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ total_c_args++;
+ }
+ }
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
- VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+ VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+ BasicType* in_elem_bt = NULL;
int argc = 0;
- out_sig_bt[argc++] = T_ADDRESS;
- if (method->is_static()) {
- out_sig_bt[argc++] = T_OBJECT;
- }
+ if (!is_critical_native) {
+ out_sig_bt[argc++] = T_ADDRESS;
+ if (method->is_static()) {
+ out_sig_bt[argc++] = T_OBJECT;
+ }
- for (int i = 0; i < total_in_args ; i++ ) {
- out_sig_bt[argc++] = in_sig_bt[i];
+ for (int i = 0; i < total_in_args ; i++ ) {
+ out_sig_bt[argc++] = in_sig_bt[i];
+ }
+ } else {
+ Thread* THREAD = Thread::current();
+ in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+ SignatureStream ss(method->signature());
+ for (int i = 0; i < total_in_args ; i++ ) {
+ if (in_sig_bt[i] == T_ARRAY) {
+ // Arrays are passed as int, elem* pair
+ out_sig_bt[argc++] = T_INT;
+ out_sig_bt[argc++] = T_ADDRESS;
+ Symbol* atype = ss.as_symbol(CHECK_NULL);
+ const char* at = atype->as_C_string();
+ if (strlen(at) == 2) {
+ assert(at[0] == '[', "must be");
+ switch (at[1]) {
+ case 'B': in_elem_bt[i] = T_BYTE; break;
+ case 'C': in_elem_bt[i] = T_CHAR; break;
+ case 'D': in_elem_bt[i] = T_DOUBLE; break;
+ case 'F': in_elem_bt[i] = T_FLOAT; break;
+ case 'I': in_elem_bt[i] = T_INT; break;
+ case 'J': in_elem_bt[i] = T_LONG; break;
+ case 'S': in_elem_bt[i] = T_SHORT; break;
+ case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
+ default: ShouldNotReachHere();
+ }
+ }
+ } else {
+ out_sig_bt[argc++] = in_sig_bt[i];
+ in_elem_bt[i] = T_VOID;
+ }
+ if (in_sig_bt[i] != T_VOID) {
+ assert(in_sig_bt[i] == ss.type(), "must match");
+ ss.next();
+ }
+ }
}
// Now figure out where the args must be stored and how much stack space
// they require.
- //
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
@@ -1213,13 +1478,47 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Now the space for the inbound oop handle area
+ int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
+ if (is_critical_native) {
+ // Critical natives may have to call out so they need a save area
+ // for register arguments.
+ int double_slots = 0;
+ int single_slots = 0;
+ for ( int i = 0; i < total_in_args; i++) {
+ if (in_regs[i].first()->is_Register()) {
+ const Register reg = in_regs[i].first()->as_Register();
+ switch (in_sig_bt[i]) {
+ case T_ARRAY:
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_SHORT:
+ case T_CHAR:
+ case T_INT: single_slots++; break;
+ case T_LONG: double_slots++; break;
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_XMMRegister()) {
+ switch (in_sig_bt[i]) {
+ case T_FLOAT: single_slots++; break;
+ case T_DOUBLE: double_slots++; break;
+ default: ShouldNotReachHere();
+ }
+ } else if (in_regs[i].first()->is_FloatRegister()) {
+ ShouldNotReachHere();
+ }
+ }
+ total_save_slots = double_slots * 2 + single_slots;
+ // align the save area
+ if (double_slots != 0) {
+ stack_slots = round_to(stack_slots, 2);
+ }
+ }
int oop_handle_offset = stack_slots;
- stack_slots += 6*VMRegImpl::slots_per_word;
+ stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
- int oop_temp_slot_offset = 0;
int klass_slot_offset = 0;
int klass_offset = -1;
int lock_slot_offset = 0;
@@ -1272,7 +1571,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
-
// First thing make an ic check to see if we should even be here
// We are free to use all registers as temps without saving them and
@@ -1283,22 +1581,22 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const Register ic_reg = rax;
const Register receiver = j_rarg0;
- Label ok;
+ Label hit;
Label exception_pending;
assert_different_registers(ic_reg, receiver, rscratch1);
__ verify_oop(receiver);
__ load_klass(rscratch1, receiver);
__ cmpq(ic_reg, rscratch1);
- __ jcc(Assembler::equal, ok);
+ __ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(ok);
-
// Verified entry point must be aligned
__ align(8);
+ __ bind(hit);
+
int vep_offset = ((intptr_t)__ pc()) - start;
// The instruction at the verified entry point must be 5 bytes or longer
@@ -1319,9 +1617,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// -2 because return address is already present and so is saved rbp
__ subptr(rsp, stack_size - 2*wordSize);
- // Frame is now completed as far as size and linkage.
-
- int frame_complete = ((intptr_t)__ pc()) - start;
+ // Frame is now completed as far as size and linkage.
+ int frame_complete = ((intptr_t)__ pc()) - start;
#ifdef ASSERT
{
@@ -1341,7 +1638,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const Register oop_handle_reg = r14;
-
+ if (is_critical_native) {
+ check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
+ oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+ }
//
// We immediately shuffle the arguments so that any vm call we have to
@@ -1390,9 +1690,36 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
#endif /* ASSERT */
+ if (is_critical_native) {
+ // The mapping of Java and C arguments passed in registers is
+ // rotated by one, which helps when calling a regular Java method,
+ // but for critical natives it creates a cycle that can cause
+ // arguments to be killed before they are used. Break the cycle by
+ // moving the first argument into a temporary register. (A minimal
+ // illustration of breaking such a cycle follows this function.)
+ for (int i = 0; i < total_c_args; i++) {
+ if (in_regs[i].first()->is_Register() &&
+ in_regs[i].first()->as_Register() == rdi) {
+ __ mov(rbx, rdi);
+ in_regs[i].set1(rbx->as_VMReg());
+ }
+ }
+ }
+ // This may iterate in two different directions depending on the
+ // kind of native it is. The reason is that for regular JNI natives
+ // the incoming and outgoing registers are offset upwards and for
+ // critical natives they are offset down.
int c_arg = total_c_args - 1;
- for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
+ int stride = -1;
+ int init = total_in_args - 1;
+ if (is_critical_native) {
+ // stride forwards
+ c_arg = 0;
+ stride = 1;
+ init = 0;
+ }
+ for (int i = init, count = 0; count < total_in_args; i += stride, c_arg += stride, count++ ) {
#ifdef ASSERT
if (in_regs[i].first()->is_Register()) {
assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
@@ -1407,7 +1734,20 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
#endif /* ASSERT */
switch (in_sig_bt[i]) {
case T_ARRAY:
+ if (is_critical_native) {
+ unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+ c_arg++;
+#ifdef ASSERT
+ if (out_regs[c_arg].first()->is_Register()) {
+ reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
+ } else if (out_regs[c_arg].first()->is_XMMRegister()) {
+ freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
+ }
+#endif
+ break;
+ }
case T_OBJECT:
+ assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
@@ -1443,7 +1783,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Pre-load a static method's oop into r14. Used both by locking code and
// the normal JNI call code.
- if (method->is_static()) {
+ if (method->is_static() && !is_critical_native) {
// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
@@ -1509,6 +1849,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label lock_done;
if (method->is_synchronized()) {
+ assert(!is_critical_native, "unhandled");
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
@@ -1572,13 +1913,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get JNIEnv* which is first argument to native
-
- __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+ if (!is_critical_native) {
+ __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+ }
// Now set thread in native
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
- __ call(RuntimeAddress(method->native_function()));
+ __ call(RuntimeAddress(native_func));
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
@@ -1634,6 +1976,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
}
+ Label after_transition;
// check for safepoint operation in progress and/or pending suspend requests
{
@@ -1659,16 +2002,28 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+ if (!is_critical_native) {
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+ } else {
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
+ }
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
+
+ if (is_critical_native) {
+ // The call above performed the transition to thread_in_Java so
+ // skip the transition logic below.
+ __ jmpb(after_transition);
+ }
+
__ bind(Continue);
}
// change thread state
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
+ __ bind(after_transition);
Label reguard;
Label reguard_done;
@@ -1746,17 +2101,21 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ verify_oop(rax);
}
- // reset handle block
- __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
- __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+ if (!is_critical_native) {
+ // reset handle block
+ __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+ }
// pop our frame
__ leave();
- // Any exception pending?
- __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
- __ jcc(Assembler::notEqual, exception_pending);
+ if (!is_critical_native) {
+ // Any exception pending?
+ __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+ __ jcc(Assembler::notEqual, exception_pending);
+ }
// Return
@@ -1764,12 +2123,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Unexpected paths are out of line and go here
- // forward the exception
- __ bind(exception_pending);
-
- // and forward the exception
- __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ if (!is_critical_native) {
+ // forward the exception
+ __ bind(exception_pending);
+ // and forward the exception
+ __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ }
// Slow path locking & unlocking
if (method->is_synchronized()) {
@@ -1876,6 +2236,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
+
+ if (is_critical_native) {
+ nm->set_lazy_critical_native(true);
+ }
+
return nm;
}
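The cycle described in the register-rotation comment above is the classic parallel-move problem; a minimal standalone illustration of breaking such a cycle with one temporary (rbx plays the role of the temporary in the stub):

    #include <cstdio>

    // Sketch only: breaking a copy cycle a <- b <- c <- a with one
    // temporary, as the wrapper does by parking rdi in rbx first.
    int main() {
      int a = 1, b = 2, c = 3;  // stand-ins for three registers
      int tmp = a;              // park the register that is written first
      a = b;                    // now safe: a's old value is saved in tmp
      b = c;
      c = tmp;
      printf("a=%d b=%d c=%d\n", a, b, c);  // a=2 b=3 c=1
      return 0;
    }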
diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp
index 798f6b350..970aab747 100644
--- a/src/os/windows/vm/os_windows.cpp
+++ b/src/os/windows/vm/os_windows.cpp
@@ -2088,7 +2088,6 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
#elif _M_AMD64
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Rip;
- NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
assert(pc[0] == 0xF7, "not an idiv opcode");
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
assert(ctx->Rax == min_jint, "unexpected idiv exception");
@@ -2100,7 +2099,6 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
#else
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Eip;
- NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
assert(pc[0] == 0xF7, "not an idiv opcode");
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
assert(ctx->Eax == min_jint, "unexpected idiv exception");
@@ -5336,4 +5334,3 @@ BOOL os::Advapi32Dll::AdvapiAvailable() {
}
#endif
-
diff --git a/src/share/tools/ProjectCreator/BuildConfig.java b/src/share/tools/ProjectCreator/BuildConfig.java
index 82d7279c6..92fbf7746 100644
--- a/src/share/tools/ProjectCreator/BuildConfig.java
+++ b/src/share/tools/ProjectCreator/BuildConfig.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -243,6 +243,7 @@ class BuildConfig {
sysDefines.add("_WINDOWS");
sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
+ sysDefines.add("INCLUDE_TRACE");
sysDefines.add("_JNI_IMPLEMENTATION_");
if (vars.get("PlatformName").equals("Win32")) {
sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index fbda48f2f..89b42dc9c 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1592,6 +1592,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// this happened while running the JCK invokevirtual tests under doit. TKR
ciMethod* cha_monomorphic_target = NULL;
ciMethod* exact_target = NULL;
+ Value better_receiver = NULL;
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!target->is_method_handle_invoke()) {
Value receiver = NULL;
@@ -1653,6 +1654,18 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
ciInstanceKlass* singleton = NULL;
if (target->holder()->nof_implementors() == 1) {
singleton = target->holder()->implementor(0);
+
+ assert(holder->is_interface(), "invokeinterface to non interface?");
+ ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder;
+ // The number of implementors for decl_interface is less than or
+ // equal to the number of implementors for target->holder(), so
+ // if the number of implementors of target->holder() == 1 then
+ // the number of implementors for decl_interface is 0 or 1. If
+ // it's 0 then no class implements decl_interface and there's
+ // no point in inlining.
+ if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) {
+ singleton = NULL;
+ }
}
if (singleton) {
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
@@ -1667,7 +1680,9 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
c->set_incompatible_class_change_check();
c->set_direct_compare(klass->is_final());
- append_split(c);
+ // pass the result of the checkcast so that the compiler has
+ // more accurate type info in the inlinee
+ better_receiver = append_split(c);
}
}
}
@@ -1709,7 +1724,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
}
if (!success) {
// static binding => check if callee is ok
- success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
+ success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver);
}
CHECK_BAILOUT();
@@ -3034,7 +3049,7 @@ int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
}
-bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
+bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Value receiver) {
// Clear out any existing inline bailout condition
clear_inline_bailout();
@@ -3056,7 +3071,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
} else if (callee->is_abstract()) {
INLINE_BAILOUT("abstract")
} else {
- return try_inline_full(callee, holder_known);
+ return try_inline_full(callee, holder_known, NULL, receiver);
}
}
@@ -3405,7 +3420,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}
-bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
+bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
@@ -3541,6 +3556,9 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
Value arg = caller_state->stack_at_inc(i);
// NOTE: take base() of arg->type() to avoid problems storing
// constants
+ if (receiver != NULL && par_no == 0) {
+ arg = receiver;
+ }
store_local(callee_state, arg, arg->type()->base(), par_no);
}
}
@@ -3683,56 +3701,61 @@ bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
// Get the two MethodHandle inputs from the Phi.
Value op1 = phi->operand_at(0);
Value op2 = phi->operand_at(1);
- ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
- ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();
-
- // Set the callee to have access to the class and signature in
- // the MethodHandleCompiler.
- mh1->set_callee(callee);
- mh1->set_caller(method());
- mh2->set_callee(callee);
- mh2->set_caller(method());
-
- // Get adapters for the MethodHandles.
- ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
- ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
-
- if (mh1_adapter != NULL && mh2_adapter != NULL) {
- set_inline_cleanup_info();
-
- // Build the If guard
- BlockBegin* one = new BlockBegin(next_bci());
- BlockBegin* two = new BlockBegin(next_bci());
- BlockBegin* end = new BlockBegin(next_bci());
- Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
- block()->set_end(iff->as_BlockEnd());
-
- // Connect up the states
- one->merge(block()->end()->state());
- two->merge(block()->end()->state());
-
- // Save the state for the second inlinee
- ValueStack* state_before = copy_state_before();
-
- // Parse first adapter
- _last = _block = one;
- if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
- restore_inline_cleanup_info();
- block()->clear_end(); // remove appended iff
- return false;
- }
+ ObjectType* op1type = op1->type()->as_ObjectType();
+ ObjectType* op2type = op2->type()->as_ObjectType();
+
+ if (op1type->is_constant() && op2type->is_constant()) {
+ ciMethodHandle* mh1 = op1type->constant_value()->as_method_handle();
+ ciMethodHandle* mh2 = op2type->constant_value()->as_method_handle();
+
+ // Set the callee to have access to the class and signature in
+ // the MethodHandleCompiler.
+ mh1->set_callee(callee);
+ mh1->set_caller(method());
+ mh2->set_callee(callee);
+ mh2->set_caller(method());
+
+ // Get adapters for the MethodHandles.
+ ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
+ ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
+
+ if (mh1_adapter != NULL && mh2_adapter != NULL) {
+ set_inline_cleanup_info();
+
+ // Build the If guard
+ BlockBegin* one = new BlockBegin(next_bci());
+ BlockBegin* two = new BlockBegin(next_bci());
+ BlockBegin* end = new BlockBegin(next_bci());
+ Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
+ block()->set_end(iff->as_BlockEnd());
+
+ // Connect up the states
+ one->merge(block()->end()->state());
+ two->merge(block()->end()->state());
+
+ // Save the state for the second inlinee
+ ValueStack* state_before = copy_state_before();
+
+ // Parse first adapter
+ _last = _block = one;
+ if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end, NULL)) {
+ restore_inline_cleanup_info();
+ block()->clear_end(); // remove appended iff
+ return false;
+ }
- // Parse second adapter
- _last = _block = two;
- _state = state_before;
- if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
- restore_inline_cleanup_info();
- block()->clear_end(); // remove appended iff
- return false;
- }
+ // Parse second adapter
+ _last = _block = two;
+ _state = state_before;
+ if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end, NULL)) {
+ restore_inline_cleanup_info();
+ block()->clear_end(); // remove appended iff
+ return false;
+ }
- connect_to_end(end);
- return true;
+ connect_to_end(end);
+ return true;
+ }
}
}
}
diff --git a/src/share/vm/c1/c1_GraphBuilder.hpp b/src/share/vm/c1/c1_GraphBuilder.hpp
index 8b8800e74..aa8f45fa4 100644
--- a/src/share/vm/c1/c1_GraphBuilder.hpp
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp
@@ -337,9 +337,9 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
// inliners
- bool try_inline( ciMethod* callee, bool holder_known);
+ bool try_inline( ciMethod* callee, bool holder_known, Value receiver = NULL);
bool try_inline_intrinsics(ciMethod* callee);
- bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
+ bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver);
bool try_inline_jsr(int jsr_dest_bci);
// JSR 292 support
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index 9937e9d62..765dec480 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -597,7 +597,6 @@ address Runtime1::exception_handler_for_pc(JavaThread* thread) {
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
NOT_PRODUCT(_throw_range_check_exception_count++;)
- Events::log("throw_range_check");
char message[jintAsStringSize];
sprintf(message, "%d", index);
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
@@ -606,7 +605,6 @@ JRT_END
JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
NOT_PRODUCT(_throw_index_exception_count++;)
- Events::log("throw_index");
char message[16];
sprintf(message, "%d", index);
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
@@ -804,11 +802,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// Note also that in the presence of inlining it is not guaranteed
// that caller_method() == caller_code->method()
-
int bci = vfst.bci();
-
- Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc());
-
Bytecodes::Code code = caller_method()->java_code_at(bci);
#ifndef PRODUCT
diff --git a/src/share/vm/c1/c1_ValueMap.cpp b/src/share/vm/c1/c1_ValueMap.cpp
index aa2bf5af5..1f3152085 100644
--- a/src/share/vm/c1/c1_ValueMap.cpp
+++ b/src/share/vm/c1/c1_ValueMap.cpp
@@ -125,6 +125,7 @@ Value ValueMap::find_insert(Value x) {
// otherwise it is possible that they are not evaluated
f->pin(Instruction::PinGlobalValueNumbering);
}
+ assert(x->type()->tag() == f->type()->tag(), "should have same type");
return f;
diff --git a/src/share/vm/ci/bcEscapeAnalyzer.cpp b/src/share/vm/ci/bcEscapeAnalyzer.cpp
index 630594af4..2037b49b0 100644
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
case Bytecodes::_nop:
break;
case Bytecodes::_aconst_null:
- state.apush(empty_map);
+ state.apush(unknown_obj);
break;
case Bytecodes::_iconst_m1:
case Bytecodes::_iconst_0:
@@ -392,6 +392,8 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
if (tag.is_long() || tag.is_double()) {
// Only longs and doubles use 2 stack slots.
state.lpush();
+ } else if (tag.basic_type() == T_OBJECT) {
+ state.apush(unknown_obj);
} else {
state.spush();
}
diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp
index 12f4bb226..d00c9f78b 100644
--- a/src/share/vm/ci/ciEnv.hpp
+++ b/src/share/vm/ci/ciEnv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -284,6 +284,20 @@ public:
// Return state of appropriate compilability
int compilable() { return _compilable; }
+ const char* retry_message() const {
+ switch (_compilable) {
+ case ciEnv::MethodCompilable_not_at_tier:
+ return "retry at different tier";
+ case ciEnv::MethodCompilable_never:
+ return "not retryable";
+ case ciEnv::MethodCompilable:
+ return NULL;
+ default:
+ ShouldNotReachHere();
+ return NULL;
+ }
+ }
+
bool break_at_compile() { return _break_at_compile; }
void set_break_at_compile(bool z) { _break_at_compile = z; }
diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp
index b15446c6f..4458f46d7 100644
--- a/src/share/vm/classfile/dictionary.cpp
+++ b/src/share/vm/classfile/dictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -618,7 +618,8 @@ void Dictionary::print() {
ResourceMark rm;
HandleMark hm;
- tty->print_cr("Java system dictionary (classes=%d)", number_of_entries());
+ tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
+ table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index 88ccc91b7..04bb9d9f5 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,9 @@ SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
int SystemDictionary::_number_of_modifications = 0;
+int SystemDictionary::_sdgeneration = 0;
+const int SystemDictionary::_primelist[_prime_array_size] = {1009,2017,4049,5051,10103,
+ 20201,40423,99991};
oop SystemDictionary::_system_loader_lock_obj = NULL;
@@ -1178,8 +1181,8 @@ void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length,
klassOop SystemDictionary::find_shared_class(Symbol* class_name) {
if (shared_dictionary() != NULL) {
- unsigned int d_hash = dictionary()->compute_hash(class_name, Handle());
- int d_index = dictionary()->hash_to_index(d_hash);
+ unsigned int d_hash = shared_dictionary()->compute_hash(class_name, Handle());
+ int d_index = shared_dictionary()->hash_to_index(d_hash);
return shared_dictionary()->find_shared_class(d_index, d_hash, class_name);
} else {
return NULL;
@@ -1750,7 +1753,21 @@ void SystemDictionary::placeholders_do(OopClosure* blk) {
placeholders()->oops_do(blk);
}
-
+// Calculate a "good" system dictionary size based
+// on the predicted or currently loaded class count
+int SystemDictionary::calculate_systemdictionary_size(int classcount) {
+ int newsize = _old_default_sdsize;
+ if ((classcount > 0) && !DumpSharedSpaces) {
+ int desiredsize = classcount/_average_depth_goal;
+ for (newsize = _primelist[_sdgeneration]; _sdgeneration < _prime_array_size -1;
+ newsize = _primelist[++_sdgeneration]) {
+ if (desiredsize <= newsize) {
+ break;
+ }
+ }
+ }
+ return newsize;
+}
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
bool result = dictionary()->do_unloading(is_alive);
constraints()->purge_loader_constraints(is_alive);
@@ -1873,7 +1890,8 @@ void SystemDictionary::initialize(TRAPS) {
// Allocate arrays
assert(dictionary() == NULL,
"SystemDictionary should only be initialized once");
- _dictionary = new Dictionary(_nof_buckets);
+ _sdgeneration = 0;
+ _dictionary = new Dictionary(calculate_systemdictionary_size(PredictedLoadedClassCount));
_placeholders = new PlaceholderTable(_nof_buckets);
_number_of_modifications = 0;
_loader_constraints = new LoaderConstraintTable(_loader_constraint_size);
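calculate_systemdictionary_size() walks a fixed table of primes until it finds the first one large enough to keep the expected average bucket depth at or below _average_depth_goal. A self-contained model of the same selection; the constants are copied from this patch, but the function is a simplification (it drops the _sdgeneration state carried across calls and the DumpSharedSpaces check):

static const int primelist[] = {1009, 2017, 4049, 5051, 10103,
                                20201, 40423, 99991};
static const int prime_array_size   = 8;
static const int average_depth_goal = 3;
static const int old_default_sdsize = 1009;

int pick_dictionary_size(int classcount) {
  if (classcount <= 0) return old_default_sdsize;  // backward-compatible default
  int desired = classcount / average_depth_goal;
  for (int i = 0; i < prime_array_size; i++) {
    if (desired <= primelist[i]) return primelist[i];
  }
  return primelist[prime_array_size - 1];
}
// e.g. -XX:PredictedLoadedClassCount=30000 yields 10103 buckets,
// since 30000 / 3 = 10000 <= 10103.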
diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
index 528ecafbc..3abc5054d 100644
--- a/src/share/vm/classfile/systemDictionary.hpp
+++ b/src/share/vm/classfile/systemDictionary.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -346,6 +346,8 @@ public:
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive);
+ static int calculate_systemdictionary_size(int loadedclasses);
+
// Applies "f->do_oop" to all root oops in the system dictionary.
static void oops_do(OopClosure* f);
@@ -538,12 +540,20 @@ public:
_loader_constraint_size = 107, // number of entries in constraint table
_resolution_error_size = 107, // number of entries in resolution error table
_invoke_method_size = 139, // number of entries in invoke method table
- _nof_buckets = 1009 // number of buckets in hash table
+ _nof_buckets = 1009, // number of buckets in hash table for placeholders
+ _old_default_sdsize = 1009, // backward compat for system dictionary size
+ _prime_array_size = 8, // array of primes for system dictionary size
+ _average_depth_goal = 3 // goal for lookup length
};
// Static variables
+ // Hashtable sizes for the system dictionary, to allow growth:
+ // _sdgeneration indexes into the _primelist table of prime sizes
+ static int _sdgeneration;
+ static const int _primelist[_prime_array_size];
+
// Hashtable holding loaded classes.
static Dictionary* _dictionary;
diff --git a/src/share/vm/code/compiledIC.cpp b/src/share/vm/code/compiledIC.cpp
index 3496f70de..5b1172a6a 100644
--- a/src/share/vm/code/compiledIC.cpp
+++ b/src/share/vm/code/compiledIC.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -165,7 +165,6 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
instruction_address(), method->print_value_string(), entry);
}
- Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
// We can't check this anymore. With lazy deopt we could have already
// cleaned this IC entry before we even return. This is possible if
// we ran out of space in the inline cache buffer trying to do the
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index 31424e15f..65ee2b54b 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -462,6 +462,7 @@ void nmethod::init_defaults() {
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
+ _lazy_critical_native = 0;
_marked_for_deoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
@@ -704,7 +705,6 @@ nmethod::nmethod(
xtty->tail("print_native_nmethod");
}
}
- Events::log("Create nmethod " INTPTR_FORMAT, this);
}
// For dtrace wrappers
@@ -781,7 +781,6 @@ nmethod::nmethod(
xtty->tail("print_dtrace_nmethod");
}
}
- Events::log("Create nmethod " INTPTR_FORMAT, this);
}
#endif // def HAVE_DTRACE_H
@@ -889,13 +888,6 @@ nmethod::nmethod(
if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
print_nmethod(printnmethods);
}
-
- // Note: Do not verify in here as the CodeCache_lock is
- // taken which would conflict with the CompiledIC_lock
- // which taken during the verification of call sites.
- // (was bug - gri 10/25/99)
-
- Events::log("Create nmethod " INTPTR_FORMAT, this);
}
@@ -1386,7 +1378,7 @@ void nmethod::flush() {
assert_locked_or_safepoint(CodeCache_lock);
// completely deallocate this method
- EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
+ Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index 47c5a28cc..cad0eed1e 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -175,6 +175,7 @@ class nmethod : public CodeBlob {
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
+ unsigned int _lazy_critical_native:1; // Lazy JNI critical native
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
@@ -430,7 +431,10 @@ class nmethod : public CodeBlob {
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
- void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
+ void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
+
+ bool is_lazy_critical_native() const { return _lazy_critical_native; }
+ void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
int comp_level() const { return _comp_level; }
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index c6f030b31..046f4d1ba 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -44,6 +44,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
+#include "utilities/events.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
@@ -189,6 +190,43 @@ CompileTask* CompileBroker::_task_free_list = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
+class CompilationLog : public StringEventLog {
+ public:
+ CompilationLog() : StringEventLog("Compilation events") {
+ }
+
+ void log_compile(JavaThread* thread, CompileTask* task) {
+ StringLogMessage lm;
+ stringStream msg = lm.stream();
+ // msg.time_stamp().update_to(tty->time_stamp().ticks());
+ task->print_compilation(&msg, true);
+ log(thread, "%s", (const char*)lm);
+ }
+
+ void log_nmethod(JavaThread* thread, nmethod* nm) {
+ log(thread, "nmethod " INTPTR_FORMAT " code [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+ nm, nm->code_begin(), nm->code_end());
+ }
+
+ void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) {
+ StringLogMessage lm;
+ lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason);
+ if (retry_message != NULL) {
+ lm.append(" (%s)", retry_message);
+ }
+ lm.print("\n");
+ log(thread, "%s", (const char*)lm);
+ }
+};
+
+static CompilationLog* _compilation_log = NULL;
+
+void compileBroker_init() {
+ if (LogEvents) {
+ _compilation_log = new CompilationLog();
+ }
+}
+
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
CompilerThread* thread = CompilerThread::current();
thread->set_task(task);
@@ -326,8 +364,12 @@ void CompileTask::print_line() {
// ------------------------------------------------------------------
// CompileTask::print_compilation_impl
-void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method, int osr_bci, bool is_blocking, const char* msg) {
- st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
+void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level,
+ bool is_osr_method, int osr_bci, bool is_blocking,
+ const char* msg, bool short_form) {
+ if (!short_form) {
+ st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
+ }
st->print("%4d ", compile_id); // print compilation number
// For unloaded methods the transition to zombie occurs after the
@@ -370,7 +412,9 @@ void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int
if (msg != NULL) {
st->print(" %s", msg);
}
- st->cr();
+ if (!short_form) {
+ st->cr();
+ }
}
// ------------------------------------------------------------------
@@ -426,12 +470,12 @@ void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
// ------------------------------------------------------------------
// CompileTask::print_compilation
-void CompileTask::print_compilation(outputStream* st) {
+void CompileTask::print_compilation(outputStream* st, bool short_form) {
oop rem = JNIHandles::resolve(method_handle());
assert(rem != NULL && rem->is_method(), "must be");
methodOop method = (methodOop) rem;
bool is_osr_method = osr_bci() != InvocationEntryBci;
- print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking());
+ print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), NULL, short_form);
}
// ------------------------------------------------------------------
@@ -1649,6 +1693,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
CompilerThread* thread = CompilerThread::current();
ResourceMark rm(thread);
+ if (LogEvents) {
+ _compilation_log->log_compile(thread, task);
+ }
+
// Common flags.
uint compile_id = task->compile_id();
int osr_bci = task->osr_bci();
@@ -1717,22 +1765,30 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
ci_env.record_method_not_compilable("compile failed", !TieredCompilation);
}
+ // Copy this bit to the enclosing block:
+ compilable = ci_env.compilable();
+
if (ci_env.failing()) {
- // Copy this bit to the enclosing block:
- compilable = ci_env.compilable();
+ const char* retry_message = ci_env.retry_message();
+ if (_compilation_log != NULL) {
+ _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
+ }
if (PrintCompilation) {
- const char* reason = ci_env.failure_reason();
- if (compilable == ciEnv::MethodCompilable_not_at_tier) {
- tty->print_cr("%4d COMPILE SKIPPED: %s (retry at different tier)", compile_id, reason);
- } else if (compilable == ciEnv::MethodCompilable_never) {
- tty->print_cr("%4d COMPILE SKIPPED: %s (not retryable)", compile_id, reason);
- } else if (compilable == ciEnv::MethodCompilable) {
- tty->print_cr("%4d COMPILE SKIPPED: %s", compile_id, reason);
+ tty->print("%4d COMPILE SKIPPED: %s", compile_id, ci_env.failure_reason());
+ if (retry_message != NULL) {
+ tty->print(" (%s)", retry_message);
}
+ tty->cr();
}
} else {
task->mark_success();
task->set_num_inlined_bytecodes(ci_env.num_inlined_bytecodes());
+ if (_compilation_log != NULL) {
+ nmethod* code = task->code();
+ if (code != NULL) {
+ _compilation_log->log_nmethod(thread, code);
+ }
+ }
}
}
pop_jni_handle_block();
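Taken together, these compileBroker.cpp hunks route every compile attempt through the new CompilationLog ring buffer created in compileBroker_init() when LogEvents is enabled. A hedged pseudo-trace of the lifecycle the hunks implement; CompilationLog is VM-internal, so this is description, not an API:

// compileBroker_init()                         // at VM startup, if LogEvents
//   _compilation_log = new CompilationLog();   // "Compilation events" ring
//
// invoke_compiler_on_method(task)
//   _compilation_log->log_compile(thread, task);     // short_form line
//   ... run C1/C2 ...
//   on failure: log_failure(thread, task,
//                   ci_env.failure_reason(), ci_env.retry_message());
//   on success: log_nmethod(thread, task->code());   // code [begin, end]
//
// The buffered lines later surface in the "Compilation events" section
// of an hs_err crash report.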
diff --git a/src/share/vm/compiler/compileBroker.hpp b/src/share/vm/compiler/compileBroker.hpp
index ed559270f..1ee2c5419 100644
--- a/src/share/vm/compiler/compileBroker.hpp
+++ b/src/share/vm/compiler/compileBroker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,12 +98,16 @@ class CompileTask : public CHeapObj {
void set_prev(CompileTask* prev) { _prev = prev; }
private:
- static void print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false, const char* msg = NULL);
+ static void print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level,
+ bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
+ const char* msg = NULL, bool short_form = false);
public:
- void print_compilation(outputStream* st = tty);
+ void print_compilation(outputStream* st = tty, bool short_form = false);
static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL) {
- print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(), nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false, msg);
+ print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
+ nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
+ msg);
}
static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
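The new short_form parameter lets the event log reuse the PrintCompilation line format while dropping the leading timestamp and trailing newline, since StringLogMessage provides its own framing. A brief usage sketch; task stands in for a CompileTask* and stringStream is HotSpot's internal stream type:

// Default, human-oriented form: timestamp plus trailing newline.
task->print_compilation(tty);                   // short_form defaults to false

// Event-log form: one compact line, the caller controls framing.
stringStream msg;
task->print_compilation(&msg, /* short_form */ true);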
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 39b57341e..986495cbe 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -5594,6 +5594,7 @@ void CMSCollector::do_remark_parallel() {
GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
+ ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
@@ -5608,6 +5609,8 @@ void CMSCollector::do_remark_non_parallel() {
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
+ ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
+
MarkRefsIntoAndScanClosure
mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
&_markStack, &_revisitStack, this,
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 35470ea85..c3dd180be 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1238,9 +1238,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
SvcGCMarker sgcm(SvcGCMarker::FULL);
ResourceMark rm;
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- }
+ print_heap_before_gc();
HRSPhaseSetter x(HRSPhaseFullGC);
verify_region_sets_optional();
@@ -1492,9 +1490,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
_hrs.verify_optional();
verify_region_sets_optional();
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
+ print_heap_after_gc();
g1mm()->update_sizes();
post_full_gc_dump();
@@ -3560,9 +3556,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm;
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- }
+ print_heap_before_gc();
HRSPhaseSetter x(HRSPhaseEvacuation);
verify_region_sets_optional();
@@ -3937,9 +3931,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
+ print_heap_after_gc();
g1mm()->update_sizes();
if (G1SummarizeRSetStats &&
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index c12bb5660..f32030b45 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,6 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- EventMark m("1 mark object");
TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace(" 1");
@@ -292,7 +291,6 @@ void G1MarkSweep::mark_sweep_phase2() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();
- EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("2");
@@ -337,7 +335,6 @@ void G1MarkSweep::mark_sweep_phase3() {
Generation* pg = g1h->perm_gen();
// Adjust the pointers to reflect the new locations
- EventMark m("3 adjust pointers");
TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("3");
@@ -402,7 +399,6 @@ void G1MarkSweep::mark_sweep_phase4() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();
- EventMark m("4 compact heap");
TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("4");
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index 01fb65912..52efba0de 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -132,9 +132,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- }
+ heap->print_heap_before_gc();
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@@ -377,9 +375,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
+ heap->print_heap_after_gc();
heap->post_full_gc_dump();
@@ -504,7 +500,6 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- EventMark m("1 mark object");
TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
trace(" 1");
@@ -563,7 +558,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
void PSMarkSweep::mark_sweep_phase2() {
- EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
trace("2");
@@ -608,7 +602,6 @@ static PSAlwaysTrueClosure always_true;
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
- EventMark m("3 adjust pointers");
TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
trace("3");
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index c2215280a..091cf73dd 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -983,9 +983,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
// We need to track unique mark sweep invocations as well.
_total_invocations++;
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- }
+ heap->print_heap_before_gc();
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@@ -1838,7 +1836,6 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction)
{
- EventMark m("2 summarize");
TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
// trace("2");
@@ -2237,9 +2234,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
collection_exit.update();
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
+ heap->print_heap_after_gc();
if (PrintGCTaskTimeStamps) {
gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
INT64_FORMAT,
@@ -2352,7 +2347,6 @@ GCTaskManager* const PSParallelCompact::gc_task_manager() {
void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction) {
// Recursively traverse all live objects and mark them
- EventMark m("1 mark object");
TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
ParallelScavengeHeap* heap = gc_heap();
@@ -2438,7 +2432,6 @@ static PSAlwaysTrueClosure always_true;
void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations
- EventMark m("3 adjust roots");
TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
// General strong roots.
@@ -2469,7 +2462,6 @@ void PSParallelCompact::adjust_roots() {
}
void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
- EventMark m("4 compact perm");
TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
// trace("4");
@@ -2647,7 +2639,6 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
}
void PSParallelCompact::compact() {
- EventMark m("5 compact");
// trace("5");
TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
@@ -3502,4 +3493,3 @@ void PSParallelCompact::compact_prologue() {
_updated_int_array_klass_obj = (klassOop)
summary_data().calc_new_pointer(Universe::intArrayKlassObj());
}
-
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index 0cf826eca..f3cf14203 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -295,9 +295,7 @@ bool PSScavenge::invoke_no_policy() {
heap->record_gen_tops_before_GC();
}
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- }
+ heap->print_heap_before_gc();
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
@@ -643,9 +641,7 @@ bool PSScavenge::invoke_no_policy() {
Universe::verify(false);
}
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
+ heap->print_heap_after_gc();
if (ZapUnusedHeapArea) {
young_gen->eden_space()->check_mangled_unused_area_complete();
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index 413248ca4..c65e1291f 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,31 @@ int CollectedHeap::_fire_out_of_memory_count = 0;
size_t CollectedHeap::_filler_array_max_size = 0;
+template <>
+void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
+ st->print_cr("GC heap %s", m.is_before ? "before" : "after");
+ st->print_raw(m);
+}
+
+void GCHeapLog::log_heap(bool before) {
+ if (!should_log()) {
+ return;
+ }
+
+ jlong timestamp = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+ MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
+ int index = compute_log_index();
+ _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
+ _records[index].timestamp = timestamp;
+ _records[index].data.is_before = before;
+ stringStream st(_records[index].data.buffer(), _records[index].data.size());
+ if (before) {
+ Universe::print_heap_before_gc(&st);
+ } else {
+ Universe::print_heap_after_gc(&st);
+ }
+}
+
// Memory state functions.
@@ -81,6 +106,12 @@ CollectedHeap::CollectedHeap() : _n_par_threads(0)
80, GCCause::to_string(_gc_lastcause), CHECK);
}
_defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
+ // Create the ring log
+ if (LogEvents) {
+ _gc_heap_log = new GCHeapLog();
+ } else {
+ _gc_heap_log = NULL;
+ }
}
void CollectedHeap::pre_initialize() {
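GCHeapLog follows the EventLogBase pattern this patch introduces elsewhere: a fixed-size ring of records guarded by a non-safepointing mutex, with compute_log_index() handing out the next slot to overwrite. A minimal, illustrative ring discipline (not the EventLogBase template itself):

#include <string.h>

struct HeapRecord {
  long timestamp;
  bool is_before;
  char buffer[1024];
};

class SimpleRingLog {
  static const int N = 10;       // assumed small ring, like the event logs
  HeapRecord _records[N];
  int _index;                    // next slot to overwrite
 public:
  SimpleRingLog() : _index(0) { memset(_records, 0, sizeof(_records)); }
  HeapRecord* next_slot() {      // analogous to compute_log_index()
    HeapRecord* r = &_records[_index];
    _index = (_index + 1) % N;   // oldest entry is overwritten first
    return r;
  }
};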
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index c54dbbfc2..5f1b284e7 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
+#include "utilities/events.hpp"
// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
@@ -43,6 +44,29 @@ class AdaptiveSizePolicy;
class Thread;
class CollectorPolicy;
+class GCMessage : public FormatBuffer<1024> {
+ public:
+ bool is_before;
+
+ GCMessage() {}
+};
+
+class GCHeapLog : public EventLogBase<GCMessage> {
+ private:
+ void log_heap(bool before);
+
+ public:
+ GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
+
+ void log_heap_before() {
+ log_heap(true);
+ }
+ void log_heap_after() {
+ log_heap(false);
+ }
+};
+
//
// CollectedHeap
// SharedHeap
@@ -62,6 +86,8 @@ class CollectedHeap : public CHeapObj {
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;
+ GCHeapLog* _gc_heap_log;
+
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
bool _defer_initial_card_mark;
@@ -618,6 +644,27 @@ class CollectedHeap : public CHeapObj {
// Default implementation does nothing.
virtual void print_tracing_info() const = 0;
+ // If PrintHeapAtGC is set, call the appropriate routines.
+ void print_heap_before_gc() {
+ if (PrintHeapAtGC) {
+ Universe::print_heap_before_gc();
+ }
+ if (_gc_heap_log != NULL) {
+ _gc_heap_log->log_heap_before();
+ }
+ }
+ void print_heap_after_gc() {
+ if (PrintHeapAtGC) {
+ Universe::print_heap_after_gc();
+ }
+ if (_gc_heap_log != NULL) {
+ _gc_heap_log->log_heap_after();
+ }
+ }
+
+ // Allocate GCHeapLog during VM startup
+ static void initialize_heap_log();
+
// Heap verification
virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
diff --git a/src/share/vm/memory/gcLocker.cpp b/src/share/vm/memory/gcLocker.cpp
index db0a79d4d..9acd3616a 100644
--- a/src/share/vm/memory/gcLocker.cpp
+++ b/src/share/vm/memory/gcLocker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,38 +31,93 @@ volatile jint GC_locker::_jni_lock_count = 0;
volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;
+jlong GC_locker::_wait_begin = 0;
+
+#ifdef ASSERT
+volatile jint GC_locker::_debug_jni_lock_count = 0;
+#endif
+
+
+#ifdef ASSERT
+void GC_locker::verify_critical_count() {
+ if (SafepointSynchronize::is_at_safepoint()) {
+ assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
+ int count = 0;
+ // Count the number of threads with critical operations in progress
+ for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
+ if (thr->in_critical()) {
+ count++;
+ }
+ }
+ if (_jni_lock_count != count) {
+ tty->print_cr("critical counts don't match: %d != %d", _jni_lock_count, count);
+ for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
+ if (thr->in_critical()) {
+ tty->print_cr(INTPTR_FORMAT " in_critical %d", thr, thr->in_critical());
+ }
+ }
+ }
+ assert(_jni_lock_count == count, "must be equal");
+ }
+}
+#endif
+
+bool GC_locker::check_active_before_gc() {
+ assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+ if (is_active() && !_needs_gc) {
+ verify_critical_count();
+ _needs_gc = true;
+ if (PrintJNIGCStalls && PrintGCDetails) {
+ ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+ _wait_begin = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+ gclog_or_tty->print_cr(INT64_FORMAT ": Setting _needs_gc. Thread \"%s\" %d locked.",
+ _wait_begin, Thread::current()->name(), _jni_lock_count);
+ }
+
+ }
+ return is_active();
+}
void GC_locker::stall_until_clear() {
assert(!JavaThread::current()->in_critical(), "Would deadlock");
- if (PrintJNIGCStalls && PrintGCDetails) {
- ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
- gclog_or_tty->print_cr(
- "Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
- JavaThread::current()->name());
- }
MutexLocker ml(JNICritical_lock);
+
+ if (needs_gc()) {
+ if (PrintJNIGCStalls && PrintGCDetails) {
+ ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+ gclog_or_tty->print_cr(INT64_FORMAT ": Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
+ (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - _wait_begin, Thread::current()->name(), _jni_lock_count);
+ }
+ }
+
// Wait for _needs_gc to be cleared
- while (GC_locker::needs_gc()) {
+ while (needs_gc()) {
JNICritical_lock->wait();
}
}
-void GC_locker::jni_lock_slow() {
+void GC_locker::jni_lock(JavaThread* thread) {
+ assert(!thread->in_critical(), "shouldn't currently be in a critical region");
MutexLocker mu(JNICritical_lock);
// Block entering threads if we know at least one thread is in a
// JNI critical region and we need a GC.
// We check that at least one thread is in a critical region before
// blocking because blocked threads are woken up by a thread exiting
// a JNI critical region.
- while ((is_jni_active() && needs_gc()) || _doing_gc) {
+ while ((needs_gc() && is_jni_active()) || _doing_gc) {
JNICritical_lock->wait();
}
- jni_lock();
+ thread->enter_critical();
+ _jni_lock_count++;
+ increment_debug_jni_lock_count();
}
-void GC_locker::jni_unlock_slow() {
+void GC_locker::jni_unlock(JavaThread* thread) {
+ assert(thread->in_last_critical(), "should be exiting critical region");
MutexLocker mu(JNICritical_lock);
- jni_unlock();
+ _jni_lock_count--;
+ decrement_debug_jni_lock_count();
+ thread->exit_critical();
if (needs_gc() && !is_jni_active()) {
// We're the last thread out. Cause a GC to occur.
// GC will also check is_active, so this check is not
@@ -74,11 +129,17 @@ void GC_locker::jni_unlock_slow() {
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
+ if (PrintJNIGCStalls && PrintGCDetails) {
+ ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+ gclog_or_tty->print_cr(INT64_FORMAT ": Thread \"%s\" is performing GC after exiting critical section, %d locked",
+ (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - _wait_begin, Thread::current()->name(), _jni_lock_count);
+ }
Universe::heap()->collect(GCCause::_gc_locker);
}
_doing_gc = false;
}
- clear_needs_gc();
+
+ _needs_gc = false;
JNICritical_lock->notify_all();
}
}
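The rewritten locker drops the old atomic fast path in favor of a per-thread critical count plus a global _jni_lock_count that is only authoritative once _needs_gc has been set at a safepoint (via set_jni_lock_count(), declared in the header below). A hedged, single-threaded model of that handshake; the real code synchronizes on JNICritical_lock and at safepoints:

struct ThreadModel { int critical_depth; };

static int  g_jni_lock_count = 0;   // authoritative only once g_needs_gc set
static bool g_needs_gc       = false;

// At a safepoint: record how many threads are currently inside a
// critical region, mirroring GC_locker::set_jni_lock_count().
void safepoint_sets_needs_gc(ThreadModel* threads, int n) {
  g_needs_gc = true;
  g_jni_lock_count = 0;
  for (int i = 0; i < n; i++) {
    if (threads[i].critical_depth > 0) g_jni_lock_count++;
  }
}

// Slow-path exit: the last thread out triggers the stalled GC and
// releases the waiters (modeled here by clearing the flag).
void exit_critical(ThreadModel* t) {
  if (--t->critical_depth == 0 && g_needs_gc) {
    if (--g_jni_lock_count == 0) {
      // run the pending collection, then:
      g_needs_gc = false;
    }
  }
}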
diff --git a/src/share/vm/memory/gcLocker.hpp b/src/share/vm/memory/gcLocker.hpp
index c9d913abf..ae4cb7f53 100644
--- a/src/share/vm/memory/gcLocker.hpp
+++ b/src/share/vm/memory/gcLocker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,53 +51,70 @@
class GC_locker: public AllStatic {
private:
- static volatile jint _jni_lock_count; // number of jni active instances
+ // The _jni_lock_count keeps track of the number of threads that are
+ // currently in a critical region. It's only kept up to date when
+ // _needs_gc is true. The current value is computed during
+ // safepointing and decremented during the slow path of GC_locker
+ // unlocking.
+ static volatile jint _jni_lock_count; // number of jni active instances.
+
static volatile jint _lock_count; // number of other active instances
static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC
+ static jlong _wait_begin; // Timestamp for the setting of _needs_gc.
+ // Used only by printing code.
+
+#ifdef ASSERT
+ // This lock count is updated for all operations and is used to
+ // validate the jni_lock_count that is computed during safepoints.
+ static volatile jint _debug_jni_lock_count;
+#endif
+
// Accessors
static bool is_jni_active() {
+ assert(_needs_gc, "only valid when _needs_gc is set");
return _jni_lock_count > 0;
}
- static void set_needs_gc() {
- assert(SafepointSynchronize::is_at_safepoint(),
- "needs_gc is only set at a safepoint");
- _needs_gc = true;
- }
-
- static void clear_needs_gc() {
- assert_lock_strong(JNICritical_lock);
- _needs_gc = false;
- }
-
- static void jni_lock() {
- Atomic::inc(&_jni_lock_count);
- CHECK_UNHANDLED_OOPS_ONLY(
- if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
- assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(),
- "locking failed");
- }
-
- static void jni_unlock() {
- Atomic::dec(&_jni_lock_count);
- CHECK_UNHANDLED_OOPS_ONLY(
- if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
- }
+ // At a safepoint, visit all threads and count the number of active
+ // JNI critical regions, verifying that the cached _jni_lock_count
+ // agrees with the per-thread counts.
+ static void verify_critical_count() NOT_DEBUG_RETURN;
- static void jni_lock_slow();
- static void jni_unlock_slow();
+ static void jni_lock(JavaThread* thread);
+ static void jni_unlock(JavaThread* thread);
public:
// Accessors
static bool is_active();
static bool needs_gc() { return _needs_gc; }
+
// Shorthand
- static bool is_active_and_needs_gc() { return is_active() && needs_gc();}
+ static bool is_active_and_needs_gc() { return needs_gc() && is_active(); }
- // Calls set_needs_gc() if is_active() is true. Returns is_active().
+ // In debug mode track the locking state at all times
+ static void increment_debug_jni_lock_count() {
+#ifdef ASSERT
+ assert(_debug_jni_lock_count >= 0, "bad value");
+ Atomic::inc(&_debug_jni_lock_count);
+#endif
+ }
+ static void decrement_debug_jni_lock_count() {
+#ifdef ASSERT
+ assert(_debug_jni_lock_count > 0, "bad value");
+ Atomic::dec(&_debug_jni_lock_count);
+#endif
+ }
+
+ // Set the current lock count
+ static void set_jni_lock_count(int count) {
+ _jni_lock_count = count;
+ verify_critical_count();
+ }
+
+ // Sets _needs_gc if is_active() is true. Returns is_active().
static bool check_active_before_gc();
// Stalls the caller (who should not be in a jni critical section)
@@ -131,22 +148,24 @@ class GC_locker: public AllStatic {
// JNI critical regions are the only participants in this scheme
// because they are, by spec, well bounded while in a critical region.
//
- // Each of the following two method is split into a fast path and a slow
- // path. JNICritical_lock is only grabbed in the slow path.
+ // Each of the following two methods is split into a fast path and a
+ // slow path. JNICritical_lock is only grabbed in the slow path.
// _needs_gc is initially false and every java thread will go
- // through the fast path (which does the same thing as the slow path
- // when _needs_gc is false). When GC happens at a safepoint,
- // GC_locker::is_active() is checked. Since there is no safepoint in the
- // fast path of lock_critical() and unlock_critical(), there is no race
- // condition between the fast path and GC. After _needs_gc is set at a
- // safepoint, every thread will go through the slow path after the safepoint.
- // Since after a safepoint, each of the following two methods is either
- // entered from the method entry and falls into the slow path, or is
- // resumed from the safepoints in the method, which only exist in the slow
- // path. So when _needs_gc is set, the slow path is always taken, till
- // _needs_gc is cleared.
+ // through the fast path, which simply increments or decrements the
+ // current thread's critical count. When GC happens at a safepoint,
+ // GC_locker::is_active() is checked. Since there is no safepoint in
+ // the fast path of lock_critical() and unlock_critical(), there is
+ // no race condition between the fast path and GC. After _needs_gc
+ // is set at a safepoint, every thread will go through the slow path
+ // after the safepoint. Since after a safepoint, each of the
+ // following two methods is either entered from the method entry and
+ // falls into the slow path, or is resumed from the safepoints in
+ // the method, which only exist in the slow path. So when _needs_gc
+ // is set, the slow path is always taken, till _needs_gc is cleared.
static void lock_critical(JavaThread* thread);
static void unlock_critical(JavaThread* thread);
+
+ static address needs_gc_address() { return (address) &_needs_gc; }
};
diff --git a/src/share/vm/memory/gcLocker.inline.hpp b/src/share/vm/memory/gcLocker.inline.hpp
index 5c63e077b..64a04bffd 100644
--- a/src/share/vm/memory/gcLocker.inline.hpp
+++ b/src/share/vm/memory/gcLocker.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,16 +28,11 @@
#include "memory/gcLocker.hpp"
inline bool GC_locker::is_active() {
+ assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+ verify_critical_count();
return _lock_count > 0 || _jni_lock_count > 0;
}
-inline bool GC_locker::check_active_before_gc() {
- if (is_active()) {
- set_needs_gc();
- }
- return is_active();
-}
-
inline void GC_locker::lock() {
// cast away volatile
Atomic::inc(&_lock_count);
@@ -56,24 +51,28 @@ inline void GC_locker::unlock() {
inline void GC_locker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) {
- if (!needs_gc()) {
- jni_lock();
- } else {
- jni_lock_slow();
+ if (needs_gc()) {
+ // jni_lock calls enter_critical under the lock so that the
+ // global lock count and the per-thread count stay in agreement.
+ jni_lock(thread);
+ return;
}
+ increment_debug_jni_lock_count();
}
thread->enter_critical();
}
inline void GC_locker::unlock_critical(JavaThread* thread) {
- thread->exit_critical();
- if (!thread->in_critical()) {
- if (!needs_gc()) {
- jni_unlock();
- } else {
- jni_unlock_slow();
+ if (thread->in_last_critical()) {
+ if (needs_gc()) {
+ // jni_unlock calls exit_critical under the lock so that
+ // the global lock count and the per-thread count stay in agreement.
+ jni_unlock(thread);
+ return;
}
+ decrement_debug_jni_lock_count();
}
+ thread->exit_critical();
}
#endif // SHARE_VM_MEMORY_GCLOCKER_INLINE_HPP
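For reference, lock_critical()/unlock_critical() are what back the JNI critical-region entry points; a typical native caller of the standard JNI API looks like this, and the span between Get and Release is exactly the window the per-thread critical count brackets:

#include <jni.h>

// Standard JNI usage (real API): between GetPrimitiveArrayCritical and
// ReleasePrimitiveArrayCritical the VM must not move the array.
static jlong sum_array(JNIEnv* env, jintArray arr) {
  jint len = env->GetArrayLength(arr);
  jint* p = (jint*) env->GetPrimitiveArrayCritical(arr, NULL);
  if (p == NULL) return 0;                 // allocation failure
  jlong s = 0;
  for (jint i = 0; i < len; i++) s += p[i];
  env->ReleasePrimitiveArrayCritical(arr, p, JNI_ABORT);  // read-only: no copy-back
  return s;
}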
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index b4de7f19e..3cd791d35 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -479,11 +479,9 @@ void GenCollectedHeap::do_collection(bool full,
const size_t perm_prev_used = perm_gen()->used();
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- if (Verbose) {
- gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
- }
+ print_heap_before_gc();
+ if (Verbose) {
+ gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
}
{
@@ -685,9 +683,7 @@ void GenCollectedHeap::do_collection(bool full,
AdaptiveSizePolicy* sp = gen_policy()->size_policy();
AdaptiveSizePolicyOutput(sp, total_collections());
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
+ print_heap_after_gc();
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
index 94a73dd31..925d968b6 100644
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -254,7 +254,6 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(int level,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- EventMark m("1 mark object");
TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
trace(" 1");
@@ -325,7 +324,6 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
Generation* pg = gch->perm_gen();
- EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
trace("2");
@@ -350,7 +348,6 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
Generation* pg = gch->perm_gen();
// Adjust the pointers to reflect the new locations
- EventMark m("3 adjust pointers");
TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
trace("3");
@@ -411,7 +408,6 @@ void GenMarkSweep::mark_sweep_phase4() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
Generation* pg = gch->perm_gen();
- EventMark m("4 compact heap");
TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
trace("4");
diff --git a/src/share/vm/oops/arrayOop.cpp b/src/share/vm/oops/arrayOop.cpp
index c8239c3d9..ee94c91f2 100644
--- a/src/share/vm/oops/arrayOop.cpp
+++ b/src/share/vm/oops/arrayOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#ifndef PRODUCT
#include "oops/arrayOop.hpp"
+#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
bool arrayOopDesc::check_max_length_overflow(BasicType type) {
diff --git a/src/share/vm/oops/constantPoolOop.cpp b/src/share/vm/oops/constantPoolOop.cpp
index 87280c671..493249d94 100644
--- a/src/share/vm/oops/constantPoolOop.cpp
+++ b/src/share/vm/oops/constantPoolOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -269,7 +269,7 @@ klassOop constantPoolOopDesc::klass_ref_at_if_loaded_check(constantPoolHandle th
methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool,
int which, Bytecodes::Code invoke_code) {
assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
- if (cpool->cache() == NULL) return false; // nothing to load yet
+ if (cpool->cache() == NULL) return NULL; // nothing to load yet
int cache_index = which - CPCACHE_INDEX_TAG;
if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
if (PrintMiscellaneous && (Verbose||WizardMode)) {
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index f07f82f4c..7104ace3d 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -570,9 +570,9 @@ class instanceKlass: public Klass {
void set_method_annotations_of(int idnum, typeArrayOop anno)
{ set_methods_annotations_of(idnum, anno, &_methods_annotations); }
void set_method_parameter_annotations_of(int idnum, typeArrayOop anno)
- { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+ { set_methods_annotations_of(idnum, anno, &_methods_parameter_annotations); }
void set_method_default_annotations_of(int idnum, typeArrayOop anno)
- { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+ { set_methods_annotations_of(idnum, anno, &_methods_default_annotations); }
// allocation
DEFINE_ALLOCATE_PERMANENT(instanceKlass);
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
index e2997eaac..345d0b224 100644
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -158,9 +158,7 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
kl->set_next_sibling(NULL);
kl->set_alloc_count(0);
kl->set_alloc_size(0);
-#ifdef TRACE_SET_KLASS_TRACE_ID
TRACE_SET_KLASS_TRACE_ID(kl, 0);
-#endif
kl->set_prototype_header(markOopDesc::prototype());
kl->set_biased_lock_revocation_count(0);
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
index d4f5d504f..035f44c9d 100644
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -265,9 +265,7 @@ class Klass : public Klass_vtbl {
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
-#ifdef TRACE_DEFINE_KLASS_TRACE_ID
TRACE_DEFINE_KLASS_TRACE_ID;
-#endif
public:
// returns the enclosing klassOop
@@ -688,9 +686,7 @@ class Klass : public Klass_vtbl {
jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
void set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }
-#ifdef TRACE_DEFINE_KLASS_METHODS
TRACE_DEFINE_KLASS_METHODS;
-#endif
// garbage collection support
virtual void follow_weak_klass_links(
diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp
index 879ec0152..6decdd72b 100644
--- a/src/share/vm/oops/methodOop.cpp
+++ b/src/share/vm/oops/methodOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -596,6 +596,11 @@ void methodOopDesc::clear_native_function() {
clear_code();
}
+address methodOopDesc::critical_native_function() {
+ methodHandle mh(this);
+ return NativeLookup::lookup_critical_entry(mh);
+}
+
void methodOopDesc::set_signature_handler(address handler) {
address* signature_handler = signature_handler_addr();
diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp
index cc9520a7f..60bb34bb2 100644
--- a/src/share/vm/oops/methodOop.hpp
+++ b/src/share/vm/oops/methodOop.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -403,6 +403,8 @@ class methodOopDesc : public oopDesc {
native_bind_event_is_interesting = true
};
address native_function() const { return *(native_function_addr()); }
+ address critical_native_function();
+
// Must specify a real function (not NULL).
// Use clear_native_function() to unregister.
void set_native_function(address function, bool post_event_flag);
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index 6c7b8edb8..2d045e47d 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -898,7 +898,7 @@ Node *LoopLimitNode::Identity( PhaseTransform *phase ) {
Node* CountedLoopNode::match_incr_with_optional_truncation(
Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) {
// Quick cutouts:
- if (expr == NULL || expr->req() != 3) return false;
+ if (expr == NULL || expr->req() != 3) return NULL;
Node *t1 = NULL;
Node *t2 = NULL;
diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
index fe51d11e6..4c503fea3 100644
--- a/src/share/vm/prims/jvm.cpp
+++ b/src/share/vm/prims/jvm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2716,7 +2716,9 @@ JVM_ENTRY(void, JVM_StopThread(JNIEnv* env, jobject jthread, jobject throwable))
}
oop java_thread = JNIHandles::resolve_non_null(jthread);
JavaThread* receiver = java_lang_Thread::thread(java_thread);
- Events::log("JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]", receiver, (address)java_thread, throwable);
+ Events::log_exception(JavaThread::current(),
+ "JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]",
+ receiver, (address)java_thread, throwable);
// First check if thread is alive
if (receiver != NULL) {
// Check if exception is getting thrown at self (use oop equality, since the
diff --git a/src/share/vm/prims/nativeLookup.cpp b/src/share/vm/prims/nativeLookup.cpp
index 874c54dbc..f8c627dcb 100644
--- a/src/share/vm/prims/nativeLookup.cpp
+++ b/src/share/vm/prims/nativeLookup.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -91,6 +91,19 @@ char* NativeLookup::pure_jni_name(methodHandle method) {
}
+char* NativeLookup::critical_jni_name(methodHandle method) {
+ stringStream st;
+ // Prefix
+ st.print("JavaCritical_");
+ // Klass name
+ mangle_name_on(&st, method->klass_name());
+ st.print("_");
+ // Method name
+ mangle_name_on(&st, method->name());
+ return st.as_string();
+}
+
+
char* NativeLookup::long_jni_name(methodHandle method) {
// Signature ignores the wrapping parentheses and the trailing return type
stringStream st;
@@ -193,6 +206,34 @@ address NativeLookup::lookup_style(methodHandle method, char* pure_name, const c
}
+address NativeLookup::lookup_critical_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style) {
+ if (!method->has_native_function()) {
+ return NULL;
+ }
+
+ address current_entry = method->native_function();
+
+ char dll_name[JVM_MAXPATHLEN];
+ int offset;
+ if (os::dll_address_to_library_name(current_entry, dll_name, sizeof(dll_name), &offset)) {
+ char ebuf[32];
+ void* dll = os::dll_load(dll_name, ebuf, sizeof(ebuf));
+ if (dll != NULL) {
+ // Compute complete JNI name for style
+ stringStream st;
+ if (os_style) os::print_jni_name_prefix_on(&st, args_size);
+ st.print_raw(pure_name);
+ st.print_raw(long_name);
+ if (os_style) os::print_jni_name_suffix_on(&st, args_size);
+ char* jni_name = st.as_string();
+ return (address)os::dll_lookup(dll, jni_name);
+ }
+ }
+
+ return NULL;
+}
+
+
// Check all the formats of native implementation name to see if there is one
// for the specified method.
address NativeLookup::lookup_entry(methodHandle method, bool& in_base_library, TRAPS) {
@@ -228,6 +269,58 @@ address NativeLookup::lookup_entry(methodHandle method, bool& in_base_library, T
return entry; // NULL indicates not found
}
+// Check all the formats of the critical native implementation name to see
+// if there is one for the specified method.
+address NativeLookup::lookup_critical_entry(methodHandle method) {
+ if (!CriticalJNINatives) return NULL;
+
+ if (method->is_synchronized() ||
+ !method->is_static()) {
+ // Only static non-synchronized methods are allowed
+ return NULL;
+ }
+
+ ResourceMark rm;
+ address entry = NULL;
+
+ Symbol* signature = method->signature();
+ for (int end = 0; end < signature->utf8_length(); end++) {
+ if (signature->byte_at(end) == 'L') {
+ // Don't allow object types
+ return NULL;
+ }
+ }
+
+ // Compute critical name
+ char* critical_name = critical_jni_name(method);
+
+ // Compute argument size
+ int args_size = 1 // JNIEnv
+ + (method->is_static() ? 1 : 0) // class for static methods
+ + method->size_of_parameters(); // actual parameters
+
+
+ // 1) Try JNI short style
+ entry = lookup_critical_style(method, critical_name, "", args_size, true);
+ if (entry != NULL) return entry;
+
+ // Compute long name
+ char* long_name = long_jni_name(method);
+
+ // 2) Try JNI long style
+ entry = lookup_critical_style(method, critical_name, long_name, args_size, true);
+ if (entry != NULL) return entry;
+
+ // 3) Try JNI short style without os prefix/suffix
+ entry = lookup_critical_style(method, critical_name, "", args_size, false);
+ if (entry != NULL) return entry;
+
+ // 4) Try JNI long style without os prefix/suffix
+ entry = lookup_critical_style(method, critical_name, long_name, args_size, false);
+
+ return entry; // NULL indicates not found
+}
+
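The four lookups above vary only the symbol text: short vs. long (signature-mangled) name, with and without the platform decoration added by os::print_jni_name_prefix_on/suffix_on. On win32 that decoration is roughly the stdcall form _name@N with N = 4 * args_size; for the hypothetical static int sum(int[]) used earlier, args_size = 1 (JNIEnv) + 1 (class, since the method is static) + 1 (the array reference) = 3, hence @12. A hedged, POSIX-only sketch of the same probe loop, with dlopen/dlsym standing in for os::dll_load/os::dll_lookup and a made-up library name:

    #include <dlfcn.h>
    #include <cstdio>

    // Hedged POSIX sketch of the probe order (compile with -ldl); the
    // library and symbol names are hypothetical. On Linux the variants
    // without os prefix/suffix coincide with the decorated ones.
    int main() {
      const char* candidates[] = {
        "JavaCritical_p_C_sum",       // short style
        "JavaCritical_p_C_sum___3I",  // long style: "__" + mangled "[I"
      };
      void* dll = dlopen("libdemo.so", RTLD_LAZY);   // os::dll_load analogue
      if (dll == NULL) return 1;
      for (const char* name : candidates) {
        void* entry = dlsym(dll, name);              // os::dll_lookup analogue
        if (entry != NULL) {
          std::printf("found %s at %p\n", name, entry);
          break;                                     // first match wins
        }
      }
      dlclose(dll);
      return 0;
    }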
// Check if there are any JVM TI prefixes which have been applied to the native method name.
// If any are found, remove them before attempting the lookup of the
// native implementation again.
diff --git a/src/share/vm/prims/nativeLookup.hpp b/src/share/vm/prims/nativeLookup.hpp
index bf2dab85d..c7ffeb3cd 100644
--- a/src/share/vm/prims/nativeLookup.hpp
+++ b/src/share/vm/prims/nativeLookup.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,15 +36,18 @@ class NativeLookup : AllStatic {
// JNI name computation
static char* pure_jni_name(methodHandle method);
static char* long_jni_name(methodHandle method);
+ static char* critical_jni_name(methodHandle method);
// Style specific lookup
static address lookup_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS);
+ static address lookup_critical_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style);
static address lookup_base (methodHandle method, bool& in_base_library, TRAPS);
static address lookup_entry(methodHandle method, bool& in_base_library, TRAPS);
static address lookup_entry_prefixed(methodHandle method, bool& in_base_library, TRAPS);
public:
// Lookup native function. May throw UnsatisfiedLinkError.
static address lookup(methodHandle method, bool& in_base_library, TRAPS);
+ static address lookup_critical_entry(methodHandle method);
// Lookup native functions in base library.
static address base_library_lookup(const char* class_name, const char* method_name, const char* signature);
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index bbef39590..891887853 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1400,10 +1400,11 @@ void Arguments::set_ergonomics_flags() {
void Arguments::set_parallel_gc_flags() {
assert(UseParallelGC || UseParallelOldGC, "Error");
- // If parallel old was requested, automatically enable parallel scavenge.
- if (UseParallelOldGC && !UseParallelGC && FLAG_IS_DEFAULT(UseParallelGC)) {
- FLAG_SET_DEFAULT(UseParallelGC, true);
+ // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
+ if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
+ FLAG_SET_DEFAULT(UseParallelOldGC, true);
}
+ FLAG_SET_DEFAULT(UseParallelGC, true);
// If no heap maximum was requested explicitly, use some reasonable fraction
// of the physical memory, up to a maximum of 1GB.
diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
index 44eeee8b9..0e2a9839b 100644
--- a/src/share/vm/runtime/deoptimization.cpp
+++ b/src/share/vm/runtime/deoptimization.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -339,7 +339,6 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
#ifdef ASSERT
assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
- Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
#endif
#else
intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
@@ -577,6 +576,8 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
}
#endif
+ Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
+ stub_frame.pc(), stub_frame.sp(), exec_mode);
UnrollBlock* info = array->unroll_block();
@@ -981,6 +982,7 @@ void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
#endif // COMPILER2
vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+ Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
#ifndef PRODUCT
if (TraceDeoptimization) {
@@ -1026,7 +1028,6 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re
// Compare the vframeArray to the collected vframes
assert(array->structural_compare(thread, chunk), "just checking");
- Events::log("# vframes = %d", (intptr_t)chunk->length());
#ifndef PRODUCT
if (TraceDeoptimization) {
@@ -1124,8 +1125,6 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);
- EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());
-
// Patch the nmethod so that when execution returns to it we will
// deopt the execution state and return to the interpreter.
fr.deoptimize(thread);
@@ -1239,6 +1238,10 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
// before we are done with it.
nmethodLocker nl(fr.pc());
+ // Log a message
+ Events::log_deopt_message(thread, "Uncommon trap %d fr.pc " INTPTR_FORMAT,
+ trap_request, fr.pc());
+
{
ResourceMark rm;
@@ -1249,7 +1252,6 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
DeoptAction action = trap_request_action(trap_request);
jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
- Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request);
vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
compiledVFrame* cvf = compiledVFrame::cast(vf);
diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
index ad6778c77..7ae9aa8d5 100644
--- a/src/share/vm/runtime/frame.cpp
+++ b/src/share/vm/runtime/frame.cpp
@@ -570,7 +570,7 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const {
InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
if (desc != NULL) {
st->print("~");
- desc->print();
+ desc->print_on(st);
NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
} else {
st->print("~interpreter");
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 5778d2442..a94b27160 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -26,6 +26,17 @@
#define SHARE_VM_RUNTIME_GLOBALS_HPP
#include "utilities/debug.hpp"
+
+// use this for flags that are true by default in the tiered build
+// but false in non-tiered builds, and vice versa
+#ifdef TIERED
+#define trueInTiered true
+#define falseInTiered false
+#else
+#define trueInTiered false
+#define falseInTiered true
+#endif
+
#ifdef TARGET_ARCH_x86
# include "globals_x86.hpp"
#endif
@@ -353,16 +364,6 @@ class CommandLineFlags {
#define falseInProduct true
#endif
-// use this for flags that are true per default in the tiered build
-// but false in non-tiered builds, and vice versa
-#ifdef TIERED
-#define trueInTiered true
-#define falseInTiered false
-#else
-#define trueInTiered false
-#define falseInTiered true
-#endif
-
#ifdef JAVASE_EMBEDDED
#define falseInEmbedded false
#else
@@ -658,6 +659,12 @@ class CommandLineFlags {
develop(bool, SpecialArraysEquals, true, \
"special version of Arrays.equals(char[],char[])") \
\
+ product(bool, CriticalJNINatives, true, \
+ "check for critical JNI entry points") \
+ \
+ notproduct(bool, StressCriticalJNINatives, false, \
+ "Exercise register saving code in critical natives") \
+ \
product(bool, UseSSE42Intrinsics, false, \
"SSE4.2 versions of intrinsics") \
\
@@ -735,8 +742,11 @@ class CommandLineFlags {
product(bool, MaxFDLimit, true, \
"Bump the number of file descriptors to max in solaris.") \
\
- notproduct(bool, LogEvents, trueInDebug, \
- "Enable Event log") \
+ diagnostic(bool, LogEvents, true, \
+ "Enable the various ring buffer event logs") \
+ \
+ diagnostic(intx, LogEventsBufferEntries, 10, \
+ "Enable the various ring buffer event logs") \
\
product(bool, BytecodeVerificationRemote, true, \
"Enables the Java bytecode verifier for remote classes") \
@@ -1042,6 +1052,9 @@ class CommandLineFlags {
notproduct(bool, PrintSystemDictionaryAtExit, false, \
"Prints the system dictionary at exit") \
\
+ experimental(intx, PredictedLoadedClassCount, 0, \
+ "Experimental: Tune loaded class cache starting size.") \
+ \
diagnostic(bool, UnsyncloadClass, false, \
"Unstable: VM calls loadClass unsynchronized. Custom " \
"class loader must call VM synchronized for findClass " \
diff --git a/src/share/vm/runtime/init.cpp b/src/share/vm/runtime/init.cpp
index 4176cd82d..c0e870ea2 100644
--- a/src/share/vm/runtime/init.cpp
+++ b/src/share/vm/runtime/init.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@ void vtableStubs_init();
void InlineCacheBuffer_init();
void compilerOracle_init();
void compilationPolicy_init();
-
+void compileBroker_init();
// Initialization after compiler initialization
bool universe_post_init(); // must happen after compiler_init
@@ -120,6 +120,7 @@ jint init_globals() {
InlineCacheBuffer_init();
compilerOracle_init();
compilationPolicy_init();
+ compileBroker_init();
VMRegImpl::set_regName();
if (!universe_post_init()) {
diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp
index dddd27cc2..9fc156089 100644
--- a/src/share/vm/runtime/mutex.cpp
+++ b/src/share/vm/runtime/mutex.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1296,10 +1296,6 @@ void Monitor::set_owner_implementation(Thread *new_owner) {
assert(this->rank() >= 0, "bad lock rank");
- if (LogMultipleMutexLocking && locks != NULL) {
- Events::log("thread " INTPTR_FORMAT " locks %s, already owns %s", new_owner, name(), locks->name());
- }
-
// Deadlock avoidance rules require us to acquire Mutexes only in
// a global total order. For example m1 is the lowest ranked mutex
// that the thread holds and m2 is the mutex the thread is trying
@@ -1343,10 +1339,6 @@ void Monitor::set_owner_implementation(Thread *new_owner) {
#ifdef ASSERT
Monitor *locks = old_owner->owned_locks();
- if (LogMultipleMutexLocking && locks != this) {
- Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
- }
-
// remove "this" from the owned locks list
Monitor *prev = NULL;
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
index d33893120..b7910d680 100644
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -95,6 +95,7 @@
SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
volatile int SafepointSynchronize::_waiting_to_block = 0;
volatile int SafepointSynchronize::_safepoint_counter = 0;
+int SafepointSynchronize::_current_jni_active_count = 0;
long SafepointSynchronize::_end_of_last_safepoint = 0;
static volatile int PageArmed = 0 ; // safepoint polling page is RO|RW vs PROT_NONE
static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only
@@ -135,9 +136,11 @@ void SafepointSynchronize::begin() {
RuntimeService::record_safepoint_begin();
- {
MutexLocker mu(Safepoint_lock);
+ // Reset the count of active JNI critical threads
+ _current_jni_active_count = 0;
+
// Set number of threads to wait for, before we initiate the callbacks
_waiting_to_block = nof_threads;
TryingToBlock = 0 ;
@@ -375,6 +378,9 @@ void SafepointSynchronize::begin() {
OrderAccess::fence();
+ // Update the count of active JNI critical regions
+ GC_locker::set_jni_lock_count(_current_jni_active_count);
+
if (TraceSafepoint) {
VM_Operation *op = VMThread::vm_operation();
tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
@@ -392,7 +398,6 @@ void SafepointSynchronize::begin() {
  // Record how much time was spent on the above cleanup tasks
update_statistics_on_cleanup_end(os::javaTimeNanos());
}
- }
}
// Wake up all threads, so they are ready to resume execution after the safepoint
@@ -539,6 +544,42 @@ bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState st
}
+// See if the thread is running inside a lazy critical native and
+// update the thread critical count if so. Also set a suspend flag to
+// cause the native wrapper to return into the JVM to do the unlock
+// once the native finishes.
+void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
+ if (state == _thread_in_native &&
+ thread->has_last_Java_frame() &&
+ thread->frame_anchor()->walkable()) {
+ // This thread might be in a critical native nmethod so look at
+ // the top of the stack and increment the critical count if it
+ // is.
+ frame wrapper_frame = thread->last_frame();
+ CodeBlob* stub_cb = wrapper_frame.cb();
+ if (stub_cb != NULL &&
+ stub_cb->is_nmethod() &&
+ stub_cb->as_nmethod_or_null()->is_lazy_critical_native()) {
+ // A thread could potentially be in a critical native across
+ // more than one safepoint, so only update the critical state on
+ // the first one. When it returns it will perform the unlock.
+ if (!thread->do_critical_native_unlock()) {
+#ifdef ASSERT
+ if (!thread->in_critical()) {
+ GC_locker::increment_debug_jni_lock_count();
+ }
+#endif
+ thread->enter_critical();
+ // Make sure the native wrapper calls back on return to
+ // perform the needed critical unlock.
+ thread->set_critical_native_unlock();
+ }
+ }
+ }
+}
+
+
+
// -------------------------------------------------------------------------------------------------------
// Implementation of Safepoint callback point
@@ -585,6 +626,11 @@ void SafepointSynchronize::block(JavaThread *thread) {
_waiting_to_block--;
thread->safepoint_state()->set_has_called_back(true);
+ if (thread->in_critical()) {
+ // Notice that this thread is in a critical section
+ increment_jni_active_count();
+ }
+
// Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
if (_waiting_to_block == 0) {
Safepoint_lock->notify_all();
@@ -861,8 +907,13 @@ void ThreadSafepointState::examine_state_of_thread() {
// running, but are actually at a safepoint. We will happily
// agree and update the safepoint state here.
if (SafepointSynchronize::safepoint_safe(_thread, state)) {
- roll_forward(_at_safepoint);
- return;
+ roll_forward(_at_safepoint);
+ SafepointSynchronize::check_for_lazy_critical_native(_thread, state);
+ if (_thread->in_critical()) {
+ // Notice that this thread is in a critical section
+ SafepointSynchronize::increment_jni_active_count();
+ }
+ return;
}
if (state == _thread_in_vm) {
diff --git a/src/share/vm/runtime/safepoint.hpp b/src/share/vm/runtime/safepoint.hpp
index 550715524..71255a27d 100644
--- a/src/share/vm/runtime/safepoint.hpp
+++ b/src/share/vm/runtime/safepoint.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "code/nmethod.hpp"
#include "memory/allocation.hpp"
#include "runtime/extendedPC.hpp"
+#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/ostream.hpp"
@@ -92,6 +93,7 @@ class SafepointSynchronize : AllStatic {
private:
  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
static volatile int _waiting_to_block; // number of threads we are waiting for to block
+ static int _current_jni_active_count; // Counts the number of active critical natives during the safepoint
// This counter is used for fast versions of jni_Get<Primitive>Field.
  // An even value means there are no ongoing safepoint operations.
@@ -138,6 +140,8 @@ public:
static bool safepoint_safe(JavaThread *thread, JavaThreadState state);
+ static void check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state);
+
// Query
inline static bool is_at_safepoint() { return _state == _synchronized; }
inline static bool is_synchronizing() { return _state == _synchronizing; }
@@ -146,6 +150,11 @@ public:
return (_state != _not_synchronized);
}
+ inline static void increment_jni_active_count() {
+ assert_locked_or_safepoint(Safepoint_lock);
+ _current_jni_active_count++;
+ }
+
  // Called when a thread voluntarily blocks
static void block(JavaThread *thread);
static void signal_thread_at_safepoint() { _waiting_to_block--; }
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index bea5e6bfd..e865200ec 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -886,9 +886,9 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
// for AbortVMOnException flag
NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
if (exception_kind == IMPLICIT_NULL) {
- Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
+ Events::log_exception(thread, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
} else {
- Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
+ Events::log_exception(thread, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
}
return target_pc;
}
@@ -1541,7 +1541,6 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
address pc = caller.pc();
- Events::log("update call-site at pc " INTPTR_FORMAT, pc);
// Default call_addr is the location of the "basic" call.
  // Determine the address of the call we are reresolving. With
@@ -2679,6 +2678,20 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
return nm;
}
+JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* thread))
+ assert(thread == JavaThread::current(), "must be");
+ // The code is about to enter a JNI lazy critical native method and
+ // _needs_gc is true, so if this thread is already in a critical
+ // section then just return, otherwise this thread should block
+ // until needs_gc has been cleared.
+ if (thread->in_critical()) {
+ return;
+ }
+ // Lock and unlock a critical section to give the system a chance to block
+ GC_locker::lock_critical(thread);
+ GC_locker::unlock_critical(thread);
+JRT_END
+
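The lock/unlock pair above exists purely to block: lock_critical() parks the thread while a GC is pending, and unlock_critical() releases the section as soon as it is acquired. A generic sketch of that gate idiom in portable C++ follows; the class name and the needs_gc flag are illustrative, not HotSpot's.

    #include <condition_variable>
    #include <mutex>

    // Generic sketch of the idiom: passing through the gate blocks a
    // thread only while the flag is raised.
    class Gate {
      std::mutex mu_;
      std::condition_variable cv_;
      bool needs_gc_ = false;
     public:
      void set_needs_gc(bool v) {
        { std::lock_guard<std::mutex> lk(mu_); needs_gc_ = v; }
        if (!v) cv_.notify_all();
      }
      // Analogue of lock_critical(thread); unlock_critical(thread);
      void pass_through() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return !needs_gc_; });
      }
    };

    int main() {
      Gate g;
      g.pass_through();  // returns immediately since needs_gc_ is false
    }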
#ifdef HAVE_DTRACE_H
// Create a dtrace nmethod for this method. The wrapper converts the
// java compiled calling convention to the native convention, makes a dummy call
diff --git a/src/share/vm/runtime/sharedRuntime.hpp b/src/share/vm/runtime/sharedRuntime.hpp
index 145dacd4b..9650c6e11 100644
--- a/src/share/vm/runtime/sharedRuntime.hpp
+++ b/src/share/vm/runtime/sharedRuntime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -462,6 +462,9 @@ class SharedRuntime: AllStatic {
VMRegPair *regs,
BasicType ret_type );
+ // Block before entering a JNI critical method
+ static void block_for_jni_critical(JavaThread* thread);
+
#ifdef HAVE_DTRACE_H
// Generate a dtrace wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 18c04a905..a351de941 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "interpreter/linkResolver.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
+#include "memory/gcLocker.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp"
#include "oops/instanceKlass.hpp"
@@ -1600,8 +1601,6 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// java.lang.Thread.dispatchUncaughtException
if (uncaught_exception.not_null()) {
Handle group(this, java_lang_Thread::threadGroup(threadObj()));
- Events::log("uncaught exception INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT",
- (address)uncaught_exception(), (address)threadObj(), (address)group());
{
EXCEPTION_MARK;
// Check if the method Thread.dispatchUncaughtException() exists. If so
@@ -2280,6 +2279,26 @@ void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
}
}
+// This is a variant of the normal
+// check_special_condition_for_native_trans with slightly different
+// semantics for use by critical native wrappers. It does all the
+// normal checks but also performs the transition back into
+// thread_in_Java state. This is required so that critical natives
+// can potentially block and perform a GC if they are the last thread
+// exiting the GC_locker.
+void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
+ check_special_condition_for_native_trans(thread);
+
+ // Finish the transition
+ thread->set_thread_state(_thread_in_Java);
+
+ if (thread->do_critical_native_unlock()) {
+ ThreadInVMfromJavaNoAsyncException tiv(thread);
+ GC_locker::unlock_critical(thread);
+ thread->clear_critical_native_unlock();
+ }
+}
+
// We need to guarantee the Threads_lock here, since resumes are not
// allowed during safepoint synchronization
// Can only resume from an external suspension
@@ -3885,7 +3904,7 @@ void Threads::add(JavaThread* p, bool force_daemon) {
ThreadService::add_thread(p, daemon);
// Possible GC point.
- Events::log("Thread added: " INTPTR_FORMAT, p);
+ Events::log(p, "Thread added: " INTPTR_FORMAT, p);
}
void Threads::remove(JavaThread* p) {
@@ -3930,7 +3949,7 @@ void Threads::remove(JavaThread* p) {
} // unlock Threads_lock
// Since Events::log uses a lock, we grab it outside the Threads_lock
- Events::log("Thread exited: " INTPTR_FORMAT, p);
+ Events::log(p, "Thread exited: " INTPTR_FORMAT, p);
}
// Threads_lock must be held when this is called (or must be called during a safepoint)
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index 37bbf2937..459326375 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -182,7 +182,8 @@ class Thread: public ThreadShadow {
_ext_suspended = 0x40000000U, // thread has self-suspended
_deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt
- _has_async_exception = 0x00000001U // there is a pending async exception
+ _has_async_exception = 0x00000001U, // there is a pending async exception
+ _critical_native_unlock = 0x00000002U // Must call back to unlock JNI critical lock
};
// various suspension related flags - atomically updated
@@ -350,6 +351,15 @@ class Thread: public ThreadShadow {
clear_suspend_flag(_has_async_exception);
}
+ bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
+
+ void set_critical_native_unlock() {
+ set_suspend_flag(_critical_native_unlock);
+ }
+ void clear_critical_native_unlock() {
+ clear_suspend_flag(_critical_native_unlock);
+ }
+
// Support for Unhandled Oop detection
#ifdef CHECK_UNHANDLED_OOPS
private:
@@ -1038,6 +1048,11 @@ class JavaThread: public Thread {
// Check for async exception in addition to safepoint and suspend request.
static void check_special_condition_for_native_trans(JavaThread *thread);
+ // Same as check_special_condition_for_native_trans but finishes the
+ // transition into thread_in_Java mode so that it can potentially
+ // block.
+ static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);
+
bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
@@ -1310,8 +1325,10 @@ class JavaThread: public Thread {
// JNI critical regions. These can nest.
bool in_critical() { return _jni_active_critical > 0; }
- void enter_critical() { assert(Thread::current() == this,
- "this must be current thread");
+ bool in_last_critical() { return _jni_active_critical == 1; }
+ void enter_critical() { assert(Thread::current() == this ||
+ Thread::current()->is_VM_thread() && SafepointSynchronize::is_synchronizing(),
+ "this must be current thread or synchronizing");
_jni_active_critical++; }
void exit_critical() { assert(Thread::current() == this,
"this must be current thread");
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index eb765f0cf..65da74a81 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2261,13 +2261,6 @@ static inline uint64_t cast_uint64_t(size_t x)
\
declare_constant(SymbolTable::symbol_table_size) \
\
- /********************/ \
- /* SystemDictionary */ \
- /********************/ \
- \
- declare_constant(SystemDictionary::_loader_constraint_size) \
- declare_constant(SystemDictionary::_nof_buckets) \
- \
/***********************************/ \
/* LoaderConstraintTable constants */ \
/***********************************/ \
diff --git a/src/share/vm/trace/traceMacros.hpp b/src/share/vm/trace/traceMacros.hpp
index e92593529..221f4d0f8 100644
--- a/src/share/vm/trace/traceMacros.hpp
+++ b/src/share/vm/trace/traceMacros.hpp
@@ -40,4 +40,8 @@
#define TRACE_START() true
#define TRACE_INITIALIZE() 0
+#define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0)
+#define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
+#define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
+
#endif
diff --git a/src/share/vm/utilities/debug.cpp b/src/share/vm/utilities/debug.cpp
index b96e5bdf6..effeeecb5 100644
--- a/src/share/vm/utilities/debug.cpp
+++ b/src/share/vm/utilities/debug.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -601,18 +601,6 @@ extern "C" void flush() {
}
-extern "C" void events() {
- Command c("events");
- Events::print_last(tty, 50);
-}
-
-
-extern "C" void nevents(int n) {
- Command c("events");
- Events::print_last(tty, n);
-}
-
-
// Given a heap address that was valid before the most recent GC, if
// the oop that used to contain it is still live, prints the new
// location of the oop and the address. Useful for tracking down
diff --git a/src/share/vm/utilities/debug.hpp b/src/share/vm/utilities/debug.hpp
index 63c69e63f..0ba644249 100644
--- a/src/share/vm/utilities/debug.hpp
+++ b/src/share/vm/utilities/debug.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,16 +33,23 @@
// Simple class to format the ctor arguments into a fixed-sized buffer.
template <size_t bufsz = 256>
class FormatBuffer {
-public:
+ public:
inline FormatBuffer(const char * format, ...);
inline void append(const char* format, ...);
+ inline void print(const char* format, ...);
+ inline void printv(const char* format, va_list ap);
operator const char *() const { return _buf; }
-private:
+ char* buffer() { return _buf; }
+ int size() { return bufsz; }
+
+ private:
FormatBuffer(const FormatBuffer &); // prevent copies
-private:
+ protected:
char _buf[bufsz];
+
+ inline FormatBuffer();
};
template <size_t bufsz>
@@ -54,6 +61,24 @@ FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) {
}
template <size_t bufsz>
+FormatBuffer<bufsz>::FormatBuffer() {
+ _buf[0] = '\0';
+}
+
+template <size_t bufsz>
+void FormatBuffer<bufsz>::print(const char * format, ...) {
+ va_list argp;
+ va_start(argp, format);
+ jio_vsnprintf(_buf, bufsz, format, argp);
+ va_end(argp);
+}
+
+template <size_t bufsz>
+void FormatBuffer<bufsz>::printv(const char * format, va_list argp) {
+ jio_vsnprintf(_buf, bufsz, format, argp);
+}
+
+template <size_t bufsz>
void FormatBuffer<bufsz>::append(const char* format, ...) {
// Given that the constructor does a vsnprintf we can assume that
// _buf is already initialized.
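With these additions FormatBuffer behaves as a fixed-size printf target: print() overwrites the buffer, printv() does the same from a ready va_list, and append() continues from the current end; jio_vsnprintf truncates rather than overruns. A rough standalone approximation, with std::vsnprintf substituting for jio_vsnprintf and an arbitrary capacity and message:

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Rough stand-in for FormatBuffer; std::vsnprintf truncates safely,
    // approximating the jio_vsnprintf behavior relied on above.
    template <size_t bufsz = 256>
    class FmtBuf {
      char _buf[bufsz];
     public:
      FmtBuf() { _buf[0] = '\0'; }
      void printv(const char* fmt, va_list ap) {
        std::vsnprintf(_buf, bufsz, fmt, ap);
      }
      void print(const char* fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        printv(fmt, ap);
        va_end(ap);
      }
      void append(const char* fmt, ...) {
        size_t len = std::strlen(_buf);
        va_list ap;
        va_start(ap, fmt);
        std::vsnprintf(_buf + len, bufsz - len, fmt, ap);
        va_end(ap);
      }
      const char* c_str() const { return _buf; }
    };

    int main() {
      FmtBuf<64> b;
      b.print("Thread added: %p", (void*)0);
      b.append(" done");          // the EventMark destructor idiom
      std::puts(b.c_str());
    }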
diff --git a/src/share/vm/utilities/events.cpp b/src/share/vm/utilities/events.cpp
index 62341195d..9c1ecb546 100644
--- a/src/share/vm/utilities/events.cpp
+++ b/src/share/vm/utilities/events.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
+#include "runtime/threadCritical.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
@@ -43,184 +44,40 @@
#endif
-#ifndef PRODUCT
+EventLog* Events::_logs = NULL;
+StringEventLog* Events::_messages = NULL;
+StringEventLog* Events::_exceptions = NULL;
+StringEventLog* Events::_deopt_messages = NULL;
-////////////////////////////////////////////////////////////////////////////
-// Event
-
-typedef u4 EventID;
-
-class Event VALUE_OBJ_CLASS_SPEC {
- private:
- jlong _time_tick;
- intx _thread_id;
- const char* _format;
- int _indent;
- intptr_t _arg_1;
- intptr_t _arg_2;
- intptr_t _arg_3;
-
- // only EventBuffer::add_event() can assign event id
- friend class EventBuffer;
- EventID _id;
-
- public:
-
- void clear() { _format = NULL; }
-
- EventID id() const { return _id; }
-
- void fill(int indent, const char* format, intptr_t arg_1, intptr_t arg_2, intptr_t arg_3) {
- _format = format;
- _arg_1 = arg_1;
- _arg_2 = arg_2;
- _arg_3 = arg_3;
-
- _indent = indent;
-
- _thread_id = os::current_thread_id();
- _time_tick = os::elapsed_counter();
- }
-
- void print_on(outputStream *st) {
- if (_format == NULL) return;
- st->print(" %d", _thread_id);
- st->print(" %3.2g ", (double)_time_tick / os::elapsed_frequency());
- st->fill_to(20);
- for (int index = 0; index < _indent; index++) {
- st->print("| ");
- }
- st->print_cr(_format, _arg_1, _arg_2, _arg_3);
- }
-};
-
-////////////////////////////////////////////////////////////////////////////
-// EventBuffer
-//
-// Simple lock-free event queue. Every event has a unique 32-bit id.
-// It's fine if two threads add events at the same time, because they
-// will get different event id, and then write to different buffer location.
-// However, it is assumed that add_event() is quick enough (or buffer size
-// is big enough), so when one thread is adding event, there can't be more
-// than "size" events created by other threads; otherwise we'll end up having
-// two threads writing to the same location.
-
-class EventBuffer : AllStatic {
- private:
- static Event* buffer;
- static int size;
- static jint indent;
- static volatile EventID _current_event_id;
-
- static EventID get_next_event_id() {
- return (EventID)Atomic::add(1, (jint*)&_current_event_id);
- }
-
- public:
- static void inc_indent() { Atomic::inc(&indent); }
- static void dec_indent() { Atomic::dec(&indent); }
-
- static bool get_event(EventID id, Event* event) {
- int index = (int)(id % size);
- if (buffer[index].id() == id) {
- memcpy(event, &buffer[index], sizeof(Event));
- // check id again; if buffer[index] is being updated by another thread,
- // event->id() will contain different value.
- return (event->id() == id);
- } else {
- // id does not match - id is invalid, or event is overwritten
- return false;
- }
- }
-
- // add a new event to the queue; if EventBuffer is full, this call will
- // overwrite the oldest event in the queue
- static EventID add_event(const char* format,
- intptr_t arg_1, intptr_t arg_2, intptr_t arg_3) {
- // assign a unique id
- EventID id = get_next_event_id();
-
- // event will be copied to buffer[index]
- int index = (int)(id % size);
-
- // first, invalidate id, buffer[index] can't have event with id = index + 2
- buffer[index]._id = index + 2;
-
- // make sure everyone has seen that buffer[index] is invalid
- OrderAccess::fence();
-
- // ... before updating its value
- buffer[index].fill(indent, format, arg_1, arg_2, arg_3);
-
- // finally, set up real event id, now buffer[index] contains valid event
- OrderAccess::release_store(&(buffer[index]._id), id);
-
- return id;
- }
-
- static void print_last(outputStream *st, int number) {
- st->print_cr("[Last %d events in the event buffer]", number);
- st->print_cr("-<thd>-<elapsed sec>-<description>---------------------");
-
- int count = 0;
- EventID id = _current_event_id;
- while (count < number) {
- Event event;
- if (get_event(id, &event)) {
- event.print_on(st);
- }
- id--;
- count++;
- }
- }
-
- static void print_all(outputStream* st) {
- print_last(st, size);
- }
-
- static void init() {
- // Allocate the event buffer
- size = EventLogLength;
- buffer = NEW_C_HEAP_ARRAY(Event, size);
-
- _current_event_id = 0;
+EventLog::EventLog() {
+  // This is normally done during bootstrap when we're still single
+  // threaded, but use a ThreadCritical to ensure inclusion in case
+  // some logs are created slightly late.
+ ThreadCritical tc;
+ _next = Events::_logs;
+ Events::_logs = this;
+}
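The constructor above makes every log self-registering: new instances push themselves onto a global singly-linked list, which the crash reporter can walk without any central setup. A hedged sketch of the same pattern in plain C++, with std::mutex standing in for ThreadCritical:

    #include <cstdio>
    #include <mutex>

    // Hedged sketch: each Log registers itself on construction so a
    // crash handler can later walk every instance from the list head.
    class Log {
      static Log* head_;
      static std::mutex mu_;   // stand-in for ThreadCritical
      Log* next_;
     public:
      Log() {
        std::lock_guard<std::mutex> lk(mu_);
        next_ = head_;
        head_ = this;
      }
      virtual ~Log() {}
      virtual void print_log_on(std::FILE* out) = 0;
      static void print_all(std::FILE* out) {   // Events::print_all analogue
        for (Log* l = head_; l != NULL; l = l->next_) l->print_log_on(out);
      }
    };
    Log* Log::head_ = NULL;
    std::mutex Log::mu_;

    struct CounterLog : Log {
      int hits;
      CounterLog() : hits(0) {}
      void print_log_on(std::FILE* out) { std::fprintf(out, "hits: %d\n", hits); }
    };

    int main() {
      CounterLog a, b;
      a.hits = 3;
      Log::print_all(stdout);   // walks b then a (newest registration first)
    }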
- // Clear the event buffer
- for (int index = 0; index < size; index++) {
- buffer[index]._id = index + 1; // index + 1 is invalid id
- buffer[index].clear();
- }
+// For each registered event logger, print out the current contents of
+// the buffer. This is normally called when the JVM is crashing.
+void Events::print_all(outputStream* out) {
+ EventLog* log = _logs;
+ while (log != NULL) {
+ log->print_log_on(out);
+ log = log->next();
}
-};
-
-Event* EventBuffer::buffer;
-int EventBuffer::size;
-volatile EventID EventBuffer::_current_event_id;
-int EventBuffer::indent;
-
-////////////////////////////////////////////////////////////////////////////
-// Events
+}
-// Events::log() is safe for signal handlers
-void Events::log(const char* format, ...) {
+void Events::init() {
if (LogEvents) {
- va_list ap;
- va_start(ap, format);
- intptr_t arg_1 = va_arg(ap, intptr_t);
- intptr_t arg_2 = va_arg(ap, intptr_t);
- intptr_t arg_3 = va_arg(ap, intptr_t);
- va_end(ap);
-
- EventBuffer::add_event(format, arg_1, arg_2, arg_3);
+ _messages = new StringEventLog("Events");
+ _exceptions = new StringEventLog("Internal exceptions");
+ _deopt_messages = new StringEventLog("Deoptimization events");
}
}
-void Events::print_all(outputStream *st) {
- EventBuffer::print_all(st);
-}
-
-void Events::print_last(outputStream *st, int number) {
- EventBuffer::print_last(st, number);
+void eventlog_init() {
+ Events::init();
}
///////////////////////////////////////////////////////////////////////////
@@ -230,37 +87,17 @@ EventMark::EventMark(const char* format, ...) {
if (LogEvents) {
va_list ap;
va_start(ap, format);
- intptr_t arg_1 = va_arg(ap, intptr_t);
- intptr_t arg_2 = va_arg(ap, intptr_t);
- intptr_t arg_3 = va_arg(ap, intptr_t);
+    // Save a copy of the begin message and log it.
+ _buffer.printv(format, ap);
+ Events::log(NULL, _buffer);
va_end(ap);
-
- EventBuffer::add_event(format, arg_1, arg_2, arg_3);
- EventBuffer::inc_indent();
}
}
EventMark::~EventMark() {
if (LogEvents) {
- EventBuffer::dec_indent();
- EventBuffer::add_event("done", 0, 0, 0);
+ // Append " done" to the begin message and log it
+ _buffer.append(" done");
+ Events::log(NULL, _buffer);
}
}
-
-///////////////////////////////////////////////////////////////////////////
-
-void eventlog_init() {
- EventBuffer::init();
-}
-
-int print_all_events(outputStream *st) {
- EventBuffer::print_all(st);
- return 1;
-}
-
-#else
-
-void eventlog_init() {}
-int print_all_events(outputStream *st) { return 0; }
-
-#endif // PRODUCT
diff --git a/src/share/vm/utilities/events.hpp b/src/share/vm/utilities/events.hpp
index 6a50643da..89a357912 100644
--- a/src/share/vm/utilities/events.hpp
+++ b/src/share/vm/utilities/events.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,10 @@
#define SHARE_VM_UTILITIES_EVENTS_HPP
#include "memory/allocation.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.hpp"
#include "utilities/top.hpp"
+#include "utilities/vmError.hpp"
// Events and EventMark provide interfaces to log events taking place in the vm.
// This facility is extremely useful for post-mortem debugging. The eventlog
@@ -47,26 +50,246 @@
// Max 3 arguments are saved for each logged event.
//
+// The base event log dumping class that is registered for dumping at
+// crash time. This is a very generic interface that is mainly here
+// for completeness. Normally the templated EventLogBase would be
+// subclassed to provide different log types.
+class EventLog : public CHeapObj {
+ friend class Events;
+
+ private:
+ EventLog* _next;
+
+ EventLog* next() const { return _next; }
+
+ public:
+ // Automatically registers the log so that it will be printed during
+ // crashes.
+ EventLog();
+
+ virtual void print_log_on(outputStream* out) = 0;
+};
+
+
+// A templated subclass of EventLog that provides basic ring buffer
+// functionality. Most event loggers should subclass this, possibly
+// providing a more featureful log function if the existing copy
+// semantics aren't appropriate. The name is used as the label of the
+// log when it is dumped during a crash.
+template <class T> class EventLogBase : public EventLog {
+ template <class X> class EventRecord {
+ public:
+ jlong timestamp;
+ Thread* thread;
+ X data;
+ };
+
+ protected:
+ Mutex _mutex;
+ const char* _name;
+ int _length;
+ int _index;
+ int _count;
+ EventRecord<T>* _records;
+
+ public:
+ EventLogBase<T>(const char* name, int length = LogEventsBufferEntries):
+ _name(name),
+ _length(length),
+ _count(0),
+ _index(0),
+ _mutex(Mutex::event, name) {
+ _records = new EventRecord<T>[length];
+ }
+
+  // Advance the ring buffer to the next open slot and return the index
+  // of the slot to use for the current message. Should only be called
+  // while the mutex is held.
+ int compute_log_index() {
+ int index = _index;
+ if (_count < _length) _count++;
+ _index++;
+ if (_index >= _length) _index = 0;
+ return index;
+ }
+
+ bool should_log() {
+ // Don't bother adding new entries when we're crashing. This also
+ // avoids mutating the ring buffer when printing the log.
+ return !VMError::fatal_error_in_progress();
+ }
+
+ // Print the contents of the log
+ void print_log_on(outputStream* out);
+
+ private:
+ void print_log_impl(outputStream* out);
+
+ // Print a single element. A templated implementation might need to
+ // be declared by subclasses.
+ void print(outputStream* out, T& e);
+
+ void print(outputStream* out, EventRecord<T>& e) {
+ out->print("Event: " INT64_FORMAT " ", e.timestamp);
+ if (e.thread != NULL) {
+ out->print("Thread " INTPTR_FORMAT " ", e.thread);
+ }
+ print(out, e.data);
+ }
+};
+
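compute_log_index() above implements an overwrite-oldest ring buffer: _index is always the next slot to fill and _count saturates at _length once the buffer wraps, while print_log_impl() further down replays the records oldest-first by starting at _index after wrap-around. A small self-contained sketch of both pieces; the capacity of 4 and the int payloads are arbitrary:

    #include <cstdio>

    // Hedged ring-buffer sketch mirroring compute_log_index() and the
    // oldest-first dump in print_log_impl().
    struct Ring {
      static const int LEN = 4;
      int records[LEN];
      int index = 0;   // next slot to overwrite
      int count = 0;   // saturates at LEN once the buffer wraps

      void add(int value) {
        records[index] = value;
        if (count < LEN) count++;
        if (++index >= LEN) index = 0;
      }
      void dump() const {
        if (count < LEN) {                       // not yet wrapped: 0..count
          for (int i = 0; i < count; i++) std::printf("%d ", records[i]);
        } else {                                 // wrapped: index..LEN, then 0..index
          for (int i = index; i < LEN; i++) std::printf("%d ", records[i]);
          for (int i = 0; i < index; i++) std::printf("%d ", records[i]);
        }
        std::printf("\n");
      }
    };

    int main() {
      Ring r;
      for (int v = 1; v <= 6; v++) r.add(v);
      r.dump();  // prints "3 4 5 6": the four most recent, oldest first
    }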
+// A simple wrapper class for fixed size text messages.
+class StringLogMessage : public FormatBuffer<132> {
+ public:
+ // Wrap this buffer in a stringStream.
+ stringStream stream() {
+ return stringStream(_buf, sizeof(_buf));
+ }
+};
+
+// A simple ring buffer of fixed size text messages.
+class StringEventLog : public EventLogBase<StringLogMessage> {
+ public:
+ StringEventLog(const char* name, int count = LogEventsBufferEntries) : EventLogBase<StringLogMessage>(name, count) {}
+
+ void logv(Thread* thread, const char* format, va_list ap) {
+ if (!should_log()) return;
+
+ jlong timestamp = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+ MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
+ int index = compute_log_index();
+ _records[index].thread = thread;
+ _records[index].timestamp = timestamp;
+ _records[index].data.printv(format, ap);
+ }
+
+ void log(Thread* thread, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ logv(thread, format, ap);
+ va_end(ap);
+ }
+
+};
+
+
+
class Events : AllStatic {
+ friend class EventLog;
+
+ private:
+ static EventLog* _logs;
+
+ // A log for generic messages that aren't well categorized.
+ static StringEventLog* _messages;
+
+ // A log for internal exception related messages, like internal
+ // throws and implicit exceptions.
+ static StringEventLog* _exceptions;
+
+  // Deoptimization related messages
+ static StringEventLog* _deopt_messages;
+
public:
- // Logs an event, format as printf
- static void log(const char* format, ...) PRODUCT_RETURN;
+ static void print_all(outputStream* out);
+
+ static void print() {
+ print_all(tty);
+ }
+
+ // Logs a generic message with timestamp and format as printf.
+ static void log(Thread* thread, const char* format, ...);
- // Prints all events in the buffer
- static void print_all(outputStream* st) PRODUCT_RETURN;
+ // Log exception related message
+ static void log_exception(Thread* thread, const char* format, ...);
- // Prints last number events from the event buffer
- static void print_last(outputStream *st, int number) PRODUCT_RETURN;
+ static void log_deopt_message(Thread* thread, const char* format, ...);
+
+ // Register default loggers
+ static void init();
};
+
+inline void Events::log(Thread* thread, const char* format, ...) {
+ if (LogEvents) {
+ va_list ap;
+ va_start(ap, format);
+ _messages->logv(thread, format, ap);
+ va_end(ap);
+ }
+}
+
+inline void Events::log_exception(Thread* thread, const char* format, ...) {
+ if (LogEvents) {
+ va_list ap;
+ va_start(ap, format);
+ _exceptions->logv(thread, format, ap);
+ va_end(ap);
+ }
+}
+
+inline void Events::log_deopt_message(Thread* thread, const char* format, ...) {
+ if (LogEvents) {
+ va_list ap;
+ va_start(ap, format);
+ _deopt_messages->logv(thread, format, ap);
+ va_end(ap);
+ }
+}
+
+
+template <class T>
+inline void EventLogBase<T>::print_log_on(outputStream* out) {
+ if (ThreadLocalStorage::get_thread_slow() == NULL) {
+ // Not a regular Java thread so don't bother locking
+ print_log_impl(out);
+ } else {
+ MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
+ print_log_impl(out);
+ }
+}
+
+// Dump the ring buffer entries that are currently populated.
+template <class T>
+inline void EventLogBase<T>::print_log_impl(outputStream* out) {
+ out->print_cr("%s (%d events):", _name, _count);
+ if (_count == 0) {
+ out->print_cr("No events");
+ return;
+ }
+
+ if (_count < _length) {
+ for (int i = 0; i < _count; i++) {
+ print(out, _records[i]);
+ }
+ } else {
+ for (int i = _index; i < _length; i++) {
+ print(out, _records[i]);
+ }
+ for (int i = 0; i < _index; i++) {
+ print(out, _records[i]);
+ }
+ }
+ out->cr();
+}
+
+// Implement a printing routine for the StringLogMessage
+template <>
+inline void EventLogBase<StringLogMessage>::print(outputStream* out, StringLogMessage& lm) {
+ out->print_raw(lm);
+ out->cr();
+}
+
+// Place markers for the beginning and end of a set of events.
+// These end up in the default log.
class EventMark : public StackObj {
+ StringLogMessage _buffer;
+
public:
// log a begin event, format as printf
- EventMark(const char* format, ...) PRODUCT_RETURN;
+ EventMark(const char* format, ...);
// log an end event
- ~EventMark() PRODUCT_RETURN;
+ ~EventMark();
};
-int print_all_events(outputStream *st);
-
#endif // SHARE_VM_UTILITIES_EVENTS_HPP
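EventMark above is a scope marker: the constructor formats and logs a begin message, and the destructor logs the same text with " done" appended, so paired entries bracket the scope in the default event log. A hedged standalone illustration of the RAII shape, with std::string replacing the fixed 132-byte StringLogMessage and stdout replacing the ring buffer:

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // Hedged RAII sketch of EventMark: log on entry, "<msg> done" on exit.
    class ScopeMark {
      std::string msg_;
      static void log(const std::string& s) { std::printf("Event: %s\n", s.c_str()); }
     public:
      explicit ScopeMark(const char* fmt, ...) {
        char buf[132];                        // StringLogMessage capacity
        va_list ap;
        va_start(ap, fmt);
        std::vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        msg_ = buf;
        log(msg_);                            // begin marker
      }
      ~ScopeMark() { log(msg_ + " done"); }   // end marker
    };

    int main() {
      ScopeMark m("Executing VM operation: %s", "example");
      // work bracketed by the two log entries happens here
    }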
diff --git a/src/share/vm/utilities/exceptions.cpp b/src/share/vm/utilities/exceptions.cpp
index 91960278b..874d8e5b1 100644
--- a/src/share/vm/utilities/exceptions.cpp
+++ b/src/share/vm/utilities/exceptions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -160,7 +160,7 @@ void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exc
thread->set_pending_exception(h_exception(), file, line);
// vm log
- Events::log("throw_exception " INTPTR_FORMAT, (address)h_exception());
+ Events::log_exception(thread, "Threw " INTPTR_FORMAT " at %s:%d", (address)h_exception(), file, line);
}
diff --git a/src/share/vm/utilities/hashtable.hpp b/src/share/vm/utilities/hashtable.hpp
index a4f0e9012..527470938 100644
--- a/src/share/vm/utilities/hashtable.hpp
+++ b/src/share/vm/utilities/hashtable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -183,7 +183,6 @@ protected:
// Accessor
int entry_size() const { return _entry_size; }
- int table_size() { return _table_size; }
// The following method is MT-safe and may be used with caution.
BasicHashtableEntry* bucket(int i);
@@ -195,6 +194,7 @@ protected:
BasicHashtableEntry* new_entry(unsigned int hashValue);
public:
+ int table_size() { return _table_size; }
void set_entry(int index, BasicHashtableEntry* entry);
void add_entry(int index, BasicHashtableEntry* entry);
diff --git a/src/share/vm/utilities/vmError.cpp b/src/share/vm/utilities/vmError.cpp
index a212f7646..2b4253257 100644
--- a/src/share/vm/utilities/vmError.cpp
+++ b/src/share/vm/utilities/vmError.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/errorReporter.hpp"
+#include "utilities/events.hpp"
#include "utilities/top.hpp"
#include "utilities/vmError.hpp"
@@ -693,7 +694,14 @@ void VMError::report(outputStream* st) {
st->cr();
}
- STEP(200, "(printing dynamic libraries)" )
+ STEP(200, "(printing ring buffers)" )
+
+ if (_verbose) {
+ Events::print_all(st);
+ st->cr();
+ }
+
+ STEP(205, "(printing dynamic libraries)" )
if (_verbose) {
// dynamic libraries, or memory map
diff --git a/test/compiler/7090976/Test7090976.java b/test/compiler/7090976/Test7090976.java
new file mode 100644
index 000000000..84ff20a5d
--- /dev/null
+++ b/test/compiler/7090976/Test7090976.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7090976
+ * @summary Eclipse/CDT causes a JVM crash while indexing C++ code
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement Test7090976
+ */
+
+public class Test7090976 {
+
+ static interface I1 {
+ public void m1();
+ };
+
+ static interface I2 {
+ public void m2();
+ };
+
+ static interface I extends I1,I2 {
+ }
+
+ static class A implements I1 {
+ int v = 0;
+ int v2;
+
+ public void m1() {
+ v2 = v;
+ }
+ }
+
+ static class B implements I2 {
+ Object v = new Object();
+ Object v2;
+
+ public void m2() {
+ v2 = v;
+ }
+ }
+
+ private void test(A a)
+ {
+ if (a instanceof I) {
+ I i = (I)a;
+ i.m1();
+ i.m2();
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ Test7090976 t = new Test7090976();
+ A a = new A();
+ B b = new B();
+ for (int i = 0; i < 10000; i++) {
+ t.test(a);
+ }
+ }
+}
diff --git a/test/compiler/7141637/SpreadNullArg.java b/test/compiler/7141637/SpreadNullArg.java
new file mode 100644
index 000000000..3f3524e19
--- /dev/null
+++ b/test/compiler/7141637/SpreadNullArg.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2011 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SpreadNullArg
+ * @bug 7141637
+ * @summary verifies that the MethodHandle spread adapter can gracefully handle null arguments.
+ * @run main SpreadNullArg
+ * @author volker.simonis@gmail.com
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+public class SpreadNullArg {
+
+ public static void main(String args[]) {
+
+ MethodType mt_ref_arg = MethodType.methodType(int.class, Integer.class);
+ MethodHandle mh_spreadInvoker = MethodHandles.spreadInvoker(mt_ref_arg, 0);
+ MethodHandle mh_spread_target;
+ int result = 42;
+
+ try {
+ mh_spread_target =
+ MethodHandles.lookup().findStatic(SpreadNullArg.class, "target_spread_arg", mt_ref_arg);
+ result = (int) mh_spreadInvoker.invokeExact(mh_spread_target, (Object[]) null);
+ } catch(NullPointerException e) {
+ // Expected exception - do nothing!
+ } catch(Throwable e) {
+ throw new Error(e);
+ }
+
+ if (result != 42) throw new Error("Expected NullPointerException was not thrown");
+ }
+
+ public static int target_spread_arg(Integer i1) {
+ return i1.intValue();
+ }
+
+}