author    amurillo <none@none>  2012-12-14 14:19:35 -0800
committer amurillo <none@none>  2012-12-14 14:19:35 -0800
commit    b1dddf697338b4e139906bd20b5c2afc8c3b1e33 (patch)
tree      685be562b6b44ed1db28d2c93b9afafb19a519e6
parent    7b4eed936aa397c464e736c545f6b4e0795ac977 (diff)
parent    5ab36c6437b7987beee0e375b77b5878f18be3ba (diff)
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java             |  14
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/oops/Method.java                  |  10
-rw-r--r--  make/hotspot_version                                                       |   2
-rw-r--r--  src/cpu/sparc/vm/cppInterpreter_sparc.cpp                                  |  31
-rw-r--r--  src/cpu/sparc/vm/methodHandles_sparc.cpp                                   |   6
-rw-r--r--  src/cpu/sparc/vm/templateInterpreter_sparc.cpp                             |  23
-rw-r--r--  src/cpu/sparc/vm/templateTable_sparc.cpp                                   |   3
-rw-r--r--  src/cpu/x86/vm/cppInterpreter_x86.cpp                                      |  27
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.cpp                                       |   6
-rw-r--r--  src/cpu/x86/vm/templateInterpreter_x86_32.cpp                              |  21
-rw-r--r--  src/cpu/x86/vm/templateInterpreter_x86_64.cpp                              |  27
-rw-r--r--  src/share/vm/classfile/javaClasses.cpp                                     |   6
-rw-r--r--  src/share/vm/classfile/javaClasses.hpp                                     |   8
-rw-r--r--  src/share/vm/classfile/symbolTable.cpp                                     |   4
-rw-r--r--  src/share/vm/gc_implementation/g1/concurrentMark.cpp                       | 300
-rw-r--r--  src/share/vm/gc_implementation/g1/concurrentMark.hpp                       |  51
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                      |   8
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp |   1
-rw-r--r--  src/share/vm/gc_implementation/shared/gcStats.cpp                          |   1
-rw-r--r--  src/share/vm/oops/constMethod.hpp                                          |  17
-rw-r--r--  src/share/vm/oops/method.hpp                                               |  17
-rw-r--r--  src/share/vm/runtime/arguments.cpp                                         |  22
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp                                         |   4
23 files changed, 415 insertions(+), 194 deletions(-)
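
This merge brings two independent changes. First, the immutable _max_locals and _size_of_parameters fields move from Method into ConstMethod, so every interpreter and method-handle stub that used to read them straight off the Method* now loads the _constMethod pointer first; the SA agent and vmStructs.cpp are updated to match. Second, G1's ConcurrentMark stops calling vm_exit_during_initialization() on allocation failure and instead reports failure through a completed_initialization() flag, and its marking stack becomes expandable. A minimal sketch of the field move, using hypothetical simplified types rather than the real HotSpot declarations:

    #include <cstdint>

    // Read-only, shareable part of a method's metadata.
    struct ConstMethod {
      uint16_t _max_stack;
      uint16_t _max_locals;          // moved here from Method by this change
      uint16_t _size_of_parameters;  // moved here from Method by this change

      int max_locals() const         { return _max_locals; }
      int size_of_parameters() const { return _size_of_parameters; }
    };

    // Mutable per-method metadata: after the change it delegates
    // instead of keeping duplicate copies of the two fields.
    struct Method {
      ConstMethod* _constMethod;

      int max_locals() const         { return _constMethod->max_locals(); }
      int size_of_parameters() const { return _constMethod->size_of_parameters(); }
    };
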
diff --git a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java
index 3e3082020..ad495d73b 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java
@@ -69,6 +69,8 @@ public class ConstMethod extends VMObject {
signatureIndex = new CIntField(type.getCIntegerField("_signature_index"), 0);
idnum = new CIntField(type.getCIntegerField("_method_idnum"), 0);
maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
+ maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
+ sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
// start of byte code
bytecodeOffset = type.getSize();
@@ -96,6 +98,8 @@ public class ConstMethod extends VMObject {
private static CIntField signatureIndex;
private static CIntField idnum;
private static CIntField maxStack;
+ private static CIntField maxLocals;
+ private static CIntField sizeOfParameters;
// start of bytecode
private static long bytecodeOffset;
@@ -151,6 +155,14 @@ public class ConstMethod extends VMObject {
return maxStack.getValue(this);
}
+ public long getMaxLocals() {
+ return maxLocals.getValue(this);
+ }
+
+ public long getSizeOfParameters() {
+ return sizeOfParameters.getValue(this);
+ }
+
public Symbol getName() {
return getMethod().getName();
}
@@ -247,6 +259,8 @@ public class ConstMethod extends VMObject {
visitor.doCInt(signatureIndex, true);
visitor.doCInt(codeSize, true);
visitor.doCInt(maxStack, true);
+ visitor.doCInt(maxLocals, true);
+ visitor.doCInt(sizeOfParameters, true);
}
// Accessors
diff --git a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java
index a12d0b1e8..31dc39c54 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java
@@ -50,8 +50,6 @@ public class Method extends Metadata {
constMethod = type.getAddressField("_constMethod");
methodData = type.getAddressField("_method_data");
methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
- maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
- sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
code = type.getAddressField("_code");
vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0);
@@ -83,8 +81,6 @@ public class Method extends Metadata {
private static AddressField constMethod;
private static AddressField methodData;
private static CIntField methodSize;
- private static CIntField maxLocals;
- private static CIntField sizeOfParameters;
private static CIntField accessFlags;
private static CIntField vtableIndex;
private static CIntField invocationCounter;
@@ -134,8 +130,8 @@ public class Method extends Metadata {
/** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
public long getMethodSize() { return methodSize.getValue(this); }
public long getMaxStack() { return getConstMethod().getMaxStack(); }
- public long getMaxLocals() { return maxLocals.getValue(this); }
- public long getSizeOfParameters() { return sizeOfParameters.getValue(this); }
+ public long getMaxLocals() { return getConstMethod().getMaxLocals(); }
+ public long getSizeOfParameters() { return getConstMethod().getSizeOfParameters(); }
public long getNameIndex() { return getConstMethod().getNameIndex(); }
public long getSignatureIndex() { return getConstMethod().getSignatureIndex(); }
public long getGenericSignatureIndex() { return getConstMethod().getGenericSignatureIndex(); }
@@ -282,8 +278,6 @@ public class Method extends Metadata {
public void iterateFields(MetadataVisitor visitor) {
visitor.doCInt(methodSize, true);
- visitor.doCInt(maxLocals, true);
- visitor.doCInt(sizeOfParameters, true);
visitor.doCInt(accessFlags, true);
}
diff --git a/make/hotspot_version b/make/hotspot_version
index 4dc306b68..7883b9bfb 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=12
+HS_BUILD_NUMBER=13
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
diff --git a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp
index eacb4d3fd..7031597b6 100644
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp
@@ -582,7 +582,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// the following temporary registers are used during frame creation
const Register Gtmp1 = G3_scratch ;
const Register Gtmp2 = G1_scratch;
- const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
+ const Register RconstMethod = Gtmp1;
+ const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
+ const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
bool inc_counter = UseCompiler || CountCompiledCalls;
@@ -618,6 +620,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
}
#endif // ASSERT
+ __ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, Gtmp1);
__ sll(Gtmp1, LogBytesPerWord, Gtmp2); // parameter size in bytes
__ add(Gargs, Gtmp2, Gargs); // points to first local + BytesPerWord
@@ -1047,8 +1050,6 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
const Register Gtmp = G3_scratch;
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
- const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
- const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
// slop factor is two extra slots on the expression stack so that
// we always have room to store a result when returning from a call without parameters
@@ -1066,6 +1067,9 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// Now compute new frame size
if (native) {
+ const Register RconstMethod = Gtmp;
+ const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+ __ ld_ptr(constMethod, RconstMethod);
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
} else {
@@ -1236,9 +1240,13 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
}
if (init_value != noreg) {
Label clear_loop;
+ const Register RconstMethod = O1;
+ const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+ const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
// NOTE: If you change the frame layout, this code will need to
// be updated!
+ __ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, LogBytesPerWord, O2);
@@ -1483,13 +1491,16 @@ void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// assert_different_registers(state, prev_state);
const Register Gtmp = G3_scratch;
+  const Register RconstMethod = G3_scratch;
const Register tmp = O2;
- const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
- const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
+ const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
+ const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+ const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
+ __ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, tmp);
- __ sll(tmp, LogBytesPerWord, Gtmp); // parameter size in bytes
- __ add(args, Gtmp, Gargs); // points to first local + BytesPerWord
+ __ sll(tmp, LogBytesPerWord, Gargs); // parameter size in bytes
+ __ add(args, Gargs, Gargs); // points to first local + BytesPerWord
// NEW
__ add(Gargs, -wordSize, Gargs); // points to first local[0]
// determine extra space for non-argument locals & adjust caller's SP
@@ -1541,8 +1552,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
- const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
- const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
address entry_point = __ pc();
__ mov(G0, prevState); // no current activation
@@ -1750,7 +1759,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ld_ptr(STATE(_result._to_call._callee), L4_scratch); // called method
__ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack
- __ lduh(L4_scratch, in_bytes(Method::size_of_parameters_offset()), L2_scratch); // get parameter size
+ // get parameter size
+ __ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
+ __ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
__ sll(L2_scratch, LogBytesPerWord, L2_scratch ); // parameter size in bytes
__ add(L1_scratch, L2_scratch, L1_scratch); // stack destination for result
__ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index
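
Every platform file below repeats the same mechanical rewrite seen above: the single lduh/load_unsigned_short of a Method field becomes a pointer load of Method::_constMethod followed by a 16-bit load at the new ConstMethod offset. Expressed in plain C++ (a sketch of what the generated code computes, with stand-in types, not the real assembler or declarations):

    #include <cstdint>

    // Simplified stand-ins for the real metadata types.
    struct ConstMethodSketch { uint16_t _size_of_parameters; };
    struct MethodSketch      { ConstMethodSketch* _constMethod; };

    // One extra dependent load per access: Method* -> ConstMethod* -> u2 field.
    uint16_t load_size_of_parameters(const MethodSketch* m) {
      const ConstMethodSketch* cm = m->_constMethod; // __ ld_ptr(constMethod, RconstMethod)
      return cm->_size_of_parameters;                // __ lduh(size_of_parameters, dst)
    }
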
diff --git a/src/cpu/sparc/vm/methodHandles_sparc.cpp b/src/cpu/sparc/vm/methodHandles_sparc.cpp
index f4637bf76..546cfaf08 100644
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp
@@ -171,7 +171,8 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
- __ load_sized_value(Address(method_temp, Method::size_of_parameters_offset()),
+ __ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
+ __ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
temp2,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
@@ -233,7 +234,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
- __ load_sized_value(Address(G5_method, Method::size_of_parameters_offset()),
+ __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
+ __ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
O4_param_size,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
diff --git a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
index 2de866adb..bd5085610 100644
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
@@ -494,9 +494,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// (gri - 2/25/2000)
- const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
- const Address size_of_locals (G5_method, Method::size_of_locals_offset());
- const Address constMethod (G5_method, Method::const_offset());
int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space =
@@ -506,11 +503,15 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
(native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
const Register Glocals_size = G3;
+ const Register RconstMethod = Glocals_size;
const Register Otmp1 = O3;
const Register Otmp2 = O4;
// Lscratch can't be used as a temporary because the call_stub uses
// it to assert that the stack frame was setup correctly.
+ const Address constMethod (G5_method, Method::const_offset());
+ const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+ __ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_parameters, Glocals_size);
// Gargs points to first local + BytesPerWord
@@ -530,6 +531,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
// Compute number of locals in method apart from incoming parameters
//
+ const Address size_of_locals (Otmp1, ConstMethod::size_of_locals_offset());
+ __ ld_ptr( constMethod, Otmp1 );
__ lduh( size_of_locals, Otmp1 );
__ sub( Otmp1, Glocals_size, Glocals_size );
__ round_to( Glocals_size, WordsPerLong );
@@ -1256,8 +1259,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
- const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
- const Address size_of_locals (G5_method, Method::size_of_locals_offset());
+ const Address constMethod (G5_method, Method::const_offset());
// Seems like G5_method is live at the point this is used. So we could make this look consistent
// and use in the asserts.
const Address access_flags (Lmethod, Method::access_flags_offset());
@@ -1307,8 +1309,13 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
init_value = G0;
Label clear_loop;
+ const Register RconstMethod = O1;
+ const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+ const Address size_of_locals (RconstMethod, ConstMethod::size_of_locals_offset());
+
// NOTE: If you change the frame layout, this code will need to
// be updated!
+ __ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, Interpreter::logStackElementSize, O2);
@@ -1823,9 +1830,13 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
const Register Gtmp1 = G3_scratch;
const Register Gtmp2 = G1_scratch;
+ const Register RconstMethod = Gtmp1;
+ const Address constMethod(Lmethod, Method::const_offset());
+ const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
// Compute size of arguments for saving when returning to deoptimized caller
- __ lduh(Lmethod, in_bytes(Method::size_of_parameters_offset()), Gtmp1);
+ __ ld_ptr(constMethod, RconstMethod);
+ __ lduh(size_of_parameters, Gtmp1);
__ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
__ sub(Llocals, Gtmp1, Gtmp2);
__ add(Gtmp2, wordSize, Gtmp2);
diff --git a/src/cpu/sparc/vm/templateTable_sparc.cpp b/src/cpu/sparc/vm/templateTable_sparc.cpp
index 5b1887dbb..931342ded 100644
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp
@@ -3040,7 +3040,8 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
Register Rtemp = G4_scratch;
// Load receiver from stack slot
- __ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), G4_scratch);
+ __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
+ __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0);
// receiver NULL check
diff --git a/src/cpu/x86/vm/cppInterpreter_x86.cpp b/src/cpu/x86/vm/cppInterpreter_x86.cpp
index 946c400e5..9c3a2f31a 100644
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp
@@ -611,8 +611,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi/r13 - previous interpreter state pointer
- const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
@@ -977,15 +975,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// to save/restore.
address entry_point = __ pc();
- const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
- const Address size_of_locals (rbx, Method::size_of_locals_offset());
+ const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
+ const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
// get parameter size (always needed)
+ __ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
@@ -994,6 +993,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// for natives the size of locals is zero
// compute beginning of parameters /locals
+
__ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
// initialize fixed part of activation frame
@@ -1107,11 +1107,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Register method = rbx;
const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
+ const Address constMethod (method, Method::const_offset());
+ const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
// allocate space for parameters
__ movptr(method, STATE(_method));
__ verify_method_ptr(method);
- __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
+ __ movptr(t, constMethod);
+ __ load_unsigned_short(t, size_of_parameters);
__ shll(t, 2);
#ifdef _LP64
__ subptr(rsp, t);
@@ -1700,15 +1703,17 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// save sender sp
__ push(rcx);
- const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
- const Address size_of_locals (rbx, Method::size_of_locals_offset());
+ const Address constMethod (rbx, Method::const_offset());
const Address access_flags (rbx, Method::access_flags_offset());
+ const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
+ const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
// const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
// const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
// const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
// get parameter size (always needed)
+ __ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
@@ -1989,7 +1994,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ movptr(rbx, STATE(_result._to_call._callee));
// callee left args on top of expression stack, remove them
- __ load_unsigned_short(rcx, Address(rbx, Method::size_of_parameters_offset()));
+ __ movptr(rcx, constMethod);
+ __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
+
__ lea(rsp, Address(rsp, rcx, Address::times_ptr));
__ movl(rcx, Address(rbx, Method::result_index_offset()));
@@ -2159,7 +2166,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// Make it look like call_stub calling conventions
// Get (potential) receiver
- __ load_unsigned_short(rcx, size_of_parameters); // get size of parameters in words
+ // get size of parameters in words
+ __ movptr(rcx, constMethod);
+ __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
__ pushptr(recursive.addr()); // make it look good in the debugger
diff --git a/src/cpu/x86/vm/methodHandles_x86.cpp b/src/cpu/x86/vm/methodHandles_x86.cpp
index 7da3a2c42..95d5123f4 100644
--- a/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -169,8 +169,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
+ __ movptr(temp2, Address(method_temp, Method::const_offset()));
__ load_sized_value(temp2,
- Address(method_temp, Method::size_of_parameters_offset()),
+ Address(temp2, ConstMethod::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
Label L;
@@ -234,8 +235,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
+ __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
__ load_sized_value(rdx_argp,
- Address(rbx_method, Method::size_of_parameters_offset()),
+ Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
index 19a2a45c9..aabc3dbba 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -424,8 +424,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi - previous interpreter state pointer
- const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
-
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
@@ -868,12 +866,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rsi: previous interpreter state (C++ interpreter) must preserve
address entry_point = __ pc();
-
- const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
+ const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
+ const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// get parameter size (always needed)
+ __ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// native calls don't need the stack size check since they have no expression stack
@@ -988,7 +987,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ get_method(method);
- __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
+ __ movptr(t, Address(method, Method::const_offset()));
+ __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
__ shlptr(t, Interpreter::logStackElementSize);
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
@@ -1297,13 +1298,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// rsi: sender sp
address entry_point = __ pc();
-
- const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
- const Address size_of_locals (rbx, Method::size_of_locals_offset());
+ const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
+ const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
+ const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
// get parameter size (always needed)
+ __ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx,: Method*
@@ -1734,7 +1736,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to deoptimized caller
__ get_method(rax);
- __ load_unsigned_short(rax, Address(rax, in_bytes(Method::size_of_parameters_offset())));
+ __ movptr(rax, Address(rax, Method::const_offset()));
+ __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
__ shlptr(rax, Interpreter::logStackElementSize);
__ restore_locals();
__ subptr(rdi, rax);
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
index e2b46fc78..3e3cc0fc1 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
@@ -369,9 +369,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// Everything as it was on entry
// rdx is not restored. Doesn't appear to really be set.
- const Address size_of_parameters(rbx,
- Method::size_of_parameters_offset());
-
// InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second
// indicates if the counter overflow occurs at a backwards branch
@@ -844,14 +841,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
address entry_point = __ pc();
- const Address size_of_parameters(rbx, Method::
- size_of_parameters_offset());
+ const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::
invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
+ const Address size_of_parameters(rcx, ConstMethod::
+ size_of_parameters_offset());
+
// get parameter size (always needed)
+ __ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// native calls don't need the stack size check since they have no
@@ -967,9 +967,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ get_method(method);
- __ load_unsigned_short(t,
- Address(method,
- Method::size_of_parameters_offset()));
+ __ movptr(t, Address(method, Method::const_offset()));
+ __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize);
__ subptr(rsp, t);
@@ -1302,15 +1301,18 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// r13: sender sp
address entry_point = __ pc();
- const Address size_of_parameters(rbx,
- Method::size_of_parameters_offset());
- const Address size_of_locals(rbx, Method::size_of_locals_offset());
+ const Address constMethod(rbx, Method::const_offset());
const Address invocation_counter(rbx,
Method::invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address access_flags(rbx, Method::access_flags_offset());
+ const Address size_of_parameters(rdx,
+ ConstMethod::size_of_parameters_offset());
+ const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
+
// get parameter size (always needed)
+ __ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
@@ -1752,7 +1754,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to
// deoptimized caller
__ get_method(rax);
- __ load_unsigned_short(rax, Address(rax, in_bytes(Method::
+ __ movptr(rax, Address(rax, Method::const_offset()));
+ __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize);
__ restore_locals(); // XXX do we need this?
diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
index 80b15fbd2..514e15b3f 100644
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -327,14 +327,14 @@ jchar* java_lang_String::as_unicode_string(oop java_string, int& length) {
return result;
}
-unsigned int java_lang_String::to_hash(oop java_string) {
+unsigned int java_lang_String::hash_code(oop java_string) {
int length = java_lang_String::length(java_string);
- // Zero length string will hash to zero with String.toHash() function.
+ // Zero length string will hash to zero with String.hashCode() function.
if (length == 0) return 0;
typeArrayOop value = java_lang_String::value(java_string);
int offset = java_lang_String::offset(java_string);
- return java_lang_String::to_hash(value->char_at_addr(offset), length);
+ return java_lang_String::hash_code(value->char_at_addr(offset), length);
}
char* java_lang_String::as_quoted_ascii(oop java_string) {
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 9359d5b3c..a60872b36 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -166,8 +166,8 @@ class java_lang_String : AllStatic {
// objects in the shared archive file.
// hash P(31) from Kernighan & Ritchie
//
- // For this reason, THIS ALGORITHM MUST MATCH String.toHash().
- template <typename T> static unsigned int to_hash(T* s, int len) {
+ // For this reason, THIS ALGORITHM MUST MATCH String.hashCode().
+ template <typename T> static unsigned int hash_code(T* s, int len) {
unsigned int h = 0;
while (len-- > 0) {
h = 31*h + (unsigned int) *s;
@@ -175,10 +175,10 @@ class java_lang_String : AllStatic {
}
return h;
}
- static unsigned int to_hash(oop java_string);
+ static unsigned int hash_code(oop java_string);
// This is the string hash code used by the StringTable, which may be
- // the same as String.toHash or an alternate hash code.
+ // the same as String.hashCode or an alternate hash code.
static unsigned int hash_string(oop java_string);
static bool equals(oop java_string, jchar* chars, int len);
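
The to_hash -> hash_code rename is purely cosmetic: the function is the Kernighan & Ritchie multiplicative hash with P = 31, and, as the comment insists, it must produce exactly the same value as java.lang.String.hashCode() for strings placed in the shared archive. A standalone sketch with a worked value (illustrative, not the HotSpot template):

    #include <cassert>

    // Same recurrence as java.lang.String.hashCode(): h = 31*h + s[i].
    unsigned int hash_code(const unsigned short* s, int len) {
      unsigned int h = 0;
      while (len-- > 0) {
        h = 31 * h + (unsigned int)*s++;
      }
      return h;
    }

    int main() {
      const unsigned short ab[] = { 'A', 'b' };    // the string "Ab"
      assert(hash_code(ab, 2) == 31u * 'A' + 'b'); // 2113, same as "Ab".hashCode()
      return 0;
    }
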
diff --git a/src/share/vm/classfile/symbolTable.cpp b/src/share/vm/classfile/symbolTable.cpp
index 3c9366205..87de232d7 100644
--- a/src/share/vm/classfile/symbolTable.cpp
+++ b/src/share/vm/classfile/symbolTable.cpp
@@ -179,7 +179,7 @@ Symbol* SymbolTable::lookup(int index, const char* name,
unsigned int SymbolTable::hash_symbol(const char* s, int len) {
return use_alternate_hashcode() ?
AltHashing::murmur3_32(seed(), (const jbyte*)s, len) :
- java_lang_String::to_hash(s, len);
+ java_lang_String::hash_code(s, len);
}
@@ -617,7 +617,7 @@ bool StringTable::_needs_rehashing = false;
// Pick hashing algorithm
unsigned int StringTable::hash_string(const jchar* s, int len) {
return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
- java_lang_String::to_hash(s, len);
+ java_lang_String::hash_code(s, len);
}
oop StringTable::lookup(int index, jchar* name,
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index d3dc1eb7b..e03b2f41c 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -46,27 +46,11 @@
// Concurrent marking bit map wrapper
-CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
- _bm((uintptr_t*)NULL,0),
+CMBitMapRO::CMBitMapRO(int shifter) :
+ _bm(),
_shifter(shifter) {
- _bmStartWord = (HeapWord*)(rs.base());
- _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes
- ReservedSpace brs(ReservedSpace::allocation_align_size_up(
- (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-
- MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-
- guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
- // For now we'll just commit all of the bit map up fromt.
- // Later on we'll try to be more parsimonious with swap.
- guarantee(_virtual_space.initialize(brs, brs.size()),
- "couldn't reseve backing store for concurrent marking bit map");
- assert(_virtual_space.committed_size() == brs.size(),
- "didn't reserve backing store for all of concurrent marking bit map?");
- _bm.set_map((uintptr_t*)_virtual_space.low());
- assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
- _bmWordSize, "inconsistency in bit map sizing");
- _bm.set_size(_bmWordSize >> _shifter);
+ _bmStartWord = 0;
+ _bmWordSize = 0;
}
HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
@@ -108,15 +92,40 @@ int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
}
#ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace rs) const {
+bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
// assert(_bm.map() == _virtual_space.low(), "map inconsistency");
assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
"size inconsistency");
- return _bmStartWord == (HeapWord*)(rs.base()) &&
- _bmWordSize == rs.size()>>LogHeapWordSize;
+ return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
+ _bmWordSize == heap_rs.size()>>LogHeapWordSize;
}
#endif
+bool CMBitMap::allocate(ReservedSpace heap_rs) {
+ _bmStartWord = (HeapWord*)(heap_rs.base());
+ _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
+ ReservedSpace brs(ReservedSpace::allocation_align_size_up(
+ (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
+ if (!brs.is_reserved()) {
+ warning("ConcurrentMark marking bit map allocation failure");
+ return false;
+ }
+ MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
+ // For now we'll just commit all of the bit map up front.
+ // Later on we'll try to be more parsimonious with swap.
+ if (!_virtual_space.initialize(brs, brs.size())) {
+ warning("ConcurrentMark marking bit map backing store failure");
+ return false;
+ }
+ assert(_virtual_space.committed_size() == brs.size(),
+ "didn't reserve backing store for all of concurrent marking bit map?");
+ _bm.set_map((uintptr_t*)_virtual_space.low());
+ assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
+ _bmWordSize, "inconsistency in bit map sizing");
+ _bm.set_size(_bmWordSize >> _shifter);
+ return true;
+}
+
void CMBitMap::clearAll() {
_bm.clear();
return;
@@ -163,20 +172,79 @@ CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
#endif
{}
-void CMMarkStack::allocate(size_t size) {
- _base = NEW_C_HEAP_ARRAY(oop, size, mtGC);
- if (_base == NULL) {
- vm_exit_during_initialization("Failed to allocate CM region mark stack");
+bool CMMarkStack::allocate(size_t capacity) {
+ // allocate a stack of the requisite depth
+ ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
+ if (!rs.is_reserved()) {
+ warning("ConcurrentMark MarkStack allocation failure");
+ return false;
}
- _index = 0;
- _capacity = (jint) size;
+ MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+ if (!_virtual_space.initialize(rs, rs.size())) {
+ warning("ConcurrentMark MarkStack backing store failure");
+ // Release the virtual memory reserved for the marking stack
+ rs.release();
+ return false;
+ }
+ assert(_virtual_space.committed_size() == rs.size(),
+ "Didn't reserve backing store for all of ConcurrentMark stack?");
+ _base = (oop*) _virtual_space.low();
+ setEmpty();
+ _capacity = (jint) capacity;
_saved_index = -1;
NOT_PRODUCT(_max_depth = 0);
+ return true;
+}
+
+void CMMarkStack::expand() {
+ // Called, during remark, if we've overflown the marking stack during marking.
+  assert(isEmpty(), "stack should have been emptied while handling overflow");
+ assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
+ // Clear expansion flag
+ _should_expand = false;
+ if (_capacity == (jint) MarkStackSizeMax) {
+ if (PrintGCDetails && Verbose) {
+ gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
+ }
+ return;
+ }
+ // Double capacity if possible
+ jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
+ // Do not give up existing stack until we have managed to
+ // get the double capacity that we desired.
+ ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
+ sizeof(oop)));
+ if (rs.is_reserved()) {
+ // Release the backing store associated with old stack
+ _virtual_space.release();
+ // Reinitialize virtual space for new stack
+ if (!_virtual_space.initialize(rs, rs.size())) {
+ fatal("Not enough swap for expanded marking stack capacity");
+ }
+ _base = (oop*)(_virtual_space.low());
+ _index = 0;
+ _capacity = new_capacity;
+ } else {
+ if (PrintGCDetails && Verbose) {
+ // Failed to double capacity, continue;
+ gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
+ SIZE_FORMAT"K to " SIZE_FORMAT"K",
+ _capacity / K, new_capacity / K);
+ }
+ }
+}
+
+void CMMarkStack::set_should_expand() {
+ // If we're resetting the marking state because of an
+ // marking stack overflow, record that we should, if
+ // possible, expand the stack.
+ _should_expand = _cm->has_overflown();
}
CMMarkStack::~CMMarkStack() {
if (_base != NULL) {
- FREE_C_HEAP_ARRAY(oop, _base, mtGC);
+ _base = NULL;
+ _virtual_space.release();
}
}
@@ -217,7 +285,7 @@ void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
jint res = Atomic::cmpxchg(next_index, &_index, index);
if (res == index) {
for (int i = 0; i < n; i++) {
- int ind = index + i;
+ int ind = index + i;
assert(ind < _capacity, "By overflow test above.");
_base[ind] = ptr_arr[i];
}
@@ -228,7 +296,6 @@ void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
}
}
-
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
jint start = _index;
@@ -244,9 +311,9 @@ void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
assert(ind < _capacity, "By overflow test above.");
_base[ind] = ptr_arr[i];
}
+ NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}
-
bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
jint index = _index;
@@ -255,7 +322,7 @@ bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
return false;
} else {
int k = MIN2(max, index);
- jint new_ind = index - k;
+ jint new_ind = index - k;
for (int j = 0; j < k; j++) {
ptr_arr[j] = _base[new_ind + j];
}
@@ -404,9 +471,10 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
return MAX2((n_par_threads + 2) / 4, 1U);
}
-ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
- _markBitMap1(rs, MinObjAlignment - 1),
- _markBitMap2(rs, MinObjAlignment - 1),
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ _g1h(g1h),
+ _markBitMap1(MinObjAlignment - 1),
+ _markBitMap2(MinObjAlignment - 1),
_parallel_marking_threads(0),
_max_parallel_marking_threads(0),
@@ -415,10 +483,10 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
_cleanup_sleep_factor(0.0),
_cleanup_task_overhead(1.0),
_cleanup_list("Cleanup List"),
- _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
- _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
- CardTableModRefBS::card_shift,
- false /* in_resource_area*/),
+ _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
+ _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+ CardTableModRefBS::card_shift,
+ false /* in_resource_area*/),
_prevMarkBitMap(&_markBitMap1),
_nextMarkBitMap(&_markBitMap2),
@@ -449,7 +517,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
_parallel_workers(NULL),
_count_card_bitmaps(NULL),
- _count_marked_bytes(NULL) {
+ _count_marked_bytes(NULL),
+ _completed_initialization(false) {
CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
if (verbose_level < no_verbose) {
verbose_level = no_verbose;
@@ -464,61 +533,34 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
"heap end = "PTR_FORMAT, _heap_start, _heap_end);
}
- _markStack.allocate(MarkStackSize);
+ if (!_markBitMap1.allocate(heap_rs)) {
+ warning("Failed to allocate first CM bit map");
+ return;
+ }
+ if (!_markBitMap2.allocate(heap_rs)) {
+ warning("Failed to allocate second CM bit map");
+ return;
+ }
// Create & start a ConcurrentMark thread.
_cmThread = new ConcurrentMarkThread(this);
assert(cmThread() != NULL, "CM Thread should have been created");
assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
- _g1h = G1CollectedHeap::heap();
assert(CGC_lock != NULL, "Where's the CGC_lock?");
- assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
- assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
+ assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
+ assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
satb_qs.set_buffer_size(G1SATBBufferSize);
_root_regions.init(_g1h, this);
- _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
- _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
-
- _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
- _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
-
- BitMap::idx_t card_bm_size = _card_bm.size();
-
- // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
- _active_tasks = _max_worker_id;
- for (uint i = 0; i < _max_worker_id; ++i) {
- CMTaskQueue* task_queue = new CMTaskQueue();
- task_queue->initialize();
- _task_queues->register_queue(i, task_queue);
-
- _count_card_bitmaps[i] = BitMap(card_bm_size, false);
- _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC);
-
- _tasks[i] = new CMTask(i, this,
- _count_marked_bytes[i],
- &_count_card_bitmaps[i],
- task_queue, _task_queues);
-
- _accum_task_vtime[i] = 0.0;
- }
-
- // Calculate the card number for the bottom of the heap. Used
- // in biasing indexes into the accounting card bitmaps.
- _heap_bottom_card_num =
- intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
- CardTableModRefBS::card_shift);
-
- // Clear all the liveness counting data
- clear_all_count_data();
-
if (ConcGCThreads > ParallelGCThreads) {
- vm_exit_during_initialization("Can't have more ConcGCThreads "
- "than ParallelGCThreads.");
+ warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
+ "than ParallelGCThreads (" UINT32_FORMAT ").",
+ ConcGCThreads, ParallelGCThreads);
+ return;
}
if (ParallelGCThreads == 0) {
// if we are not running with any parallel GC threads we will not
@@ -590,9 +632,86 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
}
}
+ if (FLAG_IS_DEFAULT(MarkStackSize)) {
+ uintx mark_stack_size =
+ MIN2(MarkStackSizeMax,
+ MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
+ // Verify that the calculated value for MarkStackSize is in range.
+ // It would be nice to use the private utility routine from Arguments.
+ if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
+ warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
+ "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
+ mark_stack_size, 1, MarkStackSizeMax);
+ return;
+ }
+ FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
+ } else {
+ // Verify MarkStackSize is in range.
+ if (FLAG_IS_CMDLINE(MarkStackSize)) {
+ if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
+ if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
+ warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
+ "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
+ MarkStackSize, 1, MarkStackSizeMax);
+ return;
+ }
+ } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
+ if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
+ warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
+ " or for MarkStackSizeMax (" UINTX_FORMAT ")",
+ MarkStackSize, MarkStackSizeMax);
+ return;
+ }
+ }
+ }
+ }
+
+ if (!_markStack.allocate(MarkStackSize)) {
+ warning("Failed to allocate CM marking stack");
+ return;
+ }
+
+ _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
+ _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
+
+ _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
+ _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
+
+ BitMap::idx_t card_bm_size = _card_bm.size();
+
+ // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
+ _active_tasks = _max_worker_id;
+
+ size_t max_regions = (size_t) _g1h->max_regions();
+ for (uint i = 0; i < _max_worker_id; ++i) {
+ CMTaskQueue* task_queue = new CMTaskQueue();
+ task_queue->initialize();
+ _task_queues->register_queue(i, task_queue);
+
+ _count_card_bitmaps[i] = BitMap(card_bm_size, false);
+ _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
+
+ _tasks[i] = new CMTask(i, this,
+ _count_marked_bytes[i],
+ &_count_card_bitmaps[i],
+ task_queue, _task_queues);
+
+ _accum_task_vtime[i] = 0.0;
+ }
+
+ // Calculate the card number for the bottom of the heap. Used
+ // in biasing indexes into the accounting card bitmaps.
+ _heap_bottom_card_num =
+ intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
+ CardTableModRefBS::card_shift);
+
+ // Clear all the liveness counting data
+ clear_all_count_data();
+
// so that the call below can read a sensible value
- _heap_start = (HeapWord*) rs.base();
+ _heap_start = (HeapWord*) heap_rs.base();
set_non_marking_state();
+ _completed_initialization = true;
}
void ConcurrentMark::update_g1_committed(bool force) {
@@ -1165,6 +1284,11 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
assert(!restart_for_overflow(), "sanity");
}
+ // Expand the marking stack, if we have to and if we can.
+ if (_markStack.should_expand()) {
+ _markStack.expand();
+ }
+
// Reset the marking state if marking completed
if (!restart_for_overflow()) {
set_non_marking_state();
@@ -2785,7 +2909,7 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
// Verify entries on the task queues
for (uint i = 0; i < _max_worker_id; i += 1) {
cl.set_phase(VerifyNoCSetOopsQueues, i);
- OopTaskQueue* queue = _task_queues->queue(i);
+ CMTaskQueue* queue = _task_queues->queue(i);
queue->oops_do(&cl);
}
}
@@ -2840,8 +2964,8 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
#endif // PRODUCT
void ConcurrentMark::clear_marking_state(bool clear_overflow) {
- _markStack.setEmpty();
- _markStack.clear_overflow();
+ _markStack.set_should_expand();
+ _markStack.setEmpty(); // Also clears the _markStack overflow flag
if (clear_overflow) {
clear_has_overflown();
} else {
@@ -2850,7 +2974,7 @@ void ConcurrentMark::clear_marking_state(bool clear_overflow) {
_finger = _heap_start;
for (uint i = 0; i < _max_worker_id; ++i) {
- OopTaskQueue* queue = _task_queues->queue(i);
+ CMTaskQueue* queue = _task_queues->queue(i);
queue->set_empty();
}
}
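
The structural pattern in the ConcurrentMark constructor above: each allocation that previously called vm_exit_during_initialization() now emits a warning() and returns early, leaving _completed_initialization false; only a constructor that runs to completion sets it true, and the caller decides whether to shut down (see the g1CollectedHeap.cpp hunk below). A minimal sketch of the pattern, with made-up allocation helpers:

    #include <cstdio>

    class FailableInit {
      bool _completed_initialization;

      // Stand-ins for the bit map / mark stack / task array allocations.
      bool allocate_bitmaps()    { return true; }
      bool allocate_mark_stack() { return true; }

    public:
      FailableInit() : _completed_initialization(false) {
        if (!allocate_bitmaps()) {
          std::fprintf(stderr, "warning: failed to allocate CM bit maps\n");
          return;                          // leave the flag false; do not exit the VM
        }
        if (!allocate_mark_stack()) {
          std::fprintf(stderr, "warning: failed to allocate CM marking stack\n");
          return;
        }
        _completed_initialization = true;  // reached only on full success
      }
      bool completed_initialization() const { return _completed_initialization; }
    };

    int main() {
      FailableInit* cm = new FailableInit();
      if (!cm->completed_initialization()) {
        // mirrors vm_shutdown_during_initialization(...) + return JNI_ENOMEM
        std::fprintf(stderr, "Could not create/initialize ConcurrentMark\n");
        delete cm;
        return 1;
      }
      delete cm;
      return 0;
    }
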
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/src/share/vm/gc_implementation/g1/concurrentMark.hpp
index 5bac7a6b3..8089c9829 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -63,7 +63,7 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
public:
// constructor
- CMBitMapRO(ReservedSpace rs, int shifter);
+ CMBitMapRO(int shifter);
enum { do_yield = true };
@@ -117,8 +117,11 @@ class CMBitMap : public CMBitMapRO {
public:
// constructor
- CMBitMap(ReservedSpace rs, int shifter) :
- CMBitMapRO(rs, shifter) {}
+ CMBitMap(int shifter) :
+ CMBitMapRO(shifter) {}
+
+  // Allocates the backing store for the marking bitmap
+ bool allocate(ReservedSpace heap_rs);
// write marks
void mark(HeapWord* addr) {
@@ -155,17 +158,18 @@ class CMBitMap : public CMBitMapRO {
MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
-// Represents a marking stack used by the CM collector.
-// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
+// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
+ VirtualSpace _virtual_space; // Underlying backing store for actual stack
ConcurrentMark* _cm;
oop* _base; // bottom of stack
- jint _index; // one more than last occupied index
- jint _capacity; // max #elements
- jint _saved_index; // value of _index saved at start of GC
- NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
+ jint _index; // one more than last occupied index
+ jint _capacity; // max #elements
+ jint _saved_index; // value of _index saved at start of GC
+ NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
- bool _overflow;
+ bool _overflow;
+ bool _should_expand;
DEBUG_ONLY(bool _drain_in_progress;)
DEBUG_ONLY(bool _drain_in_progress_yields;)
@@ -173,7 +177,13 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
CMMarkStack(ConcurrentMark* cm);
~CMMarkStack();
- void allocate(size_t size);
+#ifndef PRODUCT
+ jint max_depth() const {
+ return _max_depth;
+ }
+#endif
+
+ bool allocate(size_t capacity);
oop pop() {
if (!isEmpty()) {
@@ -231,11 +241,17 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
bool isEmpty() { return _index == 0; }
bool isFull() { return _index == _capacity; }
- int maxElems() { return _capacity; }
+ int maxElems() { return _capacity; }
bool overflow() { return _overflow; }
void clear_overflow() { _overflow = false; }
+ bool should_expand() const { return _should_expand; }
+ void set_should_expand();
+
+ // Expand the stack, typically in response to an overflow condition
+ void expand();
+
int size() { return _index; }
void setEmpty() { _index = 0; clear_overflow(); }
@@ -344,6 +360,7 @@ public:
class ConcurrentMarkThread;
class ConcurrentMark: public CHeapObj<mtGC> {
+ friend class CMMarkStack;
friend class ConcurrentMarkThread;
friend class CMTask;
friend class CMBitMapClosure;
@@ -577,6 +594,9 @@ protected:
// the card bitmaps.
intptr_t _heap_bottom_card_num;
+ // Set to true when initialization is complete
+ bool _completed_initialization;
+
public:
// Manipulation of the global mark stack.
// Notice that the first mark_stack_push is CAS-based, whereas the
@@ -636,7 +656,7 @@ public:
return _task_queues->steal(worker_id, hash_seed, obj);
}
- ConcurrentMark(ReservedSpace rs, uint max_regions);
+ ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -907,6 +927,11 @@ public:
// Should *not* be called from parallel code.
inline bool mark_and_count(oop obj);
+ // Returns true if initialization was successfully completed.
+ bool completed_initialization() const {
+ return _completed_initialization;
+ }
+
protected:
// Clear all the per-task bitmaps and arrays used to store the
// counting data.
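
The new should_expand()/expand() protocol declared above works in two steps: clear_marking_state() records an overflow as a pending expansion request, and checkpointRootsFinal() performs the doubling later, when the stack is known to be empty, so nothing needs to be copied. Note that expand() reserves the doubled backing store before releasing the old one, which is what makes a failed reservation benign. A self-contained sketch using malloc in place of ReservedSpace/VirtualSpace:

    #include <algorithm>
    #include <cstdio>
    #include <cstdlib>

    struct MarkStackSketch {
      void*  _base;
      size_t _capacity;
      size_t _capacity_max;
      bool   _should_expand;

      MarkStackSketch(size_t cap, size_t cap_max)
          : _base(std::malloc(cap * sizeof(void*))),
            _capacity(cap), _capacity_max(cap_max), _should_expand(false) {}
      ~MarkStackSketch() { std::free(_base); }

      void set_should_expand(bool overflowed) { _should_expand = overflowed; }
      bool should_expand() const              { return _should_expand; }

      // Called only while the stack is empty, so no contents are copied.
      void expand() {
        _should_expand = false;
        if (_capacity == _capacity_max) return;    // already at the permitted limit
        size_t new_capacity = std::min(_capacity * 2, _capacity_max);
        void* new_base = std::malloc(new_capacity * sizeof(void*));
        if (new_base == NULL) {
          std::fprintf(stderr, "(benign) failed to expand marking stack\n");
          return;                                  // the old stack stays intact
        }
        std::free(_base);                          // give up the old store only now
        _base = new_base;
        _capacity = new_capacity;
      }
    };
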
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 70b45fea4..448c57077 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2079,7 +2079,11 @@ jint G1CollectedHeap::initialize() {
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
- _cm = new ConcurrentMark(heap_rs, max_regions());
+ _cm = new ConcurrentMark(this, heap_rs);
+ if (_cm == NULL || !_cm->completed_initialization()) {
+ vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
+ return JNI_ENOMEM;
+ }
_cmThread = _cm->cmThread();
// Initialize the from_card cache structure of HeapRegionRemSet.
@@ -2087,7 +2091,7 @@ jint G1CollectedHeap::initialize() {
// Now expand into the initial heap size.
if (!expand(init_byte_size)) {
- vm_exit_during_initialization("Failed to allocate initial heap.");
+ vm_shutdown_during_initialization("Failed to allocate initial heap.");
return JNI_ENOMEM;
}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp b/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp
index e21f7a7a6..e5ca1f5c0 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
+#include "memory/allocation.inline.hpp"
#include "runtime/java.hpp"
AdjoiningVirtualSpaces::AdjoiningVirtualSpaces(ReservedSpace rs,
diff --git a/src/share/vm/gc_implementation/shared/gcStats.cpp b/src/share/vm/gc_implementation/shared/gcStats.cpp
index faef280f6..745f8f3ab 100644
--- a/src/share/vm/gc_implementation/shared/gcStats.cpp
+++ b/src/share/vm/gc_implementation/shared/gcStats.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
+#include "memory/allocation.inline.hpp"
GCStats::GCStats() {
_avg_promoted = new AdaptivePaddedNoZeroDevAverage(
diff --git a/src/share/vm/oops/constMethod.hpp b/src/share/vm/oops/constMethod.hpp
index 771bf7b16..2819b3e77 100644
--- a/src/share/vm/oops/constMethod.hpp
+++ b/src/share/vm/oops/constMethod.hpp
@@ -46,6 +46,7 @@
// | interp_kind | flags | code_size |
// | name index | signature index |
// | method_idnum | max_stack |
+// | max_locals | size_of_parameters |
// |------------------------------------------------------|
// | |
// | byte codes |
@@ -150,7 +151,8 @@ private:
// initially corresponds to the index into the methods array.
// but this may change with redefinition
u2 _max_stack; // Maximum number of entries on the expression stack
-
+ u2 _max_locals; // Number of local variables used by this method
+ u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
// Constructor
ConstMethod(int byte_code_size,
@@ -338,6 +340,11 @@ public:
static ByteSize max_stack_offset()
{ return byte_offset_of(ConstMethod, _max_stack); }
+ static ByteSize size_of_locals_offset()
+ { return byte_offset_of(ConstMethod, _max_locals); }
+ static ByteSize size_of_parameters_offset()
+ { return byte_offset_of(ConstMethod, _size_of_parameters); }
+
// Unique id for the method
static const u2 MAX_IDNUM;
@@ -349,6 +356,14 @@ public:
int max_stack() const { return _max_stack; }
void set_max_stack(int size) { _max_stack = size; }
+ // max locals
+ int max_locals() const { return _max_locals; }
+ void set_max_locals(int size) { _max_locals = size; }
+
+ // size of parameters
+ int size_of_parameters() const { return _size_of_parameters; }
+ void set_size_of_parameters(int size) { _size_of_parameters = size; }
+
// Deallocation for RedefineClasses
void deallocate_contents(ClassLoaderData* loader_data);
bool is_klass() const { return false; }
diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp
index ff7bc0ee5..9e5e3b9da 100644
--- a/src/share/vm/oops/method.hpp
+++ b/src/share/vm/oops/method.hpp
@@ -73,8 +73,7 @@
// |------------------------------------------------------|
// | result_index (C++ interpreter only) |
// |------------------------------------------------------|
-// | method_size | max_locals |
-// | size_of_parameters | intrinsic_id| flags |
+// | method_size | intrinsic_id| flags |
// |------------------------------------------------------|
// | throwout_count | num_breakpoints |
// |------------------------------------------------------|
@@ -116,8 +115,6 @@ class Method : public Metadata {
int _result_index; // C++ interpreter needs for converting results to/from stack
#endif
u2 _method_size; // size of this object
- u2 _max_locals; // Number of local variables used by this method
- u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
u1 _jfr_towrite : 1, // Flags
_force_inline : 1,
@@ -292,8 +289,8 @@ class Method : public Metadata {
void set_max_stack(int size) { constMethod()->set_max_stack(size); }
// max locals
- int max_locals() const { return _max_locals; }
- void set_max_locals(int size) { _max_locals = size; }
+ int max_locals() const { return constMethod()->max_locals(); }
+ void set_max_locals(int size) { constMethod()->set_max_locals(size); }
int highest_comp_level() const;
void set_highest_comp_level(int level);
@@ -311,7 +308,8 @@ class Method : public Metadata {
void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }
// size of parameters
- int size_of_parameters() const { return _size_of_parameters; }
+ int size_of_parameters() const { return constMethod()->size_of_parameters(); }
+ void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }
bool has_stackmap_table() const {
return constMethod()->has_stackmap_table();
@@ -588,8 +586,6 @@ class Method : public Metadata {
#ifdef CC_INTERP
static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index ); }
#endif /* CC_INTERP */
- static ByteSize size_of_locals_offset() { return byte_offset_of(Method, _max_locals ); }
- static ByteSize size_of_parameters_offset() { return byte_offset_of(Method, _size_of_parameters); }
static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
static ByteSize code_offset() { return byte_offset_of(Method, _code); }
static ByteSize invocation_counter_offset() { return byte_offset_of(Method, _invocation_counter); }
@@ -796,9 +792,6 @@ class Method : public Metadata {
Array<AnnotationArray*>* methods_default_annotations,
bool idempotent = false);
- // size of parameters
- void set_size_of_parameters(int size) { _size_of_parameters = size; }
-
// Deallocation function for redefine classes or if an error occurs
void deallocate_contents(ClassLoaderData* loader_data);
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index f850e05cb..21e5cce7b 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1499,13 +1499,12 @@ void Arguments::set_g1_gc_flags() {
Abstract_VM_Version::parallel_worker_threads());
}
- if (FLAG_IS_DEFAULT(MarkStackSize)) {
- FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
- }
- if (PrintGCDetails && Verbose) {
- tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
- MarkStackSize / K, MarkStackSizeMax / K);
- tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+ // MarkStackSize will be set (if it hasn't been set by the user)
+ // when concurrent marking is initialized.
+ // Its value will be based upon the number of parallel marking threads.
+ // But we do set the maximum mark stack size here.
+ if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
+ FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
}
if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
@@ -1517,6 +1516,12 @@ void Arguments::set_g1_gc_flags() {
// is allocation). We might consider increase it further.
FLAG_SET_DEFAULT(GCTimeRatio, 9);
}
+
+ if (PrintGCDetails && Verbose) {
+ tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
+ MarkStackSize / K, MarkStackSizeMax / K);
+ tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+ }
}
void Arguments::set_heap_size() {
@@ -1980,6 +1985,9 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_min_value(ClassMetaspaceSize, 1*M,
"ClassMetaspaceSize");
+ status = status && verify_interval(MarkStackSizeMax,
+ 1, (max_jint - 1), "MarkStackSizeMax");
+
#ifdef SPARC
if (UseConcMarkSweepGC || UseG1GC) {
// Issue a stern warning if the user has explicitly set
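
With this change, arguments.cpp only pins the MarkStackSizeMax default (128 * TASKQUEUE_SIZE) and range-checks it; the MarkStackSize default itself is computed later, in the ConcurrentMark constructor, from the number of parallel marking threads. A worked example of that calculation with illustrative numbers (TASKQUEUE_SIZE is platform dependent; 1 << 14 is assumed here):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const unsigned long TASKQUEUE_SIZE = 1UL << 14;            // assumed value
      unsigned long MarkStackSizeMax     = 128 * TASKQUEUE_SIZE; // set_g1_gc_flags()
      unsigned long MarkStackSize        = 32 * 1024;            // starting default
      unsigned long parallel_marking_threads = 8;

      // ConcurrentMark's ergonomic default: room for every marking thread's
      // task queue, clamped to MarkStackSizeMax.
      unsigned long mark_stack_size =
          std::min(MarkStackSizeMax,
                   std::max(MarkStackSize, parallel_marking_threads * TASKQUEUE_SIZE));

      std::printf("MarkStackSize := %lu entries\n", mark_stack_size); // 131072 here
      return 0;
    }
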
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index 9346d6c72..9ac10e26d 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -355,8 +355,6 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(Method, _access_flags, AccessFlags) \
nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _method_size, u2) \
- nonstatic_field(Method, _max_locals, u2) \
- nonstatic_field(Method, _size_of_parameters, u2) \
nonstatic_field(Method, _interpreter_throwout_count, u2) \
nonstatic_field(Method, _number_of_breakpoints, u2) \
nonstatic_field(Method, _invocation_counter, InvocationCounter) \
@@ -378,6 +376,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(ConstMethod, _signature_index, u2) \
nonstatic_field(ConstMethod, _method_idnum, u2) \
nonstatic_field(ConstMethod, _max_stack, u2) \
+ nonstatic_field(ConstMethod, _max_locals, u2) \
+ nonstatic_field(ConstMethod, _size_of_parameters, u2) \
nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \
nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \
volatile_nonstatic_field(Symbol, _refcount, int) \