path: root/src/cpu/x86/vm/x86_32.ad
Diffstat (limited to 'src/cpu/x86/vm/x86_32.ad')
-rw-r--r--  src/cpu/x86/vm/x86_32.ad  87
1 file changed, 31 insertions(+), 56 deletions(-)
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index f0bd571a8..cd4b7c730 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -487,6 +487,11 @@ int Compile::ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+ ShouldNotReachHere();
+}
+
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Empty encoding
}
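
On 32-bit x86 the constant table is reached with absolute addressing (calculate_table_base_offset() returns 0 above), so MachConstantBaseNode emits nothing and never needs post-allocation expansion; the two new hooks simply opt out. For platforms that do materialize a table-base register, the general "late expand" idea is sketched below in plain C++ (all names are hypothetical, not HotSpot's): a placeholder node defers code generation until the register allocator has assigned it a register.

    #include <cstdio>
    #include <vector>

    struct Insn { const char* text; int reg; };

    // Placeholder that cannot be emitted directly; it expands into real
    // instructions only after register allocation (hypothetical sketch).
    struct ConstantBasePlaceholder {
      bool requires_postalloc_expand() const { return true; }
      void postalloc_expand(std::vector<Insn>& out, int assigned_reg) const {
        out.push_back({"load_const_table_base", assigned_reg});
      }
    };

    int main() {
      std::vector<Insn> code;
      ConstantBasePlaceholder node;
      if (node.requires_postalloc_expand())
        node.postalloc_expand(code, /*assigned_reg=*/5);  // allocator's pick
      for (const Insn& i : code)
        std::printf("%s -> r%d\n", i.text, i.reg);
      return 0;
    }
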
@@ -1292,59 +1297,6 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
-uint size_exception_handler() {
- // NativeCall instruction size is the same as NativeJump.
- // exception handler starts out as a jump and can be patched to
- // a call by deoptimization. (4932387)
- // Note that this value is also credited (in output.cpp) to
- // the size of the code section.
- return NativeJump::instruction_size;
-}
-
-// Emit exception handler code. Stuff framesize into a register
-// and call a VM stub routine.
-int emit_exception_handler(CodeBuffer& cbuf) {
-
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a handler.
- MacroAssembler _masm(&cbuf);
- address base =
- __ start_a_stub(size_exception_handler());
- if (base == NULL) return 0; // CodeBuffer::expand failed
- int offset = __ offset();
- __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
- assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
- __ end_a_stub();
- return offset;
-}
-
-uint size_deopt_handler() {
- // NativeCall instruction size is the same as NativeJump.
- // deopt handler starts out as a jump and can be patched to
- // a call by deoptimization. (4932387)
- // Note that this value is also credited (in output.cpp) to
- // the size of the code section.
- return 5 + NativeJump::instruction_size; // pushl(); jmp;
-}
-
-// Emit deopt handler code.
-int emit_deopt_handler(CodeBuffer& cbuf) {
-
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a handler.
- MacroAssembler _masm(&cbuf);
- address base =
-    __ start_a_stub(size_deopt_handler());
- if (base == NULL) return 0; // CodeBuffer::expand failed
- int offset = __ offset();
- InternalAddress here(__ pc());
- __ pushptr(here.addr());
-
- __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
- assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
- __ end_a_stub();
- return offset;
-}
int Matcher::regnum_to_fpu_offset(int regnum) {
return regnum - 32; // The FP registers are in the second chunk
@@ -1389,6 +1341,9 @@ const int Matcher::long_cmove_cost() { return 1; }
// No CMOVF/CMOVD with SSE/SSE2
const int Matcher::float_cmove_cost() { return (UseSSE>=1) ? ConditionalMoveLimit : 0; }
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers? True for Intel but false for most RISCs
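
A quick illustration of why the answer is "true" on x86 (a sketch, not compiler code): a left shift feeding an address computation folds into the scaled-index addressing mode, so cloning the shift at each use costs no extra instructions.

    // arr[i] with 4-byte elements: the logical <<2 becomes the *4 scale,
    //   mov eax, [ebx + ecx*4]
    int load_elem(const int* arr, int i) {
      return arr[i];  // one scaled-index load; no separate shift instruction
    }
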
@@ -3219,7 +3174,7 @@ frame %{
// automatically biased by the preserve_stack_slots field above.
c_calling_convention %{
// This is obviously always outgoing
- (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
+ (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
%}
// Location of C & interpreter return values
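
The extra regs2 argument tracks a cross-platform signature change: some ABIs (the PPC64 port that this change appears to accompany is an example) can assign an outgoing C argument to both a register and a backing stack slot, so the shared routine grew a second output array; x86_32 has no such duality and passes NULL. A hedged sketch of the widened interface, with simplified stand-in types rather than the real SharedRuntime signature:

    #include <cstddef>

    struct RegPair { int reg; int slot; };  // simplified VMRegPair stand-in

    // Sketch only: an optional second output array for ABIs where an
    // argument lives in a register AND a reserved stack slot. Callers on
    // platforms without that duality (like x86_32 above) pass NULL.
    static int calling_convention_sketch(const int* sig_bt, RegPair* regs,
                                         RegPair* regs2, int length) {
      int slots = 0;
      for (int i = 0; i < length; i++) {
        regs[i] = { i, -1 };              // primary location: a register
        if (regs2 != NULL)
          regs2[i] = { -1, slots };       // secondary location: a stack slot
        slots += 1;
      }
      return slots;                       // stack space the caller reserves
    }
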
@@ -6587,6 +6542,7 @@ instruct storeSSL(stackSlotL dst, eRegL src) %{
instruct membar_acquire() %{
match(MemBarAcquire);
+ match(LoadFence);
ins_cost(400);
size(0);
@@ -6607,6 +6563,7 @@ instruct membar_acquire_lock() %{
instruct membar_release() %{
match(MemBarRelease);
+ match(StoreFence);
ins_cost(400);
size(0);
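
Matching the new LoadFence and StoreFence ideal nodes onto these existing zero-size membar instructs is sound because x86's TSO memory model already provides load-load/load-store ordering (acquire) and store-store/load-store ordering (release); only a full store-load fence costs an instruction. The C++ analogue of the hardware cost (a sketch, not HotSpot code):

    #include <atomic>

    void fences() {
      std::atomic_thread_fence(std::memory_order_acquire);  // no instruction on x86
      std::atomic_thread_fence(std::memory_order_release);  // no instruction on x86
      std::atomic_thread_fence(std::memory_order_seq_cst);  // mfence or a lock-prefixed op
    }
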
@@ -12915,13 +12872,31 @@ instruct RethrowException()
// inlined locking and unlocking
+instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eDXRegI scr, rRegI cx1, rRegI cx2) %{
+ predicate(Compile::current()->use_rtm());
+ match(Set cr (FastLock object box));
+ effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
+ ins_cost(300);
+ format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
+ ins_encode %{
+ __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+ $scr$$Register, $cx1$$Register, $cx2$$Register,
+ _counters, _rtm_counters, _stack_rtm_counters,
+ ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
+ true, ra_->C->profile_rtm());
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
+ predicate(!Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode %{
- __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+ __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+ $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
%}
ins_pipe(pipe_slow);
%}
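
The RTM variant above elides the lock by running the critical section as a hardware transaction and falling back to the classic fast_lock path on abort. The gist, in plain C++ using the RTM intrinsics (a conceptual sketch of lock elision, not HotSpot's fast_lock; compile with -mrtm):

    #include <immintrin.h>
    #include <mutex>

    static std::mutex fallback_lock;
    static volatile int lock_word = 0;   // 0 = unlocked; read inside the txn

    void locked_region(void (*body)()) {
      if (_xbegin() == _XBEGIN_STARTED) {
        if (lock_word != 0)              // someone holds it: cannot elide
          _xabort(0xff);
        body();                          // runs transactionally, lock untouched
        _xend();                         // commit: as if the lock were never taken
        return;
      }
      // Transaction aborted (conflict, capacity, or lock held): really lock.
      std::lock_guard<std::mutex> guard(fallback_lock);
      body();
    }

The use_rtm flag threaded into fast_unlock below is the matching change on the release side, letting the unlock path handle a transactionally elided lock as well as a conventionally held one.
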
@@ -12932,7 +12907,7 @@ instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
ins_cost(300);
format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
ins_encode %{
- __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+ __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
%}
ins_pipe(pipe_slow);
%}