/*
* This file is part of the Jikes RVM project (http://jikesrvm.org).
*
* This file is licensed to You under the Eclipse Public License (EPL);
* You may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.opensource.org/licenses/eclipse-1.0.php
*
* See the COPYRIGHT.txt file distributed with this work for information
* regarding copyright ownership.
*/
package org.jikesrvm.compilers.opt.mir2mc.ia32;
import static org.jikesrvm.compilers.common.assembler.ia32.AssemblerConstants.CONDITION;
import static org.jikesrvm.compilers.opt.OptimizingCompilerException.opt_assert;
import static org.jikesrvm.compilers.opt.ir.Operators.BBEND_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.IG_PATCH_POINT_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.LABEL_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.READ_CEILING_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.UNINT_BEGIN_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.UNINT_END_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.WRITE_FLOOR_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADC_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADDSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADDSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_AND_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CALL_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPEQSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPEQSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLESD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLESS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLTSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLTSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNESD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNESS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLESD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLESS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLTSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLTSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPORDSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPORDSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPUNORDSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPUNORDSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMP_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSD2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSD2SS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSI2SD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSI2SS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSS2SD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSS2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTTSD2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTTSS2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_DIVSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_DIVSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_INT_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_JCC_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_JMP_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_LEA_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_LOCK_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_METHODSTART_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVAPD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVAPS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVLPD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVQ_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOV_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MULSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MULSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_OFFSET_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_OR_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_PUSH_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_RET_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SBB_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SQRTSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SUBSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SUBSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_TEST_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_UCOMISD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_UCOMISS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_XORPD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_XORPS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_XOR_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.MIR_LOWTABLESWITCH;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.MIR_LOWTABLESWITCH_opcode;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.DOUBLE_REG;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.FIRST_DOUBLE;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.FIRST_INT;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.FIRST_SPECIAL;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.INT_REG;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.ST0;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.ST1;
import static org.jikesrvm.ia32.ArchConstants.SSE2_FULL;
import static org.jikesrvm.ia32.RegisterConstants.FP0;
import static org.jikesrvm.ia32.RegisterConstants.FP1;
import static org.jikesrvm.ia32.TrapConstants.RVM_TRAP_BASE;
import static org.jikesrvm.util.Bits.fits;
import java.util.ArrayList;
import java.util.Enumeration;
import org.jikesrvm.VM;
import org.jikesrvm.architecture.MachineRegister;
import org.jikesrvm.compilers.common.assembler.ForwardReference;
import org.jikesrvm.compilers.common.assembler.ia32.Assembler;
import org.jikesrvm.compilers.opt.OptimizingCompilerException;
import org.jikesrvm.compilers.opt.ir.IR;
import org.jikesrvm.compilers.opt.ir.Instruction;
import org.jikesrvm.compilers.opt.ir.Operator;
import org.jikesrvm.compilers.opt.ir.Register;
import org.jikesrvm.compilers.opt.ir.ia32.ArchOperator;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_BinaryAcc;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Branch;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Call;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Compare;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_CondBranch;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Divide;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Lea;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_LowTableSwitch;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Move;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Return;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Test;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Unary;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_UnaryNoRes;
import org.jikesrvm.compilers.opt.ir.ia32.PhysicalRegisterSet;
import org.jikesrvm.compilers.opt.ir.operand.IntConstantOperand;
import org.jikesrvm.compilers.opt.ir.operand.LongConstantOperand;
import org.jikesrvm.compilers.opt.ir.operand.MemoryOperand;
import org.jikesrvm.compilers.opt.ir.operand.Operand;
import org.jikesrvm.compilers.opt.ir.operand.RegisterOperand;
import org.jikesrvm.compilers.opt.ir.operand.StackLocationOperand;
import org.jikesrvm.compilers.opt.ir.operand.TrapCodeOperand;
import org.jikesrvm.compilers.opt.ir.operand.ia32.IA32ConditionOperand;
import org.jikesrvm.compilers.opt.mir2mc.MachineCodeOffsets;
import org.jikesrvm.ia32.RegisterConstants.FPR;
import org.jikesrvm.ia32.RegisterConstants.GPR;
import org.jikesrvm.ia32.RegisterConstants.MM;
import org.jikesrvm.ia32.RegisterConstants.XMM;
import org.jikesrvm.util.Bits;
import org.vmmagic.pragma.NoInline;
import org.vmmagic.unboxed.Offset;
/**
* This class provides support functionality used by the generated
* Assembler; it handles basic impedance-matching functionality
* such as determining which addressing mode is suitable for a given
* IA32MemoryOperand. This class also provides some boilerplate
* methods that do not depend on how instructions should actually be
* assembled, like the top-level generateCode driver. This class is
* not meant to be used in isolation, but rather to provide support
* from the Assembler.
*/
abstract class AssemblerBase extends Assembler {
private static final boolean DEBUG_ESTIMATE = false;
/**
* Hold EBP register object for use in estimating size of memory operands.
*/
private final Register EBP;
/**
* Hold ESP register object for use in estimating size of memory operands.
*/
private final Register ESP;
/**
* Operators with byte arguments
*/
private static final Operator[] byteSizeOperators;
/**
* Operators with word arguments
*/
private static final Operator[] wordSizeOperators;
/**
* Operators with quad arguments
*/
private static final Operator[] quadSizeOperators;
protected final MachineCodeOffsets mcOffsets;
protected final IR ir;
static {
// Bug fix: the original code reused one scratch list but forgot to
// clear it between the "__w" and "__q" scans, so quadSizeOperators
// wrongly contained every word-size operator as well. Building each
// array through a helper makes that mistake impossible.
byteSizeOperators = operatorsWithNameMarker("__b");
wordSizeOperators = operatorsWithNameMarker("__w");
quadSizeOperators = operatorsWithNameMarker("__q");
}
/**
 * Collects all architecture operators whose name contains the given
 * size marker (e.g. {@code "__b"} for operators on byte data).
 *
 * @param marker the substring identifying the operand size
 * @return the operators whose names contain the marker
 */
private static Operator[] operatorsWithNameMarker(String marker) {
ArrayList<Operator> matches = new ArrayList<Operator>();
for (Operator opr : ArchOperator.operatorArray()) {
if (opr != null && opr.toString().indexOf(marker) != -1) {
matches.add(opr);
}
}
return matches.toArray(new Operator[matches.size()]);
}
/**
* Construct Assembler object
*
* @param bytecodeSize initial machine code buffer size.
* @param shouldPrint whether to dump generated machine code.
* @param ir the IR object for the opt compilation
*
* @see Assembler
*/
AssemblerBase(int bytecodeSize, boolean shouldPrint, IR ir) {
super(bytecodeSize, shouldPrint);
EBP = ir.regpool.getPhysicalRegisterSet().asIA32().getEBP();
ESP = ir.regpool.getPhysicalRegisterSet().asIA32().getESP();
mcOffsets = ir.MIRInfo.mcOffsets;
this.ir = ir;
}
/**
* Should code created by this assembler instance be allocated in the
* hot code code space? The default answer for opt compiled code is yes
* (otherwise why are we opt compiling it?).
*/
@Override
protected boolean isHotCode() {
// Opt-compiled code is assumed to be frequently executed (see javadoc above).
return true;
}
/**
* Is the given operand an immediate? In the IA32 assembly, one
* cannot specify floating-point constants, so the possible
* immediates we may see are IntegerConstants and
* TrapConstants (a trap constant really is an integer), and
* jump targets for which the exact offset is known.
*
* @see #getImm
*
* @param op the operand being queried
* @return true if op represents an immediate
*/
boolean isImm(Operand op) {
// Integer constants, (on 64-bit) long constants and trap codes are
// always immediates.
if (op instanceof IntConstantOperand) return true;
if (VM.BuildFor64Addr && op instanceof LongConstantOperand) return true;
if (op instanceof TrapCodeOperand) return true;
// A branch target whose machine code offset is already known also
// counts as an immediate.
return op.isBranch() && mcOffsets.getMachineCodeOffset(op.asBranch().target) >= 0;
}
/**
* Return the IA32 ISA encoding of the immediate value
* represented by the given operand. This method assumes the
* operand is an immediate and will likely throw a
* ClassCastException if this not the case. It treats
* BranchOperands somewhat differently than isImm does: in
* case a branch target is not resolved, it simply returns a wrong
* answer and trusts the caller to ignore it. This behavior
* simplifies life when generating code for ImmOrLabel operands.
*
* @see #isImm
*
* @param op the operand being queried
* @return the immediate value represented by the operand
*/
int getImm(Operand op) {
if (op.isIntConstant()) {
return op.asIntConstant().value;
}
if (VM.BuildFor64Addr && op.isLongConstant()) {
long value = op.asLongConstant().value;
// A long constant must still fit a 32-bit immediate field.
if (!Bits.fits(value, 32)) {
throw new OptimizingCompilerException("Invalid immediate operand " + value);
}
return (int) value;
}
if (op.isBranch()) {
// used by ImmOrLabel stuff
return mcOffsets.getMachineCodeOffset(op.asBranch().target);
}
// Otherwise this must be a trap code; anything else is a caller error.
return ((TrapCodeOperand) op).getTrapCode() + RVM_TRAP_BASE;
}
/**
* Return the IA32 ISA encoding of the immediate value
* represented by the given operand. This method assumes the
* operand is an immediate and will likely throw a
* ClassCastException if this not the case. It treats
* BranchOperands somewhat differently than isImm does: in
* case a branch target is not resolved, it simply returns a wrong
* answer and trusts the caller to ignore it. This behavior
* simplifies life when generating code for ImmOrLabel operands.
*
* @see #isImm
*
* @param op the operand being queried
* @return the immediate value represented by the operand
*/
long getImmQuad(Operand op) {
// On 64-bit builds a long constant is returned in full; everything
// else is delegated to the 32-bit immediate logic.
boolean wideConstant = VM.BuildFor64Addr && op.isLongConstant();
return wideConstant ? op.asLongConstant().value : getImm(op);
}
/**
* Is the given operand a register operand?
*
* @see #getReg
*
* @param op the operand being queried
* @return true if op is an RegisterOperand
*/
boolean isReg(Operand op) {
// Delegates to the operand's own register test.
return op.isRegister();
}
/**
 * Is the given operand acceptable where a GPR is expected?
 * The opt compiler does not track register width on register operands,
 * so any register operand qualifies.
 *
 * @param op the operand being queried
 * @return true if op is a register operand
 */
boolean isGPR_Reg(Operand op) {
return isReg(op);
}
/**
 * Is the given operand acceptable where an FPR is expected?
 * The opt compiler does not track register width on register operands,
 * so any register operand qualifies.
 *
 * @param op the operand being queried
 * @return true if op is a register operand
 */
boolean isFPR_Reg(Operand op) {
return isReg(op);
}
/**
 * Is the given operand an MM register?
 *
 * @param op the operand being queried
 * @return always {@code false}
 */
boolean isMM_Reg(Operand op) {
return false; // MM registers not currently supported in the OPT compiler
}
boolean isXMM_Reg(Operand op) {
if (!op.isRegister()) {
return false;
}
// Only floating point values live in XMM registers here.
return op.isFloat() || op.isDouble();
}
/**
* Return the machine-level register number corresponding to a given integer
* Register. The optimizing compiler has its own notion of register
* numbers, which is not the same as the numbers used by the IA32 ISA. This
* method takes an optimizing compiler register and translates it into the
* appropriate machine-level encoding. This method is not applied directly to
* operands, but rather to register objects.
*
* @see #getBase
* @see #getIndex
*
* @param reg the register being queried
* @return the 3 bit machine-level encoding of reg
*/
private GPR getGPMachineRegister(Register reg) {
if (VM.VerifyAssertions) {
// Only integer physical registers may be encoded as GPRs.
int type = PhysicalRegisterSet.getPhysicalRegisterType(reg);
opt_assert(type == INT_REG);
}
return GPR.lookup(reg.number - FIRST_INT);
}
/**
* Return the machine-level register number corresponding to a
* given Register. The optimizing compiler has its own notion
* of register numbers, which is not the same as the numbers used
* by the IA32 ISA. This method takes an optimizing compiler
* register and translates it into the appropriate machine-level
* encoding. This method is not applied directly to operands, but
* rather to register objects.
*
* @see #getReg
* @see #getBase
* @see #getIndex
*
* @param reg the register being queried
* @return the 3 bit machine-level encoding of reg
*/
private MachineRegister getMachineRegister(Register reg) {
int type = PhysicalRegisterSet.getPhysicalRegisterType(reg);
if (type == INT_REG) {
return GPR.lookup(reg.number - FIRST_INT);
}
// Anything that is not an integer register must be a double register.
if (VM.VerifyAssertions) opt_assert(type == DOUBLE_REG);
if (!SSE2_FULL) {
return FPR.lookup(reg.number - FIRST_DOUBLE);
}
// With full SSE2 support, ordinary FP values live in XMM registers;
// the special registers ST0/ST1 still map onto the x87 stack.
if (reg.number < FIRST_SPECIAL) {
return XMM.lookup(reg.number - FIRST_DOUBLE);
}
if (reg.number == ST0) {
return FP0;
}
if (VM.VerifyAssertions) opt_assert(reg.number == ST1);
return FP1;
}
/**
* Given a register operand, return the 3 bit IA32 ISA encoding
* of that register. This function translates an optimizing
* compiler register operand into the 3 bit IA32 ISA encoding that
* can be passed to the Assembler. This function assumes its
* operand is a register operand, and will blow up if it is not;
* use isReg to check operands passed to this method.
*
* @see #isReg
*
* @param op the register operand being queried
* @return the 3 bit IA32 ISA encoding of op
*/
MachineRegister getReg(Operand op) {
// Unwrap the operand and translate the physical register it names.
return getMachineRegister(op.asRegister().getRegister());
}
/**
 * Returns the GPR encoding for a register operand; blows up if the
 * operand is not a register operand naming an integer register.
 *
 * @param op the register operand being queried
 * @return the GPR encoding of op
 */
GPR getGPR_Reg(Operand op) {
return getGPMachineRegister(op.asRegister().getRegister());
}
/**
 * Returns the FPR encoding for a register operand; blows up (via a
 * ClassCastException) if the operand does not map to an x87 register.
 *
 * @param op the register operand being queried
 * @return the FPR encoding of op
 */
FPR getFPR_Reg(Operand op) {
return (FPR)getMachineRegister(op.asRegister().getRegister());
}
/**
 * MM registers are not supported by the opt compiler, so this method
 * unconditionally throws.
 *
 * @param op the register operand being queried
 * @return never returns normally
 */
MM getMM_Reg(Operand op) {
throw new OptimizingCompilerException("MM registers not currently supported in the opt compiler");
}
/**
 * Returns the XMM encoding for a register operand; blows up (via a
 * ClassCastException) if the operand does not map to an XMM register.
 *
 * @param op the register operand being queried
 * @return the XMM encoding of op
 */
XMM getXMM_Reg(Operand op) {
return (XMM)getMachineRegister(op.asRegister().getRegister());
}
/**
* Given a memory operand, return the 3 bit IA32 ISA encoding
* of its base regsiter. This function translates the optimizing
* compiler register operand representing the base of the given
* memory operand into the 3 bit IA32 ISA encoding that
* can be passed to the Assembler. This function assumes its
* operand is a memory operand, and will blow up if it is not;
* one should confirm an operand really has a base register before
* invoking this method on it.
*
* @see #isRegDisp
* @see #isRegIdx
* @see #isRegInd
*
* @param op the register operand being queried
* @return the 3 bit IA32 ISA encoding of the base register of op
*/
GPR getBase(Operand op) {
// Caller must guarantee the operand is a memory operand with a base.
MemoryOperand mem = (MemoryOperand) op;
return getGPMachineRegister(mem.base.getRegister());
}
/**
* Given a memory operand, return the 3 bit IA32 ISA encoding
* of its index register. This function translates the optimizing
* compiler register operand representing the index of the given
* memory operand into the 3 bit IA32 ISA encoding that
* can be passed to the Assembler. This function assumes its
* operand is a memory operand, and will blow up if it is not;
* one should confirm an operand really has an index register before
* invoking this method on it.
*
* @see #isRegIdx
* @see #isRegOff
*
* @param op the register operand being queried
* @return the 3 bit IA32 ISA encoding of the index register of op
*/
GPR getIndex(Operand op) {
// Caller must guarantee the operand is a memory operand with an index.
MemoryOperand mem = (MemoryOperand) op;
return getGPMachineRegister(mem.index.getRegister());
}
/**
* Given a memory operand, return the 2 bit IA32 ISA encoding
* of its scale, suitable for passing to the Assembler to mask
* into a SIB byte. This function assumes its operand is a memory
* operand, and will blow up if it is not; one should confirm an
* operand really has a scale before invoking this method on it.
*
* @see #isRegIdx
* @see #isRegOff
*
* @param op the register operand being queried
* @return the IA32 ISA encoding of the scale of op
*/
short getScale(Operand op) {
// Caller must guarantee the operand is a memory operand.
MemoryOperand mem = (MemoryOperand) op;
return mem.scale;
}
/**
* Given a memory operand, return the displacement it contains,
* suitable for passing to the Assembler to be encoded into the
* instruction. This function assumes its operand is a memory
* operand, and will blow up if it is not; one should confirm an
* operand really is a memory operand before invoking this method
* on it.
*
* @see #isRegDisp
* @see #isAbs
*
* @param op the memory operand being queried
* @return the displacement of op
*/
Offset getDisp(Operand op) {
// Caller must guarantee the operand is a memory operand.
MemoryOperand mem = (MemoryOperand) op;
return mem.disp;
}
/**
* Determine if a given operand is a memory operand representing
* register-displacement mode addressing. This method takes an
* arbitrary operand, checks whether it is a memory operand, and,
* if it is, checks whether it should be assembled as IA32
* register-displacement mode. That is, does it have a non-zero
* displacement and a base register, but no scale and no index
* register?
*
* @param op the operand being queried
* @return true if op should be assembled as register-displacement mode
*/
boolean isRegDisp(Operand op) {
if (!(op instanceof MemoryOperand)) {
return false;
}
MemoryOperand mem = (MemoryOperand) op;
// base + displacement, but no index and no scale
return mem.base != null && mem.index == null && !mem.disp.isZero() && mem.scale == 0;
}
/**
* Determine if a given operand is a memory operand representing
* absolute mode addressing. This method takes an
* arbitrary operand, checks whether it is a memory operand, and,
* if it is, checks whether it should be assembled as IA32
* absolute address mode. That is, does it have a non-zero
* displacement, but no base register, no scale and no index register?
*
* @param op the operand being queried
* @return true if op should be assembled as absolute mode
*/
boolean isAbs(Operand op) {
if (!(op instanceof MemoryOperand)) {
return false;
}
MemoryOperand mem = (MemoryOperand) op;
// displacement only: no base, no index, no scale
return mem.base == null && mem.index == null && !mem.disp.isZero() && mem.scale == 0;
}
/**
* Determine if a given operand is a memory operand representing
* register-indirect mode addressing. This method takes an
* arbitrary operand, checks whether it is a memory operand, and,
* if it is, checks whether it should be assembled as IA32
* register-displacement mode. That is, does it have a base
* register, but no displacement, no scale and no index
* register?
*
* @param op the operand being queried
* @return true if op should be assembled as register-indirect mode
*/
boolean isRegInd(Operand op) {
if (!(op instanceof MemoryOperand)) {
return false;
}
MemoryOperand mem = (MemoryOperand) op;
// base register only: no displacement, no index, no scale
return mem.base != null && mem.index == null && mem.disp.isZero() && mem.scale == 0;
}
/**
* Determine if a given operand is a memory operand representing
* register-offset mode addressing. This method takes an
* arbitrary operand, checks whether it is a memory operand, and,
* if it is, checks whether it should be assembled as IA32
* register-offset mode. That is, does it have a non-zero
* displacement, a scale parameter and an index register, but no
* base register?
*
* @param op the operand being queried
* @return true if op should be assembled as register-offset mode
*/
boolean isRegOff(Operand op) {
if (!(op instanceof MemoryOperand)) {
return false;
}
MemoryOperand mem = (MemoryOperand) op;
// index register without a base register
return mem.base == null && mem.index != null;
}
/**
* Determine if a given operand is a memory operand representing
* the full glory of scaled-index-base addressing. This method takes an
* arbitrary operand, checks whether it is a memory operand, and,
* if it is, checks whether it should be assembled as IA32
* SIB mode. That is, does it have a non-zero
* displacement, a scale parameter, a base register and an index
* register?
*
* @param op the operand being queried
* @return true if op should be assembled as SIB mode
*/
boolean isRegIdx(Operand op) {
if (!(op instanceof MemoryOperand)) {
return false;
}
// SIB mode is the fallback: any memory operand that fits none of the
// simpler addressing modes.
return !(isAbs(op) || isRegInd(op) || isRegDisp(op) || isRegOff(op));
}
/**
* Return the condition bits of a given optimizing compiler
* condition operand. This method returns the IA32 ISA bits
* representing a given condition operand, suitable for passing to
* the Assembler to encode into the opcode of a SET, Jcc or
* CMOV instruction. This being IA32, there are of course
* exceptions in the binary encoding of conditions (see FCMOV),
* but the Assembler handles that. This function assumes its
* argument is an IA32ConditionOperand, and will blow up if it
* is not.
*
* @param op the operand being queried
* @return the bits that (usually) represent the given condition
* in the IA32 ISA */
byte getCond(Operand op) {
// Blows up (ClassCastException) if op is not a condition operand.
IA32ConditionOperand cond = (IA32ConditionOperand) op;
return cond.value;
}
/**
* Is the given operand an IA32 condition operand?
*
* @param op the operand being queried
* @return true if op is an IA32 condition operand
*/
boolean isCond(Operand op) {
// Pure type test; the condition bits themselves come from getCond.
return (op instanceof IA32ConditionOperand);
}
/**
* Return the label representing the target of the given branch
* operand. These labels are used to represent branch targets
* that have not yet been assembled, and so cannot be given
* concrete machine code offsets. All instructions are numbered
* just prior to assembly, and these numbers are used as labels.
* This method also returns 0 (not a valid label) for int
* constants to simplify generation of branches (the branch
* generation code will ignore this invalid label; it is used to
* prevent type exceptions). This method assumes its operand is a
* branch operand (or an int) and will blow up if it is not.
*
* @param op the branch operand being queried
* @return the label representing the branch target
*/
int getLabel(Operand op) {
if (op instanceof IntConstantOperand) {
// used by ImmOrLabel stuff
return 0;
}
int offset = mcOffsets.getMachineCodeOffset(op.asBranch().target);
// An unassembled target carries a negative pseudo-offset whose
// negation serves as the label; resolved targets need no label.
return (offset < 0) ? -offset : -1;
}
/**
* Is the given operand a branch target that requires a label?
*
* @see #getLabel
*
* @param op the operand being queried
* @return true if it represents a branch requiring a label target
*/
boolean isLabel(Operand op) {
// A branch target that has not been assembled yet still has a
// negative machine code offset and therefore needs a label.
return op.isBranch() && mcOffsets.getMachineCodeOffset(op.asBranch().target) < 0;
}
/**
* Is the given operand a branch target?
*
* @see #getLabel
* @see #isLabel
*
* @param op the operand being queried
* @return true if it represents a branch target
*/
@NoInline
boolean isImmOrLabel(Operand op) {
// TODO: Remove NoInlinePragma, work around for leave SSA bug
// Accepts both resolved immediates and unresolved branch targets.
return (isImm(op) || isLabel(op));
}
/**
* Does the given instruction operate upon byte-sized data? The
* opt compiler does not represent the size of register data, so
* this method typically looks at the memory operand, if any, and
* checks whether that is a byte. This does not work for the
* size-converting moves (MOVSX and MOVZX), and those instructions
* use the operator convention that __b on the end of the operator
* name means operate upon byte data.
*
* @param inst the instruction being queried
* @return {@code true} if inst operates upon byte data
*/
boolean isByte(Instruction inst) {
// Operators named with the __b convention always operate on bytes.
for (Operator candidate : byteSizeOperators) {
if (candidate == inst.operator()) {
return true;
}
}
// Otherwise the first memory operand, if any, determines the size.
int numOperands = inst.getNumberOfOperands();
for (int i = 0; i < numOperands; i++) {
Operand operand = inst.getOperand(i);
if (operand instanceof MemoryOperand) {
return ((MemoryOperand) operand).size == 1;
}
}
return false;
}
/**
* Does the given instruction operate upon word-sized data? The
* opt compiler does not represent the size of register data, so
* this method typically looks at the memory operand, if any, and
* checks whether that is a word. This does not work for the
* size-converting moves (MOVSX and MOVZX), and those instructions
* use the operator convention that __w on the end of the operator
* name means operate upon word data.
*
* @param inst the instruction being queried
* @return true if inst operates upon word data
*/
boolean isWord(Instruction inst) {
// Operators named with the __w convention always operate on words.
for (Operator candidate : wordSizeOperators) {
if (candidate == inst.operator()) {
return true;
}
}
// Otherwise the first memory operand, if any, determines the size.
int numOperands = inst.getNumberOfOperands();
for (int i = 0; i < numOperands; i++) {
Operand operand = inst.getOperand(i);
if (operand instanceof MemoryOperand) {
return ((MemoryOperand) operand).size == 2;
}
}
return false;
}
/**
* Does the given instruction operate upon quad-sized data
* <em>for the purposes of assembling the instruction</em>?
* The opt compiler does not represent the size of register data, so
* it is necessary to determine whether to emit a quad instruction.
* As described above, this method is only concerned with quad data
* that changes the instruction. For example, this method will return
* {@code false} for {@code FSTP}. {@code FSTP} operates on quad-data
* but the instruction's operation is the same for 32-bit and 64-bit
* mode, so it is not a quad instruction for the purposes of this method.
* <p>
* This method typically looks at the memory operand, if any, and
* checks whether that is a byte. This method also recognizes
* the operator convention that __q on the end of the operator
* name means operate upon quad data. Moreover, it looks at data types
* for x64.
*
* @param inst the instruction being queried
* @return {@code true} if instruction operates upon quad data <b>AND</b>
* is treated as a quad instruction for the purpose of assembling the
* machine code
*/
boolean isQuad(Instruction inst) {
// Operators named with the __q convention are always quad instructions.
for (Operator opr : quadSizeOperators) {
if (opr == inst.operator()) {
return true;
}
}
if (VM.BuildFor32Addr) {
// On 32-bit builds, only an 8-byte memory operand makes an
// instruction quad-sized; the first memory operand decides.
for (int i = 0; i < inst.getNumberOfOperands(); i++) {
Operand op = inst.getOperand(i);
if (op instanceof MemoryOperand) {
return ((MemoryOperand) op).size == 8;
}
}
} else { // 64-bit
for (int i = 0; i < inst.getNumberOfOperands(); i++) {
Operand op = inst.getOperand(i);
if (op == null) {
// The operand may only be null for a few cases.
if (VM.VerifyAssertions) {
// Return has 2 return operands on IA32 because it
// must be able to return a 64-bit value. On x64, only
// one of the operands is needed, the other one is null.
boolean isReturn = MIR_Return.conforms(inst);
if (isReturn) {
VM._assert(i == MIR_Return.indexOfVal2(inst));
}
// Guards may be null for divides
boolean isDivide = MIR_Divide.conforms(inst);
if (isDivide) {
VM._assert(i == MIR_Divide.indexOfGuard(inst));
}
// For all other cases, all operands must be non null
String msg = inst.toString();
VM._assert(isReturn || isDivide, msg);
}
continue;
}
// Long, reference and address data is 64 bits wide on x64.
if (op.isLong() || op.isRef() || op.isAddress()) {
return true;
}
boolean quadMemOp = false;
if (op instanceof MemoryOperand) {
quadMemOp = op.asMemory().size == 8;
} else if (op instanceof StackLocationOperand) {
quadMemOp = op.asStackLocation().getSize() == 8;
}
// Other operands may cause the instruction to be 64 bit
// even if this one won't
if (quadMemOp) {
return true;
}
}
}
return false;
}
/**
* Given a forward branch instruction and its target,
* determine (conservatively) if the relative offset to the
* target is less than 127 bytes
* @param start the branch instruction
* @param target the value of the mcOffset of the target label
* @return {@code true} if the relative offset will be less than 127, false otherwise
*/
protected boolean targetIsClose(Instruction start, int target) {
Instruction inst = start.nextInstructionInCodeOrder();
final int budget = 120; // slight fudge factor could be 127
int offset = 0;
while (true) {
// Bug fix: give up only once the accumulated size estimate exceeds
// the budget. The original test was inverted (offset <= budget),
// which made the method return false immediately on the first
// iteration (offset starts at 0), defeating short-branch selection.
if (offset > budget) return false;
if (mcOffsets.getMachineCodeOffset(inst) == target) {
return true;
}
offset += estimateSize(inst, offset);
inst = inst.nextInstructionInCodeOrder();
}
}
/**
 * Estimates the number of machine code bytes that assembling the given
 * MIR instruction will emit. The estimate is intended as an upper bound:
 * {@code generateCode} (under DEBUG_ESTIMATE) complains only when actual
 * emission exceeds the estimate, and {@code targetIsClose} relies on the
 * estimate to decide whether a short-form branch is safe.
 *
 * @param inst the MIR instruction to size
 * @param offset the current machine code offset in bytes (used only to
 *        size the alignment nop emitted at labels)
 * @return a conservative byte-count estimate for {@code inst}
 */
protected int estimateSize(Instruction inst, int offset) {
  switch (inst.getOpcode()) {
    case LABEL_opcode:
      return (4 - offset) & 3; // return size of nop required for alignment
    case BBEND_opcode:
    case READ_CEILING_opcode:
    case WRITE_FLOOR_opcode:
    case UNINT_BEGIN_opcode:
    case UNINT_END_opcode: {
      // these generate no code
      return 0;
    }
    case IA32_METHODSTART_opcode:
      return 12;
    // Generated from the same case in Assembler
    case IA32_ADC_opcode:
    case IA32_ADD_opcode:
    case IA32_AND_opcode:
    case IA32_OR_opcode:
    case IA32_SBB_opcode:
    case IA32_XOR_opcode: {
      // ALU group: these have a short form for 8-bit immediates,
      // hence operandCost(.., true)
      int size = 2; // opcode + modr/m
      size += operandCost(MIR_BinaryAcc.getResult(inst), true);
      size += operandCost(MIR_BinaryAcc.getValue(inst), true);
      return size;
    }
    case IA32_CMP_opcode: {
      int size = 2; // opcode + modr/m
      size += operandCost(MIR_Compare.getVal1(inst), true);
      size += operandCost(MIR_Compare.getVal2(inst), true);
      return size;
    }
    case IA32_TEST_opcode: {
      // TEST has no short-immediate form, hence operandCost(.., false)
      int size = 2; // opcode + modr/m
      size += operandCost(MIR_Test.getVal1(inst), false);
      size += operandCost(MIR_Test.getVal2(inst), false);
      return size;
    }
    case IA32_ADDSD_opcode:
    case IA32_SUBSD_opcode:
    case IA32_MULSD_opcode:
    case IA32_DIVSD_opcode:
    case IA32_XORPD_opcode:
    case IA32_SQRTSD_opcode:
    case IA32_ADDSS_opcode:
    case IA32_SUBSS_opcode:
    case IA32_MULSS_opcode:
    case IA32_DIVSS_opcode:
    case IA32_XORPS_opcode: {
      // SSE scalar arithmetic: prefix + two-byte opcode + modr/m
      int size = 4; // opcode + modr/m
      Operand value = MIR_BinaryAcc.getValue(inst);
      size += operandCost(value, false);
      return size;
    }
    case IA32_UCOMISS_opcode: {
      // single-precision compare: no operand-size prefix needed
      int size = 3; // opcode + modr/m
      Operand val2 = MIR_Compare.getVal2(inst);
      size += operandCost(val2, false);
      return size;
    }
    case IA32_UCOMISD_opcode: {
      // double-precision compare: one byte more than UCOMISS (prefix)
      int size = 4; // opcode + modr/m
      Operand val2 = MIR_Compare.getVal2(inst);
      size += operandCost(val2, false);
      return size;
    }
    case IA32_CVTSI2SS_opcode:
    case IA32_CVTSI2SD_opcode:
    case IA32_CVTSS2SD_opcode:
    case IA32_CVTSD2SS_opcode:
    case IA32_CVTSD2SI_opcode:
    case IA32_CVTTSD2SI_opcode:
    case IA32_CVTSS2SI_opcode:
    case IA32_CVTTSS2SI_opcode: {
      // SSE conversions
      int size = 4; // opcode + modr/m
      Operand result = MIR_Unary.getResult(inst);
      Operand value = MIR_Unary.getVal(inst);
      size += operandCost(result, false);
      size += operandCost(value, false);
      return size;
    }
    case IA32_CMPEQSD_opcode:
    case IA32_CMPLTSD_opcode:
    case IA32_CMPLESD_opcode:
    case IA32_CMPUNORDSD_opcode:
    case IA32_CMPNESD_opcode:
    case IA32_CMPNLTSD_opcode:
    case IA32_CMPNLESD_opcode:
    case IA32_CMPORDSD_opcode:
    case IA32_CMPEQSS_opcode:
    case IA32_CMPLTSS_opcode:
    case IA32_CMPLESS_opcode:
    case IA32_CMPUNORDSS_opcode:
    case IA32_CMPNESS_opcode:
    case IA32_CMPNLTSS_opcode:
    case IA32_CMPNLESS_opcode:
    case IA32_CMPORDSS_opcode: {
      // CMPccSD/CMPccSS take a trailing comparison-type immediate byte
      int size = 5; // opcode + modr/m + type
      Operand value = MIR_BinaryAcc.getValue(inst);
      size += operandCost(value, false);
      return size;
    }
    case IA32_MOVD_opcode:
    case IA32_MOVAPD_opcode:
    case IA32_MOVAPS_opcode:
    case IA32_MOVLPD_opcode:
    case IA32_MOVQ_opcode:
    case IA32_MOVSS_opcode:
    case IA32_MOVSD_opcode: {
      // SSE data movement
      int size = 4; // opcode + modr/m
      Operand result = MIR_Move.getResult(inst);
      Operand value = MIR_Move.getValue(inst);
      size += operandCost(result, false);
      size += operandCost(value, false);
      return size;
    }
    case IA32_PUSH_opcode: {
      Operand op = MIR_UnaryNoRes.getVal(inst);
      int size = 0;
      if (op instanceof RegisterOperand) {
        size += 1; // single-byte push reg encoding
      } else if (op instanceof IntConstantOperand) {
        if (fits(((IntConstantOperand) op).value, 8)) {
          size += 2; // push imm8
        } else {
          size += 5; // push imm32
        }
      } else {
        size += (2 + operandCost(op, true)); // push mem
      }
      return size;
    }
    case IA32_LEA_opcode: {
      int size = 2; // opcode + 1 byte modr/m
      size += operandCost(MIR_Lea.getResult(inst), false);
      size += operandCost(MIR_Lea.getValue(inst), false);
      return size;
    }
    case IA32_MOV_opcode: {
      int size = 2; // opcode + modr/m
      Operand result = MIR_Move.getResult(inst);
      Operand value = MIR_Move.getValue(inst);
      size += operandCost(result, false);
      size += operandCost(value, false);
      return size;
    }
    case MIR_LOWTABLESWITCH_opcode:
      // dispatch sequence plus a 4-byte table entry per target
      return MIR_LowTableSwitch.getNumberOfTargets(inst) * 4 + 14;
    case IA32_OFFSET_opcode:
      return 4;
    case IA32_JCC_opcode:
    case IA32_JMP_opcode:
      return 6; // assume long form
    case IA32_LOCK_opcode:
      return 1;
    case IG_PATCH_POINT_opcode:
      return 8;
    case IA32_INT_opcode:
      return 2;
    case IA32_RET_opcode:
      return 3;
    case IA32_CALL_opcode:
      Operand target = MIR_Call.getTarget(inst);
      if (isImmOrLabel(target)) {
        return 5; // opcode + 32bit immediate
      } else {
        return 2 + operandCost(target, false); // opcode + modr/m
      }
    default: {
      // fallback: assume worst-case 2-byte opcode and cost each operand
      int size = 3; // 2 bytes opcode + 1 byte modr/m
      for (Enumeration<Operand> opEnum = inst.getRootOperands(); opEnum.hasMoreElements();) {
        Operand op = opEnum.nextElement();
        size += operandCost(op, false);
      }
      return size;
    }
  }
}
/**
 * Estimates the number of machine code bytes, beyond the opcode and first
 * modr/m byte, needed to encode the given operand: operand-size override
 * prefix, SIB byte, displacement and/or immediate bytes.
 *
 * @param op the operand to cost; only memory and integer-constant
 *        operands contribute bytes, anything else costs 0
 * @param shortFormImmediate {@code true} if the instruction has a short
 *        form accepting a sign-extended 8-bit immediate
 * @return the number of additional bytes needed to encode {@code op}
 */
private int operandCost(Operand op, boolean shortFormImmediate) {
  if (op instanceof MemoryOperand) {
    MemoryOperand mop = (MemoryOperand) op;
    // If it's a 2byte mem location, we're going to need an override prefix
    int prefix = mop.size == 2 ? 1 : 0;
    // Deal with EBP weirdness: base==EBP always carries a displacement,
    // even when it is zero
    if (mop.base != null && mop.base.getRegister() == EBP) {
      if (mop.index != null) {
        // forced into SIB + 32 bit displacement no matter what disp is
        return prefix + 5;
      }
      if (fits(mop.disp, 8)) {
        return prefix + 1;
      } else {
        return prefix + 4;
      }
    }
    if (mop.index != null && mop.index.getRegister() == EBP) {
      // forced into SIB + 32 bit displacement no matter what disp is
      return prefix + 5;
    }
    // Deal with ESP weirdness -- requires SIB byte even when index is null
    if (mop.base != null && mop.base.getRegister() == ESP) {
      if (fits(mop.disp, 8)) {
        return prefix + 2;
      } else {
        return prefix + 5;
      }
    }
    if (mop.index == null) {
      // just displacement to worry about
      if (mop.disp.isZero()) {
        return prefix + 0;
      } else if (fits(mop.disp, 8)) {
        return prefix + 1;
      } else {
        return prefix + 4;
      }
    } else {
      // have a SIB
      if (mop.base == null && mop.scale != 0) {
        // forced to 32 bit displacement even if it would fit in 8
        return prefix + 5;
      } else {
        if (mop.disp.isZero()) {
          return prefix + 1;
        } else if (fits(mop.disp, 8)) {
          return prefix + 2;
        } else {
          return prefix + 5;
        }
      }
    }
  } else if (op instanceof IntConstantOperand) {
    // immediates: 1 byte if the short form applies and the value fits
    if (shortFormImmediate && fits(((IntConstantOperand) op).value, 8)) {
      return 1;
    } else {
      return 4;
    }
  } else {
    // registers and everything else encode within opcode + modr/m
    return 0;
  }
}
/**
 * Emit the given instruction, assuming that
 * it is a MIR_CondBranch instruction
 * and has a JCC operator.
 *
 * <p>Immediate targets are emitted directly; label targets are emitted
 * as a 2-byte short-form Jcc when the branch distance permits, otherwise
 * via the long-form label emitter.
 *
 * @param inst the instruction to assemble
 */
protected void doJCC(Instruction inst) {
  byte cond = getCond(MIR_CondBranch.getCond(inst));
  Operand target = MIR_CondBranch.getTarget(inst);
  if (isImm(target)) {
    emitJCC_Cond_Imm(cond, getImm(target));
    return;
  }
  if (VM.VerifyAssertions && !isLabel(target)) {
    throw new OptimizingCompilerException("Unexpected operand " + inst.toString());
  }
  // Pre-assembly machine code offsets are stored negated, so negate
  // again to recover the source position.
  int sourceLabel = -mcOffsets.getMachineCodeOffset(inst);
  int targetLabel = getLabel(target);
  int delta = targetLabel - sourceLabel;
  if (VM.VerifyAssertions) opt_assert(delta >= 0);
  boolean useShortForm = delta < 10 || (delta < 90 && targetIsClose(inst, -targetLabel));
  if (!useShortForm) {
    emitJCC_Cond_Label(cond, targetLabel);
  } else {
    int miStart = mi;
    ForwardReference shortRef = new ForwardReference.ShortBranch(mi, targetLabel);
    forwardRefs = ForwardReference.enqueue(forwardRefs, shortRef);
    setMachineCodes(mi++, (byte) (0x70 + cond)); // short-form Jcc opcode
    mi += 1; // leave space for displacement
    if (lister != null) lister.I(miStart, "J" + CONDITION[cond], 0);
  }
}
/**
 * Emit the given instruction, assuming that
 * it is a MIR_Branch instruction
 * and has a JMP operator.
 *
 * <p>Dispatches on the addressing form of the branch target: immediate,
 * label (short or long form depending on distance), register, absolute,
 * or one of the memory addressing modes.
 *
 * @param inst the instruction to assemble
 */
protected void doJMP(Instruction inst) {
  Operand target = MIR_Branch.getTarget(inst);
  if (isImm(target)) {
    emitJMP_Imm(getImm(target));
  } else if (isLabel(target)) {
    // Pre-assembly machine code offsets are stored negated, so negate
    // again to recover the source position.
    int sourceLabel = -mcOffsets.getMachineCodeOffset(inst);
    int targetLabel = getLabel(target);
    int delta = targetLabel - sourceLabel;
    if (VM.VerifyAssertions) opt_assert(delta >= 0);
    if (delta >= 10 && (delta >= 90 || !targetIsClose(inst, -targetLabel))) {
      emitJMP_Label(getLabel(target));
    } else {
      int miStart = mi;
      ForwardReference shortRef = new ForwardReference.ShortBranch(mi, targetLabel);
      forwardRefs = ForwardReference.enqueue(forwardRefs, shortRef);
      setMachineCodes(mi++, (byte) 0xEB); // short-form JMP opcode
      mi += 1; // leave space for displacement
      if (lister != null) lister.I(miStart, "JMP", 0);
    }
  } else if (isReg(target)) {
    emitJMP_Reg(getGPR_Reg(target));
  } else if (isAbs(target)) {
    emitJMP_Abs(getDisp(target).toWord().toAddress());
  } else if (isRegDisp(target)) {
    emitJMP_RegDisp(getBase(target), getDisp(target));
  } else if (isRegOff(target)) {
    emitJMP_RegOff(getIndex(target), getScale(target), getDisp(target));
  } else if (isRegIdx(target)) {
    emitJMP_RegIdx(getBase(target), getIndex(target), getScale(target), getDisp(target));
  } else if (isRegInd(target)) {
    emitJMP_RegInd(getBase(target));
  } else {
    if (VM.VerifyAssertions) {
      throw new OptimizingCompilerException("Unexpected operand " + inst.toString());
    }
  }
}
/**
 * Emit the given instruction, assuming that
 * it is a MIR_LowTableSwitch instruction
 * and has a MIR_LOWTABLESWITCH operator.
 *
 * <p>Emits the table-dispatch code followed by one 4-byte address entry
 * per switch case.
 *
 * @param inst the instruction to assemble
 */
protected void doLOWTABLESWITCH(Instruction inst) {
  // number of normal cases (0..numCases-1)
  int numCases = MIR_LowTableSwitch.getNumberOfTargets(inst);
  GPR methodStart = GPR.lookup(MIR_LowTableSwitch.getMethodStart(inst).getRegister().number);
  GPR index = GPR.lookup(MIR_LowTableSwitch.getIndex(inst).getRegister().number);
  emitTableswitchCode(methodStart, index);
  // Emit data for the tableswitch, i.e. the addresses that will be
  // loaded for the cases
  for (int c = 0; c < numCases; c++) {
    Operand caseTarget = MIR_LowTableSwitch.getTarget(inst, c);
    emitOFFSET_Imm_ImmOrLabel(c, getImm(caseTarget), getLabel(caseTarget));
  }
}
/**
 * Emits a move of a 64-bit immediate into a general purpose register.
 * Any other operand combination is rejected.
 *
 * @param inst the move instruction to assemble
 * @throws OptimizingCompilerException if the result is not a GPR, the
 *         instruction is not quad-width, or the value is not an immediate
 */
protected void doIMMQ_MOV(Instruction inst) {
  Operand result = MIR_Move.getResult(inst);
  // Only "GPR <- quad immediate" is supported by this helper.
  if (isGPR_Reg(result) && isQuad(inst)) {
    Operand value = MIR_Move.getValue(inst);
    if (isImm(value)) {
      emitMOV_Reg_Imm_Quad(getGPR_Reg(result), getImmQuad(value));
      return;
    }
  }
  throw new OptimizingCompilerException("Unexpected operand/imm " +
      inst.toString());
}
/**
 * Debugging support (return a printable representation of the machine code).
 *
 * <p>NOTE(review): not implemented for this assembler — the call to
 * {@code OptimizingCompilerException.TODO} means this method never
 * returns normally. The former "@param instr ... PowerPC instruction"
 * wording appears to be a copy-paste leftover from the PowerPC
 * assembler; this is the IA32 backend.
 *
 * @param instr an integer to be interpreted as a machine instruction
 * @param offset the mcoffset (in bytes) of the instruction
 * @return a printable representation of the machine code (never reached)
 */
public String disasm(int instr, int offset) {
  OptimizingCompilerException.TODO("Assembler: disassembler");
  return null; // unreachable: TODO(..) raises
}
/**
 * Assembles the given instruction, appending its machine code to the
 * current machine code buffer (implemented by the generated
 * architecture-specific assembler subclass).
 *
 * @param inst the instruction to assemble
 */
public abstract void doInst(Instruction inst);
/**
 * Generates machine code into ir.machinecode.
 *
 * <p>Pass 1 tags every instruction with a negative machine code offset
 * equal to its position in code order; pass 2 assembles each
 * instruction, optionally cross-checking the size estimator against the
 * bytes actually emitted.
 *
 * @return the number of machinecode instructions generated
 */
public int generateCode() {
  // Pass 1: set the mc offset of all instructions to their negative
  // position. A positive value in their position means they have been
  // created by the assembler.
  int position = 0;
  for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
    position++;
    mcOffsets.setMachineCodeOffset(p, -position);
    if (p.operator() == MIR_LOWTABLESWITCH) {
      // Table switch kludge, as these will occupy multiple slots in the
      // generated assembler
      position += MIR_LowTableSwitch.getNumberOfTargets(p);
    }
  }
  // Pass 2: assemble.
  for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
    if (!DEBUG_ESTIMATE) {
      doInst(p);
    } else {
      int start = getMachineCodeIndex();
      int estimate = estimateSize(p, start);
      doInst(p);
      int end = getMachineCodeIndex();
      if (end - start > estimate) {
        // estimateSize is supposed to be an upper bound; report violations
        VM.sysWriteln("Bad estimate: " + (end - start) + " " + estimate + " " + p);
        VM.sysWrite("\tMachine code: ");
        writeLastInstruction(start);
        VM.sysWriteln();
      }
    }
  }
  if (lister != null) lister.printListing();
  ir.MIRInfo.machinecode = getMachineCodes();
  return ir.MIRInfo.machinecode.length();
}
}