Diffstat (limited to 'lib/Target/ARM/AsmParser/ARMAsmParser.cpp')
-rw-r--r--  lib/Target/ARM/AsmParser/ARMAsmParser.cpp  532
1 files changed, 384 insertions, 148 deletions
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 97b642c99f80..807d62547337 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -527,6 +527,7 @@ class ARMAsmParser : public MCTargetAsmParser {
OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
+ OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
@@ -561,6 +562,8 @@ class ARMAsmParser : public MCTargetAsmParser {
bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
bool isITBlockTerminator(MCInst &Inst) const;
void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
+ bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
+ bool Load, bool ARMMode, bool Writeback);
public:
enum ARMMatchResultTy {
@@ -644,6 +647,7 @@ class ARMOperand : public MCParsedAsmOperand {
k_Immediate,
k_MemBarrierOpt,
k_InstSyncBarrierOpt,
+ k_TraceSyncBarrierOpt,
k_Memory,
k_PostIndexRegister,
k_MSRMask,
@@ -694,6 +698,10 @@ class ARMOperand : public MCParsedAsmOperand {
ARM_ISB::InstSyncBOpt Val;
};
+ struct TSBOptOp {
+ ARM_TSB::TraceSyncBOpt Val;
+ };
+
struct IFlagsOp {
ARM_PROC::IFlags Val;
};
@@ -790,6 +798,7 @@ class ARMOperand : public MCParsedAsmOperand {
struct CoprocOptionOp CoprocOption;
struct MBOptOp MBOpt;
struct ISBOptOp ISBOpt;
+ struct TSBOptOp TSBOpt;
struct ITMaskOp ITMask;
struct IFlagsOp IFlags;
struct MMaskOp MMask;
@@ -879,6 +888,11 @@ public:
return ISBOpt.Val;
}
+ ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
+ assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
+ return TSBOpt.Val;
+ }
+
ARM_PROC::IFlags getProcIFlags() const {
assert(Kind == k_ProcIFlags && "Invalid access!");
return IFlags.Val;
@@ -1028,7 +1042,12 @@ public:
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
- int64_t Value = -CE->getValue();
+ // isImm0_4095Neg is used with 32-bit immediates only.
+ // 32-bit immediates are zero extended to 64-bit when parsed,
+ // thus simple -CE->getValue() results in a big negative number,
+ // not a small positive number as intended
+ if ((CE->getValue() >> 32) > 0) return false;
+ uint32_t Value = -static_cast<uint32_t>(CE->getValue());
return Value > 0 && Value < 4096;
}
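
A minimal standalone sketch (not part of the patch) of the overflow this check guards against; the example immediate 0xFFFFFFFF is assumed for illustration:

#include <cassert>
#include <cstdint>

int main() {
  int64_t CE = 0xFFFFFFFF;                     // 32-bit immediate, zero-extended when parsed
  int64_t Wrong = -CE;                         // -4294967295: a large negative number
  uint32_t Right = -static_cast<uint32_t>(CE); // wraps modulo 2^32 to 1
  assert(Wrong < 0);                           // never lands in (0, 4096)
  assert(Right > 0 && Right < 4096);           // accepted as a negated imm0_4095
  return 0;
}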
@@ -1150,10 +1169,31 @@ public:
bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
- bool isMem() const override { return Kind == k_Memory; }
+ bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
+ bool isMem() const override {
+ if (Kind != k_Memory)
+ return false;
+ if (Memory.BaseRegNum &&
+ !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
+ return false;
+ if (Memory.OffsetRegNum &&
+ !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
+ return false;
+ return true;
+ }
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
- bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
- bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
+ bool isRegShiftedReg() const {
+ return Kind == k_ShiftedRegister &&
+ ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
+ RegShiftedReg.SrcReg) &&
+ ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
+ RegShiftedReg.ShiftReg);
+ }
+ bool isRegShiftedImm() const {
+ return Kind == k_ShiftedImmediate &&
+ ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
+ RegShiftedImm.SrcReg);
+ }
bool isRotImm() const { return Kind == k_RotateImmediate; }
bool isModImm() const { return Kind == k_ModifiedImmediate; }
@@ -1192,9 +1232,12 @@ public:
bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
- bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
+ bool isPostIdxRegShifted() const {
+ return Kind == k_PostIndexRegister &&
+ ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
+ }
bool isPostIdxReg() const {
- return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
+ return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isMem())
@@ -1331,10 +1374,10 @@ public:
}
bool isAM3Offset() const {
- if (Kind != k_Immediate && Kind != k_PostIndexRegister)
+ if (isPostIdxReg())
+ return true;
+ if (!isImm())
return false;
- if (Kind == k_PostIndexRegister)
- return PostIdxReg.ShiftTy == ARM_AM::no_shift;
// Immediate offset in range [-255, 255].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
@@ -1834,7 +1877,22 @@ public:
return ARM_AM::isNEONi32splat(~Value);
}
- bool isNEONByteReplicate(unsigned NumBytes) const {
+ static bool isValidNEONi32vmovImm(int64_t Value) {
+ // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
+ // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
+ return ((Value & 0xffffffffffffff00) == 0) ||
+ ((Value & 0xffffffffffff00ff) == 0) ||
+ ((Value & 0xffffffffff00ffff) == 0) ||
+ ((Value & 0xffffffff00ffffff) == 0) ||
+ ((Value & 0xffffffffffff00ff) == 0xff) ||
+ ((Value & 0xffffffffff00ffff) == 0xffff);
+ }
+
+ bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
+ assert((Width == 8 || Width == 16 || Width == 32) &&
+ "Invalid element width");
+ assert(NumElems * Width <= 64 && "Invalid result width");
+
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -1844,18 +1902,49 @@ public:
int64_t Value = CE->getValue();
if (!Value)
return false; // Don't bother with zero.
+ if (Inv)
+ Value = ~Value;
- unsigned char B = Value & 0xff;
- for (unsigned i = 1; i < NumBytes; ++i) {
- Value >>= 8;
- if ((Value & 0xff) != B)
+ uint64_t Mask = (1ull << Width) - 1;
+ uint64_t Elem = Value & Mask;
+ if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
+ return false;
+ if (Width == 32 && !isValidNEONi32vmovImm(Elem))
+ return false;
+
+ for (unsigned i = 1; i < NumElems; ++i) {
+ Value >>= Width;
+ if ((Value & Mask) != Elem)
return false;
}
return true;
}
- bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
- bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
+ bool isNEONByteReplicate(unsigned NumBytes) const {
+ return isNEONReplicate(8, NumBytes, false);
+ }
+
+ static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
+ assert((FromW == 8 || FromW == 16 || FromW == 32) &&
+ "Invalid source width");
+ assert((ToW == 16 || ToW == 32 || ToW == 64) &&
+ "Invalid destination width");
+ assert(FromW < ToW && "ToW is not less than FromW");
+ }
+
+ template<unsigned FromW, unsigned ToW>
+ bool isNEONmovReplicate() const {
+ checkNeonReplicateArgs(FromW, ToW);
+ if (ToW == 64 && isNEONi64splat())
+ return false;
+ return isNEONReplicate(FromW, ToW / FromW, false);
+ }
+
+ template<unsigned FromW, unsigned ToW>
+ bool isNEONinvReplicate() const {
+ checkNeonReplicateArgs(FromW, ToW);
+ return isNEONReplicate(FromW, ToW / FromW, true);
+ }
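
For reference, a standalone sketch of the replicate loop the new isNEONReplicate is built around; it keeps only the element-repetition test and omits the extra half-word and isValidNEONi32vmovImm constraints applied above:

#include <cassert>
#include <cstdint>

static bool isReplicate(uint64_t Value, unsigned Width, unsigned NumElems) {
  uint64_t Mask = (1ull << Width) - 1;   // Width is 8, 16 or 32, as asserted above
  uint64_t Elem = Value & Mask;
  for (unsigned i = 1; i < NumElems; ++i) {
    Value >>= Width;
    if ((Value & Mask) != Elem)
      return false;
  }
  return Elem != 0;                      // zero is not worth replicating
}

int main() {
  assert(isReplicate(0xababababababababULL, 8, 8));   // same byte in every lane
  assert(isReplicate(0x00ab00ab00ab00abULL, 16, 4));  // same half-word in every lane
  assert(!isReplicate(0xab00ab00ab00abcdULL, 8, 8));  // low byte breaks the pattern
  return 0;
}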
bool isNEONi32vmov() const {
if (isNEONByteReplicate(4))
@@ -1866,16 +1955,7 @@ public:
// Must be a constant.
if (!CE)
return false;
- int64_t Value = CE->getValue();
- // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
- // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
- // FIXME: This is probably wrong and a copy and paste from previous example
- return (Value >= 0 && Value < 256) ||
- (Value >= 0x0100 && Value <= 0xff00) ||
- (Value >= 0x010000 && Value <= 0xff0000) ||
- (Value >= 0x01000000 && Value <= 0xff000000) ||
- (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
- (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
+ return isValidNEONi32vmovImm(CE->getValue());
}
bool isNEONi32vmovNeg() const {
@@ -1883,16 +1963,7 @@ public:
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
- int64_t Value = ~CE->getValue();
- // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
- // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
- // FIXME: This is probably wrong and a copy and paste from previous example
- return (Value >= 0 && Value < 256) ||
- (Value >= 0x0100 && Value <= 0xff00) ||
- (Value >= 0x010000 && Value <= 0xff0000) ||
- (Value >= 0x01000000 && Value <= 0xff000000) ||
- (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
- (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
+ return isValidNEONi32vmovImm(~CE->getValue());
}
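
The mask-based form can be checked in isolation; this is the same isValidNEONi32vmovImm logic restated outside the class, with assumed example immediates:

#include <cassert>
#include <cstdint>

static bool isValidNEONi32vmovImm(int64_t Value) {
  // Set bits confined to one byte (X000, 0X00, 00X0, 000X), plus the
  // 00Xf and 0Xff forms accepted for VMOV/VMVN.
  return ((Value & 0xffffffffffffff00) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0) ||
         ((Value & 0xffffffffff00ffff) == 0) ||
         ((Value & 0xffffffff00ffffff) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0xff) ||
         ((Value & 0xffffffffff00ffff) == 0xffff);
}

int main() {
  assert(isValidNEONi32vmovImm(0x00ab0000));   // one non-zero byte
  assert(isValidNEONi32vmovImm(0x0000abff));   // 00Xf form
  assert(!isValidNEONi32vmovImm(0x00ab00cd));  // two scattered bytes: rejected
  return 0;
}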
bool isNEONi64splat() const {
@@ -2189,7 +2260,7 @@ public:
// The operand is actually an imm0_4095, but we have its
// negation in the assembly source, so twiddle it here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::createImm(-CE->getValue()));
+ Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
}
void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
@@ -2234,6 +2305,11 @@ public:
Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
}
+ void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
+ }
+
void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
@@ -2710,62 +2786,87 @@ public:
Inst.addOperand(MCOperand::createImm(Value));
}
- void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
+ void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- unsigned Value = CE->getValue();
assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
Inst.getOpcode() == ARM::VMOVv16i8) &&
- "All vmvn instructions that wants to replicate non-zero byte "
- "always must be replaced with VMOVv8i8 or VMOVv16i8.");
- unsigned B = ((~Value) & 0xff);
+ "All instructions that wants to replicate non-zero byte "
+ "always must be replaced with VMOVv8i8 or VMOVv16i8.");
+ unsigned Value = CE->getValue();
+ if (Inv)
+ Value = ~Value;
+ unsigned B = Value & 0xff;
B |= 0xe00; // cmode = 0b1110
Inst.addOperand(MCOperand::createImm(B));
}
- void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
+ void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- // The immediate encodes the type of constant as well as the value.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- unsigned Value = CE->getValue();
+ addNEONi8ReplicateOperands(Inst, true);
+ }
+
+ static unsigned encodeNeonVMOVImmediate(unsigned Value) {
if (Value >= 256 && Value <= 0xffff)
Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
else if (Value > 0xffff && Value <= 0xffffff)
Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
else if (Value > 0xffffff)
Value = (Value >> 24) | 0x600;
- Inst.addOperand(MCOperand::createImm(Value));
+ return Value;
}
- void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
+ void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- unsigned Value = CE->getValue();
- assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
- Inst.getOpcode() == ARM::VMOVv16i8) &&
- "All instructions that wants to replicate non-zero byte "
- "always must be replaced with VMOVv8i8 or VMOVv16i8.");
- unsigned B = Value & 0xff;
- B |= 0xe00; // cmode = 0b1110
- Inst.addOperand(MCOperand::createImm(B));
+ unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
+ Inst.addOperand(MCOperand::createImm(Value));
+ }
+
+ void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addNEONi8ReplicateOperands(Inst, false);
+ }
+
+ void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
+ Inst.getOpcode() == ARM::VMOVv8i16 ||
+ Inst.getOpcode() == ARM::VMVNv4i16 ||
+ Inst.getOpcode() == ARM::VMVNv8i16) &&
+ "All instructions that want to replicate non-zero half-word "
+ "always must be replaced with V{MOV,MVN}v{4,8}i16.");
+ uint64_t Value = CE->getValue();
+ unsigned Elem = Value & 0xffff;
+ if (Elem >= 256)
+ Elem = (Elem >> 8) | 0x200;
+ Inst.addOperand(MCOperand::createImm(Elem));
}
void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- unsigned Value = ~CE->getValue();
- if (Value >= 256 && Value <= 0xffff)
- Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
- else if (Value > 0xffff && Value <= 0xffffff)
- Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
- else if (Value > 0xffffff)
- Value = (Value >> 24) | 0x600;
+ unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
Inst.addOperand(MCOperand::createImm(Value));
}
+ void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
+ Inst.getOpcode() == ARM::VMOVv4i32 ||
+ Inst.getOpcode() == ARM::VMVNv2i32 ||
+ Inst.getOpcode() == ARM::VMVNv4i32) &&
+ "All instructions that want to replicate non-zero word "
+ "always must be replaced with V{MOV,MVN}v{2,4}i32.");
+ uint64_t Value = CE->getValue();
+ unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
+ Inst.addOperand(MCOperand::createImm(Elem));
+ }
+
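
A standalone restatement of encodeNeonVMOVImmediate, the helper the i32 VMOV/VMVN paths above now share; the expected encodings in the asserts follow directly from the shift/cmode cases:

#include <cassert>

static unsigned encodeNeonVMOVImmediate(unsigned Value) {
  if (Value >= 256 && Value <= 0xffff)
    Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
  else if (Value > 0xffff && Value <= 0xffffff)
    Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
  else if (Value > 0xffffff)
    Value = (Value >> 24) | 0x600;
  return Value;
}

int main() {
  assert(encodeNeonVMOVImmediate(0x000000ab) == 0x0ab);  // byte 0
  assert(encodeNeonVMOVImmediate(0x0000ab00) == 0x2ab);  // byte 1
  assert(encodeNeonVMOVImmediate(0x00ab0000) == 0x4ab);  // byte 2
  assert(encodeNeonVMOVImmediate(0xab000000) == 0x6ab);  // byte 3
  return 0;
}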
void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
@@ -3064,6 +3165,15 @@ public:
return Op;
}
+ static std::unique_ptr<ARMOperand>
+ CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
+ Op->TSBOpt.Val = Opt;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
SMLoc S) {
auto Op = make_unique<ARMOperand>(k_ProcIFlags);
@@ -3133,6 +3243,9 @@ void ARMOperand::print(raw_ostream &OS) const {
case k_InstSyncBarrierOpt:
OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
break;
+ case k_TraceSyncBarrierOpt:
+ OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
+ break;
case k_Memory:
OS << "<memory "
<< " base:" << Memory.BaseRegNum;
@@ -4122,6 +4235,24 @@ ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
return MatchOperand_Success;
}
+OperandMatchResultTy
+ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
+ MCAsmParser &Parser = getParser();
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+
+ if (Tok.isNot(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
+
+ if (!Tok.getString().equals_lower("csync"))
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat identifier token.
+
+ Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
+ return MatchOperand_Success;
+}
+
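
A rough sketch of the token check the new parser performs: the only accepted operand is a case-insensitive "csync"; anything else is left alone as NoMatch. The std::string helper is an illustration, not the StringRef code above:

#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>

static bool isCsyncToken(std::string Tok) {
  std::transform(Tok.begin(), Tok.end(), Tok.begin(),
                 [](unsigned char C) { return std::tolower(C); });
  return Tok == "csync";
}

int main() {
  assert(isCsyncToken("CSYNC"));  // becomes an ARM_TSB::CSYNC operand
  assert(!isCsyncToken("sy"));    // not a trace sync barrier option
  return 0;
}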
/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
OperandMatchResultTy
ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
@@ -4215,6 +4346,18 @@ ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
+
+ if (Tok.is(AsmToken::Integer)) {
+ int64_t Val = Tok.getIntVal();
+ if (Val > 255 || Val < 0) {
+ return MatchOperand_NoMatch;
+ }
+ unsigned SYSmvalue = Val & 0xFF;
+ Parser.Lex();
+ Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
+ return MatchOperand_Success;
+ }
+
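
A sketch of the new integer path, assuming a bare SYSm value is given: anything in [0, 255] is masked and turned into an MSR mask operand, anything else falls through to the existing identifier handling:

#include <cassert>

static bool acceptSYSm(long long Val, unsigned &SYSm) {
  if (Val < 0 || Val > 255)
    return false;                        // NoMatch: try the named-mask path
  SYSm = static_cast<unsigned>(Val) & 0xFF;
  return true;
}

int main() {
  unsigned SYSm = 0;
  assert(acceptSYSm(8, SYSm) && SYSm == 8);
  assert(!acceptSYSm(256, SYSm));        // out of range for a SYSm encoding
  return 0;
}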
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
StringRef Mask = Tok.getString();
@@ -5450,7 +5593,7 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
return false;
}
-/// \brief Given a mnemonic, split out possible predication code and carry
+/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
@@ -5541,7 +5684,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
return Mnemonic;
}
-/// \brief Given a canonical mnemonic, determine if the instruction ever allows
+/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
@@ -5585,6 +5728,7 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
Mnemonic != "stc2" && Mnemonic != "stc2l" &&
+ Mnemonic != "tsb" &&
!Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
} else if (isThumbOne()) {
if (hasV6MOps())
@@ -5595,7 +5739,7 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
CanAcceptPredicationCode = true;
}
-// \brief Some Thumb instructions have two operand forms that are not
+// Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
@@ -6214,6 +6358,65 @@ bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
return false;
}
+bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
+ const OperandVector &Operands,
+ bool Load, bool ARMMode, bool Writeback) {
+ unsigned RtIndex = Load || !Writeback ? 0 : 1;
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
+
+ if (ARMMode) {
+ // Rt can't be R14.
+ if (Rt == 14)
+ return Error(Operands[3]->getStartLoc(),
+ "Rt can't be R14");
+
+ // Rt must be even-numbered.
+ if ((Rt & 1) == 1)
+ return Error(Operands[3]->getStartLoc(),
+ "Rt must be even-numbered");
+
+ // Rt2 must be Rt + 1.
+ if (Rt2 != Rt + 1) {
+ if (Load)
+ return Error(Operands[3]->getStartLoc(),
+ "destination operands must be sequential");
+ else
+ return Error(Operands[3]->getStartLoc(),
+ "source operands must be sequential");
+ }
+
+ // FIXME: Diagnose m == 15
+ // FIXME: Diagnose ldrd with m == t || m == t2.
+ }
+
+ if (!ARMMode && Load) {
+ if (Rt2 == Rt)
+ return Error(Operands[3]->getStartLoc(),
+ "destination operands can't be identical");
+ }
+
+ if (Writeback) {
+ unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
+
+ if (Rn == Rt || Rn == Rt2) {
+ if (Load)
+ return Error(Operands[3]->getStartLoc(),
+ "base register needs to be different from destination "
+ "registers");
+ else
+ return Error(Operands[3]->getStartLoc(),
+ "source register and base register can't be identical");
+ }
+
+ // FIXME: Diagnose ldrd/strd with writeback and n == 15.
+ // (Except the immediate form of ldrd?)
+ }
+
+ return false;
+}
+
+
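
The ARM-mode register constraints the helper enforces can be summarized in a few lines; this is a simplified standalone sketch, not the parser's operand handling:

#include <cassert>

static bool armLdrdRegsOk(unsigned Rt, unsigned Rt2) {
  if (Rt == 14)
    return false;           // Rt can't be R14 (LR)
  if (Rt & 1)
    return false;           // Rt must be even-numbered
  return Rt2 == Rt + 1;     // Rt2 must be the next register
}

int main() {
  assert(armLdrdRegsOk(0, 1));   // ldrd r0, r1, [...]  -> accepted
  assert(!armLdrdRegsOk(1, 2));  // odd Rt              -> rejected
  assert(!armLdrdRegsOk(2, 4));  // non-sequential Rt2  -> rejected
  return 0;
}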
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
const OperandVector &Operands) {
@@ -6227,7 +6430,8 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
// The instruction must be predicable.
if (!MCID.isPredicable())
return Error(Loc, "instructions in IT block must be predicable");
- unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
+ ARMCC::CondCodes Cond = ARMCC::CondCodes(
+ Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
if (Cond != currentITCond()) {
// Find the condition code Operand to get its SMLoc information.
SMLoc CondLoc;
@@ -6235,9 +6439,9 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
CondLoc = Operands[I]->getStartLoc();
return Error(CondLoc, "incorrect condition in IT block; got '" +
- StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
- "', but expected '" +
- ARMCondCodeToString(ARMCC::CondCodes(currentITCond())) + "'");
+ StringRef(ARMCondCodeToString(Cond)) +
+ "', but expected '" +
+ ARMCondCodeToString(currentITCond()) + "'");
}
// Check for non-'al' condition codes outside of the IT block.
} else if (isThumbTwo() && MCID.isPredicable() &&
@@ -6259,51 +6463,43 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
const unsigned Opcode = Inst.getOpcode();
switch (Opcode) {
+ case ARM::t2IT: {
+ // Encoding is unpredictable if it ever results in a notional 'NV'
+ // predicate. Since we don't parse 'NV' directly this means an 'AL'
+ // predicate with an "else" mask bit.
+ unsigned Cond = Inst.getOperand(0).getImm();
+ unsigned Mask = Inst.getOperand(1).getImm();
+
+ // Mask hasn't been modified to the IT instruction encoding yet so
+ // conditions only allowing a 't' are a block of 1s starting at bit 3
+ // followed by all 0s. Easiest way is to just list the 4 possibilities.
+ if (Cond == ARMCC::AL && Mask != 8 && Mask != 12 && Mask != 14 &&
+ Mask != 15)
+ return Error(Loc, "unpredictable IT predicate sequence");
+ break;
+ }
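
A small sketch of the mask test: before the mask is rewritten into the IT encoding, an all-'t' block is exactly one of 8, 12, 14 or 15, and any other mask combined with an AL condition is rejected as unpredictable:

#include <cassert>

static bool isAllThenMask(unsigned Mask) {
  return Mask == 8 || Mask == 12 || Mask == 14 || Mask == 15;
}

int main() {
  assert(isAllThenMask(12));   // a block of 't' slots: fine with AL
  assert(!isAllThenMask(10));  // not an all-'t' block: rejected when cond is AL
  return 0;
}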
case ARM::LDRD:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
+ /*Writeback*/false))
+ return true;
+ break;
case ARM::LDRD_PRE:
- case ARM::LDRD_POST: {
- const unsigned RtReg = Inst.getOperand(0).getReg();
-
- // Rt can't be R14.
- if (RtReg == ARM::LR)
- return Error(Operands[3]->getStartLoc(),
- "Rt can't be R14");
-
- const unsigned Rt = MRI->getEncodingValue(RtReg);
- // Rt must be even-numbered.
- if ((Rt & 1) == 1)
- return Error(Operands[3]->getStartLoc(),
- "Rt must be even-numbered");
-
- // Rt2 must be Rt + 1.
- const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
- if (Rt2 != Rt + 1)
- return Error(Operands[3]->getStartLoc(),
- "destination operands must be sequential");
-
- if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
- const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
- // For addressing modes with writeback, the base register needs to be
- // different from the destination registers.
- if (Rn == Rt || Rn == Rt2)
- return Error(Operands[3]->getStartLoc(),
- "base register needs to be different from destination "
- "registers");
- }
-
- return false;
- }
+ case ARM::LDRD_POST:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
+ /*Writeback*/true))
+ return true;
+ break;
case ARM::t2LDRDi8:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
+ /*Writeback*/false))
+ return true;
+ break;
case ARM::t2LDRD_PRE:
- case ARM::t2LDRD_POST: {
- // Rt2 must be different from Rt.
- unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
- unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
- if (Rt2 == Rt)
- return Error(Operands[3]->getStartLoc(),
- "destination operands can't be identical");
- return false;
- }
+ case ARM::t2LDRD_POST:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
+ /*Writeback*/true))
+ return true;
+ break;
case ARM::t2BXJ: {
const unsigned RmReg = Inst.getOperand(0).getReg();
// Rm = SP is no longer unpredictable in v8-A
@@ -6312,35 +6508,39 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
"r13 (SP) is an unpredictable operand to BXJ");
return false;
}
- case ARM::STRD: {
- // Rt2 must be Rt + 1.
- unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
- unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
- if (Rt2 != Rt + 1)
- return Error(Operands[3]->getStartLoc(),
- "source operands must be sequential");
- return false;
- }
+ case ARM::STRD:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
+ /*Writeback*/false))
+ return true;
+ break;
case ARM::STRD_PRE:
- case ARM::STRD_POST: {
- // Rt2 must be Rt + 1.
- unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
- unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
- if (Rt2 != Rt + 1)
- return Error(Operands[3]->getStartLoc(),
- "source operands must be sequential");
- return false;
- }
+ case ARM::STRD_POST:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
+ /*Writeback*/true))
+ return true;
+ break;
+ case ARM::t2STRD_PRE:
+ case ARM::t2STRD_POST:
+ if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
+ /*Writeback*/true))
+ return true;
+ break;
case ARM::STR_PRE_IMM:
case ARM::STR_PRE_REG:
+ case ARM::t2STR_PRE:
case ARM::STR_POST_IMM:
case ARM::STR_POST_REG:
+ case ARM::t2STR_POST:
case ARM::STRH_PRE:
+ case ARM::t2STRH_PRE:
case ARM::STRH_POST:
+ case ARM::t2STRH_POST:
case ARM::STRB_PRE_IMM:
case ARM::STRB_PRE_REG:
+ case ARM::t2STRB_PRE:
case ARM::STRB_POST_IMM:
- case ARM::STRB_POST_REG: {
+ case ARM::STRB_POST_REG:
+ case ARM::t2STRB_POST: {
// Rt must be different from Rn.
const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
@@ -6352,18 +6552,28 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
}
case ARM::LDR_PRE_IMM:
case ARM::LDR_PRE_REG:
+ case ARM::t2LDR_PRE:
case ARM::LDR_POST_IMM:
case ARM::LDR_POST_REG:
+ case ARM::t2LDR_POST:
case ARM::LDRH_PRE:
+ case ARM::t2LDRH_PRE:
case ARM::LDRH_POST:
+ case ARM::t2LDRH_POST:
case ARM::LDRSH_PRE:
+ case ARM::t2LDRSH_PRE:
case ARM::LDRSH_POST:
+ case ARM::t2LDRSH_POST:
case ARM::LDRB_PRE_IMM:
case ARM::LDRB_PRE_REG:
+ case ARM::t2LDRB_PRE:
case ARM::LDRB_POST_IMM:
case ARM::LDRB_POST_REG:
+ case ARM::t2LDRB_POST:
case ARM::LDRSB_PRE:
- case ARM::LDRSB_POST: {
+ case ARM::t2LDRSB_PRE:
+ case ARM::LDRSB_POST:
+ case ARM::t2LDRSB_POST: {
// Rt must be different from Rn.
const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
@@ -6374,7 +6584,9 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
return false;
}
case ARM::SBFX:
- case ARM::UBFX: {
+ case ARM::t2SBFX:
+ case ARM::UBFX:
+ case ARM::t2UBFX: {
// Width must be in range [1, 32-lsb].
unsigned LSB = Inst.getOperand(2).getImm();
unsigned Widthm1 = Inst.getOperand(3).getImm();
@@ -6592,19 +6804,40 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
break;
}
case ARM::HINT:
- case ARM::t2HINT:
- if (hasRAS()) {
- // ESB is not predicable (pred must be AL)
- unsigned Imm8 = Inst.getOperand(0).getImm();
- unsigned Pred = Inst.getOperand(1).getImm();
- if (Imm8 == 0x10 && Pred != ARMCC::AL)
- return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
- "predicable, but condition "
- "code specified");
- }
- // Without the RAS extension, this behaves as any other unallocated hint.
+ case ARM::t2HINT: {
+ unsigned Imm8 = Inst.getOperand(0).getImm();
+ unsigned Pred = Inst.getOperand(1).getImm();
+ // ESB is not predicable (pred must be AL). Without the RAS extension, this
+ // behaves as any other unallocated hint.
+ if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
+ return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
+ "predicable, but condition "
+ "code specified");
+ if (Imm8 == 0x14 && Pred != ARMCC::AL)
+ return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
+ "predicable, but condition "
+ "code specified");
+ break;
+ }
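
The two hint encodings singled out here are 0x10 (esb, gated on the RAS extension) and 0x14 (csdb); below is a simplified standalone sketch of the predicate rule, with hasRAS reduced to a boolean parameter:

#include <cassert>

static bool hintRejectsCondition(unsigned Imm8, bool IsAL, bool HasRAS) {
  if (Imm8 == 0x10 && !IsAL && HasRAS)
    return true;             // 'esb' is not predicable
  if (Imm8 == 0x14 && !IsAL)
    return true;             // 'csdb' is not predicable
  return false;              // other hints: no predication restriction
}

int main() {
  assert(hintRejectsCondition(0x14, /*IsAL=*/false, /*HasRAS=*/false));
  assert(!hintRejectsCondition(0x10, /*IsAL=*/false, /*HasRAS=*/false));
  return 0;
}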
+ case ARM::VMOVRRS: {
+ // Source registers must be sequential.
+ const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
+ const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
+ if (Sm1 != Sm + 1)
+ return Error(Operands[5]->getStartLoc(),
+ "source operands must be sequential");
break;
}
+ case ARM::VMOVSRR: {
+ // Destination registers must be sequential.
+ const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
+ const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
+ if (Sm1 != Sm + 1)
+ return Error(Operands[3]->getStartLoc(),
+ "destination operands must be sequential");
+ break;
+ }
+ }
return false;
}
@@ -10173,10 +10406,11 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
Message.Message = "too many operands for instruction";
} else {
Message.Message = "invalid operand for instruction";
- DEBUG(dbgs() << "Missing diagnostic string for operand class " <<
- getMatchClassName((MatchClassKind)I.getOperandClass())
- << I.getOperandClass() << ", error " << I.getOperandError()
- << ", opcode " << MII.getName(I.getOpcode()) << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Missing diagnostic string for operand class "
+ << getMatchClassName((MatchClassKind)I.getOperandClass())
+ << I.getOperandClass() << ", error " << I.getOperandError()
+ << ", opcode " << MII.getName(I.getOpcode()) << "\n");
}
NearMissesOut.emplace_back(Message);
break;
@@ -10203,6 +10437,8 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
if (!isThumb() && (MissingFeatures & Feature_IsThumb2) &&
(MissingFeatures & ~(Feature_IsThumb2 | Feature_IsThumb)))
break;
+ if (isMClass() && (MissingFeatures & Feature_HasNEON))
+ break;
NearMissMessage Message;
Message.Loc = IDLoc;