Diffstat (limited to 'lib/Target/ARM/AsmParser/ARMAsmParser.cpp')
 lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 1739 +++++++++++++++++-------
 1 file changed, 1465 insertions(+), 274 deletions(-)
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 3832b0112b87..1da9452f1d22 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -1,19 +1,20 @@
//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ARMFeatures.h"
-#include "InstPrinter/ARMInstPrinter.h"
+#include "ARMBaseInstrInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
+#include "MCTargetDesc/ARMInstPrinter.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
+#include "TargetInfo/ARMTargetInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
@@ -69,6 +70,10 @@
using namespace llvm;
+namespace llvm {
+extern const MCInstrDesc ARMInsts[];
+} // end namespace llvm
+
namespace {
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
@@ -90,6 +95,16 @@ static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
+static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
+ // Position==0 means we're not in an IT block at all. Position==1
+ // means we want the first state bit, which is always 0 (Then).
+ // Position==2 means we want the second state bit, stored at bit 3
+ // of Mask, and so on downwards. So (5 - Position) will shift the
+ // right bit down to bit 0, including the always-0 bit at bit 4 for
+ // the mandatory initial Then.
+ return (Mask >> (5 - Position) & 1);
+}
+
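
A standalone sketch (not part of the patch) of how the new mask convention reads out, using an "itte eq" block as a worked case; together with the rewritten currentITCond further down, a set bit now selects the opposite condition:

```cpp
#include <cassert>

// Reproduction of extractITMaskBit, for illustration only.
static unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  return (Mask >> (5 - Position)) & 1;
}

int main() {
  // "itte eq" has three slots: the mandatory initial Then plus 't'
  // and 'e'. Under the new convention (a 1 bit means 'e') the 4-bit
  // mask is 0b0110: bit 3 = second slot (t -> 0), bit 2 = third slot
  // (e -> 1), then the terminating 1 and a trailing 0.
  unsigned Mask = 0b0110;
  assert(extractITMaskBit(Mask, 1) == 0); // slot 1: always Then (eq)
  assert(extractITMaskBit(Mask, 2) == 0); // 't' -> same condition (eq)
  assert(extractITMaskBit(Mask, 3) == 1); // 'e' -> opposite (ne)
  return 0;
}
```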
class UnwindContext {
using Locs = SmallVector<SMLoc, 4>;
@@ -165,6 +180,7 @@ public:
}
};
+
class ARMAsmParser : public MCTargetAsmParser {
const MCRegisterInfo *MRI;
UnwindContext UC;
@@ -225,11 +241,10 @@ class ARMAsmParser : public MCTargetAsmParser {
}
// Emit the IT instruction
- unsigned Mask = getITMaskEncoding();
MCInst ITInst;
ITInst.setOpcode(ARM::t2IT);
ITInst.addOperand(MCOperand::createImm(ITState.Cond));
- ITInst.addOperand(MCOperand::createImm(Mask));
+ ITInst.addOperand(MCOperand::createImm(ITState.Mask));
Out.EmitInstruction(ITInst, getSTI());
// Emit the conditional instructions
@@ -287,27 +302,10 @@ class ARMAsmParser : public MCTargetAsmParser {
return MRI->getSubReg(QReg, ARM::dsub_0);
}
- // Get the encoding of the IT mask, as it will appear in an IT instruction.
- unsigned getITMaskEncoding() {
- assert(inITBlock());
- unsigned Mask = ITState.Mask;
- unsigned TZ = countTrailingZeros(Mask);
- if ((ITState.Cond & 1) == 0) {
- assert(Mask && TZ <= 3 && "illegal IT mask value!");
- Mask ^= (0xE << TZ) & 0xF;
- }
- return Mask;
- }
-
// Get the condition code corresponding to the current IT block slot.
ARMCC::CondCodes currentITCond() {
- unsigned MaskBit;
- if (ITState.CurPosition == 1)
- MaskBit = 1;
- else
- MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
-
- return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
+ unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
+ return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
}
// Invert the condition of the current IT block slot without changing any
@@ -337,7 +335,7 @@ class ARMAsmParser : public MCTargetAsmParser {
// Keep any existing condition bits.
NewMask |= ITState.Mask & (0xE << TZ);
// Insert the new condition bit.
- NewMask |= (Cond == ITState.Cond) << TZ;
+ NewMask |= (Cond != ITState.Cond) << TZ;
// Move the trailing 1 down one bit.
NewMask |= 1 << (TZ - 1);
ITState.Mask = NewMask;
@@ -352,9 +350,10 @@ class ARMAsmParser : public MCTargetAsmParser {
ITState.IsExplicit = false;
}
- // Create a new explicit IT block with the given condition and mask. The mask
- // should be in the parsed format, with a 1 implying 't', regardless of the
- // low bit of the condition.
+ // Create a new explicit IT block with the given condition and mask.
+ // The mask should be in the format used in ARMOperand and
+ // MCOperand, with a 1 implying 'e', regardless of the low bit of
+ // the condition.
void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
assert(!inITBlock());
ITState.Cond = Cond;
@@ -363,6 +362,18 @@ class ARMAsmParser : public MCTargetAsmParser {
ITState.IsExplicit = true;
}
+ struct {
+ unsigned Mask : 4;
+ unsigned CurPosition;
+ } VPTState;
+ bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
+ void forwardVPTPosition() {
+ if (!inVPTBlock()) return;
+ unsigned TZ = countTrailingZeros(VPTState.Mask);
+ if (++VPTState.CurPosition == 5 - TZ)
+ VPTState.CurPosition = ~0U;
+ }
+
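
A small sketch (illustrative; a GCC/Clang builtin stands in for llvm::countTrailingZeros) of how the trailing-zero count determines when the block ends:

```cpp
#include <cassert>

// The 4-bit VPT mask has the same shape as the IT mask: condition
// bits, a terminating 1, then zeros. A block with N slots therefore
// has 4 - N trailing zeros, and forwardVPTPosition() above closes
// the block once CurPosition reaches 5 - TZ.
static unsigned vptSlots(unsigned Mask) {
  unsigned TZ = __builtin_ctz(Mask); // stand-in for countTrailingZeros
  return 4 - TZ;
}

int main() {
  assert(vptSlots(0b1000) == 1); // "vpt": a single predicated slot
  assert(vptSlots(0b0100) == 2); // "vptt" (and "vpte" = 0b1100)
  assert(vptSlots(0b0001) == 4); // a full four-slot block
  return 0;
}
```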
void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Note(L, Msg, Range);
}
@@ -383,7 +394,7 @@ class ARMAsmParser : public MCTargetAsmParser {
int tryParseRegister();
bool tryParseRegisterWithWriteBack(OperandVector &);
int tryParseShiftRegister(OperandVector &);
- bool parseRegisterList(OperandVector &);
+ bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
bool parseMemory(OperandVector &);
bool parseOperand(OperandVector &, StringRef Mnemonic);
bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
@@ -421,12 +432,15 @@ class ARMAsmParser : public MCTargetAsmParser {
bool parseDirectiveAlign(SMLoc L);
bool parseDirectiveThumbSet(SMLoc L);
- StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
- bool &CarrySetting, unsigned &ProcessorIMod,
- StringRef &ITMask);
- void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
- bool &CanAcceptCarrySet,
- bool &CanAcceptPredicationCode);
+ bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
+ StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
+ unsigned &PredicationCode,
+ unsigned &VPTPredicationCode, bool &CarrySetting,
+ unsigned &ProcessorIMod, StringRef &ITMask);
+ void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
+ StringRef FullInst, bool &CanAcceptCarrySet,
+ bool &CanAcceptPredicationCode,
+ bool &CanAcceptVPTPredicationCode);
void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
OperandVector &Operands);
@@ -478,7 +492,15 @@ class ARMAsmParser : public MCTargetAsmParser {
bool hasV8MMainline() const {
return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
}
-
+ bool hasV8_1MMainline() const {
+ return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
+ }
+ bool hasMVE() const {
+ return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
+ }
+ bool hasMVEFloat() const {
+ return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
+ }
bool has8MSecExt() const {
return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
}
@@ -491,8 +513,8 @@ class ARMAsmParser : public MCTargetAsmParser {
return getSTI().getFeatureBits()[ARM::FeatureDSP];
}
- bool hasD16() const {
- return getSTI().getFeatureBits()[ARM::FeatureD16];
+ bool hasD32() const {
+ return getSTI().getFeatureBits()[ARM::FeatureD32];
}
bool hasV8_1aOps() const {
@@ -505,7 +527,7 @@ class ARMAsmParser : public MCTargetAsmParser {
void SwitchMode() {
MCSubtargetInfo &STI = copySTI();
- uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
+ auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
setAvailableFeatures(FB);
}
@@ -556,11 +578,13 @@ class ARMAsmParser : public MCTargetAsmParser {
// Asm Match Converter Methods
void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
void cvtThumbBranches(MCInst &Inst, const OperandVector &);
+ void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
+ bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
bool isITBlockTerminator(MCInst &Inst) const;
void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
@@ -597,6 +621,8 @@ public:
// Not in an ITBlock to start with.
ITState.CurPosition = ~0U;
+ VPTState.CurPosition = ~0U;
+
NextSymbolIsThumb = false;
}
@@ -642,6 +668,7 @@ public:
class ARMOperand : public MCParsedAsmOperand {
enum KindTy {
k_CondCode,
+ k_VPTPred,
k_CCOut,
k_ITCondMask,
k_CoprocNum,
@@ -659,8 +686,11 @@ class ARMOperand : public MCParsedAsmOperand {
k_VectorIndex,
k_Register,
k_RegisterList,
+ k_RegisterListWithAPSR,
k_DPRRegisterList,
k_SPRRegisterList,
+ k_FPSRegisterListWithVPR,
+ k_FPDRegisterListWithVPR,
k_VectorList,
k_VectorListAllLanes,
k_VectorListIndexed,
@@ -681,6 +711,10 @@ class ARMOperand : public MCParsedAsmOperand {
ARMCC::CondCodes Val;
};
+ struct VCCOp {
+ ARMVCC::VPTCodes Val;
+ };
+
struct CopOp {
unsigned Val;
};
@@ -797,6 +831,7 @@ class ARMOperand : public MCParsedAsmOperand {
union {
struct CCOp CC;
+ struct VCCOp VCC;
struct CopOp Cop;
struct CoprocOptionOp CoprocOption;
struct MBOptOp MBOpt;
@@ -845,6 +880,11 @@ public:
return CC.Val;
}
+ ARMVCC::VPTCodes getVPTPred() const {
+ assert(isVPTPred() && "Invalid access!");
+ return VCC.Val;
+ }
+
unsigned getCoproc() const {
assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
return Cop.Val;
@@ -861,8 +901,11 @@ public:
}
const SmallVectorImpl<unsigned> &getRegList() const {
- assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
- Kind == k_SPRRegisterList) && "Invalid access!");
+ assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
+ Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
+ Kind == k_FPSRegisterListWithVPR ||
+ Kind == k_FPDRegisterListWithVPR) &&
+ "Invalid access!");
return Registers;
}
@@ -915,6 +958,7 @@ public:
bool isCoprocReg() const { return Kind == k_CoprocReg; }
bool isCoprocOption() const { return Kind == k_CoprocOption; }
bool isCondCode() const { return Kind == k_CondCode; }
+ bool isVPTPred() const { return Kind == k_VPTPred; }
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
@@ -970,6 +1014,18 @@ public:
return false;
}
+ // checks whether this operand is an offset suitable for the LE /
+ // LETP instructions in Arm v8.1M
+ bool isLEOffset() const {
+ if (!isImm()) return false;
+ if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ return Val < 0 && Val >= -4094 && (Val & 1) == 0;
+ }
+ return false;
+ }
+
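
For the constant case (symbolic expressions are deferred to a fixup, as elsewhere in this file), the check reduces to this standalone restatement:

```cpp
#include <cstdint>

// LE/LETP branch backwards, so a constant offset must be negative,
// halfword-aligned, and no further back than -4094.
constexpr bool isLEOffsetVal(int64_t Val) {
  return Val < 0 && Val >= -4094 && (Val & 1) == 0;
}

static_assert(isLEOffsetVal(-4094), "the largest backwards offset");
static_assert(!isLEOffsetVal(-3), "odd offsets are rejected");
static_assert(!isLEOffsetVal(4), "forward offsets are rejected");
```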
// checks whether this operand is a memory operand computed as an offset
// applied to PC. the offset may have 8 bits of magnitude and is represented
// with two bits of shift. textually it may be either [pc, #imm], #imm or
@@ -982,7 +1038,7 @@ public:
if (!CE) return false;
Val = CE->getValue();
}
- else if (isMem()) {
+ else if (isGPRMem()) {
if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
if(Memory.BaseRegNum != ARM::PC) return false;
Val = Memory.OffsetImm->getValue();
@@ -1016,7 +1072,14 @@ public:
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= N && Value <= M;
}
-
+ template<int64_t N, int64_t M>
+ bool isImmediateS2() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return ((Value & 1) == 0) && Value >= N && Value <= M;
+ }
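
A standalone restatement; for instance, the new isImm7Shift1 below instantiates this as isImmediateS2<-255, 255>:

```cpp
#include <cstdint>

// Even and within [N, M], exactly as in the template above.
template <int64_t N, int64_t M> constexpr bool isImmediateS2(int64_t V) {
  return (V & 1) == 0 && V >= N && V <= M;
}

static_assert(isImmediateS2<-255, 255>(254), "even and in range");
static_assert(!isImmediateS2<-255, 255>(255), "odd values are rejected");
static_assert(!isImmediateS2<-255, 255>(-256), "below the range");
```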
bool isFBits16() const {
return isImmediate<0, 17>();
}
@@ -1026,6 +1089,21 @@ public:
bool isImm8s4() const {
return isImmediateS4<-1020, 1020>();
}
+ bool isImm7s4() const {
+ return isImmediateS4<-508, 508>();
+ }
+ bool isImm7Shift0() const {
+ return isImmediate<-127, 127>();
+ }
+ bool isImm7Shift1() const {
+ return isImmediateS2<-255, 255>();
+ }
+ bool isImm7Shift2() const {
+ return isImmediateS4<-511, 511>();
+ }
+ bool isImm7() const {
+ return isImmediate<-127, 127>();
+ }
bool isImm0_1020s4() const {
return isImmediateS4<0, 1020>();
}
@@ -1098,6 +1176,34 @@ public:
return isImmediate<1, 33>();
}
+ template<int shift>
+ bool isExpImmValue(uint64_t Value) const {
+ uint64_t mask = (1 << shift) - 1;
+ if ((Value & mask) != 0 || (Value >> shift) > 0xff)
+ return false;
+ return true;
+ }
+
+ template<int shift>
+ bool isExpImm() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+
+ return isExpImmValue<shift>(CE->getValue());
+ }
+
+ template<int shift, int size>
+ bool isInvertedExpImm() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+
+ uint64_t OriginalValue = CE->getValue();
+ uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
+ return isExpImmValue<shift>(InvertedValue);
+ }
+
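
A worked sketch of the two checks, taking shift == 8 and size == 16 as one plausible instantiation:

```cpp
#include <cstdint>

// An 8-bit value shifted left by Shift, as in isExpImmValue above.
constexpr bool isExpImmValue(uint64_t Value, int Shift) {
  return (Value & ((1ULL << Shift) - 1)) == 0 && (Value >> Shift) <= 0xff;
}

static_assert(isExpImmValue(0x1200, 8), "0x12 << 8 is representable");
static_assert(!isExpImmValue(0x1201, 8), "the low bits must be clear");
// The inverted form flips the low 'size' bits first, so with
// size == 16 the value 0xEDFF becomes 0x1200 and passes:
static_assert(isExpImmValue(0xEDFF ^ 0xFFFF, 8), "inverted variant");
```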
bool isPKHLSLImm() const {
return isImmediate<0, 32>();
}
@@ -1167,13 +1273,34 @@ public:
bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
+ bool isRegListWithAPSR() const {
+ return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
+ }
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
+ bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
+ bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
bool isMem() const override {
+ return isGPRMem() || isMVEMem();
+ }
+ bool isMVEMem() const {
+ if (Kind != k_Memory)
+ return false;
+ if (Memory.BaseRegNum &&
+ !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
+ !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
+ return false;
+ if (Memory.OffsetRegNum &&
+ !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
+ Memory.OffsetRegNum))
+ return false;
+ return true;
+ }
+ bool isGPRMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
@@ -1198,6 +1325,16 @@ public:
RegShiftedImm.SrcReg);
}
bool isRotImm() const { return Kind == k_RotateImmediate; }
+
+ template<unsigned Min, unsigned Max>
+ bool isPowerTwoInRange() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
+ Value >= Min && Value <= Max;
+ }
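
A standalone restatement; the V > 0 plus single-set-bit test is equivalent to the countPopulation == 1 check above:

```cpp
#include <cstdint>

template <unsigned Min, unsigned Max>
constexpr bool isPowerTwoInRange(int64_t V) {
  return V > 0 && (V & (V - 1)) == 0 &&
         V >= int64_t(Min) && V <= int64_t(Max);
}

static_assert(isPowerTwoInRange<1, 8>(4), "a power of two in range");
static_assert(!isPowerTwoInRange<1, 8>(6), "not a power of two");
static_assert(!isPowerTwoInRange<1, 8>(16), "outside the range");
```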
bool isModImm() const { return Kind == k_ModifiedImmediate; }
bool isModImmNot() const {
@@ -1243,14 +1380,50 @@ public:
return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
- if (!isMem())
+ if (!isGPRMem())
+ return false;
+ // No offset of any kind.
+ return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
+ (alignOK || Memory.Alignment == Alignment);
+ }
+ bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
+ if (!isGPRMem())
+ return false;
+
+ if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
+ Memory.BaseRegNum))
+ return false;
+
+ // No offset of any kind.
+ return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
+ (alignOK || Memory.Alignment == Alignment);
+ }
+ bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
+ if (!isGPRMem())
+ return false;
+
+ if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
+ Memory.BaseRegNum))
return false;
+
+ // No offset of any kind.
+ return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
+ (alignOK || Memory.Alignment == Alignment);
+ }
+ bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
+ if (!isGPRMem())
+ return false;
+
+ if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
+ Memory.BaseRegNum))
+ return false;
+
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
bool isMemPCRelImm12() const {
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base register must be PC.
if (Memory.BaseRegNum != ARM::PC)
@@ -1337,7 +1510,7 @@ public:
}
bool isAddrMode2() const {
- if (!isMem() || Memory.Alignment != 0) return false;
+ if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-4095, 4095].
@@ -1362,7 +1535,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMem() || Memory.Alignment != 0) return false;
+ if (!isGPRMem() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
// Check for register offset.
@@ -1396,7 +1569,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMem() || Memory.Alignment != 0) return false;
+ if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-1020, 1020] and a multiple of 4.
@@ -1412,7 +1585,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMem() || Memory.Alignment != 0) return false;
+ if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-510, 510] and a multiple of 2.
@@ -1423,14 +1596,14 @@ public:
}
bool isMemTBB() const {
- if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return true;
}
bool isMemTBH() const {
- if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
Memory.Alignment != 0 )
return false;
@@ -1438,13 +1611,13 @@ public:
}
bool isMemRegOffset() const {
- if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
+ if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
return true;
}
bool isT2MemRegOffset() const {
- if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
@@ -1458,7 +1631,7 @@ public:
bool isMemThumbRR() const {
// Thumb reg+reg addressing is simple. Just two registers, a base and
// an offset. No shifts, negations or any other complicating factors.
- if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return isARMLowRegister(Memory.BaseRegNum) &&
@@ -1466,7 +1639,7 @@ public:
}
bool isMemThumbRIs4() const {
- if (!isMem() || Memory.OffsetRegNum != 0 ||
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 124].
@@ -1476,7 +1649,7 @@ public:
}
bool isMemThumbRIs2() const {
- if (!isMem() || Memory.OffsetRegNum != 0 ||
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 62].
@@ -1486,7 +1659,7 @@ public:
}
bool isMemThumbRIs1() const {
- if (!isMem() || Memory.OffsetRegNum != 0 ||
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 31].
@@ -1496,7 +1669,7 @@ public:
}
bool isMemThumbSPI() const {
- if (!isMem() || Memory.OffsetRegNum != 0 ||
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 1020].
@@ -1511,7 +1684,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
@@ -1520,9 +1693,24 @@ public:
return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
Val == std::numeric_limits<int32_t>::min();
}
-
+ bool isMemImm7s4Offset() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
+ !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
+ Memory.BaseRegNum))
+ return false;
+ // Immediate offset a multiple of 4 in range [-508, 508].
+ if (!Memory.OffsetImm) return true;
+ int64_t Val = Memory.OffsetImm->getValue();
+ // Special case, #-0 is INT32_MIN.
+ return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
+ }
bool isMemImm0_1020s4Offset() const {
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
@@ -1531,7 +1719,7 @@ public:
}
bool isMemImm8Offset() const {
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
@@ -1542,8 +1730,81 @@ public:
(Val > -256 && Val < 256);
}
+ template<unsigned Bits, unsigned RegClassID>
+ bool isMemImm7ShiftedOffset() const {
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
+ !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
+ return false;
+
+ // Expect an immediate offset equal to an element of the range
+ // [-127, 127], shifted left by Bits.
+
+ if (!Memory.OffsetImm) return true;
+ int64_t Val = Memory.OffsetImm->getValue();
+
+ // INT32_MIN is a special-case value (indicating the encoding with
+ // zero offset and the subtract bit set)
+ if (Val == INT32_MIN)
+ return true;
+
+ unsigned Divisor = 1U << Bits;
+
+ // Check that the low bits are zero
+ if (Val % Divisor != 0)
+ return false;
+
+ // Check that the remaining offset is within range.
+ Val /= Divisor;
+ return (Val >= -127 && Val <= 127);
+ }
+
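
Taking Bits == 2 (word-sized elements) as a worked case, the predicate reduces to this standalone sketch:

```cpp
#include <cstdint>

// The offset must be a multiple of 4 whose quotient fits in
// [-127, 127]; INT32_MIN encodes #-0 (zero offset, subtract bit set).
constexpr bool isImm7ShiftedBy2(int64_t Val) {
  return Val == INT32_MIN ||
         (Val % 4 == 0 && Val / 4 >= -127 && Val / 4 <= 127);
}

static_assert(isImm7ShiftedBy2(508), "127 * 4, the top of the range");
static_assert(!isImm7ShiftedBy2(510), "not a multiple of 4");
static_assert(!isImm7ShiftedBy2(512), "quotient 128 is out of range");
```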
+ template <int shift> bool isMemRegRQOffset() const {
+ if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
+ return false;
+
+ if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
+ Memory.BaseRegNum))
+ return false;
+ if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
+ Memory.OffsetRegNum))
+ return false;
+
+ if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
+ return false;
+
+ if (shift > 0 &&
+ (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
+ return false;
+
+ return true;
+ }
+
+ template <int shift> bool isMemRegQOffset() const {
+ if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ return false;
+
+ if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
+ Memory.BaseRegNum))
+ return false;
+
+ if (!Memory.OffsetImm) return true;
+ static_assert(shift < 56,
+ "Such that we don't shift by a value higher than 62");
+ int64_t Val = Memory.OffsetImm->getValue();
+
+ // The value must be a multiple of (1 << shift)
+ if ((Val & ((1U << shift) - 1)) != 0)
+ return false;
+
+ // And be in the right range, depending on the amount that it is
+ // shifted by: a shift of 0 gives 7 unsigned bits, with the sign
+ // bit handled separately.
+ int64_t Range = (1U << (7+shift)) - 1;
+ return (Val == INT32_MIN) || (Val > -Range && Val < Range);
+ }
+
bool isMemPosImm8Offset() const {
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 255].
if (!Memory.OffsetImm) return true;
@@ -1552,7 +1813,7 @@ public:
}
bool isMemNegImm8Offset() const {
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
@@ -1564,7 +1825,7 @@ public:
}
bool isMemUImm12Offset() const {
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 4095].
if (!Memory.OffsetImm) return true;
@@ -1580,7 +1841,7 @@ public:
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
@@ -1631,6 +1892,12 @@ public:
return VectorList.Count == 1;
}
+ bool isVecListTwoMQ() const {
+ return isSingleSpacedVectorList() && VectorList.Count == 2 &&
+ ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
+ VectorList.RegNum);
+ }
+
bool isVecListDPair() const {
if (!isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairRegClassID]
@@ -1664,6 +1931,12 @@ public:
return VectorList.Count == 4;
}
+ bool isVecListFourMQ() const {
+ return isSingleSpacedVectorList() && VectorList.Count == 4 &&
+ ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
+ VectorList.RegNum);
+ }
+
bool isSingleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
}
@@ -1806,23 +2079,24 @@ public:
return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}
- bool isVectorIndex8() const {
- if (Kind != k_VectorIndex) return false;
- return VectorIndex.Val < 8;
- }
+ bool isVectorIndex() const { return Kind == k_VectorIndex; }
- bool isVectorIndex16() const {
+ template <unsigned NumLanes>
+ bool isVectorIndexInRange() const {
if (Kind != k_VectorIndex) return false;
- return VectorIndex.Val < 4;
+ return VectorIndex.Val < NumLanes;
}
- bool isVectorIndex32() const {
- if (Kind != k_VectorIndex) return false;
- return VectorIndex.Val < 2;
- }
- bool isVectorIndex64() const {
+ bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
+ bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
+ bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
+ bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
+
+ template<int PermittedValue, int OtherPermittedValue>
+ bool isMVEPairVectorIndex() const {
if (Kind != k_VectorIndex) return false;
- return VectorIndex.Val < 1;
+ return VectorIndex.Val == PermittedValue ||
+ VectorIndex.Val == OtherPermittedValue;
}
bool isNEONi8splat() const {
@@ -1992,6 +2266,51 @@ public:
return (Value % Angle == Remainder && Value <= 270);
}
+ bool isMVELongShift() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ uint64_t Value = CE->getValue();
+ return Value >= 1 && Value <= 32;
+ }
+
+ bool isITCondCodeNoAL() const {
+ if (!isITCondCode()) return false;
+ ARMCC::CondCodes CC = getCondCode();
+ return CC != ARMCC::AL;
+ }
+
+ bool isITCondCodeRestrictedI() const {
+ if (!isITCondCode())
+ return false;
+ ARMCC::CondCodes CC = getCondCode();
+ return CC == ARMCC::EQ || CC == ARMCC::NE;
+ }
+
+ bool isITCondCodeRestrictedS() const {
+ if (!isITCondCode())
+ return false;
+ ARMCC::CondCodes CC = getCondCode();
+ return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
+ CC == ARMCC::GE;
+ }
+
+ bool isITCondCodeRestrictedU() const {
+ if (!isITCondCode())
+ return false;
+ ARMCC::CondCodes CC = getCondCode();
+ return CC == ARMCC::HS || CC == ARMCC::HI;
+ }
+
+ bool isITCondCodeRestrictedFP() const {
+ if (!isITCondCode())
+ return false;
+ ARMCC::CondCodes CC = getCondCode();
+ return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
+ CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
+ }
+
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
if (!Expr)
@@ -2019,6 +2338,30 @@ public:
Inst.addOperand(MCOperand::createReg(RegNum));
}
+ void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
+ unsigned RegNum = getVPTPred() == ARMVCC::None ? 0 : ARM::P0;
+ Inst.addOperand(MCOperand::createReg(RegNum));
+ }
+
+ void addVPTPredROperands(MCInst &Inst, unsigned N) const {
+ assert(N == 3 && "Invalid number of operands!");
+ addVPTPredNOperands(Inst, N-1);
+ unsigned RegNum;
+ if (getVPTPred() == ARMVCC::None) {
+ RegNum = 0;
+ } else {
+ unsigned NextOpIndex = Inst.getNumOperands();
+ const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
+ int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
+ assert(TiedOp >= 0 &&
+ "Inactive register in vpred_r is not tied to an output!");
+ RegNum = Inst.getOperand(TiedOp).getReg();
+ }
+ Inst.addOperand(MCOperand::createReg(RegNum));
+ }
+
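
The TIED_TO lookup means the trailing "inactive" operand simply duplicates whatever register already occupies the tied destination slot, so predicated-off lanes keep their old contents. A toy model (all names and indices invented):

```cpp
#include <cassert>
#include <vector>

struct ToyInst {
  std::vector<unsigned> Ops;
  // Stand-in for MCID.getOperandConstraint(Idx, MCOI::TIED_TO):
  // here operand 3 (the inactive value) is tied to operand 0 (Qd).
  int getTiedTo(unsigned Idx) const { return Idx == 3 ? 0 : -1; }
};

int main() {
  ToyInst Inst;
  Inst.Ops = {/*Qd=*/42, /*VPT pred=*/1, /*P0=*/7};
  int Tied = Inst.getTiedTo(Inst.Ops.size());
  assert(Tied >= 0 && "inactive register must be tied to an output");
  Inst.Ops.push_back(Inst.Ops[Tied]); // copy Qd into the inactive slot
  assert(Inst.Ops.back() == 42);
  return 0;
}
```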
void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getCoproc()));
@@ -2044,6 +2387,11 @@ public:
Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
}
+ void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
+ }
+
void addCCOutOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getReg()));
@@ -2089,6 +2437,14 @@ public:
Inst.addOperand(MCOperand::createReg(*I));
}
+ void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const SmallVectorImpl<unsigned> &RegList = getRegList();
+ for (SmallVectorImpl<unsigned>::const_iterator
+ I = RegList.begin(), E = RegList.end(); I != E; ++I)
+ Inst.addOperand(MCOperand::createReg(*I));
+ }
+
void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
addRegListOperands(Inst, N);
}
@@ -2097,6 +2453,14 @@ public:
addRegListOperands(Inst, N);
}
+ void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
+ addRegListOperands(Inst, N);
+ }
+
+ void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
+ addRegListOperands(Inst, N);
+ }
+
void addRotImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// Encoded as val>>3. The printer handles display as 8, 16, 24.
@@ -2184,6 +2548,42 @@ public:
Inst.addOperand(MCOperand::createImm(CE->getValue()));
}
+ void addImm7s4Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
+ // instructions don't encode operands that way yet.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ }
+
+ void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ assert(CE != nullptr && "Invalid operand type!");
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ }
+
+ void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ assert(CE != nullptr && "Invalid operand type!");
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ }
+
+ void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ assert(CE != nullptr && "Invalid operand type!");
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ }
+
+ void addImm7Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ assert(CE != nullptr && "Invalid operand type!");
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ }
+
void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate is scaled by four in the encoding and is stored
@@ -2293,7 +2693,7 @@ public:
return;
}
- assert(isMem() && "Unknown value type!");
+ assert(isGPRMem() && "Unknown value type!");
assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
}
@@ -2318,6 +2718,21 @@ public:
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
+ void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ }
+
+ void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ }
+
+ void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ }
+
void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
int32_t Imm = Memory.OffsetImm->getValue();
@@ -2535,6 +2950,22 @@ public:
Inst.addOperand(MCOperand::createImm(Val));
}
+ void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm()) {
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ Inst.addOperand(MCOperand::createImm(0));
+ return;
+ }
+
+ int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
+
void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// The lower two bits are always zero and as such are not encoded.
@@ -2543,19 +2974,17 @@ public:
Inst.addOperand(MCOperand::createImm(Val));
}
- void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
+ void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::createImm(Val));
}
- void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
- addMemImm8OffsetOperands(Inst, N);
- }
-
- void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
- addMemImm8OffsetOperands(Inst, N);
+ void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
@@ -2699,6 +3128,12 @@ public:
Inst.addOperand(MCOperand::createImm(Imm));
}
+ void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ }
+
void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
@@ -2719,6 +3154,37 @@ public:
Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
}
+ void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ // When we come here, the VectorList field will identify a range
+ // of q-registers by its base register and length, and it will
+ // have already been error-checked to be the expected length of
+ // range and contain only q-regs in the range q0-q7. So we can
+ // count on the base register being in the range q0-q6 (for 2
+ // regs) or q0-q4 (for 4)
+ //
+ // The MVE instructions taking a register range of this kind will
+ // need an operand in the QQPR or QQQQPR class, representing the
+ // entire range as a unit. So we must translate into that class,
+ // by finding the index of the base register in the MQPR reg
+ // class, and returning the super-register at the corresponding
+ // index in the target class.
+
+ const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
+ const MCRegisterClass *RC_out = (VectorList.Count == 2) ?
+ &ARMMCRegisterClasses[ARM::QQPRRegClassID] :
+ &ARMMCRegisterClasses[ARM::QQQQPRRegClassID];
+
+ unsigned I, E = RC_out->getNumRegs();
+ for (I = 0; I < E; I++)
+ if (RC_in->getRegister(I) == VectorList.RegNum)
+ break;
+ assert(I < E && "Invalid vector list start register!");
+
+ Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
+ }
+
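
A toy model of that index translation (register names invented; the real code walks MCRegisterClass contents instead of arrays):

```cpp
#include <cassert>

enum ToyReg { Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
              Q0_Q1, Q1_Q2, Q2_Q3, Q3_Q4, Q4_Q5, Q5_Q6, Q6_Q7 };

// Find the base register's index in the single-register list and
// return the pair register at the same index in the pair list.
static ToyReg pairForBase(ToyReg Base) {
  const ToyReg MQPR[] = {Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7};
  const ToyReg QQPR[] = {Q0_Q1, Q1_Q2, Q2_Q3, Q3_Q4, Q4_Q5, Q5_Q6, Q6_Q7};
  for (unsigned I = 0; I < 7; ++I)
    if (MQPR[I] == Base)
      return QQPR[I];
  assert(0 && "invalid vector list start register!");
  return Q0_Q1; // unreachable
}

int main() {
  assert(pairForBase(Q2) == Q2_Q3); // a {q2, q3} list becomes one operand
  return 0;
}
```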
void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
@@ -2745,6 +3211,16 @@ public:
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
+ void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(getVectorIndex()));
+ }
+
+ void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(getVectorIndex()));
+ }
+
void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
@@ -2913,6 +3389,15 @@ public:
return Op;
}
+ static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
+ SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_VPTPred);
+ Op->VCC.Val = CC;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
auto Op = make_unique<ARMOperand>(k_CoprocNum);
Op->Cop.Val = CopVal;
@@ -3044,19 +3529,31 @@ public:
assert(Regs.size() > 0 && "RegList contains no registers?");
KindTy Kind = k_RegisterList;
- if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
- Kind = k_DPRRegisterList;
- else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
- contains(Regs.front().second))
- Kind = k_SPRRegisterList;
+ if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
+ Regs.front().second)) {
+ if (Regs.back().second == ARM::VPR)
+ Kind = k_FPDRegisterListWithVPR;
+ else
+ Kind = k_DPRRegisterList;
+ } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
+ Regs.front().second)) {
+ if (Regs.back().second == ARM::VPR)
+ Kind = k_FPSRegisterListWithVPR;
+ else
+ Kind = k_SPRRegisterList;
+ }
// Sort based on the register encoding values.
array_pod_sort(Regs.begin(), Regs.end());
+ if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
+ Kind = k_RegisterListWithAPSR;
+
auto Op = make_unique<ARMOperand>(Kind);
for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
I = Regs.begin(), E = Regs.end(); I != E; ++I)
Op->Registers.push_back(I->second);
+
Op->StartLoc = StartLoc;
Op->EndLoc = EndLoc;
return Op;
@@ -3217,15 +3714,18 @@ void ARMOperand::print(raw_ostream &OS) const {
case k_CondCode:
OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
break;
+ case k_VPTPred:
+ OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
+ break;
case k_CCOut:
OS << "<ccout " << RegName(getReg()) << ">";
break;
case k_ITCondMask: {
static const char *const MaskStr[] = {
- "(invalid)", "(teee)", "(tee)", "(teet)",
- "(te)", "(tete)", "(tet)", "(tett)",
- "(t)", "(ttee)", "(tte)", "(ttet)",
- "(tt)", "(ttte)", "(ttt)", "(tttt)"
+ "(invalid)", "(tttt)", "(ttt)", "(ttte)",
+ "(tt)", "(ttet)", "(tte)", "(ttee)",
+ "(t)", "(tett)", "(tet)", "(tete)",
+ "(te)", "(teet)", "(tee)", "(teee)",
};
assert((ITMask.Mask & 0xf) == ITMask.Mask);
OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
@@ -3324,8 +3824,11 @@ void ARMOperand::print(raw_ostream &OS) const {
<< ", width: " << Bitfield.Width << ">";
break;
case k_RegisterList:
+ case k_RegisterListWithAPSR:
case k_DPRRegisterList:
- case k_SPRRegisterList: {
+ case k_SPRRegisterList:
+ case k_FPSRegisterListWithVPR:
+ case k_FPDRegisterListWithVPR: {
OS << "<register_list ";
const SmallVectorImpl<unsigned> &RegList = getRegList();
@@ -3423,7 +3926,7 @@ int ARMAsmParser::tryParseRegister() {
}
// Some FPUs only have 16 D registers, so D16-D31 are invalid
- if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
+ if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
return -1;
Parser.Lex(); // Eat identifier token.
@@ -3662,11 +4165,10 @@ ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
+ int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
if (Num == -1)
return MatchOperand_NoMatch;
- // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
- if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
+ if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat identifier token.
@@ -3685,7 +4187,7 @@ ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
+ int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
if (Reg == -1)
return MatchOperand_NoMatch;
@@ -3752,7 +4254,8 @@ static unsigned getNextRegister(unsigned Reg) {
}
/// Parse a register list.
-bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
+bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
+ bool EnforceOrder) {
MCAsmParser &Parser = getParser();
if (Parser.getTok().isNot(AsmToken::LCurly))
return TokError("Token is not a Left Curly Brace");
@@ -3785,6 +4288,8 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
+ else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
+ RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
else
return Error(RegLoc, "invalid register in register list");
@@ -3838,14 +4343,32 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
Reg = getDRegFromQReg(Reg);
isQReg = true;
}
+ if (!RC->contains(Reg) &&
+ RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
+ ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
+ // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
+ // subset of GPRRegClassId except it contains APSR as well.
+ RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
+ }
+ if (Reg == ARM::VPR && (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
+ RC == &ARMMCRegisterClasses[ARM::DPRRegClassID])) {
+ RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
+ EReg = MRI->getEncodingValue(Reg);
+ Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
+ continue;
+ }
// The register must be in the same register class as the first.
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
- // List must be monotonically increasing.
- if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
+ // In most cases, the list must be monotonically increasing. An
+ // exception is CLRM, which is order-independent anyway, so
+ // there's no potential for confusion if you write clrm {r2,r1}
+ // instead of clrm {r1,r2}.
+ if (EnforceOrder &&
+ MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
Warning(RegLoc, "register list not in ascending order");
- else
+ else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
return Error(RegLoc, "register list not in ascending order");
}
if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
@@ -3855,6 +4378,7 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
}
// VFP register lists must also be contiguous.
if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
+ RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
Reg != OldReg + 1)
return Error(RegLoc, "non-contiguous register range");
EReg = MRI->getEncodingValue(Reg);
@@ -3944,7 +4468,7 @@ ARMAsmParser::parseVectorList(OperandVector &Operands) {
// As an extension (to match gas), support a plain D register or Q register
// (without enclosing curly braces) as a single or double entry list,
// respectively.
- if (Parser.getTok().is(AsmToken::Identifier)) {
+ if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
SMLoc E = Parser.getTok().getEndLoc();
int Reg = tryParseRegister();
if (Reg == -1)
@@ -4012,9 +4536,14 @@ ARMAsmParser::parseVectorList(OperandVector &Operands) {
unsigned Count = 1;
int Spacing = 0;
unsigned FirstReg = Reg;
+
+ if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
+ Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
+ return MatchOperand_ParseFail;
+ }
// The list is of D registers, but we also allow Q regs and just interpret
// them as the two D sub-registers.
- if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
FirstReg = Reg = getDRegFromQReg(Reg);
Spacing = 1; // double-spacing requires explicit D registers, otherwise
// it's ambiguous with four-register single spaced.
@@ -4044,14 +4573,17 @@ ARMAsmParser::parseVectorList(OperandVector &Operands) {
return MatchOperand_ParseFail;
}
// Allow Q regs and just interpret them as the two D sub-registers.
- if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
+ if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
EndReg = getDRegFromQReg(EndReg) + 1;
// If the register is the same as the start reg, there's nothing
// more to do.
if (Reg == EndReg)
continue;
// The register must be in the same register class as the first.
- if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
+ if ((hasMVE() &&
+ !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
+ (!hasMVE() &&
+ !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
Error(AfterMinusLoc, "invalid register in register list");
return MatchOperand_ParseFail;
}
@@ -4084,13 +4616,21 @@ ARMAsmParser::parseVectorList(OperandVector &Operands) {
Error(RegLoc, "register expected");
return MatchOperand_ParseFail;
}
+
+ if (hasMVE()) {
+ if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
+ Error(RegLoc, "vector register in range Q0-Q7 expected");
+ return MatchOperand_ParseFail;
+ }
+ Spacing = 1;
+ }
// vector register lists must be contiguous.
// It's OK to use the enumeration values directly here, as the
// VFP register classes have the enum sorted properly.
//
// The list is of D registers, but we also allow Q regs and just interpret
// them as the two D sub-registers.
- if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
if (!Spacing)
Spacing = 1; // Register range implies a single spaced list.
else if (Spacing == 2) {
@@ -4151,30 +4691,20 @@ ARMAsmParser::parseVectorList(OperandVector &Operands) {
switch (LaneKind) {
case NoLanes:
+ case AllLanes: {
// Two-register operands have been converted to the
// composite register classes.
- if (Count == 2) {
- const MCRegisterClass *RC = (Spacing == 1) ?
- &ARMMCRegisterClasses[ARM::DPairRegClassID] :
- &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
- FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
- }
- Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
- (Spacing == 2), S, E));
- break;
- case AllLanes:
- // Two-register operands have been converted to the
- // composite register classes.
- if (Count == 2) {
+ if (Count == 2 && !hasMVE()) {
const MCRegisterClass *RC = (Spacing == 1) ?
&ARMMCRegisterClasses[ARM::DPairRegClassID] :
&ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
}
- Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
- (Spacing == 2),
- S, E));
+ auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
+ ARMOperand::CreateVectorListAllLanes);
+ Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
break;
+ }
case IndexedLane:
Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
LaneIndex,
@@ -5061,6 +5591,21 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
+void ARMAsmParser::cvtMVEVMOVQtoDReg(
+ MCInst &Inst, const OperandVector &Operands) {
+
+ // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
+ assert(Operands.size() == 8);
+
+ ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
+ ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
+ ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
+ ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
+ // skip second copy of Qd in Operands[6]
+ ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
+ ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
+}
+
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
@@ -5275,6 +5820,8 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
St = ARM_AM::ror;
else if (ShiftName == "rrx" || ShiftName == "RRX")
St = ARM_AM::rrx;
+ else if (ShiftName == "uxtw" || ShiftName == "UXTW")
+ St = ARM_AM::uxtw;
else
return Error(Loc, "illegal shift operator");
Parser.Lex(); // Eat shift type token.
@@ -5463,7 +6010,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
case AsmToken::LBrac:
return parseMemory(Operands);
case AsmToken::LCurly:
- return parseRegisterList(Operands);
+ return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
case AsmToken::Dollar:
case AsmToken::Hash:
// #42 -> immediate.
@@ -5595,6 +6142,9 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
case MCObjectFileInfo::IsWasm:
CurrentFormat = WASM;
break;
+ case MCObjectFileInfo::IsXCOFF:
+ llvm_unreachable("unexpected object format");
+ break;
}
if (~Prefix->SupportedFormats & CurrentFormat) {
@@ -5621,11 +6171,14 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
+ StringRef ExtraToken,
unsigned &PredicationCode,
+ unsigned &VPTPredicationCode,
bool &CarrySetting,
unsigned &ProcessorIMod,
StringRef &ITMask) {
PredicationCode = ARMCC::AL;
+ VPTPredicationCode = ARMVCC::None;
CarrySetting = false;
ProcessorIMod = 0;
@@ -5649,7 +6202,12 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "bxns" || Mnemonic == "blxns" ||
Mnemonic == "vudot" || Mnemonic == "vsdot" ||
Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
- Mnemonic == "vfmal" || Mnemonic == "vfmsl")
+ Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
+ Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
+ Mnemonic == "csel" || Mnemonic == "csinc" ||
+ Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
+ Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
+ Mnemonic == "csetm")
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
@@ -5657,7 +6215,18 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
- Mnemonic != "sbcs" && Mnemonic != "rscs") {
+ Mnemonic != "sbcs" && Mnemonic != "rscs" &&
+ !(hasMVE() &&
+ (Mnemonic == "vmine" ||
+ Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
+ Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
+ Mnemonic == "vmvne" || Mnemonic == "vorne" ||
+ Mnemonic == "vnege" || Mnemonic == "vnegt" ||
+ Mnemonic == "vmule" || Mnemonic == "vmult" ||
+ Mnemonic == "vrintne" ||
+ Mnemonic == "vcmult" || Mnemonic == "vcmule" ||
+ Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
+ Mnemonic.startswith("vq")))) {
unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
if (CC != ~0U) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
@@ -5677,7 +6246,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
- Mnemonic == "bxns" || Mnemonic == "blxns" ||
+ Mnemonic == "bxns" || Mnemonic == "blxns" || Mnemonic == "vfmas" ||
+ Mnemonic == "vmlas" ||
(Mnemonic == "movs" && isThumb()))) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
CarrySetting = true;
@@ -5698,12 +6268,36 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
}
}
+ if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
+ Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
+ Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
+ Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
+ Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
+ Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
+ Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
+ unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
+ if (CC != ~0U) {
+ Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
+ VPTPredicationCode = CC;
+ }
+ return Mnemonic;
+ }
+
// The "it" instruction has the condition mask on the end of the mnemonic.
if (Mnemonic.startswith("it")) {
ITMask = Mnemonic.slice(2, Mnemonic.size());
Mnemonic = Mnemonic.slice(0, 2);
}
+ if (Mnemonic.startswith("vpst")) {
+ ITMask = Mnemonic.slice(4, Mnemonic.size());
+ Mnemonic = Mnemonic.slice(0, 4);
+ }
+ else if (Mnemonic.startswith("vpt")) {
+ ITMask = Mnemonic.slice(3, Mnemonic.size());
+ Mnemonic = Mnemonic.slice(0, 3);
+ }
+
return Mnemonic;
}
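
A toy version of just the new VPT-suffix split (the real code first consults isMnemonicVPTPredicable and the exception list above, and maps the suffix via ARMVectorCondCodeFromString):

```cpp
#include <cassert>
#include <string>

enum VPTCode { None, Then, Else };

// Peel a trailing 't'/'e' off a predicable MVE mnemonic, mirroring
// how IT condition suffixes are peeled off scalar mnemonics.
static VPTCode splitVPTSuffix(std::string &Mnemonic) {
  if (Mnemonic.empty())
    return None;
  char C = Mnemonic.back();
  if (C != 't' && C != 'e')
    return None;
  Mnemonic.pop_back();
  return C == 't' ? Then : Else;
}

int main() {
  std::string M = "vaddt";
  assert(splitVPTSuffix(M) == Then && M == "vadd");
  M = "vsube";
  assert(splitVPTSuffix(M) == Else && M == "vsub");
  return 0;
}
```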
@@ -5711,9 +6305,14 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
-void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
+void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
+ StringRef ExtraToken,
+ StringRef FullInst,
bool &CanAcceptCarrySet,
- bool &CanAcceptPredicationCode) {
+ bool &CanAcceptPredicationCode,
+ bool &CanAcceptVPTPredicationCode) {
+ CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
+
CanAcceptCarrySet =
Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
@@ -5742,7 +6341,18 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
Mnemonic == "sb" || Mnemonic == "ssbb" ||
- Mnemonic == "pssbb") {
+ Mnemonic == "pssbb" ||
+ Mnemonic == "bfcsel" || Mnemonic == "wls" ||
+ Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
+ Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
+ Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
+ Mnemonic == "cset" || Mnemonic == "csetm" ||
+ Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst") ||
+ (hasMVE() &&
+ (Mnemonic.startswith("vst2") || Mnemonic.startswith("vld2") ||
+ Mnemonic.startswith("vst4") || Mnemonic.startswith("vld4") ||
+ Mnemonic.startswith("wlstp") || Mnemonic.startswith("dlstp") ||
+ Mnemonic.startswith("letp")))) {
// These mnemonics are never predicable
CanAcceptPredicationCode = false;
} else if (!isThumb()) {
@@ -5976,7 +6586,8 @@ bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
OperandVector &Operands) {
// VRINT{Z, X} have a predicate operand in VFP, but not in NEON
unsigned RegIdx = 3;
- if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
+ if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
+ Mnemonic == "vrintr") &&
(static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
@@ -5994,6 +6605,47 @@ bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
return false;
}
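+// Some mnemonics are shared between MVE and scalar VFP/NEON instructions.
+// Decide, from the operands parsed so far, whether this is really the MVE
+// form; if it is not, the vector-predicate operand should be omitted.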
+bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
+ OperandVector &Operands) {
+ if (!hasMVE() || Operands.size() < 3)
+ return true;
+
+ if (Mnemonic.startswith("vld2") || Mnemonic.startswith("vld4") ||
+ Mnemonic.startswith("vst2") || Mnemonic.startswith("vst4"))
+ return true;
+
+ if (Mnemonic.startswith("vctp") || Mnemonic.startswith("vpnot"))
+ return false;
+
+ if (Mnemonic.startswith("vmov") &&
+ !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
+ Mnemonic.startswith("vmovx"))) {
+ for (auto &Operand : Operands) {
+ if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
+ ((*Operand).isReg() &&
+ (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
+ (*Operand).getReg()) ||
+ ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
+ (*Operand).getReg())))) {
+ return true;
+ }
+ }
+ return false;
+ } else {
+ for (auto &Operand : Operands) {
+ // We check the larger class QPR instead of just the legal class
+ // MQPR, to more accurately report errors when using Q registers
+ // outside of the allowed range.
+ if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
+ (Operand->isReg() &&
+ (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
+ Operand->getReg()))))
+ return false;
+ }
+ return true;
+ }
+}
+
static bool isDataTypeToken(StringRef Tok) {
return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
@@ -6010,7 +6662,8 @@ static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}
-static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
+static void applyMnemonicAliases(StringRef &Mnemonic,
+ const FeatureBitset &Features,
unsigned VariantID);
// The GNU assembler has aliases of ldrd and strd with the second register
@@ -6033,7 +6686,7 @@ void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
if (!Op2.isReg())
return;
- if (!Op3.isMem())
+ if (!Op3.isGPRMem())
return;
const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
@@ -6068,7 +6721,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// The generic tblgen'erated code does this later, at the start of
// MatchInstructionImpl(), but that's too late for aliases that include
// any sort of suffix.
- uint64_t AvailableFeatures = getAvailableFeatures();
+ const FeatureBitset &AvailableFeatures = getAvailableFeatures();
unsigned AssemblerDialect = getParser().getAssemblerDialect();
applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
@@ -6084,14 +6737,16 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// Create the leading tokens for the mnemonic, split by '.' characters.
size_t Start = 0, Next = Name.find('.');
StringRef Mnemonic = Name.slice(Start, Next);
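+ // Also grab the first '.'-separated suffix (e.g. ".f32"): some mnemonics,
+ // such as vmov, need it to decide whether they are VPT-predicable.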
+ StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
// Split out the predication code and carry setting flag from the mnemonic.
unsigned PredicationCode;
+ unsigned VPTPredicationCode;
unsigned ProcessorIMod;
bool CarrySetting;
StringRef ITMask;
- Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
- ProcessorIMod, ITMask);
+ Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
+ CarrySetting, ProcessorIMod, ITMask);
// In Thumb1, only the branch (B) instruction can be predicated.
if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
@@ -6100,15 +6755,24 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
- // Handle the IT instruction ITMask. Convert it to a bitmask. This
- // is the mask as it will be for the IT encoding if the conditional
- // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
- // where the conditional bit0 is zero, the instruction post-processing
- // will adjust the mask accordingly.
- if (Mnemonic == "it") {
- SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
+ // Handle the mask for IT and VPT instructions. In ARMOperand and
+ // MCOperand, this is stored in a format independent of the
+ // condition code: the lowest set bit indicates the end of the
+ // encoding, and above that, a 1 bit indicates 'else', and a 0
+ // indicates 'then'. E.g.
+ // IT -> 1000
+ // ITx -> x100 (ITT -> 0100, ITE -> 1100)
+ // ITxy -> xy10 (e.g. ITET -> 1010)
+ // ITxyz -> xyz1 (e.g. ITEET -> 1101)
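+ // The VPT and VPST masks use the same format, e.g. VPTTE -> 0110.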
+ if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
+ Mnemonic.startswith("vpst")) {
+ SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
+ Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
+ SMLoc::getFromPointer(NameLoc.getPointer() + 4);
if (ITMask.size() > 3) {
- return Error(Loc, "too many conditions on IT instruction");
+ if (Mnemonic == "it")
+ return Error(Loc, "too many conditions on IT instruction");
+ return Error(Loc, "too many conditions on VPT instruction");
}
unsigned Mask = 8;
for (unsigned i = ITMask.size(); i != 0; --i) {
@@ -6117,7 +6781,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
}
Mask >>= 1;
- if (ITMask[i - 1] == 't')
+ if (ITMask[i - 1] == 'e')
Mask |= 8;
}
Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
@@ -6133,8 +6797,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// ConditionCode operands to match the mnemonic "as written" and then we let
// the matcher deal with finding the right instruction or generating an
// appropriate error.
- bool CanAcceptCarrySet, CanAcceptPredicationCode;
- getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
+ bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
+ getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
+ CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
// If we had a carry-set on an instruction that can't do that, issue an
// error.
@@ -6149,6 +6814,13 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
"' is not predicable, but condition code specified");
}
+ // If we had a VPT predication code on an instruction that can't do that,
+ // issue an error.
+ if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
+ return Error(NameLoc, "instruction '" + Mnemonic +
+ "' is not VPT predicable, but VPT code T/E is specified");
+ }
+
// Add the carry setting operand, if necessary.
if (CanAcceptCarrySet) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
@@ -6161,7 +6833,24 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
CarrySetting);
Operands.push_back(ARMOperand::CreateCondCode(
- ARMCC::CondCodes(PredicationCode), Loc));
+ ARMCC::CondCodes(PredicationCode), Loc));
+ }
+
+ // Add the VPT predication code operand, if necessary.
+ // FIXME: We don't add it for the instructions filtered below, as these can
+ // have custom operands which need special parsing. That parsing requires
+ // each operand to be in the same place in the OperandVector as in its
+ // tblgen definition. Since these instructions may also carry the scalar
+ // predication operand, we do not add the vector one here and instead fix
+ // it up after the operands have been parsed.
+ if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
+ !Mnemonic.startswith("vcmp") &&
+ !(Mnemonic.startswith("vcvt") && Mnemonic != "vcvta" &&
+ Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
+ SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
+ CarrySetting);
+ Operands.push_back(ARMOperand::CreateVPTPred(
+ ARMVCC::VPTCodes(VPTPredicationCode), Loc));
}
// Add the processor imod operand, if necessary.
@@ -6177,7 +6866,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
while (Next != StringRef::npos) {
Start = Next;
Next = Name.find('.', Start + 1);
- StringRef ExtraToken = Name.slice(Start, Next);
+ ExtraToken = Name.slice(Start, Next);
// Some NEON instructions have an optional datatype suffix that is
// completely ignored. Check for that.
@@ -6233,57 +6922,173 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// Some instructions have the same mnemonic, but don't always
// have a predicate. Distinguish them here and delete the
- // predicate if needed.
+ // appropriate predicate if needed. This could be either the scalar
+ // predication code or the vector predication code.
if (PredicationCode == ARMCC::AL &&
shouldOmitPredicateOperand(Mnemonic, Operands))
Operands.erase(Operands.begin() + 1);
- // ARM mode 'blx' need special handling, as the register operand version
- // is predicable, but the label operand version is not. So, we can't rely
- // on the Mnemonic based checking to correctly figure out when to put
- // a k_CondCode operand in the list. If we're trying to match the label
- // version, remove the k_CondCode operand here.
- if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
- static_cast<ARMOperand &>(*Operands[2]).isImm())
- Operands.erase(Operands.begin() + 1);
- // Adjust operands of ldrexd/strexd to MCK_GPRPair.
- // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
- // a single GPRPair reg operand is used in the .td file to replace the two
- // GPRs. However, when parsing from asm, the two GRPs cannot be automatically
- // expressed as a GPRPair, so we have to manually merge them.
- // FIXME: We would really like to be able to tablegen'erate this.
- if (!isThumb() && Operands.size() > 4 &&
- (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
- Mnemonic == "stlexd")) {
- bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
- unsigned Idx = isLoad ? 2 : 3;
- ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
- ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
-
- const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
- // Adjust only if Op1 and Op2 are GPRs.
- if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
- MRC.contains(Op2.getReg())) {
- unsigned Reg1 = Op1.getReg();
- unsigned Reg2 = Op2.getReg();
- unsigned Rt = MRI->getEncodingValue(Reg1);
- unsigned Rt2 = MRI->getEncodingValue(Reg2);
-
- // Rt2 must be Rt + 1 and Rt must be even.
- if (Rt + 1 != Rt2 || (Rt & 1)) {
- return Error(Op2.getStartLoc(),
- isLoad ? "destination operands must be sequential"
- : "source operands must be sequential");
+ if (hasMVE()) {
+ if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
+ Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
+ // Very nasty hack to distinguish the vector-predicated variant of
+ // vmovlt from the scalar-predicated vmov with condition 'lt'. We
+ // cannot tell them apart until we have parsed their operands.
+ Operands.erase(Operands.begin() + 1);
+ Operands.erase(Operands.begin());
+ SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
+ SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ Mnemonic.size() - 1 + CarrySetting);
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
+ } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
+ !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+ // Another nasty hack to deal with the ambiguity between vcvt with
+ // scalar predication 'ne' and vcvtn with vector predication 'e'. As
+ // above, we can only distinguish the two after we have parsed their
+ // operands.
+ Operands.erase(Operands.begin() + 1);
+ Operands.erase(Operands.begin());
+ SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
+ SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ Mnemonic.size() - 1 + CarrySetting);
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
+ } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
+ !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+ // Another hack, this time to distinguish between the scalar-predicated
+ // vmul with 'lt' predication code and the vector instruction vmullt
+ // with vector predication code "none".
+ Operands.erase(Operands.begin() + 1);
+ Operands.erase(Operands.begin());
+ SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
+ }
+ // For vmov and vcmp, as mentioned earlier, we did not add the vector
+ // predication code, since these may contain operands that require
+ // special parsing. So now we have to see if they require vector
+ // predication and replace the scalar one with the vector predication
+ // operand if that is the case.
+ else if (Mnemonic == "vmov" || Mnemonic.startswith("vcmp") ||
+ (Mnemonic.startswith("vcvt") && !Mnemonic.startswith("vcvta") &&
+ !Mnemonic.startswith("vcvtn") && !Mnemonic.startswith("vcvtp") &&
+ !Mnemonic.startswith("vcvtm"))) {
+ if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+ // We could not split the vector predicate off vcvt because it might
+ // have been the scalar vcvtt instruction. Now that we know it is a
+ // vector instruction, we still need to check whether it is the
+ // vector-predicated vcvt with 'Then' predication or the vector
+ // vcvtt. We can distinguish the two based on the suffixes: if it is
+ // any of ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it
+ // is the vcvtt.
+ if (Mnemonic.startswith("vcvtt") && Operands.size() >= 4) {
+ auto &Sz1 = static_cast<ARMOperand &>(*Operands[2]);
+ auto &Sz2 = static_cast<ARMOperand &>(*Operands[3]);
+ if (!(Sz1.isToken() && Sz1.getToken().startswith(".f") &&
+ Sz2.isToken() && Sz2.getToken().startswith(".f"))) {
+ Operands.erase(Operands.begin());
+ SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
+ VPTPredicationCode = ARMVCC::Then;
+
+ Mnemonic = Mnemonic.substr(0, 4);
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(Mnemonic, MLoc));
+ }
+ }
+ Operands.erase(Operands.begin() + 1);
+ SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ Mnemonic.size() + CarrySetting);
+ Operands.insert(Operands.begin() + 1,
+ ARMOperand::CreateVPTPred(
+ ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
+ }
+ } else if (CanAcceptVPTPredicationCode) {
+ // For all other instructions, make sure only one of the two
+ // predication operands is left behind, depending on whether we should
+ // use the vector predication.
+ if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+ if (CanAcceptPredicationCode)
+ Operands.erase(Operands.begin() + 2);
+ else
+ Operands.erase(Operands.begin() + 1);
+ } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
+ Operands.erase(Operands.begin() + 1);
}
- unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
- &(MRI->getRegClass(ARM::GPRPairRegClassID)));
- Operands[Idx] =
- ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
- Operands.erase(Operands.begin() + Idx + 1);
}
}
+ if (VPTPredicationCode != ARMVCC::None) {
+ bool usedVPTPredicationCode = false;
+ for (unsigned I = 1; I < Operands.size(); ++I)
+ if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
+ usedVPTPredicationCode = true;
+ if (!usedVPTPredicationCode) {
+ // If we have a VPT predication code and we haven't just turned it
+ // into an operand, then it was a mistake for splitMnemonic to
+ // separate it from the rest of the mnemonic in the first place, and
+ // this may lead to matching the wrong instruction (e.g. scalar
+ // floating-point VCMPE is actually a different instruction from
+ // VCMP, so we mustn't treat them the same). In that situation, glue
+ // it back on.
+ Mnemonic = Name.slice(0, Mnemonic.size() + 1);
+ Operands.erase(Operands.begin());
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(Mnemonic, NameLoc));
+ }
+ }
+
+ // ARM mode 'blx' needs special handling, as the register operand version
+ // is predicable, but the label operand version is not. So, we can't rely
+ // on the Mnemonic based checking to correctly figure out when to put
+ // a k_CondCode operand in the list. If we're trying to match the label
+ // version, remove the k_CondCode operand here.
+ if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
+ static_cast<ARMOperand &>(*Operands[2]).isImm())
+ Operands.erase(Operands.begin() + 1);
+
+ // Adjust operands of ldrexd/strexd to MCK_GPRPair.
+ // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
+ // a single GPRPair reg operand is used in the .td file to replace the two
+ // GPRs. However, when parsing from asm, the two GPRs cannot be
+ // automatically expressed as a GPRPair, so we have to manually merge them.
+ // FIXME: We would really like to be able to tablegen'erate this.
+ if (!isThumb() && Operands.size() > 4 &&
+ (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
+ Mnemonic == "stlexd")) {
+ bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
+ unsigned Idx = isLoad ? 2 : 3;
+ ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
+ ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
+
+ const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
+ // Adjust only if Op1 and Op2 are GPRs.
+ if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
+ MRC.contains(Op2.getReg())) {
+ unsigned Reg1 = Op1.getReg();
+ unsigned Reg2 = Op2.getReg();
+ unsigned Rt = MRI->getEncodingValue(Reg1);
+ unsigned Rt2 = MRI->getEncodingValue(Reg2);
+
+ // Rt2 must be Rt + 1 and Rt must be even.
+ if (Rt + 1 != Rt2 || (Rt & 1)) {
+ return Error(Op2.getStartLoc(),
+ isLoad ? "destination operands must be sequential"
+ : "source operands must be sequential");
+ }
+ unsigned NewReg = MRI->getMatchingSuperReg(
+ Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
+ Operands[Idx] =
+ ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
+ Operands.erase(Operands.begin() + Idx + 1);
+ }
+ }
+
// GNU Assembler extension (compatibility).
fixupGNULDRDAlias(Mnemonic, Operands);
@@ -6442,6 +7247,17 @@ bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
return false;
}
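+// Return the index of the first vector-predication (vpred) operand in the
+// given instruction description, or -1 if it has none.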
+static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
+ for (unsigned i = 0; i < MCID.NumOperands; ++i) {
+ if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ return i;
+ }
+ return -1;
+}
+
+static bool isVectorPredicable(const MCInstrDesc &MCID) {
+ return findFirstVectorPredOperandIdx(MCID) != -1;
+}
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
@@ -6473,12 +7289,25 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
} else if (isThumbTwo() && MCID.isPredicable() &&
Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
- Inst.getOpcode() != ARM::t2Bcc) {
+ Inst.getOpcode() != ARM::t2Bcc &&
+ Inst.getOpcode() != ARM::t2BFic) {
return Error(Loc, "predicated instructions must be in IT block");
} else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
ARMCC::AL) {
return Warning(Loc, "predicated instructions should be in IT block");
+ } else if (!MCID.isPredicable()) {
+ // Check that the instruction doesn't have a predicate operand anyway
+ // that it's not allowed to use. Sometimes this happens in order to
+ // keep instructions the same shape even though one cannot legally be
+ // predicated, e.g. vmul.f16 vs vmul.f32.
+ for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
+ if (MCID.OpInfo[i].isPredicate()) {
+ if (Inst.getOperand(i).getImm() != ARMCC::AL)
+ return Error(Loc, "instruction is not predicable");
+ break;
+ }
+ }
}
// PC-setting instructions in an IT block, but not the last instruction of
@@ -6487,6 +7316,28 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
}
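+ // Inside a VPT block, each instruction must be vector-predicable and its
+ // vpred operand must match the T/E slot of the block's mask; outside a
+ // VPT block, no instruction may carry a vector predicate.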
+ if (inVPTBlock() && !instIsBreakpoint(Inst)) {
+ unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
+ if (!isVectorPredicable(MCID))
+ return Error(Loc, "instruction in VPT block must be predicable");
+ unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
+ unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
+ if (Pred != VPTPred) {
+ SMLoc PredLoc;
+ for (unsigned I = 1; I < Operands.size(); ++I)
+ if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
+ PredLoc = Operands[I]->getStartLoc();
+ return Error(PredLoc, "incorrect predication in VPT block; got '" +
+ StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
+ "', but expected '" +
+ ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
+ }
+ } else if (isVectorPredicable(MCID) &&
+ Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
+ ARMVCC::None)
+ return Error(Loc, "VPT predicated instructions must be in VPT block");
+
const unsigned Opcode = Inst.getOpcode();
switch (Opcode) {
case ARM::t2IT: {
@@ -6496,11 +7347,10 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
unsigned Cond = Inst.getOperand(0).getImm();
unsigned Mask = Inst.getOperand(1).getImm();
- // Mask hasn't been modified to the IT instruction encoding yet so
- // conditions only allowing a 't' are a block of 1s starting at bit 3
- // followed by all 0s. Easiest way is to just list the 4 possibilities.
- if (Cond == ARMCC::AL && Mask != 8 && Mask != 12 && Mask != 14 &&
- Mask != 15)
+ // Conditions only allowing a 't' are those with no set bit except
+ // the lowest-order one that indicates the end of the sequence. In
+ // other words, powers of 2.
+ if (Cond == ARMCC::AL && countPopulation(Mask) != 1)
return Error(Loc, "unpredictable IT predicate sequence");
break;
}
@@ -6609,6 +7459,54 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
"destination register and base register can't be identical");
return false;
}
+
+ case ARM::MVE_VLDRBU8_rq:
+ case ARM::MVE_VLDRBU16_rq:
+ case ARM::MVE_VLDRBS16_rq:
+ case ARM::MVE_VLDRBU32_rq:
+ case ARM::MVE_VLDRBS32_rq:
+ case ARM::MVE_VLDRHU16_rq:
+ case ARM::MVE_VLDRHU16_rq_u:
+ case ARM::MVE_VLDRHU32_rq:
+ case ARM::MVE_VLDRHU32_rq_u:
+ case ARM::MVE_VLDRHS32_rq:
+ case ARM::MVE_VLDRHS32_rq_u:
+ case ARM::MVE_VLDRWU32_rq:
+ case ARM::MVE_VLDRWU32_rq_u:
+ case ARM::MVE_VLDRDU64_rq:
+ case ARM::MVE_VLDRDU64_rq_u:
+ case ARM::MVE_VLDRWU32_qi:
+ case ARM::MVE_VLDRWU32_qi_pre:
+ case ARM::MVE_VLDRDU64_qi:
+ case ARM::MVE_VLDRDU64_qi_pre: {
+ // Qd must be different from Qm.
+ unsigned QdIdx = 0, QmIdx = 2;
+ bool QmIsPointer = false;
+ switch (Opcode) {
+ case ARM::MVE_VLDRWU32_qi:
+ case ARM::MVE_VLDRDU64_qi:
+ QmIdx = 1;
+ QmIsPointer = true;
+ break;
+ case ARM::MVE_VLDRWU32_qi_pre:
+ case ARM::MVE_VLDRDU64_qi_pre:
+ QdIdx = 1;
+ QmIsPointer = true;
+ break;
+ }
+
+ const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
+ const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
+
+ if (Qd == Qm) {
+ return Error(Operands[3]->getStartLoc(),
+ Twine("destination vector register and vector ") +
+ (QmIsPointer ? "pointer" : "offset") +
+ " register can't be identical");
+ }
+ return false;
+ }
+
case ARM::SBFX:
case ARM::t2SBFX:
case ARM::UBFX:
@@ -6776,6 +7674,20 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
}
break;
+ case ARM::t2ADDri:
+ case ARM::t2ADDri12:
+ case ARM::t2ADDrr:
+ case ARM::t2ADDrs:
+ case ARM::t2SUBri:
+ case ARM::t2SUBri12:
+ case ARM::t2SUBrr:
+ case ARM::t2SUBrs:
+ if (Inst.getOperand(0).getReg() == ARM::SP &&
+ Inst.getOperand(1).getReg() != ARM::SP)
+ return Error(Operands[4]->getStartLoc(),
+ "source register must be sp if destination is sp");
+ break;
+
// Final range checking for Thumb unconditional branch instructions.
case ARM::tB:
if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
@@ -6845,6 +7757,61 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
"code specified");
break;
}
+ case ARM::t2BFi:
+ case ARM::t2BFr:
+ case ARM::t2BFLi:
+ case ARM::t2BFLr: {
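+ // Branch Future instructions: the branch location must be a non-zero,
+ // even offset within the 4-bit unsigned range, and any immediate branch
+ // target must fit the encoding's signed range.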
+ if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
+ (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
+ return Error(Operands[2]->getStartLoc(),
+ "branch location out of range or not a multiple of 2");
+
+ if (Opcode == ARM::t2BFi) {
+ if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
+ return Error(Operands[3]->getStartLoc(),
+ "branch target out of range or not a multiple of 2");
+ } else if (Opcode == ARM::t2BFLi) {
+ if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
+ return Error(Operands[3]->getStartLoc(),
+ "branch target out of range or not a multiple of 2");
+ }
+ break;
+ }
+ case ARM::t2BFic: {
+ if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
+ (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
+ return Error(Operands[1]->getStartLoc(),
+ "branch location out of range or not a multiple of 2");
+
+ if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
+ return Error(Operands[2]->getStartLoc(),
+ "branch target out of range or not a multiple of 2");
+
+ assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
+ "branch location and else branch target should either both be "
+ "immediates or both labels");
+
+ if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
+ int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
+ if (Diff != 4 && Diff != 2)
+ return Error(
+ Operands[3]->getStartLoc(),
+ "else branch target must be 2 or 4 greater than the branch location");
+ }
+ break;
+ }
+ case ARM::t2CLRM: {
+ for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
+ if (Inst.getOperand(i).isReg() &&
+ !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
+ Inst.getOperand(i).getReg())) {
+ return Error(Operands[2]->getStartLoc(),
+ "invalid register in register list. Valid registers are "
+ "r0-r12, lr/r14 and APSR.");
+ }
+ }
+ break;
+ }
case ARM::DSB:
case ARM::t2DSB: {
@@ -6892,6 +7859,39 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
"list of registers must be at least 1 and at most 16");
break;
}
+ case ARM::MVE_VQDMULLs32bh:
+ case ARM::MVE_VQDMULLs32th:
+ case ARM::MVE_VCMULf32:
+ case ARM::MVE_VMULLs32bh:
+ case ARM::MVE_VMULLs32th:
+ case ARM::MVE_VMULLu32bh:
+ case ARM::MVE_VMULLu32th: {
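+ // For the 32-bit element multiplies producing a 64-bit result, the
+ // destination Qd must be distinct from both source registers.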
+ if (Operands[3]->getReg() == Operands[4]->getReg()) {
+ return Error(Operands[3]->getStartLoc(),
+ "Qd register and Qn register can't be identical");
+ }
+ if (Operands[3]->getReg() == Operands[5]->getReg()) {
+ return Error(Operands[3]->getStartLoc(),
+ "Qd register and Qm register can't be identical");
+ }
+ break;
+ }
+ case ARM::MVE_VMOV_rr_q: {
+ if (Operands[4]->getReg() != Operands[6]->getReg())
+ return Error(Operands[4]->getStartLoc(), "Q-registers must be the same");
+ if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
+ static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
+ return Error(Operands[5]->getStartLoc(),
+ "Q-register indexes must be 2 and 0 or 3 and 1");
+ break;
+ }
+ case ARM::MVE_VMOV_q_rr: {
+ if (Operands[2]->getReg() != Operands[4]->getReg())
+ return Error(Operands[2]->getStartLoc(), "Q-registers must be the same");
+ if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
+ static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
+ return Error(Operands[3]->getStartLoc(),
+ "Q-register indexes must be 2 and 0 or 3 and 1");
+ break;
+ }
}
return false;
@@ -7168,6 +8168,50 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
}
switch (Inst.getOpcode()) {
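+ // The MVE immediate forms of VORN and VAND are assembly aliases: rewrite
+ // them onto VORR and VBIC respectively, with the immediate inverted.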
+ case ARM::MVE_VORNIZ0v4i32:
+ case ARM::MVE_VORNIZ0v8i16:
+ case ARM::MVE_VORNIZ8v4i32:
+ case ARM::MVE_VORNIZ8v8i16:
+ case ARM::MVE_VORNIZ16v4i32:
+ case ARM::MVE_VORNIZ24v4i32:
+ case ARM::MVE_VANDIZ0v4i32:
+ case ARM::MVE_VANDIZ0v8i16:
+ case ARM::MVE_VANDIZ8v4i32:
+ case ARM::MVE_VANDIZ8v8i16:
+ case ARM::MVE_VANDIZ16v4i32:
+ case ARM::MVE_VANDIZ24v4i32: {
+ unsigned Opcode;
+ bool imm16 = false;
+ switch (Inst.getOpcode()) {
+ case ARM::MVE_VORNIZ0v4i32: Opcode = ARM::MVE_VORRIZ0v4i32; break;
+ case ARM::MVE_VORNIZ0v8i16: Opcode = ARM::MVE_VORRIZ0v8i16; imm16 = true; break;
+ case ARM::MVE_VORNIZ8v4i32: Opcode = ARM::MVE_VORRIZ8v4i32; break;
+ case ARM::MVE_VORNIZ8v8i16: Opcode = ARM::MVE_VORRIZ8v8i16; imm16 = true; break;
+ case ARM::MVE_VORNIZ16v4i32: Opcode = ARM::MVE_VORRIZ16v4i32; break;
+ case ARM::MVE_VORNIZ24v4i32: Opcode = ARM::MVE_VORRIZ24v4i32; break;
+ case ARM::MVE_VANDIZ0v4i32: Opcode = ARM::MVE_VBICIZ0v4i32; break;
+ case ARM::MVE_VANDIZ0v8i16: Opcode = ARM::MVE_VBICIZ0v8i16; imm16 = true; break;
+ case ARM::MVE_VANDIZ8v4i32: Opcode = ARM::MVE_VBICIZ8v4i32; break;
+ case ARM::MVE_VANDIZ8v8i16: Opcode = ARM::MVE_VBICIZ8v8i16; imm16 = true; break;
+ case ARM::MVE_VANDIZ16v4i32: Opcode = ARM::MVE_VBICIZ16v4i32; break;
+ case ARM::MVE_VANDIZ24v4i32: Opcode = ARM::MVE_VBICIZ24v4i32; break;
+ default: llvm_unreachable("unexpected opcode");
+ }
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+
+ // Invert the immediate, masked to the element width: VORN Qd, #imm is
+ // equivalent to VORR Qd, #~imm, and likewise VAND to VBIC.
+ unsigned imm = ~Inst.getOperand(2).getImm() & (imm16 ? 0xffff : 0xffffffff);
+ TmpInst.addOperand(MCOperand::createImm(imm));
+
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
// Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
case ARM::LDRT_POST:
case ARM::LDRBT_POST: {
@@ -8990,15 +10034,11 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
}
case ARM::ITasm:
case ARM::t2IT: {
- MCOperand &MO = Inst.getOperand(1);
- unsigned Mask = MO.getImm();
- ARMCC::CondCodes Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
-
// Set up the IT block state according to the IT instruction we just
// matched.
assert(!inITBlock() && "nested IT blocks?!");
- startExplicitITBlock(Cond, Mask);
- MO.setImm(getITMaskEncoding());
+ startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
+ Inst.getOperand(1).getImm());
break;
}
case ARM::t2LSLrr:
@@ -9074,6 +10114,35 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
return true;
}
return false;
+ case ARM::MVE_VPST:
+ case ARM::MVE_VPTv16i8:
+ case ARM::MVE_VPTv8i16:
+ case ARM::MVE_VPTv4i32:
+ case ARM::MVE_VPTv16u8:
+ case ARM::MVE_VPTv8u16:
+ case ARM::MVE_VPTv4u32:
+ case ARM::MVE_VPTv16s8:
+ case ARM::MVE_VPTv8s16:
+ case ARM::MVE_VPTv4s32:
+ case ARM::MVE_VPTv4f32:
+ case ARM::MVE_VPTv8f16:
+ case ARM::MVE_VPTv16i8r:
+ case ARM::MVE_VPTv8i16r:
+ case ARM::MVE_VPTv4i32r:
+ case ARM::MVE_VPTv16u8r:
+ case ARM::MVE_VPTv8u16r:
+ case ARM::MVE_VPTv4u32r:
+ case ARM::MVE_VPTv16s8r:
+ case ARM::MVE_VPTv8s16r:
+ case ARM::MVE_VPTv4s32r:
+ case ARM::MVE_VPTv4f32r:
+ case ARM::MVE_VPTv8f16r: {
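+ // A VPT or VPST instruction starts a new VPT block: record its mask and
+ // reset the block position, mirroring the IT block handling above.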
+ assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
+ MCOperand &MO = Inst.getOperand(0);
+ VPTState.Mask = MO.getImm();
+ VPTState.CurPosition = 0;
+ break;
+ }
}
return false;
}
@@ -9138,18 +10207,50 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
return Match_RequiresV8;
}
- // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
- // ARMv8-A.
- if ((Inst.getOpcode() == ARM::VMRS || Inst.getOpcode() == ARM::VMSR) &&
- Inst.getOperand(0).getReg() == ARM::SP && (isThumb() && !hasV8Ops()))
- return Match_InvalidOperand;
+ switch (Inst.getOpcode()) {
+ case ARM::VMRS:
+ case ARM::VMSR:
+ case ARM::VMRS_FPCXTS:
+ case ARM::VMRS_FPCXTNS:
+ case ARM::VMSR_FPCXTS:
+ case ARM::VMSR_FPCXTNS:
+ case ARM::VMRS_FPSCR_NZCVQC:
+ case ARM::VMSR_FPSCR_NZCVQC:
+ case ARM::FMSTAT:
+ case ARM::VMRS_VPR:
+ case ARM::VMRS_P0:
+ case ARM::VMSR_VPR:
+ case ARM::VMSR_P0:
+ // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
+ // ARMv8-A.
+ if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
+ (isThumb() && !hasV8Ops()))
+ return Match_InvalidOperand;
+ break;
+ default:
+ break;
+ }
for (unsigned I = 0; I < MCID.NumOperands; ++I)
if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
// rGPRRegClass excludes PC, and also excluded SP before ARMv8
- if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
+ const auto &Op = Inst.getOperand(I);
+ if (!Op.isReg()) {
+ // This can happen in awkward cases with tied operands, e.g. a
+ // writeback load/store with a complex addressing mode in
+ // which there's an output operand corresponding to the
+ // updated written-back base register: the Tablegen-generated
+ // AsmMatcher will have written a placeholder operand to that
+ // slot in the form of an immediate 0, because it can't
+ // generate the register part of the complex addressing-mode
+ // operand ahead of time.
+ continue;
+ }
+
+ unsigned Reg = Op.getReg();
+ if ((Reg == ARM::SP) && !hasV8Ops())
return Match_RequiresV8;
- else if (Inst.getOperand(I).getReg() == ARM::PC)
+ else if (Reg == ARM::PC)
return Match_InvalidOperand;
}
@@ -9268,7 +10369,7 @@ unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
return PlainMatchResult;
}
-static std::string ARMMnemonicSpellCheck(StringRef S, uint64_t FBS,
+static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
unsigned VariantID = 0);
static const char *getSubtargetFeatureName(uint64_t Val);
@@ -9296,6 +10397,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// Still progress the IT block, otherwise one wrong condition causes
// nasty cascading errors.
forwardITPosition();
+ forwardVPTPosition();
return true;
}
@@ -9322,6 +10424,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// and process gets a consistent answer about whether we're in an IT
// block.
forwardITPosition();
+ forwardVPTPosition();
// ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
// doesn't actually encode.
@@ -9341,7 +10444,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
ReportNearMisses(NearMisses, IDLoc, Operands);
return true;
case Match_MnemonicFail: {
- uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
+ FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
std::string Suggestion = ARMMnemonicSpellCheck(
((ARMOperand &)*Operands[0]).getToken(), FBS);
return Error(IDLoc, "invalid instruction" + Suggestion,
@@ -10384,11 +11487,11 @@ ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
: "operand must be a register in range [r0, r12] or r14";
// DPR contains 16 registers for some FPUs, and 32 for others.
case Match_DPR:
- return hasD16() ? "operand must be a register in range [d0, d15]"
- : "operand must be a register in range [d0, d31]";
+ return hasD32() ? "operand must be a register in range [d0, d31]"
+ : "operand must be a register in range [d0, d15]";
case Match_DPR_RegList:
- return hasD16() ? "operand must be a list of registers in range [d0, d15]"
- : "operand must be a list of registers in range [d0, d31]";
+ return hasD32() ? "operand must be a list of registers in range [d0, d31]"
+ : "operand must be a list of registers in range [d0, d15]";
// For all other diags, use the static string from tablegen.
default:
@@ -10416,7 +11519,7 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
// variants of an instruction that take 8- and 16-bit immediates, we want
// to only report the widest one.
std::multimap<unsigned, unsigned> OperandMissesSeen;
- SmallSet<uint64_t, 4> FeatureMissesSeen;
+ SmallSet<FeatureBitset, 4> FeatureMissesSeen;
bool ReportedTooFewOperands = false;
// Process the near-misses in reverse order, so that we see more general ones
@@ -10467,7 +11570,7 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
break;
}
case NearMissInfo::NearMissFeature: {
- uint64_t MissingFeatures = I.getFeatures();
+ const FeatureBitset &MissingFeatures = I.getFeatures();
// Don't report the same set of features twice.
if (FeatureMissesSeen.count(MissingFeatures))
break;
@@ -10475,20 +11578,21 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
// Special case: don't report a feature set which includes arm-mode for
// targets that don't have ARM mode.
- if ((MissingFeatures & Feature_IsARM) && !hasARM())
+ if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
break;
// Don't report any near-misses that both require switching instruction
// set, and adding other subtarget features.
- if (isThumb() && (MissingFeatures & Feature_IsARM) &&
- (MissingFeatures & ~Feature_IsARM))
+ if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
+ MissingFeatures.count() > 1)
break;
- if (!isThumb() && (MissingFeatures & Feature_IsThumb) &&
- (MissingFeatures & ~Feature_IsThumb))
+ if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
+ MissingFeatures.count() > 1)
break;
- if (!isThumb() && (MissingFeatures & Feature_IsThumb2) &&
- (MissingFeatures & ~(Feature_IsThumb2 | Feature_IsThumb)))
+ if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
+ (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
+ Feature_IsThumbBit})).any())
break;
- if (isMClass() && (MissingFeatures & Feature_HasNEON))
+ if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
break;
NearMissMessage Message;
@@ -10496,14 +11600,10 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
raw_svector_ostream OS(Message.Message);
OS << "instruction requires:";
- uint64_t Mask = 1;
- for (unsigned MaskPos = 0; MaskPos < (sizeof(MissingFeatures) * 8 - 1);
- ++MaskPos) {
- if (MissingFeatures & Mask) {
- OS << " " << getSubtargetFeatureName(MissingFeatures & Mask);
- }
- Mask <<= 1;
- }
+ for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
+ if (MissingFeatures.test(i))
+ OS << ' ' << getSubtargetFeatureName(i);
+
NearMissesOut.emplace_back(Message);
break;
@@ -10579,38 +11679,44 @@ void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
}
}
-// FIXME: This structure should be moved inside ARMTargetParser
-// when we start to table-generate them, and we can use the ARM
-// flags below, that were generated by table-gen.
-static const struct {
- const unsigned Kind;
- const uint64_t ArchCheck;
- const FeatureBitset Features;
-} Extensions[] = {
- { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} },
- { ARM::AEK_CRYPTO, Feature_HasV8,
- {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
- { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
- { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
- {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
- { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
- { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
- { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
- // FIXME: Only available in A-class, isel not predicated
- { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
- { ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
- { ARM::AEK_RAS, Feature_HasV8, {ARM::FeatureRAS} },
- // FIXME: Unsupported extensions.
- { ARM::AEK_OS, Feature_None, {} },
- { ARM::AEK_IWMMXT, Feature_None, {} },
- { ARM::AEK_IWMMXT2, Feature_None, {} },
- { ARM::AEK_MAVERICK, Feature_None, {} },
- { ARM::AEK_XSCALE, Feature_None, {} },
-};
-
/// parseDirectiveArchExtension
/// ::= .arch_extension [no]feature
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
+ // FIXME: This structure should be moved inside ARMTargetParser
+ // when we start to table-generate them, and we can use the ARM
+ // flags below, that were generated by table-gen.
+ static const struct {
+ const unsigned Kind;
+ const FeatureBitset ArchCheck;
+ const FeatureBitset Features;
+ } Extensions[] = {
+ { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
+ { ARM::AEK_CRYPTO, {Feature_HasV8Bit},
+ {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
+ { ARM::AEK_FP, {Feature_HasV8Bit},
+ {ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
+ { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
+ {Feature_HasV7Bit, Feature_IsNotMClassBit},
+ {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
+ { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
+ {ARM::FeatureMP} },
+ { ARM::AEK_SIMD, {Feature_HasV8Bit},
+ {ARM::FeatureNEON, ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
+ { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
+ // FIXME: Only available in A-class, isel not predicated
+ { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
+ { ARM::AEK_FP16, {Feature_HasV8_2aBit},
+ {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
+ { ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
+ { ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB} },
+ // FIXME: Unsupported extensions.
+ { ARM::AEK_OS, {}, {} },
+ { ARM::AEK_IWMMXT, {}, {} },
+ { ARM::AEK_IWMMXT2, {}, {} },
+ { ARM::AEK_MAVERICK, {}, {} },
+ { ARM::AEK_XSCALE, {}, {} },
+ };
+
MCAsmParser &Parser = getParser();
if (getLexer().isNot(AsmToken::Identifier))
@@ -10646,12 +11752,12 @@ bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
"allowed for the current base architecture");
MCSubtargetInfo &STI = copySTI();
- FeatureBitset ToggleFeatures = EnableFeature
- ? (~STI.getFeatureBits() & Extension.Features)
- : ( STI.getFeatureBits() & Extension.Features);
-
- uint64_t Features =
- ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
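+ // Enable or disable the extension's features on the copied subtarget,
+ // including any features they transitively imply, and then recompute the
+ // set of available assembler features.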
+ if (EnableFeature) {
+ STI.SetFeatureBitsTransitively(Extension.Features);
+ } else {
+ STI.ClearFeatureBitsTransitively(Extension.Features);
+ }
+ FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
setAvailableFeatures(Features);
return false;
}
@@ -10675,6 +11781,18 @@ unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
if (CE->getValue() == 0)
return Match_Success;
break;
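+ // MCK__35_8 and MCK__35_16 are the Tablegen-mangled names of the literal
+ // tokens '#8' and '#16' (35 being the ASCII code of '#').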
+ case MCK__35_8:
+ if (Op.isImm())
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
+ if (CE->getValue() == 8)
+ return Match_Success;
+ break;
+ case MCK__35_16:
+ if (Op.isImm())
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
+ if (CE->getValue() == 16)
+ return Match_Success;
+ break;
case MCK_ModImm:
if (Op.isImm()) {
const MCExpr *SOExpr = Op.getImm();
@@ -10698,3 +11816,76 @@ unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
}
return Match_InvalidOperand;
}
+
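+// Rough mnemonic-prefix test for whether an instruction can take an MVE
+// vector-predication (VPT) suffix; only meaningful when MVE is available.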
+bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
+ StringRef ExtraToken) {
+ if (!hasMVE())
+ return false;
+
+ return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") ||
+ Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") ||
+ Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") ||
+ Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") ||
+ Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") ||
+ Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") ||
+ Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") ||
+ Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") ||
+ Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") ||
+ Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") ||
+ Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") ||
+ Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") ||
+ Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") ||
+ Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") ||
+ Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") ||
+ Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") ||
+ Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") ||
+ Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") ||
+ Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") ||
+ Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") ||
+ Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") ||
+ Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") ||
+ Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") ||
+ Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") ||
+ Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") ||
+ Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") ||
+ Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") ||
+ Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") ||
+ Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") ||
+ Mnemonic.startswith("vqabs") ||
+ (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
+ Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") ||
+ Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") ||
+ Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") ||
+ Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") ||
+ Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") ||
+ Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") ||
+ Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") ||
+ Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") ||
+ Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") ||
+ Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") ||
+ Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") ||
+ Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") ||
+ Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") ||
+ Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") ||
+ Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") ||
+ Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") ||
+ Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") ||
+ Mnemonic.startswith("vldrb") ||
+ (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") ||
+ (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
+ Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") ||
+ Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") ||
+ Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") ||
+ Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") ||
+ Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") ||
+ Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") ||
+ Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") ||
+ Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") ||
+ Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") ||
+ Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") ||
+ Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") ||
+ Mnemonic.startswith("vcvt") ||
+ (Mnemonic.startswith("vmov") &&
+ !(ExtraToken == ".f16" || ExtraToken == ".32" ||
+ ExtraToken == ".16" || ExtraToken == ".8"));
+}