Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp')
-rw-r--r--   contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp   2084
1 file changed, 1394 insertions, 690 deletions
diff --git a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index ac9ff51f69f1..a51c41d70915 100644
--- a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -66,6 +66,12 @@ enum class RegKind {
   SVEPredicateVector
 };
 
+enum RegConstraintEqualityTy {
+  EqualsReg,
+  EqualsSuperReg,
+  EqualsSubReg
+};
+
 class AArch64AsmParser : public MCTargetAsmParser {
 private:
   StringRef Mnemonic; ///< Instruction mnemonic.
@@ -85,19 +91,18 @@ private:
   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
   unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
-  int tryParseRegister();
-  int tryMatchVectorRegister(StringRef &Kind, bool expected);
   bool parseRegister(OperandVector &Operands);
   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
-  bool parseVectorList(OperandVector &Operands);
+  bool parseNeonVectorList(OperandVector &Operands);
+  bool parseOptionalMulOperand(OperandVector &Operands);
   bool parseOperand(OperandVector &Operands, bool isCondCode,
                     bool invertCondCode);
-  bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
+  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
+                      OperandVector &Operands);
 
   bool parseDirectiveArch(SMLoc L);
   bool parseDirectiveCPU(SMLoc L);
-  bool parseDirectiveWord(unsigned Size, SMLoc L);
   bool parseDirectiveInst(SMLoc L);
 
   bool parseDirectiveTLSDescCall(SMLoc L);
@@ -121,25 +126,36 @@ private:
 
   /// }
 
-  OperandMatchResultTy tryParseSVERegister(int &Reg, StringRef &Kind,
-                                           RegKind MatchKind);
+  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
+  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
+                                              RegKind MatchKind);
   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
+  template <bool IsSVEPrefetch = false>
   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
+  template<bool AddFPZeroAsLiteral>
   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
-  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
+  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
   bool tryParseNeonVectorRegister(OperandVector &Operands);
+  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
-  template <bool ParseSuffix>
+  template <bool ParseShiftExtend,
+            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
+  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
+  template <bool ParseShiftExtend, bool ParseSuffix>
   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
   OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
+  template <RegKind VectorKind>
+  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
+                                          bool ExpectMatch = false);
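  // [Editor's note — illustration, not part of the patch] The new
  // RegConstraintEqualityTy above relaxes operand matching so a parsed
  // register may satisfy a sibling register class. A hypothetical sketch of
  // such a check, assuming the existing getXRegFromWReg/getWRegFromXReg
  // helpers from AArch64BaseInfo.h:
  //
  //   bool regMatches(unsigned Parsed, unsigned Expected,
  //                   RegConstraintEqualityTy EqTy) {
  //     switch (EqTy) {
  //     case EqualsReg:      return Parsed == Expected;                  // x8 vs x8
  //     case EqualsSuperReg: return getXRegFromWReg(Parsed) == Expected; // w8 vs x8
  //     case EqualsSubReg:   return getWRegFromXReg(Parsed) == Expected; // x8 vs w8
  //     }
  //     llvm_unreachable("unknown equality kind");
  //   }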
+  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
 
 public:
   enum AArch64MatchResultTy {
@@ -158,10 +174,19 @@ public:
     if (S.getTargetStreamer() == nullptr)
       new AArch64TargetStreamer(S);
 
+    // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
+    // directives as they have the same form and semantics:
+    ///  ::= (.hword | .word | .xword ) [ expression (, expression)* ]
+    Parser.addAliasForDirective(".hword", ".2byte");
+    Parser.addAliasForDirective(".word", ".4byte");
+    Parser.addAliasForDirective(".xword", ".8byte");
+
     // Initialize the set of available features.
     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
   }
 
+  bool regsEqual(const MCParsedAsmOperand &Op1,
+                 const MCParsedAsmOperand &Op2) const override;
   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                         SMLoc NameLoc, OperandVector &Operands) override;
   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
@@ -204,18 +229,45 @@ private:
     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
   };
 
+  // Separate shift/extend operand.
+  struct ShiftExtendOp {
+    AArch64_AM::ShiftExtendType Type;
+    unsigned Amount;
+    bool HasExplicitAmount;
+  };
+
   struct RegOp {
     unsigned RegNum;
     RegKind Kind;
-
     int ElementWidth;
+
+    // The register may be allowed as a different register class,
+    // e.g. for GPR64as32 or GPR32as64.
+    RegConstraintEqualityTy EqualityTy;
+
+    // In some cases the shift/extend needs to be explicitly parsed together
+    // with the register, rather than as a separate operand. This is needed
+    // for addressing modes where the instruction as a whole dictates the
+    // scaling/extend, rather than specific bits in the instruction.
+    // By parsing them as a single operand, we avoid the need to pass an
+    // extra operand in all CodeGen patterns (because all operands need to
+    // have an associated value), and we avoid the need to update TableGen to
+    // accept operands that have no associated bits in the instruction.
+    //
+    // An added benefit of parsing them together is that the assembler
+    // can give a sensible diagnostic if the scaling is not correct.
+    //
+    // The default is 'lsl #0' (HasExplicitAmount = false) if no
+    // ShiftExtend is specified.
+    ShiftExtendOp ShiftExtend;
   };
 
   struct VectorListOp {
     unsigned RegNum;
     unsigned Count;
     unsigned NumElements;
-    unsigned ElementKind;
+    unsigned ElementWidth;
+    RegKind RegisterKind;
   };
 
   struct VectorIndexOp {
@@ -236,7 +288,8 @@ private:
   };
 
   struct FPImmOp {
-    unsigned Val; // Encoded 8-bit representation.
+    uint64_t Val; // APFloat value bitcasted to uint64_t.
+    bool IsExact; // describes whether parsed value was exact.
   };
 
   struct BarrierOp {
@@ -269,12 +322,6 @@ private:
     unsigned Val;
   };
 
-  struct ShiftExtendOp {
-    AArch64_AM::ShiftExtendType Type;
-    unsigned Amount;
-    bool HasExplicitAmount;
-  };
-
   struct ExtendOp {
     unsigned Val;
   };
@@ -388,9 +435,14 @@ public:
     return CondCode.Code;
   }
 
-  unsigned getFPImm() const {
-    assert(Kind == k_FPImm && "Invalid access!");
-    return FPImm.Val;
+  APFloat getFPImm() const {
+    assert (Kind == k_FPImm && "Invalid access!");
+    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
+  }
+
+  bool getFPImmIsExact() const {
+    assert (Kind == k_FPImm && "Invalid access!");
+    return FPImm.IsExact;
   }
 
   unsigned getBarrier() const {
@@ -408,6 +460,11 @@ public:
     return Reg.RegNum;
   }
 
+  RegConstraintEqualityTy getRegEqualityTy() const {
+    assert(Kind == k_Register && "Invalid access!");
+    return Reg.EqualityTy;
+  }
+
   unsigned getVectorListStart() const {
     assert(Kind == k_VectorList && "Invalid access!");
     return VectorList.RegNum;
@@ -454,66 +511,88 @@ public:
   }
 
   AArch64_AM::ShiftExtendType getShiftExtendType() const {
-    assert(Kind == k_ShiftExtend && "Invalid access!");
-    return ShiftExtend.Type;
+    if (Kind == k_ShiftExtend)
+      return ShiftExtend.Type;
+    if (Kind == k_Register)
+      return Reg.ShiftExtend.Type;
+    llvm_unreachable("Invalid access!");
   }
 
   unsigned getShiftExtendAmount() const {
-    assert(Kind == k_ShiftExtend && "Invalid access!");
-    return ShiftExtend.Amount;
+    if (Kind == k_ShiftExtend)
+      return ShiftExtend.Amount;
+    if (Kind == k_Register)
+      return Reg.ShiftExtend.Amount;
+    llvm_unreachable("Invalid access!");
   }
 
   bool hasShiftExtendAmount() const {
-    assert(Kind == k_ShiftExtend && "Invalid access!");
-    return ShiftExtend.HasExplicitAmount;
+    if (Kind == k_ShiftExtend)
+      return ShiftExtend.HasExplicitAmount;
+    if (Kind == k_Register)
+      return Reg.ShiftExtend.HasExplicitAmount;
+    llvm_unreachable("Invalid access!");
   }
 
   bool isImm() const override { return Kind == k_Immediate; }
   bool isMem() const override { return false; }
-  bool isSImm9() const {
+
+  bool isUImm6() const {
     if (!isImm())
       return false;
     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
-    return (Val >= -256 && Val < 256);
+    return (Val >= 0 && Val < 64);
   }
-  bool isSImm10s8() const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
-    if (!MCE)
-      return false;
-    int64_t Val = MCE->getValue();
-    return (Val >= -4096 && Val < 4089 && (Val & 7) == 0);
+
+  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
+
+  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
+    return isImmScaled<Bits, Scale>(true);
   }
-  bool isSImm7s4() const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
-    if (!MCE)
-      return false;
-    int64_t Val = MCE->getValue();
-    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
+
+  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
+    return isImmScaled<Bits, Scale>(false);
   }
-  bool isSImm7s8() const {
+
+  template <int Bits, int Scale>
+  DiagnosticPredicate isImmScaled(bool Signed) const {
     if (!isImm())
-      return false;
+      return DiagnosticPredicateTy::NoMatch;
+
     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
-      return false;
+      return DiagnosticPredicateTy::NoMatch;
+
+    int64_t MinVal, MaxVal;
+    if (Signed) {
+      int64_t Shift = Bits - 1;
+      MinVal = (int64_t(1) << Shift) * -Scale;
+      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
+    } else {
+      MinVal = 0;
+      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
+    }
+
     int64_t Val = MCE->getValue();
-    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
+    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
+      return DiagnosticPredicateTy::Match;
+
+    return DiagnosticPredicateTy::NearMatch;
   }
-  bool isSImm7s16() const {
+
+  DiagnosticPredicate isSVEPattern() const {
     if (!isImm())
-      return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+      return DiagnosticPredicateTy::NoMatch;
+    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
-      return false;
+      return DiagnosticPredicateTy::NoMatch;
     int64_t Val = MCE->getValue();
-    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
+    if (Val >= 0 && Val < 32)
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NearMatch;
   }
 
   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
@@ -535,7 +614,9 @@ public:
            ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
            ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
            ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
-           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
+           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
+           ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
+           ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
       // Note that we don't range-check the addend. It's adjusted modulo page
       // size when converted, so there is no "out of range" condition when using
       // @pageoff.
@@ -572,48 +653,47 @@ public:
     return (Val >= N && Val <= M);
   }
 
-  bool isLogicalImm32() const {
+  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
+  // a logical immediate can always be represented when inverted.
+  template <typename T>
+  bool isLogicalImm() const {
     if (!isImm())
       return false;
     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
+
     int64_t Val = MCE->getValue();
-    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
+    int64_t SVal = typename std::make_signed<T>::type(Val);
+    int64_t UVal = typename std::make_unsigned<T>::type(Val);
+    if (Val != SVal && Val != UVal)
       return false;
-    Val &= 0xFFFFFFFF;
-    return AArch64_AM::isLogicalImmediate(Val, 32);
-  }
-  bool isLogicalImm64() const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
-    if (!MCE)
-      return false;
-    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
+
+    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
   }
-  bool isLogicalImm32Not() const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
-    if (!MCE)
-      return false;
-    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
-    return AArch64_AM::isLogicalImmediate(Val, 32);
-  }
+
+  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
 
-  bool isLogicalImm64Not() const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
-    if (!MCE)
-      return false;
-    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
-  }
+  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
+  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
+  /// immediate that can be shifted by 'Shift'.
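  // [Editor's note — worked example, not part of the patch] For Width == 12:
  //   #4096          -> (1, 12)   since 4096 == 1 << 12
  //   #3             -> (3, 0)    unshifted value that cannot use the shift
  //   "#1, lsl #12"  -> (1, 12)   an already-shifted form is returned as-is
  //   #0             -> (0, 0)    zero is never re-encoded with a shift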
+  template <unsigned Width>
+  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
+    if (isShiftedImm() && Width == getShiftedImmShift())
+      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
+        return std::make_pair(CE->getValue(), Width);
+
+    if (isImm())
+      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
+        int64_t Val = CE->getValue();
+        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
+          return std::make_pair(Val >> Width, Width);
+        else
+          return std::make_pair(Val, 0u);
+      }
 
-  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
+    return {};
+  }
 
   bool isAddSubImm() const {
     if (!isShiftedImm() && !isImm())
       return false;
@@ -646,12 +726,14 @@ public:
            || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
            || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
            || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
-           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
+           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
+           || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
+           || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
     }
 
-    // If it's a constant, it should be a real immediate in range:
-    if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
-      return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+    // If it's a constant, it should be a real immediate in range.
+    if (auto ShiftedVal = getShiftedVal<12>())
+      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
 
     // If it's an expression, we hope for the best and let the fixup/relocation
     // code deal with it.
@@ -662,20 +744,56 @@ public:
     if (!isShiftedImm() && !isImm())
       return false;
 
-    const MCExpr *Expr;
+    // Otherwise it should be a real negative immediate in range.
+    if (auto ShiftedVal = getShiftedVal<12>())
+      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
 
-    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
-    if (isShiftedImm()) {
-      unsigned Shift = ShiftedImm.ShiftAmount;
-      Expr = ShiftedImm.Val;
-      if (Shift != 0 && Shift != 12)
-        return false;
-    } else
-      Expr = getImm();
+    return false;
+  }
+
+  // Signed value in the range -128 to +127. For element widths of
+  // 16 bits or higher it may also be a signed multiple of 256 in the
+  // range -32768 to +32512.
+  // For element-width of 8 bits a range of -128 to 255 is accepted,
+  // since a copy of a byte can be either signed/unsigned.
+  template <typename T>
+  DiagnosticPredicate isSVECpyImm() const {
+    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
+      return DiagnosticPredicateTy::NoMatch;
+
+    bool IsByte =
+        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
+    if (auto ShiftedImm = getShiftedVal<8>())
+      if (!(IsByte && ShiftedImm->second) &&
+          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
+                                     << ShiftedImm->second))
+        return DiagnosticPredicateTy::Match;
 
-    // Otherwise it should be a real negative immediate in range:
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
-    return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
+    return DiagnosticPredicateTy::NearMatch;
+  }
+
+  // Unsigned value in the range 0 to 255. For element widths of
+  // 16 bits or higher it may also be a signed multiple of 256 in the
+  // range 0 to 65280.
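  // [Editor's note — worked example, not part of the patch] With T = int16_t,
  // "add z0.h, z0.h, #256" matches below: getShiftedVal<8>() yields (1, 8)
  // and 1 << 8 == 256 is a valid 16-bit add/sub immediate. With T = int8_t
  // the same (1, 8) pair is rejected by the "!(IsByte && ShiftedImm->second)"
  // guard, so byte elements only accept the plain 0..255 range.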
+  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
+    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
+      return DiagnosticPredicateTy::NoMatch;
+
+    bool IsByte =
+        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
+    if (auto ShiftedImm = getShiftedVal<8>())
+      if (!(IsByte && ShiftedImm->second) &&
+          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
+                                        << ShiftedImm->second))
+        return DiagnosticPredicateTy::Match;
+
+    return DiagnosticPredicateTy::NearMatch;
+  }
+
+  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
+    if (isLogicalImm<T>() && !isSVECpyImm<T>())
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NoMatch;
   }
 
   bool isCondCode() const { return Kind == k_CondCode; }
@@ -792,7 +910,11 @@ public:
     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
   }
 
-  bool isFPImm() const { return Kind == k_FPImm; }
+  bool isFPImm() const {
+    return Kind == k_FPImm &&
+           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
+  }
+
   bool isBarrier() const { return Kind == k_Barrier; }
   bool isSysReg() const { return Kind == k_SysReg; }
@@ -810,6 +932,7 @@ public:
   bool isSystemPStateFieldWithImm0_1() const {
     if (!isSysReg()) return false;
     return (SysReg.PStateField == AArch64PState::PAN ||
+            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO);
   }
 
@@ -840,6 +963,8 @@ public:
     RegKind RK;
     switch (Class) {
     case AArch64::ZPRRegClassID:
+    case AArch64::ZPR_3bRegClassID:
+    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
       break;
     case AArch64::PPRRegClassID:
@@ -854,10 +979,56 @@ public:
            AArch64MCRegisterClasses[Class].contains(getReg());
   }
 
+  template <unsigned Class> bool isFPRasZPR() const {
+    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
+           AArch64MCRegisterClasses[Class].contains(getReg());
+  }
+
+  template <int ElementWidth, unsigned Class>
+  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
+    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
+      return DiagnosticPredicateTy::NoMatch;
+
+    if (isSVEVectorReg<Class>() &&
+        (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
+      return DiagnosticPredicateTy::Match;
+
+    return DiagnosticPredicateTy::NearMatch;
+  }
+
   template <int ElementWidth, unsigned Class>
-  bool isSVEVectorRegOfWidth() const {
-    return isSVEVectorReg<Class>() &&
-           (ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
+  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
+    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
+      return DiagnosticPredicateTy::NoMatch;
+
+    if (isSVEVectorReg<Class>() &&
+        (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
+      return DiagnosticPredicateTy::Match;
+
+    return DiagnosticPredicateTy::NearMatch;
+  }
+
+  template <int ElementWidth, unsigned Class,
+            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
+            bool ShiftWidthAlwaysSame>
+  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
+    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
+    if (!VectorMatch.isMatch())
+      return DiagnosticPredicateTy::NoMatch;
+
+    // Give a more specific diagnostic when the user has explicitly typed in
+    // a shift-amount that does not match what is expected, but for which
+    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
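    // [Editor's note — worked example, not part of the patch] For a 32-bit
    // element gather such as "ld1w { z0.s }, p0/z, [x0, z1.s, uxtw #2]",
    // ShiftWidth is 32, so the required explicit amount is
    // Log2_32(32 / 8) == 2; a mistyped "uxtw #1" falls through to the
    // NearMatch diagnostic below instead of silently matching the
    // unscaled addressing mode.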
+    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
+    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
+                        ShiftExtendTy == AArch64_AM::SXTW) &&
+        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
+      return DiagnosticPredicateTy::NoMatch;
+
+    if (MatchShift && ShiftExtendTy == getShiftExtendType())
+      return DiagnosticPredicateTy::Match;
+
+    return DiagnosticPredicateTy::NearMatch;
   }
 
   bool isGPR32as64() const {
@@ -865,6 +1036,11 @@ public:
            AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
   }
 
+  bool isGPR64as32() const {
+    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
+      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
+  }
+
   bool isWSeqPair() const {
     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
@@ -877,58 +1053,65 @@ public:
                Reg.RegNum);
   }
 
-  bool isGPR64sp0() const {
-    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
-      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
-  }
-
   template<int64_t Angle, int64_t Remainder>
-  bool isComplexRotation() const {
-    if (!isImm()) return false;
+  DiagnosticPredicate isComplexRotation() const {
+    if (!isImm()) return DiagnosticPredicateTy::NoMatch;
 
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
+    if (!CE) return DiagnosticPredicateTy::NoMatch;
     uint64_t Value = CE->getValue();
 
-    return (Value % Angle == Remainder && Value <= 270);
+    if (Value % Angle == Remainder && Value <= 270)
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NearMatch;
+  }
+
+  template <unsigned RegClassID> bool isGPR64() const {
+    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
+           AArch64MCRegisterClasses[RegClassID].contains(getReg());
+  }
+
+  template <unsigned RegClassID, int ExtWidth>
+  DiagnosticPredicate isGPR64WithShiftExtend() const {
+    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
+      return DiagnosticPredicateTy::NoMatch;
+
+    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
+        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NearMatch;
   }
 
   /// Is this a vector list with the type implicit (presumably attached to the
   /// instruction itself)?
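  // [Editor's note — example, not part of the patch] "{ v0.4s, v1.4s }"
  // parses as a typed list (NumElements == 4, ElementWidth == 32), whereas
  // the verbose alias "{ v0, v1 }" leaves NumElements == 0 and is matched by
  // isImplicitlyTypedVectorList below, with the mnemonic supplying the type.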
-  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
+  template <RegKind VectorKind, unsigned NumRegs>
+  bool isImplicitlyTypedVectorList() const {
     return Kind == k_VectorList && VectorList.Count == NumRegs &&
-           !VectorList.ElementKind;
+           VectorList.NumElements == 0 &&
+           VectorList.RegisterKind == VectorKind;
   }
 
-  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
+  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
+            unsigned ElementWidth>
   bool isTypedVectorList() const {
     if (Kind != k_VectorList)
       return false;
     if (VectorList.Count != NumRegs)
       return false;
-    if (VectorList.ElementKind != ElementKind)
+    if (VectorList.RegisterKind != VectorKind)
+      return false;
+    if (VectorList.ElementWidth != ElementWidth)
       return false;
     return VectorList.NumElements == NumElements;
   }
 
-  bool isVectorIndex1() const {
-    return Kind == k_VectorIndex && VectorIndex.Val == 1;
-  }
-
-  bool isVectorIndexB() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 16;
-  }
-
-  bool isVectorIndexH() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 8;
-  }
-
-  bool isVectorIndexS() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 4;
-  }
-
-  bool isVectorIndexD() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 2;
+  template <int Min, int Max>
+  DiagnosticPredicate isVectorIndex() const {
+    if (Kind != k_VectorIndex)
+      return DiagnosticPredicateTy::NoMatch;
+    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NearMatch;
   }
 
   bool isToken() const override { return Kind == k_Token; }
@@ -949,6 +1132,39 @@ public:
             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
             ST == AArch64_AM::MSL);
   }
+
+  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
+    if (Kind != k_FPImm)
+      return DiagnosticPredicateTy::NoMatch;
+
+    if (getFPImmIsExact()) {
+      // Lookup the immediate from table of supported immediates.
+      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
+      assert(Desc && "Unknown enum value");
+
+      // Calculate its FP value.
+      APFloat RealVal(APFloat::IEEEdouble());
+      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
+          APFloat::opOK)
+        llvm_unreachable("FP immediate is not exact");
+
+      if (getFPImm().bitwiseIsEqual(RealVal))
+        return DiagnosticPredicateTy::Match;
+    }
+
+    return DiagnosticPredicateTy::NearMatch;
+  }
+
+  template <unsigned ImmA, unsigned ImmB>
+  DiagnosticPredicate isExactFPImm() const {
+    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
+    if ((Res = isExactFPImm<ImmA>()))
+      return DiagnosticPredicateTy::Match;
+    if ((Res = isExactFPImm<ImmB>()))
+      return DiagnosticPredicateTy::Match;
+    return Res;
+  }
+
   bool isExtend() const {
     if (!isShiftExtend())
       return false;
@@ -1081,7 +1297,7 @@ public:
   // ambiguity in the matcher.
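  // [Editor's note — example, not part of the patch] For Width == 32,
  // "ldr w0, [x1, #255]" takes this fallback form (#255 fits simm9 but is
  // not a multiple of 4), while "ldr w0, [x1, #256]" prefers the scaled
  // uimm12 form — exactly the matcher ambiguity referred to above.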
   template<int Width> bool isSImm9OffsetFB() const {
-    return isSImm9() && !isUImm12Offset<Width / 8>();
+    return isSImm<9>() && !isUImm12Offset<Width / 8>();
   }
 
   bool isAdrpLabel() const {
@@ -1143,6 +1359,33 @@ public:
     Inst.addOperand(MCOperand::createReg(Reg));
   }
 
+  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    assert(
+        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
+
+    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
+        RI->getEncodingValue(getReg()));
+
+    Inst.addOperand(MCOperand::createReg(Reg));
+  }
+
+  template <int Width>
+  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
+    unsigned Base;
+    switch (Width) {
+    case 8:   Base = AArch64::B0; break;
+    case 16:  Base = AArch64::H0; break;
+    case 32:  Base = AArch64::S0; break;
+    case 64:  Base = AArch64::D0; break;
+    case 128: Base = AArch64::Q0; break;
+    default:
+      llvm_unreachable("Unsupported width");
+    }
+    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
+  }
+
   void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     assert(
@@ -1162,55 +1405,45 @@ public:
     Inst.addOperand(MCOperand::createReg(getReg()));
   }
 
-  template <unsigned NumRegs>
-  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    static const unsigned FirstRegs[] = { AArch64::D0,
-                                          AArch64::D0_D1,
-                                          AArch64::D0_D1_D2,
-                                          AArch64::D0_D1_D2_D3 };
-    unsigned FirstReg = FirstRegs[NumRegs - 1];
-
-    Inst.addOperand(
-        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
-  }
-
-  template <unsigned NumRegs>
-  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    static const unsigned FirstRegs[] = { AArch64::Q0,
-                                          AArch64::Q0_Q1,
-                                          AArch64::Q0_Q1_Q2,
-                                          AArch64::Q0_Q1_Q2_Q3 };
-    unsigned FirstReg = FirstRegs[NumRegs - 1];
-
-    Inst.addOperand(
-        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
-  }
-
-  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
+  enum VecListIndexType {
+    VecListIdx_DReg = 0,
+    VecListIdx_QReg = 1,
+    VecListIdx_ZReg = 2,
+  };
 
-  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
+  template <VecListIndexType RegTy, unsigned NumRegs>
+  void addVectorListOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
-
-  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
+    static const unsigned FirstRegs[][5] = {
+      /* DReg */ { AArch64::Q0,
+                   AArch64::D0,       AArch64::D0_D1,
+                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
+      /* QReg */ { AArch64::Q0,
+                   AArch64::Q0,       AArch64::Q0_Q1,
+                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
+      /* ZReg */ { AArch64::Z0,
+                   AArch64::Z0,       AArch64::Z0_Z1,
+                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
+    };
+
+    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
+           " NumRegs must be <= 4 for ZRegs");
+
+    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
+    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
+                                         FirstRegs[(unsigned)RegTy][0]));
+  }
+
+  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   }
 
-  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
+  template <unsigned ImmIs0, unsigned ImmIs1>
+  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
-
-  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
+    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
+    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
   }
 
   void addImmOperands(MCInst &Inst, unsigned N) const {
@@ -1221,9 +1454,13 @@ public:
     addExpr(Inst, getImm());
   }
 
-  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
+  template <int Shift>
+  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
     assert(N == 2 && "Invalid number of operands!");
-    if (isShiftedImm()) {
+    if (auto ShiftedVal = getShiftedVal<Shift>()) {
+      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
+      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
+    } else if (isShiftedImm()) {
       addExpr(Inst, getShiftedImmVal());
       Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
     } else {
@@ -1232,16 +1469,14 @@ public:
     }
   }
 
-  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
+  template <int Shift>
+  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
     assert(N == 2 && "Invalid number of operands!");
-
-    const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
-    const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
-    int64_t Val = -CE->getValue();
-    unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
-
-    Inst.addOperand(MCOperand::createImm(Val));
-    Inst.addOperand(MCOperand::createImm(ShiftAmt));
+    if (auto ShiftedVal = getShiftedVal<Shift>()) {
+      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
+      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
+    } else
+      llvm_unreachable("Not a shifted negative immediate");
   }
 
   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
@@ -1274,155 +1509,34 @@ public:
     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
   }
 
-  void addSImm9Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
-  }
-
-  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
-  }
-
-  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
-  }
-
-  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
-  }
-
-  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    assert(MCE && "Invalid constant immediate operand!");
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
+  void addUImm6Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
-  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
+  template <int Scale>
+  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
operands!"); - const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); - Inst.addOperand(MCOperand::createImm(MCE->getValue())); - } - - void addLogicalImm32Operands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); - uint64_t encoding = - AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32); - Inst.addOperand(MCOperand::createImm(encoding)); - } - - void addLogicalImm64Operands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); - uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64); - Inst.addOperand(MCOperand::createImm(encoding)); + Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); } - void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const { + template <typename T> + void addLogicalImmOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); - int64_t Val = ~MCE->getValue() & 0xFFFFFFFF; - uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32); + typename std::make_unsigned<T>::type Val = MCE->getValue(); + uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); Inst.addOperand(MCOperand::createImm(encoding)); } - void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const { + template <typename T> + void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); - uint64_t encoding = - AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64); + typename std::make_unsigned<T>::type Val = ~MCE->getValue(); + uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); Inst.addOperand(MCOperand::createImm(encoding)); } @@ -1477,7 +1591,8 @@ public: void addFPImmOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - Inst.addOperand(MCOperand::createImm(getFPImm())); + Inst.addOperand(MCOperand::createImm( + AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()))); } void addBarrierOperands(MCInst &Inst, unsigned N) const { @@ -1611,35 +1726,49 @@ public: } static std::unique_ptr<AArch64Operand> - CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) { + CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx, + RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg, + AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, + unsigned ShiftAmount = 0, + unsigned HasExplicitAmount = false) { auto Op = make_unique<AArch64Operand>(k_Register, Ctx); Op->Reg.RegNum = RegNum; Op->Reg.Kind = Kind; + Op->Reg.ElementWidth = 0; + Op->Reg.EqualityTy = EqTy; + Op->Reg.ShiftExtend.Type = ExtTy; + Op->Reg.ShiftExtend.Amount = ShiftAmount; + Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount; Op->StartLoc = S; Op->EndLoc = E; return Op; } static std::unique_ptr<AArch64Operand> - CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth, - SMLoc S, SMLoc E, MCContext &Ctx) { - auto Op = make_unique<AArch64Operand>(k_Register, Ctx); - Op->Reg.RegNum = RegNum; + CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth, + SMLoc S, SMLoc E, MCContext &Ctx, + AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, + unsigned ShiftAmount = 0, + unsigned HasExplicitAmount = false) { + assert((Kind == 
+            Kind == RegKind::SVEPredicateVector) &&
+           "Invalid vector kind");
+    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
+                        HasExplicitAmount);
     Op->Reg.ElementWidth = ElementWidth;
-    Op->Reg.Kind = Kind;
-    Op->StartLoc = S;
-    Op->EndLoc = E;
     return Op;
   }
 
   static std::unique_ptr<AArch64Operand>
   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
-                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
+                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
+                   MCContext &Ctx) {
     auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
     Op->VectorList.RegNum = RegNum;
     Op->VectorList.Count = Count;
     Op->VectorList.NumElements = NumElements;
-    Op->VectorList.ElementKind = ElementKind;
+    Op->VectorList.ElementWidth = ElementWidth;
+    Op->VectorList.RegisterKind = RegisterKind;
     Op->StartLoc = S;
     Op->EndLoc = E;
     return Op;
@@ -1684,10 +1813,11 @@ public:
     return Op;
   }
 
-  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
-                                                     MCContext &Ctx) {
+  static std::unique_ptr<AArch64Operand>
+  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
     auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
-    Op->FPImm.Val = Val;
+    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
+    Op->FPImm.IsExact = IsExact;
     Op->StartLoc = S;
     Op->EndLoc = S;
     return Op;
@@ -1775,8 +1905,10 @@ public:
 void AArch64Operand::print(raw_ostream &OS) const {
   switch (Kind) {
   case k_FPImm:
-    OS << "<fpimm " << getFPImm() << "("
-       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
+    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
+    if (!getFPImmIsExact())
+      OS << " (inexact)";
+    OS << ">";
     break;
   case k_Barrier: {
     StringRef Name = getBarrierName();
@@ -1799,9 +1931,6 @@ void AArch64Operand::print(raw_ostream &OS) const {
   case k_CondCode:
     OS << "<condcode " << getCondCode() << ">";
     break;
-  case k_Register:
-    OS << "<register " << getReg() << ">";
-    break;
   case k_VectorList: {
     OS << "<vectorlist ";
     unsigned Reg = getVectorListStart();
@@ -1833,6 +1962,11 @@ void AArch64Operand::print(raw_ostream &OS) const {
   case k_PSBHint:
     OS << getPSBHintName();
     break;
+  case k_Register:
+    OS << "<register " << getReg() << ">";
+    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
+      break;
+    LLVM_FALLTHROUGH;
   case k_ShiftExtend:
     OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
        << getShiftExtendAmount();
@@ -1887,29 +2021,65 @@ static unsigned MatchNeonVectorRegName(StringRef Name) {
       .Default(0);
 }
 
-static bool isValidVectorKind(StringRef Name) {
-  return StringSwitch<bool>(Name.lower())
-      .Case(".8b", true)
-      .Case(".16b", true)
-      .Case(".4h", true)
-      .Case(".8h", true)
-      .Case(".2s", true)
-      .Case(".4s", true)
-      .Case(".1d", true)
-      .Case(".2d", true)
-      .Case(".1q", true)
-      // Accept the width neutral ones, too, for verbose syntax. If those
-      // aren't used in the right places, the token operand won't match so
-      // all will work out.
-      .Case(".b", true)
-      .Case(".h", true)
-      .Case(".s", true)
-      .Case(".d", true)
-      // Needed for fp16 scalar pairwise reductions
-      .Case(".2h", true)
-      // another special case for the ARMv8.2a dot product operand
-      .Case(".4b", true)
-      .Default(false);
+/// Returns an optional pair of (#elements, element-width) if Suffix
+/// is a valid vector kind. Where the number of elements in a vector
+/// or the vector width is implicit or explicitly unknown (but still a
+/// valid suffix kind), 0 is used.
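// [Editor's note — examples, not part of the patch] parseVectorKind returns
// {4, 32} for ".4s", {1, 64} for ".1d", {0, 8} for the width-only ".b",
// {0, 0} for an empty suffix, and None for anything unrecognized, which
// isValidVectorKind below reduces to a plain bool.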
+static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
+                                                     RegKind VectorKind) {
+  std::pair<int, int> Res = {-1, -1};
+
+  switch (VectorKind) {
+  case RegKind::NeonVector:
+    Res =
+        StringSwitch<std::pair<int, int>>(Suffix.lower())
+            .Case("", {0, 0})
+            .Case(".1d", {1, 64})
+            .Case(".1q", {1, 128})
+            // '.2h' needed for fp16 scalar pairwise reductions
+            .Case(".2h", {2, 16})
+            .Case(".2s", {2, 32})
+            .Case(".2d", {2, 64})
+            // '.4b' is another special case for the ARMv8.2a dot product
+            // operand
+            .Case(".4b", {4, 8})
+            .Case(".4h", {4, 16})
+            .Case(".4s", {4, 32})
+            .Case(".8b", {8, 8})
+            .Case(".8h", {8, 16})
+            .Case(".16b", {16, 8})
+            // Accept the width neutral ones, too, for verbose syntax. If those
+            // aren't used in the right places, the token operand won't match so
+            // all will work out.
+            .Case(".b", {0, 8})
+            .Case(".h", {0, 16})
+            .Case(".s", {0, 32})
+            .Case(".d", {0, 64})
+            .Default({-1, -1});
+    break;
+  case RegKind::SVEPredicateVector:
+  case RegKind::SVEDataVector:
+    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
+              .Case("", {0, 0})
+              .Case(".b", {0, 8})
+              .Case(".h", {0, 16})
+              .Case(".s", {0, 32})
+              .Case(".d", {0, 64})
+              .Case(".q", {0, 128})
+              .Default({-1, -1});
+    break;
+  default:
+    llvm_unreachable("Unsupported RegKind");
+  }
+
+  if (Res == std::make_pair(-1, -1))
+    return Optional<std::pair<int, int>>();
+
+  return Optional<std::pair<int, int>>(Res);
+}
+
+static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
+  return parseVectorKind(Suffix, VectorKind).hasValue();
 }
 
 static unsigned matchSVEDataVectorRegName(StringRef Name) {
@@ -1970,40 +2140,12 @@ static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
       .Default(0);
 }
 
-static bool isValidSVEKind(StringRef Name) {
-  return StringSwitch<bool>(Name.lower())
-      .Case(".b", true)
-      .Case(".h", true)
-      .Case(".s", true)
-      .Case(".d", true)
-      .Case(".q", true)
-      .Default(false);
-}
-
-static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
-                                 char &ElementKind) {
-  assert(isValidVectorKind(Name));
-
-  ElementKind = Name.lower()[Name.size() - 1];
-  NumElements = 0;
-
-  if (Name.size() == 2)
-    return;
-
-  // Parse the lane count
-  Name = Name.drop_front();
-  while (isdigit(Name.front())) {
-    NumElements = 10 * NumElements + (Name.front() - '0');
-    Name = Name.drop_front();
-  }
-}
-
 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                      SMLoc &EndLoc) {
   StartLoc = getLoc();
-  RegNo = tryParseRegister();
+  auto Res = tryParseScalarRegister(RegNo);
   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
-  return (RegNo == (unsigned)-1);
+  return Res != MatchOperand_Success;
 }
 
 // Matches a register name or register alias previously defined by '.req'
@@ -2024,6 +2166,15 @@ unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
     return Kind == RegKind::Scalar ? RegNum : 0;
 
   if (!RegNum) {
+    // Handle a few common aliases of registers.
+    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
+                    .Case("fp", AArch64::FP)
+                    .Case("lr", AArch64::LR)
+                    .Case("x31", AArch64::XZR)
+                    .Case("w31", AArch64::WZR)
+                    .Default(0))
+      return Kind == RegKind::Scalar ? RegNum : 0;
+
     // Check for aliases registered via .req. Canonicalize to lower case.
     // That's more consistent since register names are case insensitive, and
     // it's how the original entry was passed in from MC/MCParser/AsmParser.
@@ -2038,65 +2189,24 @@ unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
   return RegNum;
 }
 
-/// tryParseRegister - Try to parse a register name. The token must be an
+/// tryParseScalarRegister - Try to parse a register name. The token must be an
 /// Identifier when called, and if it is a register name the token is eaten and
 /// the register is added to the operand list.
-int AArch64AsmParser::tryParseRegister() {
+OperandMatchResultTy
+AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
   MCAsmParser &Parser = getParser();
   const AsmToken &Tok = Parser.getTok();
   if (Tok.isNot(AsmToken::Identifier))
-    return -1;
+    return MatchOperand_NoMatch;
 
   std::string lowerCase = Tok.getString().lower();
-  unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
-
-  // Also handle a few aliases of registers.
-  if (RegNum == 0)
-    RegNum = StringSwitch<unsigned>(lowerCase)
-                 .Case("fp", AArch64::FP)
-                 .Case("lr", AArch64::LR)
-                 .Case("x31", AArch64::XZR)
-                 .Case("w31", AArch64::WZR)
-                 .Default(0);
-
-  if (RegNum == 0)
-    return -1;
+  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
+  if (Reg == 0)
+    return MatchOperand_NoMatch;
 
+  RegNum = Reg;
   Parser.Lex(); // Eat identifier token.
-  return RegNum;
-}
-
-/// tryMatchVectorRegister - Try to parse a vector register name with optional
-/// kind specifier. If it is a register specifier, eat the token and return it.
-int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
-  MCAsmParser &Parser = getParser();
-  if (Parser.getTok().isNot(AsmToken::Identifier)) {
-    TokError("vector register expected");
-    return -1;
-  }
-
-  StringRef Name = Parser.getTok().getString();
-  // If there is a kind specifier, it's separated from the register name by
-  // a '.'.
-  size_t Start = 0, Next = Name.find('.');
-  StringRef Head = Name.slice(Start, Next);
-  unsigned RegNum = matchRegisterNameAlias(Head, RegKind::NeonVector);
-
-  if (RegNum) {
-    if (Next != StringRef::npos) {
-      Kind = Name.slice(Next, StringRef::npos);
-      if (!isValidVectorKind(Kind)) {
-        TokError("invalid vector kind qualifier");
-        return -1;
-      }
-    }
-    Parser.Lex(); // Eat the register token.
-    return RegNum;
-  }
-
-  if (expected)
-    TokError("vector register expected");
-  return -1;
+  return MatchOperand_Success;
 }
 
 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
@@ -2130,11 +2240,32 @@ AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
 }
 
 /// tryParsePrefetch - Try to parse a prefetch operand.
+template <bool IsSVEPrefetch>
 OperandMatchResultTy
 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
   MCAsmParser &Parser = getParser();
   SMLoc S = getLoc();
   const AsmToken &Tok = Parser.getTok();
+
+  auto LookupByName = [](StringRef N) {
+    if (IsSVEPrefetch) {
+      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
+        return Optional<unsigned>(Res->Encoding);
+    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
+      return Optional<unsigned>(Res->Encoding);
+    return Optional<unsigned>();
+  };
+
+  auto LookupByEncoding = [](unsigned E) {
+    if (IsSVEPrefetch) {
+      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
+        return Optional<StringRef>(Res->Name);
+    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
+      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
+  };
+  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
+
   // Either an identifier for named values or a 5-bit immediate.
   // Eat optional hash.
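  // [Editor's note — example, not part of the patch] Both spellings reach
  // this point: "prfm pldl1keep, [x0]" resolves through LookupByName, while
  // "prfm #0, [x0]" takes the immediate path below; SVE prefetches (e.g.
  // "prfb") use a smaller 4-bit encoding space, hence MaxVal == 15.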
   if (parseOptionalToken(AsmToken::Hash) ||
       Tok.is(AsmToken::Integer)) {
     const MCExpr *ImmVal;
     if (getParser().parseExpression(ImmVal))
       return MatchOperand_ParseFail;
 
     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
     if (!MCE) {
       TokError("immediate value expected for prefetch operand");
       return MatchOperand_ParseFail;
     }
     unsigned prfop = MCE->getValue();
-    if (prfop > 31) {
-      TokError("prefetch operand out of range, [0,31] expected");
+    if (prfop > MaxVal) {
+      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
+               "] expected");
       return MatchOperand_ParseFail;
     }
 
-    auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
+    auto PRFM = LookupByEncoding(MCE->getValue());
     Operands.push_back(AArch64Operand::CreatePrefetch(
-        prfop, PRFM ? PRFM->Name : "", S, getContext()));
+        prfop, PRFM.getValueOr(""), S, getContext()));
     return MatchOperand_Success;
   }
 
   if (Tok.isNot(AsmToken::Identifier)) {
-    TokError("pre-fetch hint expected");
+    TokError("prefetch hint expected");
     return MatchOperand_ParseFail;
   }
 
-  auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
+  auto PRFM = LookupByName(Tok.getString());
   if (!PRFM) {
-    TokError("pre-fetch hint expected");
+    TokError("prefetch hint expected");
     return MatchOperand_ParseFail;
   }
 
   Parser.Lex(); // Eat identifier token.
   Operands.push_back(AArch64Operand::CreatePrefetch(
-      PRFM->Encoding, Tok.getString(), S, getContext()));
+      *PRFM, Tok.getString(), S, getContext()));
   return MatchOperand_Success;
 }
 
@@ -2258,17 +2390,21 @@ AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
   SMLoc S = getLoc();
   const MCExpr *Expr;
 
-  parseOptionalToken(AsmToken::Hash);
-  if (getParser().parseExpression(Expr))
-    return MatchOperand_ParseFail;
+  const AsmToken &Tok = getParser().getTok();
+  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
+    if (getParser().parseExpression(Expr))
+      return MatchOperand_ParseFail;
 
-  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
-  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
+    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
 
-  return MatchOperand_Success;
+    return MatchOperand_Success;
+  }
+  return MatchOperand_NoMatch;
 }
 
 /// tryParseFPImm - A floating point immediate expression operand.
+template<bool AddFPZeroAsLiteral>
 OperandMatchResultTy
 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
   MCAsmParser &Parser = getParser();
@@ -2280,50 +2416,50 @@ AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
   bool isNegative = parseOptionalToken(AsmToken::Minus);
 
   const AsmToken &Tok = Parser.getTok();
-  if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
-    int64_t Val;
-    if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
-      Val = Tok.getIntVal();
-      if (Val > 255 || Val < 0) {
-        TokError("encoded floating point value out of range");
-        return MatchOperand_ParseFail;
-      }
-    } else {
-      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
-      if (isNegative)
-        RealVal.changeSign();
+  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
+    if (!Hash)
+      return MatchOperand_NoMatch;
+    TokError("invalid floating point immediate");
+    return MatchOperand_ParseFail;
+  }
 
-      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
-      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
-
-      // Check for out of range values. As an exception we let Zero through,
-      // but as tokens instead of an FPImm so that it can be matched by the
-      // appropriate alias if one exists.
-      if (RealVal.isPosZero()) {
-        Parser.Lex(); // Eat the token.
- Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext())); - Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext())); - return MatchOperand_Success; - } else if (Val == -1) { - TokError("expected compatible register or floating-point constant"); - return MatchOperand_ParseFail; - } + // Parse hexadecimal representation. + if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) { + if (Tok.getIntVal() > 255 || isNegative) { + TokError("encoded floating point value out of range"); + return MatchOperand_ParseFail; } - Parser.Lex(); // Eat the token. - Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext())); - return MatchOperand_Success; + + APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal())); + Operands.push_back( + AArch64Operand::CreateFPImm(F, true, S, getContext())); + } else { + // Parse FP representation. + APFloat RealVal(APFloat::IEEEdouble()); + auto Status = + RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero); + if (isNegative) + RealVal.changeSign(); + + if (AddFPZeroAsLiteral && RealVal.isPosZero()) { + Operands.push_back( + AArch64Operand::CreateToken("#0", false, S, getContext())); + Operands.push_back( + AArch64Operand::CreateToken(".0", false, S, getContext())); + } else + Operands.push_back(AArch64Operand::CreateFPImm( + RealVal, Status == APFloat::opOK, S, getContext())); } - if (!Hash) - return MatchOperand_NoMatch; + Parser.Lex(); // Eat the token. - TokError("invalid floating point immediate"); - return MatchOperand_ParseFail; + return MatchOperand_Success; } -/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand +/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with +/// a shift suffix, for example '#1, lsl #12'. OperandMatchResultTy -AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) { +AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) { MCAsmParser &Parser = getParser(); SMLoc S = getLoc(); @@ -2337,18 +2473,9 @@ AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) { if (parseSymbolicImmVal(Imm)) return MatchOperand_ParseFail; else if (Parser.getTok().isNot(AsmToken::Comma)) { - uint64_t ShiftAmount = 0; - const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm); - if (MCE) { - int64_t Val = MCE->getValue(); - if (Val > 0xfff && (Val & 0xfff) == 0) { - Imm = MCConstantExpr::create(Val >> 12, getContext()); - ShiftAmount = 12; - } - } SMLoc E = Parser.getTok().getLoc(); - Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E, - getContext())); + Operands.push_back( + AArch64Operand::CreateImm(Imm, S, E, getContext())); return MatchOperand_Success; } @@ -2380,6 +2507,13 @@ AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) { } Parser.Lex(); // Eat the number + // Just in case the optional lsl #0 is used for immediates other than zero. 
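  // [Editor's note — example, not part of the patch] "add x0, x1, #16, lsl #0"
  // is re-created below as the plain immediate #16 so it matches the same
  // operand class as "add x0, x1, #16"; only "#0, lsl #0" keeps the explicit
  // shifted-immediate form.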
+  if (ShiftAmount == 0 && Imm != 0) {
+    SMLoc E = Parser.getTok().getLoc();
+    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
+    return MatchOperand_Success;
+  }
+
   SMLoc E = Parser.getTok().getLoc();
   Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                       getContext()));
@@ -2408,6 +2542,22 @@ AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
                     .Case("al", AArch64CC::AL)
                     .Case("nv", AArch64CC::NV)
                     .Default(AArch64CC::Invalid);
+
+  if (CC == AArch64CC::Invalid &&
+      getSTI().getFeatureBits()[AArch64::FeatureSVE])
+    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
+                    .Case("none",  AArch64CC::EQ)
+                    .Case("any",   AArch64CC::NE)
+                    .Case("nlast", AArch64CC::HS)
+                    .Case("last",  AArch64CC::LO)
+                    .Case("first", AArch64CC::MI)
+                    .Case("nfrst", AArch64CC::PL)
+                    .Case("pmore", AArch64CC::HI)
+                    .Case("plast", AArch64CC::LS)
+                    .Case("tcont", AArch64CC::GE)
+                    .Case("tstop", AArch64CC::LT)
+                    .Default(AArch64CC::Invalid);
+
   return CC;
 }
 
@@ -2515,6 +2665,10 @@ static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
     Str += "ARMv8.1a";
   else if (FBS[AArch64::HasV8_2aOps])
     Str += "ARMv8.2a";
+  else if (FBS[AArch64::HasV8_3aOps])
+    Str += "ARMv8.3a";
+  else if (FBS[AArch64::HasV8_4aOps])
+    Str += "ARMv8.4a";
   else
     Str += "(unknown)";
 }
@@ -2625,9 +2779,11 @@ AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
   MCAsmParser &Parser = getParser();
   const AsmToken &Tok = Parser.getTok();
 
+  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
+    TokError("'csync' operand expected");
+    return MatchOperand_ParseFail;
   // Can be either a #imm style literal or an option name
-  if (parseOptionalToken(AsmToken::Hash) ||
-      Tok.is(AsmToken::Integer)) {
+  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
     // Immediate operand.
     const MCExpr *ImmVal;
     SMLoc ExprLoc = getLoc();
@@ -2653,18 +2809,23 @@ AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
     return MatchOperand_ParseFail;
   }
 
+  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
   // The only valid named option for ISB is 'sy'
   auto DB = AArch64DB::lookupDBByName(Tok.getString());
   if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
     TokError("'sy' or #imm operand expected");
     return MatchOperand_ParseFail;
-  } else if (!DB) {
+  // The only valid named option for TSB is 'csync'
+  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
+    TokError("'csync' operand expected");
+    return MatchOperand_ParseFail;
+  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
     return MatchOperand_ParseFail;
   }
 
   Operands.push_back(AArch64Operand::CreateBarrier(
-      DB->Encoding, Tok.getString(), getLoc(), getContext()));
+      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
   Parser.Lex(); // Consume the option
 
   return MatchOperand_Success;
@@ -2708,12 +2869,20 @@ bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
   SMLoc S = getLoc();
 
   // Check for a vector register specifier first.
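  // [Editor's note — example, not part of the patch] For "v0.s[1]" this path
  // produces a V0 register operand with ElementWidth 32, a ".s" token
  // operand, and finally a vector-index operand via tryParseVectorIndex.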
StringRef Kind;
-  int64_t Reg = tryMatchVectorRegister(Kind, false);
-  if (Reg == -1)
+  unsigned Reg;
+  OperandMatchResultTy Res =
+      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
+  if (Res != MatchOperand_Success)
+    return true;
+
+  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
+  if (!KindRes)
     return true;
+
+  unsigned ElementWidth = KindRes->second;
   Operands.push_back(
-      AArch64Operand::CreateReg(Reg, RegKind::NeonVector, S, getLoc(),
-                                getContext()));
+      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
+                                      S, getLoc(), getContext()));

   // If there was an explicit qualifier, that goes on as a literal text
   // operand.
@@ -2721,36 +2890,41 @@ bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
     Operands.push_back(
         AArch64Operand::CreateToken(Kind, false, S, getContext()));

-  // If there is an index specifier following the register, parse that too.
+  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
+}
+
+OperandMatchResultTy
+AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
   SMLoc SIdx = getLoc();
   if (parseOptionalToken(AsmToken::LBrac)) {
     const MCExpr *ImmVal;
     if (getParser().parseExpression(ImmVal))
-      return false;
+      return MatchOperand_NoMatch;
     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
     if (!MCE) {
       TokError("immediate value expected for vector index");
-      return false;
+      return MatchOperand_ParseFail;
     }

     SMLoc E = getLoc();
     if (parseToken(AsmToken::RBrac, "']' expected"))
-      return false;
+      return MatchOperand_ParseFail;

     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                          E, getContext()));
+    return MatchOperand_Success;
   }

-  return false;
+  return MatchOperand_NoMatch;
 }

-// tryParseSVEDataVectorRegister - Try to parse a SVE vector register name with
+// tryParseVectorRegister - Try to parse a vector register name with
 // optional kind specifier. If it is a register specifier, eat the token
 // and return it.
 OperandMatchResultTy
-AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
-                                      RegKind MatchKind) {
+AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
+                                         RegKind MatchKind) {
   MCAsmParser &Parser = getParser();
   const AsmToken &Tok = Parser.getTok();

@@ -2767,8 +2941,8 @@ AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
   if (RegNum) {
     if (Next != StringRef::npos) {
       Kind = Name.slice(Next, StringRef::npos);
-      if (!isValidSVEKind(Kind)) {
-        TokError("invalid sve vector kind qualifier");
+      if (!isValidVectorKind(Kind, MatchKind)) {
+        TokError("invalid vector kind qualifier");
         return MatchOperand_ParseFail;
       }
     }
@@ -2787,45 +2961,64 @@ AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
   // Check for a SVE predicate register specifier first.
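   // E.g. "p0.s", or "p0/z" and "p0/m" when predication follows (in which
   // case no element size suffix is allowed).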
const SMLoc S = getLoc();
   StringRef Kind;
-  int RegNum = -1;
-  auto Res = tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
+  unsigned RegNum;
+  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
   if (Res != MatchOperand_Success)
     return Res;

-  unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
-                              .Case("", -1)
-                              .Case(".b", 8)
-                              .Case(".h", 16)
-                              .Case(".s", 32)
-                              .Case(".d", 64)
-                              .Case(".q", 128)
-                              .Default(0);
-
-  if (!ElementWidth)
+  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
+  if (!KindRes)
     return MatchOperand_NoMatch;

+  unsigned ElementWidth = KindRes->second;
+  Operands.push_back(AArch64Operand::CreateVectorReg(
+      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
+      getLoc(), getContext()));
+
+  // Not all predicates are followed by a '/m' or '/z'.
+  MCAsmParser &Parser = getParser();
+  if (Parser.getTok().isNot(AsmToken::Slash))
+    return MatchOperand_Success;
+
+  // But when they do, they shouldn't have an element type suffix.
+  if (!Kind.empty()) {
+    Error(S, "not expecting size suffix");
+    return MatchOperand_ParseFail;
+  }
+
+  // Add a literal slash as operand
   Operands.push_back(
-      AArch64Operand::CreateReg(RegNum, RegKind::SVEPredicateVector,
-                                ElementWidth, S, getLoc(), getContext()));
+      AArch64Operand::CreateToken("/", false, getLoc(), getContext()));
+
+  Parser.Lex(); // Eat the slash.
+
+  // Zeroing or merging?
+  auto Pred = Parser.getTok().getString().lower();
+  if (Pred != "z" && Pred != "m") {
+    Error(getLoc(), "expecting 'm' or 'z' predication");
+    return MatchOperand_ParseFail;
+  }
+
+  // Add zero/merge token.
+  const char *ZM = Pred == "z" ? "z" : "m";
+  Operands.push_back(
+      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
+
+  Parser.Lex(); // Eat zero/merge token.
   return MatchOperand_Success;
 }

-/// parseRegister - Parse a non-vector register operand.
+/// parseRegister - Parse a register operand.
 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
-  SMLoc S = getLoc();
-  // Try for a vector (neon) register.
+  // Try for a Neon vector register.
   if (!tryParseNeonVectorRegister(Operands))
     return false;

-  // Try for a scalar register.
-  int64_t Reg = tryParseRegister();
-  if (Reg == -1)
-    return true;
-  Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
-                     getLoc(), getContext()));
+  // Otherwise try for a scalar register.
+  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
+    return false;

-  return false;
+  return true;
 }

 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
@@ -2876,6 +3069,8 @@ bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
       .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
       .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
       .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
+      .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
+      .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
       .Default(AArch64MCExpr::VK_INVALID);

   if (RefKind == AArch64MCExpr::VK_INVALID)
@@ -2896,33 +3091,74 @@ bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
   return false;
 }

-/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
-bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
+template <RegKind VectorKind>
+OperandMatchResultTy
+AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
+                                     bool ExpectMatch) {
   MCAsmParser &Parser = getParser();
-  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
+  if (!Parser.getTok().is(AsmToken::LCurly))
+    return MatchOperand_NoMatch;
+
+  // Wrapper around parse function
+  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
+                                     bool NoMatchIsError) {
+    auto RegTok = Parser.getTok();
+    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
+    if (ParseRes == MatchOperand_Success) {
+      if (parseVectorKind(Kind, VectorKind))
+        return ParseRes;
+      llvm_unreachable("Expected a valid vector kind");
+    }
+
+    if (RegTok.isNot(AsmToken::Identifier) ||
+        ParseRes == MatchOperand_ParseFail ||
+        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
+      Error(Loc, "vector register expected");
+      return MatchOperand_ParseFail;
+    }
+
+    return MatchOperand_NoMatch;
+  };
+
   SMLoc S = getLoc();
+  auto LCurly = Parser.getTok();
   Parser.Lex(); // Eat left bracket token.
+
   StringRef Kind;
-  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
-  if (FirstReg == -1)
-    return true;
+  unsigned FirstReg;
+  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
+
+  // Put back the original left bracket if there was no match, so that
+  // different types of list-operands can be matched (e.g. SVE, Neon).
+  if (ParseRes == MatchOperand_NoMatch)
+    Parser.getLexer().UnLex(LCurly);
+
+  if (ParseRes != MatchOperand_Success)
+    return ParseRes;
+
   int64_t PrevReg = FirstReg;
   unsigned Count = 1;

   if (parseOptionalToken(AsmToken::Minus)) {
     SMLoc Loc = getLoc();
     StringRef NextKind;
-    int64_t Reg = tryMatchVectorRegister(NextKind, true);
-    if (Reg == -1)
-      return true;
+
+    unsigned Reg;
+    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
+    if (ParseRes != MatchOperand_Success)
+      return ParseRes;
+
     // Any Kind suffixes must match on all regs in the list.
-    if (Kind != NextKind)
-      return Error(Loc, "mismatched register size suffix");
+    if (Kind != NextKind) {
+      Error(Loc, "mismatched register size suffix");
+      return MatchOperand_ParseFail;
+    }

     unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

     if (Space == 0 || Space > 3) {
-      return Error(Loc, "invalid number of vectors");
+      Error(Loc, "invalid number of vectors");
+      return MatchOperand_ParseFail;
     }

     Count += Space;
@@ -2931,17 +3167,23 @@ bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
     while (parseOptionalToken(AsmToken::Comma)) {
       SMLoc Loc = getLoc();
       StringRef NextKind;
-      int64_t Reg = tryMatchVectorRegister(NextKind, true);
-      if (Reg == -1)
-        return true;
+      unsigned Reg;
+      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
+      if (ParseRes != MatchOperand_Success)
+        return ParseRes;
+
       // Any Kind suffixes must match on all regs in the list.
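       // E.g. "{ v0.8b, v1.4h }" is rejected here.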
- if (Kind != NextKind) - return Error(Loc, "mismatched register size suffix"); + if (Kind != NextKind) { + Error(Loc, "mismatched register size suffix"); + return MatchOperand_ParseFail; + } // Registers must be incremental (with wraparound at 31) if (getContext().getRegisterInfo()->getEncodingValue(Reg) != - (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) - return Error(Loc, "registers must be sequential"); + (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) { + Error(Loc, "registers must be sequential"); + return MatchOperand_ParseFail; + } PrevReg = Reg; ++Count; @@ -2949,83 +3191,146 @@ bool AArch64AsmParser::parseVectorList(OperandVector &Operands) { } if (parseToken(AsmToken::RCurly, "'}' expected")) - return true; + return MatchOperand_ParseFail; - if (Count > 4) - return Error(S, "invalid number of vectors"); + if (Count > 4) { + Error(S, "invalid number of vectors"); + return MatchOperand_ParseFail; + } unsigned NumElements = 0; - char ElementKind = 0; - if (!Kind.empty()) - parseValidVectorKind(Kind, NumElements, ElementKind); + unsigned ElementWidth = 0; + if (!Kind.empty()) { + if (const auto &VK = parseVectorKind(Kind, VectorKind)) + std::tie(NumElements, ElementWidth) = *VK; + } Operands.push_back(AArch64Operand::CreateVectorList( - FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext())); + FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(), + getContext())); - // If there is an index specifier following the list, parse that too. - SMLoc SIdx = getLoc(); - if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token. - const MCExpr *ImmVal; - if (getParser().parseExpression(ImmVal)) - return false; - const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); - if (!MCE) { - TokError("immediate value expected for vector index"); - return false; - } + return MatchOperand_Success; +} - SMLoc E = getLoc(); - if (parseToken(AsmToken::RBrac, "']' expected")) - return false; +/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions. 
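+/// E.g. "{ v0.8b, v1.8b }" or "{ v0.b, v1.b }[3]"; any trailing index is
+/// parsed by tryParseVectorIndex.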
+bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) { + auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true); + if (ParseRes != MatchOperand_Success) + return true; - Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx, - E, getContext())); - } - return false; + return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; } OperandMatchResultTy AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) { - MCAsmParser &Parser = getParser(); - const AsmToken &Tok = Parser.getTok(); - if (!Tok.is(AsmToken::Identifier)) - return MatchOperand_NoMatch; + SMLoc StartLoc = getLoc(); - unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), RegKind::Scalar); - - MCContext &Ctx = getContext(); - const MCRegisterInfo *RI = Ctx.getRegisterInfo(); - if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum)) - return MatchOperand_NoMatch; - - SMLoc S = getLoc(); - Parser.Lex(); // Eat register + unsigned RegNum; + OperandMatchResultTy Res = tryParseScalarRegister(RegNum); + if (Res != MatchOperand_Success) + return Res; if (!parseOptionalToken(AsmToken::Comma)) { - Operands.push_back( - AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx)); + Operands.push_back(AArch64Operand::CreateReg( + RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); return MatchOperand_Success; } parseOptionalToken(AsmToken::Hash); - if (Parser.getTok().isNot(AsmToken::Integer)) { + if (getParser().getTok().isNot(AsmToken::Integer)) { Error(getLoc(), "index must be absent or #0"); return MatchOperand_ParseFail; } const MCExpr *ImmVal; - if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) || + if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) || cast<MCConstantExpr>(ImmVal)->getValue() != 0) { Error(getLoc(), "index must be absent or #0"); return MatchOperand_ParseFail; } - Operands.push_back( - AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx)); + Operands.push_back(AArch64Operand::CreateReg( + RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); return MatchOperand_Success; } +template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy> +OperandMatchResultTy +AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) { + SMLoc StartLoc = getLoc(); + + unsigned RegNum; + OperandMatchResultTy Res = tryParseScalarRegister(RegNum); + if (Res != MatchOperand_Success) + return Res; + + // No shift/extend is the default. + if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) { + Operands.push_back(AArch64Operand::CreateReg( + RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy)); + return MatchOperand_Success; + } + + // Eat the comma + getParser().Lex(); + + // Match the shift + SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; + Res = tryParseOptionalShiftExtend(ExtOpnd); + if (Res != MatchOperand_Success) + return Res; + + auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get()); + Operands.push_back(AArch64Operand::CreateReg( + RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy, + Ext->getShiftExtendType(), Ext->getShiftExtendAmount(), + Ext->hasShiftExtendAmount())); + + return MatchOperand_Success; +} + +bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); + + // Some SVE instructions have a decoration after the immediate, i.e. + // "mul vl". 
We parse them here and add tokens, which must be present in the
+  // asm string in the tablegen instruction.
+  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
+  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
+  if (!Parser.getTok().getString().equals_lower("mul") ||
+      !(NextIsVL || NextIsHash))
+    return true;
+
+  Operands.push_back(
+      AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
+  Parser.Lex(); // Eat the "mul"
+
+  if (NextIsVL) {
+    Operands.push_back(
+        AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
+    Parser.Lex(); // Eat the "vl"
+    return false;
+  }
+
+  if (NextIsHash) {
+    Parser.Lex(); // Eat the #
+    SMLoc S = getLoc();
+
+    // Parse immediate operand.
+    const MCExpr *ImmVal;
+    if (!Parser.parseExpression(ImmVal))
+      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
+        Operands.push_back(AArch64Operand::CreateImm(
+            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
+            getContext()));
+        return false;
+      }
+  }
+
+  return Error(getLoc(), "expected 'vl' or '#<imm>'");
+}
+
 /// parseOperand - Parse an AArch64 instruction operand.  For now this parses
 /// the operand regardless of the mnemonic.
 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
@@ -3069,7 +3374,7 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
     return parseOperand(Operands, false, false);
   }
   case AsmToken::LCurly:
-    return parseVectorList(Operands);
+    return parseNeonVectorList(Operands);
   case AsmToken::Identifier: {
     // If we're expecting a Condition Code operand, then just parse that.
     if (isCondCode)
@@ -3079,6 +3384,11 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
     if (!parseRegister(Operands))
       return false;

+    // See if this is a "mul vl" decoration or "mul #<int>" operand used
+    // by SVE instructions.
+    if (!parseOptionalMulOperand(Operands))
+      return false;
+
     // This could be an optional "shift" or "extend" operand.
     OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
     // We can only continue if no tokens were eaten.
@@ -3122,7 +3432,7 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
       if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
           Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
-          Mnemonic != "fcmlt")
+          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
         return TokError("unexpected floating point literal");
       else if (IntVal != 0 || isNegative)
         return TokError("expected floating-point constant #0.0");
@@ -3193,6 +3503,30 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
   }
 }

+bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
+                                 const MCParsedAsmOperand &Op2) const {
+  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
+  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
+  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
+      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
+    return MCTargetAsmParser::regsEqual(Op1, Op2);
+
+  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
+         "Testing equality of non-scalar registers not supported");
+
+  // Check if the registers match their sub/super register classes.
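+  // E.g. an operand constrained with EqualsSuperReg is a W register that
+  // matches only if its X super-register equals the tied operand.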
+ if (AOp1.getRegEqualityTy() == EqualsSuperReg) + return getXRegFromWReg(Op1.getReg()) == Op2.getReg(); + if (AOp1.getRegEqualityTy() == EqualsSubReg) + return getWRegFromXReg(Op1.getReg()) == Op2.getReg(); + if (AOp2.getRegEqualityTy() == EqualsSuperReg) + return getXRegFromWReg(Op2.getReg()) == Op1.getReg(); + if (AOp2.getRegEqualityTy() == EqualsSubReg) + return getWRegFromXReg(Op2.getReg()) == Op1.getReg(); + + return false; +} + /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its /// operands. bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info, @@ -3451,7 +3785,39 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, "is also a source"); break; } + case AArch64::STXRB: + case AArch64::STXRH: + case AArch64::STXRW: + case AArch64::STXRX: + case AArch64::STLXRB: + case AArch64::STLXRH: + case AArch64::STLXRW: + case AArch64::STLXRX: { + unsigned Rs = Inst.getOperand(0).getReg(); + unsigned Rt = Inst.getOperand(1).getReg(); + unsigned Rn = Inst.getOperand(2).getReg(); + if (RI->isSubRegisterEq(Rt, Rs) || + (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) + return Error(Loc[0], + "unpredictable STXR instruction, status is also a source"); + break; + } + case AArch64::STXPW: + case AArch64::STXPX: + case AArch64::STLXPW: + case AArch64::STLXPX: { + unsigned Rs = Inst.getOperand(0).getReg(); + unsigned Rt1 = Inst.getOperand(1).getReg(); + unsigned Rt2 = Inst.getOperand(2).getReg(); + unsigned Rn = Inst.getOperand(3).getReg(); + if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) || + (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) + return Error(Loc[0], + "unpredictable STXP instruction, status is also a source"); + break; } + } + // Now check immediate ranges. Separate from the above as there is overlap // in the instructions being checked and this keeps the nested conditionals @@ -3488,7 +3854,9 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || - ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) && + ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || + ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || + ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) && (Inst.getOpcode() == AArch64::ADDXri || Inst.getOpcode() == AArch64::ADDWri)) return false; @@ -3512,8 +3880,23 @@ static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS, unsigned VariantID = 0); bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, + uint64_t ErrorInfo, OperandVector &Operands) { switch (ErrCode) { + case Match_InvalidTiedOperand: { + RegConstraintEqualityTy EqTy = + static_cast<const AArch64Operand &>(*Operands[ErrorInfo]) + .getRegEqualityTy(); + switch (EqTy) { + case RegConstraintEqualityTy::EqualsSubReg: + return Error(Loc, "operand must be 64-bit form of destination register"); + case RegConstraintEqualityTy::EqualsSuperReg: + return Error(Loc, "operand must be 32-bit form of destination register"); + case RegConstraintEqualityTy::EqualsReg: + return Error(Loc, "operand must match destination register"); + } + llvm_unreachable("Unknown RegConstraintEqualityTy"); + } case Match_MissingFeature: return Error(Loc, "instruction requires a CPU feature not currently enabled"); @@ -3547,9 +3930,27 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, case Match_InvalidFPImm: return Error(Loc, "expected compatible register or floating-point constant"); + case 
Match_InvalidMemoryIndexedSImm6: + return Error(Loc, "index must be an integer in range [-32, 31]."); + case Match_InvalidMemoryIndexedSImm5: + return Error(Loc, "index must be an integer in range [-16, 15]."); + case Match_InvalidMemoryIndexed1SImm4: + return Error(Loc, "index must be an integer in range [-8, 7]."); + case Match_InvalidMemoryIndexed2SImm4: + return Error(Loc, "index must be a multiple of 2 in range [-16, 14]."); + case Match_InvalidMemoryIndexed3SImm4: + return Error(Loc, "index must be a multiple of 3 in range [-24, 21]."); + case Match_InvalidMemoryIndexed4SImm4: + return Error(Loc, "index must be a multiple of 4 in range [-32, 28]."); + case Match_InvalidMemoryIndexed16SImm4: + return Error(Loc, "index must be a multiple of 16 in range [-128, 112]."); + case Match_InvalidMemoryIndexed1SImm6: + return Error(Loc, "index must be an integer in range [-32, 31]."); + case Match_InvalidMemoryIndexedSImm8: + return Error(Loc, "index must be an integer in range [-128, 127]."); case Match_InvalidMemoryIndexedSImm9: return Error(Loc, "index must be an integer in range [-256, 255]."); - case Match_InvalidMemoryIndexedSImm10: + case Match_InvalidMemoryIndexed8SImm10: return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088]."); case Match_InvalidMemoryIndexed4SImm7: return Error(Loc, "index must be a multiple of 4 in range [-256, 252]."); @@ -3557,6 +3958,20 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, return Error(Loc, "index must be a multiple of 8 in range [-512, 504]."); case Match_InvalidMemoryIndexed16SImm7: return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008]."); + case Match_InvalidMemoryIndexed8UImm5: + return Error(Loc, "index must be a multiple of 8 in range [0, 248]."); + case Match_InvalidMemoryIndexed4UImm5: + return Error(Loc, "index must be a multiple of 4 in range [0, 124]."); + case Match_InvalidMemoryIndexed2UImm5: + return Error(Loc, "index must be a multiple of 2 in range [0, 62]."); + case Match_InvalidMemoryIndexed8UImm6: + return Error(Loc, "index must be a multiple of 8 in range [0, 504]."); + case Match_InvalidMemoryIndexed4UImm6: + return Error(Loc, "index must be a multiple of 4 in range [0, 252]."); + case Match_InvalidMemoryIndexed2UImm6: + return Error(Loc, "index must be a multiple of 2 in range [0, 126]."); + case Match_InvalidMemoryIndexed1UImm6: + return Error(Loc, "index must be in range [0, 63]."); case Match_InvalidMemoryWExtend8: return Error(Loc, "expected 'uxtw' or 'sxtw' with optional shift of #0"); @@ -3621,16 +4036,44 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, return Error(Loc, "immediate must be an integer in range [1, 32]."); case Match_InvalidImm1_64: return Error(Loc, "immediate must be an integer in range [1, 64]."); - case Match_InvalidIndex1: + case Match_InvalidSVEAddSubImm8: + return Error(Loc, "immediate must be an integer in range [0, 255]" + " with a shift amount of 0"); + case Match_InvalidSVEAddSubImm16: + case Match_InvalidSVEAddSubImm32: + case Match_InvalidSVEAddSubImm64: + return Error(Loc, "immediate must be an integer in range [0, 255] or a " + "multiple of 256 in range [256, 65280]"); + case Match_InvalidSVECpyImm8: + return Error(Loc, "immediate must be an integer in range [-128, 255]" + " with a shift amount of 0"); + case Match_InvalidSVECpyImm16: + return Error(Loc, "immediate must be an integer in range [-128, 127] or a " + "multiple of 256 in range [-32768, 65280]"); + case Match_InvalidSVECpyImm32: + case 
Match_InvalidSVECpyImm64: + return Error(Loc, "immediate must be an integer in range [-128, 127] or a " + "multiple of 256 in range [-32768, 32512]"); + case Match_InvalidIndexRange1_1: return Error(Loc, "expected lane specifier '[1]'"); - case Match_InvalidIndexB: + case Match_InvalidIndexRange0_15: return Error(Loc, "vector lane must be an integer in range [0, 15]."); - case Match_InvalidIndexH: + case Match_InvalidIndexRange0_7: return Error(Loc, "vector lane must be an integer in range [0, 7]."); - case Match_InvalidIndexS: + case Match_InvalidIndexRange0_3: return Error(Loc, "vector lane must be an integer in range [0, 3]."); - case Match_InvalidIndexD: + case Match_InvalidIndexRange0_1: return Error(Loc, "vector lane must be an integer in range [0, 1]."); + case Match_InvalidSVEIndexRange0_63: + return Error(Loc, "vector lane must be an integer in range [0, 63]."); + case Match_InvalidSVEIndexRange0_31: + return Error(Loc, "vector lane must be an integer in range [0, 31]."); + case Match_InvalidSVEIndexRange0_15: + return Error(Loc, "vector lane must be an integer in range [0, 15]."); + case Match_InvalidSVEIndexRange0_7: + return Error(Loc, "vector lane must be an integer in range [0, 7]."); + case Match_InvalidSVEIndexRange0_3: + return Error(Loc, "vector lane must be an integer in range [0, 3]."); case Match_InvalidLabel: return Error(Loc, "expected label or encodable integer pc offset"); case Match_MRS: @@ -3647,6 +4090,84 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, ComputeAvailableFeatures(STI->getFeatureBits())); return Error(Loc, "unrecognized instruction mnemonic" + Suggestion); } + case Match_InvalidGPR64shifted8: + return Error(Loc, "register must be x0..x30 or xzr, without shift"); + case Match_InvalidGPR64shifted16: + return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'"); + case Match_InvalidGPR64shifted32: + return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'"); + case Match_InvalidGPR64shifted64: + return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'"); + case Match_InvalidGPR64NoXZRshifted8: + return Error(Loc, "register must be x0..x30 without shift"); + case Match_InvalidGPR64NoXZRshifted16: + return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'"); + case Match_InvalidGPR64NoXZRshifted32: + return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'"); + case Match_InvalidGPR64NoXZRshifted64: + return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'"); + case Match_InvalidZPR32UXTW8: + case Match_InvalidZPR32SXTW8: + return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'"); + case Match_InvalidZPR32UXTW16: + case Match_InvalidZPR32SXTW16: + return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'"); + case Match_InvalidZPR32UXTW32: + case Match_InvalidZPR32SXTW32: + return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'"); + case Match_InvalidZPR32UXTW64: + case Match_InvalidZPR32SXTW64: + return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'"); + case Match_InvalidZPR64UXTW8: + case Match_InvalidZPR64SXTW8: + return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'"); + case Match_InvalidZPR64UXTW16: + case Match_InvalidZPR64SXTW16: + return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'"); + case 
Match_InvalidZPR64UXTW32:
+  case Match_InvalidZPR64SXTW32:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
+  case Match_InvalidZPR64UXTW64:
+  case Match_InvalidZPR64SXTW64:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
+  case Match_InvalidZPR32LSL8:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
+  case Match_InvalidZPR32LSL16:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
+  case Match_InvalidZPR32LSL32:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
+  case Match_InvalidZPR32LSL64:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
+  case Match_InvalidZPR64LSL8:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
+  case Match_InvalidZPR64LSL16:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
+  case Match_InvalidZPR64LSL32:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
+  case Match_InvalidZPR64LSL64:
+    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
+  case Match_InvalidZPR0:
+    return Error(Loc, "expected register without element width suffix");
+  case Match_InvalidZPR8:
+  case Match_InvalidZPR16:
+  case Match_InvalidZPR32:
+  case Match_InvalidZPR64:
+  case Match_InvalidZPR128:
+    return Error(Loc, "invalid element width");
+  case Match_InvalidZPR_3b8:
+    return Error(Loc, "invalid restricted vector register, expected z0.b..z7.b");
+  case Match_InvalidZPR_3b16:
+    return Error(Loc, "invalid restricted vector register, expected z0.h..z7.h");
+  case Match_InvalidZPR_3b32:
+    return Error(Loc, "invalid restricted vector register, expected z0.s..z7.s");
+  case Match_InvalidZPR_4b16:
+    return Error(Loc, "invalid restricted vector register, expected z0.h..z15.h");
+  case Match_InvalidZPR_4b32:
+    return Error(Loc, "invalid restricted vector register, expected z0.s..z15.s");
+  case Match_InvalidZPR_4b64:
+    return Error(Loc, "invalid restricted vector register, expected z0.d..z15.d");
+  case Match_InvalidSVEPattern:
+    return Error(Loc, "invalid predicate pattern");
   case Match_InvalidSVEPredicateAnyReg:
   case Match_InvalidSVEPredicateBReg:
   case Match_InvalidSVEPredicateHReg:
@@ -3659,6 +4180,12 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
   case Match_InvalidSVEPredicate3bSReg:
   case Match_InvalidSVEPredicate3bDReg:
     return Error(Loc, "restricted predicate has range [0, 7].");
+  case Match_InvalidSVEExactFPImmOperandHalfOne:
+    return Error(Loc, "invalid floating point constant, expected 0.5 or 1.0.");
+  case Match_InvalidSVEExactFPImmOperandHalfTwo:
+    return Error(Loc, "invalid floating point constant, expected 0.5 or 2.0.");
+  case Match_InvalidSVEExactFPImmOperandZeroOne:
+    return Error(Loc, "invalid floating point constant, expected 0.0 or 1.0.");
   default:
     llvm_unreachable("unexpected error code!");
   }
@@ -4012,7 +4539,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     return Error(IDLoc, Msg);
   }
   case Match_MnemonicFail:
-    return showMatchError(IDLoc, MatchResult, Operands);
+    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
   case Match_InvalidOperand: {
     SMLoc ErrorLoc = IDLoc;

@@ -4031,8 +4558,9 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
       MatchResult = 
Match_InvalidSuffix; - return showMatchError(ErrorLoc, MatchResult, Operands); + return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands); } + case Match_InvalidTiedOperand: case Match_InvalidMemoryIndexed1: case Match_InvalidMemoryIndexed2: case Match_InvalidMemoryIndexed4: @@ -4058,11 +4586,27 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidMemoryXExtend32: case Match_InvalidMemoryXExtend64: case Match_InvalidMemoryXExtend128: + case Match_InvalidMemoryIndexed1SImm4: + case Match_InvalidMemoryIndexed2SImm4: + case Match_InvalidMemoryIndexed3SImm4: + case Match_InvalidMemoryIndexed4SImm4: + case Match_InvalidMemoryIndexed1SImm6: + case Match_InvalidMemoryIndexed16SImm4: case Match_InvalidMemoryIndexed4SImm7: case Match_InvalidMemoryIndexed8SImm7: case Match_InvalidMemoryIndexed16SImm7: + case Match_InvalidMemoryIndexed8UImm5: + case Match_InvalidMemoryIndexed4UImm5: + case Match_InvalidMemoryIndexed2UImm5: + case Match_InvalidMemoryIndexed1UImm6: + case Match_InvalidMemoryIndexed2UImm6: + case Match_InvalidMemoryIndexed4UImm6: + case Match_InvalidMemoryIndexed8UImm6: + case Match_InvalidMemoryIndexedSImm6: + case Match_InvalidMemoryIndexedSImm5: + case Match_InvalidMemoryIndexedSImm8: case Match_InvalidMemoryIndexedSImm9: - case Match_InvalidMemoryIndexedSImm10: + case Match_InvalidMemoryIndexed8SImm10: case Match_InvalidImm0_1: case Match_InvalidImm0_7: case Match_InvalidImm0_15: @@ -4075,15 +4619,73 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidImm1_16: case Match_InvalidImm1_32: case Match_InvalidImm1_64: - case Match_InvalidIndex1: - case Match_InvalidIndexB: - case Match_InvalidIndexH: - case Match_InvalidIndexS: - case Match_InvalidIndexD: + case Match_InvalidSVEAddSubImm8: + case Match_InvalidSVEAddSubImm16: + case Match_InvalidSVEAddSubImm32: + case Match_InvalidSVEAddSubImm64: + case Match_InvalidSVECpyImm8: + case Match_InvalidSVECpyImm16: + case Match_InvalidSVECpyImm32: + case Match_InvalidSVECpyImm64: + case Match_InvalidIndexRange1_1: + case Match_InvalidIndexRange0_15: + case Match_InvalidIndexRange0_7: + case Match_InvalidIndexRange0_3: + case Match_InvalidIndexRange0_1: + case Match_InvalidSVEIndexRange0_63: + case Match_InvalidSVEIndexRange0_31: + case Match_InvalidSVEIndexRange0_15: + case Match_InvalidSVEIndexRange0_7: + case Match_InvalidSVEIndexRange0_3: case Match_InvalidLabel: case Match_InvalidComplexRotationEven: case Match_InvalidComplexRotationOdd: + case Match_InvalidGPR64shifted8: + case Match_InvalidGPR64shifted16: + case Match_InvalidGPR64shifted32: + case Match_InvalidGPR64shifted64: + case Match_InvalidGPR64NoXZRshifted8: + case Match_InvalidGPR64NoXZRshifted16: + case Match_InvalidGPR64NoXZRshifted32: + case Match_InvalidGPR64NoXZRshifted64: + case Match_InvalidZPR32UXTW8: + case Match_InvalidZPR32UXTW16: + case Match_InvalidZPR32UXTW32: + case Match_InvalidZPR32UXTW64: + case Match_InvalidZPR32SXTW8: + case Match_InvalidZPR32SXTW16: + case Match_InvalidZPR32SXTW32: + case Match_InvalidZPR32SXTW64: + case Match_InvalidZPR64UXTW8: + case Match_InvalidZPR64SXTW8: + case Match_InvalidZPR64UXTW16: + case Match_InvalidZPR64SXTW16: + case Match_InvalidZPR64UXTW32: + case Match_InvalidZPR64SXTW32: + case Match_InvalidZPR64UXTW64: + case Match_InvalidZPR64SXTW64: + case Match_InvalidZPR32LSL8: + case Match_InvalidZPR32LSL16: + case Match_InvalidZPR32LSL32: + case Match_InvalidZPR32LSL64: + case Match_InvalidZPR64LSL8: + case Match_InvalidZPR64LSL16: + 
case Match_InvalidZPR64LSL32: + case Match_InvalidZPR64LSL64: + case Match_InvalidZPR0: + case Match_InvalidZPR8: + case Match_InvalidZPR16: + case Match_InvalidZPR32: + case Match_InvalidZPR64: + case Match_InvalidZPR128: + case Match_InvalidZPR_3b8: + case Match_InvalidZPR_3b16: + case Match_InvalidZPR_3b32: + case Match_InvalidZPR_4b16: + case Match_InvalidZPR_4b32: + case Match_InvalidZPR_4b64: case Match_InvalidSVEPredicateAnyReg: + case Match_InvalidSVEPattern: case Match_InvalidSVEPredicateBReg: case Match_InvalidSVEPredicateHReg: case Match_InvalidSVEPredicateSReg: @@ -4093,6 +4695,9 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidSVEPredicate3bHReg: case Match_InvalidSVEPredicate3bSReg: case Match_InvalidSVEPredicate3bDReg: + case Match_InvalidSVEExactFPImmOperandHalfOne: + case Match_InvalidSVEExactFPImmOperandHalfTwo: + case Match_InvalidSVEExactFPImmOperandZeroOne: case Match_MSR: case Match_MRS: { if (ErrorInfo >= Operands.size()) @@ -4102,7 +4707,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; - return showMatchError(ErrorLoc, MatchResult, Operands); + return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands); } } @@ -4122,12 +4727,6 @@ bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) { parseDirectiveArch(Loc); else if (IDVal == ".cpu") parseDirectiveCPU(Loc); - else if (IDVal == ".hword") - parseDirectiveWord(2, Loc); - else if (IDVal == ".word") - parseDirectiveWord(4, Loc); - else if (IDVal == ".xword") - parseDirectiveWord(8, Loc); else if (IDVal == ".tlsdesccall") parseDirectiveTLSDescCall(Loc); else if (IDVal == ".ltorg" || IDVal == ".pool") @@ -4150,7 +4749,11 @@ static const struct { const char *Name; const FeatureBitset Features; } ExtensionMap[] = { - { "crc", {AArch64::FeatureCRC} }, + { "crc", {AArch64::FeatureCRC} }, + { "sm4", {AArch64::FeatureSM4} }, + { "sha3", {AArch64::FeatureSHA3} }, + { "sha2", {AArch64::FeatureSHA2} }, + { "aes", {AArch64::FeatureAES} }, { "crypto", {AArch64::FeatureCrypto} }, { "fp", {AArch64::FeatureFPARMv8} }, { "simd", {AArch64::FeatureNEON} }, @@ -4164,6 +4767,54 @@ static const struct { { "profile", {} }, }; +static void ExpandCryptoAEK(AArch64::ArchKind ArchKind, + SmallVector<StringRef, 4> &RequestedExtensions) { + const bool NoCrypto = + (std::find(RequestedExtensions.begin(), RequestedExtensions.end(), + "nocrypto") != std::end(RequestedExtensions)); + const bool Crypto = + (std::find(RequestedExtensions.begin(), RequestedExtensions.end(), + "crypto") != std::end(RequestedExtensions)); + + if (!NoCrypto && Crypto) { + switch (ArchKind) { + default: + // Map 'generic' (and others) to sha2 and aes, because + // that was the traditional meaning of crypto. + case AArch64::ArchKind::ARMV8_1A: + case AArch64::ArchKind::ARMV8_2A: + case AArch64::ArchKind::ARMV8_3A: + RequestedExtensions.push_back("sha2"); + RequestedExtensions.push_back("aes"); + break; + case AArch64::ArchKind::ARMV8_4A: + RequestedExtensions.push_back("sm4"); + RequestedExtensions.push_back("sha3"); + RequestedExtensions.push_back("sha2"); + RequestedExtensions.push_back("aes"); + break; + } + } else if (NoCrypto) { + switch (ArchKind) { + default: + // Map 'generic' (and others) to sha2 and aes, because + // that was the traditional meaning of crypto. 
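+      // "+nocrypto" therefore has to turn those same features off again.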
+ case AArch64::ArchKind::ARMV8_1A: + case AArch64::ArchKind::ARMV8_2A: + case AArch64::ArchKind::ARMV8_3A: + RequestedExtensions.push_back("nosha2"); + RequestedExtensions.push_back("noaes"); + break; + case AArch64::ArchKind::ARMV8_4A: + RequestedExtensions.push_back("nosm4"); + RequestedExtensions.push_back("nosha3"); + RequestedExtensions.push_back("nosha2"); + RequestedExtensions.push_back("noaes"); + break; + } + } +} + /// parseDirectiveArch /// ::= .arch token bool AArch64AsmParser::parseDirectiveArch(SMLoc L) { @@ -4194,6 +4845,8 @@ bool AArch64AsmParser::parseDirectiveArch(SMLoc L) { if (!ExtensionString.empty()) ExtensionString.split(RequestedExtensions, '+'); + ExpandCryptoAEK(ID, RequestedExtensions); + FeatureBitset Features = STI.getFeatureBits(); for (auto Name : RequestedExtensions) { bool EnableFeature = true; @@ -4253,6 +4906,8 @@ bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) { STI.setDefaultFeatures(CPU, ""); CurLoc = incrementLoc(CurLoc, CPU.size()); + ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions); + FeatureBitset Features = STI.getFeatureBits(); for (auto Name : RequestedExtensions) { // Advance source location past '+'. @@ -4292,22 +4947,6 @@ bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) { return false; } -/// parseDirectiveWord -/// ::= .word [ expression (, expression)* ] -bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { - auto parseOp = [&]() -> bool { - const MCExpr *Value; - if (getParser().parseExpression(Value)) - return true; - getParser().getStreamer().EmitValue(Value, Size, L); - return false; - }; - - if (parseMany(parseOp)) - return true; - return false; -} - /// parseDirectiveInst /// ::= .inst opcode [, ...] bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) { @@ -4418,46 +5057,50 @@ bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { MCAsmParser &Parser = getParser(); Parser.Lex(); // Eat the '.req' token. 
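   // The operand after ".req" may be a scalar register, a Neon vector, an SVE
   // vector or an SVE predicate register, e.g. "xtmp .req x9" or "ptmp .req p0".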
SMLoc SRegLoc = getLoc(); - int RegNum = tryParseRegister(); RegKind RegisterKind = RegKind::Scalar; + unsigned RegNum; + OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum); - if (RegNum == -1) { + if (ParseRes != MatchOperand_Success) { StringRef Kind; RegisterKind = RegKind::NeonVector; - RegNum = tryMatchVectorRegister(Kind, false); - if (!Kind.empty()) + ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector); + + if (ParseRes == MatchOperand_ParseFail) + return true; + + if (ParseRes == MatchOperand_Success && !Kind.empty()) return Error(SRegLoc, "vector register without type specifier expected"); } - if (RegNum == -1) { + if (ParseRes != MatchOperand_Success) { StringRef Kind; RegisterKind = RegKind::SVEDataVector; - OperandMatchResultTy Res = - tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector); + ParseRes = + tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector); - if (Res == MatchOperand_ParseFail) + if (ParseRes == MatchOperand_ParseFail) return true; - if (Res == MatchOperand_Success && !Kind.empty()) + if (ParseRes == MatchOperand_Success && !Kind.empty()) return Error(SRegLoc, "sve vector register without type specifier expected"); } - if (RegNum == -1) { + if (ParseRes != MatchOperand_Success) { StringRef Kind; RegisterKind = RegKind::SVEPredicateVector; - OperandMatchResultTy Res = - tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector); + ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector); - if (Res == MatchOperand_ParseFail) + if (ParseRes == MatchOperand_ParseFail) return true; - if (Res == MatchOperand_Success && !Kind.empty()) + if (ParseRes == MatchOperand_Success && !Kind.empty()) return Error(SRegLoc, "sve predicate register without type specifier expected"); } - if (RegNum == -1) + if (ParseRes != MatchOperand_Success) return Error(SRegLoc, "register name or alias expected"); // Shouldn't be anything else. @@ -4519,7 +5162,7 @@ AArch64AsmParser::classifySymbolRef(const MCExpr *Expr, BE->getOpcode() != MCBinaryExpr::Sub) return false; - // See if the addend is is a constant, otherwise there's more going + // See if the addend is a constant, otherwise there's more going // on here than we can deal with. 
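   // E.g. in "add x0, x0, :lo12:sym+4" the addend is the constant 4.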
auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS()); if (!AddendExpr) @@ -4620,10 +5263,11 @@ AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) { return MatchOperand_ParseFail; } - int FirstReg = tryParseRegister(); - if (FirstReg == -1) { + unsigned FirstReg; + OperandMatchResultTy Res = tryParseScalarRegister(FirstReg); + if (Res != MatchOperand_Success) return MatchOperand_ParseFail; - } + const MCRegisterClass &WRegClass = AArch64MCRegisterClasses[AArch64::GPR32RegClassID]; const MCRegisterClass &XRegClass = @@ -4646,19 +5290,18 @@ AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) { return MatchOperand_ParseFail; } - SMLoc M = getLoc(); if (getParser().getTok().isNot(AsmToken::Comma)) { - Error(M, "expected comma"); + Error(getLoc(), "expected comma"); return MatchOperand_ParseFail; } // Eat the comma getParser().Lex(); SMLoc E = getLoc(); - int SecondReg = tryParseRegister(); - if (SecondReg ==-1) { + unsigned SecondReg; + Res = tryParseScalarRegister(SecondReg); + if (Res != MatchOperand_Success) return MatchOperand_ParseFail; - } if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 || (isXReg && !XRegClass.contains(SecondReg)) || @@ -4683,16 +5326,16 @@ AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) { return MatchOperand_Success; } -template <bool ParseSuffix> +template <bool ParseShiftExtend, bool ParseSuffix> OperandMatchResultTy AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) { const SMLoc S = getLoc(); // Check for a SVE vector register specifier first. - int RegNum = -1; + unsigned RegNum; StringRef Kind; OperandMatchResultTy Res = - tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector); + tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector); if (Res != MatchOperand_Success) return Res; @@ -4700,20 +5343,81 @@ AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) { if (ParseSuffix && Kind.empty()) return MatchOperand_NoMatch; - unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower()) - .Case("", -1) - .Case(".b", 8) - .Case(".h", 16) - .Case(".s", 32) - .Case(".d", 64) - .Case(".q", 128) - .Default(0); - if (!ElementWidth) + const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector); + if (!KindRes) + return MatchOperand_NoMatch; + + unsigned ElementWidth = KindRes->second; + + // No shift/extend is the default. 
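+  // E.g. a bare "z0.d" (possibly followed by an index such as "z0.d[1]"),
+  // as opposed to "z0.d, lsl #3".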
+  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
+    Operands.push_back(AArch64Operand::CreateVectorReg(
+        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
+
+    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
+    if (Res == MatchOperand_ParseFail)
+      return MatchOperand_ParseFail;
+    return MatchOperand_Success;
+  }
+
+  // Eat the comma
+  getParser().Lex();
+
+  // Match the shift
+  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
+  Res = tryParseOptionalShiftExtend(ExtOpnd);
+  if (Res != MatchOperand_Success)
+    return Res;
+
+  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
+  Operands.push_back(AArch64Operand::CreateVectorReg(
+      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
+      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
+      Ext->hasShiftExtendAmount()));
+
+  return MatchOperand_Success;
+}
+
+OperandMatchResultTy
+AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
+  MCAsmParser &Parser = getParser();
+
+  SMLoc SS = getLoc();
+  const AsmToken &TokE = Parser.getTok();
+  bool IsHash = TokE.is(AsmToken::Hash);
+
+  if (!IsHash && TokE.isNot(AsmToken::Identifier))
     return MatchOperand_NoMatch;

+  int64_t Pattern;
+  if (IsHash) {
+    Parser.Lex(); // Eat hash
+
+    // Parse the immediate operand.
+    const MCExpr *ImmVal;
+    SS = getLoc();
+    if (Parser.parseExpression(ImmVal))
+      return MatchOperand_ParseFail;
+
+    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    if (!MCE)
+      return MatchOperand_ParseFail;
+
+    Pattern = MCE->getValue();
+  } else {
+    // Parse the pattern
+    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
+    if (!Pat)
+      return MatchOperand_NoMatch;
+
+    Parser.Lex();
+    Pattern = Pat->Encoding;
+    assert(Pattern >= 0 && Pattern < 32);
+  }
+
   Operands.push_back(
-      AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
-                                S, S, getContext()));
+      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
+                                SS, getLoc(), getContext()));

   return MatchOperand_Success;
 }
