aboutsummaryrefslogtreecommitdiff
path: root/clang/lib/Basic/Targets/AArch64.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'clang/lib/Basic/Targets/AArch64.cpp')
-rw-r--r--clang/lib/Basic/Targets/AArch64.cpp673
1 files changed, 544 insertions, 129 deletions
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 60ef52ac3f0d..dfed95f0513f 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -18,46 +18,106 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AArch64TargetParser.h"
+#include "llvm/Support/ARMTargetParserCommon.h"
+#include <optional>
using namespace clang;
using namespace clang::targets;
-const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
- {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
- {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
-static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
- switch (Kind) {
- case llvm::AArch64::ArchKind::ARMV9A:
- case llvm::AArch64::ArchKind::ARMV9_1A:
- case llvm::AArch64::ArchKind::ARMV9_2A:
- case llvm::AArch64::ArchKind::ARMV9_3A:
- return "9";
- default:
- return "8";
- }
-}
-
-StringRef AArch64TargetInfo::getArchProfile() const {
- switch (ArchKind) {
- case llvm::AArch64::ArchKind::ARMV8R:
- return "R";
- default:
- return "A";
+void AArch64TargetInfo::setArchFeatures() {
+ if (*ArchInfo == llvm::AArch64::ARMV8R) {
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ HasRCPC = true;
+ FPU |= NeonMode;
+ HasCCPP = true;
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ } else if (ArchInfo->Version.getMajor() == 8) {
+ if (ArchInfo->Version.getMinor() >= 7u) {
+ HasWFxT = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 6u) {
+ HasBFloat16 = true;
+ HasMatMul = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 5u) {
+ HasAlternativeNZCV = true;
+ HasFRInt3264 = true;
+ HasSSBS = true;
+ HasSB = true;
+ HasPredRes = true;
+ HasBTI = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 4u) {
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 3u) {
+ HasRCPC = true;
+ FPU |= NeonMode;
+ }
+ if (ArchInfo->Version.getMinor() >= 2u) {
+ HasCCPP = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 1u) {
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ }
+ } else if (ArchInfo->Version.getMajor() == 9) {
+ if (ArchInfo->Version.getMinor() >= 2u) {
+ HasWFxT = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 1u) {
+ HasBFloat16 = true;
+ HasMatMul = true;
+ }
+ FPU |= SveMode;
+ HasSVE2 = true;
+ HasFullFP16 = true;
+ HasAlternativeNZCV = true;
+ HasFRInt3264 = true;
+ HasSSBS = true;
+ HasSB = true;
+ HasPredRes = true;
+ HasBTI = true;
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ HasRCPC = true;
+ FPU |= NeonMode;
+ HasCCPP = true;
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
}
}
@@ -77,7 +137,9 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
// All AArch64 implementations support ARMv8 FP, which makes half a legal type.
HasLegalHalfType = true;
+ HalfArgsAndReturns = true;
HasFloat16 = true;
+ HasStrictFP = true;
if (Triple.isArch64Bit())
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
@@ -162,7 +224,7 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
return Name == "generic" ||
- llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
+ llvm::AArch64::parseCpu(Name).Arch != llvm::AArch64::INVALID;
}
bool AArch64TargetInfo::setCPU(const std::string &Name) {
@@ -191,6 +253,7 @@ void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
+ Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
// Also include the Armv8.2 defines
getTargetDefinesARMV82A(Opts, Builder);
}
@@ -204,6 +267,7 @@ void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
+ Builder.defineMacro("__ARM_FEATURE_BTI", "1");
// Also include the Armv8.4 defines
getTargetDefinesARMV84A(Opts, Builder);
}
@@ -230,6 +294,12 @@ void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
getTargetDefinesARMV87A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.8 defines
+ getTargetDefinesARMV88A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Armv9-A maps to Armv8.5-A
@@ -254,6 +324,12 @@ void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
getTargetDefinesARMV88A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.4-A maps to Armv8.9-A
+ getTargetDefinesARMV89A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
@@ -278,8 +354,10 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
- Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
- Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");
+ Builder.defineMacro("__ARM_ARCH",
+ std::to_string(ArchInfo->Version.getMajor()));
+ Builder.defineMacro("__ARM_ARCH_PROFILE",
+ std::string("'") + (char)ArchInfo->Profile + "'");
Builder.defineMacro("__ARM_64BIT_STATE", "1");
Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
@@ -341,6 +419,12 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
+ if (HasRCPC)
+ Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
+
+ if (HasFMV)
+ Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
+
// The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
// macros for AES, SHA2, SHA3 and SM4
if (HasAES && HasSHA2)
@@ -362,6 +446,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_SM4", "1");
}
+ if (HasPAuth)
+ Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
+
if (HasUnaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
@@ -438,46 +525,37 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasMOPS)
Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
- switch (ArchKind) {
- default:
- break;
- case llvm::AArch64::ArchKind::ARMV8_1A:
+ if (HasD128)
+ Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
+
+ if (*ArchInfo == llvm::AArch64::ARMV8_1A)
getTargetDefinesARMV81A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_2A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
getTargetDefinesARMV82A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_3A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
getTargetDefinesARMV83A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_4A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
getTargetDefinesARMV84A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_5A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
getTargetDefinesARMV85A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_6A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
getTargetDefinesARMV86A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_7A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
getTargetDefinesARMV87A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_8A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
getTargetDefinesARMV88A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV9A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
+ getTargetDefinesARMV89A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9A)
getTargetDefinesARMV9A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV9_1A:
+ else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
getTargetDefinesARMV91A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV9_2A:
+ else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
getTargetDefinesARMV92A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV9_3A:
+ else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
getTargetDefinesARMV93A(Opts, Builder);
- break;
- }
+ else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
+ getTargetDefinesARMV94A(Opts, Builder);
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
@@ -489,18 +567,21 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__FP_FAST_FMA", "1");
Builder.defineMacro("__FP_FAST_FMAF", "1");
+ // C/C++ operators work on both VLS and VLA SVE types
+ if (FPU & SveMode)
+ Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
+
if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
- Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
}
}
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
}
-Optional<std::pair<unsigned, unsigned>>
+std::optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
if (LangOpts.VScaleMin || LangOpts.VScaleMax)
return std::pair<unsigned, unsigned>(
@@ -509,140 +590,311 @@ AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
if (hasFeature("sve"))
return std::pair<unsigned, unsigned>(1, 16);
- return None;
+ return std::nullopt;
+}
+
+unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
+ if (Name == "default")
+ return 0;
+ for (const auto &E : llvm::AArch64::Extensions)
+ if (Name == E.Name)
+ return E.FmvPriority;
+ return 0;
+}
+
+unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
+ // Take the maximum priority as per feature cost, so more features win.
+ return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
+}
+
+bool AArch64TargetInfo::getFeatureDepOptions(StringRef Name,
+ std::string &FeatureVec) const {
+ FeatureVec = "";
+ for (const auto &E : llvm::AArch64::Extensions) {
+ if (Name == E.Name) {
+ FeatureVec = E.DependentFeatures;
+ break;
+ }
+ }
+ return FeatureVec != "";
+}
+
+bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
+ for (const auto &E : llvm::AArch64::Extensions)
+ if (FeatureStr == E.Name)
+ return true;
+ return false;
}
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
- .Cases("aarch64", "arm64", "arm", true)
- .Case("neon", FPU & NeonMode)
- .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3", "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
- .Case("ls64", HasLS64)
- .Default(false);
+ .Cases("aarch64", "arm64", "arm", true)
+ .Case("fmv", HasFMV)
+ .Cases("neon", "fp", "simd", FPU & NeonMode)
+ .Case("jscvt", HasJSCVT)
+ .Case("fcma", HasFCMA)
+ .Case("rng", HasRandGen)
+ .Case("flagm", HasFlagM)
+ .Case("flagm2", HasAlternativeNZCV)
+ .Case("fp16fml", HasFP16FML)
+ .Case("dotprod", HasDotProd)
+ .Case("sm4", HasSM4)
+ .Case("rdm", HasRDM)
+ .Case("lse", HasLSE)
+ .Case("crc", HasCRC)
+ .Case("sha2", HasSHA2)
+ .Case("sha3", HasSHA3)
+ .Cases("aes", "pmull", HasAES)
+ .Cases("fp16", "fullfp16", HasFullFP16)
+ .Case("dit", HasDIT)
+ .Case("dpb", HasCCPP)
+ .Case("dpb2", HasCCDP)
+ .Case("rcpc", HasRCPC)
+ .Case("frintts", HasFRInt3264)
+ .Case("i8mm", HasMatMul)
+ .Case("bf16", HasBFloat16)
+ .Case("sve", FPU & SveMode)
+ .Case("sve-bf16", FPU & SveMode && HasBFloat16)
+ .Case("sve-i8mm", FPU & SveMode && HasMatMul)
+ .Case("f32mm", FPU & SveMode && HasMatmulFP32)
+ .Case("f64mm", FPU & SveMode && HasMatmulFP64)
+ .Case("sve2", FPU & SveMode && HasSVE2)
+ .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
+ .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
+ .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
+ .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
+ .Case("sme", HasSME)
+ .Case("sme-f64f64", HasSMEF64)
+ .Case("sme-i16i64", HasSMEI64)
+ .Cases("memtag", "memtag2", HasMTE)
+ .Case("sb", HasSB)
+ .Case("predres", HasPredRes)
+ .Cases("ssbs", "ssbs2", HasSSBS)
+ .Case("bti", HasBTI)
+ .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
+ .Case("wfxt", HasWFxT)
+ .Default(false);
+}
+
+void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ Features[Name] = Enabled;
+ // If the feature is an architecture feature (like v8.2a), add all previous
+  // architecture versions and any dependent target features.
+ const llvm::AArch64::ArchInfo &ArchInfo =
+ llvm::AArch64::ArchInfo::findBySubArch(Name);
+
+ if (ArchInfo == llvm::AArch64::INVALID)
+    return; // Not an architecture, nothing more to do.
+
+ for (const auto *OtherArch : llvm::AArch64::ArchInfos)
+ if (ArchInfo.implies(*OtherArch))
+ Features[OtherArch->getSubArch()] = Enabled;
+
+ // Set any features implied by the architecture
+ uint64_t Extensions =
+ llvm::AArch64::getDefaultExtensions("generic", ArchInfo);
+ std::vector<StringRef> CPUFeats;
+ if (llvm::AArch64::getExtensionFeatures(Extensions, CPUFeats)) {
+ for (auto F : CPUFeats) {
+ assert(F[0] == '+' && "Expected + in target feature!");
+ Features[F.drop_front(1)] = true;
+ }
+ }
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- FPU = FPUMode;
- HasCRC = false;
- HasAES = false;
- HasSHA2 = false;
- HasSHA3 = false;
- HasSM4 = false;
- HasUnaligned = true;
- HasFullFP16 = false;
- HasDotProd = false;
- HasFP16FML = false;
- HasMTE = false;
- HasTME = false;
- HasLS64 = false;
- HasRandGen = false;
- HasMatMul = false;
- HasBFloat16 = false;
- HasSVE2 = false;
- HasSVE2AES = false;
- HasSVE2SHA3 = false;
- HasSVE2SM4 = false;
- HasSVE2BitPerm = false;
- HasMatmulFP64 = false;
- HasMatmulFP32 = false;
- HasLSE = false;
- HasMOPS = false;
-
- ArchKind = llvm::AArch64::ArchKind::INVALID;
-
for (const auto &Feature : Features) {
- if (Feature == "+neon")
+ if (Feature == "-neon")
+ HasNoNeon = true;
+ if (Feature == "-sve")
+ HasNoSVE = true;
+
+ if (Feature == "+neon" || Feature == "+fp-armv8")
+ FPU |= NeonMode;
+ if (Feature == "+jscvt") {
+ HasJSCVT = true;
+ FPU |= NeonMode;
+ }
+ if (Feature == "+fcma") {
+ HasFCMA = true;
FPU |= NeonMode;
+ }
+
if (Feature == "+sve") {
+ FPU |= NeonMode;
FPU |= SveMode;
HasFullFP16 = true;
}
if (Feature == "+sve2") {
+ FPU |= NeonMode;
FPU |= SveMode;
HasFullFP16 = true;
HasSVE2 = true;
}
if (Feature == "+sve2-aes") {
+ FPU |= NeonMode;
FPU |= SveMode;
HasFullFP16 = true;
HasSVE2 = true;
HasSVE2AES = true;
}
if (Feature == "+sve2-sha3") {
+ FPU |= NeonMode;
FPU |= SveMode;
HasFullFP16 = true;
HasSVE2 = true;
HasSVE2SHA3 = true;
}
if (Feature == "+sve2-sm4") {
+ FPU |= NeonMode;
FPU |= SveMode;
HasFullFP16 = true;
HasSVE2 = true;
HasSVE2SM4 = true;
}
if (Feature == "+sve2-bitperm") {
+ FPU |= NeonMode;
FPU |= SveMode;
HasFullFP16 = true;
HasSVE2 = true;
HasSVE2BitPerm = true;
}
if (Feature == "+f32mm") {
+ FPU |= NeonMode;
FPU |= SveMode;
+ HasFullFP16 = true;
HasMatmulFP32 = true;
}
if (Feature == "+f64mm") {
+ FPU |= NeonMode;
FPU |= SveMode;
+ HasFullFP16 = true;
HasMatmulFP64 = true;
}
+ if (Feature == "+sme") {
+ HasSME = true;
+ HasBFloat16 = true;
+ }
+ if (Feature == "+sme-f64f64") {
+ HasSME = true;
+ HasSMEF64 = true;
+ HasBFloat16 = true;
+ }
+ if (Feature == "+sme-i16i64") {
+ HasSME = true;
+ HasSMEI64 = true;
+ HasBFloat16 = true;
+ }
+ if (Feature == "+sb")
+ HasSB = true;
+ if (Feature == "+predres")
+ HasPredRes = true;
+ if (Feature == "+ssbs")
+ HasSSBS = true;
+ if (Feature == "+bti")
+ HasBTI = true;
+ if (Feature == "+wfxt")
+ HasWFxT = true;
+ if (Feature == "-fmv")
+ HasFMV = false;
if (Feature == "+crc")
HasCRC = true;
- if (Feature == "+aes")
+ if (Feature == "+rcpc")
+ HasRCPC = true;
+ if (Feature == "+aes") {
+ FPU |= NeonMode;
HasAES = true;
- if (Feature == "+sha2")
+ }
+ if (Feature == "+sha2") {
+ FPU |= NeonMode;
HasSHA2 = true;
+ }
if (Feature == "+sha3") {
+ FPU |= NeonMode;
HasSHA2 = true;
HasSHA3 = true;
}
- if (Feature == "+sm4")
+ if (Feature == "+rdm") {
+ FPU |= NeonMode;
+ HasRDM = true;
+ }
+ if (Feature == "+dit")
+ HasDIT = true;
+ if (Feature == "+cccp")
+ HasCCPP = true;
+ if (Feature == "+ccdp") {
+ HasCCPP = true;
+ HasCCDP = true;
+ }
+ if (Feature == "+fptoint")
+ HasFRInt3264 = true;
+ if (Feature == "+sm4") {
+ FPU |= NeonMode;
HasSM4 = true;
+ }
if (Feature == "+strict-align")
HasUnaligned = false;
- if (Feature == "+v8a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8A;
- if (Feature == "+v8.1a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
- if (Feature == "+v8.2a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
- if (Feature == "+v8.3a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
- if (Feature == "+v8.4a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
- if (Feature == "+v8.5a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
- if (Feature == "+v8.6a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
- if (Feature == "+v8.7a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
- if (Feature == "+v8.8a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
- if (Feature == "+v9a")
- ArchKind = llvm::AArch64::ArchKind::ARMV9A;
- if (Feature == "+v9.1a")
- ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
- if (Feature == "+v9.2a")
- ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
- if (Feature == "+v9.3a")
- ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
+ // All predecessor archs are added but select the latest one for ArchKind.
+ if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8A;
+ if (Feature == "+v8.1a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_1A;
+ if (Feature == "+v8.2a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_2A;
+ if (Feature == "+v8.3a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_3A;
+ if (Feature == "+v8.4a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_4A;
+ if (Feature == "+v8.5a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_5A;
+ if (Feature == "+v8.6a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_6A;
+ if (Feature == "+v8.7a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_7A;
+ if (Feature == "+v8.8a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_8A;
+ if (Feature == "+v8.9a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_9A;
+ if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9A;
+ if (Feature == "+v9.1a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_1A;
+ if (Feature == "+v9.2a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_2A;
+ if (Feature == "+v9.3a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_3A;
+ if (Feature == "+v9.4a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_4A;
if (Feature == "+v8r")
- ArchKind = llvm::AArch64::ArchKind::ARMV8R;
- if (Feature == "+fullfp16")
+ ArchInfo = &llvm::AArch64::ARMV8R;
+ if (Feature == "+fullfp16") {
+ FPU |= NeonMode;
HasFullFP16 = true;
- if (Feature == "+dotprod")
+ }
+ if (Feature == "+dotprod") {
+ FPU |= NeonMode;
HasDotProd = true;
- if (Feature == "+fp16fml")
+ }
+ if (Feature == "+fp16fml") {
+ FPU |= NeonMode;
+ HasFullFP16 = true;
HasFP16FML = true;
+ }
if (Feature == "+mte")
HasMTE = true;
if (Feature == "+tme")
@@ -661,12 +913,175 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasRandGen = true;
if (Feature == "+flagm")
HasFlagM = true;
+ if (Feature == "+altnzcv") {
+ HasFlagM = true;
+ HasAlternativeNZCV = true;
+ }
if (Feature == "+mops")
HasMOPS = true;
+ if (Feature == "+d128")
+ HasD128 = true;
+ }
+
+ // Check features that are manually disabled by command line options.
+ // This needs to be checked after architecture-related features are handled,
+ // making sure they are properly disabled when required.
+ for (const auto &Feature : Features) {
+ if (Feature == "-d128")
+ HasD128 = false;
}
setDataLayout();
+ setArchFeatures();
+
+ if (HasNoNeon) {
+ FPU &= ~NeonMode;
+ FPU &= ~SveMode;
+ }
+ if (HasNoSVE)
+ FPU &= ~SveMode;
+
+ return true;
+}
+
+bool AArch64TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ std::vector<std::string> UpdatedFeaturesVec;
+ // Parse the CPU and add any implied features.
+ const llvm::AArch64::ArchInfo &Arch = llvm::AArch64::parseCpu(CPU).Arch;
+ if (Arch != llvm::AArch64::INVALID) {
+ uint64_t Exts = llvm::AArch64::getDefaultExtensions(CPU, Arch);
+ std::vector<StringRef> CPUFeats;
+ llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
+ for (auto F : CPUFeats) {
+ assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
+ UpdatedFeaturesVec.push_back(F.str());
+ }
+ }
+
+ // Process target and dependent features. This is done in two loops collecting
+ // them into UpdatedFeaturesVec: first to add dependent '+'features,
+ // second to add target '+/-'features that can later disable some of
+ // features added on the first loop.
+ for (const auto &Feature : FeaturesVec)
+ if ((Feature[0] == '?' || Feature[0] == '+')) {
+ std::string Options;
+ if (AArch64TargetInfo::getFeatureDepOptions(Feature.substr(1), Options)) {
+ SmallVector<StringRef, 1> AttrFeatures;
+ StringRef(Options).split(AttrFeatures, ",");
+ for (auto F : AttrFeatures)
+ UpdatedFeaturesVec.push_back(F.str());
+ }
+ }
+ for (const auto &Feature : FeaturesVec)
+ if (Feature[0] == '+') {
+ std::string F;
+ llvm::AArch64::getFeatureOption(Feature, F);
+ UpdatedFeaturesVec.push_back(F);
+ } else if (Feature[0] != '?')
+ UpdatedFeaturesVec.push_back(Feature);
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
+}
+
+// Parse AArch64 Target attributes, which are a comma separated list of:
+// "arch=<arch>" - parsed to features as per -march=..
+// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
+// "tune=<cpu>" - TuneCPU set to <cpu>
+// "feature", "no-feature" - Add (or remove) feature.
+// "+feature", "+nofeature" - Add (or remove) feature.
+ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
+ ParsedTargetAttr Ret;
+ if (Features == "default")
+ return Ret;
+ SmallVector<StringRef, 1> AttrFeatures;
+ Features.split(AttrFeatures, ",");
+ bool FoundArch = false;
+
+ auto SplitAndAddFeatures = [](StringRef FeatString,
+ std::vector<std::string> &Features) {
+ SmallVector<StringRef, 8> SplitFeatures;
+ FeatString.split(SplitFeatures, StringRef("+"), -1, false);
+ for (StringRef Feature : SplitFeatures) {
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Features.push_back(FeatureName.str());
+ else
+ // Pushing the original feature string to give a sema error later on
+ // when they get checked.
+ if (Feature.startswith("no"))
+ Features.push_back("-" + Feature.drop_front(2).str());
+ else
+ Features.push_back("+" + Feature.str());
+ }
+ };
+
+ for (auto &Feature : AttrFeatures) {
+ Feature = Feature.trim();
+ if (Feature.startswith("fpmath="))
+ continue;
+
+ if (Feature.startswith("branch-protection=")) {
+ Ret.BranchProtection = Feature.split('=').second.trim();
+ continue;
+ }
+
+ if (Feature.startswith("arch=")) {
+ if (FoundArch)
+ Ret.Duplicate = "arch=";
+ FoundArch = true;
+ std::pair<StringRef, StringRef> Split =
+ Feature.split("=").second.trim().split("+");
+ const llvm::AArch64::ArchInfo &AI = llvm::AArch64::parseArch(Split.first);
+
+ // Parse the architecture version, adding the required features to
+ // Ret.Features.
+ if (AI == llvm::AArch64::INVALID)
+ continue;
+ Ret.Features.push_back(AI.ArchFeature.str());
+ // Add any extra features, after the +
+ SplitAndAddFeatures(Split.second, Ret.Features);
+ } else if (Feature.startswith("cpu=")) {
+ if (!Ret.CPU.empty())
+ Ret.Duplicate = "cpu=";
+ else {
+ // Split the cpu string into "cpu=", "cortex-a710" and any remaining
+ // "+feat" features.
+ std::pair<StringRef, StringRef> Split =
+ Feature.split("=").second.trim().split("+");
+ Ret.CPU = Split.first;
+ SplitAndAddFeatures(Split.second, Ret.Features);
+ }
+ } else if (Feature.startswith("tune=")) {
+ if (!Ret.Tune.empty())
+ Ret.Duplicate = "tune=";
+ else
+ Ret.Tune = Feature.split("=").second.trim();
+ } else if (Feature.startswith("+")) {
+ SplitAndAddFeatures(Feature, Ret.Features);
+ } else if (Feature.startswith("no-")) {
+ StringRef FeatureName =
+ llvm::AArch64::getArchExtFeature(Feature.split("-").second);
+ if (!FeatureName.empty())
+ Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
+ else
+ Ret.Features.push_back("-" + Feature.split("-").second.str());
+ } else {
+ // Try parsing the string to the internal target feature name. If it is
+ // invalid, add the original string (which could already be an internal
+ // name). These should be checked later by isValidFeatureName.
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Ret.Features.push_back(FeatureName.str());
+ else
+ Ret.Features.push_back("+" + Feature.str());
+ }
+ }
+ return Ret;
+}
+bool AArch64TargetInfo::hasBFloat16Type() const {
return true;
}
@@ -731,7 +1146,7 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
@@ -774,7 +1189,7 @@ const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
};
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool AArch64TargetInfo::validateAsmConstraint(