Diffstat (limited to 'include/llvm/IR')
-rw-r--r--  include/llvm/IR/Attributes.h | 49
-rw-r--r--  include/llvm/IR/AutoUpgrade.h | 10
-rw-r--r--  include/llvm/IR/BasicBlock.h | 5
-rw-r--r--  include/llvm/IR/CallSite.h | 9
-rw-r--r--  include/llvm/IR/CallingConv.h | 13
-rw-r--r--  include/llvm/IR/Constant.h | 6
-rw-r--r--  include/llvm/IR/ConstantRange.h | 10
-rw-r--r--  include/llvm/IR/DataLayout.h | 125
-rw-r--r--  include/llvm/IR/DebugInfoFlags.def | 6
-rw-r--r--  include/llvm/IR/DebugInfoMetadata.h | 4
-rw-r--r--  include/llvm/IR/DerivedTypes.h | 75
-rw-r--r--  include/llvm/IR/DiagnosticInfo.h | 25
-rw-r--r--  include/llvm/IR/FixedMetadataKinds.def | 43
-rw-r--r--  include/llvm/IR/Function.h | 15
-rw-r--r--  include/llvm/IR/GlobalAlias.h | 4
-rw-r--r--  include/llvm/IR/GlobalIFunc.h | 4
-rw-r--r--  include/llvm/IR/GlobalIndirectSymbol.h | 8
-rw-r--r--  include/llvm/IR/GlobalObject.h | 26
-rw-r--r--  include/llvm/IR/GlobalVariable.h | 1
-rw-r--r--  include/llvm/IR/IRBuilder.h | 90
-rw-r--r--  include/llvm/IR/InlineAsm.h | 1
-rw-r--r--  include/llvm/IR/InstrTypes.h | 12
-rw-r--r--  include/llvm/IR/Instruction.h | 10
-rw-r--r--  include/llvm/IR/Instructions.h | 99
-rw-r--r--  include/llvm/IR/IntrinsicInst.h | 23
-rw-r--r--  include/llvm/IR/Intrinsics.h | 10
-rw-r--r--  include/llvm/IR/Intrinsics.td | 295
-rw-r--r--  include/llvm/IR/IntrinsicsAArch64.td | 125
-rw-r--r--  include/llvm/IR/IntrinsicsAMDGPU.td | 121
-rw-r--r--  include/llvm/IR/IntrinsicsARM.td | 9
-rw-r--r--  include/llvm/IR/IntrinsicsBPF.td | 3
-rw-r--r--  include/llvm/IR/IntrinsicsMips.td | 16
-rw-r--r--  include/llvm/IR/IntrinsicsNVVM.td | 125
-rw-r--r--  include/llvm/IR/IntrinsicsWebAssembly.td | 58
-rw-r--r--  include/llvm/IR/IntrinsicsX86.td | 12
-rw-r--r--  include/llvm/IR/LLVMContext.h | 31
-rw-r--r--  include/llvm/IR/MDBuilder.h | 5
-rw-r--r--  include/llvm/IR/Metadata.h | 4
-rw-r--r--  include/llvm/IR/Module.h | 1
-rw-r--r--  include/llvm/IR/ModuleSummaryIndex.h | 18
-rw-r--r--  include/llvm/IR/ModuleSummaryIndexYAML.h | 2
-rw-r--r--  include/llvm/IR/Operator.h | 21
-rw-r--r--  include/llvm/IR/PassManager.h | 5
-rw-r--r--  include/llvm/IR/PassManagerInternal.h | 2
-rw-r--r--  include/llvm/IR/PatternMatch.h | 155
-rw-r--r--  include/llvm/IR/RemarkStreamer.h | 28
-rw-r--r--  include/llvm/IR/Type.h | 15
-rw-r--r--  include/llvm/IR/User.h | 2
-rw-r--r--  include/llvm/IR/Value.h | 58
-rw-r--r--  include/llvm/IR/ValueMap.h | 15
50 files changed, 1269 insertions(+), 540 deletions(-)
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index 06cc09e1cfc7..e6b280465f72 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -22,6 +22,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Alignment.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <bitset>
#include <cassert>
@@ -94,8 +95,8 @@ public:
/// Return a uniquified Attribute object that has the specific
/// alignment set.
- static Attribute getWithAlignment(LLVMContext &Context, uint64_t Align);
- static Attribute getWithStackAlignment(LLVMContext &Context, uint64_t Align);
+ static Attribute getWithAlignment(LLVMContext &Context, Align Alignment);
+ static Attribute getWithStackAlignment(LLVMContext &Context, Align Alignment);
static Attribute getWithDereferenceableBytes(LLVMContext &Context,
uint64_t Bytes);
static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context,
@@ -150,11 +151,11 @@ public:
/// Returns the alignment field of an attribute as a byte alignment
/// value.
- unsigned getAlignment() const;
+ MaybeAlign getAlignment() const;
/// Returns the stack alignment field of an attribute as a byte
/// alignment value.
- unsigned getStackAlignment() const;
+ MaybeAlign getStackAlignment() const;
/// Returns the number of dereferenceable bytes from the
/// dereferenceable attribute.
@@ -284,8 +285,8 @@ public:
/// Return the target-dependent attribute object.
Attribute getAttribute(StringRef Kind) const;
- unsigned getAlignment() const;
- unsigned getStackAlignment() const;
+ MaybeAlign getAlignment() const;
+ MaybeAlign getStackAlignment() const;
uint64_t getDereferenceableBytes() const;
uint64_t getDereferenceableOrNullBytes() const;
Type *getByValType() const;
@@ -603,16 +604,16 @@ public:
}
/// Return the alignment of the return value.
- unsigned getRetAlignment() const;
+ MaybeAlign getRetAlignment() const;
/// Return the alignment for the specified function parameter.
- unsigned getParamAlignment(unsigned ArgNo) const;
+ MaybeAlign getParamAlignment(unsigned ArgNo) const;
/// Return the byval type for the specified function parameter.
Type *getParamByValType(unsigned ArgNo) const;
/// Get the stack alignment.
- unsigned getStackAlignment(unsigned Index) const;
+ MaybeAlign getStackAlignment(unsigned Index) const;
/// Get the number of dereferenceable bytes (or zero if unknown).
uint64_t getDereferenceableBytes(unsigned Index) const;
@@ -704,9 +705,9 @@ template <> struct DenseMapInfo<AttributeList> {
/// equality, presence of attributes, etc.
class AttrBuilder {
std::bitset<Attribute::EndAttrKinds> Attrs;
- std::map<std::string, std::string> TargetDepAttrs;
- uint64_t Alignment = 0;
- uint64_t StackAlignment = 0;
+ std::map<std::string, std::string, std::less<>> TargetDepAttrs;
+ MaybeAlign Alignment;
+ MaybeAlign StackAlignment;
uint64_t DerefBytes = 0;
uint64_t DerefOrNullBytes = 0;
uint64_t AllocSizeArgs = 0;
@@ -773,10 +774,10 @@ public:
bool hasAlignmentAttr() const;
/// Retrieve the alignment attribute, if it exists.
- uint64_t getAlignment() const { return Alignment; }
+ MaybeAlign getAlignment() const { return Alignment; }
/// Retrieve the stack alignment attribute, if it exists.
- uint64_t getStackAlignment() const { return StackAlignment; }
+ MaybeAlign getStackAlignment() const { return StackAlignment; }
/// Retrieve the number of dereferenceable bytes, if the
/// dereferenceable attribute exists (zero is returned otherwise).
@@ -793,13 +794,29 @@ public:
/// doesn't exist, pair(0, 0) is returned.
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
+ /// This turns an alignment into the form used internally in Attribute.
+ /// This call has no effect if Align is not set.
+ AttrBuilder &addAlignmentAttr(MaybeAlign Align);
+
/// This turns an int alignment (which must be a power of 2) into the
/// form used internally in Attribute.
- AttrBuilder &addAlignmentAttr(unsigned Align);
+ /// This call has no effect if Align is 0.
+ /// Deprecated, use the version using a MaybeAlign.
+ inline AttrBuilder &addAlignmentAttr(unsigned Align) {
+ return addAlignmentAttr(MaybeAlign(Align));
+ }
+
+ /// This turns a stack alignment into the form used internally in Attribute.
+ /// This call has no effect if Align is not set.
+ AttrBuilder &addStackAlignmentAttr(MaybeAlign Align);
/// This turns an int stack alignment (which must be a power of 2) into
/// the form used internally in Attribute.
- AttrBuilder &addStackAlignmentAttr(unsigned Align);
+ /// This call has no effect if Align is 0.
+ /// Deprecated, use the version using a MaybeAlign.
+ inline AttrBuilder &addStackAlignmentAttr(unsigned Align) {
+ return addStackAlignmentAttr(MaybeAlign(Align));
+ }
/// This turns the number of dereferenceable bytes into the form used
/// internally in Attribute.
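A minimal usage sketch for the reworked alignment API above (the helper name is hypothetical; only the AttrBuilder declarations from this hunk are assumed):

#include "llvm/IR/Attributes.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

void setAlignments(AttrBuilder &B) {
  B.addAlignmentAttr(Align(16));    // sets align 16 via the MaybeAlign overload
  B.addAlignmentAttr(MaybeAlign()); // unset alignment: documented as a no-op
  B.addStackAlignmentAttr(0u);      // deprecated unsigned path; 0 is also a no-op
}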
diff --git a/include/llvm/IR/AutoUpgrade.h b/include/llvm/IR/AutoUpgrade.h
index 017ad93d8a2a..66f38e5b55d1 100644
--- a/include/llvm/IR/AutoUpgrade.h
+++ b/include/llvm/IR/AutoUpgrade.h
@@ -54,9 +54,9 @@ namespace llvm {
/// module is modified.
bool UpgradeModuleFlags(Module &M);
- /// This checks for objc retain release marker which should be upgraded. It
- /// returns true if module is modified.
- bool UpgradeRetainReleaseMarker(Module &M);
+ /// Convert calls to ARC runtime functions to intrinsic calls and upgrade the
+ /// old retain release marker to new module flag format.
+ void UpgradeARCRuntime(Module &M);
void UpgradeSectionAttributes(Module &M);
@@ -87,6 +87,10 @@ namespace llvm {
/// Upgrade the loop attachment metadata node.
MDNode *upgradeInstructionLoopAttachment(MDNode &N);
+ /// Upgrade the datalayout string by adding a section for address space
+ /// pointers.
+ std::string UpgradeDataLayoutString(StringRef DL, StringRef Triple);
+
} // End llvm namespace
#endif
diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h
index 69555af50e1f..d594145f8636 100644
--- a/include/llvm/IR/BasicBlock.h
+++ b/include/llvm/IR/BasicBlock.h
@@ -192,6 +192,11 @@ public:
std::function<bool(Instruction &)>>>
instructionsWithoutDebug();
+ /// Return the size of the basic block ignoring debug instructions
+ filter_iterator<BasicBlock::const_iterator,
+ std::function<bool(const Instruction &)>>::difference_type
+ sizeWithoutDebug() const;
+
/// Unlink 'this' from the containing function, but do not delete it.
void removeFromParent();
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
index b47a96c5d5fa..13b1ae8d0e32 100644
--- a/include/llvm/IR/CallSite.h
+++ b/include/llvm/IR/CallSite.h
@@ -854,6 +854,15 @@ public:
return CI.ParameterEncoding[0];
}
+ /// Return the use of the callee value in the underlying instruction. Only
+ /// valid for callback calls!
+ const Use &getCalleeUseForCallback() const {
+ int CalleeArgIdx = getCallArgOperandNoForCallee();
+ assert(CalleeArgIdx >= 0 &&
+ unsigned(CalleeArgIdx) < getInstruction()->getNumOperands());
+ return getInstruction()->getOperandUse(CalleeArgIdx);
+ }
+
/// Return the pointer to function that is being called.
Value *getCalledValue() const {
if (isDirectCall())
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index 399c6ad521fa..c1c979c2e2ab 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -75,6 +75,11 @@ namespace CallingConv {
// CXX_FAST_TLS - Calling convention for access functions.
CXX_FAST_TLS = 17,
+ /// Tail - This calling convention attempts to make calls as fast as
+ /// possible while guaranteeing that tail call optimization can always
+ /// be performed.
+ Tail = 18,
+
// Target - This is the start of the target-specific calling conventions,
// e.g. fastcall and thiscall on X86.
FirstTargetCC = 64,
@@ -222,6 +227,14 @@ namespace CallingConv {
// Calling convention between AArch64 Advanced SIMD functions
AArch64_VectorCall = 97,
+ /// Calling convention between AArch64 SVE functions
+ AArch64_SVE_VectorCall = 98,
+
+ /// Calling convention for emscripten __invoke_* functions. The first
+ /// argument is required to be the function ptr being indirectly called.
+ /// The remainder matches the regular calling convention.
+ WASM_EmscriptenInvoke = 99,
+
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
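A sketch of how the new convention might be requested; setCallingConv on Function and CallInst is pre-existing IR API, not part of this diff:

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void requestGuaranteedTailCalls(Function &Callee, CallInst &Site) {
  // Callee and call site must agree on the convention for the
  // tail-call guarantee to hold.
  Callee.setCallingConv(CallingConv::Tail);
  Site.setCallingConv(CallingConv::Tail);
}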
diff --git a/include/llvm/IR/Constant.h b/include/llvm/IR/Constant.h
index 931576651224..2b6a6e4141b9 100644
--- a/include/llvm/IR/Constant.h
+++ b/include/llvm/IR/Constant.h
@@ -86,6 +86,12 @@ public:
/// floating-point constant with all NaN elements.
bool isNaN() const;
+ /// Return true if this constant and a constant 'Y' are element-wise equal.
+ /// This is identical to just comparing the pointers, with the exception that
+ /// for vectors, if only one of the constants has an `undef` element in some
+ /// lane, the constants still match.
+ bool isElementWiseEqual(Value *Y) const;
+
/// Return true if this is a vector constant that includes any undefined
/// elements.
bool containsUndefElement() const;
diff --git a/include/llvm/IR/ConstantRange.h b/include/llvm/IR/ConstantRange.h
index 91f3f31abe17..964f9e8e9bc9 100644
--- a/include/llvm/IR/ConstantRange.h
+++ b/include/llvm/IR/ConstantRange.h
@@ -330,9 +330,13 @@ public:
/// from an addition of a value in this range and a value in \p Other.
ConstantRange add(const ConstantRange &Other) const;
- /// Return a new range representing the possible values resulting from a
- /// known NSW addition of a value in this range and \p Other constant.
- ConstantRange addWithNoSignedWrap(const APInt &Other) const;
+ /// Return a new range representing the possible values resulting
+ /// from an addition with wrap type \p NoWrapKind of a value in this
+ /// range and a value in \p Other.
+ /// If the result range is disjoint, the preferred range is determined by the
+ /// \p PreferredRangeType.
+ ConstantRange addWithNoWrap(const ConstantRange &Other, unsigned NoWrapKind,
+ PreferredRangeType RangeType = Smallest) const;
/// Return a new range representing the possible values resulting
/// from a subtraction of a value in this range and a value in \p Other.
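A sketch of the generalized entry point that replaces addWithNoSignedWrap; the wrap-kind constants come from OverflowingBinaryOperator in Operator.h:

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

// Values an 'add nsw' of a value from A and a value from B can produce,
// where the old API only accepted a single constant on the RHS.
ConstantRange nswAdd(const ConstantRange &A, const ConstantRange &B) {
  return A.addWithNoWrap(B, OverflowingBinaryOperator::NoSignedWrap);
}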
diff --git a/include/llvm/IR/DataLayout.h b/include/llvm/IR/DataLayout.h
index ac9770a15120..85093dd218f8 100644
--- a/include/llvm/IR/DataLayout.h
+++ b/include/llvm/IR/DataLayout.h
@@ -25,10 +25,11 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
-#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
#include <string>
@@ -71,11 +72,11 @@ struct LayoutAlignElem {
/// Alignment type from \c AlignTypeEnum
unsigned AlignType : 8;
unsigned TypeBitWidth : 24;
- unsigned ABIAlign : 16;
- unsigned PrefAlign : 16;
+ Align ABIAlign;
+ Align PrefAlign;
- static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
+ static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width);
bool operator==(const LayoutAlignElem &rhs) const;
};
@@ -87,15 +88,15 @@ struct LayoutAlignElem {
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct PointerAlignElem {
- unsigned ABIAlign;
- unsigned PrefAlign;
+ Align ABIAlign;
+ Align PrefAlign;
uint32_t TypeByteWidth;
uint32_t AddressSpace;
uint32_t IndexWidth;
/// Initializer
- static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
- unsigned PrefAlign, uint32_t TypeByteWidth,
+ static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth);
bool operator==(const PointerAlignElem &rhs) const;
@@ -120,10 +121,10 @@ private:
bool BigEndian;
unsigned AllocaAddrSpace;
- unsigned StackNaturalAlign;
+ MaybeAlign StackNaturalAlign;
unsigned ProgramAddrSpace;
- unsigned FunctionPtrAlign;
+ MaybeAlign FunctionPtrAlign;
FunctionPtrAlignType TheFunctionPtrAlignType;
enum ManglingModeT {
@@ -172,16 +173,15 @@ private:
/// well-defined bitwise representation.
SmallVector<unsigned, 8> NonIntegralAddressSpaces;
- void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
- unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
- bool ABIAlign, Type *Ty) const;
- void setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
- unsigned PrefAlign, uint32_t TypeByteWidth,
- uint32_t IndexWidth);
+ void setAlignment(AlignTypeEnum align_type, Align abi_align, Align pref_align,
+ uint32_t bit_width);
+ Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+ bool ABIAlign, Type *Ty) const;
+ void setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
+ uint32_t TypeByteWidth, uint32_t IndexWidth);
/// Internal helper method that returns requested alignment for type.
- unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
+ Align getAlignment(Type *Ty, bool abi_or_pref) const;
/// Parses a target data specification string. Assert if the string is
/// malformed.
@@ -261,17 +261,21 @@ public:
bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
/// Returns true if the given alignment exceeds the natural stack alignment.
- bool exceedsNaturalStackAlignment(unsigned Align) const {
- return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
+ bool exceedsNaturalStackAlignment(Align Alignment) const {
+ return StackNaturalAlign && (Alignment > StackNaturalAlign);
+ }
+
+ Align getStackAlignment() const {
+ assert(StackNaturalAlign && "StackNaturalAlign must be defined");
+ return *StackNaturalAlign;
}
- unsigned getStackAlignment() const { return StackNaturalAlign; }
unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; }
/// Returns the alignment of function pointers, which may or may not be
/// related to the alignment of functions.
/// \see getFunctionPtrAlignType
- unsigned getFunctionPtrAlign() const { return FunctionPtrAlign; }
+ MaybeAlign getFunctionPtrAlign() const { return FunctionPtrAlign; }
/// Return the type of function pointer alignment.
/// \see getFunctionPtrAlign
@@ -344,12 +348,12 @@ public:
}
/// Layout pointer alignment
- unsigned getPointerABIAlignment(unsigned AS) const;
+ Align getPointerABIAlignment(unsigned AS) const;
/// Return target's alignment for stack-based pointers
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
- unsigned getPointerPrefAlignment(unsigned AS = 0) const;
+ Align getPointerPrefAlignment(unsigned AS = 0) const;
/// Layout pointer size
/// FIXME: The defaults need to be removed once all of
@@ -433,23 +437,33 @@ public:
/// Returns the number of bits necessary to hold the specified type.
///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
/// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
/// have a size (Type::isSized() must return true).
- uint64_t getTypeSizeInBits(Type *Ty) const;
+ TypeSize getTypeSizeInBits(Type *Ty) const;
/// Returns the maximum number of bytes that may be overwritten by
/// storing the specified type.
///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
/// For example, returns 5 for i36 and 10 for x86_fp80.
- uint64_t getTypeStoreSize(Type *Ty) const {
- return (getTypeSizeInBits(Ty) + 7) / 8;
+ TypeSize getTypeStoreSize(Type *Ty) const {
+ TypeSize BaseSize = getTypeSizeInBits(Ty);
+ return { (BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable() };
}
/// Returns the maximum number of bits that may be overwritten by
/// storing the specified type; always a multiple of 8.
///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
/// For example, returns 40 for i36 and 80 for x86_fp80.
- uint64_t getTypeStoreSizeInBits(Type *Ty) const {
+ TypeSize getTypeStoreSizeInBits(Type *Ty) const {
return 8 * getTypeStoreSize(Ty);
}
@@ -464,9 +478,12 @@ public:
/// Returns the offset in bytes between successive objects of the
/// specified type, including alignment padding.
///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
/// This is the amount that alloca reserves for this type. For example,
/// returns 12 or 16 for x86_fp80, depending on alignment.
- uint64_t getTypeAllocSize(Type *Ty) const {
+ TypeSize getTypeAllocSize(Type *Ty) const {
// Round up to the next alignment boundary.
return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
}
@@ -474,18 +491,28 @@ public:
/// Returns the offset in bits between successive objects of the
/// specified type, including alignment padding; always a multiple of 8.
///
+ /// If Ty is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
/// This is the amount that alloca reserves for this type. For example,
/// returns 96 or 128 for x86_fp80, depending on alignment.
- uint64_t getTypeAllocSizeInBits(Type *Ty) const {
+ TypeSize getTypeAllocSizeInBits(Type *Ty) const {
return 8 * getTypeAllocSize(Ty);
}
/// Returns the minimum ABI-required alignment for the specified type.
unsigned getABITypeAlignment(Type *Ty) const;
+ /// Helper function to return `Alignment` if it's set or the result of
+ /// `getABITypeAlignment(Ty)`, in any case the result is a valid alignment.
+ inline Align getValueOrABITypeAlignment(MaybeAlign Alignment,
+ Type *Ty) const {
+ return Alignment ? *Alignment : Align(getABITypeAlignment(Ty));
+ }
+
/// Returns the minimum ABI-required alignment for an integer type of
/// the specified bitwidth.
- unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
+ Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
/// Returns the preferred stack/global alignment for the specified
/// type.
@@ -493,10 +520,6 @@ public:
/// This is always at least as good as the ABI alignment.
unsigned getPrefTypeAlignment(Type *Ty) const;
- /// Returns the preferred alignment for the specified type, returned as
- /// log2 of the value (a shift amount).
- unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
-
/// Returns an integer type with size at least as big as that of a
/// pointer in the given address space.
IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
@@ -561,7 +584,7 @@ inline LLVMTargetDataRef wrap(const DataLayout *P) {
/// based on the DataLayout structure.
class StructLayout {
uint64_t StructSize;
- unsigned StructAlignment;
+ Align StructAlignment;
unsigned IsPadded : 1;
unsigned NumElements : 31;
uint64_t MemberOffsets[1]; // variable sized array!
@@ -571,7 +594,7 @@ public:
uint64_t getSizeInBits() const { return 8 * StructSize; }
- unsigned getAlignment() const { return StructAlignment; }
+ Align getAlignment() const { return StructAlignment; }
/// Returns whether the struct has padding or not between its fields.
/// NB: Padding in nested element is not taken into account.
@@ -598,13 +621,13 @@ private:
// The implementation of this method is provided inline as it is particularly
// well suited to constant folding when called on a specific Type subclass.
-inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
+inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
case Type::LabelTyID:
- return getPointerSizeInBits(0);
+ return TypeSize::Fixed(getPointerSizeInBits(0));
case Type::PointerTyID:
- return getPointerSizeInBits(Ty->getPointerAddressSpace());
+ return TypeSize::Fixed(getPointerSizeInBits(Ty->getPointerAddressSpace()));
case Type::ArrayTyID: {
ArrayType *ATy = cast<ArrayType>(Ty);
return ATy->getNumElements() *
@@ -612,26 +635,30 @@ inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
}
case Type::StructTyID:
// Get the layout annotation... which is lazily created on demand.
- return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
+ return TypeSize::Fixed(
+ getStructLayout(cast<StructType>(Ty))->getSizeInBits());
case Type::IntegerTyID:
- return Ty->getIntegerBitWidth();
+ return TypeSize::Fixed(Ty->getIntegerBitWidth());
case Type::HalfTyID:
- return 16;
+ return TypeSize::Fixed(16);
case Type::FloatTyID:
- return 32;
+ return TypeSize::Fixed(32);
case Type::DoubleTyID:
case Type::X86_MMXTyID:
- return 64;
+ return TypeSize::Fixed(64);
case Type::PPC_FP128TyID:
case Type::FP128TyID:
- return 128;
+ return TypeSize::Fixed(128);
// In memory objects this is always aligned to a higher boundary, but
// only 80 bits contain information.
case Type::X86_FP80TyID:
- return 80;
+ return TypeSize::Fixed(80);
case Type::VectorTyID: {
VectorType *VTy = cast<VectorType>(Ty);
- return VTy->getNumElements() * getTypeSizeInBits(VTy->getElementType());
+ auto EltCnt = VTy->getElementCount();
+ uint64_t MinBits = EltCnt.Min *
+ getTypeSizeInBits(VTy->getElementType()).getFixedSize();
+ return TypeSize(MinBits, EltCnt.Scalable);
}
default:
llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
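Callers migrating off the old uint64_t results have to decide what a scalable size means to them; a minimal sketch, assuming only the TypeSize API used in this hunk:

#include "llvm/ADT/Optional.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/TypeSize.h"
using namespace llvm;

// Returns a byte size only when it is a compile-time constant; a scalable
// vector type reports a runtime multiple of getKnownMinSize() instead.
Optional<uint64_t> constantStoreSize(const DataLayout &DL, Type *Ty) {
  TypeSize TS = DL.getTypeStoreSize(Ty);
  if (TS.isScalable())
    return None;
  return TS.getFixedSize();
}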
diff --git a/include/llvm/IR/DebugInfoFlags.def b/include/llvm/IR/DebugInfoFlags.def
index 07e3d6bdc9e5..f90c580f10ef 100644
--- a/include/llvm/IR/DebugInfoFlags.def
+++ b/include/llvm/IR/DebugInfoFlags.def
@@ -31,7 +31,8 @@ HANDLE_DI_FLAG(2, Protected)
HANDLE_DI_FLAG(3, Public)
HANDLE_DI_FLAG((1 << 2), FwdDecl)
HANDLE_DI_FLAG((1 << 3), AppleBlock)
-HANDLE_DI_FLAG((1 << 4), BlockByrefStruct)
+// Used to be BlockByRef, can be reused for anything except DICompositeType.
+HANDLE_DI_FLAG((1 << 4), ReservedBit4)
HANDLE_DI_FLAG((1 << 5), Virtual)
HANDLE_DI_FLAG((1 << 6), Artificial)
HANDLE_DI_FLAG((1 << 7), Explicit)
@@ -42,8 +43,7 @@ HANDLE_DI_FLAG((1 << 11), Vector)
HANDLE_DI_FLAG((1 << 12), StaticMember)
HANDLE_DI_FLAG((1 << 13), LValueReference)
HANDLE_DI_FLAG((1 << 14), RValueReference)
-// 15 was formerly ExternalTypeRef, but this was never used.
-HANDLE_DI_FLAG((1 << 15), Reserved)
+HANDLE_DI_FLAG((1 << 15), ExportSymbols)
HANDLE_DI_FLAG((1 << 16), SingleInheritance)
HANDLE_DI_FLAG((2 << 16), MultipleInheritance)
HANDLE_DI_FLAG((3 << 16), VirtualInheritance)
diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h
index 9dc6dfbb0f68..28a59576b7c6 100644
--- a/include/llvm/IR/DebugInfoMetadata.h
+++ b/include/llvm/IR/DebugInfoMetadata.h
@@ -650,7 +650,6 @@ public:
}
bool isForwardDecl() const { return getFlags() & FlagFwdDecl; }
bool isAppleBlockExtension() const { return getFlags() & FlagAppleBlock; }
- bool isBlockByrefStruct() const { return getFlags() & FlagBlockByrefStruct; }
bool isVirtual() const { return getFlags() & FlagVirtual; }
bool isArtificial() const { return getFlags() & FlagArtificial; }
bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
@@ -668,6 +667,7 @@ public:
}
bool isBigEndian() const { return getFlags() & FlagBigEndian; }
bool isLittleEndian() const { return getFlags() & FlagLittleEndian; }
+ bool getExportSymbols() const { return getFlags() & FlagExportSymbols; }
static bool classof(const Metadata *MD) {
switch (MD->getMetadataID()) {
@@ -2569,7 +2569,7 @@ public:
/// (This is the only configuration of entry values that is supported.)
bool isEntryValue() const {
return getNumElements() > 0 &&
- getElement(0) == dwarf::DW_OP_entry_value;
+ getElement(0) == dwarf::DW_OP_LLVM_entry_value;
}
};
diff --git a/include/llvm/IR/DerivedTypes.h b/include/llvm/IR/DerivedTypes.h
index 3c1d4278905f..20097ef3f31a 100644
--- a/include/llvm/IR/DerivedTypes.h
+++ b/include/llvm/IR/DerivedTypes.h
@@ -23,7 +23,7 @@
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/ScalableSize.h"
+#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
@@ -62,6 +62,11 @@ public:
/// Get or create an IntegerType instance.
static IntegerType *get(LLVMContext &C, unsigned NumBits);
+ /// Returns a type twice as wide as the input type.
+ IntegerType *getExtendedType() const {
+ return Type::getIntNTy(getContext(), 2 * getScalarSizeInBits());
+ }
+
/// Get the number of bits in this IntegerType
unsigned getBitWidth() const { return getSubclassData(); }
@@ -470,21 +475,47 @@ public:
/// This static method is like getInteger except that the element types are
/// twice as wide as the elements in the input type.
static VectorType *getExtendedElementVectorType(VectorType *VTy) {
- unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- Type *EltTy = IntegerType::get(VTy->getContext(), EltBits * 2);
- return VectorType::get(EltTy, VTy->getElementCount());
+ assert(VTy->isIntOrIntVectorTy() && "VTy expected to be a vector of ints.");
+ auto *EltTy = cast<IntegerType>(VTy->getElementType());
+ return VectorType::get(EltTy->getExtendedType(), VTy->getElementCount());
}
- /// This static method is like getInteger except that the element types are
- /// half as wide as the elements in the input type.
+ // This static method gets a VectorType with the same number of elements as
+ // the input type, and the element type is an integer or float type which
+ // is half as wide as the elements in the input type.
static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
- unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- assert((EltBits & 1) == 0 &&
- "Cannot truncate vector element with odd bit-width");
- Type *EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
+ Type *EltTy;
+ if (VTy->getElementType()->isFloatingPointTy()) {
+ switch(VTy->getElementType()->getTypeID()) {
+ case DoubleTyID:
+ EltTy = Type::getFloatTy(VTy->getContext());
+ break;
+ case FloatTyID:
+ EltTy = Type::getHalfTy(VTy->getContext());
+ break;
+ default:
+ llvm_unreachable("Cannot create narrower fp vector element type");
+ }
+ } else {
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ assert((EltBits & 1) == 0 &&
+ "Cannot truncate vector element with odd bit-width");
+ EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
+ }
return VectorType::get(EltTy, VTy->getElementCount());
}
+ // This static method returns a VectorType with a smaller number of elements
+ // of a larger type than the input element type. For example, a <16 x i8>
+ // subdivided twice would return <4 x i32>
+ static VectorType *getSubdividedVectorType(VectorType *VTy, int NumSubdivs) {
+ for (int i = 0; i < NumSubdivs; ++i) {
+ VTy = VectorType::getDoubleElementsVectorType(VTy);
+ VTy = VectorType::getTruncatedElementVectorType(VTy);
+ }
+ return VTy;
+ }
+
/// This static method returns a VectorType with half as many elements as the
/// input type and the same element type.
static VectorType *getHalfElementsVectorType(VectorType *VTy) {
@@ -540,6 +571,10 @@ bool Type::getVectorIsScalable() const {
return cast<VectorType>(this)->isScalable();
}
+ElementCount Type::getVectorElementCount() const {
+ return cast<VectorType>(this)->getElementCount();
+}
+
/// Class to represent pointers.
class PointerType : public Type {
explicit PointerType(Type *ElType, unsigned AddrSpace);
@@ -577,6 +612,26 @@ public:
}
};
+Type *Type::getExtendedType() const {
+ assert(
+ isIntOrIntVectorTy() &&
+ "Original type expected to be a vector of integers or a scalar integer.");
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ return VectorType::getExtendedElementVectorType(
+ const_cast<VectorType *>(VTy));
+ return cast<IntegerType>(this)->getExtendedType();
+}
+
+Type *Type::getWithNewBitWidth(unsigned NewBitWidth) const {
+ assert(
+ isIntOrIntVectorTy() &&
+ "Original type expected to be a vector of integers or a scalar integer.");
+ Type *NewType = getIntNTy(getContext(), NewBitWidth);
+ if (isVectorTy())
+ NewType = VectorType::get(NewType, getVectorElementCount());
+ return NewType;
+}
+
unsigned Type::getPointerAddressSpace() const {
return cast<PointerType>(getScalarType())->getAddressSpace();
}
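The subdivision comment above, as a concrete sketch (function name hypothetical):

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

VectorType *demoSubdivide(LLVMContext &Ctx) {
  // <16 x i8> --subdivide--> <8 x i16> --subdivide--> <4 x i32>
  auto *V16i8 = VectorType::get(Type::getInt8Ty(Ctx), 16);
  return VectorType::getSubdividedVectorType(V16i8, /*NumSubdivs=*/2);
}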
diff --git a/include/llvm/IR/DiagnosticInfo.h b/include/llvm/IR/DiagnosticInfo.h
index 373663289dbd..ec469982d378 100644
--- a/include/llvm/IR/DiagnosticInfo.h
+++ b/include/llvm/IR/DiagnosticInfo.h
@@ -74,8 +74,10 @@ enum DiagnosticKind {
DK_LastMachineRemark = DK_MachineOptimizationRemarkAnalysis,
DK_MIRParser,
DK_PGOProfile,
+ DK_MisExpect,
DK_Unsupported,
- DK_FirstPluginKind
+ DK_FirstPluginKind // Must be last value to work with
+ // getNextAvailablePluginDiagnosticKind
};
/// Get the next available kind ID for a plugin diagnostic.
@@ -663,7 +665,7 @@ public:
private:
/// The IR value (currently basic block) that the optimization operates on.
/// This is currently used to provide run-time hotness information with PGO.
- const Value *CodeRegion;
+ const Value *CodeRegion = nullptr;
};
/// Diagnostic information for applied optimization remarks.
@@ -1002,6 +1004,25 @@ public:
void print(DiagnosticPrinter &DP) const override;
};
+/// Diagnostic information for MisExpect analysis.
+class DiagnosticInfoMisExpect : public DiagnosticInfoWithLocationBase {
+public:
+ DiagnosticInfoMisExpect(const Instruction *Inst, Twine &Msg);
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_MisExpect;
+ }
+
+ const Twine &getMsg() const { return Msg; }
+
+private:
+ /// Message to report.
+ const Twine &Msg;
+};
+
} // end namespace llvm
#endif // LLVM_IR_DIAGNOSTICINFO_H
diff --git a/include/llvm/IR/FixedMetadataKinds.def b/include/llvm/IR/FixedMetadataKinds.def
new file mode 100644
index 000000000000..0e1ffef58672
--- /dev/null
+++ b/include/llvm/IR/FixedMetadataKinds.def
@@ -0,0 +1,43 @@
+/*===-- FixedMetadataKinds.def - Fixed metadata kind IDs -------*- C++ -*-=== *\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_FIXED_MD_KIND
+#error "LLVM_FIXED_MD_KIND(EnumID, Name, Value) is not defined."
+#endif
+
+LLVM_FIXED_MD_KIND(MD_dbg, "dbg", 0)
+LLVM_FIXED_MD_KIND(MD_tbaa, "tbaa", 1)
+LLVM_FIXED_MD_KIND(MD_prof, "prof", 2)
+LLVM_FIXED_MD_KIND(MD_fpmath, "fpmath", 3)
+LLVM_FIXED_MD_KIND(MD_range, "range", 4)
+LLVM_FIXED_MD_KIND(MD_tbaa_struct, "tbaa.struct", 5)
+LLVM_FIXED_MD_KIND(MD_invariant_load, "invariant.load", 6)
+LLVM_FIXED_MD_KIND(MD_alias_scope, "alias.scope", 7)
+LLVM_FIXED_MD_KIND(MD_noalias, "noalias", 8)
+LLVM_FIXED_MD_KIND(MD_nontemporal, "nontemporal", 9)
+LLVM_FIXED_MD_KIND(MD_mem_parallel_loop_access,
+ "llvm.mem.parallel_loop_access", 10)
+LLVM_FIXED_MD_KIND(MD_nonnull, "nonnull", 11)
+LLVM_FIXED_MD_KIND(MD_dereferenceable, "dereferenceable", 12)
+LLVM_FIXED_MD_KIND(MD_dereferenceable_or_null, "dereferenceable_or_null", 13)
+LLVM_FIXED_MD_KIND(MD_make_implicit, "make.implicit", 14)
+LLVM_FIXED_MD_KIND(MD_unpredictable, "unpredictable", 15)
+LLVM_FIXED_MD_KIND(MD_invariant_group, "invariant.group", 16)
+LLVM_FIXED_MD_KIND(MD_align, "align", 17)
+LLVM_FIXED_MD_KIND(MD_loop, "llvm.loop", 18)
+LLVM_FIXED_MD_KIND(MD_type, "type", 19)
+LLVM_FIXED_MD_KIND(MD_section_prefix, "section_prefix", 20)
+LLVM_FIXED_MD_KIND(MD_absolute_symbol, "absolute_symbol", 21)
+LLVM_FIXED_MD_KIND(MD_associated, "associated", 22)
+LLVM_FIXED_MD_KIND(MD_callees, "callees", 23)
+LLVM_FIXED_MD_KIND(MD_irr_loop, "irr_loop", 24)
+LLVM_FIXED_MD_KIND(MD_access_group, "llvm.access.group", 25)
+LLVM_FIXED_MD_KIND(MD_callback, "callback", 26)
+LLVM_FIXED_MD_KIND(MD_preserve_access_index, "llvm.preserve.access.index", 27)
+LLVM_FIXED_MD_KIND(MD_misexpect, "misexpect", 28)
+LLVM_FIXED_MD_KIND(MD_vcall_visibility, "vcall_visibility", 29)
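The .def file is meant to be consumed with the usual X-macro pattern; a sketch of a consumer, mirroring what LLVMContext's fixed-kind enum would look like (the actual consumer is outside this section):

enum FixedMDKinds : unsigned {
#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
#include "llvm/IR/FixedMetadataKinds.def"
#undef LLVM_FIXED_MD_KIND
};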
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index 7fa61e12f431..d586a9460d2b 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -343,7 +343,10 @@ public:
unsigned getFnStackAlignment() const {
if (!hasFnAttribute(Attribute::StackAlignment))
return 0;
- return AttributeSets.getStackAlignment(AttributeList::FunctionIndex);
+ if (const auto MA =
+ AttributeSets.getStackAlignment(AttributeList::FunctionIndex))
+ return MA->value();
+ return 0;
}
/// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
@@ -433,7 +436,9 @@ public:
/// Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned ArgNo) const {
- return AttributeSets.getParamAlignment(ArgNo);
+ if (const auto MA = AttributeSets.getParamAlignment(ArgNo))
+ return MA->value();
+ return 0;
}
/// Extract the byval type for a parameter.
@@ -710,6 +715,12 @@ public:
return Arguments + NumArgs;
}
+ Argument* getArg(unsigned i) const {
+ assert (i < NumArgs && "getArg() out of range!");
+ CheckLazyArguments();
+ return Arguments + i;
+ }
+
iterator_range<arg_iterator> args() {
return make_range(arg_begin(), arg_end());
}
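A small sketch of the new O(1) accessor (helper name hypothetical); previously callers had to advance arg_begin() by hand:

#include "llvm/IR/Function.h"
using namespace llvm;

Argument *secondParamOrNull(Function &F) {
  // getArg() asserts the index is in range, so guard with arg_size().
  return F.arg_size() > 1 ? F.getArg(1) : nullptr;
}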
diff --git a/include/llvm/IR/GlobalAlias.h b/include/llvm/IR/GlobalAlias.h
index 3cd405701300..f2d9b9676ec9 100644
--- a/include/llvm/IR/GlobalAlias.h
+++ b/include/llvm/IR/GlobalAlias.h
@@ -58,10 +58,6 @@ public:
// Linkage, Type, Parent and AddressSpace taken from the Aliasee.
static GlobalAlias *create(const Twine &Name, GlobalValue *Aliasee);
- void copyAttributesFrom(const GlobalValue *Src) {
- GlobalValue::copyAttributesFrom(Src);
- }
-
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
diff --git a/include/llvm/IR/GlobalIFunc.h b/include/llvm/IR/GlobalIFunc.h
index bc0d3c053cce..0fdae917878a 100644
--- a/include/llvm/IR/GlobalIFunc.h
+++ b/include/llvm/IR/GlobalIFunc.h
@@ -46,10 +46,6 @@ public:
LinkageTypes Linkage, const Twine &Name,
Constant *Resolver, Module *Parent);
- void copyAttributesFrom(const GlobalIFunc *Src) {
- GlobalValue::copyAttributesFrom(Src);
- }
-
/// This method unlinks 'this' from the containing module, but does not
/// delete it.
void removeFromParent();
diff --git a/include/llvm/IR/GlobalIndirectSymbol.h b/include/llvm/IR/GlobalIndirectSymbol.h
index 8bc3f90b94aa..d996237aa3ef 100644
--- a/include/llvm/IR/GlobalIndirectSymbol.h
+++ b/include/llvm/IR/GlobalIndirectSymbol.h
@@ -42,6 +42,10 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+ void copyAttributesFrom(const GlobalValue *Src) {
+ GlobalValue::copyAttributesFrom(Src);
+ }
+
/// These methods set and retrieve indirect symbol.
void setIndirectSymbol(Constant *Symbol) {
setOperand(0, Symbol);
@@ -54,9 +58,7 @@ public:
static_cast<const GlobalIndirectSymbol *>(this)->getIndirectSymbol());
}
- const GlobalObject *getBaseObject() const {
- return dyn_cast<GlobalObject>(getIndirectSymbol()->stripInBoundsOffsets());
- }
+ const GlobalObject *getBaseObject() const;
GlobalObject *getBaseObject() {
return const_cast<GlobalObject *>(
static_cast<const GlobalIndirectSymbol *>(this)->getBaseObject());
diff --git a/include/llvm/IR/GlobalObject.h b/include/llvm/IR/GlobalObject.h
index b8ab6140ebe7..ce81eb9f0719 100644
--- a/include/llvm/IR/GlobalObject.h
+++ b/include/llvm/IR/GlobalObject.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/Alignment.h"
#include <string>
#include <utility>
@@ -27,6 +28,20 @@ class MDNode;
class Metadata;
class GlobalObject : public GlobalValue {
+public:
+ // VCallVisibility - values for visibility metadata attached to vtables. This
+ // describes the scope in which a virtual call could end up being dispatched
+ // through this vtable.
+ enum VCallVisibility {
+ // Type is potentially visible to external code.
+ VCallVisibilityPublic = 0,
+ // Type is only visible to code which will be in the current Module after
+ // LTO internalization.
+ VCallVisibilityLinkageUnit = 1,
+ // Type is only visible to code in the current Module.
+ VCallVisibilityTranslationUnit = 2,
+ };
+
protected:
GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
LinkageTypes Linkage, const Twine &Name,
@@ -58,9 +73,14 @@ public:
unsigned getAlignment() const {
unsigned Data = getGlobalValueSubClassData();
unsigned AlignmentData = Data & AlignmentMask;
- return (1u << AlignmentData) >> 1;
+ MaybeAlign Align = decodeMaybeAlign(AlignmentData);
+ return Align ? Align->value() : 0;
}
- void setAlignment(unsigned Align);
+
+ /// FIXME: Remove this setter once the migration to MaybeAlign is over.
+ LLVM_ATTRIBUTE_DEPRECATED(void setAlignment(unsigned Align),
+ "Please use `void setAlignment(MaybeAlign Align)`");
+ void setAlignment(MaybeAlign Align);
unsigned getGlobalObjectSubClassData() const {
unsigned ValueData = getGlobalValueSubClassData();
@@ -158,6 +178,8 @@ public:
void copyMetadata(const GlobalObject *Src, unsigned Offset);
void addTypeMetadata(unsigned Offset, Metadata *TypeID);
+ void addVCallVisibilityMetadata(VCallVisibility Visibility);
+ VCallVisibility getVCallVisibility() const;
protected:
void copyAttributesFrom(const GlobalObject *Src);
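A hypothetical sketch of tagging a vtable after LTO internalization, using only the declarations above:

#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalVariable.h"
using namespace llvm;

void markInternalizedVTable(GlobalVariable &VTable) {
  // Record that, after internalization, no code outside the linkage
  // unit can dispatch a virtual call through this vtable.
  VTable.addVCallVisibilityMetadata(GlobalObject::VCallVisibilityLinkageUnit);
}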
diff --git a/include/llvm/IR/GlobalVariable.h b/include/llvm/IR/GlobalVariable.h
index 2e2c8c477913..2c730bc312e4 100644
--- a/include/llvm/IR/GlobalVariable.h
+++ b/include/llvm/IR/GlobalVariable.h
@@ -243,6 +243,7 @@ public:
bool hasImplicitSection() const {
return getAttributes().hasAttribute("bss-section") ||
getAttributes().hasAttribute("data-section") ||
+ getAttributes().hasAttribute("relro-section") ||
getAttributes().hasAttribute("rodata-section");
}
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index a74364dffb2e..d1ddb75cde9b 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -1461,7 +1461,7 @@ public:
if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
if (isa<FPMathOperator>(BinOp))
- BinOp = setFPAttrs(BinOp, FPMathTag, FMF);
+ setFPAttrs(BinOp, FPMathTag, FMF);
return Insert(BinOp, Name);
}
@@ -1479,7 +1479,8 @@ public:
CallInst *C = CreateIntrinsic(ID, {L->getType()},
{L, R, RoundingV, ExceptV}, nullptr, Name);
- return cast<CallInst>(setFPAttrs(C, FPMathTag, UseFMF));
+ setFPAttrs(C, FPMathTag, UseFMF);
+ return C;
}
Value *CreateNeg(Value *V, const Twine &Name = "",
@@ -1504,7 +1505,7 @@ public:
MDNode *FPMathTag = nullptr) {
if (auto *VC = dyn_cast<Constant>(V))
return Insert(Folder.CreateFNeg(VC), Name);
- return Insert(setFPAttrs(BinaryOperator::CreateFNeg(V), FPMathTag, FMF),
+ return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
Name);
}
@@ -1514,9 +1515,7 @@ public:
const Twine &Name = "") {
if (auto *VC = dyn_cast<Constant>(V))
return Insert(Folder.CreateFNeg(VC), Name);
- // TODO: This should return UnaryOperator::CreateFNeg(...) once we are
- // confident that they are optimized sufficiently.
- return Insert(setFPAttrs(BinaryOperator::CreateFNeg(V), nullptr,
+ return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
FMFSource->getFastMathFlags()),
Name);
}
@@ -1534,7 +1533,7 @@ public:
return Insert(Folder.CreateUnOp(Opc, VC), Name);
Instruction *UnOp = UnaryOperator::Create(Opc, V);
if (isa<FPMathOperator>(UnOp))
- UnOp = setFPAttrs(UnOp, FPMathTag, FMF);
+ setFPAttrs(UnOp, FPMathTag, FMF);
return Insert(UnOp, Name);
}
@@ -1612,19 +1611,19 @@ public:
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const char *Name) {
LoadInst *LI = CreateLoad(Ty, Ptr, Name);
- LI->setAlignment(Align);
+ LI->setAlignment(MaybeAlign(Align));
return LI;
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const Twine &Name = "") {
LoadInst *LI = CreateLoad(Ty, Ptr, Name);
- LI->setAlignment(Align);
+ LI->setAlignment(MaybeAlign(Align));
return LI;
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
bool isVolatile, const Twine &Name = "") {
LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name);
- LI->setAlignment(Align);
+ LI->setAlignment(MaybeAlign(Align));
return LI;
}
@@ -1649,7 +1648,7 @@ public:
StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
bool isVolatile = false) {
StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
- SI->setAlignment(Align);
+ SI->setAlignment(MaybeAlign(Align));
return SI;
}
@@ -1913,11 +1912,17 @@ public:
return V;
}
- Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
+ Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
+ if (IsFPConstrained)
+ return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
+ V, DestTy, nullptr, Name);
return CreateCast(Instruction::FPToUI, V, DestTy, Name);
}
- Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){
+ Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
+ if (IsFPConstrained)
+ return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
+ V, DestTy, nullptr, Name);
return CreateCast(Instruction::FPToSI, V, DestTy, Name);
}
@@ -1931,10 +1936,17 @@ public:
Value *CreateFPTrunc(Value *V, Type *DestTy,
const Twine &Name = "") {
+ if (IsFPConstrained)
+ return CreateConstrainedFPCast(
+ Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
+ Name);
return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
}
Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
+ if (IsFPConstrained)
+ return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
+ V, DestTy, nullptr, Name);
return CreateCast(Instruction::FPExt, V, DestTy, Name);
}
@@ -2046,6 +2058,37 @@ public:
return Insert(CastInst::CreateFPCast(V, DestTy), Name);
}
+ CallInst *CreateConstrainedFPCast(
+ Intrinsic::ID ID, Value *V, Type *DestTy,
+ Instruction *FMFSource = nullptr, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr,
+ Optional<ConstrainedFPIntrinsic::RoundingMode> Rounding = None,
+ Optional<ConstrainedFPIntrinsic::ExceptionBehavior> Except = None) {
+ Value *ExceptV = getConstrainedFPExcept(Except);
+
+ FastMathFlags UseFMF = FMF;
+ if (FMFSource)
+ UseFMF = FMFSource->getFastMathFlags();
+
+ CallInst *C;
+ switch (ID) {
+ default: {
+ Value *RoundingV = getConstrainedFPRounding(Rounding);
+ C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
+ nullptr, Name);
+ } break;
+ case Intrinsic::experimental_constrained_fpext:
+ case Intrinsic::experimental_constrained_fptoui:
+ case Intrinsic::experimental_constrained_fptosi:
+ C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
+ Name);
+ break;
+ }
+ if (isa<FPMathOperator>(C))
+ setFPAttrs(C, FPMathTag, UseFMF);
+ return C;
+ }
+
// Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
// compile time error, instead of converting the string to bool for the
// isSigned parameter.
@@ -2187,7 +2230,10 @@ public:
PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
const Twine &Name = "") {
- return Insert(PHINode::Create(Ty, NumReservedValues), Name);
+ PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
+ if (isa<FPMathOperator>(Phi))
+ setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
+ return Insert(Phi, Name);
}
CallInst *CreateCall(FunctionType *FTy, Value *Callee,
@@ -2195,7 +2241,7 @@ public:
MDNode *FPMathTag = nullptr) {
CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
if (isa<FPMathOperator>(CI))
- CI = cast<CallInst>(setFPAttrs(CI, FPMathTag, FMF));
+ setFPAttrs(CI, FPMathTag, FMF);
return Insert(CI, Name);
}
@@ -2204,7 +2250,7 @@ public:
const Twine &Name = "", MDNode *FPMathTag = nullptr) {
CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
if (isa<FPMathOperator>(CI))
- CI = cast<CallInst>(setFPAttrs(CI, FPMathTag, FMF));
+ setFPAttrs(CI, FPMathTag, FMF);
return Insert(CI, Name);
}
@@ -2252,7 +2298,7 @@ public:
Sel = addBranchMetadata(Sel, Prof, Unpred);
}
if (isa<FPMathOperator>(Sel))
- Sel = cast<SelectInst>(setFPAttrs(Sel, nullptr /* MDNode* */, FMF));
+ setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
return Insert(Sel, Name);
}
@@ -2454,7 +2500,7 @@ public:
}
Value *CreatePreserveArrayAccessIndex(Value *Base, unsigned Dimension,
- unsigned LastIndex) {
+ unsigned LastIndex, MDNode *DbgInfo) {
assert(isa<PointerType>(Base->getType()) &&
"Invalid Base ptr type for preserve.array.access.index.");
auto *BaseType = Base->getType();
@@ -2476,6 +2522,8 @@ public:
Value *DimV = getInt32(Dimension);
CallInst *Fn =
CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
+ if (DbgInfo)
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
}
@@ -2493,7 +2541,8 @@ public:
Value *DIIndex = getInt32(FieldIndex);
CallInst *Fn =
CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
- Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+ if (DbgInfo)
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
}
@@ -2516,7 +2565,8 @@ public:
Value *DIIndex = getInt32(FieldIndex);
CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
{Base, GEPIndex, DIIndex});
- Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+ if (DbgInfo)
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
}
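Under the new IsFPConstrained path the ordinary cast creators route to constrained intrinsics; a sketch, assuming the builder's existing setIsFPConstrained() toggle and default rounding/exception settings:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *truncToFloat(IRBuilder<> &B, Value *V) {
  B.setIsFPConstrained(true);
  // Now emits llvm.experimental.constrained.fptrunc (with the builder's
  // default rounding and exception arguments) instead of a bare fptrunc.
  return B.CreateFPTrunc(V, B.getFloatTy());
}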
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index 2aac807623a9..72d8ad1501ae 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -244,6 +244,7 @@ public:
Constraint_m,
Constraint_o,
Constraint_v,
+ Constraint_A,
Constraint_Q,
Constraint_R,
Constraint_S,
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index ca419b50da6b..7fb94e9d8c22 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -975,7 +975,7 @@ public:
static Type* makeCmpResultType(Type* opnd_type) {
if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
- vt->getNumElements());
+ vt->getElementCount());
}
return Type::getInt1Ty(opnd_type->getContext());
}
@@ -1567,11 +1567,17 @@ public:
}
/// Extract the alignment of the return value.
- unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
+ unsigned getRetAlignment() const {
+ if (const auto MA = Attrs.getRetAlignment())
+ return MA->value();
+ return 0;
+ }
/// Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned ArgNo) const {
- return Attrs.getParamAlignment(ArgNo);
+ if (const auto MA = Attrs.getParamAlignment(ArgNo))
+ return MA->value();
+ return 0;
}
/// Extract the byval type for a call or parameter.
diff --git a/include/llvm/IR/Instruction.h b/include/llvm/IR/Instruction.h
index 6a9a74bd16f0..803f6977b32c 100644
--- a/include/llvm/IR/Instruction.h
+++ b/include/llvm/IR/Instruction.h
@@ -229,6 +229,16 @@ public:
return hasMetadataHashEntry();
}
+ /// Return true if this instruction has the given type of metadata attached.
+ bool hasMetadata(unsigned KindID) const {
+ return getMetadata(KindID) != nullptr;
+ }
+
+ /// Return true if this instruction has the given type of metadata attached.
+ bool hasMetadata(StringRef Kind) const {
+ return getMetadata(Kind) != nullptr;
+ }
+
/// Get the metadata of given kind attached to this Instruction.
/// If the metadata is not found then return null.
MDNode *getMetadata(unsigned KindID) const {
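A sketch of the new convenience query (helper name hypothetical):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

bool isInvariantLoad(const Instruction &I) {
  // Shorthand for getMetadata(KindID) != nullptr.
  return I.hasMetadata(LLVMContext::MD_invariant_load);
}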
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 215ce45c7b75..fa980df03ef0 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -110,9 +110,11 @@ public:
/// Return the alignment of the memory that is being allocated by the
/// instruction.
unsigned getAlignment() const {
- return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
+ if (const auto MA = decodeMaybeAlign(getSubclassDataFromInstruction() & 31))
+ return MA->value();
+ return 0;
}
- void setAlignment(unsigned Align);
+ void setAlignment(MaybeAlign Align);
/// Return true if this alloca is in the entry block of the function and is a
/// constant size. If so, the code generator will fold it into the
@@ -182,15 +184,15 @@ public:
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, Instruction *InsertBefore = nullptr);
+ MaybeAlign Align, Instruction *InsertBefore = nullptr);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, BasicBlock *InsertAtEnd);
+ MaybeAlign Align, BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
+ MaybeAlign Align, AtomicOrdering Order,
SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
+ MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// Deprecated [opaque pointer types]
@@ -209,20 +211,20 @@ public:
BasicBlock *InsertAtEnd)
: LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
isVolatile, InsertAtEnd) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
Instruction *InsertBefore = nullptr)
: LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
isVolatile, Align, InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
BasicBlock *InsertAtEnd)
: LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
isVolatile, Align, InsertAtEnd) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr)
: LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
isVolatile, Align, Order, SSID, InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, MaybeAlign Align,
AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
: LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
isVolatile, Align, Order, SSID, InsertAtEnd) {}
@@ -238,10 +240,13 @@ public:
/// Return the alignment of the access that is being performed.
unsigned getAlignment() const {
- return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
+ if (const auto MA =
+ decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31))
+ return MA->value();
+ return 0;
}
- void setAlignment(unsigned Align);
+ void setAlignment(MaybeAlign Align);
/// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
@@ -332,17 +337,15 @@ public:
StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, Instruction *InsertBefore = nullptr);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, BasicBlock *InsertAtEnd);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SyncScope::ID SSID = SyncScope::System,
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
Instruction *InsertBefore = nullptr);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
BasicBlock *InsertAtEnd);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
+ AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
+ Instruction *InsertBefore = nullptr);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
+ AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -363,10 +366,13 @@ public:
/// Return the alignment of the access that is being performed
unsigned getAlignment() const {
- return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
+ if (const auto MA =
+ decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31))
+ return MA->value();
+ return 0;
}
- void setAlignment(unsigned Align);
+ void setAlignment(MaybeAlign Align);
/// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
@@ -1764,6 +1770,10 @@ public:
void setTrueValue(Value *V) { Op<1>() = V; }
void setFalseValue(Value *V) { Op<2>() = V; }
+ /// Swap the true and false values of the select instruction.
+ /// This doesn't swap prof metadata.
+ void swapValues() { Op<1>().swap(Op<2>()); }
+
/// Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
@@ -3455,16 +3465,7 @@ public:
class SwitchInstProfUpdateWrapper {
SwitchInst &SI;
Optional<SmallVector<uint32_t, 8> > Weights = None;
-
- // Sticky invalid state is needed to safely ignore operations with prof data
- // in cases where SwitchInstProfUpdateWrapper is created from SwitchInst
- // with inconsistent prof data. TODO: once we fix all prof data
- // inconsistencies we can turn invalid state to assertions.
- enum {
- Invalid,
- Initialized,
- Changed
- } State = Invalid;
+ bool Changed = false;
protected:
static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
@@ -3482,7 +3483,7 @@ public:
SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
~SwitchInstProfUpdateWrapper() {
- if (State == Changed)
+ if (Changed)
SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
}
@@ -3938,6 +3939,9 @@ class CallBrInst : public CallBase {
ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+  /// If the indirect destinations change, scan and update the argument list.
+ void updateArgBlockAddresses(unsigned i, BasicBlock *B);
+
/// Compute the number of operands to allocate.
static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
int NumBundleInputs = 0) {
@@ -4075,7 +4079,7 @@ public:
return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
}
BasicBlock *getIndirectDest(unsigned i) const {
- return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
+ return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
}
SmallVector<BasicBlock *, 16> getIndirectDests() const {
SmallVector<BasicBlock *, 16> IndirectDests;
@@ -4087,6 +4091,7 @@ public:
*(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
}
void setIndirectDest(unsigned i, BasicBlock *B) {
+ updateArgBlockAddresses(i, B);
*(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
}
@@ -4096,11 +4101,10 @@ public:
return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
}
- void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
- assert(idx < getNumIndirectDests() + 1 &&
+ void setSuccessor(unsigned i, BasicBlock *NewSucc) {
+ assert(i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!");
- *(&Op<-1>() - getNumIndirectDests() -1 + idx) =
- reinterpret_cast<Value *>(NewSucc);
+ return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
}
unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
@@ -5251,31 +5255,38 @@ public:
/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if not load or store.
-inline Value *getLoadStorePointerOperand(Value *V) {
+inline const Value *getLoadStorePointerOperand(const Value *V) {
if (auto *Load = dyn_cast<LoadInst>(V))
return Load->getPointerOperand();
if (auto *Store = dyn_cast<StoreInst>(V))
return Store->getPointerOperand();
return nullptr;
}
+inline Value *getLoadStorePointerOperand(Value *V) {
+ return const_cast<Value *>(
+ getLoadStorePointerOperand(static_cast<const Value *>(V)));
+}
/// A helper function that returns the pointer operand of a load, store
/// or GEP instruction. Returns nullptr if not load, store, or GEP.
-inline Value *getPointerOperand(Value *V) {
+inline const Value *getPointerOperand(const Value *V) {
if (auto *Ptr = getLoadStorePointerOperand(V))
return Ptr;
if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
return Gep->getPointerOperand();
return nullptr;
}
+inline Value *getPointerOperand(Value *V) {
+ return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
+}
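A small usage sketch (hypothetical caller): const-qualified analysis code can now call these helpers directly, and overload resolution preserves constness.

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    const Value *underlyingPointer(const Instruction &I) {
      // Picks the const overload; the non-const one above forwards through
      // a single const_cast instead of duplicating the logic.
      return getPointerOperand(&I);
    }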
/// A helper function that returns the alignment of a load or store instruction.
-inline unsigned getLoadStoreAlignment(Value *I) {
+inline MaybeAlign getLoadStoreAlignment(Value *I) {
assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction");
if (auto *LI = dyn_cast<LoadInst>(I))
- return LI->getAlignment();
- return cast<StoreInst>(I)->getAlignment();
+ return MaybeAlign(LI->getAlignment());
+ return MaybeAlign(cast<StoreInst>(I)->getAlignment());
}
/// A helper function that returns the address space of the pointer operand of
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 438bdb29b706..c989b4a2e72a 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -259,6 +259,8 @@ namespace llvm {
case Intrinsic::experimental_constrained_fdiv:
case Intrinsic::experimental_constrained_frem:
case Intrinsic::experimental_constrained_fma:
+ case Intrinsic::experimental_constrained_fptosi:
+ case Intrinsic::experimental_constrained_fptoui:
case Intrinsic::experimental_constrained_fptrunc:
case Intrinsic::experimental_constrained_fpext:
case Intrinsic::experimental_constrained_sqrt:
@@ -271,12 +273,16 @@ namespace llvm {
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_maxnum:
case Intrinsic::experimental_constrained_minnum:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
return true;
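A sketch of emitting one of the new conversions through the generic intrinsic machinery (B, M, and the float-typed F are assumed locals; note the fptosi/fptoui variants take only the value plus an exception-behavior metadata string):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    using namespace llvm;

    Value *emitConstrainedFPToSI(IRBuilder<> &B, Module &M, Value *F) {
      LLVMContext &Ctx = M.getContext();
      // Overloaded on the integer result type and the floating-point source.
      Function *Decl = Intrinsic::getDeclaration(
          &M, Intrinsic::experimental_constrained_fptosi,
          {B.getInt32Ty(), F->getType()});
      Value *Except =
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.strict"));
      return B.CreateCall(Decl, {F, Except});
    }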
@@ -405,11 +411,11 @@ namespace llvm {
setArgOperand(ARG_DEST, Ptr);
}
- void setDestAlignment(unsigned Align) {
+ void setDestAlignment(unsigned Alignment) {
removeParamAttr(ARG_DEST, Attribute::Alignment);
- if (Align > 0)
- addParamAttr(ARG_DEST,
- Attribute::getWithAlignment(getContext(), Align));
+ if (Alignment > 0)
+ addParamAttr(ARG_DEST, Attribute::getWithAlignment(getContext(),
+ Align(Alignment)));
}
void setLength(Value *L) {
@@ -454,11 +460,12 @@ namespace llvm {
BaseCL::setArgOperand(ARG_SOURCE, Ptr);
}
- void setSourceAlignment(unsigned Align) {
+ void setSourceAlignment(unsigned Alignment) {
BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
- if (Align > 0)
- BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
- BaseCL::getContext(), Align));
+ if (Alignment > 0)
+ BaseCL::addParamAttr(ARG_SOURCE,
+ Attribute::getWithAlignment(BaseCL::getContext(),
+ Align(Alignment)));
}
};
diff --git a/include/llvm/IR/Intrinsics.h b/include/llvm/IR/Intrinsics.h
index f38f92022d21..9e4ebd915afc 100644
--- a/include/llvm/IR/Intrinsics.h
+++ b/include/llvm/IR/Intrinsics.h
@@ -100,7 +100,8 @@ namespace Intrinsic {
Integer, Vector, Pointer, Struct,
Argument, ExtendArgument, TruncArgument, HalfVecArgument,
SameVecWidthArgument, PtrToArgument, PtrToElt, VecOfAnyPtrsToElt,
- VecElementArgument
+ VecElementArgument, ScalableVecArgument, Subdivide2Argument,
+ Subdivide4Argument, VecOfBitcastsToInt
} Kind;
union {
@@ -125,14 +126,17 @@ namespace Intrinsic {
assert(Kind == Argument || Kind == ExtendArgument ||
Kind == TruncArgument || Kind == HalfVecArgument ||
Kind == SameVecWidthArgument || Kind == PtrToArgument ||
- Kind == PtrToElt || Kind == VecElementArgument);
+ Kind == PtrToElt || Kind == VecElementArgument ||
+ Kind == Subdivide2Argument || Kind == Subdivide4Argument ||
+ Kind == VecOfBitcastsToInt);
return Argument_Info >> 3;
}
ArgKind getArgumentKind() const {
assert(Kind == Argument || Kind == ExtendArgument ||
Kind == TruncArgument || Kind == HalfVecArgument ||
Kind == SameVecWidthArgument || Kind == PtrToArgument ||
- Kind == VecElementArgument);
+ Kind == VecElementArgument || Kind == Subdivide2Argument ||
+ Kind == Subdivide4Argument || Kind == VecOfBitcastsToInt);
return (ArgKind)(Argument_Info & 7);
}
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index d660f8278437..7a0263f88c2a 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -63,6 +63,12 @@ class NoCapture<int argNo> : IntrinsicProperty {
int ArgNo = argNo;
}
+// NoAlias - The specified argument pointer does not alias any other "noalias"
+// pointer arguments of the intrinsic, within the scope of the intrinsic.
+class NoAlias<int argNo> : IntrinsicProperty {
+ int ArgNo = argNo;
+}
+
// Returned - The specified argument is always the return value of the
// intrinsic.
class Returned<int argNo> : IntrinsicProperty {
@@ -181,6 +187,16 @@ class LLVMVectorElementType<int num> : LLVMMatchType<num>;
// vector type, but change the element count to be half as many
class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;
+// Match the type of another intrinsic parameter that is expected to be a
+// vector type (i.e. <N x iM>), but with each element subdivided to form a
+// vector with more, narrower elements than the original.
+class LLVMSubdivide2VectorType<int num> : LLVMMatchType<num>;
+class LLVMSubdivide4VectorType<int num> : LLVMMatchType<num>;
+
+// Match the element count and bit width of another intrinsic parameter, but
+// change the element type to an integer.
+class LLVMVectorOfBitcastsToInt<int num> : LLVMMatchType<num>;
+
def llvm_void_ty : LLVMType<isVoid>;
let isAny = 1 in {
def llvm_any_ty : LLVMType<Any>;
@@ -407,9 +423,9 @@ def int_objc_arc_annotation_bottomup_bbend : Intrinsic<[],
//===--------------------- Code Generator Intrinsics ----------------------===//
//
def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
-def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
-def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
-def int_sponentry : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+def int_addressofreturnaddress : Intrinsic<[llvm_anyptr_ty], [], [IntrNoMem]>;
+def int_frameaddress : Intrinsic<[llvm_anyptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
+def int_sponentry : Intrinsic<[llvm_anyptr_ty], [], [IntrNoMem]>;
def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
[IntrReadMem], "llvm.read_register">;
def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
@@ -451,8 +467,8 @@ def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
// from being reordered too aggressively with respect to nearby accesses to the
// same memory, while not impeding optimization.
def int_prefetch
- : Intrinsic<[], [ llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ],
- [ IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
+ : Intrinsic<[], [ llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ],
+ [ IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, ReadOnly<0>, NoCapture<0>,
ImmArg<1>, ImmArg<2>]>;
def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>;
@@ -460,7 +476,7 @@ def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
// The assume intrinsic is marked as arbitrarily writing so that proper
// control dependencies will be maintained.
-def int_assume : Intrinsic<[], [llvm_i1_ty], []>;
+def int_assume : Intrinsic<[], [llvm_i1_ty], [IntrWillReturn]>;
// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
// guard to the correct place on the stack frame.
@@ -493,23 +509,23 @@ def int_instrprof_value_profile : Intrinsic<[],
def int_memcpy : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
llvm_i1_ty],
- [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
- WriteOnly<0>, ReadOnly<1>, ImmArg<3>]>;
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>,
+ NoAlias<0>, NoAlias<1>, WriteOnly<0>, ReadOnly<1>, ImmArg<3>]>;
def int_memmove : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
llvm_i1_ty],
- [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>,
ReadOnly<1>, ImmArg<3>]>;
def int_memset : Intrinsic<[],
[llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
llvm_i1_ty],
- [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>,
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<0>, WriteOnly<0>,
ImmArg<3>]>;
// FIXME: Add versions of these floating-point intrinsics which allow non-default
// rounding modes and FP exception handling.
-let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
def int_fma : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMMatchType<0>]>;
@@ -551,19 +567,19 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
def int_minnum : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, Commutative]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
def int_maxnum : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, Commutative]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
def int_minimum : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, Commutative]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
def int_maximum : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, Commutative]
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
// NOTE: these are internal interfaces.
@@ -576,13 +592,13 @@ def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
def int_objectsize : Intrinsic<[llvm_anyint_ty],
[llvm_anyptr_ty, llvm_i1_ty,
llvm_i1_ty, llvm_i1_ty],
- [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>, ImmArg<3>]>,
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<1>, ImmArg<2>, ImmArg<3>]>,
GCCBuiltin<"__builtin_object_size">;
//===--------------- Constrained Floating Point Intrinsics ----------------===//
//
-let IntrProperties = [IntrInaccessibleMemOnly] in {
+let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
def int_experimental_constrained_fadd : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
LLVMMatchType<0>,
@@ -616,6 +632,14 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_fptosi : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty ]>;
+
+ def int_experimental_constrained_fptoui : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty ]>;
+
def int_experimental_constrained_fptrunc : Intrinsic<[ llvm_anyfloat_ty ],
[ llvm_anyfloat_ty,
llvm_metadata_ty,
@@ -679,6 +703,14 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_lrint : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_llrint : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_maxnum : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
LLVMMatchType<0>,
@@ -697,6 +729,12 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_lround : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_llround : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_round : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty,
@@ -706,18 +744,19 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
llvm_metadata_ty,
llvm_metadata_ty ]>;
}
-// FIXME: Add intrinsics for fcmp, fptoui and fptosi.
+// FIXME: Add intrinsic for fcmp.
+// FIXME: Consider adding intrinsics for sitofp and uitofp.
//===------------------------- Expect Intrinsics --------------------------===//
//
def int_expect : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+ [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, IntrWillReturn]>;
//===-------------------- Bit Manipulation Intrinsics ---------------------===//
//
// None of these intrinsics accesses memory at all.
-let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
@@ -727,7 +766,7 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>;
}
-let IntrProperties = [IntrNoMem, IntrSpeculatable, ImmArg<1>] in {
+let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<1>] in {
def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
}
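Because of ImmArg<1>, the is-zero-undef flag must be a compile-time constant; a builder-side sketch (B and V are assumed locals):

    // false: the result is defined (the bit width) when V is zero.
    Value *Lz = B.CreateIntrinsic(Intrinsic::ctlz, {V->getType()},
                                  {V, B.getFalse()});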
@@ -739,7 +778,7 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable, ImmArg<1>] in {
// mean the optimizers can change them aggressively. Special handling
// needed in a few places. These synthetic intrinsics have no
// side-effects and just mark information about their operands.
-let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
def int_dbg_declare : Intrinsic<[],
[llvm_metadata_ty,
llvm_metadata_ty,
@@ -796,21 +835,21 @@ def int_eh_sjlj_setup_dispatch : Intrinsic<[], []>;
def int_var_annotation : Intrinsic<[],
[llvm_ptr_ty, llvm_ptr_ty,
llvm_ptr_ty, llvm_i32_ty],
- [], "llvm.var.annotation">;
+ [IntrWillReturn], "llvm.var.annotation">;
def int_ptr_annotation : Intrinsic<[LLVMAnyPointerType<llvm_anyint_ty>],
[LLVMMatchType<0>, llvm_ptr_ty, llvm_ptr_ty,
llvm_i32_ty],
- [], "llvm.ptr.annotation">;
+ [IntrWillReturn], "llvm.ptr.annotation">;
def int_annotation : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_ptr_ty,
llvm_ptr_ty, llvm_i32_ty],
- [], "llvm.annotation">;
+ [IntrWillReturn], "llvm.annotation">;
// Annotates the current program point with metadata strings which are emitted
// as CodeView debug info records. This is expensive, as it disables inlining
// and is modelled as having side effects.
def int_codeview_annotation : Intrinsic<[], [llvm_metadata_ty],
- [IntrInaccessibleMemOnly, IntrNoDuplicate],
+ [IntrInaccessibleMemOnly, IntrNoDuplicate, IntrWillReturn],
"llvm.codeview.annotation">;
//===------------------------ Trampoline Intrinsics -----------------------===//
@@ -828,79 +867,77 @@ def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
//
// Expose the carry flag from add operations on two integrals.
-def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
-def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
-
-def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
-def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
-
-def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
-def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
-
+let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
+ def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+
+ def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+
+ def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+}
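Grouping the properties in a let block changes nothing for users of these intrinsics; a checked add still yields a {result, overflow-bit} pair (sketch, with B, L, R assumed):

    Value *Pair = B.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow, L, R);
    Value *Sum  = B.CreateExtractValue(Pair, 0);
    Value *Ovf  = B.CreateExtractValue(Pair, 1); // i1 (or vector of i1)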
//===------------------------- Saturation Arithmetic Intrinsics ---------------------===//
//
def int_sadd_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, Commutative]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]>;
def int_uadd_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, Commutative]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]>;
def int_ssub_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_usub_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
//===------------------------- Fixed Point Arithmetic Intrinsics ---------------------===//
//
def int_smul_fix : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
def int_umul_fix : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
//===------------------- Fixed Point Saturation Arithmetic Intrinsics ----------------===//
//
def int_smul_fix_sat : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>;
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
+def int_umul_fix_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative, ImmArg<2>]>;
//===------------------------- Memory Use Markers -------------------------===//
//
def int_lifetime_start : Intrinsic<[],
[llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<1>, ImmArg<0>]>;
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<1>, ImmArg<0>]>;
def int_lifetime_end : Intrinsic<[],
[llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<1>, ImmArg<0>]>;
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<1>, ImmArg<0>]>;
def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
[llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<1>, ImmArg<0>]>;
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<1>, ImmArg<0>]>;
def int_invariant_end : Intrinsic<[],
[llvm_descriptor_ty, llvm_i64_ty,
llvm_anyptr_ty],
- [IntrArgMemOnly, NoCapture<2>, ImmArg<1>]>;
+ [IntrArgMemOnly, IntrWillReturn, NoCapture<2>, ImmArg<1>]>;
// launder.invariant.group can't be marked with 'readnone' (IntrNoMem),
// because it would cause CSE of two barriers with the same argument.
@@ -916,12 +953,12 @@ def int_invariant_end : Intrinsic<[],
// might change in the future.
def int_launder_invariant_group : Intrinsic<[llvm_anyptr_ty],
[LLVMMatchType<0>],
- [IntrInaccessibleMemOnly, IntrSpeculatable]>;
+ [IntrInaccessibleMemOnly, IntrSpeculatable, IntrWillReturn]>;
def int_strip_invariant_group : Intrinsic<[llvm_anyptr_ty],
[LLVMMatchType<0>],
- [IntrSpeculatable, IntrNoMem]>;
+ [IntrSpeculatable, IntrNoMem, IntrWillReturn]>;
//===------------------------ Stackmap Intrinsics -------------------------===//
//
@@ -964,6 +1001,14 @@ def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty,
llvm_ptr_ty, llvm_ptr_ty],
[IntrArgMemOnly, IntrReadMem,
ReadNone<1>, ReadOnly<2>, NoCapture<2>]>;
+def int_coro_id_retcon : Intrinsic<[llvm_token_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty,
+ llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
+ []>;
+def int_coro_id_retcon_once : Intrinsic<[llvm_token_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty,
+ llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
+ []>;
def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
[WriteOnly<1>]>;
@@ -979,6 +1024,13 @@ def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
+def int_coro_suspend_retcon : Intrinsic<[llvm_any_ty], [llvm_vararg_ty], []>;
+def int_coro_prepare_retcon : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
+ [IntrNoMem]>;
+def int_coro_alloca_alloc : Intrinsic<[llvm_token_ty],
+ [llvm_anyint_ty, llvm_i32_ty], []>;
+def int_coro_alloca_get : Intrinsic<[llvm_ptr_ty], [llvm_token_ty], []>;
+def int_coro_alloca_free : Intrinsic<[], [llvm_token_ty], []>;
def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
[IntrNoMem, ReadNone<0>, ReadNone<1>]>;
@@ -1018,19 +1070,19 @@ def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
// Supports widenable conditions for guards represented as explicit branches.
def int_experimental_widenable_condition : Intrinsic<[llvm_i1_ty], [],
- [IntrInaccessibleMemOnly]>;
+ [IntrInaccessibleMemOnly, IntrWillReturn]>;
// NOP: calls/invokes to this intrinsic are removed by codegen
-def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
+def int_donothing : Intrinsic<[], [], [IntrNoMem, IntrWillReturn]>;
// This instruction has no actual effect, though it is treated by the optimizer
// as having opaque side effects. This may be inserted into loops to ensure
// that they are not removed even if they turn out to be empty, for languages
// which specify that infinite loops must be preserved.
-def int_sideeffect : Intrinsic<[], [], [IntrInaccessibleMemOnly]>;
+def int_sideeffect : Intrinsic<[], [], [IntrInaccessibleMemOnly, IntrWillReturn]>;
-// Intrisics to support half precision floating point format
-let IntrProperties = [IntrNoMem] in {
+// Intrinsics to support half precision floating point format
+let IntrProperties = [IntrNoMem, IntrWillReturn] in {
def int_convert_to_fp16 : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
}
@@ -1041,7 +1093,11 @@ def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
[], "llvm.clear_cache">;
// Intrinsic to detect whether its argument is a constant.
-def int_is_constant : Intrinsic<[llvm_i1_ty], [llvm_any_ty], [IntrNoMem], "llvm.is.constant">;
+def int_is_constant : Intrinsic<[llvm_i1_ty], [llvm_any_ty], [IntrNoMem, IntrWillReturn], "llvm.is.constant">;
+
+// Intrinsic to mask out bits of a pointer.
+def int_ptrmask: Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty, llvm_anyint_ty],
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
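A hedged builder-side sketch of the new intrinsic (local names are assumptions; per the def above it is overloaded on the result pointer, source pointer, and mask integer types):

    // Clear the low four bits of P, e.g. to recover a 16-byte-aligned base.
    Value *alignDown16(IRBuilder<> &B, Module &M, Value *P) {
      Function *Decl = Intrinsic::getDeclaration(
          &M, Intrinsic::ptrmask,
          {P->getType(), P->getType(), B.getInt64Ty()});
      return B.CreateCall(Decl, {P, B.getInt64(~uint64_t(15))});
    }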
//===-------------------------- Masked Intrinsics -------------------------===//
//
@@ -1049,45 +1105,45 @@ def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
LLVMAnyPointerType<LLVMMatchType<0>>,
llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [IntrArgMemOnly, ImmArg<2>]>;
+ [IntrArgMemOnly, IntrWillReturn, ImmArg<2>]>;
def int_masked_load : Intrinsic<[llvm_anyvector_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
- [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly, IntrWillReturn, ImmArg<1>]>;
def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
[LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<0>],
- [IntrReadMem, ImmArg<1>]>;
+ [IntrReadMem, IntrWillReturn, ImmArg<1>]>;
def int_masked_scatter: Intrinsic<[],
[llvm_anyvector_ty,
LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [ImmArg<2>]>;
+ [IntrWillReturn, ImmArg<2>]>;
def int_masked_expandload: Intrinsic<[llvm_anyvector_ty],
[LLVMPointerToElt<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<0>],
- [IntrReadMem]>;
+ [IntrReadMem, IntrWillReturn]>;
def int_masked_compressstore: Intrinsic<[],
[llvm_anyvector_ty,
LLVMPointerToElt<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [IntrArgMemOnly]>;
+ [IntrArgMemOnly, IntrWillReturn]>;
// Test whether a pointer is associated with a type metadata identifier.
def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
- [IntrNoMem]>;
+ [IntrNoMem, IntrWillReturn]>;
// Safely loads a function pointer from a virtual table pointer using type metadata.
def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
- [IntrNoMem]>;
+ [IntrNoMem, IntrWillReturn]>;
// Create a branch funnel that implements an indirect call to a limited set of
// callees. This needs to be a musttail call.
@@ -1098,6 +1154,8 @@ def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
def int_hwasan_check_memaccess :
Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [IntrInaccessibleMemOnly, ImmArg<2>]>;
+def int_hwasan_check_memaccess_shortgranules :
+ Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [IntrInaccessibleMemOnly, ImmArg<2>]>;
// Xray intrinsics
//===----------------------------------------------------------------------===//
@@ -1121,7 +1179,7 @@ def int_memcpy_element_unordered_atomic
llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
],
[
- IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+ IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
ReadOnly<1>, ImmArg<3>
]>;
@@ -1132,58 +1190,47 @@ def int_memmove_element_unordered_atomic
llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
],
[
- IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+ IntrArgMemOnly, IntrWillReturn, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
ReadOnly<1>, ImmArg<3>
]>;
// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
def int_memset_element_unordered_atomic
: Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ],
- [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0>, ImmArg<3> ]>;
+ [ IntrArgMemOnly, IntrWillReturn, NoCapture<0>, WriteOnly<0>, ImmArg<3> ]>;
//===------------------------ Reduction Intrinsics ------------------------===//
//
-def int_experimental_vector_reduce_v2_fadd : Intrinsic<[llvm_anyfloat_ty],
- [LLVMMatchType<0>,
- llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_v2_fmul : Intrinsic<[llvm_anyfloat_ty],
- [LLVMMatchType<0>,
- llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_add : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_mul : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_and : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_or : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_xor : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_smax : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_smin : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_umax : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_umin : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_fmax : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-def int_experimental_vector_reduce_fmin : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
+let IntrProperties = [IntrNoMem, IntrWillReturn] in {
+ def int_experimental_vector_reduce_v2_fadd : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>,
+ llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_v2_fmul : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>,
+ llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_add : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_mul : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_and : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_or : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_xor : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_smax : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_smin : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_umax : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_umin : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_fmax : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+ def int_experimental_vector_reduce_fmin : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyvector_ty]>;
+}
//===---------- Intrinsics to control hardware supported loops ----------===//
diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 832aca4fd30f..db01700f409f 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -691,7 +691,7 @@ def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
// Memory Tagging Extensions (MTE) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_irg : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
- [IntrInaccessibleMemOnly]>;
+ [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_addg : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_aarch64_gmi : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty],
@@ -707,7 +707,7 @@ def int_aarch64_subp : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
// Generate a randomly tagged stack base pointer.
def int_aarch64_irg_sp : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty],
- [IntrInaccessibleMemOnly]>;
+ [IntrNoMem, IntrHasSideEffects]>;
// Transfer pointer tag with offset.
// ptr1 = tagp(ptr0, baseptr, tag_offset) returns a pointer where
@@ -733,3 +733,124 @@ def int_aarch64_settag_zero : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
def int_aarch64_stgp : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
[IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
}
+
+// Transactional Memory Extension (TME) Intrinsics
+let TargetPrefix = "aarch64" in {
+def int_aarch64_tstart : GCCBuiltin<"__builtin_arm_tstart">,
+ Intrinsic<[llvm_i64_ty]>;
+
+def int_aarch64_tcommit : GCCBuiltin<"__builtin_arm_tcommit">, Intrinsic<[]>;
+
+def int_aarch64_tcancel : GCCBuiltin<"__builtin_arm_tcancel">,
+ Intrinsic<[], [llvm_i64_ty], [ImmArg<0>]>;
+
+def int_aarch64_ttest : GCCBuiltin<"__builtin_arm_ttest">,
+ Intrinsic<[llvm_i64_ty], [],
+ [IntrNoMem, IntrHasSideEffects]>;
+}
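A rough sketch of how the four builtins compose in user code (an AArch64 target with TME is assumed; the zero-on-success convention for tstart follows the ACLE and should be double-checked):

    extern bool mustAbort();

    void transaction() {
      if (__builtin_arm_tstart() == 0) { // 0: transaction started
        // ... transactional region ...
        if (mustAbort())
          __builtin_arm_tcancel(0x42);   // reason must be an immediate
        else
          __builtin_arm_tcommit();
      }
      // nonzero tstart result: aborted/failed; fall back to a non-TME path
    }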
+
+def llvm_nxv2i1_ty : LLVMType<nxv2i1>;
+def llvm_nxv4i1_ty : LLVMType<nxv4i1>;
+def llvm_nxv8i1_ty : LLVMType<nxv8i1>;
+def llvm_nxv16i1_ty : LLVMType<nxv16i1>;
+def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
+def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
+def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
+def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
+def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
+def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_Merged1VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_CNT_Intrinsic
+ : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
+ [LLVMVectorOfBitcastsToInt<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_Unpack_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMSubdivide2VectorType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_PUNPKHI_Intrinsic
+ : Intrinsic<[LLVMHalfElementsVectorType<0>],
+ [llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_DOT_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMSubdivide4VectorType<0>,
+ LLVMSubdivide4VectorType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_SVE_DOT_Indexed_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMSubdivide4VectorType<0>,
+ LLVMSubdivide4VectorType<0>,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+
+  // This class of intrinsics is not intended to be useful within LLVM IR but
+  // is instead here to support some of the more rigid parts of the ACLE.
+ class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
+ : GCCBuiltin<"__builtin_sve_" # name>,
+ Intrinsic<[OUT], [OUT, llvm_nxv16i1_ty, IN], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SVE
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+
+//
+// Integer arithmetic
+//
+
+def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
+
+def int_aarch64_sve_sdot : AdvSIMD_SVE_DOT_Intrinsic;
+def int_aarch64_sve_sdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
+
+def int_aarch64_sve_udot : AdvSIMD_SVE_DOT_Intrinsic;
+def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
+
+//
+// Counting bits
+//
+
+def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;
+
+//
+// Permutations and selection
+//
+
+def int_aarch64_sve_sunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
+def int_aarch64_sve_sunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
+
+def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
+def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
+
+//
+// Floating-point comparisons
+//
+
+def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
+
+//
+// Predicate operations
+//
+
+def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;
+def int_aarch64_sve_punpklo : AdvSIMD_SVE_PUNPKHI_Intrinsic;
+}
diff --git a/include/llvm/IR/IntrinsicsAMDGPU.td b/include/llvm/IR/IntrinsicsAMDGPU.td
index 3982444b5401..ab6ee7f92dd1 100644
--- a/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -175,6 +175,7 @@ def int_amdgcn_implicit_buffer_ptr :
// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
+// FIXME: Should be mangled for wave size.
def int_amdgcn_init_exec : Intrinsic<[],
[llvm_i64_ty], // 64-bit literal constant
[IntrConvergent, ImmArg<0>]>;
@@ -185,7 +186,7 @@ def int_amdgcn_init_exec : Intrinsic<[],
def int_amdgcn_init_exec_from_input : Intrinsic<[],
[llvm_i32_ty, // 32-bit SGPR input
llvm_i32_ty], // bit offset of the thread count
- [IntrConvergent]>;
+ [IntrConvergent, ImmArg<1>]>;
def int_amdgcn_wavefrontsize :
GCCBuiltin<"__builtin_amdgcn_wavefrontsize">,
@@ -199,12 +200,14 @@ def int_amdgcn_wavefrontsize :
// The first parameter is the s_sendmsg immediate (i16);
// the second one is copied to m0.
def int_amdgcn_s_sendmsg : GCCBuiltin<"__builtin_amdgcn_s_sendmsg">,
- Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, IntrInaccessibleMemOnly]>;
+ Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
+ [ImmArg<0>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_sendmsghalt : GCCBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
- Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<0>, IntrInaccessibleMemOnly]>;
+ Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
+ [ImmArg<0>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
- Intrinsic<[], [], [IntrConvergent]>;
+ Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent]>;
def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
Intrinsic<[], [], [IntrConvergent]>;
@@ -835,9 +838,6 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = {
defm int_amdgcn_image_atomic_and : AMDGPUImageDimAtomic<"ATOMIC_AND">;
defm int_amdgcn_image_atomic_or : AMDGPUImageDimAtomic<"ATOMIC_OR">;
defm int_amdgcn_image_atomic_xor : AMDGPUImageDimAtomic<"ATOMIC_XOR">;
-
- // TODO: INC/DEC are weird: they seem to have a vdata argument in hardware,
- // even though it clearly shouldn't be needed
defm int_amdgcn_image_atomic_inc : AMDGPUImageDimAtomic<"ATOMIC_INC">;
defm int_amdgcn_image_atomic_dec : AMDGPUImageDimAtomic<"ATOMIC_DEC">;
@@ -854,8 +854,8 @@ let TargetPrefix = "amdgcn" in {
defset list<AMDGPURsrcIntrinsic> AMDGPUBufferIntrinsics = {
-class AMDGPUBufferLoad : Intrinsic <
- [llvm_any_ty],
+class AMDGPUBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
[llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
@@ -863,7 +863,7 @@ class AMDGPUBufferLoad : Intrinsic <
llvm_i1_ty], // slc(imm)
[IntrReadMem, ImmArg<3>, ImmArg<4>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
+def int_amdgcn_buffer_load_format : AMDGPUBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;
def int_amdgcn_s_buffer_load : Intrinsic <
@@ -874,9 +874,9 @@ def int_amdgcn_s_buffer_load : Intrinsic <
[IntrNoMem, ImmArg<2>]>,
AMDGPURsrcIntrinsic<0>;
-class AMDGPUBufferStore : Intrinsic <
+class AMDGPUBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
[],
- [llvm_any_ty, // vdata(VGPR)
+ [data_ty, // vdata(VGPR)
llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
@@ -884,7 +884,7 @@ class AMDGPUBufferStore : Intrinsic <
llvm_i1_ty], // slc(imm)
[IntrWriteMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
-def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
+def int_amdgcn_buffer_store_format : AMDGPUBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_buffer_store : AMDGPUBufferStore;
// New buffer intrinsics with separate raw and struct variants. The raw
@@ -894,56 +894,68 @@ def int_amdgcn_buffer_store : AMDGPUBufferStore;
// and swizzling changes depending on whether idxen is set in the instruction.
// These new intrinsics also keep the offset and soffset arguments separate as
// they behave differently in bounds checking and swizzling.
-class AMDGPURawBufferLoad : Intrinsic <
- [llvm_any_ty],
+class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
[llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrReadMem, ImmArg<3>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad;
+def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
-class AMDGPUStructBufferLoad : Intrinsic <
- [llvm_any_ty],
+class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
[llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrReadMem, ImmArg<4>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
+def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
-class AMDGPURawBufferStore : Intrinsic <
+class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
[],
- [llvm_any_ty, // vdata(VGPR)
+ [data_ty, // vdata(VGPR)
llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrWriteMem, ImmArg<4>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
-def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore;
+def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;
-class AMDGPUStructBufferStore : Intrinsic <
+class AMDGPUStructBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
[],
- [llvm_any_ty, // vdata(VGPR)
+ [data_ty, // vdata(VGPR)
llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrWriteMem, ImmArg<5>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
-def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
+def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;
-class AMDGPURawBufferAtomic : Intrinsic <
- [llvm_anyint_ty],
+class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
[LLVMMatchType<0>, // vdata(VGPR)
llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
@@ -961,6 +973,8 @@ def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -972,8 +986,8 @@ def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
[ImmArg<5>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<2, 0>;
-class AMDGPUStructBufferAtomic : Intrinsic <
- [llvm_anyint_ty],
+class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
[LLVMMatchType<0>, // vdata(VGPR)
llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
@@ -992,6 +1006,8 @@ def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
@@ -1046,7 +1062,10 @@ def int_amdgcn_raw_tbuffer_load : Intrinsic <
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrReadMem, ImmArg<3>, ImmArg<4>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
@@ -1057,7 +1076,10 @@ def int_amdgcn_raw_tbuffer_store : Intrinsic <
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrWriteMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
@@ -1068,7 +1090,10 @@ def int_amdgcn_struct_tbuffer_load : Intrinsic <
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrReadMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
@@ -1080,7 +1105,10 @@ def int_amdgcn_struct_tbuffer_store : Intrinsic <
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
- llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
[IntrWriteMem, ImmArg<5>, ImmArg<6>], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
@@ -1431,6 +1459,13 @@ def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
[LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
+// Copies the source value to the destination value, such that the source
+// is computed as if the entire program were executed in WQM if any other
+// program code executes in WQM.
+def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty],
+ [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
+>;
+
// Return true if at least one thread within the pixel quad passes true into
// the function.
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
@@ -1459,6 +1494,18 @@ def int_amdgcn_set_inactive :
LLVMMatchType<0>], // value for the inactive lanes to take
[IntrNoMem, IntrConvergent]>;
+// Return true if the given flat pointer points to a local memory address.
+def int_amdgcn_is_shared : GCCBuiltin<"__builtin_amdgcn_is_shared">,
+ Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+>;
+
+// Return true if the given flat pointer points to a private memory address.
+def int_amdgcn_is_private : GCCBuiltin<"__builtin_amdgcn_is_private">,
+ Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+>;
+
//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/IntrinsicsARM.td b/include/llvm/IR/IntrinsicsARM.td
index 4792af097d95..e13da6157e04 100644
--- a/include/llvm/IR/IntrinsicsARM.td
+++ b/include/llvm/IR/IntrinsicsARM.td
@@ -777,5 +777,14 @@ class Neon_Dot_Intrinsic
def int_arm_neon_udot : Neon_Dot_Intrinsic;
def int_arm_neon_sdot : Neon_Dot_Intrinsic;
+def int_arm_vctp8 : Intrinsic<[llvm_v16i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_vctp16 : Intrinsic<[llvm_v8i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_vctp32 : Intrinsic<[llvm_v4i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_vctp64 : Intrinsic<[llvm_v2i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// GNU eabi mcount
+def int_arm_gnu_eabi_mcount : Intrinsic<[],
+ [],
+ [IntrReadMem, IntrWriteMem]>;
} // end TargetPrefix
diff --git a/include/llvm/IR/IntrinsicsBPF.td b/include/llvm/IR/IntrinsicsBPF.td
index d7595a2a7700..3618cc6a4128 100644
--- a/include/llvm/IR/IntrinsicsBPF.td
+++ b/include/llvm/IR/IntrinsicsBPF.td
@@ -20,4 +20,7 @@ let TargetPrefix = "bpf" in { // All intrinsics start with "llvm.bpf."
Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
def int_bpf_pseudo : GCCBuiltin<"__builtin_bpf_pseudo">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]>;
+ def int_bpf_preserve_field_info : GCCBuiltin<"__builtin_bpf_preserve_field_info">,
+ Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i64_ty],
+ [IntrNoMem, ImmArg<1>]>;
}
diff --git a/include/llvm/IR/IntrinsicsMips.td b/include/llvm/IR/IntrinsicsMips.td
index 6393a9ca35d5..bfcdd80a52d5 100644
--- a/include/llvm/IR/IntrinsicsMips.td
+++ b/include/llvm/IR/IntrinsicsMips.td
@@ -1260,16 +1260,16 @@ def int_mips_insve_d : GCCBuiltin<"__builtin_msa_insve_d">,
def int_mips_ld_b : GCCBuiltin<"__builtin_msa_ld_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ld_h : GCCBuiltin<"__builtin_msa_ld_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ld_w : GCCBuiltin<"__builtin_msa_ld_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ld_d : GCCBuiltin<"__builtin_msa_ld_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>;
+ [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ldi_b : GCCBuiltin<"__builtin_msa_ldi_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>;
@@ -1684,16 +1684,16 @@ def int_mips_srlri_d : GCCBuiltin<"__builtin_msa_srlri_d">,
def int_mips_st_b : GCCBuiltin<"__builtin_msa_st_b">,
Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrArgMemOnly, ImmArg<2>]>;
+ [IntrArgMemOnly]>;
def int_mips_st_h : GCCBuiltin<"__builtin_msa_st_h">,
Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrArgMemOnly, ImmArg<2>]>;
+ [IntrArgMemOnly]>;
def int_mips_st_w : GCCBuiltin<"__builtin_msa_st_w">,
Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrArgMemOnly, ImmArg<2>]>;
+ [IntrArgMemOnly]>;
def int_mips_st_d : GCCBuiltin<"__builtin_msa_st_d">,
Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrArgMemOnly, ImmArg<2>]>;
+ [IntrArgMemOnly]>;
def int_mips_subs_s_b : GCCBuiltin<"__builtin_msa_subs_s_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
diff --git a/include/llvm/IR/IntrinsicsNVVM.td b/include/llvm/IR/IntrinsicsNVVM.td
index dba7dd76c4ff..0483d965ba64 100644
--- a/include/llvm/IR/IntrinsicsNVVM.td
+++ b/include/llvm/IR/IntrinsicsNVVM.td
@@ -276,6 +276,26 @@ class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b
);
}
+class SHFL_INFO<bit sync, string mode, string type, bit return_pred> {
+ string Suffix = !if(sync, "sync_", "")
+ # mode # "_"
+ # type
+ # !if(return_pred, "p", "");
+
+ string Name = "int_nvvm_shfl_" # Suffix;
+ string Builtin = "__nvvm_shfl_" # Suffix;
+ string IntrName = "llvm.nvvm.shfl." # !subst("_",".", Suffix);
+ list<int> withGccBuiltin = !if(return_pred, [], [1]);
+ list<int> withoutGccBuiltin = !if(return_pred, [1], []);
+ LLVMType OpType = !cond(
+ !eq(type,"i32"): llvm_i32_ty,
+ !eq(type,"f32"): llvm_float_ty);
+ list<LLVMType> RetTy = !if(return_pred, [OpType, llvm_i1_ty], [OpType]);
+ list<LLVMType> ArgsTy = !if(sync,
+ [llvm_i32_ty, OpType, llvm_i32_ty, llvm_i32_ty],
+ [OpType, llvm_i32_ty, llvm_i32_ty]);
+}
+
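Worked instantiation: SHFL_INFO<1, "down", "f32", 0> has Suffix "sync_down_f32", so the generator loop below defines int_nvvm_shfl_sync_down_f32, bound to GCCBuiltin __nvvm_shfl_sync_down_f32 with IR name llvm.nvvm.shfl.sync.down.f32, returning [llvm_float_ty] and taking [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], exactly matching the hand-written def it replaces.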
let TargetPrefix = "nvvm" in {
def int_nvvm_prmt : GCCBuiltin<"__nvvm_prmt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
@@ -3955,90 +3975,27 @@ def int_nvvm_read_ptx_sreg_warpsize : PTXReadSRegIntrinsic_r32<"warpsize">;
//
// SHUFFLE
//
-
-// shfl.down.b32 dest, val, offset, mask_and_clamp
-def int_nvvm_shfl_down_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.down.i32">,
- GCCBuiltin<"__nvvm_shfl_down_i32">;
-def int_nvvm_shfl_down_f32 :
- Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.down.f32">,
- GCCBuiltin<"__nvvm_shfl_down_f32">;
-
-// shfl.up.b32 dest, val, offset, mask_and_clamp
-def int_nvvm_shfl_up_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.up.i32">,
- GCCBuiltin<"__nvvm_shfl_up_i32">;
-def int_nvvm_shfl_up_f32 :
- Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.up.f32">,
- GCCBuiltin<"__nvvm_shfl_up_f32">;
-
-// shfl.bfly.b32 dest, val, offset, mask_and_clamp
-def int_nvvm_shfl_bfly_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.bfly.i32">,
- GCCBuiltin<"__nvvm_shfl_bfly_i32">;
-def int_nvvm_shfl_bfly_f32 :
- Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.bfly.f32">,
- GCCBuiltin<"__nvvm_shfl_bfly_f32">;
-
-// shfl.idx.b32 dest, val, lane, mask_and_clamp
-def int_nvvm_shfl_idx_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.idx.i32">,
- GCCBuiltin<"__nvvm_shfl_idx_i32">;
-def int_nvvm_shfl_idx_f32 :
- Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.idx.f32">,
- GCCBuiltin<"__nvvm_shfl_idx_f32">;
-
-// Synchronizing shfl variants available in CUDA-9.
-// On sm_70 these don't have to be convergent, so we may eventually want to
-// implement non-convergent variant of this intrinsic.
-
-// shfl.sync.down.b32 dest, threadmask, val, offset , mask_and_clamp
-def int_nvvm_shfl_sync_down_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.down.i32">,
- GCCBuiltin<"__nvvm_shfl_sync_down_i32">;
-def int_nvvm_shfl_sync_down_f32 :
- Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.down.f32">,
- GCCBuiltin<"__nvvm_shfl_sync_down_f32">;
-
-// shfl.sync.up.b32 dest, threadmask, val, offset, mask_and_clamp
-def int_nvvm_shfl_sync_up_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.up.i32">,
- GCCBuiltin<"__nvvm_shfl_sync_up_i32">;
-def int_nvvm_shfl_sync_up_f32 :
- Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.up.f32">,
- GCCBuiltin<"__nvvm_shfl_sync_up_f32">;
-
-// shfl.sync.bfly.b32 dest, threadmask, val, offset, mask_and_clamp
-def int_nvvm_shfl_sync_bfly_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.bfly.i32">,
- GCCBuiltin<"__nvvm_shfl_sync_bfly_i32">;
-def int_nvvm_shfl_sync_bfly_f32 :
- Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.bfly.f32">,
- GCCBuiltin<"__nvvm_shfl_sync_bfly_f32">;
-
-// shfl.sync.idx.b32 dest, threadmask, val, lane, mask_and_clamp
-def int_nvvm_shfl_sync_idx_i32 :
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.idx.i32">,
- GCCBuiltin<"__nvvm_shfl_sync_idx_i32">;
-def int_nvvm_shfl_sync_idx_f32 :
- Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrConvergent], "llvm.nvvm.shfl.sync.idx.f32">,
- GCCBuiltin<"__nvvm_shfl_sync_idx_f32">;
+// Generate intrinsics for all variants of the shfl instruction.
+foreach sync = [0, 1] in {
+ foreach mode = ["up", "down", "bfly", "idx"] in {
+ foreach type = ["i32", "f32"] in {
+ foreach return_pred = [0, 1] in {
+ foreach i = [SHFL_INFO<sync, mode, type, return_pred>] in {
+ foreach _ = i.withGccBuiltin in {
+ def i.Name : GCCBuiltin<i.Builtin>,
+ Intrinsic<i.RetTy, i.ArgsTy,
+ [IntrInaccessibleMemOnly, IntrConvergent],
+ i.IntrName>;
+ }
+ foreach _ = i.withoutGccBuiltin in {
+ def i.Name : Intrinsic<i.RetTy, i.ArgsTy,
+ [IntrInaccessibleMemOnly, IntrConvergent], i.IntrName>;
+ }
+ }
+ }
+ }
+ }
+}
//
// VOTE
diff --git a/include/llvm/IR/IntrinsicsWebAssembly.td b/include/llvm/IR/IntrinsicsWebAssembly.td
index 1b892727547d..810979b99934 100644
--- a/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -24,6 +24,17 @@ def int_wasm_memory_grow : Intrinsic<[llvm_anyint_ty],
[]>;
//===----------------------------------------------------------------------===//
+// Trapping float-to-int conversions
+//===----------------------------------------------------------------------===//
+
+def int_wasm_trunc_signed : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+def int_wasm_trunc_unsigned : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+
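Both new intrinsics are overloaded on the integer result and floating-point operand types, so a concrete instantiation must supply both. A minimal sketch (not part of this patch), assuming an i32-from-f64 variant is wanted:

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// The overload types are listed result first, matching the
// [llvm_anyint_ty] return and [llvm_anyfloat_ty] operand above.
llvm::Function *getTruncSigned(llvm::Module &M) {
  llvm::LLVMContext &C = M.getContext();
  return llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::wasm_trunc_signed,
      {llvm::Type::getInt32Ty(C), llvm::Type::getDoubleTy(C)});
}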
+//===----------------------------------------------------------------------===//
// Saturating float-to-int conversions
//===----------------------------------------------------------------------===//
@@ -89,6 +100,10 @@ def int_wasm_atomic_notify:
// SIMD intrinsics
//===----------------------------------------------------------------------===//
+def int_wasm_swizzle :
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_saturate_signed :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
@@ -109,6 +124,39 @@ def int_wasm_alltrue :
Intrinsic<[llvm_i32_ty],
[llvm_anyvector_ty],
[IntrNoMem, IntrSpeculatable]>;
+def int_wasm_qfma :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_qfms :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_narrow_signed :
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_narrow_unsigned :
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_low_signed :
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_high_signed :
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_low_unsigned :
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_high_unsigned :
+ Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+
//===----------------------------------------------------------------------===//
// Bulk memory intrinsics
@@ -133,4 +181,14 @@ def int_wasm_tls_size :
[],
[IntrNoMem, IntrSpeculatable]>;
+def int_wasm_tls_align :
+ Intrinsic<[llvm_anyint_ty],
+ [],
+ [IntrNoMem, IntrSpeculatable]>;
+
+def int_wasm_tls_base :
+ Intrinsic<[llvm_ptr_ty],
+ [],
+ [IntrReadMem]>;
+
} // TargetPrefix = "wasm"
diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index 236d312d7d78..5796686dd79f 100644
--- a/include/llvm/IR/IntrinsicsX86.td
+++ b/include/llvm/IR/IntrinsicsX86.td
@@ -2091,16 +2091,20 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_ptr_ty], [], []>;
def int_x86_lwpins32 :
GCCBuiltin<"__builtin_ia32_lwpins32">,
- Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [ImmArg<2>]>;
def int_x86_lwpins64 :
GCCBuiltin<"__builtin_ia32_lwpins64">,
- Intrinsic<[llvm_i8_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ Intrinsic<[llvm_i8_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [ImmArg<2>]>;
def int_x86_lwpval32 :
GCCBuiltin<"__builtin_ia32_lwpval32">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [ImmArg<2>]>;
def int_x86_lwpval64 :
GCCBuiltin<"__builtin_ia32_lwpval64">,
- Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [ImmArg<2>]>;
}
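A hedged sketch of the caller-side consequence: with ImmArg<2>, the IR verifier rejects calls whose third operand is not an immediate, so front ends must materialize a ConstantInt. The helper name emitLwpins32 and the operand names are assumptions.

#include <cstdint>

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// Hypothetical helper: the third operand of llvm.x86.lwpins32 must now be
// an immediate, so it is taken as a plain uint32_t and turned into a
// ConstantInt rather than accepted as an arbitrary llvm::Value.
llvm::Value *emitLwpins32(llvm::IRBuilder<> &B, llvm::Module &M,
                          llvm::Value *Val, llvm::Value *Data,
                          uint32_t Flags) {
  llvm::Function *LwpIns =
      llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::x86_lwpins32);
  return B.CreateCall(LwpIns, {Val, Data, B.getInt32(Flags)});
}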
//===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index c80504500418..91bd57dc5ac0 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -72,34 +72,9 @@ public:
// Pinned metadata names, which always have the same value. This is a
// compile-time performance optimization, not a correctness optimization.
enum : unsigned {
- MD_dbg = 0, // "dbg"
- MD_tbaa = 1, // "tbaa"
- MD_prof = 2, // "prof"
- MD_fpmath = 3, // "fpmath"
- MD_range = 4, // "range"
- MD_tbaa_struct = 5, // "tbaa.struct"
- MD_invariant_load = 6, // "invariant.load"
- MD_alias_scope = 7, // "alias.scope"
- MD_noalias = 8, // "noalias",
- MD_nontemporal = 9, // "nontemporal"
- MD_mem_parallel_loop_access = 10, // "llvm.mem.parallel_loop_access"
- MD_nonnull = 11, // "nonnull"
- MD_dereferenceable = 12, // "dereferenceable"
- MD_dereferenceable_or_null = 13, // "dereferenceable_or_null"
- MD_make_implicit = 14, // "make.implicit"
- MD_unpredictable = 15, // "unpredictable"
- MD_invariant_group = 16, // "invariant.group"
- MD_align = 17, // "align"
- MD_loop = 18, // "llvm.loop"
- MD_type = 19, // "type"
- MD_section_prefix = 20, // "section_prefix"
- MD_absolute_symbol = 21, // "absolute_symbol"
- MD_associated = 22, // "associated"
- MD_callees = 23, // "callees"
- MD_irr_loop = 24, // "irr_loop"
- MD_access_group = 25, // "llvm.access.group"
- MD_callback = 26, // "callback"
- MD_preserve_access_index = 27, // "llvm.preserve.*.access.index"
+#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
+#include "llvm/IR/FixedMetadataKinds.def"
+#undef LLVM_FIXED_MD_KIND
};
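The expansion works like any X-macro: each LLVM_FIXED_MD_KIND(EnumID, Name, Value) entry in the .def file becomes one enumerator. A minimal self-contained illustration, using only the two entries visible in the removed lines above:

// FixedMetadataKinds.def is a list of lines such as:
//   LLVM_FIXED_MD_KIND(MD_dbg, "dbg", 0)
//   LLVM_FIXED_MD_KIND(MD_tbaa, "tbaa", 1)
// With the definition below, each line expands to "EnumID = Value,".
#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
enum : unsigned {
  LLVM_FIXED_MD_KIND(MD_dbg, "dbg", 0)   // -> MD_dbg = 0,
  LLVM_FIXED_MD_KIND(MD_tbaa, "tbaa", 1) // -> MD_tbaa = 1,
};
#undef LLVM_FIXED_MD_KIND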
/// Known operand bundle tag IDs, which always have the same value. All
diff --git a/include/llvm/IR/MDBuilder.h b/include/llvm/IR/MDBuilder.h
index 3a2b1bddf45d..11e2e2623257 100644
--- a/include/llvm/IR/MDBuilder.h
+++ b/include/llvm/IR/MDBuilder.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/DataTypes.h"
#include <utility>
@@ -75,6 +76,10 @@ public:
/// Return metadata containing the section prefix for a function.
MDNode *createFunctionSectionPrefix(StringRef Prefix);
+ /// Return metadata containing the expected value.
+ MDNode *createMisExpect(uint64_t Index, uint64_t LikelyWeight,
+ uint64_t UnlikelyWeight);
+
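A hedged usage sketch for the new builder method; attaching the node by its string kind name sidesteps any dependence on a fixed metadata-kind enum, and the kind name "misexpect", the helper name, and the weight values are all assumptions here.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/MDBuilder.h"

// Hypothetical helper: tag a branch/switch with misexpect metadata, where
// Index names the likely case and the weights mirror those used for
// llvm.expect-derived branch weights.
void tagMisexpect(llvm::Instruction &I) {
  llvm::MDBuilder MDB(I.getContext());
  I.setMetadata("misexpect",
                MDB.createMisExpect(/*Index=*/0, /*LikelyWeight=*/2000,
                                    /*UnlikelyWeight=*/1));
}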
//===------------------------------------------------------------------===//
// Range metadata.
//===------------------------------------------------------------------===//
diff --git a/include/llvm/IR/Metadata.h b/include/llvm/IR/Metadata.h
index 7ca2540181ba..f62b1e246cca 100644
--- a/include/llvm/IR/Metadata.h
+++ b/include/llvm/IR/Metadata.h
@@ -601,7 +601,7 @@ dyn_extract_or_null(Y &&MD) {
/// These are used to efficiently contain a byte sequence for metadata.
/// MDString is always unnamed.
class MDString : public Metadata {
- friend class StringMapEntry<MDString>;
+ friend class StringMapEntryStorage<MDString>;
StringMapEntry<MDString> *Entry = nullptr;
@@ -806,7 +806,7 @@ public:
/// Ensure that this has RAUW support, and then return it.
ReplaceableMetadataImpl *getOrCreateReplaceableUses() {
if (!hasReplaceableUses())
- makeReplaceable(llvm::make_unique<ReplaceableMetadataImpl>(getContext()));
+ makeReplaceable(std::make_unique<ReplaceableMetadataImpl>(getContext()));
return getReplaceableUses();
}
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index f458680cfe15..59331142766a 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -46,6 +46,7 @@ class FunctionType;
class GVMaterializer;
class LLVMContext;
class MemoryBuffer;
+class Pass;
class RandomNumberGenerator;
template <class PtrType> class SmallPtrSetImpl;
class StructType;
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index aacf8cfc089f..be60447abd87 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -119,7 +119,7 @@ class GlobalValueSummary;
using GlobalValueSummaryList = std::vector<std::unique_ptr<GlobalValueSummary>>;
-struct LLVM_ALIGNAS(8) GlobalValueSummaryInfo {
+struct alignas(8) GlobalValueSummaryInfo {
union NameOrGV {
NameOrGV(bool HaveGVs) {
if (HaveGVs)
@@ -603,7 +603,7 @@ public:
if (!TypeTests.empty() || !TypeTestAssumeVCalls.empty() ||
!TypeCheckedLoadVCalls.empty() || !TypeTestAssumeConstVCalls.empty() ||
!TypeCheckedLoadConstVCalls.empty())
- TIdInfo = llvm::make_unique<TypeIdInfo>(TypeIdInfo{
+ TIdInfo = std::make_unique<TypeIdInfo>(TypeIdInfo{
std::move(TypeTests), std::move(TypeTestAssumeVCalls),
std::move(TypeCheckedLoadVCalls),
std::move(TypeTestAssumeConstVCalls),
@@ -632,6 +632,8 @@ public:
/// Return the list of <CalleeValueInfo, CalleeInfo> pairs.
ArrayRef<EdgeTy> calls() const { return CallGraphEdgeList; }
+ void addCall(EdgeTy E) { CallGraphEdgeList.push_back(E); }
+
/// Returns the list of type identifiers used by this function in
/// llvm.type.test intrinsics other than by an llvm.assume intrinsic,
/// represented as GUIDs.
@@ -680,7 +682,7 @@ public:
/// were unable to devirtualize a checked call.
void addTypeTest(GlobalValue::GUID Guid) {
if (!TIdInfo)
- TIdInfo = llvm::make_unique<TypeIdInfo>();
+ TIdInfo = std::make_unique<TypeIdInfo>();
TIdInfo->TypeTests.push_back(Guid);
}
@@ -780,7 +782,7 @@ public:
void setVTableFuncs(VTableFuncList Funcs) {
assert(!VTableFuncs);
- VTableFuncs = llvm::make_unique<VTableFuncList>(std::move(Funcs));
+ VTableFuncs = std::make_unique<VTableFuncList>(std::move(Funcs));
}
ArrayRef<VirtFuncOffset> vTableFuncs() const {
@@ -1293,6 +1295,12 @@ public:
return nullptr;
}
+ TypeIdSummary *getTypeIdSummary(StringRef TypeId) {
+ return const_cast<TypeIdSummary *>(
+ static_cast<const ModuleSummaryIndex *>(this)->getTypeIdSummary(
+ TypeId));
+ }
+
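The non-const overload follows the standard C++ pattern of forwarding to the const implementation through const_cast, so the lookup logic is written only once. A self-contained sketch of the idiom, with generic names rather than LLVM code:

#include <map>

struct Table {
  std::map<int, int> M;

  const int *find(int Key) const {
    auto It = M.find(Key);
    return It == M.end() ? nullptr : &It->second;
  }

  int *find(int Key) {
    // Safe: *this is known to be non-const in this overload, so casting
    // the const result back to mutable cannot invoke undefined behavior.
    return const_cast<int *>(static_cast<const Table *>(this)->find(Key));
  }
};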
const std::map<std::string, TypeIdCompatibleVtableInfo> &
typeIdCompatibleVtableMap() const {
return TypeIdCompatibleVtableMap;
@@ -1411,7 +1419,7 @@ template <>
struct GraphTraits<ModuleSummaryIndex *> : public GraphTraits<ValueInfo> {
static NodeRef getEntryNode(ModuleSummaryIndex *I) {
std::unique_ptr<GlobalValueSummary> Root =
- make_unique<FunctionSummary>(I->calculateCallGraphRoot());
+ std::make_unique<FunctionSummary>(I->calculateCallGraphRoot());
GlobalValueSummaryInfo G(I->haveGVs());
G.SummaryList.push_back(std::move(Root));
static auto P =
diff --git a/include/llvm/IR/ModuleSummaryIndexYAML.h b/include/llvm/IR/ModuleSummaryIndexYAML.h
index 26d9c43fabf1..4d4a67c75172 100644
--- a/include/llvm/IR/ModuleSummaryIndexYAML.h
+++ b/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -220,7 +220,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
V.emplace(RefGUID, /*IsAnalysis=*/false);
Refs.push_back(ValueInfo(/*IsAnalysis=*/false, &*V.find(RefGUID)));
}
- Elem.SummaryList.push_back(llvm::make_unique<FunctionSummary>(
+ Elem.SummaryList.push_back(std::make_unique<FunctionSummary>(
GlobalValueSummary::GVFlags(
static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal, FSum.CanAutoHide),
diff --git a/include/llvm/IR/Operator.h b/include/llvm/IR/Operator.h
index 8199c65ca8a0..037f5aed03ee 100644
--- a/include/llvm/IR/Operator.h
+++ b/include/llvm/IR/Operator.h
@@ -379,16 +379,25 @@ public:
return false;
switch (Opcode) {
+ case Instruction::FNeg:
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ // FIXME: To clean up and correct the semantics of fast-math-flags, FCmp
+ // should not be treated as a math op, but the other opcodes should.
+ // This would make things consistent with Select/PHI (FP value type
+ // determines whether they are math ops and, therefore, capable of
+ // having fast-math-flags).
case Instruction::FCmp:
return true;
- // non math FP Operators (no FMF)
- case Instruction::ExtractElement:
- case Instruction::ShuffleVector:
- case Instruction::InsertElement:
case Instruction::PHI:
- return false;
- default:
+ case Instruction::Select:
+ case Instruction::Call:
return V->getType()->isFPOrFPVectorTy();
+ default:
+ return false;
}
}
};
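A hedged sketch of the user-visible effect: FP-typed selects, PHIs, and calls now classify as FPMathOperator, so existing fast-math-flag queries start applying to them. The function name is an assumption; V is any value from a parsed module.

#include "llvm/IR/Operator.h"

bool allowsReassociation(const llvm::Value *V) {
  // dyn_cast succeeds for FP-typed math ops, which after this change
  // includes select/phi/call in addition to fneg/fadd/.../fcmp.
  if (auto *FPOp = llvm::dyn_cast<llvm::FPMathOperator>(V))
    return FPOp->hasAllowReassoc();
  return false;
}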
diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h
index 37fe2a5b01ad..1e1f4a92f844 100644
--- a/include/llvm/IR/PassManager.h
+++ b/include/llvm/IR/PassManager.h
@@ -45,6 +45,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/PassInstrumentation.h"
#include "llvm/IR/PassManagerInternal.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TypeName.h"
#include "llvm/Support/raw_ostream.h"
@@ -418,7 +419,7 @@ template <typename PassT, typename IRUnitT, typename AnalysisManagerT,
typename PassT::Result
getAnalysisResultUnpackTuple(AnalysisManagerT &AM, IRUnitT &IR,
std::tuple<ArgTs...> Args,
- llvm::index_sequence<Ns...>) {
+ std::index_sequence<Ns...>) {
(void)Args;
return AM.template getResult<PassT>(IR, std::get<Ns>(Args)...);
}
@@ -435,7 +436,7 @@ getAnalysisResult(AnalysisManager<IRUnitT, AnalysisArgTs...> &AM, IRUnitT &IR,
std::tuple<MainArgTs...> Args) {
return (getAnalysisResultUnpackTuple<
PassT, IRUnitT>)(AM, IR, Args,
- llvm::index_sequence_for<AnalysisArgTs...>{});
+ std::index_sequence_for<AnalysisArgTs...>{});
}
} // namespace detail
diff --git a/include/llvm/IR/PassManagerInternal.h b/include/llvm/IR/PassManagerInternal.h
index 58198bf67b11..c602c0b5cc20 100644
--- a/include/llvm/IR/PassManagerInternal.h
+++ b/include/llvm/IR/PassManagerInternal.h
@@ -289,7 +289,7 @@ struct AnalysisPassModel : AnalysisPassConcept<IRUnitT, PreservedAnalysesT,
AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
ExtraArgTs... ExtraArgs) override {
- return llvm::make_unique<ResultModelT>(
+ return std::make_unique<ResultModelT>(
Pass.run(IR, AM, std::forward<ExtraArgTs>(ExtraArgs)...));
}
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
index 0f03d7cc56b8..2851b24c05ae 100644
--- a/include/llvm/IR/PatternMatch.h
+++ b/include/llvm/IR/PatternMatch.h
@@ -88,6 +88,25 @@ inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
/// Match an arbitrary Constant and ignore it.
inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
+/// Match an arbitrary basic block value and ignore it.
+inline class_match<BasicBlock> m_BasicBlock() {
+ return class_match<BasicBlock>();
+}
+
+/// Inverting matcher
+template <typename Ty> struct match_unless {
+ Ty M;
+
+ match_unless(const Ty &Matcher) : M(Matcher) {}
+
+ template <typename ITy> bool match(ITy *V) { return !M.match(V); }
+};
+
+/// Match if the inner matcher does *NOT* match.
+template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) {
+ return match_unless<Ty>(M);
+}
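A minimal usage sketch; the wrapper function name is an assumption.

#include "llvm/IR/PatternMatch.h"

bool isNotConstantInt(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  // m_Unless inverts the inner matcher: this succeeds exactly when V is
  // not a scalar ConstantInt.
  return match(V, m_Unless(m_ConstantInt()));
}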
+
/// Matching combinators
template <typename LTy, typename RTy> struct match_combine_or {
LTy L;
@@ -300,6 +319,15 @@ template <typename Predicate> struct cstfp_pred_ty : public Predicate {
//
///////////////////////////////////////////////////////////////////////////////
+struct is_any_apint {
+ bool isValue(const APInt &C) { return true; }
+};
+/// Match an integer or vector with any integral constant.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() {
+ return cst_pred_ty<is_any_apint>();
+}
+
struct is_all_ones {
bool isValue(const APInt &C) { return C.isAllOnesValue(); }
};
@@ -388,6 +416,18 @@ inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
return V;
}
+struct is_negated_power2 {
+ bool isValue(const APInt &C) { return (-C).isPowerOf2(); }
+};
+/// Match an integer or vector negated power-of-2.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_negated_power2> m_NegatedPower2() {
+ return cst_pred_ty<is_negated_power2>();
+}
+inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) {
+ return V;
+}
+
struct is_power2_or_zero {
bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
};
@@ -528,6 +568,12 @@ inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
/// Match a ConstantFP, capturing the value if we match.
inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
+/// Match a basic block value, capturing it if we match.
+inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; }
+inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) {
+ return V;
+}
+
/// Match a specified Value*.
struct specificval_ty {
const Value *Val;
@@ -597,11 +643,11 @@ struct bind_const_intval_ty {
};
/// Match a specified integer value or vector of all elements of that
-// value.
+/// value.
struct specific_intval {
- uint64_t Val;
+ APInt Val;
- specific_intval(uint64_t V) : Val(V) {}
+ specific_intval(APInt V) : Val(std::move(V)) {}
template <typename ITy> bool match(ITy *V) {
const auto *CI = dyn_cast<ConstantInt>(V);
@@ -609,18 +655,50 @@ struct specific_intval {
if (const auto *C = dyn_cast<Constant>(V))
CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
- return CI && CI->getValue() == Val;
+ return CI && APInt::isSameValue(CI->getValue(), Val);
}
};
/// Match a specific integer value or vector with all elements equal to
/// the value.
-inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }
+inline specific_intval m_SpecificInt(APInt V) {
+ return specific_intval(std::move(V));
+}
+
+inline specific_intval m_SpecificInt(uint64_t V) {
+ return m_SpecificInt(APInt(64, V));
+}
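A sketch of what the widened overload enables: matching a constant at its native bit width instead of forcing everything through uint64_t. The wrapper name is an assumption.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"

bool isFortyTwo(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  // APInt::isSameValue treats equal values of different widths as equal,
  // so this matches an i8 42, an i32 42, or a splat vector of 42.
  return match(V, m_SpecificInt(llvm::APInt(8, 42)));
}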
/// Match a ConstantInt and bind to its value. This does not match
/// ConstantInts wider than 64-bits.
inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
+/// Match a specified basic block value.
+struct specific_bbval {
+ BasicBlock *Val;
+
+ specific_bbval(BasicBlock *Val) : Val(Val) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ const auto *BB = dyn_cast<BasicBlock>(V);
+ return BB && BB == Val;
+ }
+};
+
+/// Match a specific basic block value.
+inline specific_bbval m_SpecificBB(BasicBlock *BB) {
+ return specific_bbval(BB);
+}
+
+/// A commutative-friendly version of m_Specific().
+inline deferredval_ty<BasicBlock> m_Deferred(BasicBlock *const &BB) {
+ return BB;
+}
+inline deferredval_ty<const BasicBlock>
+m_Deferred(const BasicBlock *const &BB) {
+ return BB;
+}
+
//===----------------------------------------------------------------------===//
// Matcher for any binary operator.
//
@@ -968,6 +1046,12 @@ struct is_idiv_op {
}
};
+struct is_irem_op {
+ bool isOpType(unsigned Opcode) {
+ return Opcode == Instruction::SRem || Opcode == Instruction::URem;
+ }
+};
+
/// Matches shift operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
@@ -1003,6 +1087,13 @@ inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
}
+/// Matches integer remainder operations.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_irem_op> m_IRem(const LHS &L,
+ const RHS &R) {
+ return BinOpPred_match<LHS, RHS, is_irem_op>(L, R);
+}
+
//===----------------------------------------------------------------------===//
// Class that matches exact binary ops.
//
@@ -1210,6 +1301,12 @@ inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
return CastClass_match<OpTy, Instruction::Trunc>(Op);
}
+template <typename OpTy>
+inline match_combine_or<CastClass_match<OpTy, Instruction::Trunc>, OpTy>
+m_TruncOrSelf(const OpTy &Op) {
+ return m_CombineOr(m_Trunc(Op), Op);
+}
+
/// Matches SExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
@@ -1223,12 +1320,33 @@ inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
}
template <typename OpTy>
+inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, OpTy>
+m_ZExtOrSelf(const OpTy &Op) {
+ return m_CombineOr(m_ZExt(Op), Op);
+}
+
+template <typename OpTy>
+inline match_combine_or<CastClass_match<OpTy, Instruction::SExt>, OpTy>
+m_SExtOrSelf(const OpTy &Op) {
+ return m_CombineOr(m_SExt(Op), Op);
+}
+
+template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
CastClass_match<OpTy, Instruction::SExt>>
m_ZExtOrSExt(const OpTy &Op) {
return m_CombineOr(m_ZExt(Op), m_SExt(Op));
}
+template <typename OpTy>
+inline match_combine_or<
+ match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
+ CastClass_match<OpTy, Instruction::SExt>>,
+ OpTy>
+m_ZExtOrSExtOrSelf(const OpTy &Op) {
+ return m_CombineOr(m_ZExtOrSExt(Op), Op);
+}
+
/// Matches UIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
@@ -1274,27 +1392,34 @@ struct br_match {
inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
-template <typename Cond_t> struct brc_match {
+template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
+struct brc_match {
Cond_t Cond;
- BasicBlock *&T, *&F;
+ TrueBlock_t T;
+ FalseBlock_t F;
- brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
+ brc_match(const Cond_t &C, const TrueBlock_t &t, const FalseBlock_t &f)
: Cond(C), T(t), F(f) {}
template <typename OpTy> bool match(OpTy *V) {
if (auto *BI = dyn_cast<BranchInst>(V))
- if (BI->isConditional() && Cond.match(BI->getCondition())) {
- T = BI->getSuccessor(0);
- F = BI->getSuccessor(1);
- return true;
- }
+ if (BI->isConditional() && Cond.match(BI->getCondition()))
+ return T.match(BI->getSuccessor(0)) && F.match(BI->getSuccessor(1));
return false;
}
};
template <typename Cond_t>
-inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
- return brc_match<Cond_t>(C, T, F);
+inline brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>
+m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
+ return brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>(
+ C, m_BasicBlock(T), m_BasicBlock(F));
+}
+
+template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
+inline brc_match<Cond_t, TrueBlock_t, FalseBlock_t>
+m_Br(const Cond_t &C, const TrueBlock_t &T, const FalseBlock_t &F) {
+ return brc_match<Cond_t, TrueBlock_t, FalseBlock_t>(C, T, F);
}
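A usage sketch combining the new block matchers; the helper name and the Exit block are assumptions.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"

// Hypothetical helper: does BI conditionally branch with its false edge
// into Exit? The condition and true successor are captured on success.
bool branchesToExit(llvm::BranchInst *BI, llvm::BasicBlock *Exit,
                    llvm::Value *&Cond, llvm::BasicBlock *&TrueBB) {
  using namespace llvm::PatternMatch;
  return match(BI, m_Br(m_Value(Cond), m_BasicBlock(TrueBB),
                        m_SpecificBB(Exit)));
}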
//===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/RemarkStreamer.h b/include/llvm/IR/RemarkStreamer.h
index f34cc660b2fb..2abf6f99cb08 100644
--- a/include/llvm/IR/RemarkStreamer.h
+++ b/include/llvm/IR/RemarkStreamer.h
@@ -25,12 +25,12 @@
namespace llvm {
/// Streamer for remarks.
class RemarkStreamer {
- /// The filename that the remark diagnostics are emitted to.
- const std::string Filename;
/// The regex used to filter remarks based on the passes that emit them.
Optional<Regex> PassFilter;
/// The object used to serialize the remarks to a specific format.
- std::unique_ptr<remarks::Serializer> Serializer;
+ std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer;
+ /// The filename that the remark diagnostics are emitted to.
+ const Optional<std::string> Filename;
/// Convert diagnostics into remark objects.
/// The lifetime of the members of the result is bound to the lifetime of
@@ -38,14 +38,16 @@ class RemarkStreamer {
remarks::Remark toRemark(const DiagnosticInfoOptimizationBase &Diag);
public:
- RemarkStreamer(StringRef Filename,
- std::unique_ptr<remarks::Serializer> Serializer);
+ RemarkStreamer(std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer,
+ Optional<StringRef> Filename = None);
/// Return the filename that the remark diagnostics are emitted to.
- StringRef getFilename() const { return Filename; }
+ Optional<StringRef> getFilename() const {
+ return Filename ? Optional<StringRef>(*Filename) : None;
+ }
/// Return stream that the remark diagnostics are emitted to.
- raw_ostream &getStream() { return Serializer->OS; }
+ raw_ostream &getStream() { return RemarkSerializer->OS; }
/// Return the serializer used for this stream.
- remarks::Serializer &getSerializer() { return *Serializer; }
+ remarks::RemarkSerializer &getSerializer() { return *RemarkSerializer; }
/// Set a pass filter based on a regex \p Filter.
/// Returns an error if the regex is invalid.
Error setFilter(StringRef Filter);
@@ -84,13 +86,21 @@ struct RemarkSetupFormatError : RemarkSetupErrorInfo<RemarkSetupFormatError> {
using RemarkSetupErrorInfo<RemarkSetupFormatError>::RemarkSetupErrorInfo;
};
-/// Setup optimization remarks.
+/// Setup optimization remarks that output to a file.
Expected<std::unique_ptr<ToolOutputFile>>
setupOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
StringRef RemarksPasses, StringRef RemarksFormat,
bool RemarksWithHotness,
unsigned RemarksHotnessThreshold = 0);
+/// Setup optimization remarks that output directly to a raw_ostream.
+/// \p OS is managed by the caller and should be open for writing as long as \p
+/// Context is streaming remarks to it.
+Error setupOptimizationRemarks(LLVMContext &Context, raw_ostream &OS,
+ StringRef RemarksPasses, StringRef RemarksFormat,
+ bool RemarksWithHotness,
+ unsigned RemarksHotnessThreshold = 0);
+
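A hedged sketch of the new overload, streaming remarks into an in-memory buffer instead of a file; the helper name and the "yaml" format string are assumptions, and the statics stand in for whatever caller-managed storage outlives the context, per the lifetime note above.

#include <string>

#include "llvm/IR/RemarkStreamer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

void streamRemarksToString(llvm::LLVMContext &Ctx) {
  static std::string Buf;                  // must outlive the streaming
  static llvm::raw_string_ostream OS(Buf); // caller-managed stream
  if (llvm::Error E = llvm::setupOptimizationRemarks(
          Ctx, OS, /*RemarksPasses=*/"", /*RemarksFormat=*/"yaml",
          /*RemarksWithHotness=*/false))
    llvm::logAllUnhandledErrors(std::move(E), llvm::errs(),
                                "remark setup: ");
}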
} // end namespace llvm
#endif // LLVM_IR_REMARKSTREAMER_H
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index f2aa49030aaa..d0961dac833d 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -21,6 +21,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
#include <iterator>
@@ -281,12 +282,15 @@ public:
/// This will return zero if the type does not have a size or is not a
/// primitive type.
///
+ /// If this is a scalable vector type, the scalable property will be set and
+ /// the runtime size will be a positive integer multiple of the base size.
+ ///
/// Note that this may not reflect the size of memory allocated for an
/// instance of the type or the number of bytes that are written when an
/// instance of the type is stored to memory. The DataLayout class provides
/// additional query functions to provide this information.
///
- unsigned getPrimitiveSizeInBits() const LLVM_READONLY;
+ TypeSize getPrimitiveSizeInBits() const LLVM_READONLY;
/// If this is a vector type, return the getPrimitiveSizeInBits value for the
/// element type. Otherwise return the getPrimitiveSizeInBits value for this
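A sketch of why the return type changes: for scalable vectors the size is only known as a multiple of a minimum, which TypeSize can represent and a plain unsigned cannot. The scalable-vector overload of VectorType::get and the TypeSize accessors are assumed to be as in the headers added alongside this change.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/TypeSize.h"

void inspect(llvm::LLVMContext &Ctx) {
  // <vscale x 4 x i32>: 128 bits times an unknown runtime factor.
  llvm::Type *VTy = llvm::VectorType::get(llvm::Type::getInt32Ty(Ctx), 4,
                                          /*Scalable=*/true);
  llvm::TypeSize TS = VTy->getPrimitiveSizeInBits();
  bool Scalable = TS.isScalable();          // true
  uint64_t MinBits = TS.getKnownMinSize();  // 128
  (void)Scalable; (void)MinBits;
}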
@@ -368,6 +372,7 @@ public:
inline bool getVectorIsScalable() const;
inline unsigned getVectorNumElements() const;
+ inline ElementCount getVectorElementCount() const;
Type *getVectorElementType() const {
assert(getTypeID() == VectorTyID);
return ContainedTys[0];
@@ -378,6 +383,14 @@ public:
return ContainedTys[0];
}
+ /// Given an integer or vector type, change the lane bitwidth to NewBitWidth,
+ /// whilst keeping the old number of lanes.
+ inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
+
+ /// Given a scalar/vector integer type, returns a type with elements twice as
+ /// wide as in the original type. For vectors, preserves the element count.
+ inline Type *getExtendedType() const;
+
/// Get the address space of this pointer or pointer vector type.
inline unsigned getPointerAddressSpace() const;
diff --git a/include/llvm/IR/User.h b/include/llvm/IR/User.h
index 19d87c5c621d..850ee72a0387 100644
--- a/include/llvm/IR/User.h
+++ b/include/llvm/IR/User.h
@@ -111,7 +111,7 @@ public:
#endif
}
/// Placement delete - required by std, called if the ctor throws.
- void operator delete(void *Usr, unsigned, bool) {
+ void operator delete(void *Usr, unsigned, unsigned) {
// Note: If a subclass manipulates the information which is required to calculate the
// Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// to restore the changed information to the original value, since the dtor of that class
diff --git a/include/llvm/IR/Value.h b/include/llvm/IR/Value.h
index b2d8e7ac4741..f2c4b3b3f203 100644
--- a/include/llvm/IR/Value.h
+++ b/include/llvm/IR/Value.h
@@ -14,8 +14,10 @@
#define LLVM_IR_VALUE_H
#include "llvm-c/Types.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Use.h"
+#include "llvm/Support/Alignment.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include <cassert>
@@ -292,10 +294,29 @@ public:
/// "V" instead of "this". This function skips metadata entries in the list.
void replaceNonMetadataUsesWith(Value *V);
+ /// Go through the uses list for this definition and make each use point
+ /// to "New" if the callback ShouldReplace returns true for the given Use.
+ /// Unlike replaceAllUsesWith() this function does not support basic block
+ /// values or constant users.
+ void replaceUsesWithIf(Value *New,
+ llvm::function_ref<bool(Use &U)> ShouldReplace) {
+ assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
+ assert(New->getType() == getType() &&
+ "replaceUses of value with new value of different type!");
+
+ for (use_iterator UI = use_begin(), E = use_end(); UI != E;) {
+ Use &U = *UI;
+ ++UI;
+ if (!ShouldReplace(U))
+ continue;
+ U.set(New);
+ }
+ }
+
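A usage sketch: rewrite only the uses of Old that sit inside function F. The helper name is an assumption.

#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

void replaceInFunction(llvm::Value *Old, llvm::Value *New,
                       llvm::Function &F) {
  Old->replaceUsesWithIf(New, [&](llvm::Use &U) {
    auto *I = llvm::dyn_cast<llvm::Instruction>(U.getUser());
    return I && I->getFunction() == &F;
  });
}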
/// replaceUsesOutsideBlock - Go through the uses list for this definition and
/// make each use point to "V" instead of "this" when the use is outside the
/// block. 'This's use list is expected to have at least one element.
- /// Unlike replaceAllUsesWith this function does not support basic block
+ /// Unlike replaceAllUsesWith() this function does not support basic block
/// values or constant users.
void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
@@ -493,17 +514,27 @@ public:
/// swifterror attribute.
bool isSwiftError() const;
- /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
+ /// Strip off pointer casts, all-zero GEPs and address space casts.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'.
const Value *stripPointerCasts() const;
Value *stripPointerCasts() {
return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCasts());
+ static_cast<const Value *>(this)->stripPointerCasts());
}
- /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases
+ /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
+ ///
+ /// Returns the original uncasted value. If this is called on a non-pointer
+ /// value, it returns 'this'.
+ const Value *stripPointerCastsAndAliases() const;
+ Value *stripPointerCastsAndAliases() {
+ return const_cast<Value *>(
+ static_cast<const Value *>(this)->stripPointerCastsAndAliases());
+ }
+
+ /// Strip off pointer casts, all-zero GEPs and address space casts
/// but ensures the representation of the result stays the same.
///
/// Returns the original uncasted value with the same representation. If this
@@ -514,26 +545,15 @@ public:
->stripPointerCastsSameRepresentation());
}
- /// Strip off pointer casts, all-zero GEPs, aliases and invariant group
- /// info.
+ /// Strip off pointer casts, all-zero GEPs and invariant group info.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'. This function should be used only in
/// Alias analysis.
const Value *stripPointerCastsAndInvariantGroups() const;
Value *stripPointerCastsAndInvariantGroups() {
- return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCastsAndInvariantGroups());
- }
-
- /// Strip off pointer casts and all-zero GEPs.
- ///
- /// Returns the original uncasted value. If this is called on a non-pointer
- /// value, it returns 'this'.
- const Value *stripPointerCastsNoFollowAliases() const;
- Value *stripPointerCastsNoFollowAliases() {
- return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCastsNoFollowAliases());
+ return const_cast<Value *>(static_cast<const Value *>(this)
+ ->stripPointerCastsAndInvariantGroups());
}
/// Strip off pointer casts and all-constant inbounds GEPs.
@@ -612,7 +632,7 @@ public:
///
/// Returns an alignment which is either specified explicitly, e.g. via
/// align attribute of a function argument, or guaranteed by DataLayout.
- unsigned getPointerAlignment(const DataLayout &DL) const;
+ MaybeAlign getPointerAlignment(const DataLayout &DL) const;
/// Translate PHI node to its predecessor from the given basic block.
///
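The MaybeAlign return makes "unknown" explicit instead of encoding it as 0; a short sketch of the query, where the helper name is an assumption:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"

uint64_t knownAlignOrOne(const llvm::Value *V, const llvm::DataLayout &DL) {
  // An empty MaybeAlign means no alignment fact is available.
  if (llvm::MaybeAlign MA = V->getPointerAlignment(DL))
    return MA->value();
  return 1;
}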
diff --git a/include/llvm/IR/ValueMap.h b/include/llvm/IR/ValueMap.h
index 6a79b1d387f3..fb5440d5efe8 100644
--- a/include/llvm/IR/ValueMap.h
+++ b/include/llvm/IR/ValueMap.h
@@ -33,11 +33,11 @@
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Mutex.h"
-#include "llvm/Support/UniqueLock.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
+#include <mutex>
#include <type_traits>
#include <utility>
@@ -93,7 +93,6 @@ class ValueMap {
MapT Map;
Optional<MDMapT> MDMap;
ExtraData Data;
- bool MayMapMetadata = true;
public:
using key_type = KeyT;
@@ -120,10 +119,6 @@ public:
}
Optional<MDMapT> &getMDMap() { return MDMap; }
- bool mayMapMetadata() const { return MayMapMetadata; }
- void enableMapMetadata() { MayMapMetadata = true; }
- void disableMapMetadata() { MayMapMetadata = false; }
-
/// Get the mapped metadata, if it's in the map.
Optional<Metadata *> getMappedMD(const Metadata *MD) const {
if (!MDMap)
@@ -266,9 +261,9 @@ public:
// Make a copy that won't get changed even when *this is destroyed.
ValueMapCallbackVH Copy(*this);
typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
- unique_lock<typename Config::mutex_type> Guard;
+ std::unique_lock<typename Config::mutex_type> Guard;
if (M)
- Guard = unique_lock<typename Config::mutex_type>(*M);
+ Guard = std::unique_lock<typename Config::mutex_type>(*M);
Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
Copy.Map->Map.erase(Copy); // Definitely destroys *this.
}
@@ -279,9 +274,9 @@ public:
// Make a copy that won't get changed even when *this is destroyed.
ValueMapCallbackVH Copy(*this);
typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
- unique_lock<typename Config::mutex_type> Guard;
+ std::unique_lock<typename Config::mutex_type> Guard;
if (M)
- Guard = unique_lock<typename Config::mutex_type>(*M);
+ Guard = std::unique_lock<typename Config::mutex_type>(*M);
KeyT typed_new_key = cast<KeySansPointerT>(new_key);
// Can destroy *this: