aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/llvm/include
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2021-12-25 22:36:56 +0000
committerDimitry Andric <dim@FreeBSD.org>2022-05-14 11:44:01 +0000
commit0eae32dcef82f6f06de6419a0d623d7def0cc8f6 (patch)
tree55b7e05be47b835fd137915bee1e64026c35e71c /contrib/llvm-project/llvm/include
parent4824e7fd18a1223177218d4aec1b3c6c5c4a444e (diff)
parent77fc4c146f0870ffb09c1afb823ccbe742c5e6ff (diff)
downloadsrc-0eae32dcef82f6f06de6419a0d623d7def0cc8f6.tar.gz
src-0eae32dcef82f6f06de6419a0d623d7def0cc8f6.zip
Diffstat (limited to 'contrib/llvm-project/llvm/include')
-rw-r--r--contrib/llvm-project/llvm/include/llvm-c/Core.h86
-rw-r--r--contrib/llvm-project/llvm/include/llvm-c/Deprecated.h38
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleImpl.h411
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h334
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/GenericSSAContext.h74
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/PointerUnion.h26
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h55
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h34
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/Triple.h58
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/CycleAnalysis.h77
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h25
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h59
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h39
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h67
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h67
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h20
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/AsmParser/LLParser.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/AsmParser/LLToken.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h52
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h38
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h28
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MIRYamlMapping.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineCycleAnalysis.h31
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h21
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h20
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassRegistry.def2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAContext.h58
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAUpdater.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h32
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/VLIWMachineScheduler.h268
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h17
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h31
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFCommon.h20
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFError.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/Native/PDBFile.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h71
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h (renamed from contrib/llvm-project/llvm/include/llvm/Support/HTTPClient.h)6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h27
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Layer.h33
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Mangling.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h38
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h62
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Attributes.h27
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Attributes.td3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/AttributesAMDGPU.td14
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Constants.h35
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/DataLayout.h33
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Instructions.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td23
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsHexagonDep.td1109
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td28
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsWebAssembly.td32
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Module.h11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h56
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Value.def1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/InitializePasses.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCAssembler.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCObjectFileInfo.h19
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCObjectStreamer.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h14
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/MachO.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Option/ArgList.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Passes/PassBuilder.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h170
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h26
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h67
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ARMEHABI.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Caching.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Chrono.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Compiler.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h627
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h30
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h21
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ToolOutputFile.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ProfiledCallGraph.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/SampleContextTracker.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/FlattenCFG.h25
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h58
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/FunctionComparator.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h47
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h32
-rw-r--r--contrib/llvm-project/llvm/include/llvm/module.modulemap1
123 files changed, 4296 insertions, 887 deletions
diff --git a/contrib/llvm-project/llvm/include/llvm-c/Core.h b/contrib/llvm-project/llvm/include/llvm-c/Core.h
index f2183ff52bfb..ae2bcb8444b4 100644
--- a/contrib/llvm-project/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm-project/llvm/include/llvm-c/Core.h
@@ -15,6 +15,7 @@
#ifndef LLVM_C_CORE_H
#define LLVM_C_CORE_H
+#include "llvm-c/Deprecated.h"
#include "llvm-c/ErrorHandling.h"
#include "llvm-c/ExternC.h"
#include "llvm-c/Types.h"
@@ -2151,13 +2152,18 @@ LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate,
LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
-LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
- LLVMValueRef *ConstantIndices, unsigned NumIndices);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices),
+ "Use LLVMConstGEP2 instead to support opaque pointers");
LLVMValueRef LLVMConstGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices, unsigned NumIndices);
-LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
- LLVMValueRef *ConstantIndices,
- unsigned NumIndices);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices),
+ "Use LLVMConstInBoundsGEP2 instead to support opaque pointers");
LLVMValueRef LLVMConstInBoundsGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices,
unsigned NumIndices);
@@ -2378,9 +2384,10 @@ void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit);
* @{
*/
-/** Deprecated: Use LLVMAddAlias2 instead. */
-LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
- const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty,
+ LLVMValueRef Aliasee, const char *Name),
+ "Use LLVMAddAlias2 instead to support opaque pointers");
/**
* Add a GlobalAlias with the given value type, address space and aliasee.
@@ -3497,7 +3504,7 @@ LLVMTypeRef LLVMGetAllocatedType(LLVMValueRef Alloca);
*/
/**
- * Check whether the given GEP instruction is inbounds.
+ * Check whether the given GEP operator is inbounds.
*/
LLVMBool LLVMIsInBounds(LLVMValueRef GEP);
@@ -3507,6 +3514,11 @@ LLVMBool LLVMIsInBounds(LLVMValueRef GEP);
void LLVMSetIsInBounds(LLVMValueRef GEP, LLVMBool InBounds);
/**
+ * Get the source element type of the given GEP operator.
+ */
+LLVMTypeRef LLVMGetGEPSourceElementType(LLVMValueRef GEP);
+
+/**
* @}
*/
@@ -3556,7 +3568,7 @@ LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index);
/**
* Obtain the number of indices.
- * NB: This also works on GEP.
+ * NB: This also works on GEP operators.
*/
unsigned LLVMGetNumIndices(LLVMValueRef Inst);
@@ -3676,12 +3688,12 @@ LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef, LLVMValueRef V,
LLVMBasicBlockRef Else, unsigned NumCases);
LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr,
unsigned NumDests);
-// LLVMBuildInvoke is deprecated in favor of LLVMBuildInvoke2, in preparation
-// for opaque pointer types.
-LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn,
- LLVMValueRef *Args, unsigned NumArgs,
- LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
- const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then,
+ LLVMBasicBlockRef Catch, const char *Name),
+ "Use LLVMBuildInvoke2 instead to support opaque pointers");
LLVMValueRef LLVMBuildInvoke2(LLVMBuilderRef, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
@@ -3875,23 +3887,27 @@ LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef, LLVMTypeRef Ty,
LLVMValueRef Val, const char *Name);
LLVMValueRef LLVMBuildFree(LLVMBuilderRef, LLVMValueRef PointerVal);
-// LLVMBuildLoad is deprecated in favor of LLVMBuildLoad2, in preparation for
-// opaque pointer types.
-LLVMValueRef LLVMBuildLoad(LLVMBuilderRef, LLVMValueRef PointerVal,
- const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildLoad(LLVMBuilderRef, LLVMValueRef PointerVal,
+ const char *Name),
+ "Use LLVMBuildLoad2 instead to support opaque pointers");
LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef, LLVMTypeRef Ty,
LLVMValueRef PointerVal, const char *Name);
LLVMValueRef LLVMBuildStore(LLVMBuilderRef, LLVMValueRef Val, LLVMValueRef Ptr);
-// LLVMBuildGEP, LLVMBuildInBoundsGEP, and LLVMBuildStructGEP are deprecated in
-// favor of LLVMBuild*GEP2, in preparation for opaque pointer types.
-LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
- LLVMValueRef *Indices, unsigned NumIndices,
- const char *Name);
-LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
- LLVMValueRef *Indices, unsigned NumIndices,
- const char *Name);
-LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
- unsigned Idx, const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name),
+ "Use LLVMBuildGEP2 instead to support opaque pointers");
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name),
+ "Use LLVMBuildInBoundsGEP2 instead to support opaque pointers");
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ unsigned Idx, const char *Name),
+ "Use LLVMBuildStructGEP2 instead to support opaque pointers");
LLVMValueRef LLVMBuildGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
LLVMValueRef Pointer, LLVMValueRef *Indices,
unsigned NumIndices, const char *Name);
@@ -3971,11 +3987,11 @@ LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef, LLVMRealPredicate Op,
/* Miscellaneous instructions */
LLVMValueRef LLVMBuildPhi(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
-// LLVMBuildCall is deprecated in favor of LLVMBuildCall2, in preparation for
-// opaque pointer types.
-LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn,
- LLVMValueRef *Args, unsigned NumArgs,
- const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name),
+ "Use LLVMBuildCall2 instead to support opaque pointers");
LLVMValueRef LLVMBuildCall2(LLVMBuilderRef, LLVMTypeRef, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
const char *Name);
diff --git a/contrib/llvm-project/llvm/include/llvm-c/Deprecated.h b/contrib/llvm-project/llvm/include/llvm-c/Deprecated.h
new file mode 100644
index 000000000000..1ed5d11550d2
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm-c/Deprecated.h
@@ -0,0 +1,38 @@
+/*===-- llvm-c/Deprecated.h - Deprecation macro -------------------*- C -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares LLVM_ATTRIBUTE_C_DEPRECATED() macro, which can be *|
+|* used to deprecate functions in the C interface. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_DEPRECATED_H
+#define LLVM_C_DEPRECATED_H
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+// This is a variant of LLVM_ATTRIBUTE_DEPRECATED() that is compatible with
+// C compilers.
+#if __has_feature(attribute_deprecated_with_message)
+# define LLVM_ATTRIBUTE_C_DEPRECATED(decl, message) \
+ decl __attribute__((deprecated(message)))
+#elif defined(__GNUC__)
+# define LLVM_ATTRIBUTE_C_DEPRECATED(decl, message) \
+ decl __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define LLVM_ATTRIBUTE_C_DEPRECATED(decl, message) \
+ __declspec(deprecated(message)) decl
+#else
+# define LLVM_ATTRIBUTE_C_DEPRECATED(decl, message) \
+ decl
+#endif
+
+#endif /* LLVM_C_DEPRECATED_H */
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleImpl.h b/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleImpl.h
new file mode 100644
index 000000000000..5f29236eac47
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleImpl.h
@@ -0,0 +1,411 @@
+//===- GenericCycleImpl.h -------------------------------------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This template implementation resides in a separate file so that it
+// does not get injected into every .cpp file that includes the
+// generic header.
+//
+// DO NOT INCLUDE THIS FILE WHEN MERELY USING CYCLEINFO.
+//
+// This file should only be included by files that implement a
+// specialization of the relevant templates. Currently these are:
+// - CycleAnalysis.cpp
+// - MachineCycleAnalysis.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GENERICCYCLEIMPL_H
+#define LLVM_ADT_GENERICCYCLEIMPL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GenericCycleInfo.h"
+
+#define DEBUG_TYPE "generic-cycle-impl"
+
+namespace llvm {
+
+template <typename ContextT>
+bool GenericCycle<ContextT>::contains(const GenericCycle *C) const {
+ if (!C)
+ return false;
+
+ if (Depth > C->Depth)
+ return false;
+ while (Depth < C->Depth)
+ C = C->ParentCycle;
+ return this == C;
+}
+
+template <typename ContextT>
+void GenericCycle<ContextT>::getExitBlocks(
+ SmallVectorImpl<BlockT *> &TmpStorage) const {
+ TmpStorage.clear();
+
+ size_t NumExitBlocks = 0;
+ for (BlockT *Block : blocks()) {
+ llvm::append_range(TmpStorage, successors(Block));
+
+ for (size_t Idx = NumExitBlocks, End = TmpStorage.size(); Idx < End;
+ ++Idx) {
+ BlockT *Succ = TmpStorage[Idx];
+ if (!contains(Succ)) {
+ auto ExitEndIt = TmpStorage.begin() + NumExitBlocks;
+ if (std::find(TmpStorage.begin(), ExitEndIt, Succ) == ExitEndIt)
+ TmpStorage[NumExitBlocks++] = Succ;
+ }
+ }
+
+ TmpStorage.resize(NumExitBlocks);
+ }
+}
+
+/// \brief Helper class for computing cycle information.
+template <typename ContextT> class GenericCycleInfoCompute {
+ using BlockT = typename ContextT::BlockT;
+ using CycleInfoT = GenericCycleInfo<ContextT>;
+ using CycleT = typename CycleInfoT::CycleT;
+
+ CycleInfoT &Info;
+
+ struct DFSInfo {
+ unsigned Start = 0; // DFS start; positive if block is found
+ unsigned End = 0; // DFS end
+
+ DFSInfo() {}
+ explicit DFSInfo(unsigned Start) : Start(Start) {}
+
+ /// Whether this node is an ancestor (or equal to) the node \p Other
+ /// in the DFS tree.
+ bool isAncestorOf(const DFSInfo &Other) const {
+ return Start <= Other.Start && Other.End <= End;
+ }
+ };
+
+ DenseMap<BlockT *, DFSInfo> BlockDFSInfo;
+ SmallVector<BlockT *, 8> BlockPreorder;
+
+ GenericCycleInfoCompute(const GenericCycleInfoCompute &) = delete;
+ GenericCycleInfoCompute &operator=(const GenericCycleInfoCompute &) = delete;
+
+public:
+ GenericCycleInfoCompute(CycleInfoT &Info) : Info(Info) {}
+
+ void run(BlockT *EntryBlock);
+
+ static void updateDepth(CycleT *SubTree);
+
+private:
+ void dfs(BlockT *EntryBlock);
+};
+
+template <typename ContextT>
+auto GenericCycleInfo<ContextT>::getTopLevelParentCycle(
+ const BlockT *Block) const -> CycleT * {
+ auto MapIt = BlockMap.find(Block);
+ if (MapIt == BlockMap.end())
+ return nullptr;
+
+ auto *C = MapIt->second;
+ while (C->ParentCycle)
+ C = C->ParentCycle;
+ return C;
+}
+
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::moveToNewParent(CycleT *NewParent,
+ CycleT *Child) {
+ auto &CurrentContainer =
+ Child->ParentCycle ? Child->ParentCycle->Children : TopLevelCycles;
+ auto Pos = llvm::find_if(CurrentContainer, [=](const auto &Ptr) -> bool {
+ return Child == Ptr.get();
+ });
+ assert(Pos != CurrentContainer.end());
+ NewParent->Children.push_back(std::move(*Pos));
+ *Pos = std::move(CurrentContainer.back());
+ CurrentContainer.pop_back();
+ Child->ParentCycle = NewParent;
+}
+
+/// \brief Main function of the cycle info computations.
+template <typename ContextT>
+void GenericCycleInfoCompute<ContextT>::run(BlockT *EntryBlock) {
+ LLVM_DEBUG(errs() << "Entry block: " << Info.Context.print(EntryBlock)
+ << "\n");
+ dfs(EntryBlock);
+
+ SmallVector<BlockT *, 8> Worklist;
+
+ for (BlockT *HeaderCandidate : llvm::reverse(BlockPreorder)) {
+ const DFSInfo CandidateInfo = BlockDFSInfo.lookup(HeaderCandidate);
+
+ for (BlockT *Pred : predecessors(HeaderCandidate)) {
+ const DFSInfo PredDFSInfo = BlockDFSInfo.lookup(Pred);
+ if (CandidateInfo.isAncestorOf(PredDFSInfo))
+ Worklist.push_back(Pred);
+ }
+ if (Worklist.empty()) {
+ continue;
+ }
+
+ // Found a cycle with the candidate as its header.
+ LLVM_DEBUG(errs() << "Found cycle for header: "
+ << Info.Context.print(HeaderCandidate) << "\n");
+ std::unique_ptr<CycleT> NewCycle = std::make_unique<CycleT>();
+ NewCycle->appendEntry(HeaderCandidate);
+ NewCycle->appendBlock(HeaderCandidate);
+ Info.BlockMap.try_emplace(HeaderCandidate, NewCycle.get());
+
+ // Helper function to process (non-back-edge) predecessors of a discovered
+ // block and either add them to the worklist or recognize that the given
+ // block is an additional cycle entry.
+ auto ProcessPredecessors = [&](BlockT *Block) {
+ LLVM_DEBUG(errs() << " block " << Info.Context.print(Block) << ": ");
+
+ bool IsEntry = false;
+ for (BlockT *Pred : predecessors(Block)) {
+ const DFSInfo PredDFSInfo = BlockDFSInfo.lookup(Pred);
+ if (CandidateInfo.isAncestorOf(PredDFSInfo)) {
+ Worklist.push_back(Pred);
+ } else {
+ IsEntry = true;
+ }
+ }
+ if (IsEntry) {
+ assert(!NewCycle->isEntry(Block));
+ LLVM_DEBUG(errs() << "append as entry\n");
+ NewCycle->appendEntry(Block);
+ } else {
+ LLVM_DEBUG(errs() << "append as child\n");
+ }
+ };
+
+ do {
+ BlockT *Block = Worklist.pop_back_val();
+ if (Block == HeaderCandidate)
+ continue;
+
+ // If the block has already been discovered by some cycle
+ // (possibly by ourself), then the outermost cycle containing it
+ // should become our child.
+ if (auto *BlockParent = Info.getTopLevelParentCycle(Block)) {
+ LLVM_DEBUG(errs() << " block " << Info.Context.print(Block) << ": ");
+
+ if (BlockParent != NewCycle.get()) {
+ LLVM_DEBUG(errs()
+ << "discovered child cycle "
+ << Info.Context.print(BlockParent->getHeader()) << "\n");
+ // Make BlockParent the child of NewCycle.
+ Info.moveToNewParent(NewCycle.get(), BlockParent);
+ NewCycle->Blocks.insert(NewCycle->Blocks.end(),
+ BlockParent->block_begin(),
+ BlockParent->block_end());
+
+ for (auto *ChildEntry : BlockParent->entries())
+ ProcessPredecessors(ChildEntry);
+ } else {
+ LLVM_DEBUG(errs()
+ << "known child cycle "
+ << Info.Context.print(BlockParent->getHeader()) << "\n");
+ }
+ } else {
+ Info.BlockMap.try_emplace(Block, NewCycle.get());
+ assert(!is_contained(NewCycle->Blocks, Block));
+ NewCycle->Blocks.push_back(Block);
+ ProcessPredecessors(Block);
+ }
+ } while (!Worklist.empty());
+
+ Info.TopLevelCycles.push_back(std::move(NewCycle));
+ }
+
+ // Fix top-level cycle links and compute cycle depths.
+ for (auto *TLC : Info.toplevel_cycles()) {
+ LLVM_DEBUG(errs() << "top-level cycle: "
+ << Info.Context.print(TLC->getHeader()) << "\n");
+
+ TLC->ParentCycle = nullptr;
+ updateDepth(TLC);
+ }
+}
+
+/// \brief Recompute depth values of \p SubTree and all descendants.
+template <typename ContextT>
+void GenericCycleInfoCompute<ContextT>::updateDepth(CycleT *SubTree) {
+ for (CycleT *Cycle : depth_first(SubTree))
+ Cycle->Depth = Cycle->ParentCycle ? Cycle->ParentCycle->Depth + 1 : 1;
+}
+
+/// \brief Compute a DFS of basic blocks starting at the function entry.
+///
+/// Fills BlockDFSInfo with start/end counters and BlockPreorder.
+template <typename ContextT>
+void GenericCycleInfoCompute<ContextT>::dfs(BlockT *EntryBlock) {
+ SmallVector<unsigned, 8> DFSTreeStack;
+ SmallVector<BlockT *, 8> TraverseStack;
+ unsigned Counter = 0;
+ TraverseStack.emplace_back(EntryBlock);
+
+ do {
+ BlockT *Block = TraverseStack.back();
+ LLVM_DEBUG(errs() << "DFS visiting block: " << Info.Context.print(Block)
+ << "\n");
+ if (!BlockDFSInfo.count(Block)) {
+ // We're visiting the block for the first time. Open its DFSInfo, add
+ // successors to the traversal stack, and remember the traversal stack
+ // depth at which the block was opened, so that we can correctly record
+ // its end time.
+ LLVM_DEBUG(errs() << " first encountered at depth "
+ << TraverseStack.size() << "\n");
+
+ DFSTreeStack.emplace_back(TraverseStack.size());
+ llvm::append_range(TraverseStack, successors(Block));
+
+ LLVM_ATTRIBUTE_UNUSED
+ bool Added = BlockDFSInfo.try_emplace(Block, ++Counter).second;
+ assert(Added);
+ BlockPreorder.push_back(Block);
+ LLVM_DEBUG(errs() << " preorder number: " << Counter << "\n");
+ } else {
+ assert(!DFSTreeStack.empty());
+ if (DFSTreeStack.back() == TraverseStack.size()) {
+ LLVM_DEBUG(errs() << " ended at " << Counter << "\n");
+ BlockDFSInfo.find(Block)->second.End = Counter;
+ DFSTreeStack.pop_back();
+ } else {
+ LLVM_DEBUG(errs() << " already done\n");
+ }
+ TraverseStack.pop_back();
+ }
+ } while (!TraverseStack.empty());
+ assert(DFSTreeStack.empty());
+
+ LLVM_DEBUG(
+ errs() << "Preorder:\n";
+ for (int i = 0, e = BlockPreorder.size(); i != e; ++i) {
+ errs() << " " << Info.Context.print(BlockPreorder[i]) << ": " << i << "\n";
+ }
+ );
+}
+
+/// \brief Reset the object to its initial state.
+template <typename ContextT> void GenericCycleInfo<ContextT>::clear() {
+ TopLevelCycles.clear();
+ BlockMap.clear();
+}
+
+/// \brief Compute the cycle info for a function.
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::compute(FunctionT &F) {
+ GenericCycleInfoCompute<ContextT> Compute(*this);
+ Context.setFunction(F);
+
+ LLVM_DEBUG(errs() << "Computing cycles for function: " << F.getName()
+ << "\n");
+ Compute.run(ContextT::getEntryBlock(F));
+
+ assert(validateTree());
+}
+
+/// \brief Find the innermost cycle containing a given block.
+///
+/// \returns the innermost cycle containing \p Block or nullptr if
+/// it is not contained in any cycle.
+template <typename ContextT>
+auto GenericCycleInfo<ContextT>::getCycle(const BlockT *Block) const
+ -> CycleT * {
+ auto MapIt = BlockMap.find(Block);
+ if (MapIt != BlockMap.end())
+ return MapIt->second;
+ return nullptr;
+}
+
+/// \brief Validate the internal consistency of the cycle tree.
+///
+/// Note that this does \em not check that cycles are really cycles in the CFG,
+/// or that the right set of cycles in the CFG were found.
+template <typename ContextT>
+bool GenericCycleInfo<ContextT>::validateTree() const {
+ DenseSet<BlockT *> Blocks;
+ DenseSet<BlockT *> Entries;
+
+ auto reportError = [](const char *File, int Line, const char *Cond) {
+ errs() << File << ':' << Line
+ << ": GenericCycleInfo::validateTree: " << Cond << '\n';
+ };
+#define check(cond) \
+ do { \
+ if (!(cond)) { \
+ reportError(__FILE__, __LINE__, #cond); \
+ return false; \
+ } \
+ } while (false)
+
+ for (const auto *TLC : toplevel_cycles()) {
+ for (const CycleT *Cycle : depth_first(TLC)) {
+ if (Cycle->ParentCycle)
+ check(is_contained(Cycle->ParentCycle->children(), Cycle));
+
+ for (BlockT *Block : Cycle->Blocks) {
+ auto MapIt = BlockMap.find(Block);
+ check(MapIt != BlockMap.end());
+ check(Cycle->contains(MapIt->second));
+ check(Blocks.insert(Block).second); // duplicates in block list?
+ }
+ Blocks.clear();
+
+ check(!Cycle->Entries.empty());
+ for (BlockT *Entry : Cycle->Entries) {
+ check(Entries.insert(Entry).second); // duplicate entry?
+ check(is_contained(Cycle->Blocks, Entry));
+ }
+ Entries.clear();
+
+ unsigned ChildDepth = 0;
+ for (const CycleT *Child : Cycle->children()) {
+ check(Child->Depth > Cycle->Depth);
+ if (!ChildDepth) {
+ ChildDepth = Child->Depth;
+ } else {
+ check(ChildDepth == Child->Depth);
+ }
+ }
+ }
+ }
+
+ for (const auto &Entry : BlockMap) {
+ BlockT *Block = Entry.first;
+ for (const CycleT *Cycle = Entry.second; Cycle;
+ Cycle = Cycle->ParentCycle) {
+ check(is_contained(Cycle->Blocks, Block));
+ }
+ }
+
+#undef check
+
+ return true;
+}
+
+/// \brief Print the cycle info.
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::print(raw_ostream &Out) const {
+ for (const auto *TLC : toplevel_cycles()) {
+ for (const CycleT *Cycle : depth_first(TLC)) {
+ for (unsigned I = 0; I < Cycle->Depth; ++I)
+ Out << " ";
+
+ Out << Cycle->print(Context) << '\n';
+ }
+ }
+}
+
+} // namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_ADT_GENERICCYCLEIMPL_H
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h b/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h
new file mode 100644
index 000000000000..aad704301e43
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h
@@ -0,0 +1,334 @@
+//===- GenericCycleInfo.h - Info for Cycles in any IR ------*- C++ -*------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief Find all cycles in a control-flow graph, including irreducible loops.
+///
+/// See docs/CycleTerminology.rst for a formal definition of cycles.
+///
+/// Briefly:
+/// - A cycle is a generalization of a loop which can represent
+/// irreducible control flow.
+/// - Cycles identified in a program are implementation defined,
+/// depending on the DFS traversal chosen.
+/// - Cycles are well-nested, and form a forest with a parent-child
+/// relationship.
+/// - In any choice of DFS, every natural loop L is represented by a
+/// unique cycle C which is a superset of L.
+/// - In the absence of irreducible control flow, the cycles are
+/// exactly the natural loops in the program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GENERICCYCLEINFO_H
+#define LLVM_ADT_GENERICCYCLEINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GenericSSAContext.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Printable.h"
+#include "llvm/Support/raw_ostream.h"
+#include <vector>
+
+namespace llvm {
+
+template <typename ContexT> class GenericCycleInfo;
+template <typename ContexT> class GenericCycleInfoCompute;
+
+/// A possibly irreducible generalization of a \ref Loop.
+template <typename ContextT> class GenericCycle {
+public:
+ using BlockT = typename ContextT::BlockT;
+ using FunctionT = typename ContextT::FunctionT;
+ template <typename> friend class GenericCycleInfo;
+ template <typename> friend class GenericCycleInfoCompute;
+
+private:
+ /// The parent cycle. Is null for the root "cycle". Top-level cycles point
+ /// at the root.
+ GenericCycle *ParentCycle = nullptr;
+
+ /// The entry block(s) of the cycle. The header is the only entry if
+ /// this is a loop. Is empty for the root "cycle", to avoid
+ /// unnecessary memory use.
+ SmallVector<BlockT *, 1> Entries;
+
+ /// Child cycles, if any.
+ std::vector<std::unique_ptr<GenericCycle>> Children;
+
+ /// Basic blocks that are contained in the cycle, including entry blocks,
+ /// and including blocks that are part of a child cycle.
+ std::vector<BlockT *> Blocks;
+
+ /// Depth of the cycle in the tree. The root "cycle" is at depth 0.
+ ///
+ /// \note Depths are not necessarily contiguous. However, child loops always
+ /// have strictly greater depth than their parents, and sibling loops
+ /// always have the same depth.
+ unsigned Depth = 0;
+
+ void clear() {
+ Entries.clear();
+ Children.clear();
+ Blocks.clear();
+ Depth = 0;
+ ParentCycle = nullptr;
+ }
+
+ void appendEntry(BlockT *Block) { Entries.push_back(Block); }
+ void appendBlock(BlockT *Block) { Blocks.push_back(Block); }
+
+ GenericCycle(const GenericCycle &) = delete;
+ GenericCycle &operator=(const GenericCycle &) = delete;
+ GenericCycle(GenericCycle &&Rhs) = delete;
+ GenericCycle &operator=(GenericCycle &&Rhs) = delete;
+
+public:
+ GenericCycle() = default;
+
+ /// \brief Whether the cycle is a natural loop.
+ bool isReducible() const { return Entries.size() == 1; }
+
+ BlockT *getHeader() const { return Entries[0]; }
+
+ /// \brief Return whether \p Block is an entry block of the cycle.
+ bool isEntry(BlockT *Block) const { return is_contained(Entries, Block); }
+
+ /// \brief Return whether \p Block is contained in the cycle.
+ bool contains(const BlockT *Block) const {
+ return is_contained(Blocks, Block);
+ }
+
+ /// \brief Returns true iff this cycle contains \p C.
+ ///
+ /// Note: Non-strict containment check, i.e. returns true if C is the
+ /// same cycle.
+ bool contains(const GenericCycle *C) const;
+
+ const GenericCycle *getParentCycle() const { return ParentCycle; }
+ GenericCycle *getParentCycle() { return ParentCycle; }
+ unsigned getDepth() const { return Depth; }
+
+ /// Return all of the successor blocks of this cycle.
+ ///
+ /// These are the blocks _outside of the current cycle_ which are
+ /// branched to.
+ void getExitBlocks(SmallVectorImpl<BlockT *> &TmpStorage) const;
+
+ /// Iteration over child cycles.
+ //@{
+ using const_child_iterator_base =
+ typename std::vector<std::unique_ptr<GenericCycle>>::const_iterator;
+ struct const_child_iterator
+ : iterator_adaptor_base<const_child_iterator, const_child_iterator_base> {
+ using Base =
+ iterator_adaptor_base<const_child_iterator, const_child_iterator_base>;
+
+ const_child_iterator() = default;
+ explicit const_child_iterator(const_child_iterator_base I) : Base(I) {}
+
+ const const_child_iterator_base &wrapped() { return Base::wrapped(); }
+ GenericCycle *operator*() const { return Base::I->get(); }
+ };
+
+ const_child_iterator child_begin() const {
+ return const_child_iterator{Children.begin()};
+ }
+ const_child_iterator child_end() const {
+ return const_child_iterator{Children.end()};
+ }
+ size_t getNumChildren() const { return Children.size(); }
+ iterator_range<const_child_iterator> children() const {
+ return llvm::make_range(const_child_iterator{Children.begin()},
+ const_child_iterator{Children.end()});
+ }
+ //@}
+
+ /// Iteration over blocks in the cycle (including entry blocks).
+ //@{
+ using const_block_iterator = typename std::vector<BlockT *>::const_iterator;
+
+ const_block_iterator block_begin() const {
+ return const_block_iterator{Blocks.begin()};
+ }
+ const_block_iterator block_end() const {
+ return const_block_iterator{Blocks.end()};
+ }
+ size_t getNumBlocks() const { return Blocks.size(); }
+ iterator_range<const_block_iterator> blocks() const {
+ return llvm::make_range(block_begin(), block_end());
+ }
+ //@}
+
+ /// Iteration over entry blocks.
+ //@{
+ using const_entry_iterator =
+ typename SmallVectorImpl<BlockT *>::const_iterator;
+
+ size_t getNumEntries() const { return Entries.size(); }
+ iterator_range<const_entry_iterator> entries() const {
+ return llvm::make_range(Entries.begin(), Entries.end());
+ }
+
+ Printable printEntries(const ContextT &Ctx) const {
+ return Printable([this, &Ctx](raw_ostream &Out) {
+ bool First = true;
+ for (auto *Entry : Entries) {
+ if (!First)
+ Out << ' ';
+ First = false;
+ Out << Ctx.print(Entry);
+ }
+ });
+ }
+
+ Printable print(const ContextT &Ctx) const {
+ return Printable([this, &Ctx](raw_ostream &Out) {
+ Out << "depth=" << Depth << ": entries(" << printEntries(Ctx) << ')';
+
+ for (auto *Block : Blocks) {
+ if (isEntry(Block))
+ continue;
+
+ Out << ' ' << Ctx.print(Block);
+ }
+ });
+ }
+};
+
+/// \brief Cycle information for a function.
+template <typename ContextT> class GenericCycleInfo {
+public:
+ using BlockT = typename ContextT::BlockT;
+ using CycleT = GenericCycle<ContextT>;
+ using FunctionT = typename ContextT::FunctionT;
+ template <typename> friend class GenericCycle;
+ template <typename> friend class GenericCycleInfoCompute;
+
+private:
+ ContextT Context;
+
+  /// Map basic blocks to their inner-most containing cycle.
+ DenseMap<BlockT *, CycleT *> BlockMap;
+
+ /// Outermost cycles discovered by any DFS.
+ ///
+ /// Note: The implementation treats the nullptr as the parent of
+ /// every top-level cycle. See \ref contains for an example.
+ std::vector<std::unique_ptr<CycleT>> TopLevelCycles;
+
+public:
+ GenericCycleInfo() = default;
+ GenericCycleInfo(GenericCycleInfo &&) = default;
+ GenericCycleInfo &operator=(GenericCycleInfo &&) = default;
+
+ void clear();
+ void compute(FunctionT &F);
+
+ FunctionT *getFunction() const { return Context.getFunction(); }
+ const ContextT &getSSAContext() const { return Context; }
+
+ CycleT *getCycle(const BlockT *Block) const;
+ CycleT *getTopLevelParentCycle(const BlockT *Block) const;
+
+ /// Move \p Child to \p NewParent by manipulating Children vectors.
+ ///
+ /// Note: This is an incomplete operation that does not update the
+ /// list of blocks in the new parent or the depth of the subtree.
+ void moveToNewParent(CycleT *NewParent, CycleT *Child);
+
+ /// Methods for debug and self-test.
+ //@{
+ bool validateTree() const;
+ void print(raw_ostream &Out) const;
+ void dump() const { print(dbgs()); }
+ //@}
+
+ /// Iteration over top-level cycles.
+ //@{
+ using const_toplevel_iterator_base =
+ typename std::vector<std::unique_ptr<CycleT>>::const_iterator;
+ struct const_toplevel_iterator
+ : iterator_adaptor_base<const_toplevel_iterator,
+ const_toplevel_iterator_base> {
+ using Base = iterator_adaptor_base<const_toplevel_iterator,
+ const_toplevel_iterator_base>;
+
+ const_toplevel_iterator() = default;
+ explicit const_toplevel_iterator(const_toplevel_iterator_base I)
+ : Base(I) {}
+
+ const const_toplevel_iterator_base &wrapped() { return Base::wrapped(); }
+ CycleT *operator*() const { return Base::I->get(); }
+ };
+
+ const_toplevel_iterator toplevel_begin() const {
+ return const_toplevel_iterator{TopLevelCycles.begin()};
+ }
+ const_toplevel_iterator toplevel_end() const {
+ return const_toplevel_iterator{TopLevelCycles.end()};
+ }
+
+ iterator_range<const_toplevel_iterator> toplevel_cycles() const {
+ return llvm::make_range(const_toplevel_iterator{TopLevelCycles.begin()},
+ const_toplevel_iterator{TopLevelCycles.end()});
+ }
+ //@}
+};
+
+/// \brief GraphTraits for iterating over a sub-tree of the CycleT tree.
+template <typename CycleRefT, typename ChildIteratorT> struct CycleGraphTraits {
+ using NodeRef = CycleRefT;
+
+ using nodes_iterator = ChildIteratorT;
+ using ChildIteratorType = nodes_iterator;
+
+ static NodeRef getEntryNode(NodeRef Graph) { return Graph; }
+
+ static ChildIteratorType child_begin(NodeRef Ref) {
+ return Ref->child_begin();
+ }
+ static ChildIteratorType child_end(NodeRef Ref) { return Ref->child_end(); }
+
+ // Not implemented:
+ // static nodes_iterator nodes_begin(GraphType *G)
+ // static nodes_iterator nodes_end (GraphType *G)
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+
+ // typedef EdgeRef - Type of Edge token in the graph, which should
+ // be cheap to copy.
+ // typedef ChildEdgeIteratorType - Type used to iterate over children edges in
+ // graph, dereference to a EdgeRef.
+
+ // static ChildEdgeIteratorType child_edge_begin(NodeRef)
+ // static ChildEdgeIteratorType child_edge_end(NodeRef)
+ // Return iterators that point to the beginning and ending of the
+ // edge list for the given callgraph node.
+ //
+ // static NodeRef edge_dest(EdgeRef)
+ // Return the destination node of an edge.
+ // static unsigned size (GraphType *G)
+ // Return total number of nodes in the graph
+};
+
+template <typename BlockT>
+struct GraphTraits<const GenericCycle<BlockT> *>
+ : CycleGraphTraits<const GenericCycle<BlockT> *,
+ typename GenericCycle<BlockT>::const_child_iterator> {};
+template <typename BlockT>
+struct GraphTraits<GenericCycle<BlockT> *>
+ : CycleGraphTraits<GenericCycle<BlockT> *,
+ typename GenericCycle<BlockT>::const_child_iterator> {};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_GENERICCYCLEINFO_H
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/GenericSSAContext.h b/contrib/llvm-project/llvm/include/llvm/ADT/GenericSSAContext.h
new file mode 100644
index 000000000000..409222547d5c
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/GenericSSAContext.h
@@ -0,0 +1,74 @@
+//===- GenericSSAContext.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the little GenericSSAContext<X> template class
+/// that can be used to implement IR analyses as templates.
+/// Specializing these templates allows the analyses to be used over
+/// both LLVM IR and Machine IR.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GENERICSSACONTEXT_H
+#define LLVM_ADT_GENERICSSACONTEXT_H
+
+#include "llvm/Support/Printable.h"
+
+namespace llvm {
+
+template <typename _FunctionT> class GenericSSAContext {
+public:
+ // Specializations should provide the following types that are similar to how
+ // LLVM IR is structured:
+
+ // The smallest unit of the IR is a ValueT. The SSA context uses a ValueRefT,
+ // which is a pointer to a ValueT, since Machine IR does not have the
+ // equivalent of a ValueT.
+ //
+ // using ValueRefT = ...
+
+ // An InstT is a subclass of ValueT that itself defines one or more ValueT
+ // objects.
+ //
+ // using InstT = ... must be a subclass of Value
+
+ // A BlockT is a sequence of InstT, and forms a node of the CFG. It
+ // has global methods predecessors() and successors() that return
+ // the list of incoming CFG edges and outgoing CFG edges
+ // respectively.
+ //
+ // using BlockT = ...
+
+ // A FunctionT represents a CFG along with arguments and return values. It is
+ // the smallest complete unit of code in a Module.
+ //
+ // The compiler produces an error here if this class is implicitly
+ // specialized due to an instantiation. An explicit specialization
+ // of this template needs to be added before the instantiation point
+ // indicated by the compiler.
+ using FunctionT = typename _FunctionT::invalidTemplateInstanceError;
+
+ // Every FunctionT has a unique BlockT marked as its entry.
+ //
+ // static BlockT* getEntryBlock(FunctionT &F);
+
+ // Initialize the SSA context with information about the FunctionT being
+ // processed.
+ //
+ // void setFunction(FunctionT &function);
+ // FunctionT* getFunction() const;
+
+ // Methods to print various objects.
+ //
+ // Printable print(BlockT *block) const;
+  //  Printable print(InstT *inst) const;
+ // Printable print(ValueRefT value) const;
+};
+} // namespace llvm
+
+#endif // LLVM_ADT_GENERICSSACONTEXT_H
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/PointerUnion.h b/contrib/llvm-project/llvm/include/llvm/ADT/PointerUnion.h
index 0874f67db3fe..5ce2dbee4b3a 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/PointerUnion.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/PointerUnion.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <algorithm>
#include <cassert>
@@ -35,21 +36,6 @@ namespace pointer_union_detail {
return std::min<int>({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...});
}
- /// Find the index of a type in a list of types. TypeIndex<T, Us...>::Index
- /// is the index of T in Us, or sizeof...(Us) if T does not appear in the
- /// list.
- template <typename T, typename ...Us> struct TypeIndex;
- template <typename T, typename ...Us> struct TypeIndex<T, T, Us...> {
- static constexpr int Index = 0;
- };
- template <typename T, typename U, typename... Us>
- struct TypeIndex<T, U, Us...> {
- static constexpr int Index = 1 + TypeIndex<T, Us...>::Index;
- };
- template <typename T> struct TypeIndex<T> {
- static constexpr int Index = 0;
- };
-
/// Find the first type in a list of types.
template <typename T, typename...> struct GetFirstType {
using type = T;
@@ -116,6 +102,7 @@ namespace pointer_union_detail {
/// P = (float*)0;
/// Y = P.get<float*>(); // ok.
/// X = P.get<int*>(); // runtime assertion failure.
+/// PointerUnion<int*, int*> Q; // compile time failure.
template <typename... PTs>
class PointerUnion
: public pointer_union_detail::PointerUnionMembers<
@@ -124,12 +111,14 @@ class PointerUnion
void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int,
pointer_union_detail::PointerUnionUIntTraits<PTs...>>,
0, PTs...> {
+ static_assert(TypesAreDistinct<PTs...>::value,
+ "PointerUnion alternative types cannot be repeated");
// The first type is special because we want to directly cast a pointer to a
// default-initialized union to a pointer to the first type. But we don't
// want PointerUnion to be a 'template <typename First, typename ...Rest>'
// because it's much more convenient to have a name for the whole pack. So
// split off the first type here.
- using First = typename pointer_union_detail::GetFirstType<PTs...>::type;
+ using First = TypeAtIndex<0, PTs...>;
using Base = typename PointerUnion::PointerUnionMembers;
public:
@@ -146,10 +135,7 @@ public:
/// Test if the Union currently holds the type matching T.
template <typename T> bool is() const {
- constexpr int Index = pointer_union_detail::TypeIndex<T, PTs...>::Index;
- static_assert(Index < sizeof...(PTs),
- "PointerUnion::is<T> given type not in the union");
- return this->Val.getInt() == Index;
+ return this->Val.getInt() == FirstIndexOfType<T, PTs...>::value;
}
/// Returns the value of the specified pointer type.
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h b/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h
index f9b658ca960a..2d38e153c79e 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h
@@ -144,6 +144,61 @@ template <typename ReturnType, typename... Args>
struct function_traits<ReturnType (&)(Args...), false>
: public function_traits<ReturnType (*)(Args...)> {};
+/// traits class for checking whether type T is one of any of the given
+/// types in the variadic list.
+template <typename T, typename... Ts>
+using is_one_of = disjunction<std::is_same<T, Ts>...>;
+
+/// traits class for checking whether type T is a base class for all
+/// the given types in the variadic list.
+template <typename T, typename... Ts>
+using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
+
+namespace detail {
+template <typename T, typename... Us> struct TypesAreDistinct;
+template <typename T, typename... Us>
+struct TypesAreDistinct
+ : std::integral_constant<bool, !is_one_of<T, Us...>::value &&
+ TypesAreDistinct<Us...>::value> {};
+template <typename T> struct TypesAreDistinct<T> : std::true_type {};
+} // namespace detail
+
+/// Determine if all types in Ts are distinct.
+///
+/// Useful to statically assert when Ts is intended to describe a non-multi set
+/// of types.
+///
+/// Expensive (currently quadratic in sizeof(Ts...)), and so should only be
+/// asserted once per instantiation of a type which requires it.
+template <typename... Ts> struct TypesAreDistinct;
+template <> struct TypesAreDistinct<> : std::true_type {};
+template <typename... Ts>
+struct TypesAreDistinct
+ : std::integral_constant<bool, detail::TypesAreDistinct<Ts...>::value> {};
+
+/// Find the first index where a type appears in a list of types.
+///
+/// FirstIndexOfType<T, Us...>::value is the first index of T in Us.
+///
+/// Typically only meaningful when it is otherwise statically known that the
+/// type pack has no duplicate types. This should be guaranteed explicitly with
+/// static_assert(TypesAreDistinct<Us...>::value).
+///
+/// It is a compile-time error to instantiate when T is not present in Us, i.e.
+/// if is_one_of<T, Us...>::value is false.
+template <typename T, typename... Us> struct FirstIndexOfType;
+template <typename T, typename U, typename... Us>
+struct FirstIndexOfType<T, U, Us...>
+ : std::integral_constant<size_t, 1 + FirstIndexOfType<T, Us...>::value> {};
+template <typename T, typename... Us>
+struct FirstIndexOfType<T, T, Us...> : std::integral_constant<size_t, 0> {};
+
+/// Find the type at a given index in a list of types.
+///
+/// TypeAtIndex<I, Ts...> is the type at index I in Ts.
+template <size_t I, typename... Ts>
+using TypeAtIndex = std::tuple_element_t<I, std::tuple<Ts...>>;
+
//===----------------------------------------------------------------------===//
// Extra additions to <functional>
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h
index 0d13524f25ce..804567ebe3f1 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h
@@ -589,17 +589,21 @@ public:
private:
template <bool ForOverwrite> void resizeImpl(size_type N) {
+ if (N == this->size())
+ return;
+
if (N < this->size()) {
- this->pop_back_n(this->size() - N);
- } else if (N > this->size()) {
- this->reserve(N);
- for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
- if (ForOverwrite)
- new (&*I) T;
- else
- new (&*I) T();
- this->set_size(N);
+ this->truncate(N);
+ return;
}
+
+ this->reserve(N);
+ for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
+ if (ForOverwrite)
+ new (&*I) T;
+ else
+ new (&*I) T();
+ this->set_size(N);
}
public:
@@ -608,12 +612,19 @@ public:
/// Like resize, but \ref T is POD, the new values won't be initialized.
void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
+ /// Like resize, but requires that \p N is less than \a size().
+ void truncate(size_type N) {
+ assert(this->size() >= N && "Cannot increase size with truncate");
+ this->destroy_range(this->begin() + N, this->end());
+ this->set_size(N);
+ }
+
void resize(size_type N, ValueParamT NV) {
if (N == this->size())
return;
if (N < this->size()) {
- this->pop_back_n(this->size() - N);
+ this->truncate(N);
return;
}
@@ -628,8 +639,7 @@ public:
void pop_back_n(size_type NumItems) {
assert(this->size() >= NumItems);
- this->destroy_range(this->end() - NumItems, this->end());
- this->set_size(this->size() - NumItems);
+ truncate(this->size() - NumItems);
}
LLVM_NODISCARD T pop_back_val() {
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h
index 9f4b89218042..3950910f0635 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h
@@ -149,11 +149,11 @@ namespace llvm {
/// empty - Check if the string is empty.
LLVM_NODISCARD
- bool empty() const { return Length == 0; }
+ constexpr bool empty() const { return Length == 0; }
/// size - Get the string size.
LLVM_NODISCARD
- size_t size() const { return Length; }
+ constexpr size_t size() const { return Length; }
/// front - Get the first character in the string.
LLVM_NODISCARD
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h b/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h
index 2fd3047acbfd..5dbd4f16bfd5 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h
@@ -10,6 +10,7 @@
#define LLVM_ADT_TRIPLE_H
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/VersionTuple.h"
// Some system headers or GCC predefined macros conflict with identifiers in
// this file. Undefine them here.
@@ -19,8 +20,6 @@
namespace llvm {
-class VersionTuple;
-
/// Triple - Helper class for working with autoconf configuration names. For
/// historical reasons, we also call these 'triples' (they used to contain
/// exactly three fields).
@@ -332,10 +331,7 @@ public:
/// triple, if present.
///
/// For example, "fooos1.2.3" would return (1, 2, 3).
- ///
- /// If an entry is not defined, it will be returned as 0.
- void getEnvironmentVersion(unsigned &Major, unsigned &Minor,
- unsigned &Micro) const;
+ VersionTuple getEnvironmentVersion() const;
/// Get the object format for this triple.
ObjectFormatType getObjectFormat() const { return ObjectFormat; }
@@ -344,34 +340,25 @@ public:
/// present.
///
/// For example, "fooos1.2.3" would return (1, 2, 3).
- ///
- /// If an entry is not defined, it will be returned as 0.
- void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
+ VersionTuple getOSVersion() const;
/// Return just the major version number, this is specialized because it is a
/// common query.
- unsigned getOSMajorVersion() const {
- unsigned Maj, Min, Micro;
- getOSVersion(Maj, Min, Micro);
- return Maj;
- }
+ unsigned getOSMajorVersion() const { return getOSVersion().getMajor(); }
/// Parse the version number as with getOSVersion and then translate generic
/// "darwin" versions to the corresponding OS X versions. This may also be
/// called with IOS triples but the OS X version number is just set to a
/// constant 10.4.0 in that case. Returns true if successful.
- bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
- unsigned &Micro) const;
+ bool getMacOSXVersion(VersionTuple &Version) const;
/// Parse the version number as with getOSVersion. This should only be called
/// with IOS or generic triples.
- void getiOSVersion(unsigned &Major, unsigned &Minor,
- unsigned &Micro) const;
+ VersionTuple getiOSVersion() const;
/// Parse the version number as with getOSVersion. This should only be called
/// with WatchOS or generic triples.
- void getWatchOSVersion(unsigned &Major, unsigned &Minor,
- unsigned &Micro) const;
+ VersionTuple getWatchOSVersion() const;
/// @}
/// @name Direct Component Access
@@ -428,23 +415,17 @@ public:
/// the target triple.
bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
unsigned Micro = 0) const {
- unsigned LHS[3];
- getOSVersion(LHS[0], LHS[1], LHS[2]);
-
- if (LHS[0] != Major)
- return LHS[0] < Major;
- if (LHS[1] != Minor)
- return LHS[1] < Minor;
- if (LHS[2] != Micro)
- return LHS[2] < Micro;
-
- return false;
+ if (Minor == 0) {
+ return getOSVersion() < VersionTuple(Major);
+ }
+ if (Micro == 0) {
+ return getOSVersion() < VersionTuple(Major, Minor);
+ }
+ return getOSVersion() < VersionTuple(Major, Minor, Micro);
}
bool isOSVersionLT(const Triple &Other) const {
- unsigned RHS[3];
- Other.getOSVersion(RHS[0], RHS[1], RHS[2]);
- return isOSVersionLT(RHS[0], RHS[1], RHS[2]);
+ return getOSVersion() < Other.getOSVersion();
}
/// Comparison function for checking OS X version compatibility, which handles
@@ -678,14 +659,13 @@ public:
bool isAndroidVersionLT(unsigned Major) const {
assert(isAndroid() && "Not an Android triple!");
- unsigned Env[3];
- getEnvironmentVersion(Env[0], Env[1], Env[2]);
+ VersionTuple Version = getEnvironmentVersion();
// 64-bit targets did not exist before API level 21 (Lollipop).
- if (isArch64Bit() && Env[0] < 21)
- Env[0] = 21;
+ if (isArch64Bit() && Version.getMajor() < 21)
+ return VersionTuple(21) < VersionTuple(Major);
- return Env[0] < Major;
+ return Version < VersionTuple(Major);
}
/// Tests whether the environment is musl-libc
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/CycleAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/CycleAnalysis.h
new file mode 100644
index 000000000000..e16b908d6a10
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/CycleAnalysis.h
@@ -0,0 +1,77 @@
+//===- CycleAnalysis.h - Cycle Info for LLVM IR -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares an analysis pass that computes CycleInfo for
+/// LLVM IR, specialized from GenericCycleInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CYCLEANALYSIS_H
+#define LLVM_ANALYSIS_CYCLEANALYSIS_H
+
+#include "llvm/ADT/GenericCycleInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/SSAContext.h"
+
+namespace llvm {
+extern template class GenericCycleInfo<SSAContext>;
+extern template class GenericCycle<SSAContext>;
+
+using CycleInfo = GenericCycleInfo<SSAContext>;
+using Cycle = CycleInfo::CycleT;
+
+/// Analysis pass which computes a \ref CycleInfo.
+class CycleAnalysis : public AnalysisInfoMixin<CycleAnalysis> {
+ friend AnalysisInfoMixin<CycleAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ /// Provide the result typedef for this analysis pass.
+ using Result = CycleInfo;
+
+  /// Run the analysis pass over a function and produce cycle info.
+ CycleInfo run(Function &F, FunctionAnalysisManager &);
+
+ // TODO: verify analysis?
+};
+
+/// Printer pass for the \c CycleInfo.
+class CycleInfoPrinterPass : public PassInfoMixin<CycleInfoPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit CycleInfoPrinterPass(raw_ostream &OS);
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Legacy analysis pass which computes a \ref CycleInfo.
+class CycleInfoWrapperPass : public FunctionPass {
+ Function *F = nullptr;
+ CycleInfo CI;
+
+public:
+ static char ID;
+
+ CycleInfoWrapperPass();
+
+ CycleInfo &getCycleInfo() { return CI; }
+ const CycleInfo &getCycleInfo() const { return CI; }
+
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void releaseMemory() override;
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
+
+ // TODO: verify analysis?
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_CYCLEANALYSIS_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h b/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h
index ea4c0312e073..9858a46d16a2 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h
@@ -157,7 +157,7 @@ public:
static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);
/// Returns identity corresponding to the RecurrenceKind.
- Value *getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF);
+ Value *getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF) const;
/// Returns the opcode corresponding to the RecurrenceKind.
static unsigned getOpcode(RecurKind Kind);
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h
index b22841343b1a..776749b9a07f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h
@@ -212,7 +212,7 @@ struct InlineParams {
Optional<bool> ComputeFullInlineCost;
/// Indicate whether we should allow inline deferral.
- Optional<bool> EnableDeferral = true;
+ Optional<bool> EnableDeferral;
/// Indicate whether we allow inlining for recursive call.
Optional<bool> AllowRecursiveCall = false;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h
index 7cfa6efedf10..90b3cc7e76e6 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -10,7 +10,6 @@
#ifndef LLVM_ANALYSIS_MLMODELRUNNER_H
#define LLVM_ANALYSIS_MLMODELRUNNER_H
-#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
@@ -18,6 +17,9 @@ namespace llvm {
/// MLModelRunner interface: abstraction of a mechanism for evaluating a
/// tensorflow "saved model".
+/// NOTE: feature indices are expected to be consistent all across
+/// MLModelRunners (pertaining to the same model), and also Loggers (see
+/// TFUtils.h)
class MLModelRunner {
public:
// Disallows copy and assign.
@@ -25,12 +27,27 @@ public:
MLModelRunner &operator=(const MLModelRunner &) = delete;
virtual ~MLModelRunner() = default;
- virtual bool run() = 0;
- virtual void setFeature(FeatureIndex Index, int64_t Value) = 0;
- virtual int64_t getFeature(int Index) const = 0;
+ template <typename T> T evaluate() {
+ return *reinterpret_cast<T *>(evaluateUntyped());
+ }
+
+ template <typename T, typename I> T *getTensor(I FeatureID) {
+ return reinterpret_cast<T *>(
+ getTensorUntyped(static_cast<size_t>(FeatureID)));
+ }
+
+ template <typename T, typename I> const T *getTensor(I FeatureID) const {
+ return reinterpret_cast<const T *>(
+ getTensorUntyped(static_cast<size_t>(FeatureID)));
+ }
protected:
MLModelRunner(LLVMContext &Ctx) : Ctx(Ctx) {}
+ virtual void *evaluateUntyped() = 0;
+ virtual void *getTensorUntyped(size_t Index) = 0;
+ const void *getTensorUntyped(size_t Index) const {
+ return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
+ }
LLVMContext &Ctx;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 39ade20df53f..94495a518042 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -241,7 +241,7 @@ class ObjectSizeOffsetVisitor
APInt Zero;
SmallPtrSet<Instruction *, 8> SeenInsts;
- APInt align(APInt Size, uint64_t Align);
+ APInt align(APInt Size, MaybeAlign Align);
SizeOffsetType unknown() {
return std::make_pair(APInt(), APInt());
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h
index 3b188d763ef2..833fce1b1726 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -253,6 +253,8 @@ public:
static MemoryLocation getForDest(const MemIntrinsic *MI);
static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
+ static Optional<MemoryLocation> getForDest(const CallBase *CI,
+ const TargetLibraryInfo &TLI);
/// Return a location representing a particular argument of a call.
static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
new file mode 100644
index 000000000000..ca99d5d01eef
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -0,0 +1,59 @@
+//===- ModelUnderTrainingRunner.h -- 'development' mode runner --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
+#define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
+
+#include "llvm/Config/llvm-config.h"
+
+#ifdef LLVM_HAVE_TF_API
+#include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
+/// to dynamically load and evaluate a TF SavedModel
+/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
+/// sacrificed for ease of use while training.
+class ModelUnderTrainingRunner final : public MLModelRunner {
+public:
+ ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ const std::vector<LoggedFeatureSpec> &OutputSpecs);
+
+ // Disallows copy and assign.
+ ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
+ ModelUnderTrainingRunner &
+ operator=(const ModelUnderTrainingRunner &) = delete;
+
+ bool isValid() const { return !!Evaluator; }
+
+ const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
+ return OutputSpecs;
+ }
+
+ const Optional<TFModelEvaluator::EvaluationResult> &
+ lastEvaluationResult() const {
+ return LastEvaluationResult;
+ }
+
+private:
+ std::unique_ptr<TFModelEvaluator> Evaluator;
+ const std::vector<LoggedFeatureSpec> OutputSpecs;
+ Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
+ void *evaluateUntyped() override;
+ void *getTensorUntyped(size_t Index) override;
+};
+
+} // namespace llvm
+#endif // defined(LLVM_HAVE_TF_API)
+#endif // LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
new file mode 100644
index 000000000000..60d6777c765b
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -0,0 +1,39 @@
+//===- NoInferenceModelRunner.h ---- noop ML model runner ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
+#define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
+
+#include "llvm/Config/llvm-config.h"
+
+/// While not strictly necessary to conditionally compile this, it really
+/// has no use case outside the 'development' mode.
+#ifdef LLVM_HAVE_TF_API
+#include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+namespace llvm {
+/// A pseudo model runner. We use it to store feature values when collecting
+/// logs for the default policy, in 'development' mode, but never ask it to
+/// 'run'.
+class NoInferenceModelRunner : public MLModelRunner {
+public:
+ NoInferenceModelRunner(LLVMContext &Ctx,
+ const std::vector<TensorSpec> &Inputs);
+
+private:
+ void *evaluateUntyped() override {
+ llvm_unreachable("We shouldn't call run on this model runner.");
+ }
+ void *getTensorUntyped(size_t Index) override;
+
+ std::vector<std::unique_ptr<char[]>> ValuesBuffer;
+};
+} // namespace llvm
+#endif // defined(LLVM_HAVE_TF_API)
+#endif // defined(LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H)
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
new file mode 100644
index 000000000000..b684f87ea5cb
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -0,0 +1,67 @@
+//===- ReleaseModeModelRunner.h - Fast, precompiled model runner ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a model runner wrapping an AOT compiled ML model.
+// Only inference is supported.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/MLModelRunner.h"
+
+#include <memory>
+#include <vector>
+
+using namespace llvm;
+namespace llvm {
+
+/// ReleaseModeModelRunner - production mode implementation of the
+/// MLModelRunner. It uses an AOT-compiled SavedModel for efficient execution.
+template <class TGen>
+class ReleaseModeModelRunner final : public MLModelRunner {
+public:
+ /// FeatureNames' type should be an indexed collection of std::string, like
+ /// std::array or std::vector, that has a size() method.
+ template <class FType>
+ ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
+ StringRef DecisionName, StringRef FeedPrefix = "feed_",
+ StringRef FetchPrefix = "fetch_")
+ : MLModelRunner(Ctx), CompiledModel(std::make_unique<TGen>()) {
+ assert(CompiledModel && "The CompiledModel should be valid");
+
+ const size_t FeatureCount = FeatureNames.size();
+ FeatureIndices.resize(FeatureCount);
+
+ for (size_t I = 0; I < FeatureCount; ++I) {
+ const int Index =
+ CompiledModel->LookupArgIndex(FeedPrefix.str() + FeatureNames[I]);
+ assert(Index >= 0 && "Cannot find Feature in inlining model");
+ FeatureIndices[I] = Index;
+ }
+
+ ResultIndex = CompiledModel->LookupResultIndex(FetchPrefix.str() +
+ DecisionName.str());
+ assert(ResultIndex >= 0 && "Cannot find DecisionName in inlining model");
+ }
+
+ virtual ~ReleaseModeModelRunner() = default;
+
+private:
+ void *evaluateUntyped() override {
+ CompiledModel->Run();
+ return CompiledModel->result_data(ResultIndex);
+ }
+
+ void *getTensorUntyped(size_t Index) override {
+ return reinterpret_cast<char *>(
+ CompiledModel->arg_data(FeatureIndices[Index]));
+ }
+
+ std::vector<int32_t> FeatureIndices;
+ int32_t ResultIndex = -1;
+ std::unique_ptr<TGen> CompiledModel;
+};
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 170d6b8f35ff..d9f5c9689d5c 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1137,6 +1137,13 @@ public:
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
const Instruction *I = nullptr) const;
+ /// \return The cost of VP Load and Store instructions.
+ InstructionCost
+ getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
+ const Instruction *I = nullptr) const;
+
/// \return The cost of masked Load and Store instructions.
InstructionCost getMaskedMemoryOpCost(
unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
@@ -1291,13 +1298,12 @@ public:
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
- /// \returns True if the caller and callee agree on how \p Args will be passed
+ /// \returns True if the caller and callee agree on how \p Types will be
+ /// passed to or returned from the callee.
/// to the callee.
- /// \param[out] Args The list of compatible arguments. The implementation may
- /// filter out any incompatible args from this list.
- bool areFunctionArgsABICompatible(const Function *Caller,
- const Function *Callee,
- SmallPtrSetImpl<Argument *> &Args) const;
+ /// \param Types List of types to check.
+ bool areTypesABICompatible(const Function *Caller, const Function *Callee,
+ const ArrayRef<Type *> &Types) const;
/// The type of load/store indexing.
enum MemIndexedMode {
@@ -1388,12 +1394,17 @@ public:
/// \returns True if the target supports scalable vectors.
bool supportsScalableVectors() const;
+ /// \return true when scalable vectorization is preferred.
+ bool enableScalableVectorization() const;
+
/// \name Vector Predication Information
/// @{
/// Whether the target supports the %evl parameter of VP intrinsic efficiently
- /// in hardware. (see LLVM Language Reference - "Vector Predication
- /// Intrinsics") Use of %evl is discouraged when that is not the case.
- bool hasActiveVectorLength() const;
+ /// in hardware, for the given opcode and type/alignment. (see LLVM Language
+ /// Reference - "Vector Predication Intrinsics").
+ /// Use of %evl is discouraged when that is not the case.
+ bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
+ Align Alignment) const;
struct VPLegalization {
enum VPTransform {
@@ -1667,6 +1678,11 @@ public:
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
const Instruction *I) = 0;
+ virtual InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src,
+ Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ const Instruction *I) = 0;
virtual InstructionCost
getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
@@ -1718,9 +1734,9 @@ public:
unsigned SrcAlign, unsigned DestAlign) const = 0;
virtual bool areInlineCompatible(const Function *Caller,
const Function *Callee) const = 0;
- virtual bool
- areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
- SmallPtrSetImpl<Argument *> &Args) const = 0;
+ virtual bool areTypesABICompatible(const Function *Caller,
+ const Function *Callee,
+ const ArrayRef<Type *> &Types) const = 0;
virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
@@ -1747,8 +1763,10 @@ public:
ReductionFlags) const = 0;
virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
virtual unsigned getGISelRematGlobalCost() const = 0;
+ virtual bool enableScalableVectorization() const = 0;
virtual bool supportsScalableVectors() const = 0;
- virtual bool hasActiveVectorLength() const = 0;
+ virtual bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
+ Align Alignment) const = 0;
virtual InstructionCost getInstructionLatency(const Instruction *I) = 0;
virtual VPLegalization
getVPLegalizationStrategy(const VPIntrinsic &PI) const = 0;
@@ -2185,6 +2203,13 @@ public:
return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
CostKind, I);
}
+ InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ const Instruction *I) override {
+ return Impl.getVPMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
+ CostKind, I);
+ }
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind) override {
@@ -2273,10 +2298,9 @@ public:
const Function *Callee) const override {
return Impl.areInlineCompatible(Caller, Callee);
}
- bool areFunctionArgsABICompatible(
- const Function *Caller, const Function *Callee,
- SmallPtrSetImpl<Argument *> &Args) const override {
- return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
+ bool areTypesABICompatible(const Function *Caller, const Function *Callee,
+ const ArrayRef<Type *> &Types) const override {
+ return Impl.areTypesABICompatible(Caller, Callee, Types);
}
bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
@@ -2340,8 +2364,13 @@ public:
return Impl.supportsScalableVectors();
}
- bool hasActiveVectorLength() const override {
- return Impl.hasActiveVectorLength();
+ bool enableScalableVectorization() const override {
+ return Impl.enableScalableVectorization();
+ }
+
+ bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
+ Align Alignment) const override {
+ return Impl.hasActiveVectorLength(Opcode, DataType, Alignment);
}
InstructionCost getInstructionLatency(const Instruction *I) override {
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 05ef2495475f..26a696f09b3d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -564,6 +564,13 @@ public:
return 1;
}
+ InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ const Instruction *I) const {
+ return 1;
+ }
+
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind) const {
@@ -618,7 +625,6 @@ public:
case Intrinsic::coro_frame:
case Intrinsic::coro_size:
case Intrinsic::coro_suspend:
- case Intrinsic::coro_param:
case Intrinsic::coro_subfn_addr:
// These intrinsics don't actually represent code after lowering.
return 0;
@@ -702,9 +708,8 @@ public:
Callee->getFnAttribute("target-features"));
}
- bool areFunctionArgsABICompatible(const Function *Caller,
- const Function *Callee,
- SmallPtrSetImpl<Argument *> &Args) const {
+ bool areTypesABICompatible(const Function *Caller, const Function *Callee,
+ const ArrayRef<Type *> &Types) const {
return (Caller->getFnAttribute("target-cpu") ==
Callee->getFnAttribute("target-cpu")) &&
(Caller->getFnAttribute("target-features") ==
@@ -772,7 +777,12 @@ public:
bool supportsScalableVectors() const { return false; }
- bool hasActiveVectorLength() const { return false; }
+ bool enableScalableVectorization() const { return false; }
+
+ bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
+ Align Alignment) const {
+ return false;
+ }
TargetTransformInfo::VPLegalization
getVPLegalizationStrategy(const VPIntrinsic &PI) const {
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h b/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h
index 1f6be0e60eb9..012fca53a200 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -147,6 +147,9 @@ public:
/// Construct a Logger. If IncludeReward is false, then logReward or
/// logFinalReward shouldn't be called, and the reward feature won't be
/// printed out.
+ /// NOTE: the FeatureSpecs are expected to be in the same order (i.e. have
+ /// corresponding indices) with any MLModelRunner implementations
+ /// corresponding to the model being trained/logged.
Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
const TensorSpec &RewardSpec, bool IncludeReward);
@@ -246,8 +249,10 @@ public:
/// otherwise.
bool isValid() const { return !!Impl; }
-private:
+ /// Untyped access to input.
void *getUntypedInput(size_t Index);
+
+private:
std::unique_ptr<TFModelEvaluatorImpl> Impl;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/AsmParser/LLParser.h b/contrib/llvm-project/llvm/include/llvm/AsmParser/LLParser.h
index d621c232378c..62af3afbc142 100644
--- a/contrib/llvm-project/llvm/include/llvm/AsmParser/LLParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/AsmParser/LLParser.h
@@ -62,12 +62,14 @@ namespace llvm {
APFloat APFloatVal{0.0};
Constant *ConstantVal;
std::unique_ptr<Constant *[]> ConstantStructElts;
+ bool NoCFI = false;
ValID() = default;
ValID(const ValID &RHS)
: Kind(RHS.Kind), Loc(RHS.Loc), UIntVal(RHS.UIntVal), FTy(RHS.FTy),
StrVal(RHS.StrVal), StrVal2(RHS.StrVal2), APSIntVal(RHS.APSIntVal),
- APFloatVal(RHS.APFloatVal), ConstantVal(RHS.ConstantVal) {
+ APFloatVal(RHS.APFloatVal), ConstantVal(RHS.ConstantVal),
+ NoCFI(RHS.NoCFI) {
assert(!RHS.ConstantStructElts);
}
diff --git a/contrib/llvm-project/llvm/include/llvm/AsmParser/LLToken.h b/contrib/llvm-project/llvm/include/llvm/AsmParser/LLToken.h
index f8ca054863ac..78ebb35e0ea4 100644
--- a/contrib/llvm-project/llvm/include/llvm/AsmParser/LLToken.h
+++ b/contrib/llvm-project/llvm/include/llvm/AsmParser/LLToken.h
@@ -370,6 +370,7 @@ enum Kind {
kw_insertvalue,
kw_blockaddress,
kw_dso_local_equivalent,
+ kw_no_cfi,
kw_freeze,
@@ -407,6 +408,7 @@ enum Kind {
kw_noUnwind,
kw_mayThrow,
kw_hasUnknownCall,
+ kw_mustBeUnreachable,
kw_calls,
kw_callee,
kw_params,
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
index c199e933116a..065661cbd188 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
@@ -608,6 +608,8 @@ enum {
EF_HEXAGON_MACH_V67 = 0x00000067, // Hexagon V67
EF_HEXAGON_MACH_V67T = 0x00008067, // Hexagon V67T
EF_HEXAGON_MACH_V68 = 0x00000068, // Hexagon V68
+ EF_HEXAGON_MACH_V69 = 0x00000069, // Hexagon V69
+ EF_HEXAGON_MACH = 0x000003ff, // Hexagon V..
// Highest ISA version flags
EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
@@ -623,6 +625,8 @@ enum {
EF_HEXAGON_ISA_V66 = 0x00000066, // Hexagon V66 ISA
EF_HEXAGON_ISA_V67 = 0x00000067, // Hexagon V67 ISA
EF_HEXAGON_ISA_V68 = 0x00000068, // Hexagon V68 ISA
+ EF_HEXAGON_ISA_V69 = 0x00000069, // Hexagon V69 ISA
+ EF_HEXAGON_ISA = 0x000003ff, // Hexagon V.. ISA
};
// Hexagon-specific section indexes for common small data
diff --git a/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 04eb2739cbd5..7301618d337a 100644
--- a/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -381,9 +381,10 @@ enum ConstantsCodes {
CST_CODE_CE_UNOP = 25, // CE_UNOP: [opcode, opval]
CST_CODE_POISON = 26, // POISON
CST_CODE_DSO_LOCAL_EQUIVALENT = 27, // DSO_LOCAL_EQUIVALENT [gvty, gv]
- CST_CODE_INLINEASM = 28, // INLINEASM: [sideeffect|alignstack|
- // asmdialect|unwind,
- // asmstr,conststr]
+ CST_CODE_INLINEASM = 28, // INLINEASM: [sideeffect|alignstack|
+ // asmdialect|unwind,
+ // asmstr,conststr]
+ CST_CODE_NO_CFI_VALUE = 29, // NO_CFI [ fty, f ]
};
/// CastOpcodes - These are values used in the bitcode files to encode which
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h
index d7d3692877de..281ecb8de251 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -799,6 +799,11 @@ private:
/// This method decides whether the specified basic block requires a label.
bool shouldEmitLabelForBasicBlock(const MachineBasicBlock &MBB) const;
+
+protected:
+ virtual bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const {
+ return false;
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 8a603de2f91d..886b3af834d7 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -1248,7 +1248,7 @@ private:
for (auto *DeadMI : DeadInsts) {
LLVM_DEBUG(dbgs() << *DeadMI << "Is dead, eagerly deleting\n");
WrapperObserver.erasingInstr(*DeadMI);
- DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
+ DeadMI->eraseFromParent();
}
DeadInsts.clear();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 74615c73741a..044f2e22cfdd 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -192,6 +192,10 @@ private:
SmallVectorImpl<Register> &VRegs,
SmallVectorImpl<Register> &LeftoverVRegs);
+ /// Version which handles irregular sub-vector splits.
+ void extractVectorParts(Register Reg, unsigned NumElst,
+ SmallVectorImpl<Register> &VRegs);
+
/// Helper function to build a wide generic register \p DstReg of type \p
/// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
/// G_BUILD_VECTOR, G_CONCAT_VECTORS, or sequence of G_INSERT as appropriate
@@ -205,6 +209,11 @@ private:
LLT PartTy, ArrayRef<Register> PartRegs,
LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
+ /// Merge \p PartRegs with different types into \p DstReg.
+ void mergeMixedSubvectors(Register DstReg, ArrayRef<Register> PartRegs);
+
+ void appendVectorElts(SmallVectorImpl<Register> &Elts, Register Reg);
+
/// Unmerge \p SrcReg into smaller sized values, and append them to \p
/// Parts. The elements of \p Parts will be the greatest common divisor type
/// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
@@ -285,26 +294,18 @@ public:
/// vector bounds.
Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
- LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
- unsigned TypeIdx, LLT NarrowTy);
-
- /// Legalize a instruction with a vector type where each operand may have a
- /// different element type. All type indexes must have the same number of
- /// elements.
- LegalizeResult fewerElementsVectorMultiEltType(MachineInstr &MI,
- unsigned TypeIdx, LLT NarrowTy);
+ /// Handles most opcodes. Split \p MI into same instruction on sub-vectors or
+ /// scalars with \p NumElts elements (1 for scalar). Supports uneven splits:
+ /// there can be leftover sub-vector with fewer than \p NumElts or a leftover
+ /// scalar. To avoid this use moreElements first and set MI number of elements
+ /// to multiple of \p NumElts. Non-vector operands that should be used on all
+ /// sub-instructions without split are listed in \p NonVecOpIndices.
+ LegalizeResult fewerElementsVectorMultiEltType(
+ GenericMachineInstr &MI, unsigned NumElts,
+ std::initializer_list<unsigned> NonVecOpIndices = {});
- LegalizeResult fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy);
-
- LegalizeResult
- fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
-
- LegalizeResult
- fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
-
- LegalizeResult fewerElementsVectorPhi(MachineInstr &MI,
- unsigned TypeIdx, LLT NarrowTy);
+ LegalizeResult fewerElementsVectorPhi(GenericMachineInstr &MI,
+ unsigned NumElts);
LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
LLT MoreTy);
@@ -320,22 +321,9 @@ public:
unsigned TypeIdx,
LLT NarrowTy);
- LegalizeResult fewerElementsVectorMulo(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy);
-
LegalizeResult reduceLoadStoreWidth(GLoadStore &MI, unsigned TypeIdx,
LLT NarrowTy);
- /// Legalize an instruction by reducing the operation width, either by
- /// narrowing the type of the operation or by reducing the number of elements
- /// of a vector.
- /// The used strategy (narrow vs. fewerElements) is decided by \p NarrowTy.
- /// Narrow is used if the scalar type of \p NarrowTy and \p DstTy differ,
- /// fewerElements is used when the scalar type is the same but the number of
- /// elements between \p NarrowTy and \p DstTy differ.
- LegalizeResult reduceOperationWidth(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy);
-
LegalizeResult fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 68c14240ebc7..0b37539030b1 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -58,7 +58,10 @@ enum LegalizeAction : std::uint8_t {
/// The (vector) operation should be implemented by splitting it into
/// sub-vectors where the operation is legal. For example a <8 x s64> add
- /// might be implemented as 4 separate <2 x s64> adds.
+ /// might be implemented as 4 separate <2 x s64> adds. There can be a leftover
+ /// if there are not enough elements for last sub-vector e.g. <7 x s64> add
+ /// will be implemented as 3 separate <2 x s64> adds and one s64 add. Leftover
+ /// types can be avoided by doing MoreElements first.
FewerElements,
/// The (vector) operation should be implemented by widening the input
@@ -1050,6 +1053,26 @@ public:
TypeIdx, LLT::fixed_vector(MinElements, VecTy.getElementType()));
});
}
+
+ /// Set number of elements to nearest larger multiple of NumElts.
+ LegalizeRuleSet &alignNumElementsTo(unsigned TypeIdx, const LLT EltTy,
+ unsigned NumElts) {
+ typeIdx(TypeIdx);
+ return actionIf(
+ LegalizeAction::MoreElements,
+ [=](const LegalityQuery &Query) {
+ LLT VecTy = Query.Types[TypeIdx];
+ return VecTy.isVector() && VecTy.getElementType() == EltTy &&
+ (VecTy.getNumElements() % NumElts != 0);
+ },
+ [=](const LegalityQuery &Query) {
+ LLT VecTy = Query.Types[TypeIdx];
+ unsigned NewSize = alignTo(VecTy.getNumElements(), NumElts);
+ return std::make_pair(
+ TypeIdx, LLT::fixed_vector(NewSize, VecTy.getElementType()));
+ });
+ }
+
/// Limit the number of elements in EltTy vectors to at most MaxElements.
LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT EltTy,
unsigned MaxElements) {
@@ -1085,6 +1108,19 @@ public:
.clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
}
+ /// Express \p EltTy vectors strictly using vectors with \p NumElts elements
+ /// (or scalars when \p NumElts equals 1).
+ /// First pad with undef elements to nearest larger multiple of \p NumElts.
+ /// Then perform split with all sub-instructions having the same type.
+ /// Using clampMaxNumElements (non-strict) can result in leftover instruction
+ /// with different type (fewer elements than \p NumElts or scalar).
+ /// No effect if the type is not a vector.
+ LegalizeRuleSet &clampMaxNumElementsStrict(unsigned TypeIdx, const LLT EltTy,
+ unsigned NumElts) {
+ return alignNumElementsTo(TypeIdx, EltTy, NumElts)
+ .clampMaxNumElements(TypeIdx, EltTy, NumElts);
+ }
+
/// Fallback on the previous implementation. This should only be used while
/// porting a rule.
LegalizeRuleSet &fallback() {
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index a41166bb4c6b..28bb8de11762 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -361,9 +361,9 @@ m_GAdd(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, false>
m_GPtrAdd(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, false>(L, R);
}
template <typename LHS, typename RHS>
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 069f71b54328..fde0cb3cf1af 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -497,6 +497,34 @@ public:
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0,
uint32_t NumBits);
+ /// Build and insert
+ /// a, b, ..., x = G_UNMERGE_VALUES \p Op0
+ /// \p Res = G_BUILD_VECTOR a, b, ..., x, undef, ..., undef
+ ///
+ /// Pad \p Op0 with undef elements to match number of elements in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Op0 must be generic virtual registers with vector type,
+ /// same vector element type and Op0 must have fewer elements than Res.
+ ///
+ /// \return a MachineInstrBuilder for the newly created build vector instr.
+ MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res,
+ const SrcOp &Op0);
+
+ /// Build and insert
+ /// a, b, ..., x, y, z = G_UNMERGE_VALUES \p Op0
+ /// \p Res = G_BUILD_VECTOR a, b, ..., x
+ ///
+ /// Delete trailing elements in \p Op0 to match number of elements in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Op0 must be generic virtual registers with vector type,
+ /// same vector element type and Op0 must have more elements than Res.
+ ///
+ /// \return a MachineInstrBuilder for the newly created build vector instr.
+ MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res,
+ const SrcOp &Op0);
+
/// Build and insert \p Res, \p CarryOut = G_UADDO \p Op0, \p Op1
///
/// G_UADDO sets \p Res to \p Op0 + \p Op1 (truncated to the bit width) and
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 4126e2ac7b8f..8fed79585fe9 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -323,6 +323,11 @@ Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TI
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);
+LLVM_READNONE
+/// Return smallest type that covers both \p OrigTy and \p TargetTy and is
+/// multiple of TargetTy.
+LLT getCoverTy(LLT OrigTy, LLT TargetTy);
+
/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index b6d7c2487126..05a375bc251b 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -695,6 +695,7 @@ struct MachineFunction {
bool TracksRegLiveness = false;
bool HasWinCFI = false;
bool FailsVerification = false;
+ bool TracksDebugUserValues = false;
std::vector<VirtualRegisterDefinition> VirtualRegisters;
std::vector<MachineFunctionLiveIn> LiveIns;
Optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
@@ -724,6 +725,8 @@ template <> struct MappingTraits<MachineFunction> {
YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness, false);
YamlIO.mapOptional("hasWinCFI", MF.HasWinCFI, false);
YamlIO.mapOptional("failsVerification", MF.FailsVerification, false);
+ YamlIO.mapOptional("tracksDebugUserValues", MF.TracksDebugUserValues,
+ false);
YamlIO.mapOptional("registers", MF.VirtualRegisters,
std::vector<VirtualRegisterDefinition>());
YamlIO.mapOptional("liveins", MF.LiveIns,
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineCycleAnalysis.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineCycleAnalysis.h
new file mode 100644
index 000000000000..d3816bbc0780
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineCycleAnalysis.h
@@ -0,0 +1,31 @@
+//===- MachineCycleAnalysis.h - Cycle Info for Machine IR -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineCycleInfo class, which is a thin wrapper over
+// the Machine IR instance of GenericCycleInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECYCLEANALYSIS_H
+#define LLVM_CODEGEN_MACHINECYCLEANALYSIS_H
+
+#include "llvm/ADT/GenericCycleInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineSSAContext.h"
+
+namespace llvm {
+
+extern template class GenericCycleInfo<MachineSSAContext>;
+extern template class GenericCycle<MachineSSAContext>;
+
+using MachineCycleInfo = GenericCycleInfo<MachineSSAContext>;
+using MachineCycle = MachineCycleInfo::CycleT;
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINECYCLEANALYSIS_H
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h
index ec23dde0c6c0..c4767a51b094 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -152,6 +152,12 @@ public:
// FailsVerification: Means that the function is not expected to pass machine
// verification. This can be set by passes that introduce known problems that
// have not been fixed yet.
+ // TracksDebugUserValues: Without this property enabled, debug instructions
+ // such as DBG_VALUE are allowed to reference virtual registers even if those
+ // registers do not have a definition. With the property enabled virtual
+ // registers must only be used if they have a definition. This property
+ // allows earlier passes in the pipeline to skip updates of `DBG_VALUE`
+ // instructions to save compile time.
enum class Property : unsigned {
IsSSA,
NoPHIs,
@@ -163,7 +169,8 @@ public:
Selected,
TiedOpsRewritten,
FailsVerification,
- LastProperty = FailsVerification,
+ TracksDebugUserValues,
+ LastProperty = TracksDebugUserValues,
};
bool hasProperty(Property P) const {
@@ -883,7 +890,7 @@ public:
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
- MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL,
+ MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL,
bool NoImplicit = false);
/// Create a new MachineInstr which is a copy of \p Orig, identical in all
@@ -900,18 +907,20 @@ public:
///
/// Note: Does not perform target specific adjustments; consider using
/// TargetInstrInfo::duplicate() instead.
- MachineInstr &CloneMachineInstrBundle(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig);
+ MachineInstr &
+ cloneMachineInstrBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore,
+ const MachineInstr &Orig);
/// DeleteMachineInstr - Delete the given MachineInstr.
- void DeleteMachineInstr(MachineInstr *MI);
+ void deleteMachineInstr(MachineInstr *MI);
/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
/// instead of `new MachineBasicBlock'.
MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
- void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
+ void deleteMachineBasicBlock(MachineBasicBlock *MBB);
/// getMachineMemOperand - Allocate a new MachineMemOperand.
/// MachineMemOperands are owned by the MachineFunction and need not be
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h
index 0ac934e208b6..2893e138a95c 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -249,7 +249,7 @@ private:
PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
Info;
- DebugLoc debugLoc; // Source line information.
+ DebugLoc DbgLoc; // Source line information.
/// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values
/// defined by this instruction.
@@ -267,7 +267,7 @@ private:
/// This constructor create a MachineInstr and add the implicit operands.
/// It reserves space for number of operands specified by
/// MCInstrDesc. An explicit DebugLoc is supplied.
- MachineInstr(MachineFunction &, const MCInstrDesc &tid, DebugLoc dl,
+ MachineInstr(MachineFunction &, const MCInstrDesc &TID, DebugLoc DL,
bool NoImp = false);
// MachineInstrs are pool-allocated and owned by MachineFunction.
@@ -415,7 +415,7 @@ public:
void unbundleFromSucc();
/// Returns the debug location id of this MachineInstr.
- const DebugLoc &getDebugLoc() const { return debugLoc; }
+ const DebugLoc &getDebugLoc() const { return DbgLoc; }
/// Return the operand containing the offset to be used if this DBG_VALUE
/// instruction is indirect; will be an invalid register if this value is
@@ -1173,12 +1173,6 @@ public:
/// eraseFromBundle() to erase individual bundled instructions.
void eraseFromParent();
- /// Unlink 'this' from the containing basic block and delete it.
- ///
- /// For all definitions mark their uses in DBG_VALUE nodes
- /// as undefined. Otherwise like eraseFromParent().
- void eraseFromParentAndMarkDBGValuesForRemoval();
-
/// Unlink 'this' form its basic block and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
@@ -1739,13 +1733,13 @@ public:
/// Replace the instruction descriptor (thus opcode) of
/// the current instruction with a new one.
- void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
+ void setDesc(const MCInstrDesc &TID) { MCID = &TID; }
/// Replace current source information with new such.
/// Avoid using this, the constructor argument is preferable.
- void setDebugLoc(DebugLoc dl) {
- debugLoc = std::move(dl);
- assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+ void setDebugLoc(DebugLoc DL) {
+ DbgLoc = std::move(DL);
+ assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
}
/// Erase an operand from an instruction, leaving it with one
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassRegistry.def b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassRegistry.def
index d79303c771d6..e6763899a083 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassRegistry.def
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassRegistry.def
@@ -197,4 +197,6 @@ DUMMY_MACHINE_FUNCTION_PASS("regbankselect", RegBankSelectPass, ())
DUMMY_MACHINE_FUNCTION_PASS("instruction-select", InstructionSelectPass, ())
DUMMY_MACHINE_FUNCTION_PASS("reset-machine-function", ResetMachineFunctionPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machineverifier", MachineVerifierPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machine-cycles", MachineCycleInfoWrapperPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("print-machine-cycles", MachineCycleInfoPrinterPass, ())
#undef DUMMY_MACHINE_FUNCTION_PASS
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAContext.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAContext.h
new file mode 100644
index 000000000000..6dbf321bdeaa
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAContext.h
@@ -0,0 +1,58 @@
+//===- MachineSSAContext.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares a specialization of the GenericSSAContext<X>
+/// template class for Machine IR.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESSACONTEXT_H
+#define LLVM_CODEGEN_MACHINESSACONTEXT_H
+
+#include "llvm/ADT/GenericSSAContext.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Printable.h"
+
+#include <memory>
+
+namespace llvm {
+class MachineInstr;
+class MachineBasicBlock;
+class MachineFunction;
+class Register;
+template <typename, bool> class DominatorTreeBase;
+
+inline auto successors(MachineBasicBlock *BB) { return BB->successors(); }
+inline auto predecessors(MachineBasicBlock *BB) { return BB->predecessors(); }
+
+template <> class GenericSSAContext<MachineFunction> {
+ const MachineRegisterInfo *RegInfo = nullptr;
+ MachineFunction *MF;
+
+public:
+ using BlockT = MachineBasicBlock;
+ using FunctionT = MachineFunction;
+ using InstructionT = MachineInstr;
+ using ValueRefT = Register;
+ using DominatorTreeT = DominatorTreeBase<BlockT, false>;
+
+ static MachineBasicBlock *getEntryBlock(MachineFunction &F);
+
+ void setFunction(MachineFunction &Fn);
+ MachineFunction *getFunction() const { return MF; }
+
+ Printable print(MachineBasicBlock *Block) const;
+ Printable print(MachineInstr *Inst) const;
+ Printable print(Register Value) const;
+};
+
+using MachineSSAContext = GenericSSAContext<MachineFunction>;
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINESSACONTEXT_H
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAUpdater.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
index 0af356e376ab..3f0b55e0abb8 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -77,7 +77,9 @@ public:
Register GetValueAtEndOfBlock(MachineBasicBlock *BB);
/// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
- /// is live in the middle of the specified block.
+ /// is live in the middle of the specified block. If ExistingValueOnly is
+ /// true then this will only return an existing value or $noreg; otherwise new
+ /// instructions may be inserted to materialize a value.
///
/// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
/// important case: if there is a definition of the rewritten value after the
@@ -94,7 +96,8 @@ public:
/// their respective blocks. However, the use of X happens in the *middle* of
/// a block. Because of this, we need to insert a new PHI node in SomeBB to
/// merge the appropriate values, and this value isn't live out of the block.
- Register GetValueInMiddleOfBlock(MachineBasicBlock *BB);
+ Register GetValueInMiddleOfBlock(MachineBasicBlock *BB,
+ bool ExistingValueOnly = false);
/// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
/// which use their value in the corresponding predecessor. Note that this
@@ -104,7 +107,10 @@ public:
void RewriteUse(MachineOperand &U);
private:
- Register GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
+ // If ExistingValueOnly is true, will not create any new instructions. Used
+ // for debug values, which cannot modify Codegen.
+ Register GetValueAtEndOfBlockInternal(MachineBasicBlock *BB,
+ bool ExistingValueOnly = false);
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h
index 5bd5c8aa757a..e368fd7d056a 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -101,6 +101,11 @@ namespace llvm {
extern cl::opt<bool> ForceTopDown;
extern cl::opt<bool> ForceBottomUp;
extern cl::opt<bool> VerifyScheduling;
+#ifndef NDEBUG
+extern cl::opt<bool> ViewMISchedDAGs;
+#else
+extern const bool ViewMISchedDAGs;
+#endif
class AAResults;
class LiveIntervals;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h
index 5a3f4e9a23ff..d21844555f5b 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1912,10 +1912,10 @@ public:
///
/// NOTE: The function will return true for a demanded splat of UNDEF values.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
- unsigned Depth = 0);
+ unsigned Depth = 0) const;
/// Test whether \p V has a splatted value.
- bool isSplatValue(SDValue V, bool AllowUndefs = false);
+ bool isSplatValue(SDValue V, bool AllowUndefs = false) const;
/// If V is a splatted value, return the source vector and its splat index.
SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h
index f6513e8d4ea0..57456b3f6c16 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/StackProtector.h
@@ -95,7 +95,7 @@ private:
bool InStruct = false) const;
/// Check whether a stack allocation has its address taken.
- bool HasAddressTaken(const Instruction *AI, uint64_t AllocSize);
+ bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize);
/// RequiresStackProtector - Check whether or not this function needs a
/// stack protector based upon the stack protector level.
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index d43dd9fac85d..58b8e59b68d7 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1190,8 +1190,6 @@ public:
MachineInstr &NewMI1,
MachineInstr &NewMI2) const {}
- virtual void setSpecialOperandAttr(MachineInstr &MI, uint16_t Flags) const {}
-
/// Return true when a target supports MachineCombiner.
virtual bool useMachineCombiner() const { return false; }
@@ -1929,9 +1927,7 @@ public:
/// Optional target hook that returns true if \p MBB is safe to outline from,
/// and returns any target-specific information in \p Flags.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
- unsigned &Flags) const {
- return true;
- }
+ unsigned &Flags) const;
/// Insert a custom frame for outlined functions.
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h
index d862701c37d7..b2d82e0cc6e8 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -852,6 +852,20 @@ public:
return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
}
+ /// Promote the given target boolean to a target boolean of the given type.
+ /// A target boolean is an integer value, not necessarily of type i1, the bits
+ /// of which conform to getBooleanContents.
+ ///
+ /// ValVT is the type of values that produced the boolean.
+ SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
+ EVT ValVT) const {
+ SDLoc dl(Bool);
+ EVT BoolVT =
+ getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
+ ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
+ return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
+ }
+
/// Return target scheduling preference.
Sched::Preference getSchedulingPreference() const {
return SchedPreferenceInfo;
@@ -3606,6 +3620,13 @@ public:
const SelectionDAG &DAG,
bool SNaN = false,
unsigned Depth = 0) const;
+
+ /// Return true if vector \p Op has the same value across all \p DemandedElts,
+ /// indicating any elements which may be undef in the output \p UndefElts.
+ virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
+ APInt &UndefElts,
+ unsigned Depth = 0) const;
+
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
CombineLevel Level;
@@ -4460,18 +4481,15 @@ public:
/// Expand funnel shift.
/// \param N Node to expand
- /// \param Result output after conversion
- /// \returns True, if the expansion was successful, false otherwise
- bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+ /// \returns The expansion if successful, SDValue() otherwise
+ SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;
/// Expand rotations.
/// \param N Node to expand
/// \param AllowVectorOps expand vector rotate, this should only be performed
/// if the legalization is happening outside of LegalizeVectorOps
- /// \param Result output after conversion
- /// \returns True, if the expansion was successful, false otherwise
- bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result,
- SelectionDAG &DAG) const;
+ /// \returns The expansion if successful, SDValue() otherwise
+ SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;
/// Expand shift-by-parts.
/// \param N Node to expand
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/VLIWMachineScheduler.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/VLIWMachineScheduler.h
new file mode 100644
index 000000000000..a39f04f6db6c
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/VLIWMachineScheduler.h
@@ -0,0 +1,268 @@
+//===- VLIWMachineScheduler.h - VLIW-Focused Scheduling Pass ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// //
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VLIWMACHINESCHEDULER_H
+#define LLVM_CODEGEN_VLIWMACHINESCHEDULER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/MachineScheduler.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include <limits>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class DFAPacketizer;
+class RegisterClassInfo;
+class ScheduleHazardRecognizer;
+class SUnit;
+class TargetInstrInfo;
+class TargetSubtargetInfo;
+
+class VLIWResourceModel {
+protected:
+ const TargetInstrInfo *TII;
+
+ /// ResourcesModel - Represents VLIW state.
+ /// Not limited to VLIW targets per se, but assumes definition of resource
+ /// model by a target.
+ DFAPacketizer *ResourcesModel;
+
+ const TargetSchedModel *SchedModel;
+
+ /// Local packet/bundle model. Purely
+ /// internal to the MI scheduler at the time.
+ SmallVector<SUnit *> Packet;
+
+ /// Total packets created.
+ unsigned TotalPackets = 0;
+
+public:
+ VLIWResourceModel(const TargetSubtargetInfo &STI, const TargetSchedModel *SM);
+
+ virtual ~VLIWResourceModel();
+
+ virtual void reset();
+
+ virtual bool hasDependence(const SUnit *SUd, const SUnit *SUu);
+ virtual bool isResourceAvailable(SUnit *SU, bool IsTop);
+ virtual bool reserveResources(SUnit *SU, bool IsTop);
+ unsigned getTotalPackets() const { return TotalPackets; }
+ size_t getPacketInstCount() const { return Packet.size(); }
+ bool isInPacket(SUnit *SU) const { return is_contained(Packet, SU); }
+
+protected:
+ virtual DFAPacketizer *createPacketizer(const TargetSubtargetInfo &STI) const;
+};
+
+/// Extend the standard ScheduleDAGMILive to provide more context and override
+/// the top-level schedule() driver.
+class VLIWMachineScheduler : public ScheduleDAGMILive {
+public:
+ VLIWMachineScheduler(MachineSchedContext *C,
+ std::unique_ptr<MachineSchedStrategy> S)
+ : ScheduleDAGMILive(C, std::move(S)) {}
+
+ /// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
+ /// time to do some work.
+ void schedule() override;
+
+ RegisterClassInfo *getRegClassInfo() { return RegClassInfo; }
+ int getBBSize() { return BB->size(); }
+};
+
+//===----------------------------------------------------------------------===//
+// ConvergingVLIWScheduler - Implementation of a VLIW-aware
+// MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+class ConvergingVLIWScheduler : public MachineSchedStrategy {
+protected:
+ /// Store the state used by ConvergingVLIWScheduler heuristics, required
+ /// for the lifetime of one invocation of pickNode().
+ struct SchedCandidate {
+ // The best SUnit candidate.
+ SUnit *SU = nullptr;
+
+ // Register pressure values for the best candidate.
+ RegPressureDelta RPDelta;
+
+ // Best scheduling cost.
+ int SCost = 0;
+
+ SchedCandidate() = default;
+ };
+ /// Represent the type of SchedCandidate found within a single queue.
+ enum CandResult {
+ NoCand,
+ NodeOrder,
+ SingleExcess,
+ SingleCritical,
+ SingleMax,
+ MultiPressure,
+ BestCost,
+ Weak
+ };
+
+ // Constants used to denote relative importance of
+ // heuristic components for cost computation.
+ static constexpr unsigned PriorityOne = 200;
+ static constexpr unsigned PriorityTwo = 50;
+ static constexpr unsigned PriorityThree = 75;
+ static constexpr unsigned ScaleTwo = 10;
+
+ /// Each Scheduling boundary is associated with ready queues. It tracks the
+ /// current cycle in whichever direction it has moved, and maintains the state
+ /// of "hazards" and other interlocks at the current cycle.
+ struct VLIWSchedBoundary {
+ VLIWMachineScheduler *DAG = nullptr;
+ const TargetSchedModel *SchedModel = nullptr;
+
+ ReadyQueue Available;
+ ReadyQueue Pending;
+ bool CheckPending = false;
+
+ ScheduleHazardRecognizer *HazardRec = nullptr;
+ VLIWResourceModel *ResourceModel = nullptr;
+
+ unsigned CurrCycle = 0;
+ unsigned IssueCount = 0;
+ unsigned CriticalPathLength = 0;
+
+ /// MinReadyCycle - Cycle of the soonest available instruction.
+ unsigned MinReadyCycle = std::numeric_limits<unsigned>::max();
+
+ // Remember the greatest min operand latency.
+ unsigned MaxMinLatency = 0;
+
+ /// Pending queues extend the ready queues with the same ID and the
+ /// PendingFlag set.
+ VLIWSchedBoundary(unsigned ID, const Twine &Name)
+ : Available(ID, Name + ".A"),
+ Pending(ID << ConvergingVLIWScheduler::LogMaxQID, Name + ".P") {}
+
+ ~VLIWSchedBoundary();
+
+ void init(VLIWMachineScheduler *dag, const TargetSchedModel *smodel) {
+ DAG = dag;
+ SchedModel = smodel;
+ CurrCycle = 0;
+ IssueCount = 0;
+ // Initialize the critical path length limit, which used by the scheduling
+ // cost model to determine the value for scheduling an instruction. We use
+ // a slightly different heuristic for small and large functions. For small
+ // functions, it's important to use the height/depth of the instruction.
+ // For large functions, prioritizing by height or depth increases spills.
+ CriticalPathLength = DAG->getBBSize() / SchedModel->getIssueWidth();
+ if (DAG->getBBSize() < 50)
+ // We divide by two as a cheap and simple heuristic to reduce the
+ // critical path length, which increases the priority of using the graph
+ // height/depth in the scheduler's cost computation.
+ CriticalPathLength >>= 1;
+ else {
+ // For large basic blocks, we prefer a larger critical path length to
+ // decrease the priority of using the graph height/depth.
+ unsigned MaxPath = 0;
+ for (auto &SU : DAG->SUnits)
+ MaxPath = std::max(MaxPath, isTop() ? SU.getHeight() : SU.getDepth());
+ CriticalPathLength = std::max(CriticalPathLength, MaxPath) + 1;
+ }
+ }
+
+ bool isTop() const {
+ return Available.getID() == ConvergingVLIWScheduler::TopQID;
+ }
+
+ bool checkHazard(SUnit *SU);
+
+ void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+ void bumpCycle();
+
+ void bumpNode(SUnit *SU);
+
+ void releasePending();
+
+ void removeReady(SUnit *SU);
+
+ SUnit *pickOnlyChoice();
+
+ bool isLatencyBound(SUnit *SU) {
+ if (CurrCycle >= CriticalPathLength)
+ return true;
+ unsigned PathLength = isTop() ? SU->getHeight() : SU->getDepth();
+ return CriticalPathLength - CurrCycle <= PathLength;
+ }
+ };
+
+ VLIWMachineScheduler *DAG = nullptr;
+ const TargetSchedModel *SchedModel = nullptr;
+
+ // State of the top and bottom scheduled instruction boundaries.
+ VLIWSchedBoundary Top;
+ VLIWSchedBoundary Bot;
+
+ /// List of pressure sets that have a high pressure level in the region.
+ SmallVector<bool> HighPressureSets;
+
+public:
+ /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+ enum { TopQID = 1, BotQID = 2, LogMaxQID = 2 };
+
+ ConvergingVLIWScheduler() : Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+ virtual ~ConvergingVLIWScheduler() = default;
+
+ void initialize(ScheduleDAGMI *dag) override;
+
+ SUnit *pickNode(bool &IsTopNode) override;
+
+ void schedNode(SUnit *SU, bool IsTopNode) override;
+
+ void releaseTopNode(SUnit *SU) override;
+
+ void releaseBottomNode(SUnit *SU) override;
+
+ unsigned reportPackets() {
+ return Top.ResourceModel->getTotalPackets() +
+ Bot.ResourceModel->getTotalPackets();
+ }
+
+protected:
+ virtual VLIWResourceModel *
+ createVLIWResourceModel(const TargetSubtargetInfo &STI,
+ const TargetSchedModel *SchedModel) const;
+
+ SUnit *pickNodeBidrectional(bool &IsTopNode);
+
+ int pressureChange(const SUnit *SU, bool isBotUp);
+
+ virtual int SchedulingCost(ReadyQueue &Q, SUnit *SU,
+ SchedCandidate &Candidate, RegPressureDelta &Delta,
+ bool verbose);
+
+ CandResult pickNodeFromQueue(VLIWSchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate);
+#ifndef NDEBUG
+ void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
+ int Cost, PressureChange P = PressureChange());
+
+ void readyQueueVerboseDump(const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate, ReadyQueue &Q);
+#endif
+};
+
+ScheduleDAGMILive *createVLIWSched(MachineSchedContext *C);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_VLIWMACHINESCHEDULER_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
index ae1afeb668be..24714ac3d101 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -189,6 +189,11 @@ public:
DWOUnits.begin() + DWOUnits.getNumInfoUnits());
}
+ const DWARFUnitVector &getDWOUnitsVector() {
+ parseDWOUnits();
+ return DWOUnits;
+ }
+
/// Get units from .debug_types.dwo in the DWO context.
unit_iterator_range dwo_types_section_units() {
parseDWOUnits();
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
index dbc11c51a789..1794f6649827 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
@@ -15,6 +15,7 @@
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/DebugInfo/DWARF/DWARFLocationExpression.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/Support/Errc.h"
#include <cstdint>
namespace llvm {
@@ -142,6 +143,22 @@ private:
uint16_t Version;
};
+class ResolverError : public ErrorInfo<ResolverError> {
+public:
+ static char ID;
+
+ ResolverError(uint32_t Index, dwarf::LoclistEntries Kind) : Index(Index), Kind(Kind) {}
+
+ void log(raw_ostream &OS) const override;
+ std::error_code convertToErrorCode() const override {
+ return llvm::errc::invalid_argument;
+ }
+
+private:
+ uint32_t Index;
+ dwarf::LoclistEntries Kind;
+};
+
} // end namespace llvm
#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index 3c051c3ea018..130cdb8800a9 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -112,7 +112,7 @@ public:
Optional<UnitOffset> getAsRelativeReference() const;
Optional<uint64_t> getAsUnsignedConstant() const;
Optional<int64_t> getAsSignedConstant() const;
- Optional<const char *> getAsCString() const;
+ Expected<const char *> getAsCString() const;
Optional<uint64_t> getAsAddress() const;
Optional<object::SectionedAddress> getAsSectionedAddress() const;
Optional<uint64_t> getAsSectionOffset() const;
@@ -173,9 +173,14 @@ namespace dwarf {
/// \returns an optional value that contains a value if the form value
/// was valid and was a string.
inline Optional<const char *> toString(const Optional<DWARFFormValue> &V) {
- if (V)
- return V->getAsCString();
- return None;
+ if (!V)
+ return None;
+ Expected<const char*> E = V->getAsCString();
+ if (!E) {
+ consumeError(E.takeError());
+ return None;
+ }
+ return *E;
}
/// Take an optional DWARFFormValue and try to extract a string value from it.
@@ -185,10 +190,16 @@ inline Optional<const char *> toString(const Optional<DWARFFormValue> &V) {
/// was valid and was a string.
inline StringRef toStringRef(const Optional<DWARFFormValue> &V,
StringRef Default = {}) {
- if (V)
- if (auto S = V->getAsCString())
- return *S;
- return Default;
+ if (!V)
+ return Default;
+ auto S = V->getAsCString();
+ if (!S) {
+ consumeError(S.takeError());
+ return Default;
+ }
+ if (!*S)
+ return Default;
+ return *S;
}
/// Take an optional DWARFFormValue and extract a string value from it.
@@ -199,7 +210,9 @@ inline StringRef toStringRef(const Optional<DWARFFormValue> &V,
/// form value's encoding wasn't a string.
inline const char *toString(const Optional<DWARFFormValue> &V,
const char *Default) {
- return toString(V).getValueOr(Default);
+ if (auto E = toString(V))
+ return *E;
+ return Default;
}
/// Take an optional DWARFFormValue and try to extract an unsigned constant.
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 1292bfbc0591..b96a4c19758f 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -331,7 +331,7 @@ public:
Optional<object::SectionedAddress>
getAddrOffsetSectionItem(uint32_t Index) const;
- Optional<uint64_t> getStringOffsetSectionItem(uint32_t Index) const;
+ Expected<uint64_t> getStringOffsetSectionItem(uint32_t Index) const;
DWARFDataExtractor getDebugInfoExtractor() const;
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFCommon.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFCommon.h
index a922839a999d..0520b94ea3dd 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFCommon.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFCommon.h
@@ -101,6 +101,26 @@ inline bool isValidBlockSize(uint32_t Size) {
return false;
}
+/// Given the specified block size, returns the maximum possible file size.
+/// Block Size | Max File Size
+/// <= 4096 | 4GB
+/// 8192 | 8GB
+/// 16384 | 12GB
+/// 32768 | 16GB
+/// \p Size - the block size of the MSF
+inline uint64_t getMaxFileSizeFromBlockSize(uint32_t Size) {
+ switch (Size) {
+ case 8192:
+ return (uint64_t)UINT32_MAX * 2ULL;
+ case 16384:
+ return (uint64_t)UINT32_MAX * 3ULL;
+ case 32768:
+ return (uint64_t)UINT32_MAX * 4ULL;
+ default:
+ return (uint64_t)UINT32_MAX;
+ }
+}
+
// Super Block, Fpm0, Fpm1, and Block Map
inline uint32_t getMinimumBlockCount() { return 4; }
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFError.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFError.h
index 0ef30f10bc68..b84f9d7c4fee 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFError.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFError.h
@@ -16,7 +16,10 @@ namespace msf {
enum class msf_error_code {
unspecified = 1,
insufficient_buffer,
- size_overflow,
+ size_overflow_4096,
+ size_overflow_8192,
+ size_overflow_16384,
+ size_overflow_32768,
not_writable,
no_stream,
invalid_format,
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/Native/PDBFile.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/Native/PDBFile.h
index 2124e6a46ed5..c5ee73280c46 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/Native/PDBFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/Native/PDBFile.h
@@ -65,7 +65,7 @@ public:
uint32_t getStreamByteSize(uint32_t StreamIndex) const override;
ArrayRef<support::ulittle32_t>
getStreamBlockList(uint32_t StreamIndex) const override;
- uint32_t getFileSize() const;
+ uint64_t getFileSize() const;
Expected<ArrayRef<uint8_t>> getBlockData(uint32_t BlockIndex,
uint32_t NumBytes) const override;
diff --git a/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h b/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
new file mode 100644
index 000000000000..fcb8ed3a9222
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
@@ -0,0 +1,71 @@
+//===-- llvm/Debuginfod/Debuginfod.h - Debuginfod client --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declarations of getCachedOrDownloadArtifact and
+/// several convenience functions for specific artifact types:
+/// getCachedOrDownloadSource, getCachedOrDownloadExecutable, and
+/// getCachedOrDownloadDebuginfo. This file also declares
+/// getDefaultDebuginfodUrls and getDefaultDebuginfodCacheDirectory.
+///
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFOD_DEBUGINFOD_H
+#define LLVM_DEBUGINFOD_DEBUGINFOD_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+typedef ArrayRef<uint8_t> BuildIDRef;
+
+typedef SmallVector<uint8_t, 10> BuildID;
+
+/// Finds default array of Debuginfod server URLs by checking DEBUGINFOD_URLS
+/// environment variable.
+Expected<SmallVector<StringRef>> getDefaultDebuginfodUrls();
+
+/// Finds a default local file caching directory for the debuginfod client,
+/// first checking DEBUGINFOD_CACHE_PATH.
+Expected<std::string> getDefaultDebuginfodCacheDirectory();
+
+/// Finds a default timeout for debuginfod HTTP requests. Checks
+/// DEBUGINFOD_TIMEOUT environment variable, default is 90 seconds (90000 ms).
+std::chrono::milliseconds getDefaultDebuginfodTimeout();
+
+/// Fetches a specified source file by searching the default local cache
+/// directory and server URLs.
+Expected<std::string> getCachedOrDownloadSource(BuildIDRef ID,
+ StringRef SourceFilePath);
+
+/// Fetches an executable by searching the default local cache directory and
+/// server URLs.
+Expected<std::string> getCachedOrDownloadExecutable(BuildIDRef ID);
+
+/// Fetches a debug binary by searching the default local cache directory and
+/// server URLs.
+Expected<std::string> getCachedOrDownloadDebuginfo(BuildIDRef ID);
+
+/// Fetches any debuginfod artifact using the default local cache directory and
+/// server URLs.
+Expected<std::string> getCachedOrDownloadArtifact(StringRef UniqueKey,
+ StringRef UrlPath);
+
+/// Fetches any debuginfod artifact using the specified local cache directory,
+/// server URLs, and request timeout (in milliseconds). If the artifact is
+/// found, uses the UniqueKey for the local cache file.
+Expected<std::string> getCachedOrDownloadArtifact(
+ StringRef UniqueKey, StringRef UrlPath, StringRef CacheDirectoryPath,
+ ArrayRef<StringRef> DebuginfodUrls, std::chrono::milliseconds Timeout);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/HTTPClient.h b/contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h
index 3172610c2d8b..51de66629544 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/HTTPClient.h
+++ b/contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h
@@ -77,10 +77,16 @@ public:
/// A reusable client that can perform HTTPRequests through a network socket.
class HTTPClient {
+#ifdef LLVM_ENABLE_CURL
+ void *Curl = nullptr;
+#endif
+
public:
HTTPClient();
~HTTPClient();
+ static bool IsInitialized;
+
/// Returns true only if LLVM has been compiled with a working HTTPClient.
static bool isAvailable();
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h
index 2180be3341e1..b5f5636800df 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h
@@ -670,14 +670,22 @@ class MaterializationUnit {
public:
static char ID;
- MaterializationUnit(SymbolFlagsMap InitalSymbolFlags,
- SymbolStringPtr InitSymbol)
- : SymbolFlags(std::move(InitalSymbolFlags)),
- InitSymbol(std::move(InitSymbol)) {
- assert((!this->InitSymbol || this->SymbolFlags.count(this->InitSymbol)) &&
- "If set, InitSymbol should appear in InitialSymbolFlags map");
- }
+ struct Interface {
+ Interface() = default;
+ Interface(SymbolFlagsMap InitalSymbolFlags, SymbolStringPtr InitSymbol)
+ : SymbolFlags(std::move(InitalSymbolFlags)),
+ InitSymbol(std::move(InitSymbol)) {
+ assert((!this->InitSymbol || this->SymbolFlags.count(this->InitSymbol)) &&
+ "If set, InitSymbol should appear in InitialSymbolFlags map");
+ }
+
+ SymbolFlagsMap SymbolFlags;
+ SymbolStringPtr InitSymbol;
+ };
+ MaterializationUnit(Interface I)
+ : SymbolFlags(std::move(I.SymbolFlags)),
+ InitSymbol(std::move(I.InitSymbol)) {}
virtual ~MaterializationUnit() {}
/// Return the name of this materialization unit. Useful for debugging
@@ -730,7 +738,7 @@ public:
private:
void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
- static SymbolFlagsMap extractFlags(const SymbolMap &Symbols);
+ static MaterializationUnit::Interface extractFlags(const SymbolMap &Symbols);
SymbolMap Symbols;
};
@@ -772,7 +780,8 @@ public:
private:
void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
- static SymbolFlagsMap extractFlags(const SymbolAliasMap &Aliases);
+ static MaterializationUnit::Interface
+ extractFlags(const SymbolAliasMap &Aliases);
JITDylib *SourceJD = nullptr;
JITDylibLookupFlags SourceJDLookupFlags;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index 1946aed9733e..8e572ea1d0c1 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -259,12 +259,18 @@ private:
/// the containing object being added to the JITDylib.
class StaticLibraryDefinitionGenerator : public DefinitionGenerator {
public:
+ // Interface builder function for objects loaded from this archive.
+ using GetObjectFileInterface =
+ unique_function<Expected<MaterializationUnit::Interface>(
+ ExecutionSession &ES, MemoryBufferRef ObjBuffer)>;
+
/// Try to create a StaticLibraryDefinitionGenerator from the given path.
///
/// This call will succeed if the file at the given path is a static library
/// is a valid archive, otherwise it will return an error.
static Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
- Load(ObjectLayer &L, const char *FileName);
+ Load(ObjectLayer &L, const char *FileName,
+ GetObjectFileInterface GetObjFileInterface = GetObjectFileInterface());
/// Try to create a StaticLibraryDefinitionGenerator from the given path.
///
@@ -272,13 +278,15 @@ public:
/// or a MachO universal binary containing a static library that is compatible
/// with the given triple. Otherwise it will return an error.
static Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
- Load(ObjectLayer &L, const char *FileName, const Triple &TT);
+ Load(ObjectLayer &L, const char *FileName, const Triple &TT,
+ GetObjectFileInterface GetObjFileInterface = GetObjectFileInterface());
/// Try to create a StaticLibrarySearchGenerator from the given memory buffer.
/// This call will succeed if the buffer contains a valid archive, otherwise
/// it will return an error.
static Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
- Create(ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer);
+ Create(ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ GetObjectFileInterface GetObjFileInterface = GetObjectFileInterface());
Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
JITDylibLookupFlags JDLookupFlags,
@@ -287,9 +295,11 @@ public:
private:
StaticLibraryDefinitionGenerator(ObjectLayer &L,
std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ GetObjectFileInterface GetObjFileInterface,
Error &Err);
ObjectLayer &L;
+ GetObjectFileInterface GetObjFileInterface;
std::unique_ptr<MemoryBuffer> ArchiveBuffer;
std::unique_ptr<object::Archive> Archive;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Layer.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
index dccbb4be9b52..cfeedc2a0bda 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
@@ -43,8 +43,7 @@ public:
/// entries for each definition in M.
/// This constructor is useful for delegating work from one
/// IRMaterializationUnit to another.
- IRMaterializationUnit(ThreadSafeModule TSM, SymbolFlagsMap SymbolFlags,
- SymbolStringPtr InitSymbol,
+ IRMaterializationUnit(ThreadSafeModule TSM, Interface I,
SymbolNameToDefinitionMap SymbolToDefinition);
/// Return the ModuleIdentifier as the name for this MaterializationUnit.
@@ -141,14 +140,28 @@ public:
/// Returns the execution session for this layer.
ExecutionSession &getExecutionSession() { return ES; }
- /// Adds a MaterializationUnit representing the given IR to the given
- /// JITDylib.
- virtual Error add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O);
-
- Error add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O) {
- return add(JD.getDefaultResourceTracker(), std::move(O));
+ /// Adds a MaterializationUnit for the object file in the given memory buffer
+ /// to the JITDylib for the given ResourceTracker.
+ virtual Error add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O,
+ MaterializationUnit::Interface I);
+
+ /// Adds a MaterializationUnit for the object file in the given memory buffer
+ /// to the JITDylib for the given ResourceTracker. The interface for the
+ /// object will be built using the default object interface builder.
+ Error add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O);
+
+ /// Adds a MaterializationUnit for the object file in the given memory buffer
+ /// to the given JITDylib.
+ Error add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O,
+ MaterializationUnit::Interface I) {
+ return add(JD.getDefaultResourceTracker(), std::move(O), std::move(I));
}
+ /// Adds a MaterializationUnit for the object file in the given memory buffer
+ /// to the given JITDylib. The interface for the object will be built using
+ /// the default object interface builder.
+ Error add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O);
+
/// Emit should materialize the given IR.
virtual void emit(std::unique_ptr<MaterializationResponsibility> R,
std::unique_ptr<MemoryBuffer> O) = 0;
@@ -161,13 +174,13 @@ private:
/// instance) by calling 'emit' on the given ObjectLayer.
class BasicObjectLayerMaterializationUnit : public MaterializationUnit {
public:
+ /// Create using the default object interface builder function.
static Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
Create(ObjectLayer &L, std::unique_ptr<MemoryBuffer> O);
BasicObjectLayerMaterializationUnit(ObjectLayer &L,
std::unique_ptr<MemoryBuffer> O,
- SymbolFlagsMap SymbolFlags,
- SymbolStringPtr InitSymbol);
+ Interface I);
/// Return the buffer's identifier as the name for this MaterializationUnit.
StringRef getName() const override;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h
index e6a9d8945285..f81cdcef6655 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h
@@ -151,7 +151,8 @@ public:
private:
void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
- static SymbolFlagsMap extractFlags(const SymbolAliasMap &Aliases);
+ static MaterializationUnit::Interface
+ extractFlags(const SymbolAliasMap &Aliases);
LazyCallThroughManager &LCTManager;
IndirectStubsManager &ISManager;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Mangling.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Mangling.h
index e0f770a601fb..77429f4b11ee 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Mangling.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Mangling.h
@@ -55,11 +55,6 @@ public:
SymbolNameToDefinitionMap *SymbolToDefinition = nullptr);
};
-/// Returns a SymbolFlagsMap for the object file represented by the given
-/// buffer, or an error if the buffer does not contain a valid object file.
-Expected<std::pair<SymbolFlagsMap, SymbolStringPtr>>
-getObjectSymbolInfo(ExecutionSession &ES, MemoryBufferRef ObjBuffer);
-
} // End namespace orc
} // End namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h
new file mode 100644
index 000000000000..1bf09069163e
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h
@@ -0,0 +1,38 @@
+//===-- ObjectFileInterface.h - MU interface utils for objects --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities for building MaterializationUnit::Interface objects from
+// object files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTFILEINTERFACE_H
+#define LLVM_EXECUTIONENGINE_ORC_OBJECTFILEINTERFACE_H
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+/// Adds an initializer symbol to the given MU interface.
+/// The init symbol's name is guaranteed to be unique within I, and will be of
+/// the form $.<ObjFileName>.__inits.<N>, where N is some integer.
+void addInitSymbol(MaterializationUnit::Interface &I, ExecutionSession &ES,
+ StringRef ObjFileName);
+
+/// Returns a MaterializationUnit::Interface for the object file contained in
+/// the given buffer, or an error if the buffer does not contain a valid object
+/// file.
+Expected<MaterializationUnit::Interface>
+getObjectFileInterface(ExecutionSession &ES, MemoryBufferRef ObjBuffer);
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTFILEINTERFACE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 5ee379b7fcad..18d577dff497 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -180,6 +180,7 @@ def OMPC_Read : Clause<"read"> { let clangClass = "OMPReadClause"; }
def OMPC_Write : Clause<"write"> { let clangClass = "OMPWriteClause"; }
def OMPC_Update : Clause<"update"> { let clangClass = "OMPUpdateClause"; }
def OMPC_Capture : Clause<"capture"> { let clangClass = "OMPCaptureClause"; }
+def OMPC_Compare : Clause<"compare"> { let clangClass = "OMPCompareClause"; }
def OMPC_SeqCst : Clause<"seq_cst"> { let clangClass = "OMPSeqCstClause"; }
def OMPC_AcqRel : Clause<"acq_rel"> { let clangClass = "OMPAcqRelClause"; }
def OMPC_Acquire : Clause<"acquire"> { let clangClass = "OMPAcquireClause"; }
@@ -282,7 +283,7 @@ def OMPC_Allocate : Clause<"allocate"> {
def OMPC_NonTemporal : Clause<"nontemporal"> {
let clangClass = "OMPNontemporalClause";
let flangClass = "Name";
- let isValueList = true;
+ let isValueList = true;
}
def OMP_ORDER_concurrent : ClauseVal<"concurrent",1,1> {}
@@ -536,6 +537,7 @@ def OMP_Atomic : Directive<"atomic"> {
VersionedClause<OMPC_Write>,
VersionedClause<OMPC_Update>,
VersionedClause<OMPC_Capture>,
+ VersionedClause<OMPC_Compare, 51>
];
let allowedOnceClauses = [
VersionedClause<OMPC_SeqCst>,
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 563e0eed1762..9976d1961ed1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -539,24 +539,27 @@ public:
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
- /// Values representing pointers to LHS and RHS of the reduction. They are
- /// expected to atomically update the LHS to the reduced value.
+ /// Values representing pointers to LHS and RHS of the reduction, as well as
+ /// the element type of these pointers. They are expected to atomically
+ /// update the LHS to the reduced value.
using AtomicReductionGenTy =
- function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>;
+ function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
- ReductionInfo(Value *Variable, Value *PrivateVariable,
+ ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
ReductionGenTy ReductionGen,
AtomicReductionGenTy AtomicReductionGen)
- : Variable(Variable), PrivateVariable(PrivateVariable),
- ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {}
-
- /// Returns the type of the element being reduced.
- Type *getElementType() const {
- return Variable->getType()->getPointerElementType();
+ : ElementType(ElementType), Variable(Variable),
+ PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
+ AtomicReductionGen(AtomicReductionGen) {
+ assert(cast<PointerType>(Variable->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
}
+ /// Reduction element type, must match pointee type of variable.
+ Type *ElementType;
+
/// Reduction variable of pointer type.
Value *Variable;
@@ -1166,9 +1169,9 @@ private:
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X volatile?
- /// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
- /// the update expression, false otherwise.
- /// (e.g. true for X = X BinOp Expr)
+  /// \param IsXBinopExpr true if the update expression has the form
+  ///                     X = X BinOp Expr, false if it has the form
+  ///                     X = Expr BinOp X.
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
@@ -1177,7 +1180,7 @@ private:
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
- bool IsXLHSInRHSPart);
+ bool IsXBinopExpr);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
@@ -1235,9 +1238,9 @@ public:
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
- /// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
- /// the update expression, false otherwise.
- /// (e.g. true for X = X BinOp Expr)
+  /// \param IsXBinopExpr true if the update expression has the form
+  ///                     X = X BinOp Expr, false if it has the form
+  ///                     X = Expr BinOp X.
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
@@ -1245,7 +1248,7 @@ public:
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
- bool IsXLHSInRHSPart);
+ bool IsXBinopExpr);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
@@ -1269,9 +1272,9 @@ public:
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
- /// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
- /// update expression, false otherwise.
- /// (e.g. true for X = X BinOp Expr)
+  /// \param IsXBinopExpr true if the update expression has the form
+  ///                     X = X BinOp Expr, false if it has the form
+  ///                     X = Expr BinOp X.
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
@@ -1281,7 +1284,7 @@ public:
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
- bool IsPostfixUpdate, bool IsXLHSInRHSPart);
+ bool IsPostfixUpdate, bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
@@ -1408,13 +1411,10 @@ class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
- BasicBlock *Preheader = nullptr;
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
- BasicBlock *Body = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
- BasicBlock *After = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
@@ -1436,10 +1436,7 @@ public:
/// Code that must be execute before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
- BasicBlock *getPreheader() const {
- assert(isValid() && "Requires a valid canonical loop");
- return Preheader;
- }
+ BasicBlock *getPreheader() const;
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
@@ -1460,7 +1457,7 @@ public:
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
- return Body;
+ return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
}
/// Reaching the latch indicates the end of the loop body code. In the
@@ -1484,7 +1481,7 @@ public:
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
- return After;
+ return Exit->getSingleSuccessor();
}
/// Returns the llvm::Value containing the number of loop iterations. It must
@@ -1515,18 +1512,21 @@ public:
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
+ BasicBlock *Preheader = getPreheader();
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
+ BasicBlock *Body = getBody();
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
+ BasicBlock *After = getAfter();
return {After, After->begin()};
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index 8e4f7568fb9c..08bf5981cdc3 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -207,6 +207,7 @@ __OMP_RTL(__kmpc_omp_reg_task_with_affinity, false, Int32, IdentPtr, Int32,
__OMP_RTL(__kmpc_get_hardware_num_blocks, false, Int32, )
__OMP_RTL(__kmpc_get_hardware_num_threads_in_block, false, Int32, )
+__OMP_RTL(__kmpc_get_warp_size, false, Int32, )
__OMP_RTL(omp_get_thread_num, false, Int32, )
__OMP_RTL(omp_get_num_threads, false, Int32, )
@@ -455,8 +456,6 @@ __OMP_RTL(__kmpc_barrier_simple_generic, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_warp_active_thread_mask, false, Int64,)
__OMP_RTL(__kmpc_syncwarp, false, Void, Int64)
-__OMP_RTL(__kmpc_get_warp_size, false, Int32, )
-
__OMP_RTL(__kmpc_is_generic_main_thread_id, false, Int8, Int32)
__OMP_RTL(__last, false, Void, )
@@ -629,6 +628,7 @@ __OMP_RTL_ATTRS(__kmpc_omp_reg_task_with_affinity, DefaultAttrs, AttributeSet(),
__OMP_RTL_ATTRS(__kmpc_get_hardware_num_blocks, GetterAttrs, AttributeSet(), ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_get_hardware_num_threads_in_block, GetterAttrs, AttributeSet(), ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_get_warp_size, GetterAttrs, AttributeSet(), ParamAttrs())
__OMP_RTL_ATTRS(omp_get_thread_num, GetterAttrs, AttributeSet(), ParamAttrs())
__OMP_RTL_ATTRS(omp_get_num_threads, GetterAttrs, AttributeSet(), ParamAttrs())
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h b/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h
index 282be640d8be..f64f15bd38ba 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h
@@ -216,9 +216,12 @@ public:
/// if not known).
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
- /// Returns the argument numbers for the vscale_range attribute (or pair(0, 0)
- /// if not known).
- std::pair<unsigned, unsigned> getVScaleRangeArgs() const;
+ /// Returns the minimum value for the vscale_range attribute.
+ unsigned getVScaleRangeMin() const;
+
+ /// Returns the maximum value for the vscale_range attribute or None when
+ /// unknown.
+ Optional<unsigned> getVScaleRangeMax() const;
/// The Attribute is converted to a string of equivalent mnemonic. This
/// is, presumably, for writing out the mnemonics for the assembly writer.
@@ -348,7 +351,8 @@ public:
Type *getInAllocaType() const;
Type *getElementType() const;
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
- std::pair<unsigned, unsigned> getVScaleRangeArgs() const;
+ unsigned getVScaleRangeMin() const;
+ Optional<unsigned> getVScaleRangeMax() const;
std::string getAsString(bool InAttrGrp = false) const;
/// Return true if this attribute set belongs to the LLVMContext.
@@ -452,6 +456,8 @@ public:
static AttributeList get(LLVMContext &C, unsigned Index,
ArrayRef<StringRef> Kind);
static AttributeList get(LLVMContext &C, unsigned Index,
+ AttributeSet Attrs);
+ static AttributeList get(LLVMContext &C, unsigned Index,
const AttrBuilder &B);
// TODO: remove non-AtIndex versions of these methods.
@@ -938,6 +944,8 @@ class AttrBuilder {
public:
AttrBuilder() = default;
+ AttrBuilder(const AttrBuilder &) = delete;
+ AttrBuilder(AttrBuilder &&) = default;
AttrBuilder(const Attribute &A) {
addAttribute(A);
@@ -1053,9 +1061,11 @@ public:
/// doesn't exist, pair(0, 0) is returned.
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
- /// Retrieve the vscale_range args, if the vscale_range attribute exists. If
- /// it doesn't exist, pair(0, 0) is returned.
- std::pair<unsigned, unsigned> getVScaleRangeArgs() const;
+ /// Retrieve the minimum value of 'vscale_range'.
+ unsigned getVScaleRangeMin() const;
+
+ /// Retrieve the maximum value of 'vscale_range' or None when unknown.
+ Optional<unsigned> getVScaleRangeMax() const;
/// Add integer attribute with raw value (packed/encoded if necessary).
AttrBuilder &addRawIntAttr(Attribute::AttrKind Kind, uint64_t Value);
@@ -1097,7 +1107,8 @@ public:
const Optional<unsigned> &NumElemsArg);
/// This turns two ints into the form used internally in Attribute.
- AttrBuilder &addVScaleRangeAttr(unsigned MinValue, unsigned MaxValue);
+ AttrBuilder &addVScaleRangeAttr(unsigned MinValue,
+ Optional<unsigned> MaxValue);
/// Add a type attribute with the given type.
AttrBuilder &addTypeAttr(Attribute::AttrKind Kind, Type *Ty);
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Attributes.td b/contrib/llvm-project/llvm/include/llvm/IR/Attributes.td
index de25b51a6292..40c554c269ca 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Attributes.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Attributes.td
@@ -345,3 +345,6 @@ def : MergeRule<"adjustCallerStackProbeSize">;
def : MergeRule<"adjustMinLegalVectorWidth">;
def : MergeRule<"adjustNullPointerValidAttr">;
def : MergeRule<"setAND<MustProgressAttr>">;
+
+// Target dependent attributes
+include "llvm/IR/AttributesAMDGPU.td"
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/AttributesAMDGPU.td b/contrib/llvm-project/llvm/include/llvm/IR/AttributesAMDGPU.td
new file mode 100644
index 000000000000..e2a0f045b656
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/IR/AttributesAMDGPU.td
@@ -0,0 +1,14 @@
+//===- AttributesAMDGPU.td - Defines AMDGPU attributes -----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AMDGPU specific attributes.
+//
+//===----------------------------------------------------------------------===//
+
+def AMDGPUUnsafeFPAtomics : StrBoolAttr<"amdgpu-unsafe-fp-atomics">;
+def : MergeRule<"setAND<AMDGPUUnsafeFPAtomicsAttr>">;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Constants.h b/contrib/llvm-project/llvm/include/llvm/IR/Constants.h
index 71414d95d9a3..65d453861628 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Constants.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Constants.h
@@ -926,6 +926,41 @@ struct OperandTraits<DSOLocalEquivalent>
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(DSOLocalEquivalent, Value)
+/// Wrapper for a value that won't be replaced with a CFI jump table
+/// pointer in LowerTypeTestsModule.
+class NoCFIValue final : public Constant {
+ friend class Constant;
+
+ NoCFIValue(GlobalValue *GV);
+
+ void *operator new(size_t S) { return User::operator new(S, 1); }
+
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To);
+
+public:
+ /// Return a NoCFIValue for the specified function.
+ static NoCFIValue *get(GlobalValue *GV);
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ GlobalValue *getGlobalValue() const {
+ return cast<GlobalValue>(Op<0>().get());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == NoCFIValueVal;
+ }
+};
+
+template <>
+struct OperandTraits<NoCFIValue> : public FixedNumOperandTraits<NoCFIValue, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(NoCFIValue, Value)
+
//===----------------------------------------------------------------------===//
/// A constant value that is initialized with an expression using
/// other constant values.
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/DataLayout.h b/contrib/llvm-project/llvm/include/llvm/IR/DataLayout.h
index 46acd403bef1..36438fc4f4e0 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/DataLayout.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/DataLayout.h
@@ -92,14 +92,14 @@ struct LayoutAlignElem {
struct PointerAlignElem {
Align ABIAlign;
Align PrefAlign;
- uint32_t TypeByteWidth;
+ uint32_t TypeBitWidth;
uint32_t AddressSpace;
- uint32_t IndexWidth;
+ uint32_t IndexBitWidth;
/// Initializer
- static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
- Align PrefAlign, uint32_t TypeByteWidth,
- uint32_t IndexWidth);
+ static PointerAlignElem getInBits(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeBitWidth,
+ uint32_t IndexBitWidth);
bool operator==(const PointerAlignElem &rhs) const;
};
@@ -180,8 +180,9 @@ private:
/// Attempts to set the alignment of a pointer in the given address space.
/// Returns an error description on failure.
- Error setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
- uint32_t TypeByteWidth, uint32_t IndexWidth);
+ Error setPointerAlignmentInBits(uint32_t AddrSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeBitWidth,
+ uint32_t IndexBitWidth);
/// Internal helper to get alignment for integer of given bitwidth.
Align getIntegerAlignment(uint32_t BitWidth, bool abi_or_pref) const;
@@ -372,7 +373,8 @@ public:
/// the backends/clients are updated.
Align getPointerPrefAlignment(unsigned AS = 0) const;
- /// Layout pointer size
+ /// Layout pointer size in bytes, rounded up to a whole
+ /// number of bytes.
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
unsigned getPointerSize(unsigned AS = 0) const;
@@ -380,7 +382,8 @@ public:
/// Returns the maximum index size over all address spaces.
unsigned getMaxIndexSize() const;
- // Index size used for address calculation.
+ // Index size in bytes used for address calculation,
+ /// rounded up to a whole number of bytes.
unsigned getIndexSize(unsigned AS) const;
/// Return the address spaces containing non-integral pointers. Pointers in
@@ -407,7 +410,7 @@ public:
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
unsigned getPointerSizeInBits(unsigned AS = 0) const {
- return getPointerSize(AS) * 8;
+ return getPointerAlignElem(AS).TypeBitWidth;
}
/// Returns the maximum index size over all address spaces.
@@ -417,7 +420,7 @@ public:
/// Size in bits of index used for address calculation in getelementptr.
unsigned getIndexSizeInBits(unsigned AS) const {
- return getIndexSize(AS) * 8;
+ return getPointerAlignElem(AS).IndexBitWidth;
}
/// Layout pointer size, in bits, based on the type. If this function is
@@ -470,7 +473,7 @@ public:
/// For example, returns 5 for i36 and 10 for x86_fp80.
TypeSize getTypeStoreSize(Type *Ty) const {
TypeSize BaseSize = getTypeSizeInBits(Ty);
- return { (BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable() };
+ return {divideCeil(BaseSize.getKnownMinSize(), 8), BaseSize.isScalable()};
}
/// Returns the maximum number of bits that may be overwritten by
@@ -588,6 +591,12 @@ public:
/// the result element type and Offset to be the residual offset.
SmallVector<APInt> getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const;
+ /// Get single GEP index to access Offset inside ElemTy. Returns None if
+ /// index cannot be computed, e.g. because the type is not an aggregate.
+ /// ElemTy is updated to be the result element type and Offset to be the
+ /// residual offset.
+ Optional<APInt> getGEPIndexForOffset(Type *&ElemTy, APInt &Offset) const;
+
/// Returns a StructLayout object, indicating the alignment of the
/// struct, its size, and the offsets of its fields.
///
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h b/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h
index 046e9b5e809e..84ebb461ebef 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h
@@ -105,6 +105,11 @@ public:
return cast<PointerType>(Instruction::getType());
}
+ /// Return the address space for the allocation.
+ unsigned getAddressSpace() const {
+ return getType()->getAddressSpace();
+ }
+
/// Get allocation size in bits. Returns None if size can't be determined,
/// e.g. in case of a VLA.
Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
@@ -1451,6 +1456,10 @@ public:
///
static auto predicates() { return FCmpPredicates(); }
+ /// Return result of `LHS Pred RHS` comparison.
+ static bool compare(const APFloat &LHS, const APFloat &RHS,
+ FCmpInst::Predicate Pred);
+
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::FCmp;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
index d186029db8cf..647a912b72f6 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
@@ -390,8 +390,10 @@ public:
class VPIntrinsic : public IntrinsicInst {
public:
/// \brief Declares a llvm.vp.* intrinsic in \p M that matches the parameters
- /// \p Params.
+ /// \p Params. Additionally, the load and gather intrinsics require
+ /// \p ReturnType to be specified.
static Function *getDeclarationForParams(Module *M, Intrinsic::ID,
+ Type *ReturnType,
ArrayRef<Value *> Params);
static Optional<unsigned> getMaskParamPos(Intrinsic::ID IntrinsicID);
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td b/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
index 637e6d8f6cf5..da580de3dbd3 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
@@ -319,6 +319,7 @@ def llvm_v4bf16_ty : LLVMType<v4bf16>; // 4 x bfloat (__bf16)
def llvm_v8bf16_ty : LLVMType<v8bf16>; // 8 x bfloat (__bf16)
def llvm_v1f32_ty : LLVMType<v1f32>; // 1 x float
def llvm_v2f32_ty : LLVMType<v2f32>; // 2 x float
+def llvm_v3f32_ty : LLVMType<v3f32>; // 3 x float
def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float
def llvm_v8f32_ty : LLVMType<v8f32>; // 8 x float
def llvm_v16f32_ty : LLVMType<v16f32>; // 16 x float
@@ -331,6 +332,9 @@ def llvm_v16f64_ty : LLVMType<v16f64>; // 16 x double
def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here
+def llvm_externref_ty : LLVMType<externref>;
+def llvm_funcref_ty : LLVMType<funcref>;
+
//===----------------------------------------------------------------------===//
// Intrinsic Definitions.
//===----------------------------------------------------------------------===//
@@ -1013,14 +1017,15 @@ def int_codeview_annotation : DefaultAttrsIntrinsic<[], [llvm_metadata_ty],
//===------------------------ Trampoline Intrinsics -----------------------===//
//
-def int_init_trampoline : Intrinsic<[],
- [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
- GCCBuiltin<"__builtin_init_trampoline">;
+def int_init_trampoline : DefaultAttrsIntrinsic<
+ [], [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
+ ReadNone<ArgIndex<1>>, ReadNone<ArgIndex<2>>]>,
+ GCCBuiltin<"__builtin_init_trampoline">;
-def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
- [IntrReadMem, IntrArgMemOnly]>,
- GCCBuiltin<"__builtin_adjust_trampoline">;
+def int_adjust_trampoline : DefaultAttrsIntrinsic<
+ [llvm_ptr_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>,
+ GCCBuiltin<"__builtin_adjust_trampoline">;
//===------------------------ Overflow Intrinsics -------------------------===//
//
@@ -1278,10 +1283,6 @@ def int_coro_alloca_alloc : Intrinsic<[llvm_token_ty],
def int_coro_alloca_get : Intrinsic<[llvm_ptr_ty], [llvm_token_ty], []>;
def int_coro_alloca_free : Intrinsic<[], [llvm_token_ty], []>;
-def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
- [IntrNoMem, ReadNone<ArgIndex<0>>,
- ReadNone<ArgIndex<1>>]>;
-
// Coroutine Manipulation Intrinsics.
def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 0a44670de76e..2f2564702b87 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1284,7 +1284,7 @@ def int_amdgcn_s_dcache_inv :
def int_amdgcn_s_memtime :
GCCBuiltin<"__builtin_amdgcn_s_memtime">,
- Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
def int_amdgcn_s_sleep :
GCCBuiltin<"__builtin_amdgcn_s_sleep">,
@@ -1726,7 +1726,7 @@ def int_amdgcn_s_dcache_wb_vol :
def int_amdgcn_s_memrealtime :
GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
- Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;
// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
@@ -1789,9 +1789,11 @@ def int_amdgcn_global_atomic_csub : AMDGPUGlobalAtomicRtn<llvm_i32_ty>;
// uint4 llvm.amdgcn.image.bvh.intersect.ray <node_ptr>, <ray_extent>, <ray_origin>,
// <ray_dir>, <ray_inv_dir>, <texture_descr>
+// <node_ptr> is i32 or i64.
+// <ray_dir> and <ray_inv_dir> are both v3f16 or both v3f32.
def int_amdgcn_image_bvh_intersect_ray :
Intrinsic<[llvm_v4i32_ty],
- [llvm_anyint_ty, llvm_float_ty, llvm_v4f32_ty, llvm_anyvector_ty,
+ [llvm_anyint_ty, llvm_float_ty, llvm_v3f32_ty, llvm_anyvector_ty,
LLVMMatchType<1>, llvm_v4i32_ty],
[IntrReadMem, IntrWillReturn]>;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td
index 52702fe7e731..cf375b9280db 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -809,8 +809,7 @@ def int_arm_cls64: Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
def int_arm_mve_vctp8 : Intrinsic<[llvm_v16i1_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vctp16 : Intrinsic<[llvm_v8i1_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vctp32 : Intrinsic<[llvm_v4i1_ty], [llvm_i32_ty], [IntrNoMem]>;
-// vctp64 takes v4i1, to work around v2i1 not being a legal MVE type
-def int_arm_mve_vctp64 : Intrinsic<[llvm_v4i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vctp64 : Intrinsic<[llvm_v2i1_ty], [llvm_i32_ty], [IntrNoMem]>;
// v8.3-A Floating-point complex add
def int_arm_neon_vcadd_rot90 : Neon_2Arg_Intrinsic;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsHexagonDep.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsHexagonDep.td
index 6799273bf805..177114636a50 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsHexagonDep.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsHexagonDep.td
@@ -23,13 +23,6 @@ class Hexagon_i64_i64_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : A2_add
-class Hexagon_custom_i32_i32i32_Intrinsic<
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
- intr_properties>;
-
-// tag : A2_addh_h16_hh
class Hexagon_i32_i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
: Hexagon_Intrinsic<GCCIntSuffix,
@@ -37,13 +30,6 @@ class Hexagon_i32_i32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : A2_addp
-class Hexagon_custom_i64_i64i64_Intrinsic<
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
- intr_properties>;
-
-// tag : A2_addpsat
class Hexagon_i64_i64i64_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
: Hexagon_Intrinsic<GCCIntSuffix,
@@ -64,13 +50,6 @@ class Hexagon_i64_i32i32_Intrinsic<string GCCIntSuffix,
[llvm_i64_ty], [llvm_i32_ty,llvm_i32_ty],
intr_properties>;
-// tag : A2_neg
-class Hexagon_custom_i32_i32_Intrinsic<
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
- [llvm_i32_ty], [llvm_i32_ty],
- intr_properties>;
-
// tag : A2_roundsat
class Hexagon_i32_i64_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -288,20 +267,6 @@ class Hexagon_i64_i64i32i32_Intrinsic<string GCCIntSuffix,
[llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
intr_properties>;
-// tag : M2_dpmpyss_s0
-class Hexagon_custom_i64_i32i32_Intrinsic<
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
- [llvm_i64_ty], [llvm_i32_ty,llvm_i32_ty],
- intr_properties>;
-
-// tag : S2_asl_i_p
-class Hexagon_custom_i64_i64i32_Intrinsic<
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
- [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
- intr_properties>;
-
// tag : S2_insert
class Hexagon_i32_i32i32i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -351,7 +316,7 @@ class Hexagon_v32i32_v64i32_Intrinsic<string GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v64i32_ty],
intr_properties>;
-// tag : V6_lvsplatb
+// tag : V6_lvsplatw
class Hexagon_v16i32_i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
: Hexagon_Intrinsic<GCCIntSuffix,
@@ -366,44 +331,44 @@ class Hexagon_v32i32_i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_pred_and
-class Hexagon_custom_v64i1_v64i1v64i1_Intrinsic<
+class Hexagon_v64i1_v64i1v64i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v64i1_ty],
intr_properties>;
// tag : V6_pred_and
-class Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B<
+class Hexagon_v128i1_v128i1v128i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v128i1_ty],
intr_properties>;
// tag : V6_pred_not
-class Hexagon_custom_v64i1_v64i1_Intrinsic<
+class Hexagon_v64i1_v64i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_v64i1_ty],
intr_properties>;
// tag : V6_pred_not
-class Hexagon_custom_v128i1_v128i1_Intrinsic_128B<
+class Hexagon_v128i1_v128i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_v128i1_ty],
intr_properties>;
// tag : V6_pred_scalar2
-class Hexagon_custom_v64i1_i32_Intrinsic<
+class Hexagon_v64i1_i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_i32_ty],
intr_properties>;
// tag : V6_pred_scalar2
-class Hexagon_custom_v128i1_i32_Intrinsic_128B<
+class Hexagon_v128i1_i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_i32_ty],
intr_properties>;
@@ -436,27 +401,27 @@ class Hexagon_v64i32_v64i32v64i32v64i32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vS32b_nqpred_ai
-class Hexagon_custom__v64i1ptrv16i32_Intrinsic<
+class Hexagon__v64i1ptrv16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_v64i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vS32b_nqpred_ai
-class Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<
+class Hexagon__v128i1ptrv32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_v128i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
intr_properties>;
-// tag : V6_vabsb
+// tag : V6_vabs_hf
class Hexagon_v16i32_v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v16i32_ty],
intr_properties>;
-// tag : V6_vabsb
+// tag : V6_vabs_hf
class Hexagon_v32i32_v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
: Hexagon_Intrinsic<GCCIntSuffix,
@@ -477,6 +442,20 @@ class Hexagon_v32i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
intr_properties>;
+// tag : V6_vadd_sf_hf
+class Hexagon_v32i32_v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vadd_sf_hf
+class Hexagon_v64i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
// tag : V6_vaddb_dv
class Hexagon_v64i32_v64i32v64i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -485,16 +464,16 @@ class Hexagon_v64i32_v64i32v64i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vaddbnq
-class Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic<
+class Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vaddbnq
-class Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B<
+class Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
intr_properties>;
@@ -513,31 +492,17 @@ class Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B<
intr_properties>;
// tag : V6_vaddcarrysat
-class Hexagon_custom_v16i32_v16i32v16i32v64i1_Intrinsic<
+class Hexagon_v16i32_v16i32v16i32v64i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v64i1_ty],
intr_properties>;
// tag : V6_vaddcarrysat
-class Hexagon_custom_v32i32_v32i32v32i32v128i1_Intrinsic_128B<
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v128i1_ty],
- intr_properties>;
-
-// tag : V6_vaddhw
-class Hexagon_v32i32_v16i32v16i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- intr_properties>;
-
-// tag : V6_vaddhw
-class Hexagon_v64i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
+class Hexagon_v32i32_v32i32v32i32v128i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v128i1_ty],
intr_properties>;
// tag : V6_vaddhw_acc
@@ -562,72 +527,72 @@ class Hexagon_v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vandnqrt
-class Hexagon_custom_v16i32_v64i1i32_Intrinsic<
+class Hexagon_v16i32_v64i1i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v64i1_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandnqrt
-class Hexagon_custom_v32i32_v128i1i32_Intrinsic_128B<
+class Hexagon_v32i32_v128i1i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v128i1_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandnqrt_acc
-class Hexagon_custom_v16i32_v16i32v64i1i32_Intrinsic<
+class Hexagon_v16i32_v16i32v64i1i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v64i1_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandnqrt_acc
-class Hexagon_custom_v32i32_v32i32v128i1i32_Intrinsic_128B<
+class Hexagon_v32i32_v32i32v128i1i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v128i1_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandvnqv
-class Hexagon_custom_v16i32_v64i1v16i32_Intrinsic<
+class Hexagon_v16i32_v64i1v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vandvnqv
-class Hexagon_custom_v32i32_v128i1v32i32_Intrinsic_128B<
+class Hexagon_v32i32_v128i1v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty],
intr_properties>;
// tag : V6_vandvrt
-class Hexagon_custom_v64i1_v16i32i32_Intrinsic<
+class Hexagon_v64i1_v16i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandvrt
-class Hexagon_custom_v128i1_v32i32i32_Intrinsic_128B<
+class Hexagon_v128i1_v32i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandvrt_acc
-class Hexagon_custom_v64i1_v64i1v16i32i32_Intrinsic<
+class Hexagon_v64i1_v64i1v16i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_i32_ty],
intr_properties>;
// tag : V6_vandvrt_acc
-class Hexagon_custom_v128i1_v128i1v32i32i32_Intrinsic_128B<
+class Hexagon_v128i1_v128i1v32i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_i32_ty],
intr_properties>;
@@ -645,6 +610,20 @@ class Hexagon_v32i32_v32i32i32_Intrinsic<string GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
intr_properties>;
+// tag : V6_vasrvuhubrndsat
+class Hexagon_v16i32_v32i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vasrvuhubrndsat
+class Hexagon_v32i32_v64i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
// tag : V6_vassignp
class Hexagon_v64i32_v64i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -652,6 +631,20 @@ class Hexagon_v64i32_v64i32_Intrinsic<string GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v64i32_ty],
intr_properties>;
+// tag : V6_vcvt_hf_b
+class Hexagon_v32i32_v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vcvt_hf_b
+class Hexagon_v64i32_v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty],
+ intr_properties>;
+
// tag : V6_vd0
class Hexagon_v16i32__Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -687,6 +680,20 @@ class Hexagon_v64i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
intr_properties>;
+// tag : V6_vdmpy_sf_hf_acc
+class Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ intr_properties>;
+
+// tag : V6_vdmpy_sf_hf_acc
+class Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
+ list<IntrinsicProperty> intr_properties = [IntrNoMem]>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ intr_properties>;
+
// tag : V6_vdmpybus_dv
class Hexagon_v64i32_v64i32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -722,45 +729,31 @@ class Hexagon_v32i32_v32i32v64i32i32_Intrinsic<string GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
intr_properties>;
-// tag : V6_vdmpyhvsat_acc
-class Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- intr_properties>;
-
-// tag : V6_vdmpyhvsat_acc
-class Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- intr_properties>;
-
// tag : V6_veqb
-class Hexagon_custom_v64i1_v16i32v16i32_Intrinsic<
+class Hexagon_v64i1_v16i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_veqb
-class Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B<
+class Hexagon_v128i1_v32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
intr_properties>;
// tag : V6_veqb_and
-class Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic<
+class Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_veqb_and
-class Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B<
+class Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
intr_properties>;
@@ -779,16 +772,16 @@ class Hexagon__ptri32i32v32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vgathermhq
-class Hexagon_custom__ptrv64i1i32i32v16i32_Intrinsic<
+class Hexagon__ptrv64i1i32i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_ptr_ty,llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vgathermhq
-class Hexagon_custom__ptrv128i1i32i32v32i32_Intrinsic_128B<
+class Hexagon__ptrv128i1i32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_ptr_ty,llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
intr_properties>;
@@ -800,16 +793,16 @@ class Hexagon__ptri32i32v64i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vgathermhwq
-class Hexagon_custom__ptrv64i1i32i32v32i32_Intrinsic<
+class Hexagon__ptrv64i1i32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_ptr_ty,llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
intr_properties>;
// tag : V6_vgathermhwq
-class Hexagon_custom__ptrv128i1i32i32v64i32_Intrinsic_128B<
+class Hexagon__ptrv128i1i32i32v64i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_ptr_ty,llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty],
intr_properties>;
@@ -891,16 +884,16 @@ class Hexagon_v64i32_v64i32v32i32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vprefixqb
-class Hexagon_custom_v16i32_v64i1_Intrinsic<
+class Hexagon_v16i32_v64i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v16i32_ty], [llvm_v64i1_ty],
intr_properties>;
// tag : V6_vprefixqb
-class Hexagon_custom_v32i32_v128i1_Intrinsic_128B<
+class Hexagon_v32i32_v128i1_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v128i1_ty],
intr_properties>;
@@ -932,20 +925,6 @@ class Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<string GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
intr_properties>;
-// tag : V6_vsb
-class Hexagon_v32i32_v16i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty],
- intr_properties>;
-
-// tag : V6_vsb
-class Hexagon_v64i32_v32i32_Intrinsic<string GCCIntSuffix,
- list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty],
- intr_properties>;
-
// tag : V6_vscattermh
class Hexagon__i32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
@@ -961,16 +940,16 @@ class Hexagon__i32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vscattermhq
-class Hexagon_custom__v64i1i32i32v16i32v16i32_Intrinsic<
+class Hexagon__v64i1i32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vscattermhq
-class Hexagon_custom__v128i1i32i32v32i32v32i32_Intrinsic_128B<
+class Hexagon__v128i1i32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
intr_properties>;
@@ -989,30 +968,30 @@ class Hexagon__i32i32v64i32v32i32_Intrinsic<string GCCIntSuffix,
intr_properties>;
// tag : V6_vscattermhwq
-class Hexagon_custom__v64i1i32i32v32i32v16i32_Intrinsic<
+class Hexagon__v64i1i32i32v32i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vscattermhwq
-class Hexagon_custom__v128i1i32i32v64i32v32i32_Intrinsic_128B<
+class Hexagon__v128i1i32i32v64i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty,llvm_v32i32_ty],
intr_properties>;
// tag : V6_vswap
-class Hexagon_custom_v32i32_v64i1v16i32v16i32_Intrinsic<
+class Hexagon_v32i32_v64i1v16i32v16i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
intr_properties>;
// tag : V6_vswap
-class Hexagon_custom_v64i32_v128i1v32i32v32i32_Intrinsic_128B<
+class Hexagon_v64i32_v128i1v32i32v32i32_Intrinsic<string GCCIntSuffix,
list<IntrinsicProperty> intr_properties = [IntrNoMem]>
- : Hexagon_NonGCC_Intrinsic<
+ : Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
intr_properties>;
@@ -1077,7 +1056,7 @@ def int_hexagon_A2_abssat :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abssat">;
def int_hexagon_A2_add :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_add">;
def int_hexagon_A2_addh_h16_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
@@ -1116,10 +1095,10 @@ def int_hexagon_A2_addh_l16_sat_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
def int_hexagon_A2_addi :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_A2_addp :
-Hexagon_custom_i64_i64i64_Intrinsic;
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addp">;
def int_hexagon_A2_addpsat :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addpsat">;
@@ -1131,10 +1110,10 @@ def int_hexagon_A2_addsp :
Hexagon_i64_i32i64_Intrinsic<"HEXAGON_A2_addsp">;
def int_hexagon_A2_and :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_and">;
def int_hexagon_A2_andir :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_andir", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_A2_andp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_andp">;
@@ -1188,7 +1167,7 @@ def int_hexagon_A2_minup :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minup">;
def int_hexagon_A2_neg :
-Hexagon_custom_i32_i32_Intrinsic;
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_neg">;
def int_hexagon_A2_negp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_negp">;
@@ -1197,16 +1176,16 @@ def int_hexagon_A2_negsat :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_negsat">;
def int_hexagon_A2_not :
-Hexagon_custom_i32_i32_Intrinsic;
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_not">;
def int_hexagon_A2_notp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_notp">;
def int_hexagon_A2_or :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_or">;
def int_hexagon_A2_orir :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_orir", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_A2_orp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_orp">;
@@ -1230,7 +1209,7 @@ def int_hexagon_A2_satuh :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satuh">;
def int_hexagon_A2_sub :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_sub">;
def int_hexagon_A2_subh_h16_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
@@ -1269,10 +1248,10 @@ def int_hexagon_A2_subh_l16_sat_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
def int_hexagon_A2_subp :
-Hexagon_custom_i64_i64i64_Intrinsic;
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_subp">;
def int_hexagon_A2_subri :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<0>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subri", [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_hexagon_A2_subsat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subsat">;
@@ -1308,10 +1287,10 @@ def int_hexagon_A2_swiz :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_swiz">;
def int_hexagon_A2_sxtb :
-Hexagon_custom_i32_i32_Intrinsic;
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxtb">;
def int_hexagon_A2_sxth :
-Hexagon_custom_i32_i32_Intrinsic;
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxth">;
def int_hexagon_A2_sxtw :
Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_sxtw">;
@@ -1524,16 +1503,16 @@ def int_hexagon_A2_vsubws :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubws">;
def int_hexagon_A2_xor :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_xor">;
def int_hexagon_A2_xorp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_xorp">;
def int_hexagon_A2_zxtb :
-Hexagon_custom_i32_i32_Intrinsic;
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxtb">;
def int_hexagon_A2_zxth :
-Hexagon_custom_i32_i32_Intrinsic;
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxth">;
def int_hexagon_A4_andn :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_andn">;
@@ -2088,7 +2067,7 @@ def int_hexagon_M2_dpmpyss_rnd_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
def int_hexagon_M2_dpmpyss_s0 :
-Hexagon_custom_i64_i32i32_Intrinsic;
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
def int_hexagon_M2_dpmpyuu_acc_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
@@ -2097,7 +2076,7 @@ def int_hexagon_M2_dpmpyuu_nac_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
def int_hexagon_M2_dpmpyuu_s0 :
-Hexagon_custom_i64_i32i32_Intrinsic;
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
def int_hexagon_M2_hmmpyh_rs1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
@@ -2514,10 +2493,10 @@ def int_hexagon_M2_mpyd_rnd_ll_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
def int_hexagon_M2_mpyi :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyi">;
def int_hexagon_M2_mpysmi :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysmi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_M2_mpysu_up :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysu_up">;
@@ -2670,7 +2649,7 @@ def int_hexagon_M2_mpyud_nac_ll_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
def int_hexagon_M2_mpyui :
-Hexagon_custom_i32_i32i32_Intrinsic;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyui">;
def int_hexagon_M2_nacci :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_nacci">;
@@ -2958,7 +2937,7 @@ def int_hexagon_S2_addasl_rrri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_addasl_rrri", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_hexagon_S2_asl_i_p :
-Hexagon_custom_i64_i64i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_asl_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
@@ -2976,7 +2955,7 @@ def int_hexagon_S2_asl_i_p_xacc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_hexagon_S2_asl_i_r :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_asl_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
@@ -3045,7 +3024,7 @@ def int_hexagon_S2_asl_r_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vw">;
def int_hexagon_S2_asr_i_p :
-Hexagon_custom_i64_i64i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_asr_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
@@ -3066,7 +3045,7 @@ def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_asr_i_r :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_asr_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
@@ -3258,7 +3237,7 @@ def int_hexagon_S2_lsl_r_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
def int_hexagon_S2_lsr_i_p :
-Hexagon_custom_i64_i64i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_lsr_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
@@ -3276,7 +3255,7 @@ def int_hexagon_S2_lsr_i_p_xacc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_hexagon_S2_lsr_i_r :
-Hexagon_custom_i32_i32i32_Intrinsic<[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_hexagon_S2_lsr_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;
@@ -3809,70 +3788,70 @@ def int_hexagon_V6_lvsplatw_128B :
Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
def int_hexagon_V6_pred_and :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_and">;
def int_hexagon_V6_pred_and_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_and_128B">;
def int_hexagon_V6_pred_and_n :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_and_n">;
def int_hexagon_V6_pred_and_n_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;
def int_hexagon_V6_pred_not :
-Hexagon_custom_v64i1_v64i1_Intrinsic;
+Hexagon_v64i1_v64i1_Intrinsic<"HEXAGON_V6_pred_not">;
def int_hexagon_V6_pred_not_128B :
-Hexagon_custom_v128i1_v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1_Intrinsic<"HEXAGON_V6_pred_not_128B">;
def int_hexagon_V6_pred_or :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_or">;
def int_hexagon_V6_pred_or_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_or_128B">;
def int_hexagon_V6_pred_or_n :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_or_n">;
def int_hexagon_V6_pred_or_n_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;
def int_hexagon_V6_pred_scalar2 :
-Hexagon_custom_v64i1_i32_Intrinsic;
+Hexagon_v64i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2">;
def int_hexagon_V6_pred_scalar2_128B :
-Hexagon_custom_v128i1_i32_Intrinsic_128B;
+Hexagon_v128i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;
def int_hexagon_V6_pred_xor :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_xor">;
def int_hexagon_V6_pred_xor_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
def int_hexagon_V6_vS32b_nqpred_ai :
-Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_nqpred_ai_128B :
-Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai_128B", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_nt_nqpred_ai :
-Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
-Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai_128B", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_nt_qpred_ai :
-Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
-Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai_128B", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_qpred_ai :
-Hexagon_custom__v64i1ptrv16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai", [IntrWriteMem]>;
def int_hexagon_V6_vS32b_qpred_ai_128B :
-Hexagon_custom__v128i1ptrv32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai_128B", [IntrWriteMem]>;
def int_hexagon_V6_vabsdiffh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffh">;
@@ -3935,16 +3914,16 @@ def int_hexagon_V6_vaddb_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
def int_hexagon_V6_vaddbnq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbnq">;
def int_hexagon_V6_vaddbnq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;
def int_hexagon_V6_vaddbq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbq">;
def int_hexagon_V6_vaddbq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
def int_hexagon_V6_vaddh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddh">;
@@ -3959,16 +3938,16 @@ def int_hexagon_V6_vaddh_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
def int_hexagon_V6_vaddhnq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhnq">;
def int_hexagon_V6_vaddhnq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;
def int_hexagon_V6_vaddhq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhq">;
def int_hexagon_V6_vaddhq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhq_128B">;
def int_hexagon_V6_vaddhsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhsat">;
@@ -4037,16 +4016,16 @@ def int_hexagon_V6_vaddw_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
def int_hexagon_V6_vaddwnq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwnq">;
def int_hexagon_V6_vaddwnq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;
def int_hexagon_V6_vaddwq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwq">;
def int_hexagon_V6_vaddwq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwq_128B">;
def int_hexagon_V6_vaddwsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwsat">;
@@ -4079,28 +4058,28 @@ def int_hexagon_V6_vand_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vand_128B">;
def int_hexagon_V6_vandqrt :
-Hexagon_custom_v16i32_v64i1i32_Intrinsic;
+Hexagon_v16i32_v64i1i32_Intrinsic<"HEXAGON_V6_vandqrt">;
def int_hexagon_V6_vandqrt_128B :
-Hexagon_custom_v32i32_v128i1i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1i32_Intrinsic<"HEXAGON_V6_vandqrt_128B">;
def int_hexagon_V6_vandqrt_acc :
-Hexagon_custom_v16i32_v16i32v64i1i32_Intrinsic;
+Hexagon_v16i32_v16i32v64i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc">;
def int_hexagon_V6_vandqrt_acc_128B :
-Hexagon_custom_v32i32_v32i32v128i1i32_Intrinsic_128B;
+Hexagon_v32i32_v32i32v128i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;
def int_hexagon_V6_vandvrt :
-Hexagon_custom_v64i1_v16i32i32_Intrinsic;
+Hexagon_v64i1_v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt">;
def int_hexagon_V6_vandvrt_128B :
-Hexagon_custom_v128i1_v32i32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_128B">;
def int_hexagon_V6_vandvrt_acc :
-Hexagon_custom_v64i1_v64i1v16i32i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc">;
def int_hexagon_V6_vandvrt_acc_128B :
-Hexagon_custom_v128i1_v128i1v32i32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
def int_hexagon_V6_vaslh :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslh">;
@@ -4439,220 +4418,220 @@ def int_hexagon_V6_vdsaduh_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
def int_hexagon_V6_veqb :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb">;
def int_hexagon_V6_veqb_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_128B">;
def int_hexagon_V6_veqb_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_and">;
def int_hexagon_V6_veqb_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_and_128B">;
def int_hexagon_V6_veqb_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_or">;
def int_hexagon_V6_veqb_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_or_128B">;
def int_hexagon_V6_veqb_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_xor">;
def int_hexagon_V6_veqb_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;
def int_hexagon_V6_veqh :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh">;
def int_hexagon_V6_veqh_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_128B">;
def int_hexagon_V6_veqh_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_and">;
def int_hexagon_V6_veqh_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_and_128B">;
def int_hexagon_V6_veqh_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_or">;
def int_hexagon_V6_veqh_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_or_128B">;
def int_hexagon_V6_veqh_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_xor">;
def int_hexagon_V6_veqh_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;
def int_hexagon_V6_veqw :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw">;
def int_hexagon_V6_veqw_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_128B">;
def int_hexagon_V6_veqw_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_and">;
def int_hexagon_V6_veqw_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_and_128B">;
def int_hexagon_V6_veqw_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_or">;
def int_hexagon_V6_veqw_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_or_128B">;
def int_hexagon_V6_veqw_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_xor">;
def int_hexagon_V6_veqw_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;
def int_hexagon_V6_vgtb :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb">;
def int_hexagon_V6_vgtb_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_128B">;
def int_hexagon_V6_vgtb_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_and">;
def int_hexagon_V6_vgtb_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;
def int_hexagon_V6_vgtb_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_or">;
def int_hexagon_V6_vgtb_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;
def int_hexagon_V6_vgtb_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_xor">;
def int_hexagon_V6_vgtb_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;
def int_hexagon_V6_vgth :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth">;
def int_hexagon_V6_vgth_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_128B">;
def int_hexagon_V6_vgth_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_and">;
def int_hexagon_V6_vgth_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_and_128B">;
def int_hexagon_V6_vgth_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_or">;
def int_hexagon_V6_vgth_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_or_128B">;
def int_hexagon_V6_vgth_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_xor">;
def int_hexagon_V6_vgth_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;
def int_hexagon_V6_vgtub :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub">;
def int_hexagon_V6_vgtub_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_128B">;
def int_hexagon_V6_vgtub_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_and">;
def int_hexagon_V6_vgtub_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;
def int_hexagon_V6_vgtub_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_or">;
def int_hexagon_V6_vgtub_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;
def int_hexagon_V6_vgtub_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_xor">;
def int_hexagon_V6_vgtub_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;
def int_hexagon_V6_vgtuh :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh">;
def int_hexagon_V6_vgtuh_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_128B">;
def int_hexagon_V6_vgtuh_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_and">;
def int_hexagon_V6_vgtuh_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;
def int_hexagon_V6_vgtuh_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_or">;
def int_hexagon_V6_vgtuh_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;
def int_hexagon_V6_vgtuh_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_xor">;
def int_hexagon_V6_vgtuh_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;
def int_hexagon_V6_vgtuw :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw">;
def int_hexagon_V6_vgtuw_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_128B">;
def int_hexagon_V6_vgtuw_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_and">;
def int_hexagon_V6_vgtuw_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;
def int_hexagon_V6_vgtuw_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_or">;
def int_hexagon_V6_vgtuw_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;
def int_hexagon_V6_vgtuw_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_xor">;
def int_hexagon_V6_vgtuw_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;
def int_hexagon_V6_vgtw :
-Hexagon_custom_v64i1_v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw">;
def int_hexagon_V6_vgtw_128B :
-Hexagon_custom_v128i1_v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_128B">;
def int_hexagon_V6_vgtw_and :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_and">;
def int_hexagon_V6_vgtw_and_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;
def int_hexagon_V6_vgtw_or :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_or">;
def int_hexagon_V6_vgtw_or_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;
def int_hexagon_V6_vgtw_xor :
-Hexagon_custom_v64i1_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_xor">;
def int_hexagon_V6_vgtw_xor_128B :
-Hexagon_custom_v128i1_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;
def int_hexagon_V6_vinsertwr :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vinsertwr">;
@@ -5051,10 +5030,10 @@ def int_hexagon_V6_vmpyuhv_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
def int_hexagon_V6_vmux :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vmux">;
def int_hexagon_V6_vmux_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vmux_128B">;
def int_hexagon_V6_vnavgh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgh">;
@@ -5375,16 +5354,16 @@ def int_hexagon_V6_vsubb_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
def int_hexagon_V6_vsubbnq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbnq">;
def int_hexagon_V6_vsubbnq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;
def int_hexagon_V6_vsubbq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbq">;
def int_hexagon_V6_vsubbq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbq_128B">;
def int_hexagon_V6_vsubh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubh">;
@@ -5399,16 +5378,16 @@ def int_hexagon_V6_vsubh_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
def int_hexagon_V6_vsubhnq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhnq">;
def int_hexagon_V6_vsubhnq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;
def int_hexagon_V6_vsubhq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhq">;
def int_hexagon_V6_vsubhq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhq_128B">;
def int_hexagon_V6_vsubhsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhsat">;
@@ -5477,16 +5456,16 @@ def int_hexagon_V6_vsubw_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
def int_hexagon_V6_vsubwnq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwnq">;
def int_hexagon_V6_vsubwnq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;
def int_hexagon_V6_vsubwq :
-Hexagon_custom_v16i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwq">;
def int_hexagon_V6_vsubwq_128B :
-Hexagon_custom_v32i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwq_128B">;
def int_hexagon_V6_vsubwsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwsat">;
@@ -5501,10 +5480,10 @@ def int_hexagon_V6_vsubwsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
def int_hexagon_V6_vswap :
-Hexagon_custom_v32i32_v64i1v16i32v16i32_Intrinsic;
+Hexagon_v32i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vswap">;
def int_hexagon_V6_vswap_128B :
-Hexagon_custom_v64i32_v128i1v32i32v32i32_Intrinsic_128B;
+Hexagon_v64i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vswap_128B">;
def int_hexagon_V6_vtmpyb :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb">;
@@ -5611,22 +5590,22 @@ def int_hexagon_V6_lvsplath_128B :
Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplath_128B">;
def int_hexagon_V6_pred_scalar2v2 :
-Hexagon_custom_v64i1_i32_Intrinsic;
+Hexagon_v64i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2">;
def int_hexagon_V6_pred_scalar2v2_128B :
-Hexagon_custom_v128i1_i32_Intrinsic_128B;
+Hexagon_v128i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">;
def int_hexagon_V6_shuffeqh :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_shuffeqh">;
def int_hexagon_V6_shuffeqh_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_shuffeqh_128B">;
def int_hexagon_V6_shuffeqw :
-Hexagon_custom_v64i1_v64i1v64i1_Intrinsic;
+Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_shuffeqw">;
def int_hexagon_V6_shuffeqw_128B :
-Hexagon_custom_v128i1_v128i1v128i1_Intrinsic_128B;
+Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_shuffeqw_128B">;
def int_hexagon_V6_vaddbsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbsat">;
@@ -5695,28 +5674,28 @@ def int_hexagon_V6_vadduwsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;
def int_hexagon_V6_vandnqrt :
-Hexagon_custom_v16i32_v64i1i32_Intrinsic;
+Hexagon_v16i32_v64i1i32_Intrinsic<"HEXAGON_V6_vandnqrt">;
def int_hexagon_V6_vandnqrt_128B :
-Hexagon_custom_v32i32_v128i1i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_128B">;
def int_hexagon_V6_vandnqrt_acc :
-Hexagon_custom_v16i32_v16i32v64i1i32_Intrinsic;
+Hexagon_v16i32_v16i32v64i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc">;
def int_hexagon_V6_vandnqrt_acc_128B :
-Hexagon_custom_v32i32_v32i32v128i1i32_Intrinsic_128B;
+Hexagon_v32i32_v32i32v128i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">;
def int_hexagon_V6_vandvnqv :
-Hexagon_custom_v16i32_v64i1v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32_Intrinsic<"HEXAGON_V6_vandvnqv">;
def int_hexagon_V6_vandvnqv_128B :
-Hexagon_custom_v32i32_v128i1v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32_Intrinsic<"HEXAGON_V6_vandvnqv_128B">;
def int_hexagon_V6_vandvqv :
-Hexagon_custom_v16i32_v64i1v16i32_Intrinsic;
+Hexagon_v16i32_v64i1v16i32_Intrinsic<"HEXAGON_V6_vandvqv">;
def int_hexagon_V6_vandvqv_128B :
-Hexagon_custom_v32i32_v128i1v32i32_Intrinsic_128B;
+Hexagon_v32i32_v128i1v32i32_Intrinsic<"HEXAGON_V6_vandvqv_128B">;
def int_hexagon_V6_vasrhbsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat">;
@@ -5961,10 +5940,10 @@ def int_hexagon_V6_vgathermh_128B :
Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermh_128B", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermhq :
-Hexagon_custom__ptrv64i1i32i32v16i32_Intrinsic<[IntrArgMemOnly]>;
+Hexagon__ptrv64i1i32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermhq", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermhq_128B :
-Hexagon_custom__ptrv128i1i32i32v32i32_Intrinsic_128B<[IntrArgMemOnly]>;
+Hexagon__ptrv128i1i32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhq_128B", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermhw :
Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhw", [IntrArgMemOnly]>;
@@ -5973,10 +5952,10 @@ def int_hexagon_V6_vgathermhw_128B :
Hexagon__ptri32i32v64i32_Intrinsic<"HEXAGON_V6_vgathermhw_128B", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermhwq :
-Hexagon_custom__ptrv64i1i32i32v32i32_Intrinsic<[IntrArgMemOnly]>;
+Hexagon__ptrv64i1i32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhwq", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermhwq_128B :
-Hexagon_custom__ptrv128i1i32i32v64i32_Intrinsic_128B<[IntrArgMemOnly]>;
+Hexagon__ptrv128i1i32i32v64i32_Intrinsic<"HEXAGON_V6_vgathermhwq_128B", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermw :
Hexagon__ptri32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermw", [IntrArgMemOnly]>;
@@ -5985,10 +5964,10 @@ def int_hexagon_V6_vgathermw_128B :
Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermw_128B", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermwq :
-Hexagon_custom__ptrv64i1i32i32v16i32_Intrinsic<[IntrArgMemOnly]>;
+Hexagon__ptrv64i1i32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermwq", [IntrArgMemOnly]>;
def int_hexagon_V6_vgathermwq_128B :
-Hexagon_custom__ptrv128i1i32i32v32i32_Intrinsic_128B<[IntrArgMemOnly]>;
+Hexagon__ptrv128i1i32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermwq_128B", [IntrArgMemOnly]>;
def int_hexagon_V6_vlut4 :
Hexagon_v16i32_v16i32i64_Intrinsic<"HEXAGON_V6_vlut4">;
@@ -6051,22 +6030,22 @@ def int_hexagon_V6_vnavgb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgb_128B">;
def int_hexagon_V6_vprefixqb :
-Hexagon_custom_v16i32_v64i1_Intrinsic;
+Hexagon_v16i32_v64i1_Intrinsic<"HEXAGON_V6_vprefixqb">;
def int_hexagon_V6_vprefixqb_128B :
-Hexagon_custom_v32i32_v128i1_Intrinsic_128B;
+Hexagon_v32i32_v128i1_Intrinsic<"HEXAGON_V6_vprefixqb_128B">;
def int_hexagon_V6_vprefixqh :
-Hexagon_custom_v16i32_v64i1_Intrinsic;
+Hexagon_v16i32_v64i1_Intrinsic<"HEXAGON_V6_vprefixqh">;
def int_hexagon_V6_vprefixqh_128B :
-Hexagon_custom_v32i32_v128i1_Intrinsic_128B;
+Hexagon_v32i32_v128i1_Intrinsic<"HEXAGON_V6_vprefixqh_128B">;
def int_hexagon_V6_vprefixqw :
-Hexagon_custom_v16i32_v64i1_Intrinsic;
+Hexagon_v16i32_v64i1_Intrinsic<"HEXAGON_V6_vprefixqw">;
def int_hexagon_V6_vprefixqw_128B :
-Hexagon_custom_v32i32_v128i1_Intrinsic_128B;
+Hexagon_v32i32_v128i1_Intrinsic<"HEXAGON_V6_vprefixqw_128B">;
def int_hexagon_V6_vscattermh :
Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermh", [IntrWriteMem]>;
@@ -6081,10 +6060,10 @@ def int_hexagon_V6_vscattermh_add_128B :
Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermh_add_128B", [IntrWriteMem]>;
def int_hexagon_V6_vscattermhq :
-Hexagon_custom__v64i1i32i32v16i32v16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhq", [IntrWriteMem]>;
def int_hexagon_V6_vscattermhq_128B :
-Hexagon_custom__v128i1i32i32v32i32v32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhq_128B", [IntrWriteMem]>;
def int_hexagon_V6_vscattermhw :
Hexagon__i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhw", [IntrWriteMem]>;
@@ -6099,10 +6078,10 @@ def int_hexagon_V6_vscattermhw_add_128B :
Hexagon__i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B", [IntrWriteMem]>;
def int_hexagon_V6_vscattermhwq :
-Hexagon_custom__v64i1i32i32v32i32v16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhwq", [IntrWriteMem]>;
def int_hexagon_V6_vscattermhwq_128B :
-Hexagon_custom__v128i1i32i32v64i32v32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhwq_128B", [IntrWriteMem]>;
def int_hexagon_V6_vscattermw :
Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermw", [IntrWriteMem]>;
@@ -6117,18 +6096,18 @@ def int_hexagon_V6_vscattermw_add_128B :
Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermw_add_128B", [IntrWriteMem]>;
def int_hexagon_V6_vscattermwq :
-Hexagon_custom__v64i1i32i32v16i32v16i32_Intrinsic<[IntrWriteMem]>;
+Hexagon__v64i1i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermwq", [IntrWriteMem]>;
def int_hexagon_V6_vscattermwq_128B :
-Hexagon_custom__v128i1i32i32v32i32v32i32_Intrinsic_128B<[IntrWriteMem]>;
+Hexagon__v128i1i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermwq_128B", [IntrWriteMem]>;
// V66 HVX Instructions.
def int_hexagon_V6_vaddcarrysat :
-Hexagon_custom_v16i32_v16i32v16i32v64i1_Intrinsic;
+Hexagon_v16i32_v16i32v16i32v64i1_Intrinsic<"HEXAGON_V6_vaddcarrysat">;
def int_hexagon_V6_vaddcarrysat_128B :
-Hexagon_custom_v32i32_v32i32v32i32v128i1_Intrinsic_128B;
+Hexagon_v32i32_v32i32v32i32v128i1_Intrinsic<"HEXAGON_V6_vaddcarrysat_128B">;
def int_hexagon_V6_vasr_into :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vasr_into">;
@@ -6174,3 +6153,437 @@ Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_v6mpyvubs10_vxx", [In
def int_hexagon_V6_v6mpyvubs10_vxx_128B :
Hexagon_v64i32_v64i32v64i32v64i32i32_Intrinsic<"HEXAGON_V6_v6mpyvubs10_vxx_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+def int_hexagon_V6_vabs_hf :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabs_hf">;
+
+def int_hexagon_V6_vabs_hf_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabs_hf_128B">;
+
+def int_hexagon_V6_vabs_sf :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabs_sf">;
+
+def int_hexagon_V6_vabs_sf_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabs_sf_128B">;
+
+def int_hexagon_V6_vadd_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_hf">;
+
+def int_hexagon_V6_vadd_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_hf_128B">;
+
+def int_hexagon_V6_vadd_hf_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_hf_hf">;
+
+def int_hexagon_V6_vadd_hf_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_hf_hf_128B">;
+
+def int_hexagon_V6_vadd_qf16 :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf16">;
+
+def int_hexagon_V6_vadd_qf16_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf16_128B">;
+
+def int_hexagon_V6_vadd_qf16_mix :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf16_mix">;
+
+def int_hexagon_V6_vadd_qf16_mix_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf16_mix_128B">;
+
+def int_hexagon_V6_vadd_qf32 :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf32">;
+
+def int_hexagon_V6_vadd_qf32_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf32_128B">;
+
+def int_hexagon_V6_vadd_qf32_mix :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf32_mix">;
+
+def int_hexagon_V6_vadd_qf32_mix_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf32_mix_128B">;
+
+def int_hexagon_V6_vadd_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf">;
+
+def int_hexagon_V6_vadd_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_128B">;
+
+def int_hexagon_V6_vadd_sf_hf :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf_hf">;
+
+def int_hexagon_V6_vadd_sf_hf_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_hf_128B">;
+
+def int_hexagon_V6_vadd_sf_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf_sf">;
+
+def int_hexagon_V6_vadd_sf_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_sf_128B">;
+
+def int_hexagon_V6_vassign_fp :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vassign_fp">;
+
+def int_hexagon_V6_vassign_fp_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassign_fp_128B">;
+
+def int_hexagon_V6_vconv_hf_qf16 :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf16">;
+
+def int_hexagon_V6_vconv_hf_qf16_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf16_128B">;
+
+def int_hexagon_V6_vconv_hf_qf32 :
+Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf32">;
+
+def int_hexagon_V6_vconv_hf_qf32_128B :
+Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf32_128B">;
+
+def int_hexagon_V6_vconv_sf_qf32 :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_sf_qf32">;
+
+def int_hexagon_V6_vconv_sf_qf32_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_sf_qf32_128B">;
+
+def int_hexagon_V6_vcvt_b_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_b_hf">;
+
+def int_hexagon_V6_vcvt_b_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_b_hf_128B">;
+
+def int_hexagon_V6_vcvt_h_hf :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_h_hf">;
+
+def int_hexagon_V6_vcvt_h_hf_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_h_hf_128B">;
+
+def int_hexagon_V6_vcvt_hf_b :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_b">;
+
+def int_hexagon_V6_vcvt_hf_b_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_b_128B">;
+
+def int_hexagon_V6_vcvt_hf_h :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_h">;
+
+def int_hexagon_V6_vcvt_hf_h_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_h_128B">;
+
+def int_hexagon_V6_vcvt_hf_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_sf">;
+
+def int_hexagon_V6_vcvt_hf_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_sf_128B">;
+
+def int_hexagon_V6_vcvt_hf_ub :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_ub">;
+
+def int_hexagon_V6_vcvt_hf_ub_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_ub_128B">;
+
+def int_hexagon_V6_vcvt_hf_uh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_uh">;
+
+def int_hexagon_V6_vcvt_hf_uh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_uh_128B">;
+
+def int_hexagon_V6_vcvt_sf_hf :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_sf_hf">;
+
+def int_hexagon_V6_vcvt_sf_hf_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_sf_hf_128B">;
+
+def int_hexagon_V6_vcvt_ub_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_ub_hf">;
+
+def int_hexagon_V6_vcvt_ub_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_ub_hf_128B">;
+
+def int_hexagon_V6_vcvt_uh_hf :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_uh_hf">;
+
+def int_hexagon_V6_vcvt_uh_hf_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_uh_hf_128B">;
+
+def int_hexagon_V6_vdmpy_sf_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf">;
+
+def int_hexagon_V6_vdmpy_sf_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf_128B">;
+
+def int_hexagon_V6_vdmpy_sf_hf_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf_acc">;
+
+def int_hexagon_V6_vdmpy_sf_hf_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf_acc_128B">;
+
+def int_hexagon_V6_vfmax_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmax_hf">;
+
+def int_hexagon_V6_vfmax_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmax_hf_128B">;
+
+def int_hexagon_V6_vfmax_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmax_sf">;
+
+def int_hexagon_V6_vfmax_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmax_sf_128B">;
+
+def int_hexagon_V6_vfmin_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmin_hf">;
+
+def int_hexagon_V6_vfmin_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmin_hf_128B">;
+
+def int_hexagon_V6_vfmin_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmin_sf">;
+
+def int_hexagon_V6_vfmin_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmin_sf_128B">;
+
+def int_hexagon_V6_vfneg_hf :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vfneg_hf">;
+
+def int_hexagon_V6_vfneg_hf_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vfneg_hf_128B">;
+
+def int_hexagon_V6_vfneg_sf :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vfneg_sf">;
+
+def int_hexagon_V6_vfneg_sf_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vfneg_sf_128B">;
+
+def int_hexagon_V6_vgthf :
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf">;
+
+def int_hexagon_V6_vgthf_128B :
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_128B">;
+
+def int_hexagon_V6_vgthf_and :
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf_and">;
+
+def int_hexagon_V6_vgthf_and_128B :
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_and_128B">;
+
+def int_hexagon_V6_vgthf_or :
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf_or">;
+
+def int_hexagon_V6_vgthf_or_128B :
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_or_128B">;
+
+def int_hexagon_V6_vgthf_xor :
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf_xor">;
+
+def int_hexagon_V6_vgthf_xor_128B :
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_xor_128B">;
+
+def int_hexagon_V6_vgtsf :
+Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf">;
+
+def int_hexagon_V6_vgtsf_128B :
+Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_128B">;
+
+def int_hexagon_V6_vgtsf_and :
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf_and">;
+
+def int_hexagon_V6_vgtsf_and_128B :
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_and_128B">;
+
+def int_hexagon_V6_vgtsf_or :
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf_or">;
+
+def int_hexagon_V6_vgtsf_or_128B :
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_or_128B">;
+
+def int_hexagon_V6_vgtsf_xor :
+Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf_xor">;
+
+def int_hexagon_V6_vgtsf_xor_128B :
+Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_xor_128B">;
+
+def int_hexagon_V6_vmax_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmax_hf">;
+
+def int_hexagon_V6_vmax_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmax_hf_128B">;
+
+def int_hexagon_V6_vmax_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmax_sf">;
+
+def int_hexagon_V6_vmax_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmax_sf_128B">;
+
+def int_hexagon_V6_vmin_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmin_hf">;
+
+def int_hexagon_V6_vmin_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmin_hf_128B">;
+
+def int_hexagon_V6_vmin_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmin_sf">;
+
+def int_hexagon_V6_vmin_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmin_sf_128B">;
+
+def int_hexagon_V6_vmpy_hf_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf">;
+
+def int_hexagon_V6_vmpy_hf_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf_128B">;
+
+def int_hexagon_V6_vmpy_hf_hf_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf_acc">;
+
+def int_hexagon_V6_vmpy_hf_hf_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf_acc_128B">;
+
+def int_hexagon_V6_vmpy_qf16 :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf16">;
+
+def int_hexagon_V6_vmpy_qf16_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_128B">;
+
+def int_hexagon_V6_vmpy_qf16_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_hf">;
+
+def int_hexagon_V6_vmpy_qf16_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_hf_128B">;
+
+def int_hexagon_V6_vmpy_qf16_mix_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_mix_hf">;
+
+def int_hexagon_V6_vmpy_qf16_mix_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_mix_hf_128B">;
+
+def int_hexagon_V6_vmpy_qf32 :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32">;
+
+def int_hexagon_V6_vmpy_qf32_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_128B">;
+
+def int_hexagon_V6_vmpy_qf32_hf :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_hf">;
+
+def int_hexagon_V6_vmpy_qf32_hf_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_hf_128B">;
+
+def int_hexagon_V6_vmpy_qf32_mix_hf :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_mix_hf">;
+
+def int_hexagon_V6_vmpy_qf32_mix_hf_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_mix_hf_128B">;
+
+def int_hexagon_V6_vmpy_qf32_qf16 :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_qf16">;
+
+def int_hexagon_V6_vmpy_qf32_qf16_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_qf16_128B">;
+
+def int_hexagon_V6_vmpy_qf32_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_sf">;
+
+def int_hexagon_V6_vmpy_qf32_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_sf_128B">;
+
+def int_hexagon_V6_vmpy_sf_hf :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf">;
+
+def int_hexagon_V6_vmpy_sf_hf_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf_128B">;
+
+def int_hexagon_V6_vmpy_sf_hf_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf_acc">;
+
+def int_hexagon_V6_vmpy_sf_hf_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf_acc_128B">;
+
+def int_hexagon_V6_vmpy_sf_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_sf">;
+
+def int_hexagon_V6_vmpy_sf_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_sf_128B">;
+
+def int_hexagon_V6_vsub_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_hf">;
+
+def int_hexagon_V6_vsub_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_hf_128B">;
+
+def int_hexagon_V6_vsub_hf_hf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_hf_hf">;
+
+def int_hexagon_V6_vsub_hf_hf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_hf_hf_128B">;
+
+def int_hexagon_V6_vsub_qf16 :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf16">;
+
+def int_hexagon_V6_vsub_qf16_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf16_128B">;
+
+def int_hexagon_V6_vsub_qf16_mix :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf16_mix">;
+
+def int_hexagon_V6_vsub_qf16_mix_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf16_mix_128B">;
+
+def int_hexagon_V6_vsub_qf32 :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf32">;
+
+def int_hexagon_V6_vsub_qf32_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf32_128B">;
+
+def int_hexagon_V6_vsub_qf32_mix :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf32_mix">;
+
+def int_hexagon_V6_vsub_qf32_mix_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf32_mix_128B">;
+
+def int_hexagon_V6_vsub_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf">;
+
+def int_hexagon_V6_vsub_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_128B">;
+
+def int_hexagon_V6_vsub_sf_hf :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf_hf">;
+
+def int_hexagon_V6_vsub_sf_hf_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_hf_128B">;
+
+def int_hexagon_V6_vsub_sf_sf :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf_sf">;
+
+def int_hexagon_V6_vsub_sf_sf_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_sf_128B">;
+
+// V69 HVX Instructions.
+
+def int_hexagon_V6_vasrvuhubrndsat :
+Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvuhubrndsat">;
+
+def int_hexagon_V6_vasrvuhubrndsat_128B :
+Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvuhubrndsat_128B">;
+
+def int_hexagon_V6_vasrvuhubsat :
+Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvuhubsat">;
+
+def int_hexagon_V6_vasrvuhubsat_128B :
+Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvuhubsat_128B">;
+
+def int_hexagon_V6_vasrvwuhrndsat :
+Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvwuhrndsat">;
+
+def int_hexagon_V6_vasrvwuhrndsat_128B :
+Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvwuhrndsat_128B">;
+
+def int_hexagon_V6_vasrvwuhsat :
+Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvwuhsat">;
+
+def int_hexagon_V6_vasrvwuhsat_128B :
+Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvwuhsat_128B">;
+
+def int_hexagon_V6_vmpyuhvs :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhvs">;
+
+def int_hexagon_V6_vmpyuhvs_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhvs_128B">;
+
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 3ceb347e97bf..747049b1035b 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -642,20 +642,6 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
- // For atomic operations without mask
- // Input: (base, index, value, vl)
- class RISCVAMONoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
- llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
- // For atomic operations with mask
- // Input: (base, index, value, mask, vl)
- class RISCVAMOMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
// For unit stride segment load
// Input: (pointer, vl)
@@ -930,10 +916,6 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" #NAME :RISCVConversionNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
}
- multiclass RISCVAMO {
- def "int_riscv_" # NAME : RISCVAMONoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
- }
multiclass RISCVUSSegLoad<int nf> {
def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
@@ -976,16 +958,6 @@ let TargetPrefix = "riscv" in {
def int_riscv_vlm : RISCVUSLoad;
def int_riscv_vsm : RISCVUSStore;
- defm vamoswap : RISCVAMO;
- defm vamoadd : RISCVAMO;
- defm vamoxor : RISCVAMO;
- defm vamoand : RISCVAMO;
- defm vamoor : RISCVAMO;
- defm vamomin : RISCVAMO;
- defm vamomax : RISCVAMO;
- defm vamominu : RISCVAMO;
- defm vamomaxu : RISCVAMO;
-
defm vadd : RISCVBinaryAAX;
defm vsub : RISCVBinaryAAX;
defm vrsub : RISCVBinaryAAX;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index 6a8e6c797f85..aecc3d91fae7 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -11,6 +11,9 @@
///
//===----------------------------------------------------------------------===//
+// Type definition for a table in an intrinsic
+def llvm_table_ty : LLVMQualPointerType<llvm_i8_ty, 1>;
+
let TargetPrefix = "wasm" in { // All intrinsics start with "llvm.wasm.".
// Query the current memory size, and increase the current memory size.
@@ -24,6 +27,35 @@ def int_wasm_memory_grow : Intrinsic<[llvm_anyint_ty],
[]>;
//===----------------------------------------------------------------------===//
+// ref.null intrinsics
+//===----------------------------------------------------------------------===//
+def int_wasm_ref_null_extern : Intrinsic<[llvm_externref_ty], [], [IntrNoMem]>;
+def int_wasm_ref_null_func : Intrinsic<[llvm_funcref_ty], [], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Table intrinsics
+//===----------------------------------------------------------------------===//
+// Query the current table size, and increase the current table size.
+def int_wasm_table_size : Intrinsic<[llvm_i32_ty],
+ [llvm_table_ty],
+ [IntrReadMem]>;
+def int_wasm_table_copy : Intrinsic<[],
+ [llvm_table_ty, llvm_table_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ []>;
+def int_wasm_table_grow_externref : Intrinsic<[llvm_i32_ty],
+ [llvm_table_ty, llvm_externref_ty, llvm_i32_ty],
+ []>;
+def int_wasm_table_grow_funcref : Intrinsic<[llvm_i32_ty],
+ [llvm_table_ty, llvm_funcref_ty, llvm_i32_ty],
+ []>;
+def int_wasm_table_fill_externref : Intrinsic<[],
+ [llvm_table_ty, llvm_i32_ty, llvm_externref_ty, llvm_i32_ty],
+ []>;
+def int_wasm_table_fill_funcref : Intrinsic<[],
+ [llvm_table_ty, llvm_i32_ty, llvm_funcref_ty, llvm_i32_ty],
+ []>;
+
+//===----------------------------------------------------------------------===//
// Trapping float-to-int conversions
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Module.h b/contrib/llvm-project/llvm/include/llvm/IR/Module.h
index bd3a196c7181..4ddbd6ff14b3 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Module.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Module.h
@@ -934,6 +934,17 @@ public:
/// Set the partial sample profile ratio in the profile summary module flag,
/// if applicable.
void setPartialSampleProfileRatio(const ModuleSummaryIndex &Index);
+
+ /// Get the target variant triple which is a string describing a variant of
+ /// the target host platform. For example, Mac Catalyst can be a variant
+ /// target triple for a macOS target.
+ /// @returns a string containing the target variant triple.
+ StringRef getDarwinTargetVariantTriple() const;
+
+ /// Get the target variant version build SDK version metadata.
+ ///
+ /// An empty version is returned if no such metadata is attached.
+ VersionTuple getDarwinTargetVariantSDKVersion() const;
};
/// Given "llvm.used" or "llvm.compiler.used" as a global name, collect the
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h b/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h
index e00b78d45c63..ec1d5ef79eed 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -581,6 +581,13 @@ public:
// If there are calls to unknown targets (e.g. indirect)
unsigned HasUnknownCall : 1;
+ // Indicate if a function must be an unreachable function.
+ //
+ // This bit is sufficient but not necessary;
+ // if this bit is on, the function must be regarded as unreachable;
+ // if this bit is off, the function might be reachable or unreachable.
+ unsigned MustBeUnreachable : 1;
+
FFlags &operator&=(const FFlags &RHS) {
this->ReadNone &= RHS.ReadNone;
this->ReadOnly &= RHS.ReadOnly;
@@ -591,13 +598,15 @@ public:
this->NoUnwind &= RHS.NoUnwind;
this->MayThrow &= RHS.MayThrow;
this->HasUnknownCall &= RHS.HasUnknownCall;
+ this->MustBeUnreachable &= RHS.MustBeUnreachable;
return *this;
}
bool anyFlagSet() {
return this->ReadNone | this->ReadOnly | this->NoRecurse |
this->ReturnDoesNotAlias | this->NoInline | this->AlwaysInline |
- this->NoUnwind | this->MayThrow | this->HasUnknownCall;
+ this->NoUnwind | this->MayThrow | this->HasUnknownCall |
+ this->MustBeUnreachable;
}
operator std::string() {
@@ -613,6 +622,7 @@ public:
OS << ", noUnwind: " << this->NoUnwind;
OS << ", mayThrow: " << this->MayThrow;
OS << ", hasUnknownCall: " << this->HasUnknownCall;
+ OS << ", mustBeUnreachable: " << this->MustBeUnreachable;
OS << ")";
return OS.str();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h b/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h
new file mode 100644
index 000000000000..9d9290a2c1d7
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h
@@ -0,0 +1,56 @@
+//===- SSAContext.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares a specialization of the GenericSSAContext<X>
+/// class template for LLVM IR.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SSACONTEXT_H
+#define LLVM_IR_SSACONTEXT_H
+
+#include "llvm/ADT/GenericSSAContext.h"
+#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/Support/Printable.h"
+
+#include <memory>
+
+namespace llvm {
+class BasicBlock;
+class Function;
+class Instruction;
+class Value;
+template <typename> class SmallVectorImpl;
+template <typename, bool> class DominatorTreeBase;
+
+template <> class GenericSSAContext<Function> {
+ Function *F;
+
+public:
+ using BlockT = BasicBlock;
+ using FunctionT = Function;
+ using InstructionT = Instruction;
+ using ValueRefT = Value *;
+ using DominatorTreeT = DominatorTreeBase<BlockT, false>;
+
+ static BasicBlock *getEntryBlock(Function &F);
+
+ void setFunction(Function &Fn);
+ Function *getFunction() const { return F; }
+
+ Printable print(BasicBlock *Block) const;
+ Printable print(Instruction *Inst) const;
+ Printable print(Value *Value) const;
+};
+
+using SSAContext = GenericSSAContext<Function>;
+
+} // namespace llvm
+
+#endif // LLVM_IR_SSACONTEXT_H
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def b/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def
index a3c6b4e70bf5..121c8bbc6c27 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def
@@ -214,7 +214,7 @@ HELPER_REGISTER_BINARY_FP_VP(frem, VP_FREM, FRem)
///// } Floating-Point Arithmetic
///// Memory Operations {
-// llvm.vp.store(ptr,val,mask,vlen)
+// llvm.vp.store(val,ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_store, 2, 3)
// chain = VP_STORE chain,val,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_STORE, 0, vp_store, 4, 5)
@@ -345,10 +345,9 @@ HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fmul, VP_REDUCE_FMUL,
///// Shuffles {
// llvm.vp.select(mask,on_true,on_false,vlen)
-BEGIN_REGISTER_VP_INTRINSIC(vp_select, 0, 3)
-// BEGIN_REGISTER_VP_SDNODE(VP_SELECT, -1, vp_select, 0, 4)
-// END_REGISTER_CASES(vp_select, VP_SELECT)
-END_REGISTER_VP_INTRINSIC(vp_select)
+BEGIN_REGISTER_VP(vp_select, 0, 3, VP_SELECT, -1)
+VP_PROPERTY_FUNCTIONAL_OPC(Select)
+END_REGISTER_VP(vp_select, VP_SELECT)
BEGIN_REGISTER_VP(experimental_vp_splice, 3, 5, EXPERIMENTAL_VP_SPLICE, -1)
END_REGISTER_VP(experimental_vp_splice, EXPERIMENTAL_VP_SPLICE)
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Value.def b/contrib/llvm-project/llvm/include/llvm/IR/Value.def
index 0a0125d319c3..97d15260f36b 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Value.def
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Value.def
@@ -80,6 +80,7 @@ HANDLE_GLOBAL_VALUE(GlobalVariable)
HANDLE_CONSTANT(BlockAddress)
HANDLE_CONSTANT(ConstantExpr)
HANDLE_CONSTANT_EXCLUDE_LLVM_C_API(DSOLocalEquivalent)
+HANDLE_CONSTANT_EXCLUDE_LLVM_C_API(NoCFIValue)
// ConstantAggregate.
HANDLE_CONSTANT(ConstantArray)
diff --git a/contrib/llvm-project/llvm/include/llvm/InitializePasses.h b/contrib/llvm-project/llvm/include/llvm/InitializePasses.h
index 845d7dcdebd2..0c5ebc9a2f28 100644
--- a/contrib/llvm-project/llvm/include/llvm/InitializePasses.h
+++ b/contrib/llvm-project/llvm/include/llvm/InitializePasses.h
@@ -122,6 +122,7 @@ void initializeControlHeightReductionLegacyPassPass(PassRegistry&);
void initializeCorrelatedValuePropagationPass(PassRegistry&);
void initializeCostModelAnalysisPass(PassRegistry&);
void initializeCrossDSOCFIPass(PassRegistry&);
+void initializeCycleInfoWrapperPassPass(PassRegistry &);
void initializeDAEPass(PassRegistry&);
void initializeDAHPass(PassRegistry&);
void initializeDCELegacyPassPass(PassRegistry&);
@@ -164,7 +165,7 @@ void initializeFinalizeISelPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
void initializeFixIrreduciblePass(PassRegistry &);
void initializeFixupStatepointCallerSavedPass(PassRegistry&);
-void initializeFlattenCFGPassPass(PassRegistry&);
+void initializeFlattenCFGLegacyPassPass(PassRegistry &);
void initializeFloat2IntLegacyPassPass(PassRegistry&);
void initializeForceFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeForwardControlFlowIntegrityPass(PassRegistry&);
@@ -291,6 +292,8 @@ void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineCombinerPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeMachineCycleInfoPrinterPassPass(PassRegistry &);
+void initializeMachineCycleInfoWrapperPassPass(PassRegistry &);
void initializeMachineDominanceFrontierPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
@@ -375,6 +378,7 @@ void initializeRAGreedyPass(PassRegistry&);
void initializeReachingDefAnalysisPass(PassRegistry&);
void initializeReassociateLegacyPassPass(PassRegistry&);
void initializeRedundantDbgInstEliminationPass(PassRegistry&);
+void initializeRegAllocEvictionAdvisorAnalysisPass(PassRegistry &);
void initializeRegAllocFastPass(PassRegistry&);
void initializeRegBankSelectPass(PassRegistry&);
void initializeRegToMemLegacyPass(PassRegistry&);
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm-project/llvm/include/llvm/MC/MCAssembler.h
index 1f670e3973ce..9d5cb620c9de 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCAssembler.h
@@ -153,6 +153,7 @@ private:
MCLOHContainer LOHContainer;
VersionInfoType VersionInfo;
+ VersionInfoType DarwinTargetVariantVersionInfo;
/// Evaluate a fixup to a relocatable expression and the value which should be
/// placed into the fixup.
@@ -285,6 +286,21 @@ public:
VersionInfo.SDKVersion = SDKVersion;
}
+ const VersionInfoType &getDarwinTargetVariantVersionInfo() const {
+ return DarwinTargetVariantVersionInfo;
+ }
+ void setDarwinTargetVariantBuildVersion(MachO::PlatformType Platform,
+ unsigned Major, unsigned Minor,
+ unsigned Update,
+ VersionTuple SDKVersion) {
+ DarwinTargetVariantVersionInfo.EmitBuildVersion = true;
+ DarwinTargetVariantVersionInfo.TypeOrPlatform.Platform = Platform;
+ DarwinTargetVariantVersionInfo.Major = Major;
+ DarwinTargetVariantVersionInfo.Minor = Minor;
+ DarwinTargetVariantVersionInfo.Update = Update;
+ DarwinTargetVariantVersionInfo.SDKVersion = SDKVersion;
+ }
+
/// Reuse an assembler instance
///
void reset();
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCObjectFileInfo.h b/contrib/llvm-project/llvm/include/llvm/MC/MCObjectFileInfo.h
index ba7450ac64f1..5e0cccaba77f 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -427,6 +427,8 @@ private:
bool PositionIndependent = false;
MCContext *Ctx = nullptr;
VersionTuple SDKVersion;
+ Optional<Triple> DarwinTargetVariantTriple;
+ VersionTuple DarwinTargetVariantSDKVersion;
void initMachOMCObjectFileInfo(const Triple &T);
void initELFMCObjectFileInfo(const Triple &T, bool Large);
@@ -442,6 +444,23 @@ public:
}
const VersionTuple &getSDKVersion() const { return SDKVersion; }
+
+ void setDarwinTargetVariantTriple(const Triple &T) {
+ DarwinTargetVariantTriple = T;
+ }
+
+ const Triple *getDarwinTargetVariantTriple() const {
+ return DarwinTargetVariantTriple ? DarwinTargetVariantTriple.getPointer()
+ : nullptr;
+ }
+
+ void setDarwinTargetVariantSDKVersion(const VersionTuple &TheSDKVersion) {
+ DarwinTargetVariantSDKVersion = TheSDKVersion;
+ }
+
+ const VersionTuple &getDarwinTargetVariantSDKVersion() const {
+ return DarwinTargetVariantSDKVersion;
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm-project/llvm/include/llvm/MC/MCObjectStreamer.h
index 9d6416e4a18d..183fd79fb9fc 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -50,6 +50,16 @@ class MCObjectStreamer : public MCStreamer {
};
SmallVector<PendingMCFixup, 2> PendingFixups;
+ struct PendingAssignment {
+ MCSymbol *Symbol;
+ const MCExpr *Value;
+ };
+
+ /// A list of conditional assignments we may need to emit if the target
+ /// symbol is later emitted.
+ DenseMap<const MCSymbol *, SmallVector<PendingAssignment, 1>>
+ pendingAssignments;
+
virtual void emitInstToData(const MCInst &Inst, const MCSubtargetInfo&) = 0;
void emitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
void emitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
@@ -118,6 +128,8 @@ public:
virtual void emitLabelAtPos(MCSymbol *Symbol, SMLoc Loc, MCFragment *F,
uint64_t Offset);
void emitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
+ void emitConditionalAssignment(MCSymbol *Symbol,
+ const MCExpr *Value) override;
void emitValueImpl(const MCExpr *Value, unsigned Size,
SMLoc Loc = SMLoc()) override;
void emitULEB128Value(const MCExpr *Value) override;
@@ -208,6 +220,10 @@ public:
const MCSymbol *Lo) override;
bool mayHaveInstructions(MCSection &Sec) const override;
+
+ /// Emits pending conditional assignments that depend on \p Symbol
+ /// being emitted.
+ void emitPendingAssignments(MCSymbol *Symbol);
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index abb95628c2a9..faf0a4474c8a 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -10,6 +10,7 @@
#define LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/SMLoc.h"
#include <string>
@@ -76,6 +77,10 @@ public:
/// assembly.
virtual bool isOffsetOfLocal() const { return false; }
+ /// isMemPlaceholder - Do we need to ignore the constraint, rather than emit
+ /// code? Only valid when parsing MS-style inline assembly.
+ virtual bool isMemPlaceholder(const MCInstrDesc &Desc) const { return false; }
+
/// getOffsetOfLoc - Get the location of the offset operator.
virtual SMLoc getOffsetOfLoc() const { return SMLoc(); }
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h
index e00f50f617fa..7bfbdb880098 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h
@@ -496,8 +496,16 @@ public:
unsigned Minor, unsigned Update,
VersionTuple SDKVersion) {}
+ virtual void emitDarwinTargetVariantBuildVersion(unsigned Platform,
+ unsigned Major,
+ unsigned Minor,
+ unsigned Update,
+ VersionTuple SDKVersion) {}
+
void emitVersionForTarget(const Triple &Target,
- const VersionTuple &SDKVersion);
+ const VersionTuple &SDKVersion,
+ const Triple *DarwinTargetVariantTriple,
+ const VersionTuple &DarwinTargetVariantSDKVersion);
/// Note in the output that the specified \p Func is a Thumb mode
/// function (ARM target only).
@@ -516,6 +524,10 @@ public:
/// \param Value - The value for the symbol.
virtual void emitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ /// Emit an assignment of \p Value to \p Symbol, but only if \p Value is also
+ /// emitted.
+ virtual void emitConditionalAssignment(MCSymbol *Symbol, const MCExpr *Value);
+
/// Emit an weak reference from \p Alias to \p Symbol.
///
/// This corresponds to an assembler statement such as:
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h b/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h
index cd97cb0e2992..3510eeca8953 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h
@@ -62,9 +62,10 @@ public:
std::string ABIName;
std::string AssemblyLanguage;
std::string SplitDwarfFile;
+ std::string COFFOutputFilename;
const char *Argv0 = nullptr;
- ArrayRef<const char *> CommandLineArgs;
+ ArrayRef<std::string> CommandLineArgs;
/// Additional paths to search for `.include` directives when using the
/// integrated assembler.
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/MachO.h b/contrib/llvm-project/llvm/include/llvm/Object/MachO.h
index ca5d63e4074f..ede742c47f97 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/MachO.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/MachO.h
@@ -652,6 +652,13 @@ public:
return std::string(std::string(Version.str()));
}
+ /// If the input path is a .dSYM bundle (as created by the dsymutil tool),
+ /// return the paths to the object files found in the bundle, otherwise return
+ /// an empty vector. If the path appears to be a .dSYM bundle but no objects
+ /// were found or there was a filesystem error, then return an error.
+ static Expected<std::vector<std::string>>
+ findDsymObjectMembers(StringRef Path);
+
private:
MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
Error &Err, uint32_t UniversalCputype = 0,
diff --git a/contrib/llvm-project/llvm/include/llvm/Option/ArgList.h b/contrib/llvm-project/llvm/include/llvm/Option/ArgList.h
index f6abf2a62aa5..74897de52a93 100644
--- a/contrib/llvm-project/llvm/include/llvm/Option/ArgList.h
+++ b/contrib/llvm-project/llvm/include/llvm/Option/ArgList.h
@@ -245,6 +245,12 @@ public:
return getLastArg(Ids...) != nullptr;
}
+ /// Return true if the arg list contains multiple arguments matching \p Id.
+ bool hasMultipleArgs(OptSpecifier Id) const {
+ auto Args = filtered(Id);
+ return (Args.begin() != Args.end()) && (++Args.begin()) != Args.end();
+ }
+
/// Return the last argument matching \p Id, or null.
template<typename ...OptSpecifiers>
Arg *getLastArg(OptSpecifiers ...Ids) const {
diff --git a/contrib/llvm-project/llvm/include/llvm/Passes/PassBuilder.h b/contrib/llvm-project/llvm/include/llvm/Passes/PassBuilder.h
index 7c7883e98183..66b0b149fa25 100644
--- a/contrib/llvm-project/llvm/include/llvm/Passes/PassBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Passes/PassBuilder.h
@@ -199,7 +199,7 @@ public:
/// Construct the module pipeline that performs inlining with
/// module inliner pass.
- ModuleInlinerPass buildModuleInlinerPipeline(OptimizationLevel Level,
+ ModulePassManager buildModuleInlinerPipeline(OptimizationLevel Level,
ThinOrFullLTOPhase Phase);
/// Construct the core LLVM module optimization pipeline.
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h
index 4395c2abb33e..6c5efb2f6d5d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h
@@ -290,6 +290,10 @@ enum class instrprof_error {
too_large,
truncated,
malformed,
+ missing_debug_info_for_correlation,
+ unexpected_debug_info_for_correlation,
+ unable_to_correlate_profile,
+ unsupported_debug_format,
unknown_function,
invalid_prof,
hash_mismatch,
@@ -1149,7 +1153,8 @@ void getMemOPSizeRangeFromOption(StringRef Str, int64_t &RangeStart,
// Create a COMDAT variable INSTR_PROF_RAW_VERSION_VAR to make the runtime
// aware this is an ir_level profile so it can set the version flag.
GlobalVariable *createIRLevelProfileFlagVar(Module &M, bool IsCS,
- bool InstrEntryBBEnabled);
+ bool InstrEntryBBEnabled,
+ bool DebugInfoCorrelate);
// Create the variable for the profile file name.
void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput);
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h
new file mode 100644
index 000000000000..eae7b4e0322c
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h
@@ -0,0 +1,170 @@
+//===- InstrProfCorrelator.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines InstrProfCorrelator used to generate PGO profiles from
+// raw profile data and debug info.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
+#define LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
+
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <vector>
+
+namespace llvm {
+
+/// InstrProfCorrelator - A base class used to correlate raw instrumentation
+/// data to their functions.
+class InstrProfCorrelator {
+public:
+ static llvm::Expected<std::unique_ptr<InstrProfCorrelator>>
+ get(StringRef DebugInfoFilename);
+
+ /// Construct a ProfileData vector used to correlate raw instrumentation data
+ /// to their functions.
+ virtual Error correlateProfileData() = 0;
+
+ static const char *FunctionNameAttributeName;
+ static const char *CFGHashAttributeName;
+ static const char *NumCountersAttributeName;
+
+ enum InstrProfCorrelatorKind { CK_32Bit, CK_64Bit };
+ InstrProfCorrelatorKind getKind() const { return Kind; }
+ virtual ~InstrProfCorrelator() {}
+
+protected:
+ struct Context {
+ static llvm::Expected<std::unique_ptr<Context>>
+ get(std::unique_ptr<MemoryBuffer> Buffer, const object::ObjectFile &Obj);
+ std::unique_ptr<MemoryBuffer> Buffer;
+ /// The address range of the __llvm_prf_cnts section.
+ uint64_t CountersSectionStart;
+ uint64_t CountersSectionEnd;
+ /// True if target and host have different endian orders.
+ bool ShouldSwapBytes;
+ };
+ const std::unique_ptr<InstrProfCorrelator::Context> Ctx;
+
+ InstrProfCorrelator(InstrProfCorrelatorKind K, std::unique_ptr<Context> Ctx)
+ : Ctx(std::move(Ctx)), Kind(K) {}
+
+private:
+ static llvm::Expected<std::unique_ptr<InstrProfCorrelator>>
+ get(std::unique_ptr<MemoryBuffer> Buffer);
+
+ const InstrProfCorrelatorKind Kind;
+};
+
+/// InstrProfCorrelatorImpl - A child of InstrProfCorrelator with a template
+/// pointer type so that the ProfileData vector can be materialized.
+template <class IntPtrT>
+class InstrProfCorrelatorImpl : public InstrProfCorrelator {
+public:
+ InstrProfCorrelatorImpl(std::unique_ptr<InstrProfCorrelator::Context> Ctx);
+ static bool classof(const InstrProfCorrelator *C);
+
+ /// Return a pointer to the underlying ProfileData vector that this class
+ /// constructs.
+ const RawInstrProf::ProfileData<IntPtrT> *getDataPointer() const {
+ return Data.empty() ? nullptr : Data.data();
+ }
+
+ /// Return the number of ProfileData elements.
+ size_t getDataSize() const { return Data.size(); }
+
+ /// Return a pointer to the compressed names string that this class
+ /// constructs.
+ const char *getCompressedNamesPointer() const {
+ return CompressedNames.c_str();
+ }
+
+ /// Return the number of bytes in the compressed names string.
+ size_t getCompressedNamesSize() const { return CompressedNames.size(); }
+
+ static llvm::Expected<std::unique_ptr<InstrProfCorrelatorImpl<IntPtrT>>>
+ get(std::unique_ptr<InstrProfCorrelator::Context> Ctx,
+ const object::ObjectFile &Obj);
+
+protected:
+ std::vector<RawInstrProf::ProfileData<IntPtrT>> Data;
+ std::string CompressedNames;
+
+ Error correlateProfileData() override;
+ virtual void correlateProfileDataImpl() = 0;
+
+ void addProbe(StringRef FunctionName, uint64_t CFGHash, IntPtrT CounterOffset,
+ IntPtrT FunctionPtr, uint32_t NumCounters);
+
+private:
+ InstrProfCorrelatorImpl(InstrProfCorrelatorKind Kind,
+ std::unique_ptr<InstrProfCorrelator::Context> Ctx)
+ : InstrProfCorrelator(Kind, std::move(Ctx)){};
+ std::vector<std::string> Names;
+
+ // Byte-swap the value if necessary.
+ template <class T> T maybeSwap(T Value) const {
+ return Ctx->ShouldSwapBytes ? sys::getSwappedBytes(Value) : Value;
+ }
+};
+
+/// DwarfInstrProfCorrelator - A child of InstrProfCorrelatorImpl that takes
+/// DWARF debug info as input to correlate profiles.
+template <class IntPtrT>
+class DwarfInstrProfCorrelator : public InstrProfCorrelatorImpl<IntPtrT> {
+public:
+ DwarfInstrProfCorrelator(std::unique_ptr<DWARFContext> DICtx,
+ std::unique_ptr<InstrProfCorrelator::Context> Ctx)
+ : InstrProfCorrelatorImpl<IntPtrT>(std::move(Ctx)),
+ DICtx(std::move(DICtx)) {}
+
+private:
+ std::unique_ptr<DWARFContext> DICtx;
+
+ /// Return the address of the object that the provided DIE symbolizes.
+ llvm::Optional<uint64_t> getLocation(const DWARFDie &Die) const;
+
+ /// Returns true if the provided DIE symbolizes an instrumentation probe
+ /// symbol.
+ static bool isDIEOfProbe(const DWARFDie &Die);
+
+ /// Iterate over DWARF DIEs to find those that symbolize instrumentation
+ /// probes and construct the ProfileData vector and CompressedNames string.
+ ///
+ /// Here is some example DWARF for an instrumentation probe we are looking
+ /// for:
+ /// \code
+ /// DW_TAG_subprogram
+ /// DW_AT_low_pc (0x0000000000000000)
+ /// DW_AT_high_pc (0x0000000000000014)
+ /// DW_AT_name ("foo")
+ /// DW_TAG_variable
+ /// DW_AT_name ("__profc_foo")
+ /// DW_AT_location (DW_OP_addr 0x0)
+ /// DW_TAG_LLVM_annotation
+ /// DW_AT_name ("Function Name")
+ /// DW_AT_const_value ("foo")
+ /// DW_TAG_LLVM_annotation
+ /// DW_AT_name ("CFG Hash")
+ /// DW_AT_const_value (12345678)
+ /// DW_TAG_LLVM_annotation
+ /// DW_AT_name ("Num Counters")
+ /// DW_AT_const_value (2)
+ /// NULL
+ /// NULL
+ /// \endcode
+ void correlateProfileDataImpl() override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc
index 008b8dde5820..44719126b596 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc
@@ -653,15 +653,17 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
* version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentaiton
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
* generated profile, and 0 if this is a Clang FE generated profile.
* 1 in bit 57 indicates there are context-sensitive records in the profile.
+ * The 59th bit indicates whether to use debug info to correlate profiles.
*/
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
+#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h
index b62d4ff044a3..c615e8533178 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/ProfileData/InstrProfCorrelator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/LineIterator.h"
@@ -96,6 +97,9 @@ public:
virtual bool instrEntryBBEnabled() const = 0;
+ /// Return true if we must provide debug info to create PGO profiles.
+ virtual bool useDebugInfoCorrelate() const { return false; }
+
/// Return the PGO symtab. There are three different readers:
/// Raw, Text, and Indexed profile readers. The first two types
/// of readers are used only by llvm-profdata tool, while the indexed
@@ -150,10 +154,12 @@ public:
/// Factory method to create an appropriately typed reader for the given
/// instrprof file.
- static Expected<std::unique_ptr<InstrProfReader>> create(const Twine &Path);
+ static Expected<std::unique_ptr<InstrProfReader>>
+ create(const Twine &Path, const InstrProfCorrelator *Correlator = nullptr);
static Expected<std::unique_ptr<InstrProfReader>>
- create(std::unique_ptr<MemoryBuffer> Buffer);
+ create(std::unique_ptr<MemoryBuffer> Buffer,
+ const InstrProfCorrelator *Correlator = nullptr);
};
/// Reader for the simple text based instrprof format.
@@ -215,6 +221,9 @@ class RawInstrProfReader : public InstrProfReader {
private:
/// The profile data file contents.
std::unique_ptr<MemoryBuffer> DataBuffer;
+ /// If available, this holds the ProfileData array used to correlate raw
+ /// instrumentation data to their functions.
+ const InstrProfCorrelatorImpl<IntPtrT> *Correlator;
bool ShouldSwapBytes;
// The value of the version field of the raw profile data header. The lower 56
// bits specifies the format version and the most significant 8 bits specify
@@ -226,7 +235,7 @@ private:
const RawInstrProf::ProfileData<IntPtrT> *DataEnd;
const uint64_t *CountersStart;
const char *NamesStart;
- uint64_t NamesSize;
+ const char *NamesEnd;
// After value profile is all read, this pointer points to
// the header of next profile data (if exists)
const uint8_t *ValueDataStart;
@@ -237,8 +246,11 @@ private:
const uint8_t *BinaryIdsStart;
public:
- RawInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
- : DataBuffer(std::move(DataBuffer)) {}
+ RawInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer,
+ const InstrProfCorrelator *Correlator)
+ : DataBuffer(std::move(DataBuffer)),
+ Correlator(dyn_cast_or_null<const InstrProfCorrelatorImpl<IntPtrT>>(
+ Correlator)) {}
RawInstrProfReader(const RawInstrProfReader &) = delete;
RawInstrProfReader &operator=(const RawInstrProfReader &) = delete;
@@ -259,6 +271,10 @@ public:
return (Version & VARIANT_MASK_INSTR_ENTRY) != 0;
}
+ bool useDebugInfoCorrelate() const override {
+ return (Version & VARIANT_MASK_DBG_CORRELATE) != 0;
+ }
+
InstrProfSymtab &getSymtab() override {
assert(Symtab.get());
return *Symtab.get();
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h
index 7ac9eccf8ac2..dc6522f4ec4c 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h
@@ -206,7 +206,8 @@ enum class SecProfSummaryFlags : uint32_t {
enum class SecFuncMetadataFlags : uint32_t {
SecFlagInvalid = 0,
SecFlagIsProbeBased = (1 << 0),
- SecFlagHasAttribute = (1 << 1)
+ SecFlagHasAttribute = (1 << 1),
+ SecFlagIsCSNested = (1 << 2),
};
enum class SecFuncOffsetFlags : uint32_t {
@@ -591,11 +592,11 @@ public:
: hash_value(getName());
}
- /// Set the name of the function.
+ /// Set the name of the function and clear the current context.
void setName(StringRef FunctionName) {
- assert(FullContext.empty() &&
- "setName should only be called for non-CS profile");
Name = FunctionName;
+ FullContext = SampleContextFrames();
+ State = UnknownContext;
}
void setContext(SampleContextFrames Context,
@@ -745,6 +746,16 @@ public:
}
}
+ // Set current context and all callee contexts to be synthetic.
+ void SetContextSynthetic() {
+ Context.setState(SyntheticContext);
+ for (auto &I : CallsiteSamples) {
+ for (auto &CS : I.second) {
+ CS.second.SetContextSynthetic();
+ }
+ }
+ }
+
/// Return the number of samples collected at the given location.
/// Each location is specified by \p LineOffset and \p Discriminator.
/// If the location is not found in profile, return error.
@@ -816,7 +827,7 @@ public:
/// Return the sample count of the first instruction of the function.
/// The function can be either a standalone symbol or an inlined function.
uint64_t getEntrySamples() const {
- if (FunctionSamples::ProfileIsCS && getHeadSamples()) {
+ if (FunctionSamples::ProfileIsCSFlat && getHeadSamples()) {
// For CS profile, if we already have more accurate head samples
// counted by branch sample from caller, use them as entry samples.
return getHeadSamples();
@@ -1008,7 +1019,13 @@ public:
/// instruction. This is wrapper of two scenarios, the probe-based profile and
/// regular profile, to hide implementation details from the sample loader and
/// the context tracker.
- static LineLocation getCallSiteIdentifier(const DILocation *DIL);
+ static LineLocation getCallSiteIdentifier(const DILocation *DIL,
+ bool ProfileIsFS = false);
+
+ /// Returns a unique hash code for a combination of a callsite location and
+ /// the callee function name.
+ static uint64_t getCallSiteHash(StringRef CalleeName,
+ const LineLocation &Callsite);
/// Get the FunctionSamples of the inline instance where DIL originates
/// from.
@@ -1027,7 +1044,9 @@ public:
static bool ProfileIsProbeBased;
- static bool ProfileIsCS;
+ static bool ProfileIsCSFlat;
+
+ static bool ProfileIsCSNested;
SampleContext &getContext() const { return Context; }
@@ -1161,6 +1180,40 @@ private:
SampleProfileMap &ProfileMap;
};
+// CSProfileConverter converts a full context-sensitive flat sample profile into
+// a nested context-sensitive sample profile.
+class CSProfileConverter {
+public:
+ CSProfileConverter(SampleProfileMap &Profiles);
+ void convertProfiles();
+ struct FrameNode {
+ FrameNode(StringRef FName = StringRef(),
+ FunctionSamples *FSamples = nullptr,
+ LineLocation CallLoc = {0, 0})
+ : FuncName(FName), FuncSamples(FSamples), CallSiteLoc(CallLoc){};
+
+ // Map line+discriminator location to child frame
+ std::map<uint64_t, FrameNode> AllChildFrames;
+ // Function name for current frame
+ StringRef FuncName;
+ // Function Samples for current frame
+ FunctionSamples *FuncSamples;
+ // Callsite location in parent context
+ LineLocation CallSiteLoc;
+
+ FrameNode *getOrCreateChildFrame(const LineLocation &CallSite,
+ StringRef CalleeName);
+ };
+
+private:
+ // Nest all children profiles into the profile of Node.
+ void convertProfiles(FrameNode &Node);
+ FrameNode *getOrCreateContextPath(const SampleContext &Context);
+
+ SampleProfileMap &ProfileMap;
+ FrameNode RootFrame;
+};
+
/// ProfileSymbolList records the list of function symbols shown up
/// in the binary used to generate the profile. It is useful to
/// to discriminate a function being so cold as not to shown up
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h
index e6d31f1b9098..a2caca246d93 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h
@@ -473,8 +473,11 @@ public:
/// Whether input profile is based on pseudo probes.
bool profileIsProbeBased() const { return ProfileIsProbeBased; }
- /// Whether input profile is fully context-sensitive
- bool profileIsCS() const { return ProfileIsCS; }
+ /// Whether input profile is fully context-sensitive and flat.
+ bool profileIsCSFlat() const { return ProfileIsCSFlat; }
+
+ /// Whether input profile is fully context-sensitive and nested.
+ bool profileIsCSNested() const { return ProfileIsCSNested; }
virtual std::unique_ptr<ProfileSymbolList> getProfileSymbolList() {
return nullptr;
@@ -533,8 +536,11 @@ protected:
/// \brief Whether samples are collected based on pseudo probes.
bool ProfileIsProbeBased = false;
- /// Whether function profiles are context-sensitive.
- bool ProfileIsCS = false;
+ /// Whether function profiles are context-sensitive flat profiles.
+ bool ProfileIsCSFlat = false;
+
+ /// Whether function profiles are context-sensitive nested profiles.
+ bool ProfileIsCSNested = false;
/// Number of context-sensitive profiles.
uint32_t CSProfileCount = 0;
@@ -698,6 +704,8 @@ protected:
std::error_code readSecHdrTable();
std::error_code readFuncMetadata(bool ProfileHasAttribute);
+ std::error_code readFuncMetadata(bool ProfileHasAttribute,
+ FunctionSamples *FProfile);
std::error_code readFuncOffsetTable();
std::error_code readFuncProfiles();
std::error_code readMD5NameTable();
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h
index 773beac24ebc..42decd255203 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h
@@ -269,6 +269,7 @@ protected:
std::error_code writeCSNameTableSection();
std::error_code writeFuncMetadata(const SampleProfileMap &Profiles);
+ std::error_code writeFuncMetadata(const FunctionSamples &Profile);
// Functions to write various kinds of sections.
std::error_code writeNameTableSection(const SampleProfileMap &ProfileMap);
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ARMEHABI.h b/contrib/llvm-project/llvm/include/llvm/Support/ARMEHABI.h
index 3fbb56d65eb8..1a7778fe4a1c 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ARMEHABI.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ARMEHABI.h
@@ -71,6 +71,10 @@ namespace EHABI {
// Purpose: finish
UNWIND_OPCODE_FINISH = 0xb0,
+ // Format: 10110100
+  // Purpose: Pop Return Address Authentication Code
+ UNWIND_OPCODE_POP_RA_AUTH_CODE = 0xb4,
+
// Format: 10110001 0000xxxx
// Purpose: pop r[3:0]
// Constraint: x != 0
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Caching.h b/contrib/llvm-project/llvm/include/llvm/Support/Caching.h
index 1e5fea17f708..5c30a822ef38 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Caching.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Caching.h
@@ -27,8 +27,11 @@ class MemoryBuffer;
/// that can be done by deriving from this class and overriding the destructor.
class CachedFileStream {
public:
- CachedFileStream(std::unique_ptr<raw_pwrite_stream> OS) : OS(std::move(OS)) {}
+ CachedFileStream(std::unique_ptr<raw_pwrite_stream> OS,
+ std::string OSPath = "")
+ : OS(std::move(OS)), ObjectPathName(OSPath) {}
std::unique_ptr<raw_pwrite_stream> OS;
+ std::string ObjectPathName;
virtual ~CachedFileStream() = default;
};
@@ -63,9 +66,10 @@ using AddBufferFn =
/// the cache directory if it does not already exist. The cache name appears in
/// error messages for errors during caching. The temporary file prefix is used
/// in the temporary file naming scheme used when writing files atomically.
-Expected<FileCache> localCache(Twine CacheNameRef, Twine TempFilePrefixRef,
- Twine CacheDirectoryPathRef,
- AddBufferFn AddBuffer);
+Expected<FileCache> localCache(
+ Twine CacheNameRef, Twine TempFilePrefixRef, Twine CacheDirectoryPathRef,
+ AddBufferFn AddBuffer = [](size_t Task, std::unique_ptr<MemoryBuffer> MB) {
+ });
} // namespace llvm
#endif
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h b/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h
index f478549a7e4e..629a37a90aae 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h
@@ -22,13 +22,13 @@ class raw_ostream;
namespace sys {
/// A time point on the system clock. This is provided for two reasons:
-/// - to insulate us agains subtle differences in behavoir to differences in
-/// system clock precision (which is implementation-defined and differs between
-/// platforms).
+/// - to insulate us against subtle differences in behavior due to differences
+/// system clock precision (which is implementation-defined and differs
+/// between platforms).
/// - to shorten the type name
-/// The default precision is nanoseconds. If need a specific precision specify
-/// it explicitly. If unsure, use the default. If you need a time point on a
-/// clock other than the system_clock, use std::chrono directly.
+/// The default precision is nanoseconds. If you need a specific precision
+/// specify it explicitly. If unsure, use the default. If you need a time point
+/// on a clock other than the system_clock, use std::chrono directly.
template <typename D = std::chrono::nanoseconds>
using TimePoint = std::chrono::time_point<std::chrono::system_clock, D>;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h b/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h
index c5318137ed3d..b31ba6bc7fc2 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h
@@ -126,7 +126,11 @@
#if __has_attribute(visibility) && !defined(__MINGW32__) && \
!defined(__CYGWIN__) && !defined(_WIN32)
#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
-#define LLVM_EXTERNAL_VISIBILITY __attribute__ ((visibility("default")))
+#if defined(LLVM_BUILD_LLVM_DYLIB) || defined(LLVM_BUILD_SHARED_LIBS)
+#define LLVM_EXTERNAL_VISIBILITY __attribute__((visibility("default")))
+#else
+#define LLVM_EXTERNAL_VISIBILITY
+#endif
#else
#define LLVM_LIBRARY_VISIBILITY
#define LLVM_EXTERNAL_VISIBILITY
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h
index 11a31bf40160..1c0f5f702c6d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h
@@ -265,10 +265,9 @@ public:
<< DOT::EscapeString(DTraits.getEdgeDestLabel(Node, i));
}
- if (RenderUsingHTML)
- O << "<td colspan=\"1\">... truncated</td>";
- else if (i != e)
- O << "|<d64>truncated...}";
+ if (i != e)
+ O << "|<d64>truncated...";
+ O << "}";
}
if (RenderUsingHTML)
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h b/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
index 7110de601123..1ba4d449b709 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
@@ -81,6 +81,9 @@ private:
void addExtension(StringRef ExtName, unsigned MajorVersion,
unsigned MinorVersion);
+ Error checkDependency();
+
+ void updateImplication();
void updateFLen();
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h b/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h
index 0dfe1245f7d6..865337e3cc7f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -57,19 +58,65 @@ struct HexNumber {
uint64_t Value;
};
+struct FlagEntry {
+ FlagEntry(StringRef Name, char Value)
+ : Name(Name), Value(static_cast<unsigned char>(Value)) {}
+ FlagEntry(StringRef Name, signed char Value)
+ : Name(Name), Value(static_cast<unsigned char>(Value)) {}
+ FlagEntry(StringRef Name, signed short Value)
+ : Name(Name), Value(static_cast<unsigned short>(Value)) {}
+ FlagEntry(StringRef Name, signed int Value)
+ : Name(Name), Value(static_cast<unsigned int>(Value)) {}
+ FlagEntry(StringRef Name, signed long Value)
+ : Name(Name), Value(static_cast<unsigned long>(Value)) {}
+ FlagEntry(StringRef Name, signed long long Value)
+ : Name(Name), Value(static_cast<unsigned long long>(Value)) {}
+ FlagEntry(StringRef Name, unsigned char Value) : Name(Name), Value(Value) {}
+ FlagEntry(StringRef Name, unsigned short Value) : Name(Name), Value(Value) {}
+ FlagEntry(StringRef Name, unsigned int Value) : Name(Name), Value(Value) {}
+ FlagEntry(StringRef Name, unsigned long Value) : Name(Name), Value(Value) {}
+ FlagEntry(StringRef Name, unsigned long long Value)
+ : Name(Name), Value(Value) {}
+ StringRef Name;
+ uint64_t Value;
+};
+
raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value);
std::string to_hexString(uint64_t Value, bool UpperCase = true);
template <class T> std::string to_string(const T &Value) {
std::string number;
- llvm::raw_string_ostream stream(number);
+ raw_string_ostream stream(number);
stream << Value;
return stream.str();
}
+template <typename T, typename TEnum>
+std::string enumToString(T Value, ArrayRef<EnumEntry<TEnum>> EnumValues) {
+ for (const EnumEntry<TEnum> &EnumItem : EnumValues)
+ if (EnumItem.Value == Value)
+ return std::string(EnumItem.AltName);
+ return to_hexString(Value, false);
+}
+
class ScopedPrinter {
public:
- ScopedPrinter(raw_ostream &OS) : OS(OS), IndentLevel(0) {}
+ enum class ScopedPrinterKind {
+ Base,
+ JSON,
+ };
+
+ ScopedPrinter(raw_ostream &OS,
+ ScopedPrinterKind Kind = ScopedPrinterKind::Base)
+ : OS(OS), IndentLevel(0), Kind(Kind) {}
+
+ ScopedPrinterKind getKind() const { return Kind; }
+
+ static bool classof(const ScopedPrinter *SP) {
+ return SP->getKind() == ScopedPrinterKind::Base;
+ }
+
+ virtual ~ScopedPrinter() {}
void flush() { OS.flush(); }
@@ -106,20 +153,17 @@ public:
}
}
- if (Found) {
- startLine() << Label << ": " << Name << " (" << hex(Value) << ")\n";
- } else {
- startLine() << Label << ": " << hex(Value) << "\n";
- }
+ if (Found)
+ printHex(Label, Name, Value);
+ else
+ printHex(Label, Value);
}
template <typename T, typename TFlag>
void printFlags(StringRef Label, T Value, ArrayRef<EnumEntry<TFlag>> Flags,
TFlag EnumMask1 = {}, TFlag EnumMask2 = {},
TFlag EnumMask3 = {}) {
- typedef EnumEntry<TFlag> FlagEntry;
- typedef SmallVector<FlagEntry, 10> FlagVector;
- FlagVector SetFlags;
+ SmallVector<FlagEntry, 10> SetFlags;
for (const auto &Flag : Flags) {
if (Flag.Value == 0)
@@ -135,69 +179,69 @@ public:
bool IsEnum = (Flag.Value & EnumMask) != 0;
if ((!IsEnum && (Value & Flag.Value) == Flag.Value) ||
(IsEnum && (Value & EnumMask) == Flag.Value)) {
- SetFlags.push_back(Flag);
+ SetFlags.emplace_back(Flag.Name, Flag.Value);
}
}
- llvm::sort(SetFlags, &flagName<TFlag>);
-
- startLine() << Label << " [ (" << hex(Value) << ")\n";
- for (const auto &Flag : SetFlags) {
- startLine() << " " << Flag.Name << " (" << hex(Flag.Value) << ")\n";
- }
- startLine() << "]\n";
+ llvm::sort(SetFlags, &flagName);
+ printFlagsImpl(Label, hex(Value), SetFlags);
}
template <typename T> void printFlags(StringRef Label, T Value) {
- startLine() << Label << " [ (" << hex(Value) << ")\n";
+ SmallVector<HexNumber, 10> SetFlags;
uint64_t Flag = 1;
uint64_t Curr = Value;
while (Curr > 0) {
if (Curr & 1)
- startLine() << " " << hex(Flag) << "\n";
+ SetFlags.emplace_back(Flag);
Curr >>= 1;
Flag <<= 1;
}
- startLine() << "]\n";
+ printFlagsImpl(Label, hex(Value), SetFlags);
}
- void printNumber(StringRef Label, uint64_t Value) {
+ virtual void printNumber(StringRef Label, uint64_t Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printNumber(StringRef Label, uint32_t Value) {
+ virtual void printNumber(StringRef Label, uint32_t Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printNumber(StringRef Label, uint16_t Value) {
+ virtual void printNumber(StringRef Label, uint16_t Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printNumber(StringRef Label, uint8_t Value) {
+ virtual void printNumber(StringRef Label, uint8_t Value) {
startLine() << Label << ": " << unsigned(Value) << "\n";
}
- void printNumber(StringRef Label, int64_t Value) {
+ virtual void printNumber(StringRef Label, int64_t Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printNumber(StringRef Label, int32_t Value) {
+ virtual void printNumber(StringRef Label, int32_t Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printNumber(StringRef Label, int16_t Value) {
+ virtual void printNumber(StringRef Label, int16_t Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printNumber(StringRef Label, int8_t Value) {
+ virtual void printNumber(StringRef Label, int8_t Value) {
startLine() << Label << ": " << int(Value) << "\n";
}
- void printNumber(StringRef Label, const APSInt &Value) {
+ virtual void printNumber(StringRef Label, const APSInt &Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printBoolean(StringRef Label, bool Value) {
+ template <typename T>
+ void printNumber(StringRef Label, StringRef Str, T Value) {
+ printNumberImpl(Label, Str, to_string(Value));
+ }
+
+ virtual void printBoolean(StringRef Label, bool Value) {
startLine() << Label << ": " << (Value ? "Yes" : "No") << '\n';
}
@@ -207,12 +251,62 @@ public:
getOStream() << "\n";
}
- template <typename T> void printList(StringRef Label, const T &List) {
- startLine() << Label << ": [";
- ListSeparator LS;
+ template <typename T>
+ void printList(StringRef Label, const ArrayRef<T> List) {
+ SmallVector<std::string, 10> StringList;
for (const auto &Item : List)
- OS << LS << Item;
- OS << "]\n";
+ StringList.emplace_back(to_string(Item));
+ printList(Label, StringList);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<bool> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<std::string> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<uint64_t> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<uint32_t> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<uint16_t> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<uint8_t> List) {
+ SmallVector<unsigned> NumberList;
+ for (const uint8_t &Item : List)
+ NumberList.emplace_back(Item);
+ printListImpl(Label, NumberList);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<int64_t> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<int32_t> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<int16_t> List) {
+ printListImpl(Label, List);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<int8_t> List) {
+ SmallVector<int> NumberList;
+ for (const int8_t &Item : List)
+ NumberList.emplace_back(Item);
+ printListImpl(Label, NumberList);
+ }
+
+ virtual void printList(StringRef Label, const ArrayRef<APSInt> List) {
+ printListImpl(Label, List);
}
template <typename T, typename U>
@@ -227,45 +321,31 @@ public:
}
template <typename T> void printHexList(StringRef Label, const T &List) {
- startLine() << Label << ": [";
- ListSeparator LS;
+ SmallVector<HexNumber> HexList;
for (const auto &Item : List)
- OS << LS << hex(Item);
- OS << "]\n";
+ HexList.emplace_back(Item);
+ printHexListImpl(Label, HexList);
}
template <typename T> void printHex(StringRef Label, T Value) {
- startLine() << Label << ": " << hex(Value) << "\n";
+ printHexImpl(Label, hex(Value));
}
template <typename T> void printHex(StringRef Label, StringRef Str, T Value) {
- startLine() << Label << ": " << Str << " (" << hex(Value) << ")\n";
+ printHexImpl(Label, Str, hex(Value));
}
template <typename T>
void printSymbolOffset(StringRef Label, StringRef Symbol, T Value) {
- startLine() << Label << ": " << Symbol << '+' << hex(Value) << '\n';
+ printSymbolOffsetImpl(Label, Symbol, hex(Value));
}
- void printString(StringRef Value) { startLine() << Value << "\n"; }
+ virtual void printString(StringRef Value) { startLine() << Value << "\n"; }
- void printString(StringRef Label, StringRef Value) {
+ virtual void printString(StringRef Label, StringRef Value) {
startLine() << Label << ": " << Value << "\n";
}
- void printString(StringRef Label, const std::string &Value) {
- printString(Label, StringRef(Value));
- }
-
- void printString(StringRef Label, const char* Value) {
- printString(Label, StringRef(Value));
- }
-
- template <typename T>
- void printNumber(StringRef Label, StringRef Str, T Value) {
- startLine() << Label << ": " << Str << " (" << Value << ")\n";
- }
-
void printBinary(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value) {
printBinaryImpl(Label, Str, Value, false);
}
@@ -308,15 +388,27 @@ public:
}
template <typename T> void printObject(StringRef Label, const T &Value) {
- startLine() << Label << ": " << Value << "\n";
+ printString(Label, to_string(Value));
}
- raw_ostream &startLine() {
+ virtual void objectBegin() { scopedBegin('{'); }
+
+ virtual void objectBegin(StringRef Label) { scopedBegin(Label, '{'); }
+
+ virtual void objectEnd() { scopedEnd('}'); }
+
+ virtual void arrayBegin() { scopedBegin('['); }
+
+ virtual void arrayBegin(StringRef Label) { scopedBegin(Label, '['); }
+
+ virtual void arrayEnd() { scopedEnd(']'); }
+
+ virtual raw_ostream &startLine() {
printIndent();
return OS;
}
- raw_ostream &getOStream() { return OS; }
+ virtual raw_ostream &getOStream() { return OS; }
private:
template <typename T> void printVersionInternal(T Value) {
@@ -329,17 +421,87 @@ private:
printVersionInternal(Value2, Args...);
}
- template <typename T>
- static bool flagName(const EnumEntry<T> &lhs, const EnumEntry<T> &rhs) {
- return lhs.Name < rhs.Name;
+ static bool flagName(const FlagEntry &LHS, const FlagEntry &RHS) {
+ return LHS.Name < RHS.Name;
}
- void printBinaryImpl(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value,
- bool Block, uint32_t StartOffset = 0);
+ virtual void printBinaryImpl(StringRef Label, StringRef Str,
+ ArrayRef<uint8_t> Value, bool Block,
+ uint32_t StartOffset = 0);
+
+ virtual void printFlagsImpl(StringRef Label, HexNumber Value,
+ ArrayRef<FlagEntry> Flags) {
+ startLine() << Label << " [ (" << Value << ")\n";
+ for (const auto &Flag : Flags)
+ startLine() << " " << Flag.Name << " (" << hex(Flag.Value) << ")\n";
+ startLine() << "]\n";
+ }
+
+ virtual void printFlagsImpl(StringRef Label, HexNumber Value,
+ ArrayRef<HexNumber> Flags) {
+ startLine() << Label << " [ (" << Value << ")\n";
+ for (const auto &Flag : Flags)
+ startLine() << " " << Flag << '\n';
+ startLine() << "]\n";
+ }
+
+ template <typename T> void printListImpl(StringRef Label, const T List) {
+ startLine() << Label << ": [";
+ ListSeparator LS;
+ for (const auto &Item : List)
+ OS << LS << Item;
+ OS << "]\n";
+ }
+
+ virtual void printHexListImpl(StringRef Label,
+ const ArrayRef<HexNumber> List) {
+ startLine() << Label << ": [";
+ ListSeparator LS;
+ for (const auto &Item : List)
+ OS << LS << hex(Item);
+ OS << "]\n";
+ }
+
+ virtual void printHexImpl(StringRef Label, HexNumber Value) {
+ startLine() << Label << ": " << Value << "\n";
+ }
+
+ virtual void printHexImpl(StringRef Label, StringRef Str, HexNumber Value) {
+ startLine() << Label << ": " << Str << " (" << Value << ")\n";
+ }
+
+ virtual void printSymbolOffsetImpl(StringRef Label, StringRef Symbol,
+ HexNumber Value) {
+ startLine() << Label << ": " << Symbol << '+' << Value << '\n';
+ }
+
+ virtual void printNumberImpl(StringRef Label, StringRef Str,
+ StringRef Value) {
+ startLine() << Label << ": " << Str << " (" << Value << ")\n";
+ }
+
+ void scopedBegin(char Symbol) {
+ startLine() << Symbol << '\n';
+ indent();
+ }
+
+ void scopedBegin(StringRef Label, char Symbol) {
+ startLine() << Label;
+ if (!Label.empty())
+ OS << ' ';
+ OS << Symbol << '\n';
+ indent();
+ }
+
+ void scopedEnd(char Symbol) {
+ unindent();
+ startLine() << Symbol << '\n';
+ }
raw_ostream &OS;
int IndentLevel;
StringRef Prefix;
+ ScopedPrinterKind Kind;
};
template <>
@@ -349,31 +511,330 @@ ScopedPrinter::printHex<support::ulittle16_t>(StringRef Label,
startLine() << Label << ": " << hex(Value) << "\n";
}
-template<char Open, char Close>
-struct DelimitedScope {
- explicit DelimitedScope(ScopedPrinter &W) : W(W) {
- W.startLine() << Open << '\n';
- W.indent();
+struct DelimitedScope;
+
+class JSONScopedPrinter : public ScopedPrinter {
+private:
+ enum class Scope {
+ Array,
+ Object,
+ };
+
+ enum class ScopeKind {
+ NoAttribute,
+ Attribute,
+ NestedAttribute,
+ };
+
+ struct ScopeContext {
+ Scope Context;
+ ScopeKind Kind;
+ ScopeContext(Scope Context, ScopeKind Kind = ScopeKind::NoAttribute)
+ : Context(Context), Kind(Kind) {}
+ };
+
+ SmallVector<ScopeContext, 8> ScopeHistory;
+ json::OStream JOS;
+ std::unique_ptr<DelimitedScope> OuterScope;
+
+public:
+ JSONScopedPrinter(raw_ostream &OS, bool PrettyPrint = false,
+ std::unique_ptr<DelimitedScope> &&OuterScope =
+ std::unique_ptr<DelimitedScope>{});
+
+ static bool classof(const ScopedPrinter *SP) {
+ return SP->getKind() == ScopedPrinter::ScopedPrinterKind::JSON;
+ }
+
+ void printNumber(StringRef Label, uint64_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, uint32_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, uint16_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, uint8_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, int64_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, int32_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, int16_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, int8_t Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printNumber(StringRef Label, const APSInt &Value) override {
+ JOS.attributeBegin(Label);
+ printAPSInt(Value);
+ JOS.attributeEnd();
+ }
+
+ void printBoolean(StringRef Label, bool Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void printList(StringRef Label, const ArrayRef<bool> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<std::string> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<uint64_t> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<uint32_t> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<uint16_t> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<uint8_t> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<int64_t> List) override {
+ printListImpl(Label, List);
+ }
+
+ void printList(StringRef Label, const ArrayRef<int32_t> List) override {
+ printListImpl(Label, List);
}
- DelimitedScope(ScopedPrinter &W, StringRef N) : W(W) {
- W.startLine() << N;
- if (!N.empty())
- W.getOStream() << ' ';
- W.getOStream() << Open << '\n';
- W.indent();
+ void printList(StringRef Label, const ArrayRef<int16_t> List) override {
+ printListImpl(Label, List);
}
- ~DelimitedScope() {
- W.unindent();
- W.startLine() << Close << '\n';
+ void printList(StringRef Label, const ArrayRef<int8_t> List) override {
+ printListImpl(Label, List);
}
- ScopedPrinter &W;
+ void printList(StringRef Label, const ArrayRef<APSInt> List) override {
+ JOS.attributeArray(Label, [&]() {
+ for (const APSInt &Item : List) {
+ printAPSInt(Item);
+ }
+ });
+ }
+
+ void printString(StringRef Value) override { JOS.value(Value); }
+
+ void printString(StringRef Label, StringRef Value) override {
+ JOS.attribute(Label, Value);
+ }
+
+ void objectBegin() override {
+ scopedBegin({Scope::Object, ScopeKind::NoAttribute});
+ }
+
+ void objectBegin(StringRef Label) override {
+ scopedBegin(Label, Scope::Object);
+ }
+
+ void objectEnd() override { scopedEnd(); }
+
+ void arrayBegin() override {
+ scopedBegin({Scope::Array, ScopeKind::NoAttribute});
+ }
+
+ void arrayBegin(StringRef Label) override {
+ scopedBegin(Label, Scope::Array);
+ }
+
+ void arrayEnd() override { scopedEnd(); }
+
+private:
+ // Output HexNumbers as decimals so that they're easier to parse.
+ uint64_t hexNumberToInt(HexNumber Hex) { return Hex.Value; }
+
+ void printAPSInt(const APSInt &Value) {
+ JOS.rawValueBegin() << Value;
+ JOS.rawValueEnd();
+ }
+
+ void printFlagsImpl(StringRef Label, HexNumber Value,
+ ArrayRef<FlagEntry> Flags) override {
+ JOS.attributeObject(Label, [&]() {
+ JOS.attribute("RawFlags", hexNumberToInt(Value));
+ JOS.attributeArray("Flags", [&]() {
+ for (const FlagEntry &Flag : Flags) {
+ JOS.objectBegin();
+ JOS.attribute("Name", Flag.Name);
+ JOS.attribute("Value", Flag.Value);
+ JOS.objectEnd();
+ }
+ });
+ });
+ }
+
+ void printFlagsImpl(StringRef Label, HexNumber Value,
+ ArrayRef<HexNumber> Flags) override {
+ JOS.attributeObject(Label, [&]() {
+ JOS.attribute("RawFlags", hexNumberToInt(Value));
+ JOS.attributeArray("Flags", [&]() {
+ for (const HexNumber &Flag : Flags) {
+ JOS.value(Flag.Value);
+ }
+ });
+ });
+ }
+
+ template <typename T> void printListImpl(StringRef Label, const T &List) {
+ JOS.attributeArray(Label, [&]() {
+ for (const auto &Item : List)
+ JOS.value(Item);
+ });
+ }
+
+ void printHexListImpl(StringRef Label,
+ const ArrayRef<HexNumber> List) override {
+ JOS.attributeArray(Label, [&]() {
+ for (const HexNumber &Item : List) {
+ JOS.value(hexNumberToInt(Item));
+ }
+ });
+ }
+
+ void printHexImpl(StringRef Label, HexNumber Value) override {
+ JOS.attribute(Label, hexNumberToInt(Value));
+ }
+
+ void printHexImpl(StringRef Label, StringRef Str, HexNumber Value) override {
+ JOS.attributeObject(Label, [&]() {
+ JOS.attribute("Value", Str);
+ JOS.attribute("RawValue", hexNumberToInt(Value));
+ });
+ }
+
+ void printSymbolOffsetImpl(StringRef Label, StringRef Symbol,
+ HexNumber Value) override {
+ JOS.attributeObject(Label, [&]() {
+ JOS.attribute("SymName", Symbol);
+ JOS.attribute("Offset", hexNumberToInt(Value));
+ });
+ }
+
+ void printNumberImpl(StringRef Label, StringRef Str,
+ StringRef Value) override {
+ JOS.attributeObject(Label, [&]() {
+ JOS.attribute("Value", Str);
+ JOS.attributeBegin("RawValue");
+ JOS.rawValueBegin() << Value;
+ JOS.rawValueEnd();
+ JOS.attributeEnd();
+ });
+ }
+
+ void printBinaryImpl(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value,
+ bool Block, uint32_t StartOffset = 0) override {
+ JOS.attributeObject(Label, [&]() {
+ if (!Str.empty())
+ JOS.attribute("Value", Str);
+ JOS.attribute("Offset", StartOffset);
+ JOS.attributeArray("Bytes", [&]() {
+ for (uint8_t Val : Value)
+ JOS.value(Val);
+ });
+ });
+ }
+
+ void scopedBegin(ScopeContext ScopeCtx) {
+ if (ScopeCtx.Context == Scope::Object)
+ JOS.objectBegin();
+ else if (ScopeCtx.Context == Scope::Array)
+ JOS.arrayBegin();
+ ScopeHistory.push_back(ScopeCtx);
+ }
+
+ void scopedBegin(StringRef Label, Scope Ctx) {
+ ScopeKind Kind = ScopeKind::Attribute;
+ if (ScopeHistory.empty() || ScopeHistory.back().Context != Scope::Object) {
+ JOS.objectBegin();
+ Kind = ScopeKind::NestedAttribute;
+ }
+ JOS.attributeBegin(Label);
+ scopedBegin({Ctx, Kind});
+ }
+
+ void scopedEnd() {
+ ScopeContext ScopeCtx = ScopeHistory.back();
+ if (ScopeCtx.Context == Scope::Object)
+ JOS.objectEnd();
+ else if (ScopeCtx.Context == Scope::Array)
+ JOS.arrayEnd();
+ if (ScopeCtx.Kind == ScopeKind::Attribute ||
+ ScopeCtx.Kind == ScopeKind::NestedAttribute)
+ JOS.attributeEnd();
+ if (ScopeCtx.Kind == ScopeKind::NestedAttribute)
+ JOS.objectEnd();
+ ScopeHistory.pop_back();
+ }
+};
+
+struct DelimitedScope {
+ DelimitedScope(ScopedPrinter &W) : W(&W) {}
+ DelimitedScope() : W(nullptr) {}
+ virtual ~DelimitedScope(){};
+ virtual void setPrinter(ScopedPrinter &W) = 0;
+ ScopedPrinter *W;
+};
+
+struct DictScope : DelimitedScope {
+ explicit DictScope() : DelimitedScope() {}
+ explicit DictScope(ScopedPrinter &W) : DelimitedScope(W) { W.objectBegin(); }
+
+ DictScope(ScopedPrinter &W, StringRef N) : DelimitedScope(W) {
+ W.objectBegin(N);
+ }
+
+ void setPrinter(ScopedPrinter &W) override {
+ this->W = &W;
+ W.objectBegin();
+ }
+
+ ~DictScope() {
+ if (W)
+ W->objectEnd();
+ }
};
-using DictScope = DelimitedScope<'{', '}'>;
-using ListScope = DelimitedScope<'[', ']'>;
+struct ListScope : DelimitedScope {
+ explicit ListScope() : DelimitedScope() {}
+ explicit ListScope(ScopedPrinter &W) : DelimitedScope(W) { W.arrayBegin(); }
+
+ ListScope(ScopedPrinter &W, StringRef N) : DelimitedScope(W) {
+ W.arrayBegin(N);
+ }
+
+ void setPrinter(ScopedPrinter &W) override {
+ this->W = &W;
+ W.arrayBegin();
+ }
+
+ ~ListScope() {
+ if (W)
+ W->arrayEnd();
+ }
+};
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h b/contrib/llvm-project/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h
index 9aa4e9aec266..f7f2d4e54e70 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h
@@ -28,23 +28,21 @@ namespace llvm {
/// MemoryBuffer).
class SmallVectorMemoryBuffer : public MemoryBuffer {
public:
- /// Construct an SmallVectorMemoryBuffer from the given SmallVector
- /// r-value.
- ///
- /// FIXME: It'd be nice for this to be a non-templated constructor taking a
- /// SmallVectorImpl here instead of a templated one taking a SmallVector<N>,
- /// but SmallVector's move-construction/assignment currently only take
- /// SmallVectors. If/when that is fixed we can simplify this constructor and
- /// the following one.
- SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV)
- : SV(std::move(SV)), BufferName("<in-memory object>") {
- init(this->SV.begin(), this->SV.end(), false);
- }
-
- /// Construct a named SmallVectorMemoryBuffer from the given
- /// SmallVector r-value and StringRef.
- SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name)
+ /// Construct a SmallVectorMemoryBuffer from the given SmallVector r-value.
+ SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV,
+ bool RequiresNullTerminator = true)
+ : SmallVectorMemoryBuffer(std::move(SV), "<in-memory object>",
+ RequiresNullTerminator) {}
+
+ /// Construct a named SmallVectorMemoryBuffer from the given SmallVector
+ /// r-value and StringRef.
+ SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name,
+ bool RequiresNullTerminator = true)
: SV(std::move(SV)), BufferName(std::string(Name)) {
+ if (RequiresNullTerminator) {
+ this->SV.push_back('\0');
+ this->SV.pop_back();
+ }
init(this->SV.begin(), this->SV.end(), false);
}
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h b/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h
index b11467dcce28..01e25a0ea857 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h
@@ -17,8 +17,9 @@
// FIXME: vector is used because that's what clang uses for subtarget feature
// lists, but SmallVector would probably be better
#include "llvm/ADT/Triple.h"
-#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/AArch64TargetParser.h"
+#include "llvm/Support/ARMTargetParser.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include <vector>
namespace llvm {
@@ -174,6 +175,7 @@ void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
bool getCPUFeaturesExceptStdExt(CPUKind Kind, std::vector<StringRef> &Features);
StringRef resolveTuneCPUAlias(StringRef TuneCPU, bool IsRV64);
+StringRef computeDefaultABIFromArch(const llvm::RISCVISAInfo &ISAInfo);
} // namespace RISCV
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h b/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h
index 8d30e8e92755..aecff122d3cb 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h
@@ -65,7 +65,10 @@ public:
/// It is an error to try to add new tasks while blocking on this call.
void wait();
- unsigned getThreadCount() const { return ThreadCount; }
+ // TODO: misleading legacy name: this returns the maximum number of worker
+ // threads the pool may grow to, not the current number of threads.
+ unsigned getThreadCount() const { return MaxThreadCount; }
/// Returns true if the current thread is a worker thread of this thread pool.
bool isWorkerThread() const;
@@ -108,6 +111,7 @@ private:
/// corresponding future.
auto R = createTaskAndFuture(Task);
+ int requestedThreads;
{
// Lock the queue and push the new task
std::unique_lock<std::mutex> LockGuard(QueueLock);
@@ -115,8 +119,10 @@ private:
// Don't allow enqueueing after disabling the pool
assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
Tasks.push(std::move(R.first));
+ requestedThreads = ActiveThreads + Tasks.size();
}
QueueCondition.notify_one();
+ grow(requestedThreads);
return R.second.share();
#else // LLVM_ENABLE_THREADS Disabled
@@ -130,8 +136,16 @@ private:
#endif
}
+#if LLVM_ENABLE_THREADS
+ // Grow to ensure that we have at least `requested` Threads, but do not go
+ // over MaxThreadCount.
+ void grow(int requested);
+#endif
+
/// Threads in flight
std::vector<llvm::thread> Threads;
+ /// Lock protecting access to the Threads vector.
+ mutable std::mutex ThreadsLock;
/// Tasks waiting for execution in the pool.
std::queue<std::function<void()>> Tasks;
@@ -151,7 +165,10 @@ private:
bool EnableFlag = true;
#endif
- unsigned ThreadCount;
+ const ThreadPoolStrategy Strategy;
+
+ /// Maximum number of threads to potentially grow this pool to.
+ const unsigned MaxThreadCount;
};
}
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ToolOutputFile.h b/contrib/llvm-project/llvm/include/llvm/Support/ToolOutputFile.h
index ec1d6ae52268..6b7222550b9f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ToolOutputFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ToolOutputFile.h
@@ -29,9 +29,10 @@ class ToolOutputFile {
/// raw_fd_ostream is destructed. It installs cleanups in its constructor and
/// uninstalls them in its destructor.
class CleanupInstaller {
+ public:
/// The name of the file.
std::string Filename;
- public:
+
/// The flag which indicates whether we should not delete the file.
bool Keep;
@@ -64,6 +65,8 @@ public:
/// Indicate that the tool's job wrt this output file has been successful and
/// the file should not be deleted.
void keep() { Installer.Keep = true; }
+
+ const std::string &outputFilename() { return Installer.Filename; }
};
} // end llvm namespace
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h b/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h
index 10d2389ee079..78bcdbf3e932 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -64,6 +64,8 @@ public:
uint64_t Size, llvm::sys::fs::file_type Type,
llvm::sys::fs::perms Perms);
+ /// Get a copy of a Status with a different size.
+ static Status copyWithNewSize(const Status &In, uint64_t NewSize);
/// Get a copy of a Status with a different name.
static Status copyWithNewName(const Status &In, const Twine &NewName);
static Status copyWithNewName(const llvm::sys::fs::file_status &In,
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h
index 912f6d1c153a..c639f326abc9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h
@@ -418,6 +418,11 @@ namespace llvm {
/// Machine level options.
MCTargetOptions MCOptions;
+
+ /// Stores the filename/path of the final .o/.obj file, to be written in the
+ /// debug information. This is used for emitting the CodeView S_OBJNAME
+ /// record.
+ std::string ObjectFilenameForDebug;
};
} // End llvm namespace
diff --git a/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h b/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h
index 03a541454e1a..6ef4db2ae158 100644
--- a/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h
@@ -381,6 +381,8 @@ public:
return {Symbols.begin(), Symbols.end()};
}
+ size_t symbolsCount() const { return Symbols.size(); }
+
const_filtered_symbol_range exports() const {
std::function<bool(const Symbol *)> fn = [](const Symbol *Symbol) {
return !Symbol->isUndefined();
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ProfiledCallGraph.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ProfiledCallGraph.h
index 429fcbd81b45..893654650caa 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ProfiledCallGraph.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ProfiledCallGraph.h
@@ -68,7 +68,8 @@ public:
// Constructor for non-CS profile.
ProfiledCallGraph(SampleProfileMap &ProfileMap) {
- assert(!FunctionSamples::ProfileIsCS && "CS profile is not handled here");
+ assert(!FunctionSamples::ProfileIsCSFlat &&
+ "CS flat profile is not handled here");
for (const auto &Samples : ProfileMap) {
addProfiledCalls(Samples.second);
}
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/SampleContextTracker.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/SampleContextTracker.h
index 5d80da407d7e..cf87d028600f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/SampleContextTracker.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/SampleContextTracker.h
@@ -66,8 +66,6 @@ public:
void dumpTree();
private:
- static uint64_t nodeHash(StringRef ChildName, const LineLocation &Callsite);
-
// Map line+discriminator location to child context
std::map<uint64_t, ContextTrieNode> AllChildContext;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/FlattenCFG.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/FlattenCFG.h
new file mode 100644
index 000000000000..ff49a4ab7ceb
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/FlattenCFG.h
@@ -0,0 +1,25 @@
+//===- FlattenCFG.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The FlattenCFG pass flattens a function's CFG using the FlattenCFG utility
+// function, iteratively flattening until no further changes are made.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_FLATTENCFG_H
+#define LLVM_TRANSFORMS_SCALAR_FLATTENCFG_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct FlattenCFGPass : PassInfoMixin<FlattenCFGPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_FLATTENCFG_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h
new file mode 100644
index 000000000000..987a5651a8b6
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h
@@ -0,0 +1,58 @@
+//===- CodeLayout.h - Code layout/placement algorithms ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Declares methods and data structures for code layout algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+
+/// Find a layout of nodes (basic blocks) of a given CFG optimizing jump
+/// locality and thus processor I-cache utilization. This is achieved via
+/// increasing the number of fall-through jumps and co-locating frequently
+/// executed nodes together.
+/// The nodes are assumed to be indexed by integers from [0, |V|) so that the
+/// current order is the identity permutation.
+/// \p NodeSizes: The sizes of the nodes (in bytes).
+/// \p NodeCounts: The execution counts of the nodes in the profile.
+/// \p EdgeCounts: The execution counts of every edge (jump) in the profile. The
+/// map also defines the edges in CFG and should include 0-count edges.
+/// \returns The best block order found.
+std::vector<uint64_t> applyExtTspLayout(
+ const std::vector<uint64_t> &NodeSizes,
+ const std::vector<uint64_t> &NodeCounts,
+ const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts);
+
+/// Estimate the "quality" of a given node order in CFG. The higher the score,
+/// the better the order is. The score is designed to reflect the locality of
+/// the given order, which is anti-correlated with the number of I-cache misses
+/// in a typical execution of the function.
+double calcExtTspScore(
+ const std::vector<uint64_t> &Order, const std::vector<uint64_t> &NodeSizes,
+ const std::vector<uint64_t> &NodeCounts,
+ const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts);
+
+/// Estimate the "quality" of the current node order in CFG.
+double calcExtTspScore(
+ const std::vector<uint64_t> &NodeSizes,
+ const std::vector<uint64_t> &NodeCounts,
+ const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/FunctionComparator.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/FunctionComparator.h
index e808a50b320f..964fdce45744 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/FunctionComparator.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -320,6 +320,7 @@ protected:
int cmpTypes(Type *TyL, Type *TyR) const;
int cmpNumbers(uint64_t L, uint64_t R) const;
+ int cmpAligns(Align L, Align R) const;
int cmpAPInts(const APInt &L, const APInt &R) const;
int cmpAPFloats(const APFloat &L, const APFloat &R) const;
int cmpMem(StringRef L, StringRef R) const;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h
index 3c529abce85a..a914c6e0925f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h
@@ -89,6 +89,14 @@ bool isInstructionTriviallyDead(Instruction *I,
bool wouldInstructionBeTriviallyDead(Instruction *I,
const TargetLibraryInfo *TLI = nullptr);
+/// Return true if the result produced by the instruction has no side effects on
+/// any paths other than where it is used. This is less conservative than
+/// wouldInstructionBeTriviallyDead which is based on the assumption
+/// that the use count will be 0. An example usage of this API is for
+/// identifying instructions that can be sunk down to use(s).
+bool wouldInstructionBeTriviallyDeadOnUnusedPaths(
+ Instruction *I, const TargetLibraryInfo *TLI = nullptr);
+
/// If the specified value is a trivially dead instruction, delete it.
/// If that makes any of its operands trivially dead, delete them too,
/// recursively. Return true if any instructions were deleted.
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index 30c3f71e0947..e0a9115f61b0 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -367,14 +367,12 @@ Value *createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
/// Generates an ordered vector reduction using extracts to reduce the value.
Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
- unsigned Op, RecurKind MinMaxKind = RecurKind::None,
- ArrayRef<Value *> RedOps = None);
+ unsigned Op, RecurKind MinMaxKind = RecurKind::None);
/// Generates a vector reduction using shufflevectors to reduce the value.
/// Fast-math-flags are propagated using the IRBuilder's setting.
Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
- RecurKind MinMaxKind = RecurKind::None,
- ArrayRef<Value *> RedOps = None);
+ RecurKind MinMaxKind = RecurKind::None);
/// Create a target reduction of the given vector. The reduction operation
/// is described by the \p Opcode parameter. min/max reductions require
@@ -384,8 +382,7 @@ Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
/// Fast-math-flags are propagated using the IRBuilder's setting.
Value *createSimpleTargetReduction(IRBuilderBase &B,
const TargetTransformInfo *TTI, Value *Src,
- RecurKind RdxKind,
- ArrayRef<Value *> RedOps = None);
+ RecurKind RdxKind);
/// Create a target reduction of the given vector \p Src for a reduction of the
/// kind RecurKind::SelectICmp or RecurKind::SelectFCmp. The reduction operation
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index ed9e0beb0339..32d295a2dd16 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -29,6 +29,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
@@ -104,14 +105,12 @@ public:
/// Vectorize loops using scalable vectors or fixed-width vectors, but favor
/// scalable vectors when the cost-model is inconclusive. This is the
/// default when the scalable.enable hint is enabled through a pragma.
- SK_PreferScalable = 1,
- /// Vectorize loops using scalable vectors or fixed-width vectors, but
- /// favor fixed-width vectors when the cost is inconclusive.
- SK_PreferFixedWidth = 2,
+ SK_PreferScalable = 1
};
LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
- OptimizationRemarkEmitter &ORE);
+ OptimizationRemarkEmitter &ORE,
+ const TargetTransformInfo *TTI = nullptr);
/// Mark the loop L as already vectorized by setting the width to 1.
void setAlreadyVectorized();
@@ -123,9 +122,10 @@ public:
void emitRemarkWithHints() const;
ElementCount getWidth() const {
- return ElementCount::get(Width.Value,
- isScalableVectorizationExplicitlyEnabled());
+ return ElementCount::get(Width.Value, (ScalableForceKind)Scalable.Value ==
+ SK_PreferScalable);
}
+
unsigned getInterleave() const {
if (Interleave.Value)
return Interleave.Value;
@@ -144,22 +144,9 @@ public:
return (ForceKind)Force.Value;
}
- /// \return true if the cost-model for scalable vectorization should
- /// favor vectorization with scalable vectors over fixed-width vectors when
- /// the cost-model is inconclusive.
- bool isScalableVectorizationPreferred() const {
- return Scalable.Value == SK_PreferScalable;
- }
-
- /// \return true if scalable vectorization has been explicitly enabled.
- bool isScalableVectorizationExplicitlyEnabled() const {
- return Scalable.Value == SK_PreferFixedWidth ||
- Scalable.Value == SK_PreferScalable;
- }
-
/// \return true if scalable vectorization has been explicitly disabled.
bool isScalableVectorizationDisabled() const {
- return Scalable.Value == SK_FixedWidthOnly;
+ return (ScalableForceKind)Scalable.Value == SK_FixedWidthOnly;
}
/// If hints are provided that force vectorization, use the AlwaysPrint
@@ -293,10 +280,10 @@ public:
PHINode *getPrimaryInduction() { return PrimaryInduction; }
/// Returns the reduction variables found in the loop.
- ReductionList &getReductionVars() { return Reductions; }
+ const ReductionList &getReductionVars() const { return Reductions; }
/// Returns the induction variables found in the loop.
- InductionList &getInductionVars() { return Inductions; }
+ const InductionList &getInductionVars() const { return Inductions; }
/// Return the first-order recurrences found in the loop.
RecurrenceSet &getFirstOrderRecurrences() { return FirstOrderRecurrences; }
@@ -308,23 +295,27 @@ public:
Type *getWidestInductionType() { return WidestIndTy; }
/// Returns True if V is a Phi node of an induction variable in this loop.
- bool isInductionPhi(const Value *V);
+ bool isInductionPhi(const Value *V) const;
+
+ /// Returns a pointer to the induction descriptor, if \p Phi is an integer or
+ /// floating point induction.
+ const InductionDescriptor *getIntOrFpInductionDescriptor(PHINode *Phi) const;
/// Returns True if V is a cast that is part of an induction def-use chain,
/// and had been proven to be redundant under a runtime guard (in other
/// words, the cast has the same SCEV expression as the induction phi).
- bool isCastedInductionVariable(const Value *V);
+ bool isCastedInductionVariable(const Value *V) const;
 /// Returns True if V can be considered as an induction variable in this
 /// loop. V can be the induction phi, or some redundant cast in the def-use
 /// chain of the induction phi.
- bool isInductionVariable(const Value *V);
+ bool isInductionVariable(const Value *V) const;
/// Returns True if PN is a reduction variable in this loop.
- bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
+ bool isReductionVariable(PHINode *PN) const { return Reductions.count(PN); }
/// Returns True if Phi is a first-order recurrence in this loop.
- bool isFirstOrderRecurrence(const PHINode *Phi);
+ bool isFirstOrderRecurrence(const PHINode *Phi) const;
/// Return true if the block BB needs to be predicated in order for the loop
/// to be vectorized.
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
index d105496ad47f..463b5a058052 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -80,6 +80,38 @@ class TargetTransformInfo;
extern cl::opt<bool> EnableLoopInterleaving;
extern cl::opt<bool> EnableLoopVectorization;
+/// A marker to determine if extra passes after loop vectorization should be
+/// run.
+struct ShouldRunExtraVectorPasses
+ : public AnalysisInfoMixin<ShouldRunExtraVectorPasses> {
+ static AnalysisKey Key;
+ struct Result {
+ bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &) {
+ // Check whether the analysis has been explicitly invalidated. Otherwise,
+ // it remains preserved.
+ auto PAC = PA.getChecker<ShouldRunExtraVectorPasses>();
+ return !PAC.preservedWhenStateless();
+ }
+ };
+
+ Result run(Function &F, FunctionAnalysisManager &FAM) { return Result(); }
+};
+
+/// A pass manager to run a set of extra function simplification passes after
+/// vectorization, if requested. LoopVectorize caches the
+/// ShouldRunExtraVectorPasses analysis to request extra simplifications, if
+/// they could be beneficial.
+struct ExtraVectorPassManager : public FunctionPassManager {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
+ auto PA = PreservedAnalyses::all();
+ if (AM.getCachedResult<ShouldRunExtraVectorPasses>(F))
+ PA.intersect(FunctionPassManager::run(F, AM));
+ PA.abandon<ShouldRunExtraVectorPasses>();
+ return PA;
+ }
+};
+
struct LoopVectorizeOptions {
/// If false, consider all loops for interleaving.
/// If true, only loops that explicitly request interleaving are considered.
diff --git a/contrib/llvm-project/llvm/include/llvm/module.modulemap b/contrib/llvm-project/llvm/include/llvm/module.modulemap
index 6cbbb9a4028e..b0f7f2120606 100644
--- a/contrib/llvm-project/llvm/include/llvm/module.modulemap
+++ b/contrib/llvm-project/llvm/include/llvm/module.modulemap
@@ -354,6 +354,7 @@ module LLVM_ProfileData {
module * { export * }
textual header "ProfileData/InstrProfData.inc"
+ textual header "ProfileData/MemProfData.inc"
}
// FIXME: Mislayered?