Diffstat (limited to 'lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp')
-rw-r--r--  lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 1132
1 file changed, 935 insertions(+), 197 deletions(-)
diff --git a/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 670f6225fbf7..5aba35a19ced 100644
--- a/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -11,6 +11,13 @@
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
+#if defined(_MSC_VER) || defined(__MINGW32__)
+// According to Microsoft, one must set _USE_MATH_DEFINES in order to get M_PI
+// from the Visual C++ cmath / math.h headers:
+// https://docs.microsoft.com/en-us/cpp/c-runtime-library/math-constants?view=vs-2019
+#define _USE_MATH_DEFINES
+#endif
+
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
@@ -20,6 +27,7 @@
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
@@ -32,7 +40,7 @@ using namespace LegalityPredicates;
static LegalityPredicate isMultiple32(unsigned TypeIdx,
- unsigned MaxSize = 512) {
+ unsigned MaxSize = 1024) {
return [=](const LegalityQuery &Query) {
const LLT Ty = Query.Types[TypeIdx];
const LLT EltTy = Ty.getScalarType();
@@ -40,12 +48,27 @@ static LegalityPredicate isMultiple32(unsigned TypeIdx,
};
}
+static LegalityPredicate sizeIs(unsigned TypeIdx, unsigned Size) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx].getSizeInBits() == Size;
+ };
+}
+
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
return [=](const LegalityQuery &Query) {
const LLT Ty = Query.Types[TypeIdx];
return Ty.isVector() &&
Ty.getNumElements() % 2 != 0 &&
- Ty.getElementType().getSizeInBits() < 32;
+ Ty.getElementType().getSizeInBits() < 32 &&
+ Ty.getSizeInBits() % 32 != 0;
+ };
+}
+
+static LegalityPredicate isWideVec16(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ const LLT Ty = Query.Types[TypeIdx];
+ const LLT EltTy = Ty.getScalarType();
+ return EltTy.getSizeInBits() == 16 && Ty.getNumElements() > 2;
};
}
@@ -68,6 +91,31 @@ static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
};
}
+// Increase the number of vector elements to reach the next multiple of 32-bit
+// type.
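+// For example, <3 x s8> (24 bits) widens to <4 x s8> (32 bits), and
+// <3 x s16> (48 bits) widens to <4 x s16> (64 bits).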
+static LegalizeMutation moreEltsToNext32Bit(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ const LLT Ty = Query.Types[TypeIdx];
+
+ const LLT EltTy = Ty.getElementType();
+ const int Size = Ty.getSizeInBits();
+ const int EltSize = EltTy.getSizeInBits();
+ const int NextMul32 = (Size + 31) / 32;
+
+ assert(EltSize < 32);
+
+ const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;
+ return std::make_pair(TypeIdx, LLT::vector(NewNumElts, EltTy));
+ };
+}
+
+static LegalityPredicate vectorSmallerThan(unsigned TypeIdx, unsigned Size) {
+ return [=](const LegalityQuery &Query) {
+ const LLT QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isVector() && QueryTy.getSizeInBits() < Size;
+ };
+}
+
static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
return [=](const LegalityQuery &Query) {
const LLT QueryTy = Query.Types[TypeIdx];
@@ -82,7 +130,7 @@ static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
};
}
-// Any combination of 32 or 64-bit elements up to 512 bits, and multiples of
+// Any combination of 32 or 64-bit elements up to 1024 bits, and multiples of
// v2s16.
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
return [=](const LegalityQuery &Query) {
@@ -94,7 +142,21 @@ static LegalityPredicate isRegisterType(unsigned TypeIdx) {
EltSize == 128 || EltSize == 256;
}
- return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 512;
+ return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 1024;
+ };
+}
+
+static LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT Type) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx].getElementType() == Type;
+ };
+}
+
+static LegalityPredicate isWideScalarTruncStore(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ const LLT Ty = Query.Types[TypeIdx];
+ return !Ty.isVector() && Ty.getSizeInBits() > 32 &&
+ Query.MMODescrs[0].SizeInBits < Ty.getSizeInBits();
};
}
@@ -112,9 +174,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
const LLT S16 = LLT::scalar(16);
const LLT S32 = LLT::scalar(32);
const LLT S64 = LLT::scalar(64);
+ const LLT S96 = LLT::scalar(96);
const LLT S128 = LLT::scalar(128);
const LLT S256 = LLT::scalar(256);
- const LLT S512 = LLT::scalar(512);
+ const LLT S1024 = LLT::scalar(1024);
const LLT V2S16 = LLT::vector(2, 16);
const LLT V4S16 = LLT::vector(4, 16);
@@ -134,6 +197,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
const LLT V14S32 = LLT::vector(14, 32);
const LLT V15S32 = LLT::vector(15, 32);
const LLT V16S32 = LLT::vector(16, 32);
+ const LLT V32S32 = LLT::vector(32, 32);
const LLT V2S64 = LLT::vector(2, 64);
const LLT V3S64 = LLT::vector(3, 64);
@@ -142,16 +206,19 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
const LLT V6S64 = LLT::vector(6, 64);
const LLT V7S64 = LLT::vector(7, 64);
const LLT V8S64 = LLT::vector(8, 64);
+ const LLT V16S64 = LLT::vector(16, 64);
std::initializer_list<LLT> AllS32Vectors =
{V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
- V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
+ V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32, V32S32};
std::initializer_list<LLT> AllS64Vectors =
- {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};
+ {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64, V16S64};
const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
+ const LLT Constant32Ptr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS_32BIT);
const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
+ const LLT RegionPtr = GetAddrSpacePtr(AMDGPUAS::REGION_ADDRESS);
const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);
@@ -162,7 +229,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
};
const std::initializer_list<LLT> AddrSpaces32 = {
- LocalPtr, PrivatePtr
+ LocalPtr, PrivatePtr, Constant32Ptr, RegionPtr
};
const std::initializer_list<LLT> FPTypesBase = {
@@ -216,37 +283,34 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
.clampScalar(0, S32, S64)
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
- .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
+ .fewerElementsIf(vectorWiderThan(0, 64), fewerEltsToSize64Vector(0))
.widenScalarToNextPow2(0)
.scalarize(0);
- getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
+ getActionDefinitionsBuilder({G_UADDO, G_USUBO,
G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
.legalFor({{S32, S1}})
- .clampScalar(0, S32, S32);
+ .clampScalar(0, S32, S32)
+ .scalarize(0); // TODO: Implement.
+
+ getActionDefinitionsBuilder({G_SADDO, G_SSUBO})
+ .lower();
getActionDefinitionsBuilder(G_BITCAST)
- .legalForCartesianProduct({S32, V2S16})
- .legalForCartesianProduct({S64, V2S32, V4S16})
- .legalForCartesianProduct({V2S64, V4S32})
// Don't worry about the size constraint.
- .legalIf(all(isPointer(0), isPointer(1)));
+ .legalIf(all(isRegisterType(0), isRegisterType(1)))
+ // FIXME: Testing hack
+ .legalForCartesianProduct({S16, LLT::vector(2, 8), });
- if (ST.has16BitInsts()) {
- getActionDefinitionsBuilder(G_FCONSTANT)
- .legalFor({S32, S64, S16})
- .clampScalar(0, S16, S64);
- } else {
- getActionDefinitionsBuilder(G_FCONSTANT)
- .legalFor({S32, S64})
- .clampScalar(0, S32, S64);
- }
+ getActionDefinitionsBuilder(G_FCONSTANT)
+ .legalFor({S32, S64, S16})
+ .clampScalar(0, S16, S64);
getActionDefinitionsBuilder(G_IMPLICIT_DEF)
- .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
+ .legalFor({S1, S32, S64, S16, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
- .clampScalarOrElt(0, S32, S512)
+ .clampScalarOrElt(0, S32, S1024)
.legalIf(isMultiple32(0))
.widenScalarToNextPow2(0, 32)
.clampMaxNumElements(0, S32, 16);
@@ -256,23 +320,33 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
// values may not be legal. We need to figure out how to distinguish
// between these two scenarios.
getActionDefinitionsBuilder(G_CONSTANT)
- .legalFor({S1, S32, S64, GlobalPtr,
+ .legalFor({S1, S32, S64, S16, GlobalPtr,
LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
.clampScalar(0, S32, S64)
.widenScalarToNextPow2(0)
.legalIf(isPointer(0));
setAction({G_FRAME_INDEX, PrivatePtr}, Legal);
+ getActionDefinitionsBuilder(G_GLOBAL_VALUE)
+ .customFor({LocalPtr, GlobalPtr, ConstantPtr, Constant32Ptr});
+
auto &FPOpActions = getActionDefinitionsBuilder(
- { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
+ { G_FADD, G_FMUL, G_FMA, G_FCANONICALIZE})
.legalFor({S32, S64});
+ auto &TrigActions = getActionDefinitionsBuilder({G_FSIN, G_FCOS})
+ .customFor({S32, S64});
+ auto &FDIVActions = getActionDefinitionsBuilder(G_FDIV)
+ .customFor({S32, S64});
if (ST.has16BitInsts()) {
if (ST.hasVOP3PInsts())
FPOpActions.legalFor({S16, V2S16});
else
FPOpActions.legalFor({S16});
+
+ TrigActions.customFor({S16});
+ FDIVActions.customFor({S16});
}
auto &MinNumMaxNum = getActionDefinitionsBuilder({
@@ -293,22 +367,37 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0);
}
- // TODO: Implement
- getActionDefinitionsBuilder({G_FMINIMUM, G_FMAXIMUM}).lower();
-
if (ST.hasVOP3PInsts())
FPOpActions.clampMaxNumElements(0, S16, 2);
+
FPOpActions
.scalarize(0)
.clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
+ TrigActions
+ .scalarize(0)
+ .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
+
+ FDIVActions
+ .scalarize(0)
+ .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
+
+ getActionDefinitionsBuilder({G_FNEG, G_FABS})
+ .legalFor(FPTypesPK16)
+ .clampMaxNumElements(0, S16, 2)
+ .scalarize(0)
+ .clampScalar(0, S16, S64);
+
+ // TODO: Implement
+ getActionDefinitionsBuilder({G_FMINIMUM, G_FMAXIMUM}).lower();
+
if (ST.has16BitInsts()) {
- getActionDefinitionsBuilder(G_FSQRT)
+ getActionDefinitionsBuilder({G_FSQRT, G_FFLOOR})
.legalFor({S32, S64, S16})
.scalarize(0)
.clampScalar(0, S16, S64);
} else {
- getActionDefinitionsBuilder(G_FSQRT)
+ getActionDefinitionsBuilder({G_FSQRT, G_FFLOOR})
.legalFor({S32, S64})
.scalarize(0)
.clampScalar(0, S32, S64);
@@ -334,23 +423,43 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0)
.clampScalar(0, S32, S64);
+ // Whether this is legal depends on the floating point mode for the function.
+ auto &FMad = getActionDefinitionsBuilder(G_FMAD);
+ if (ST.hasMadF16())
+ FMad.customFor({S32, S16});
+ else
+ FMad.customFor({S32});
+ FMad.scalarize(0)
+ .lower();
+
getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
.legalFor({{S64, S32}, {S32, S16}, {S64, S16},
{S32, S1}, {S64, S1}, {S16, S1},
+ {S96, S32},
// FIXME: Hack
{S64, LLT::scalar(33)},
{S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
.scalarize(0);
- getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
- .legalFor({{S32, S32}, {S64, S32}})
+ // TODO: Split s1->s64 during regbankselect for VALU.
+ auto &IToFP = getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
+ .legalFor({{S32, S32}, {S64, S32}, {S16, S32}, {S32, S1}, {S16, S1}, {S64, S1}})
.lowerFor({{S32, S64}})
- .customFor({{S64, S64}})
- .scalarize(0);
+ .customFor({{S64, S64}});
+ if (ST.has16BitInsts())
+ IToFP.legalFor({{S16, S16}});
+ IToFP.clampScalar(1, S32, S64)
+ .scalarize(0);
+
+ auto &FPToI = getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
+ .legalFor({{S32, S32}, {S32, S64}, {S32, S16}});
+ if (ST.has16BitInsts())
+ FPToI.legalFor({{S16, S16}});
+ else
+ FPToI.minScalar(1, S32);
- getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
- .legalFor({{S32, S32}, {S32, S64}})
- .scalarize(0);
+ FPToI.minScalar(0, S32)
+ .scalarize(0);
getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
.legalFor({S32, S64})
@@ -374,6 +483,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.legalForCartesianProduct(AddrSpaces32, {S32})
.scalarize(0);
+ getActionDefinitionsBuilder(G_PTR_MASK)
+ .scalarize(0)
+ .alwaysLegal();
+
setAction({G_BLOCK_ADDR, CodePtr}, Legal);
auto &CmpBuilder =
@@ -415,7 +528,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.widenScalarToNextPow2(1, 32);
// TODO: Expand for > s32
- getActionDefinitionsBuilder(G_BSWAP)
+ getActionDefinitionsBuilder({G_BSWAP, G_BITREVERSE})
.legalFor({S32})
.clampScalar(0, S32, S32)
.scalarize(0);
@@ -491,87 +604,239 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
});
- if (ST.hasFlatAddressSpace()) {
- getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
- .scalarize(0)
- .custom();
- }
+ getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
+ .scalarize(0)
+ .custom();
// TODO: Should load to s16 be legal? Most loads extend to 32-bits, but we
// handle some operations by just promoting the register during
// selection. There are also d16 loads on GFX9+ which preserve the high bits.
- getActionDefinitionsBuilder({G_LOAD, G_STORE})
- .narrowScalarIf([](const LegalityQuery &Query) {
- unsigned Size = Query.Types[0].getSizeInBits();
- unsigned MemSize = Query.MMODescrs[0].SizeInBits;
- return (Size > 32 && MemSize < Size);
- },
- [](const LegalityQuery &Query) {
- return std::make_pair(0, LLT::scalar(32));
- })
- .fewerElementsIf([=](const LegalityQuery &Query) {
- unsigned MemSize = Query.MMODescrs[0].SizeInBits;
- return (MemSize == 96) &&
- Query.Types[0].isVector() &&
- !ST.hasDwordx3LoadStores();
- },
- [=](const LegalityQuery &Query) {
- return std::make_pair(0, V2S32);
- })
- .legalIf([=](const LegalityQuery &Query) {
- const LLT &Ty0 = Query.Types[0];
-
- unsigned Size = Ty0.getSizeInBits();
- unsigned MemSize = Query.MMODescrs[0].SizeInBits;
- if (Size < 32 || (Size > 32 && MemSize < Size))
- return false;
-
- if (Ty0.isVector() && Size != MemSize)
- return false;
-
- // TODO: Decompose private loads into 4-byte components.
- // TODO: Illegal flat loads on SI
- switch (MemSize) {
- case 8:
- case 16:
- return Size == 32;
- case 32:
- case 64:
- case 128:
- return true;
+ auto maxSizeForAddrSpace = [this](unsigned AS) -> unsigned {
+ switch (AS) {
+ // FIXME: Private element size.
+ case AMDGPUAS::PRIVATE_ADDRESS:
+ return 32;
+ // FIXME: Check subtarget
+ case AMDGPUAS::LOCAL_ADDRESS:
+ return ST.useDS128() ? 128 : 64;
+
+ // Treat constant and global as identical. SMRD loads are sometimes usable
+ // for global loads (ideally constant address space should be eliminated)
+ // depending on the context. Legality cannot be context dependent, but
+ // RegBankSelect can split the load as necessary depending on the pointer
+ // register bank/uniformity and if the memory is invariant or not written in
+ // a kernel.
+ case AMDGPUAS::CONSTANT_ADDRESS:
+ case AMDGPUAS::GLOBAL_ADDRESS:
+ return 512;
+ default:
+ return 128;
+ }
+ };
- case 96:
- return ST.hasDwordx3LoadStores();
-
- case 256:
- case 512:
- // TODO: Possibly support loads of i256 and i512 . This will require
- // adding i256 and i512 types to MVT in order for to be able to use
- // TableGen.
- // TODO: Add support for other vector types, this will require
- // defining more value mappings for the new types.
- return Ty0.isVector() && (Ty0.getScalarType().getSizeInBits() == 32 ||
- Ty0.getScalarType().getSizeInBits() == 64);
-
- default:
- return false;
- }
- })
- .clampScalar(0, S32, S64);
+ const auto needToSplitLoad = [=](const LegalityQuery &Query) -> bool {
+ const LLT DstTy = Query.Types[0];
+
+ // Split vector extloads.
+ unsigned MemSize = Query.MMODescrs[0].SizeInBits;
+ if (DstTy.isVector() && DstTy.getSizeInBits() > MemSize)
+ return true;
+
+ const LLT PtrTy = Query.Types[1];
+ unsigned AS = PtrTy.getAddressSpace();
+ if (MemSize > maxSizeForAddrSpace(AS))
+ return true;
+
+ // Catch weird sized loads that don't evenly divide into the access sizes
+ // TODO: May be able to widen depending on alignment etc.
+ unsigned NumRegs = MemSize / 32;
+ if (NumRegs == 3 && !ST.hasDwordx3LoadStores())
+ return true;
+
+ unsigned Align = Query.MMODescrs[0].AlignInBits;
+ if (Align < MemSize) {
+ const SITargetLowering *TLI = ST.getTargetLowering();
+ return !TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS, Align / 8);
+ }
+
+ return false;
+ };
+ unsigned GlobalAlign32 = ST.hasUnalignedBufferAccess() ? 0 : 32;
+ unsigned GlobalAlign16 = ST.hasUnalignedBufferAccess() ? 0 : 16;
+ unsigned GlobalAlign8 = ST.hasUnalignedBufferAccess() ? 0 : 8;
+
+ // TODO: Refine based on subtargets which support unaligned access or 128-bit
+ // LDS
+ // TODO: Unsupported flat for SI.
+
+ for (unsigned Op : {G_LOAD, G_STORE}) {
+ const bool IsStore = Op == G_STORE;
+
+ auto &Actions = getActionDefinitionsBuilder(Op);
+ // Whitelist the common cases.
+ // TODO: Pointer loads
+ // TODO: Wide constant loads
+ // TODO: Only CI+ has 3x loads
+ // TODO: Loads to s16 on gfx9
+ Actions.legalForTypesWithMemDesc({{S32, GlobalPtr, 32, GlobalAlign32},
+ {V2S32, GlobalPtr, 64, GlobalAlign32},
+ {V3S32, GlobalPtr, 96, GlobalAlign32},
+ {S96, GlobalPtr, 96, GlobalAlign32},
+ {V4S32, GlobalPtr, 128, GlobalAlign32},
+ {S128, GlobalPtr, 128, GlobalAlign32},
+ {S64, GlobalPtr, 64, GlobalAlign32},
+ {V2S64, GlobalPtr, 128, GlobalAlign32},
+ {V2S16, GlobalPtr, 32, GlobalAlign32},
+ {S32, GlobalPtr, 8, GlobalAlign8},
+ {S32, GlobalPtr, 16, GlobalAlign16},
+
+ {S32, LocalPtr, 32, 32},
+ {S64, LocalPtr, 64, 32},
+ {V2S32, LocalPtr, 64, 32},
+ {S32, LocalPtr, 8, 8},
+ {S32, LocalPtr, 16, 16},
+ {V2S16, LocalPtr, 32, 32},
+
+ {S32, PrivatePtr, 32, 32},
+ {S32, PrivatePtr, 8, 8},
+ {S32, PrivatePtr, 16, 16},
+ {V2S16, PrivatePtr, 32, 32},
+
+ {S32, FlatPtr, 32, GlobalAlign32},
+ {S32, FlatPtr, 16, GlobalAlign16},
+ {S32, FlatPtr, 8, GlobalAlign8},
+ {V2S16, FlatPtr, 32, GlobalAlign32},
+
+ {S32, ConstantPtr, 32, GlobalAlign32},
+ {V2S32, ConstantPtr, 64, GlobalAlign32},
+ {V3S32, ConstantPtr, 96, GlobalAlign32},
+ {V4S32, ConstantPtr, 128, GlobalAlign32},
+ {S64, ConstantPtr, 64, GlobalAlign32},
+ {S128, ConstantPtr, 128, GlobalAlign32},
+ {V2S32, ConstantPtr, 32, GlobalAlign32}});
+ Actions
+ .customIf(typeIs(1, Constant32Ptr))
+ .narrowScalarIf(
+ [=](const LegalityQuery &Query) -> bool {
+ return !Query.Types[0].isVector() && needToSplitLoad(Query);
+ },
+ [=](const LegalityQuery &Query) -> std::pair<unsigned, LLT> {
+ const LLT DstTy = Query.Types[0];
+ const LLT PtrTy = Query.Types[1];
+
+ const unsigned DstSize = DstTy.getSizeInBits();
+ unsigned MemSize = Query.MMODescrs[0].SizeInBits;
+
+ // Split extloads.
+ if (DstSize > MemSize)
+ return std::make_pair(0, LLT::scalar(MemSize));
+
+ if (DstSize > 32 && (DstSize % 32 != 0)) {
+ // FIXME: Need a way to specify non-extload of larger size if
+ // suitably aligned.
+ return std::make_pair(0, LLT::scalar(32 * (DstSize / 32)));
+ }
+
+ unsigned MaxSize = maxSizeForAddrSpace(PtrTy.getAddressSpace());
+ if (MemSize > MaxSize)
+ return std::make_pair(0, LLT::scalar(MaxSize));
+
+ unsigned Align = Query.MMODescrs[0].AlignInBits;
+ return std::make_pair(0, LLT::scalar(Align));
+ })
+ .fewerElementsIf(
+ [=](const LegalityQuery &Query) -> bool {
+ return Query.Types[0].isVector() && needToSplitLoad(Query);
+ },
+ [=](const LegalityQuery &Query) -> std::pair<unsigned, LLT> {
+ const LLT DstTy = Query.Types[0];
+ const LLT PtrTy = Query.Types[1];
+
+ LLT EltTy = DstTy.getElementType();
+ unsigned MaxSize = maxSizeForAddrSpace(PtrTy.getAddressSpace());
+
+ // Split if it's too large for the address space.
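+          // For example, without 128-bit DS access a 256-bit <8 x s32> local
+          // (LDS) load exceeds the 64-bit limit and is broken into <2 x s32>
+          // pieces.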
+ if (Query.MMODescrs[0].SizeInBits > MaxSize) {
+ unsigned NumElts = DstTy.getNumElements();
+ unsigned NumPieces = Query.MMODescrs[0].SizeInBits / MaxSize;
+
+ // FIXME: Refine when odd breakdowns handled
+ // The scalars will need to be re-legalized.
+ if (NumPieces == 1 || NumPieces >= NumElts ||
+ NumElts % NumPieces != 0)
+ return std::make_pair(0, EltTy);
+
+ return std::make_pair(0,
+ LLT::vector(NumElts / NumPieces, EltTy));
+ }
+
+ // Need to split because of alignment.
+ unsigned Align = Query.MMODescrs[0].AlignInBits;
+ unsigned EltSize = EltTy.getSizeInBits();
+ if (EltSize > Align &&
+ (EltSize / Align < DstTy.getNumElements())) {
+ return std::make_pair(0, LLT::vector(EltSize / Align, EltTy));
+ }
+
+ // May need relegalization for the scalars.
+ return std::make_pair(0, EltTy);
+ })
+ .minScalar(0, S32);
+
+ if (IsStore)
+ Actions.narrowScalarIf(isWideScalarTruncStore(0), changeTo(0, S32));
+
+ // TODO: Need a bitcast lower option?
+ Actions
+ .legalIf([=](const LegalityQuery &Query) {
+ const LLT Ty0 = Query.Types[0];
+ unsigned Size = Ty0.getSizeInBits();
+ unsigned MemSize = Query.MMODescrs[0].SizeInBits;
+ unsigned Align = Query.MMODescrs[0].AlignInBits;
+
+ // No extending vector loads.
+ if (Size > MemSize && Ty0.isVector())
+ return false;
+
+ // FIXME: Widening store from alignment not valid.
+ if (MemSize < Size)
+ MemSize = std::max(MemSize, Align);
+
+ switch (MemSize) {
+ case 8:
+ case 16:
+ return Size == 32;
+ case 32:
+ case 64:
+ case 128:
+ return true;
+ case 96:
+ return ST.hasDwordx3LoadStores();
+ case 256:
+ case 512:
+ return true;
+ default:
+ return false;
+ }
+ })
+ .widenScalarToNextPow2(0)
+ // TODO: v3s32->v4s32 with alignment
+ .moreElementsIf(vectorSmallerThan(0, 32), moreEltsToNext32Bit(0));
+ }
- // FIXME: Handle alignment requirements.
auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
- .legalForTypesWithMemDesc({
- {S32, GlobalPtr, 8, 8},
- {S32, GlobalPtr, 16, 8},
- {S32, LocalPtr, 8, 8},
- {S32, LocalPtr, 16, 8},
- {S32, PrivatePtr, 8, 8},
- {S32, PrivatePtr, 16, 8}});
+ .legalForTypesWithMemDesc({{S32, GlobalPtr, 8, 8},
+ {S32, GlobalPtr, 16, 2 * 8},
+ {S32, LocalPtr, 8, 8},
+ {S32, LocalPtr, 16, 16},
+ {S32, PrivatePtr, 8, 8},
+ {S32, PrivatePtr, 16, 16},
+ {S32, ConstantPtr, 8, 8},
+ {S32, ConstantPtr, 16, 2 * 8}});
if (ST.hasFlatAddressSpace()) {
- ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
- {S32, FlatPtr, 16, 8}});
+ ExtLoads.legalForTypesWithMemDesc(
+ {{S32, FlatPtr, 8, 8}, {S32, FlatPtr, 16, 16}});
}
ExtLoads.clampScalar(0, S32, S32)
@@ -590,6 +855,12 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
}
+ getActionDefinitionsBuilder(G_ATOMICRMW_FADD)
+ .legalFor({{S32, LocalPtr}});
+
+ getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
+ .lower();
+
// TODO: Pointer types, any 32-bit or 64-bit vector
getActionDefinitionsBuilder(G_SELECT)
.legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
@@ -643,7 +914,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
return (EltTy.getSizeInBits() == 16 ||
EltTy.getSizeInBits() % 32 == 0) &&
VecTy.getSizeInBits() % 32 == 0 &&
- VecTy.getSizeInBits() <= 512 &&
+ VecTy.getSizeInBits() <= 1024 &&
IdxTy.getSizeInBits() == 32;
})
.clampScalar(EltTypeIdx, S32, S64)
@@ -663,6 +934,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
// FIXME: Doesn't handle extract of illegal sizes.
getActionDefinitionsBuilder(Op)
+ .lowerIf(all(typeIs(LitTyIdx, S16), sizeIs(BigTyIdx, 32)))
+ // FIXME: Multiples of 16 should not be legal.
.legalIf([=](const LegalityQuery &Query) {
const LLT BigTy = Query.Types[BigTyIdx];
const LLT LitTy = Query.Types[LitTyIdx];
@@ -686,18 +959,36 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
}
- getActionDefinitionsBuilder(G_BUILD_VECTOR)
- .legalForCartesianProduct(AllS32Vectors, {S32})
- .legalForCartesianProduct(AllS64Vectors, {S64})
- .clampNumElements(0, V16S32, V16S32)
- .clampNumElements(0, V2S64, V8S64)
- .minScalarSameAs(1, 0)
- .legalIf(isRegisterType(0))
- .minScalarOrElt(0, S32);
+ auto &BuildVector = getActionDefinitionsBuilder(G_BUILD_VECTOR)
+ .legalForCartesianProduct(AllS32Vectors, {S32})
+ .legalForCartesianProduct(AllS64Vectors, {S64})
+ .clampNumElements(0, V16S32, V32S32)
+ .clampNumElements(0, V2S64, V16S64)
+ .fewerElementsIf(isWideVec16(0), changeTo(0, V2S16));
+
+ if (ST.hasScalarPackInsts())
+ BuildVector.legalFor({V2S16, S32});
+
+ BuildVector
+ .minScalarSameAs(1, 0)
+ .legalIf(isRegisterType(0))
+ .minScalarOrElt(0, S32);
+
+ if (ST.hasScalarPackInsts()) {
+ getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC)
+ .legalFor({V2S16, S32})
+ .lower();
+ } else {
+ getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC)
+ .lower();
+ }
getActionDefinitionsBuilder(G_CONCAT_VECTORS)
.legalIf(isRegisterType(0));
+ // TODO: Don't fully scalarize v2s16 pieces
+ getActionDefinitionsBuilder(G_SHUFFLE_VECTOR).lower();
+
// Merge/Unmerge
for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
@@ -715,14 +1006,17 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
return false;
};
- getActionDefinitionsBuilder(Op)
+ auto &Builder = getActionDefinitionsBuilder(Op)
.widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
// Clamp the little scalar to s8-s256 and make it a power of 2. It's not
// worth considering the multiples of 64 since 2*192 and 2*384 are not
// valid.
.clampScalar(LitTyIdx, S16, S256)
.widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
-
+ .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
+ .fewerElementsIf(all(typeIs(0, S16), vectorWiderThan(1, 32),
+ elementTypeIs(1, S16)),
+ changeTo(1, V2S16))
// Break up vectors with weird elements into scalars
.fewerElementsIf(
[=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
@@ -730,25 +1024,37 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.fewerElementsIf(
[=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
scalarize(1))
- .clampScalar(BigTyIdx, S32, S512)
- .widenScalarIf(
+ .clampScalar(BigTyIdx, S32, S1024)
+ .lowerFor({{S16, V2S16}});
+
+ if (Op == G_MERGE_VALUES) {
+ Builder.widenScalarIf(
+ // TODO: Use 16-bit shifts if legal for 8-bit values?
[=](const LegalityQuery &Query) {
- const LLT &Ty = Query.Types[BigTyIdx];
- return !isPowerOf2_32(Ty.getSizeInBits()) &&
- Ty.getSizeInBits() % 16 != 0;
+ const LLT Ty = Query.Types[LitTyIdx];
+ return Ty.getSizeInBits() < 32;
},
- [=](const LegalityQuery &Query) {
- // Pick the next power of 2, or a multiple of 64 over 128.
- // Whichever is smaller.
- const LLT &Ty = Query.Types[BigTyIdx];
- unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
- if (NewSizeInBits >= 256) {
- unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
- if (RoundedTo < NewSizeInBits)
- NewSizeInBits = RoundedTo;
- }
- return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
- })
+ changeTo(LitTyIdx, S32));
+ }
+
+ Builder.widenScalarIf(
+ [=](const LegalityQuery &Query) {
+ const LLT Ty = Query.Types[BigTyIdx];
+ return !isPowerOf2_32(Ty.getSizeInBits()) &&
+ Ty.getSizeInBits() % 16 != 0;
+ },
+ [=](const LegalityQuery &Query) {
+ // Pick the next power of 2, or a multiple of 64 over 128.
+ // Whichever is smaller.
+ const LLT &Ty = Query.Types[BigTyIdx];
+ unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
+ if (NewSizeInBits >= 256) {
+ unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
+ if (RoundedTo < NewSizeInBits)
+ NewSizeInBits = RoundedTo;
+ }
+ return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
+ })
.legalIf([=](const LegalityQuery &Query) {
const LLT &BigTy = Query.Types[BigTyIdx];
const LLT &LitTy = Query.Types[LitTyIdx];
@@ -760,43 +1066,56 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
return BigTy.getSizeInBits() % 16 == 0 &&
LitTy.getSizeInBits() % 16 == 0 &&
- BigTy.getSizeInBits() <= 512;
+ BigTy.getSizeInBits() <= 1024;
})
// Any vectors left are the wrong size. Scalarize them.
.scalarize(0)
.scalarize(1);
}
+ getActionDefinitionsBuilder(G_SEXT_INREG).lower();
+
computeTables();
verify(*ST.getInstrInfo());
}
bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder,
+ MachineIRBuilder &B,
GISelChangeObserver &Observer) const {
switch (MI.getOpcode()) {
case TargetOpcode::G_ADDRSPACE_CAST:
- return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
+ return legalizeAddrSpaceCast(MI, MRI, B);
case TargetOpcode::G_FRINT:
- return legalizeFrint(MI, MRI, MIRBuilder);
+ return legalizeFrint(MI, MRI, B);
case TargetOpcode::G_FCEIL:
- return legalizeFceil(MI, MRI, MIRBuilder);
+ return legalizeFceil(MI, MRI, B);
case TargetOpcode::G_INTRINSIC_TRUNC:
- return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
+ return legalizeIntrinsicTrunc(MI, MRI, B);
case TargetOpcode::G_SITOFP:
- return legalizeITOFP(MI, MRI, MIRBuilder, true);
+ return legalizeITOFP(MI, MRI, B, true);
case TargetOpcode::G_UITOFP:
- return legalizeITOFP(MI, MRI, MIRBuilder, false);
+ return legalizeITOFP(MI, MRI, B, false);
case TargetOpcode::G_FMINNUM:
case TargetOpcode::G_FMAXNUM:
case TargetOpcode::G_FMINNUM_IEEE:
case TargetOpcode::G_FMAXNUM_IEEE:
- return legalizeMinNumMaxNum(MI, MRI, MIRBuilder);
+ return legalizeMinNumMaxNum(MI, MRI, B);
case TargetOpcode::G_EXTRACT_VECTOR_ELT:
- return legalizeExtractVectorElt(MI, MRI, MIRBuilder);
+ return legalizeExtractVectorElt(MI, MRI, B);
case TargetOpcode::G_INSERT_VECTOR_ELT:
- return legalizeInsertVectorElt(MI, MRI, MIRBuilder);
+ return legalizeInsertVectorElt(MI, MRI, B);
+ case TargetOpcode::G_FSIN:
+ case TargetOpcode::G_FCOS:
+ return legalizeSinCos(MI, MRI, B);
+ case TargetOpcode::G_GLOBAL_VALUE:
+ return legalizeGlobalValue(MI, MRI, B);
+ case TargetOpcode::G_LOAD:
+ return legalizeLoad(MI, MRI, B, Observer);
+ case TargetOpcode::G_FMAD:
+ return legalizeFMad(MI, MRI, B);
+ case TargetOpcode::G_FDIV:
+ return legalizeFDIV(MI, MRI, B);
default:
return false;
}
@@ -807,11 +1126,13 @@ bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
Register AMDGPULegalizerInfo::getSegmentAperture(
unsigned AS,
MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder) const {
- MachineFunction &MF = MIRBuilder.getMF();
+ MachineIRBuilder &B) const {
+ MachineFunction &MF = B.getMF();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const LLT S32 = LLT::scalar(32);
+ assert(AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS);
+
if (ST.hasApertureRegs()) {
// FIXME: Use inline constants (src_{shared, private}_base) instead of
// getreg.
@@ -829,13 +1150,13 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
Register ApertureReg = MRI.createGenericVirtualRegister(S32);
Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
+ B.buildInstr(AMDGPU::S_GETREG_B32)
.addDef(GetReg)
.addImm(Encoding);
MRI.setType(GetReg, S32);
- auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
- MIRBuilder.buildInstr(TargetOpcode::G_SHL)
+ auto ShiftAmt = B.buildConstant(S32, WidthM1 + 1);
+ B.buildInstr(TargetOpcode::G_SHL)
.addDef(ApertureReg)
.addUse(GetReg)
.addUse(ShiftAmt.getReg(0));
@@ -846,8 +1167,9 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
Register QueuePtr = MRI.createGenericVirtualRegister(
LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
- // FIXME: Placeholder until we can track the input registers.
- MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ if (!loadInputValue(QueuePtr, B, &MFI->getArgInfo().QueuePtr))
+ return Register();
// Offset into amd_queue_t for group_segment_aperture_base_hi /
// private_segment_aperture_base_hi.
@@ -870,18 +1192,19 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
Register LoadResult = MRI.createGenericVirtualRegister(S32);
Register LoadAddr;
- MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
- MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
+ B.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
+ B.buildLoad(LoadResult, LoadAddr, *MMO);
return LoadResult;
}
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder) const {
- MachineFunction &MF = MIRBuilder.getMF();
+ MachineIRBuilder &B) const {
+ MachineFunction &MF = B.getMF();
- MIRBuilder.setInstr(MI);
+ B.setInstr(MI);
+ const LLT S32 = LLT::scalar(32);
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
@@ -899,7 +1222,28 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
- MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
+ MI.setDesc(B.getTII().get(TargetOpcode::G_BITCAST));
+ return true;
+ }
+
+ if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
+ // Truncate.
+ B.buildExtract(Dst, Src, 0);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
+ const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+ uint32_t AddrHiVal = Info->get32BitAddressHighBits();
+
+  // FIXME: This is a bit ugly: we merge two 32-bit pointer-typed values into
+  // the 64-bit result. Merge operands are required to be the same type, but
+  // creating an extra ptrtoint for the high half would be kind of pointless.
+ auto HighAddr = B.buildConstant(
+ LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS_32BIT, 32), AddrHiVal);
+ B.buildMerge(Dst, {Src, HighAddr.getReg(0)});
+ MI.eraseFromParent();
return true;
}
@@ -908,47 +1252,52 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
DestAS == AMDGPUAS::PRIVATE_ADDRESS);
unsigned NullVal = TM.getNullPointerValue(DestAS);
- auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
- auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);
+ auto SegmentNull = B.buildConstant(DstTy, NullVal);
+ auto FlatNull = B.buildConstant(SrcTy, 0);
Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);
// Extract low 32-bits of the pointer.
- MIRBuilder.buildExtract(PtrLo32, Src, 0);
+ B.buildExtract(PtrLo32, Src, 0);
Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
- MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
- MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));
+ B.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
+ B.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));
MI.eraseFromParent();
return true;
}
- assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
- SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
+ if (SrcAS != AMDGPUAS::LOCAL_ADDRESS && SrcAS != AMDGPUAS::PRIVATE_ADDRESS)
+ return false;
+
+ if (!ST.hasFlatAddressSpace())
+ return false;
auto SegmentNull =
- MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
+ B.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
auto FlatNull =
- MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
+ B.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
- Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);
+ Register ApertureReg = getSegmentAperture(SrcAS, MRI, B);
+ if (!ApertureReg.isValid())
+ return false;
Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
- MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));
+ B.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));
Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);
// Coerce the type of the low half of the result so we can use merge_values.
- Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
- MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
+ Register SrcAsInt = MRI.createGenericVirtualRegister(S32);
+ B.buildInstr(TargetOpcode::G_PTRTOINT)
.addDef(SrcAsInt)
.addUse(Src);
// TODO: Should we allow mismatched types but matching sizes in merges to
// avoid the ptrtoint?
- MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
- MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));
+ B.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
+ B.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));
MI.eraseFromParent();
return true;
@@ -956,8 +1305,8 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
bool AMDGPULegalizerInfo::legalizeFrint(
MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder) const {
- MIRBuilder.setInstr(MI);
+ MachineIRBuilder &B) const {
+ B.setInstr(MI);
Register Src = MI.getOperand(1).getReg();
LLT Ty = MRI.getType(Src);
@@ -966,18 +1315,18 @@ bool AMDGPULegalizerInfo::legalizeFrint(
APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
- auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
- auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);
+ auto C1 = B.buildFConstant(Ty, C1Val);
+ auto CopySign = B.buildFCopysign(Ty, C1, Src);
// TODO: Should this propagate fast-math-flags?
- auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
- auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);
+ auto Tmp1 = B.buildFAdd(Ty, Src, CopySign);
+ auto Tmp2 = B.buildFSub(Ty, Tmp1, CopySign);
- auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
- auto Fabs = MIRBuilder.buildFAbs(Ty, Src);
+ auto C2 = B.buildFConstant(Ty, C2Val);
+ auto Fabs = B.buildFAbs(Ty, Src);
- auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
+ auto Cond = B.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
+ B.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
return true;
}
@@ -1124,7 +1473,7 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(
MachineIRBuilder HelperBuilder(MI);
GISelObserverWrapper DummyObserver;
LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
- HelperBuilder.setMBB(*MI.getParent());
+ HelperBuilder.setInstr(MI);
return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
}
@@ -1187,6 +1536,194 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
return true;
}
+bool AMDGPULegalizerInfo::legalizeSinCos(
+ MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ B.setInstr(MI);
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT Ty = MRI.getType(DstReg);
+ unsigned Flags = MI.getFlags();
+
+ Register TrigVal;
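+  // The hardware sin/cos (exposed via amdgcn.sin/cos) expect an input scaled
+  // by 1/(2*pi), so pre-multiply the radian argument by 0.5/pi. Subtargets
+  // with a reduced trig input range additionally take the fractional part of
+  // the scaled value to bring the argument back into the supported range.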
+ auto OneOver2Pi = B.buildFConstant(Ty, 0.5 / M_PI);
+ if (ST.hasTrigReducedRange()) {
+ auto MulVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags);
+ TrigVal = B.buildIntrinsic(Intrinsic::amdgcn_fract, {Ty}, false)
+ .addUse(MulVal.getReg(0))
+ .setMIFlags(Flags).getReg(0);
+ } else
+ TrigVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags).getReg(0);
+
+ Intrinsic::ID TrigIntrin = MI.getOpcode() == AMDGPU::G_FSIN ?
+ Intrinsic::amdgcn_sin : Intrinsic::amdgcn_cos;
+ B.buildIntrinsic(TrigIntrin, makeArrayRef<Register>(DstReg), false)
+ .addUse(TrigVal)
+ .setMIFlags(Flags);
+ MI.eraseFromParent();
+ return true;
+}
+
+bool AMDGPULegalizerInfo::buildPCRelGlobalAddress(
+ Register DstReg, LLT PtrTy,
+ MachineIRBuilder &B, const GlobalValue *GV,
+ unsigned Offset, unsigned GAFlags) const {
+ // In order to support pc-relative addressing, SI_PC_ADD_REL_OFFSET is lowered
+ // to the following code sequence:
+ //
+ // For constant address space:
+ // s_getpc_b64 s[0:1]
+ // s_add_u32 s0, s0, $symbol
+ // s_addc_u32 s1, s1, 0
+ //
+ // s_getpc_b64 returns the address of the s_add_u32 instruction and then
+ // a fixup or relocation is emitted to replace $symbol with a literal
+ // constant, which is a pc-relative offset from the encoding of the $symbol
+ // operand to the global variable.
+ //
+ // For global address space:
+ // s_getpc_b64 s[0:1]
+ // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
+ // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
+ //
+ // s_getpc_b64 returns the address of the s_add_u32 instruction and then
+ // fixups or relocations are emitted to replace $symbol@*@lo and
+ // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
+ // which is a 64-bit pc-relative offset from the encoding of the $symbol
+ // operand to the global variable.
+ //
+ // What we want here is an offset from the value returned by s_getpc
+ // (which is the address of the s_add_u32 instruction) to the global
+ // variable, but since the encoding of $symbol starts 4 bytes after the start
+ // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
+ // small. This requires us to add 4 to the global variable offset in order to
+ // compute the correct address.
+
+ LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
+
+ Register PCReg = PtrTy.getSizeInBits() != 32 ? DstReg :
+ B.getMRI()->createGenericVirtualRegister(ConstPtrTy);
+
+ MachineInstrBuilder MIB = B.buildInstr(AMDGPU::SI_PC_ADD_REL_OFFSET)
+ .addDef(PCReg);
+
+ MIB.addGlobalAddress(GV, Offset + 4, GAFlags);
+ if (GAFlags == SIInstrInfo::MO_NONE)
+ MIB.addImm(0);
+ else
+ MIB.addGlobalAddress(GV, Offset + 4, GAFlags + 1);
+
+ B.getMRI()->setRegClass(PCReg, &AMDGPU::SReg_64RegClass);
+
+ if (PtrTy.getSizeInBits() == 32)
+ B.buildExtract(DstReg, PCReg, 0);
+ return true;
+ }
+
+bool AMDGPULegalizerInfo::legalizeGlobalValue(
+ MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT Ty = MRI.getType(DstReg);
+ unsigned AS = Ty.getAddressSpace();
+
+ const GlobalValue *GV = MI.getOperand(1).getGlobal();
+ MachineFunction &MF = B.getMF();
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ B.setInstr(MI);
+
+ if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
+ if (!MFI->isEntryFunction()) {
+ const Function &Fn = MF.getFunction();
+ DiagnosticInfoUnsupported BadLDSDecl(
+ Fn, "local memory global used by non-kernel function", MI.getDebugLoc());
+ Fn.getContext().diagnose(BadLDSDecl);
+ }
+
+ // TODO: We could emit code to handle the initialization somewhere.
+ if (!AMDGPUTargetLowering::hasDefinedInitializer(GV)) {
+ B.buildConstant(DstReg, MFI->allocateLDSGlobal(B.getDataLayout(), *GV));
+ MI.eraseFromParent();
+ return true;
+ }
+
+ const Function &Fn = MF.getFunction();
+ DiagnosticInfoUnsupported BadInit(
+ Fn, "unsupported initializer for address space", MI.getDebugLoc());
+ Fn.getContext().diagnose(BadInit);
+ return true;
+ }
+
+ const SITargetLowering *TLI = ST.getTargetLowering();
+
+ if (TLI->shouldEmitFixup(GV)) {
+ buildPCRelGlobalAddress(DstReg, Ty, B, GV, 0);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ if (TLI->shouldEmitPCReloc(GV)) {
+ buildPCRelGlobalAddress(DstReg, Ty, B, GV, 0, SIInstrInfo::MO_REL32);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
+ Register GOTAddr = MRI.createGenericVirtualRegister(PtrTy);
+
+ MachineMemOperand *GOTMMO = MF.getMachineMemOperand(
+ MachinePointerInfo::getGOT(MF),
+ MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant,
+ 8 /*Size*/, 8 /*Align*/);
+
+ buildPCRelGlobalAddress(GOTAddr, PtrTy, B, GV, 0, SIInstrInfo::MO_GOTPCREL32);
+
+ if (Ty.getSizeInBits() == 32) {
+    // Truncate if this is a 32-bit constant address.
+ auto Load = B.buildLoad(PtrTy, GOTAddr, *GOTMMO);
+ B.buildExtract(DstReg, Load, 0);
+ } else
+ B.buildLoad(DstReg, GOTAddr, *GOTMMO);
+
+ MI.eraseFromParent();
+ return true;
+}
+
+bool AMDGPULegalizerInfo::legalizeLoad(
+ MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B, GISelChangeObserver &Observer) const {
+ B.setInstr(MI);
+ LLT ConstPtr = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
+ auto Cast = B.buildAddrSpaceCast(ConstPtr, MI.getOperand(1).getReg());
+ Observer.changingInstr(MI);
+ MI.getOperand(1).setReg(Cast.getReg(0));
+ Observer.changedInstr(MI);
+ return true;
+}
+
+bool AMDGPULegalizerInfo::legalizeFMad(
+ MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ assert(Ty.isScalar());
+
+ // TODO: Always legal with future ftz flag.
+ if (Ty == LLT::scalar(32) && !ST.hasFP32Denormals())
+ return true;
+ if (Ty == LLT::scalar(16) && !ST.hasFP16Denormals())
+ return true;
+
+ MachineFunction &MF = B.getMF();
+
+ MachineIRBuilder HelperBuilder(MI);
+ GISelObserverWrapper DummyObserver;
+ LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
+ HelperBuilder.setMBB(*MI.getParent());
+ return Helper.lowerFMad(MI) == LegalizerHelper::Legalized;
+}
+
// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
MachineRegisterInfo &MRI) {
@@ -1212,10 +1749,9 @@ Register AMDGPULegalizerInfo::getLiveInRegister(MachineRegisterInfo &MRI,
bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
const ArgDescriptor *Arg) const {
- if (!Arg->isRegister())
+ if (!Arg->isRegister() || !Arg->getRegister().isValid())
return false; // TODO: Handle these
- assert(Arg->getRegister() != 0);
assert(Arg->getRegister().isPhysical());
MachineRegisterInfo &MRI = *B.getMRI();
@@ -1229,19 +1765,30 @@ bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
const unsigned Mask = Arg->getMask();
const unsigned Shift = countTrailingZeros<unsigned>(Mask);
- auto ShiftAmt = B.buildConstant(S32, Shift);
- auto LShr = B.buildLShr(S32, LiveIn, ShiftAmt);
- B.buildAnd(DstReg, LShr, B.buildConstant(S32, Mask >> Shift));
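+    // Extract the bit-field this argument occupies from the live-in register:
+    // only emit the shift when the field does not start at bit 0, then mask
+    // off the remaining bits.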
+ Register AndMaskSrc = LiveIn;
+
+ if (Shift != 0) {
+ auto ShiftAmt = B.buildConstant(S32, Shift);
+ AndMaskSrc = B.buildLShr(S32, LiveIn, ShiftAmt).getReg(0);
+ }
+
+ B.buildAnd(DstReg, AndMaskSrc, B.buildConstant(S32, Mask >> Shift));
} else
B.buildCopy(DstReg, LiveIn);
// Insert the argument copy if it doesn't already exist.
// FIXME: It seems EmitLiveInCopies isn't called anywhere?
if (!MRI.getVRegDef(LiveIn)) {
+ // FIXME: Should have scoped insert pt
+ MachineBasicBlock &OrigInsBB = B.getMBB();
+ auto OrigInsPt = B.getInsertPt();
+
MachineBasicBlock &EntryMBB = B.getMF().front();
EntryMBB.addLiveIn(Arg->getRegister());
B.setInsertPt(EntryMBB, EntryMBB.begin());
B.buildCopy(LiveIn, Arg->getRegister());
+
+ B.setInsertPt(OrigInsBB, OrigInsPt);
}
return true;
@@ -1272,6 +1819,113 @@ bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
return false;
}
+bool AMDGPULegalizerInfo::legalizeFDIV(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ B.setInstr(MI);
+
+ if (legalizeFastUnsafeFDIV(MI, MRI, B))
+ return true;
+
+ return false;
+}
+
+bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ Register Res = MI.getOperand(0).getReg();
+ Register LHS = MI.getOperand(1).getReg();
+ Register RHS = MI.getOperand(2).getReg();
+
+ uint16_t Flags = MI.getFlags();
+
+ LLT ResTy = MRI.getType(Res);
+ LLT S32 = LLT::scalar(32);
+ LLT S64 = LLT::scalar(64);
+
+ const MachineFunction &MF = B.getMF();
+ bool Unsafe =
+ MF.getTarget().Options.UnsafeFPMath || MI.getFlag(MachineInstr::FmArcp);
+
+ if (!MF.getTarget().Options.UnsafeFPMath && ResTy == S64)
+ return false;
+
+ if (!Unsafe && ResTy == S32 && ST.hasFP32Denormals())
+ return false;
+
+ if (auto CLHS = getConstantFPVRegVal(LHS, MRI)) {
+ // 1 / x -> RCP(x)
+ if (CLHS->isExactlyValue(1.0)) {
+ B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
+ .addUse(RHS)
+ .setMIFlags(Flags);
+
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // -1 / x -> RCP( FNEG(x) )
+ if (CLHS->isExactlyValue(-1.0)) {
+ auto FNeg = B.buildFNeg(ResTy, RHS, Flags);
+ B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
+ .addUse(FNeg.getReg(0))
+ .setMIFlags(Flags);
+
+ MI.eraseFromParent();
+ return true;
+ }
+ }
+
+ // x / y -> x * (1.0 / y)
+ if (Unsafe) {
+ auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {ResTy}, false)
+ .addUse(RHS)
+ .setMIFlags(Flags);
+ B.buildFMul(Res, LHS, RCP, Flags);
+
+ MI.eraseFromParent();
+ return true;
+ }
+
+ return false;
+}
+
+bool AMDGPULegalizerInfo::legalizeFDIVFastIntrin(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ B.setInstr(MI);
+ Register Res = MI.getOperand(0).getReg();
+ Register LHS = MI.getOperand(2).getReg();
+ Register RHS = MI.getOperand(3).getReg();
+ uint16_t Flags = MI.getFlags();
+
+ LLT S32 = LLT::scalar(32);
+ LLT S1 = LLT::scalar(1);
+
+ auto Abs = B.buildFAbs(S32, RHS, Flags);
+ const APFloat C0Val(1.0f);
+
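+  // 0x6f800000 is 2^+96 and 0x2f800000 is 2^-32 as IEEE-754 single-precision
+  // bit patterns. When |RHS| exceeds 2^+96, the denominator is scaled down by
+  // 2^-32 before taking the reciprocal so the reciprocal does not underflow
+  // into the denormal range (where it would be flushed to zero); the final
+  // multiply by the same select value folds the scale factor back out.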
+ auto C0 = B.buildConstant(S32, 0x6f800000);
+ auto C1 = B.buildConstant(S32, 0x2f800000);
+ auto C2 = B.buildConstant(S32, FloatToBits(1.0f));
+
+ auto CmpRes = B.buildFCmp(CmpInst::FCMP_OGT, S1, Abs, C0, Flags);
+ auto Sel = B.buildSelect(S32, CmpRes, C1, C2, Flags);
+
+ auto Mul0 = B.buildFMul(S32, RHS, Sel, Flags);
+
+ auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
+ .addUse(Mul0.getReg(0))
+ .setMIFlags(Flags);
+
+ auto Mul1 = B.buildFMul(S32, LHS, RCP, Flags);
+
+ B.buildFMul(Res, Sel, Mul1, Flags);
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
@@ -1306,11 +1960,79 @@ bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
return true;
}
+bool AMDGPULegalizerInfo::legalizeIsAddrSpace(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B,
+ unsigned AddrSpace) const {
+ B.setInstr(MI);
+ Register ApertureReg = getSegmentAperture(AddrSpace, MRI, B);
+ auto Hi32 = B.buildExtract(LLT::scalar(32), MI.getOperand(2).getReg(), 32);
+ B.buildICmp(ICmpInst::ICMP_EQ, MI.getOperand(0), Hi32, ApertureReg);
+ MI.eraseFromParent();
+ return true;
+}
+
+/// Handle register layout difference for f16 images for some subtargets.
+Register AMDGPULegalizerInfo::handleD16VData(MachineIRBuilder &B,
+ MachineRegisterInfo &MRI,
+ Register Reg) const {
+ if (!ST.hasUnpackedD16VMem())
+ return Reg;
+
+ const LLT S16 = LLT::scalar(16);
+ const LLT S32 = LLT::scalar(32);
+ LLT StoreVT = MRI.getType(Reg);
+ assert(StoreVT.isVector() && StoreVT.getElementType() == S16);
+
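+  // On unpacked-d16 subtargets each 16-bit element occupies its own 32-bit
+  // register, so e.g. a <4 x s16> payload is rebuilt as <4 x s32> with every
+  // element any-extended to 32 bits.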
+ auto Unmerge = B.buildUnmerge(S16, Reg);
+
+ SmallVector<Register, 4> WideRegs;
+ for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
+ WideRegs.push_back(B.buildAnyExt(S32, Unmerge.getReg(I)).getReg(0));
+
+ int NumElts = StoreVT.getNumElements();
+
+ return B.buildBuildVector(LLT::vector(NumElts, S32), WideRegs).getReg(0);
+}
+
+bool AMDGPULegalizerInfo::legalizeRawBufferStore(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B,
+ bool IsFormat) const {
+ // TODO: Reject f16 format on targets where unsupported.
+ Register VData = MI.getOperand(1).getReg();
+ LLT Ty = MRI.getType(VData);
+
+ B.setInstr(MI);
+
+ const LLT S32 = LLT::scalar(32);
+ const LLT S16 = LLT::scalar(16);
+
+ // Fixup illegal register types for i8 stores.
+ if (Ty == LLT::scalar(8) || Ty == S16) {
+ Register AnyExt = B.buildAnyExt(LLT::scalar(32), VData).getReg(0);
+ MI.getOperand(1).setReg(AnyExt);
+ return true;
+ }
+
+ if (Ty.isVector()) {
+ if (Ty.getElementType() == S16 && Ty.getNumElements() <= 4) {
+ if (IsFormat)
+ MI.getOperand(1).setReg(handleD16VData(B, MRI, VData));
+ return true;
+ }
+
+ return Ty.getElementType() == S32 && Ty.getNumElements() <= 4;
+ }
+
+ return Ty == S32;
+}
+
bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
// Replace the use G_BRCOND with the exec manipulate and branch pseudos.
- switch (MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID()) {
+ switch (MI.getIntrinsicID()) {
case Intrinsic::amdgcn_if: {
if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
const SIRegisterInfo *TRI
@@ -1386,6 +2108,22 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
case Intrinsic::amdgcn_dispatch_id:
return legalizePreloadedArgIntrin(MI, MRI, B,
AMDGPUFunctionArgInfo::DISPATCH_ID);
+ case Intrinsic::amdgcn_fdiv_fast:
+ return legalizeFDIVFastIntrin(MI, MRI, B);
+ case Intrinsic::amdgcn_is_shared:
+ return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::LOCAL_ADDRESS);
+ case Intrinsic::amdgcn_is_private:
+ return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::PRIVATE_ADDRESS);
+ case Intrinsic::amdgcn_wavefrontsize: {
+ B.setInstr(MI);
+ B.buildConstant(MI.getOperand(0), ST.getWavefrontSize());
+ MI.eraseFromParent();
+ return true;
+ }
+ case Intrinsic::amdgcn_raw_buffer_store:
+ return legalizeRawBufferStore(MI, MRI, B, false);
+ case Intrinsic::amdgcn_raw_buffer_store_format:
+ return legalizeRawBufferStore(MI, MRI, B, true);
default:
return true;
}