Diffstat (limited to 'lib/Target/AMDGPU/SIISelLowering.cpp')
-rw-r--r--  lib/Target/AMDGPU/SIISelLowering.cpp  1780
1 file changed, 1208 insertions, 572 deletions
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index b98f9f400ee7..7268131396dc 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -15,26 +15,70 @@
#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
-#include <cmath>
#endif
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
+#include "AMDGPUTargetMachine.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
+#include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetCallingConv.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <iterator>
+#include <tuple>
+#include <utility>
+#include <vector>
using namespace llvm;
@@ -43,7 +87,6 @@ static cl::opt<bool> EnableVGPRIndexMode(
cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
cl::init(false));
-
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
@@ -84,6 +127,11 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
}
+ if (Subtarget->hasVOP3PInsts()) {
+ addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
+ addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
+ }
+
computeRegisterProperties(STI.getRegisterInfo());
// We need to custom lower vector stores from local memory
@@ -110,7 +158,6 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
-
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);
@@ -142,10 +189,17 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
+
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
+
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
@@ -153,9 +207,13 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::UADDO, MVT::i32, Legal);
+ setOperationAction(ISD::USUBO, MVT::i32, Legal);
+
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
- for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) {
+ for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
+ MVT::v2i64, MVT::v2f64}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
case ISD::LOAD:
@@ -202,6 +260,13 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
+ // Avoid stack access for these.
+ // TODO: Generalize to more vector types.
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
// BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
// and output demarshalling
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
@@ -222,7 +287,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
// On SI this is s_memtime and s_memrealtime on VI.
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
- setOperationAction(ISD::TRAP, MVT::Other, Custom);
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+ setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
@@ -303,6 +369,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
+ setOperationAction(ISD::FROUND, MVT::f16, Custom);
// F16 - VOP2 Actions.
setOperationAction(ISD::BR_CC, MVT::f16, Expand);
@@ -317,6 +384,85 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FMAD, MVT::f16, Legal);
}
+ if (Subtarget->hasVOP3PInsts()) {
+ for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
+ for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
+ switch (Op) {
+ case ISD::LOAD:
+ case ISD::STORE:
+ case ISD::BUILD_VECTOR:
+ case ISD::BITCAST:
+ case ISD::EXTRACT_VECTOR_ELT:
+ case ISD::INSERT_VECTOR_ELT:
+ case ISD::INSERT_SUBVECTOR:
+ case ISD::EXTRACT_SUBVECTOR:
+ case ISD::SCALAR_TO_VECTOR:
+ break;
+ case ISD::CONCAT_VECTORS:
+ setOperationAction(Op, VT, Custom);
+ break;
+ default:
+ setOperationAction(Op, VT, Expand);
+ break;
+ }
+ }
+ }
+
+ // XXX - Do these do anything? Vector constants turn into build_vector.
+ setOperationAction(ISD::Constant, MVT::v2i16, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
+
+ setOperationAction(ISD::STORE, MVT::v2i16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
+ setOperationAction(ISD::STORE, MVT::v2f16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
+
+ setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
+ setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
+
+ setOperationAction(ISD::AND, MVT::v2i16, Promote);
+ AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
+ setOperationAction(ISD::OR, MVT::v2i16, Promote);
+ AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
+ setOperationAction(ISD::XOR, MVT::v2i16, Promote);
+ AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
+ setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
+ AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
+ setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
+ AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
+
+ setOperationAction(ISD::ADD, MVT::v2i16, Legal);
+ setOperationAction(ISD::SUB, MVT::v2i16, Legal);
+ setOperationAction(ISD::MUL, MVT::v2i16, Legal);
+ setOperationAction(ISD::SHL, MVT::v2i16, Legal);
+ setOperationAction(ISD::SRL, MVT::v2i16, Legal);
+ setOperationAction(ISD::SRA, MVT::v2i16, Legal);
+ setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
+ setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
+ setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
+ setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
+
+ setOperationAction(ISD::FADD, MVT::v2f16, Legal);
+ setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
+ setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
+ setOperationAction(ISD::FMA, MVT::v2f16, Legal);
+ setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
+
+ // This isn't really legal, but this avoids the legalizer unrolling it (and
+ // allows matching fneg (fabs x) patterns)
+ setOperationAction(ISD::FABS, MVT::v2f16, Legal);
+
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
+ setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
+ }
+
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
setTargetDAGCombine(ISD::FMINNUM);
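
For orientation, the Promote entries paired with AddPromotedToType in the hunk above tell the legalizer to perform the packed v2i16/v2f16 operation in i32 rather than scalarizing it, which keeps the packed value in the single 32-bit register the SReg_32_XM0 register classes added earlier assume. A minimal sketch of what that means for a store, assuming the same headers as this file (the helper name is hypothetical, not part of the patch):

static SDValue promotePackedStore(StoreSDNode *ST, SelectionDAG &DAG) {
  SDLoc DL(ST);
  // Reinterpret the packed <2 x i16> (or <2 x half>) value as a single i32 ...
  SDValue AsI32 = DAG.getNode(ISD::BITCAST, DL, MVT::i32, ST->getValue());
  // ... and store it with the existing i32 patterns, reusing the original
  // chain, address and memory operand.
  return DAG.getStore(ST->getChain(), DL, AsI32, ST->getBasePtr(),
                      ST->getMemOperand());
}
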
@@ -332,6 +478,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::FCANONICALIZE);
+ setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
+ setTargetDAGCombine(ISD::ZERO_EXTEND);
// All memory operations. Some folding on the pointer operand is done to help
// matching the constant offsets in the addressing modes.
@@ -364,30 +512,49 @@ const SISubtarget *SITargetLowering::getSubtarget() const {
// TargetLowering queries
//===----------------------------------------------------------------------===//
+bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
+ EVT) const {
+ // SI has some legal vector types, but no legal vector operations. Say no
+ // shuffles are legal in order to prefer scalarizing some vector operations.
+ return false;
+}
+
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &CI,
unsigned IntrID) const {
switch (IntrID) {
case Intrinsic::amdgcn_atomic_inc:
- case Intrinsic::amdgcn_atomic_dec:
+ case Intrinsic::amdgcn_atomic_dec: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
Info.ptrVal = CI.getOperand(0);
Info.align = 0;
- Info.vol = false;
+
+ const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
+ Info.vol = !Vol || !Vol->isNullValue();
Info.readMem = true;
Info.writeMem = true;
return true;
+ }
default:
return false;
}
}
-bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
- EVT) const {
- // SI has some legal vector types, but no legal vector operations. Say no
- // shuffles are legal in order to prefer scalarizing some vector operations.
- return false;
+bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
+ SmallVectorImpl<Value*> &Ops,
+ Type *&AccessTy) const {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::amdgcn_atomic_inc:
+ case Intrinsic::amdgcn_atomic_dec: {
+ Value *Ptr = II->getArgOperand(0);
+ AccessTy = II->getType();
+ Ops.push_back(Ptr);
+ return true;
+ }
+ default:
+ return false;
+ }
}
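
Two related hooks change in this hunk: getTgtMemIntrinsic now derives the volatile flag from the intrinsic's i1 operand instead of hard-coding false, and the new getAddrModeArguments lets the pointer operand of these intrinsics participate in addressing-mode folding (queried through isLegalAddressingMode below). A small sketch of the conservative volatile rule, assuming this file's headers (the helper name is illustrative):

// Operand 4 of llvm.amdgcn.atomic.inc/dec is its i1 'volatile' flag here.
// Only a constant 0 proves the access non-volatile; a non-constant or
// non-zero operand is treated as volatile.
static bool isKnownNonVolatile(const CallInst &CI, unsigned VolOpIdx) {
  const auto *Vol = dyn_cast<ConstantInt>(CI.getOperand(VolOpIdx));
  return Vol && Vol->isNullValue();
}
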
bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
@@ -438,8 +605,7 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
if (AM.BaseGV)
return false;
- switch (AS) {
- case AMDGPUAS::GLOBAL_ADDRESS: {
+ if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
// Assume the we will use FLAT for all global memory accesses
// on VI.
@@ -454,8 +620,7 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
}
return isLegalMUBUFAddressingMode(AM);
- }
- case AMDGPUAS::CONSTANT_ADDRESS: {
+ } else if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
// If the offset isn't a multiple of 4, it probably isn't going to be
// correctly aligned.
// FIXME: Can we get the real alignment here?
@@ -478,7 +643,7 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
// in 8-bits, it can use a smaller encoding.
if (!isUInt<32>(AM.BaseOffs / 4))
return false;
- } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
+ } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
// On VI, these use the SMEM format and the offset is 20-bit in bytes.
if (!isUInt<20>(AM.BaseOffs))
return false;
@@ -492,13 +657,11 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
return true;
return false;
- }
- case AMDGPUAS::PRIVATE_ADDRESS:
+ } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
return isLegalMUBUFAddressingMode(AM);
-
- case AMDGPUAS::LOCAL_ADDRESS:
- case AMDGPUAS::REGION_ADDRESS: {
+ } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
+ AS == AMDGPUASI.REGION_ADDRESS) {
// Basic, single offset DS instructions allow a 16-bit unsigned immediate
// field.
// XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
@@ -513,17 +676,15 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
return true;
return false;
- }
- case AMDGPUAS::FLAT_ADDRESS:
- case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
+ } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
+ AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
// For an unknown address space, this usually means that this is for some
// reason being used for pure arithmetic, and not based on some addressing
// computation. We don't have instructions that compute pointers with any
// addressing modes, so treat them as having no offset like flat
// instructions.
return isLegalFlatAddressingMode(AM);
-
- default:
+ } else {
llvm_unreachable("unhandled address space");
}
}
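
As a reminder of the query this function answers, an AddrMode describes an address of the form BaseGV + BaseReg + Scale*ScaleReg + BaseOffs. A hedged sketch of just the constant-offset part of the LOCAL_ADDRESS case described in the comment above (the register and scale checks are elided), assuming this file's headers:

static bool dsOffsetFits(const TargetLowering::AddrMode &AM) {
  // Single-offset DS instructions carry a 16-bit unsigned byte offset.
  return !AM.BaseGV && AM.BaseOffs >= 0 && isUInt<16>(AM.BaseOffs);
}
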
@@ -544,8 +705,8 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
return false;
}
- if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
- AddrSpace == AMDGPUAS::REGION_ADDRESS) {
+ if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
+ AddrSpace == AMDGPUASI.REGION_ADDRESS) {
// ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
// aligned, 8 byte access in a single operation using ds_read2/write2_b32
// with adjacent offsets.
@@ -560,8 +721,8 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
// will access scratch. If we had access to the IR function, then we
// could determine if any private memory was used in the function.
if (!Subtarget->hasUnalignedScratchAccess() &&
- (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
- AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
+ (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
+ AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
return false;
}
@@ -569,7 +730,7 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
// If we have an uniform constant load, it still requires using a slow
// buffer instruction if unaligned.
if (IsFast) {
- *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
+ *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
(Align % 4 == 0) : true;
}
@@ -609,15 +770,16 @@ EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
return MVT::Other;
}
-static bool isFlatGlobalAddrSpace(unsigned AS) {
- return AS == AMDGPUAS::GLOBAL_ADDRESS ||
- AS == AMDGPUAS::FLAT_ADDRESS ||
- AS == AMDGPUAS::CONSTANT_ADDRESS;
+static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
+ return AS == AMDGPUASI.GLOBAL_ADDRESS ||
+ AS == AMDGPUASI.FLAT_ADDRESS ||
+ AS == AMDGPUASI.CONSTANT_ADDRESS;
}
bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const {
- return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
+ return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
+ isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}
bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
@@ -631,7 +793,7 @@ bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const {
// Flat -> private/local is a simple truncate.
// Flat -> global is no-op
- if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
+ if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
return true;
return isNoopAddrSpaceCast(SrcAS, DestAS);
@@ -639,18 +801,8 @@ bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
const MemSDNode *MemNode = cast<MemSDNode>(N);
- const Value *Ptr = MemNode->getMemOperand()->getValue();
- // UndefValue means this is a load of a kernel input. These are uniform.
- // Sometimes LDS instructions have constant pointers.
- // If Ptr is null, then that means this mem operand contains a
- // PseudoSourceValue like GOT.
- if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
- isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
- return true;
-
- const Instruction *I = dyn_cast<Instruction>(Ptr);
- return I && I->getMetadata("amdgpu.uniform");
+ return AMDGPU::isUniformMMO(MemNode->getMemOperand());
}
TargetLoweringBase::LegalizeTypeAction
@@ -693,40 +845,28 @@ bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
return TargetLowering::isTypeDesirableForOp(Op, VT);
}
-SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG,
- const SDLoc &SL, SDValue Chain,
- unsigned Offset) const {
+SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
+ const SDLoc &SL,
+ SDValue Chain,
+ uint64_t Offset) const {
const DataLayout &DL = DAG.getDataLayout();
MachineFunction &MF = DAG.getMachineFunction();
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
- unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
+ unsigned InputPtrReg = TRI->getPreloadedValue(MF,
+ SIRegisterInfo::KERNARG_SEGMENT_PTR);
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
- MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
+ MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
DAG.getConstant(Offset, SL, PtrVT));
}
-SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
- const SDLoc &SL, SDValue Chain,
- unsigned Offset, bool Signed,
+SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
+ const SDLoc &SL, SDValue Val,
+ bool Signed,
const ISD::InputArg *Arg) const {
- const DataLayout &DL = DAG.getDataLayout();
- Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
- PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
- MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
-
- unsigned Align = DL.getABITypeAlignment(Ty);
-
- SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset);
- SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
- MachineMemOperand::MONonTemporal |
- MachineMemOperand::MODereferenceable |
- MachineMemOperand::MOInvariant);
-
- SDValue Val = Load;
if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
VT.bitsLT(MemVT)) {
unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
@@ -740,373 +880,434 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
else
Val = DAG.getZExtOrTrunc(Val, SL, VT);
- return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
+ return Val;
}
-SDValue SITargetLowering::LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
- const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
-
- MachineFunction &MF = DAG.getMachineFunction();
- FunctionType *FType = MF.getFunction()->getFunctionType();
- SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
- const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+SDValue SITargetLowering::lowerKernargMemParameter(
+ SelectionDAG &DAG, EVT VT, EVT MemVT,
+ const SDLoc &SL, SDValue Chain,
+ uint64_t Offset, bool Signed,
+ const ISD::InputArg *Arg) const {
+ const DataLayout &DL = DAG.getDataLayout();
+ Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
+ PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
+ MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
- if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
- const Function *Fn = MF.getFunction();
- DiagnosticInfoUnsupported NoGraphicsHSA(
- *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
- DAG.getContext()->diagnose(NoGraphicsHSA);
- return DAG.getEntryNode();
- }
+ unsigned Align = DL.getABITypeAlignment(Ty);
- // Create stack objects that are used for emitting debugger prologue if
- // "amdgpu-debugger-emit-prologue" attribute was specified.
- if (ST.debuggerEmitPrologue())
- createDebuggerPrologueStackObjects(MF);
+ SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
+ SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
+ MachineMemOperand::MONonTemporal |
+ MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant);
- SmallVector<ISD::InputArg, 16> Splits;
- BitVector Skipped(Ins.size());
+ SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
+ return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
+}
- for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
- const ISD::InputArg &Arg = Ins[i];
+static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
+ CallingConv::ID CallConv,
+ ArrayRef<ISD::InputArg> Ins,
+ BitVector &Skipped,
+ FunctionType *FType,
+ SIMachineFunctionInfo *Info) {
+ for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
+ const ISD::InputArg &Arg = Ins[I];
- // First check if it's a PS input addr
+ // First check if it's a PS input addr.
if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
!Arg.Flags.isByVal() && PSInputNum <= 15) {
if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
- // We can safely skip PS inputs
- Skipped.set(i);
+ // We can safely skip PS inputs.
+ Skipped.set(I);
++PSInputNum;
continue;
}
Info->markPSInputAllocated(PSInputNum);
if (Arg.Used)
- Info->PSInputEna |= 1 << PSInputNum;
+ Info->markPSInputEnabled(PSInputNum);
++PSInputNum;
}
- if (AMDGPU::isShader(CallConv)) {
- // Second split vertices into their elements
- if (Arg.VT.isVector()) {
- ISD::InputArg NewArg = Arg;
- NewArg.Flags.setSplit();
- NewArg.VT = Arg.VT.getVectorElementType();
-
- // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
- // three or five element vertex only needs three or five registers,
- // NOT four or eight.
- Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
- unsigned NumElements = ParamType->getVectorNumElements();
-
- for (unsigned j = 0; j != NumElements; ++j) {
- Splits.push_back(NewArg);
- NewArg.PartOffset += NewArg.VT.getStoreSize();
- }
- } else {
- Splits.push_back(Arg);
+ // Second split vertices into their elements.
+ if (Arg.VT.isVector()) {
+ ISD::InputArg NewArg = Arg;
+ NewArg.Flags.setSplit();
+ NewArg.VT = Arg.VT.getVectorElementType();
+
+ // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
+ // three or five element vertex only needs three or five registers,
+ // NOT four or eight.
+ Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
+ unsigned NumElements = ParamType->getVectorNumElements();
+
+ for (unsigned J = 0; J != NumElements; ++J) {
+ Splits.push_back(NewArg);
+ NewArg.PartOffset += NewArg.VT.getStoreSize();
}
+ } else {
+ Splits.push_back(Arg);
}
}
+}
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+// Allocate special inputs passed in VGPRs.
+static void allocateSpecialInputVGPRs(CCState &CCInfo,
+ MachineFunction &MF,
+ const SIRegisterInfo &TRI,
+ SIMachineFunctionInfo &Info) {
+ if (Info.hasWorkItemIDX()) {
+ unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
+ MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
+ CCInfo.AllocateReg(Reg);
+ }
- // At least one interpolation mode must be enabled or else the GPU will hang.
- //
- // Check PSInputAddr instead of PSInputEna. The idea is that if the user set
- // PSInputAddr, the user wants to enable some bits after the compilation
- // based on run-time states. Since we can't know what the final PSInputEna
- // will look like, so we shouldn't do anything here and the user should take
- // responsibility for the correct programming.
- //
- // Otherwise, the following restrictions apply:
- // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
- // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
- // enabled too.
- if (CallConv == CallingConv::AMDGPU_PS &&
- ((Info->getPSInputAddr() & 0x7F) == 0 ||
- ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) {
- CCInfo.AllocateReg(AMDGPU::VGPR0);
- CCInfo.AllocateReg(AMDGPU::VGPR1);
- Info->markPSInputAllocated(0);
- Info->PSInputEna |= 1;
- }
-
- if (!AMDGPU::isShader(CallConv)) {
- assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
- } else {
- assert(!Info->hasDispatchPtr() &&
- !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
- !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
- !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
- !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
- !Info->hasWorkItemIDZ());
+ if (Info.hasWorkItemIDY()) {
+ unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
+ MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
+ CCInfo.AllocateReg(Reg);
}
- if (Info->hasPrivateMemoryInputPtr()) {
- unsigned PrivateMemoryPtrReg = Info->addPrivateMemoryPtr(*TRI);
- MF.addLiveIn(PrivateMemoryPtrReg, &AMDGPU::SReg_64RegClass);
+ if (Info.hasWorkItemIDZ()) {
+ unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
+ MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
+ CCInfo.AllocateReg(Reg);
+ }
+}
+
+// Allocate special inputs passed in user SGPRs.
+static void allocateHSAUserSGPRs(CCState &CCInfo,
+ MachineFunction &MF,
+ const SIRegisterInfo &TRI,
+ SIMachineFunctionInfo &Info) {
+ if (Info.hasPrivateMemoryInputPtr()) {
+ unsigned PrivateMemoryPtrReg = Info.addPrivateMemoryPtr(TRI);
+ MF.addLiveIn(PrivateMemoryPtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(PrivateMemoryPtrReg);
}
// FIXME: How should these inputs interact with inreg / custom SGPR inputs?
- if (Info->hasPrivateSegmentBuffer()) {
- unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
- MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
+ if (Info.hasPrivateSegmentBuffer()) {
+ unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
+ MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
CCInfo.AllocateReg(PrivateSegmentBufferReg);
}
- if (Info->hasDispatchPtr()) {
- unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
+ if (Info.hasDispatchPtr()) {
+ unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(DispatchPtrReg);
}
- if (Info->hasQueuePtr()) {
- unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
+ if (Info.hasQueuePtr()) {
+ unsigned QueuePtrReg = Info.addQueuePtr(TRI);
MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(QueuePtrReg);
}
- if (Info->hasKernargSegmentPtr()) {
- unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
+ if (Info.hasKernargSegmentPtr()) {
+ unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(InputPtrReg);
}
- if (Info->hasDispatchID()) {
- unsigned DispatchIDReg = Info->addDispatchID(*TRI);
+ if (Info.hasDispatchID()) {
+ unsigned DispatchIDReg = Info.addDispatchID(TRI);
MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(DispatchIDReg);
}
- if (Info->hasFlatScratchInit()) {
- unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
+ if (Info.hasFlatScratchInit()) {
+ unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(FlatScratchInitReg);
}
- if (!AMDGPU::isShader(CallConv))
- analyzeFormalArgumentsCompute(CCInfo, Ins);
- else
- AnalyzeFormalArguments(CCInfo, Splits);
-
- SmallVector<SDValue, 16> Chains;
-
- for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
-
- const ISD::InputArg &Arg = Ins[i];
- if (Skipped[i]) {
- InVals.push_back(DAG.getUNDEF(Arg.VT));
- continue;
- }
-
- CCValAssign &VA = ArgLocs[ArgIdx++];
- MVT VT = VA.getLocVT();
-
- if (VA.isMemLoc()) {
- VT = Ins[i].VT;
- EVT MemVT = VA.getLocVT();
- const unsigned Offset = Subtarget->getExplicitKernelArgOffset(MF) +
- VA.getLocMemOffset();
- // The first 36 bytes of the input buffer contains information about
- // thread group and global sizes.
- SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
- Offset, Ins[i].Flags.isSExt(),
- &Ins[i]);
- Chains.push_back(Arg.getValue(1));
-
- auto *ParamTy =
- dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
- if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
- ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
- // On SI local pointers are just offsets into LDS, so they are always
- // less than 16-bits. On CI and newer they could potentially be
- // real pointers, so we can't guarantee their size.
- Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
- DAG.getValueType(MVT::i16));
- }
-
- InVals.push_back(Arg);
- Info->setABIArgOffset(Offset + MemVT.getStoreSize());
- continue;
- }
- assert(VA.isRegLoc() && "Parameter must be in a register!");
-
- unsigned Reg = VA.getLocReg();
-
- if (VT == MVT::i64) {
- // For now assume it is a pointer
- Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
- &AMDGPU::SGPR_64RegClass);
- Reg = MF.addLiveIn(Reg, &AMDGPU::SGPR_64RegClass);
- SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
- InVals.push_back(Copy);
- continue;
- }
-
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
-
- Reg = MF.addLiveIn(Reg, RC);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
-
- if (Arg.VT.isVector()) {
-
- // Build a vector from the registers
- Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
- unsigned NumElements = ParamType->getVectorNumElements();
-
- SmallVector<SDValue, 4> Regs;
- Regs.push_back(Val);
- for (unsigned j = 1; j != NumElements; ++j) {
- Reg = ArgLocs[ArgIdx++].getLocReg();
- Reg = MF.addLiveIn(Reg, RC);
-
- SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
- Regs.push_back(Copy);
- }
-
- // Fill up the missing vector elements
- NumElements = Arg.VT.getVectorNumElements() - NumElements;
- Regs.append(NumElements, DAG.getUNDEF(VT));
-
- InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
- continue;
- }
-
- InVals.push_back(Val);
- }
-
// TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
// these from the dispatch pointer.
+}
- // Start adding system SGPRs.
- if (Info->hasWorkGroupIDX()) {
- unsigned Reg = Info->addWorkGroupIDX();
+// Allocate special input registers that are initialized per-wave.
+static void allocateSystemSGPRs(CCState &CCInfo,
+ MachineFunction &MF,
+ SIMachineFunctionInfo &Info,
+ bool IsShader) {
+ if (Info.hasWorkGroupIDX()) {
+ unsigned Reg = Info.addWorkGroupIDX();
MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
CCInfo.AllocateReg(Reg);
}
- if (Info->hasWorkGroupIDY()) {
- unsigned Reg = Info->addWorkGroupIDY();
+ if (Info.hasWorkGroupIDY()) {
+ unsigned Reg = Info.addWorkGroupIDY();
MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
CCInfo.AllocateReg(Reg);
}
- if (Info->hasWorkGroupIDZ()) {
- unsigned Reg = Info->addWorkGroupIDZ();
+ if (Info.hasWorkGroupIDZ()) {
+ unsigned Reg = Info.addWorkGroupIDZ();
MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
CCInfo.AllocateReg(Reg);
}
- if (Info->hasWorkGroupInfo()) {
- unsigned Reg = Info->addWorkGroupInfo();
+ if (Info.hasWorkGroupInfo()) {
+ unsigned Reg = Info.addWorkGroupInfo();
MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
CCInfo.AllocateReg(Reg);
}
- if (Info->hasPrivateSegmentWaveByteOffset()) {
+ if (Info.hasPrivateSegmentWaveByteOffset()) {
// Scratch wave offset passed in system SGPR.
unsigned PrivateSegmentWaveByteOffsetReg;
- if (AMDGPU::isShader(CallConv)) {
+ if (IsShader) {
PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
- Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
} else
- PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();
+ PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
}
+}
+static void reservePrivateMemoryRegs(const TargetMachine &TM,
+ MachineFunction &MF,
+ const SIRegisterInfo &TRI,
+ SIMachineFunctionInfo &Info) {
// Now that we've figured out where the scratch register inputs are, see if
// should reserve the arguments and use them directly.
bool HasStackObjects = MF.getFrameInfo().hasStackObjects();
+
// Record that we know we have non-spill stack objects so we don't need to
// check all stack objects later.
if (HasStackObjects)
- Info->setHasNonSpillStackObjects(true);
+ Info.setHasNonSpillStackObjects(true);
// Everything live out of a block is spilled with fast regalloc, so it's
// almost certain that spilling will be required.
- if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
+ if (TM.getOptLevel() == CodeGenOpt::None)
HasStackObjects = true;
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
if (ST.isAmdCodeObjectV2(MF)) {
if (HasStackObjects) {
// If we have stack objects, we unquestionably need the private buffer
// resource. For the Code Object V2 ABI, this will be the first 4 user
// SGPR inputs. We can reserve those and use them directly.
- unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue(
+ unsigned PrivateSegmentBufferReg = TRI.getPreloadedValue(
MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
- Info->setScratchRSrcReg(PrivateSegmentBufferReg);
+ Info.setScratchRSrcReg(PrivateSegmentBufferReg);
- unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue(
+ unsigned PrivateSegmentWaveByteOffsetReg = TRI.getPreloadedValue(
MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
- Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
+ Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
} else {
unsigned ReservedBufferReg
- = TRI->reservedPrivateSegmentBufferReg(MF);
+ = TRI.reservedPrivateSegmentBufferReg(MF);
unsigned ReservedOffsetReg
- = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);
+ = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
// We tentatively reserve the last registers (skipping the last two
// which may contain VCC). After register allocation, we'll replace
// these with the ones immediately after those which were really
// allocated. In the prologue copies will be inserted from the argument
// to these reserved registers.
- Info->setScratchRSrcReg(ReservedBufferReg);
- Info->setScratchWaveOffsetReg(ReservedOffsetReg);
+ Info.setScratchRSrcReg(ReservedBufferReg);
+ Info.setScratchWaveOffsetReg(ReservedOffsetReg);
}
} else {
- unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF);
+ unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
// Without HSA, relocations are used for the scratch pointer and the
// buffer resource setup is always inserted in the prologue. Scratch wave
// offset is still in an input SGPR.
- Info->setScratchRSrcReg(ReservedBufferReg);
+ Info.setScratchRSrcReg(ReservedBufferReg);
if (HasStackObjects) {
- unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue(
+ unsigned ScratchWaveOffsetReg = TRI.getPreloadedValue(
MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
- Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg);
+ Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
} else {
unsigned ReservedOffsetReg
- = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);
- Info->setScratchWaveOffsetReg(ReservedOffsetReg);
+ = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
+ Info.setScratchWaveOffsetReg(ReservedOffsetReg);
}
}
+}
- if (Info->hasWorkItemIDX()) {
- unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
- MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
- CCInfo.AllocateReg(Reg);
+SDValue SITargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ FunctionType *FType = MF.getFunction()->getFunctionType();
+ SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+
+ if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
+ const Function *Fn = MF.getFunction();
+ DiagnosticInfoUnsupported NoGraphicsHSA(
+ *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
+ DAG.getContext()->diagnose(NoGraphicsHSA);
+ return DAG.getEntryNode();
}
- if (Info->hasWorkItemIDY()) {
- unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
- MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
- CCInfo.AllocateReg(Reg);
+ // Create stack objects that are used for emitting debugger prologue if
+ // "amdgpu-debugger-emit-prologue" attribute was specified.
+ if (ST.debuggerEmitPrologue())
+ createDebuggerPrologueStackObjects(MF);
+
+ SmallVector<ISD::InputArg, 16> Splits;
+ SmallVector<CCValAssign, 16> ArgLocs;
+ BitVector Skipped(Ins.size());
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+
+ bool IsShader = AMDGPU::isShader(CallConv);
+ bool IsKernel = AMDGPU::isKernel(CallConv);
+ bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
+
+ if (IsShader) {
+ processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
+
+ // At least one interpolation mode must be enabled or else the GPU will
+ // hang.
+ //
+ // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
+ // set PSInputAddr, the user wants to enable some bits after the compilation
+ // based on run-time states. Since we can't know what the final PSInputEna
+ // will look like, so we shouldn't do anything here and the user should take
+ // responsibility for the correct programming.
+ //
+ // Otherwise, the following restrictions apply:
+ // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
+ // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
+ // enabled too.
+ if (CallConv == CallingConv::AMDGPU_PS &&
+ ((Info->getPSInputAddr() & 0x7F) == 0 ||
+ ((Info->getPSInputAddr() & 0xF) == 0 &&
+ Info->isPSInputAllocated(11)))) {
+ CCInfo.AllocateReg(AMDGPU::VGPR0);
+ CCInfo.AllocateReg(AMDGPU::VGPR1);
+ Info->markPSInputAllocated(0);
+ Info->markPSInputEnabled(0);
+ }
+
+ assert(!Info->hasDispatchPtr() &&
+ !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
+ !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
+ !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
+ !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
+ !Info->hasWorkItemIDZ());
+ } else {
+ assert(!IsKernel || (Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()));
}
- if (Info->hasWorkItemIDZ()) {
- unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
- MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
- CCInfo.AllocateReg(Reg);
+ if (IsEntryFunc) {
+ allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
+ allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
+ }
+
+ if (IsKernel) {
+ analyzeFormalArgumentsCompute(CCInfo, Ins);
+ } else {
+ CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
+ CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
+ }
+
+ SmallVector<SDValue, 16> Chains;
+
+ for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
+ const ISD::InputArg &Arg = Ins[i];
+ if (Skipped[i]) {
+ InVals.push_back(DAG.getUNDEF(Arg.VT));
+ continue;
+ }
+
+ CCValAssign &VA = ArgLocs[ArgIdx++];
+ MVT VT = VA.getLocVT();
+
+ if (IsEntryFunc && VA.isMemLoc()) {
+ VT = Ins[i].VT;
+ EVT MemVT = VA.getLocVT();
+
+ const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
+ VA.getLocMemOffset();
+ Info->setABIArgOffset(Offset + MemVT.getStoreSize());
+
+ // The first 36 bytes of the input buffer contains information about
+ // thread group and global sizes.
+ SDValue Arg = lowerKernargMemParameter(
+ DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
+ Chains.push_back(Arg.getValue(1));
+
+ auto *ParamTy =
+ dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
+ if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
+ ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
+ // On SI local pointers are just offsets into LDS, so they are always
+ // less than 16-bits. On CI and newer they could potentially be
+ // real pointers, so we can't guarantee their size.
+ Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
+ DAG.getValueType(MVT::i16));
+ }
+
+ InVals.push_back(Arg);
+ continue;
+ }
+
+ if (VA.isMemLoc())
+ report_fatal_error("memloc not supported with calling convention");
+
+ assert(VA.isRegLoc() && "Parameter must be in a register!");
+
+ unsigned Reg = VA.getLocReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
+
+ Reg = MF.addLiveIn(Reg, RC);
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
+
+ if (Arg.VT.isVector()) {
+ // Build a vector from the registers
+ Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
+ unsigned NumElements = ParamType->getVectorNumElements();
+
+ SmallVector<SDValue, 4> Regs;
+ Regs.push_back(Val);
+ for (unsigned j = 1; j != NumElements; ++j) {
+ Reg = ArgLocs[ArgIdx++].getLocReg();
+ Reg = MF.addLiveIn(Reg, RC);
+
+ SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
+ Regs.push_back(Copy);
+ }
+
+ // Fill up the missing vector elements
+ NumElements = Arg.VT.getVectorNumElements() - NumElements;
+ Regs.append(NumElements, DAG.getUNDEF(VT));
+
+ InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
+ continue;
+ }
+
+ InVals.push_back(Val);
}
- if (Chains.empty())
- return Chain;
+ // Start adding system SGPRs.
+ if (IsEntryFunc)
+ allocateSystemSGPRs(CCInfo, MF, *Info, IsShader);
+
+ reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ return Chains.empty() ? Chain :
+ DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}
SDValue
@@ -1197,7 +1398,7 @@ SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
if (Flag.getNode())
RetOps.push_back(Flag);
- unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN;
+ unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN_TO_EPILOG;
return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}
@@ -1470,16 +1671,16 @@ static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
if (Offset == 0) {
MachineInstr *SetOn =
- BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
- .addOperand(*Idx)
- .addImm(IdxMode);
+ BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
+ .add(*Idx)
+ .addImm(IdxMode);
SetOn->getOperand(3).setIsUndef();
} else {
unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
- .addOperand(*Idx)
- .addImm(Offset);
+ .add(*Idx)
+ .addImm(Offset);
MachineInstr *SetOn =
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
.addReg(Tmp, RegState::Kill)
@@ -1493,10 +1694,10 @@ static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
if (Offset == 0) {
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
- .addOperand(*Idx);
+ .add(*Idx);
} else {
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
- .addOperand(*Idx)
+ .add(*Idx)
.addImm(Offset);
}
@@ -1522,7 +1723,7 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
std::tie(SubReg, Offset)
= computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
- bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;
+ bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
MachineBasicBlock::iterator I(&MI);
@@ -1548,7 +1749,6 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
return &MBB;
}
-
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
@@ -1625,7 +1825,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
SrcVec->getReg(),
Offset);
- bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;
+ bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
if (Idx->getReg() == AMDGPU::NoRegister) {
MachineBasicBlock::iterator I(&MI);
@@ -1634,9 +1834,9 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
assert(Offset == 0);
BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
- .addOperand(*SrcVec)
- .addOperand(*Val)
- .addImm(SubReg);
+ .add(*SrcVec)
+ .add(*Val)
+ .addImm(SubReg);
MI.eraseFromParent();
return &MBB;
@@ -1648,11 +1848,11 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
if (UseGPRIdxMode) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
- .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
- .addOperand(*Val)
- .addReg(Dst, RegState::ImplicitDefine)
- .addReg(SrcVec->getReg(), RegState::Implicit)
- .addReg(AMDGPU::M0, RegState::Implicit);
+ .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
+ .add(*Val)
+ .addReg(Dst, RegState::ImplicitDefine)
+ .addReg(SrcVec->getReg(), RegState::Implicit)
+ .addReg(AMDGPU::M0, RegState::Implicit);
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
} else {
@@ -1661,7 +1861,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
BuildMI(MBB, I, DL, MovRelDesc)
.addReg(Dst, RegState::Define)
.addReg(SrcVec->getReg())
- .addOperand(*Val)
+ .add(*Val)
.addImm(SubReg - AMDGPU::sub0);
}
@@ -1694,18 +1894,18 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
if (UseGPRIdxMode) {
BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
- .addReg(PhiReg, RegState::Undef, SubReg) // vdst
- .addOperand(*Val) // src0
- .addReg(Dst, RegState::ImplicitDefine)
- .addReg(PhiReg, RegState::Implicit)
- .addReg(AMDGPU::M0, RegState::Implicit);
+ .addReg(PhiReg, RegState::Undef, SubReg) // vdst
+ .add(*Val) // src0
+ .addReg(Dst, RegState::ImplicitDefine)
+ .addReg(PhiReg, RegState::Implicit)
+ .addReg(AMDGPU::M0, RegState::Implicit);
} else {
const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));
BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
.addReg(Dst, RegState::Define)
.addReg(PhiReg)
- .addOperand(*Val)
+ .add(*Val)
.addImm(SubReg - AMDGPU::sub0);
}
@@ -1741,18 +1941,62 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
}
switch (MI.getOpcode()) {
- case AMDGPU::SI_INIT_M0: {
+ case AMDGPU::S_TRAP_PSEUDO: {
+ const DebugLoc &DL = MI.getDebugLoc();
+ const int TrapType = MI.getOperand(0).getImm();
+
+ if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
+ Subtarget->isTrapHandlerEnabled()) {
+
+ MachineFunction *MF = BB->getParent();
+ SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
+ unsigned UserSGPR = Info->getQueuePtrUserSGPR();
+ assert(UserSGPR != AMDGPU::NoRegister);
+
+ if (!BB->isLiveIn(UserSGPR))
+ BB->addLiveIn(UserSGPR);
+
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::SGPR0_SGPR1)
+ .addReg(UserSGPR);
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_TRAP))
+ .addImm(TrapType)
+ .addReg(AMDGPU::SGPR0_SGPR1, RegState::Implicit);
+ } else {
+ switch (TrapType) {
+ case SISubtarget::TrapIDLLVMTrap:
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_ENDPGM));
+ break;
+ case SISubtarget::TrapIDLLVMDebugTrap: {
+ DiagnosticInfoUnsupported NoTrap(*MF->getFunction(),
+ "debugtrap handler not supported",
+ DL,
+ DS_Warning);
+ LLVMContext &C = MF->getFunction()->getContext();
+ C.diagnose(NoTrap);
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_NOP))
+ .addImm(0);
+ break;
+ }
+ default:
+ llvm_unreachable("unsupported trap handler type!");
+ }
+ }
+
+ MI.eraseFromParent();
+ return BB;
+ }
+ case AMDGPU::SI_INIT_M0:
BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
- .addOperand(MI.getOperand(0));
+ .add(MI.getOperand(0));
MI.eraseFromParent();
return BB;
- }
+
case AMDGPU::GET_GROUPSTATICSIZE: {
DebugLoc DL = MI.getDebugLoc();
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
- .addOperand(MI.getOperand(0))
- .addImm(MFI->getLDSSize());
+ .add(MI.getOperand(0))
+ .addImm(MFI->getLDSSize());
MI.eraseFromParent();
return BB;
}
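
For context, the two source-level entry points that reach this expansion are the standard trap builtins; the ISel pattern that funnels ISD::TRAP/ISD::DEBUGTRAP into S_TRAP_PSEUDO is assumed to live in the instruction definitions rather than this file, and the final s_trap / s_endpgm / s_nop choice is made by the code above depending on whether the HSA trap handler ABI is enabled:

// User-level sketch: clang builtins -> llvm.trap / llvm.debugtrap ->
// ISD::TRAP / ISD::DEBUGTRAP -> S_TRAP_PSEUDO -> this custom inserter.
extern "C" void trap_examples(int fatal) {
  if (fatal)
    __builtin_trap();      // s_trap with the LLVM trap id, else s_endpgm
  __builtin_debugtrap();   // s_trap with the debug-trap id, else warn + s_nop
}
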
@@ -1803,7 +2047,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const DebugLoc &DL = MI.getDebugLoc();
MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
- .addOperand(MI.getOperand(0));
+ .add(MI.getOperand(0));
Br->getOperand(1).setIsUndef(true); // read undef SCC
MI.eraseFromParent();
return BB;
@@ -1856,9 +2100,6 @@ MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
VT = VT.getScalarType();
- if (!VT.isSimple())
- return false;
-
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32:
// This is as fast on some subtargets. However, we always have full rate f32
@@ -1909,13 +2150,52 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
- case ISD::TRAP: return lowerTRAP(Op, DAG);
+ case ISD::INSERT_VECTOR_ELT:
+ return lowerINSERT_VECTOR_ELT(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT:
+ return lowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::FP_ROUND:
return lowerFP_ROUND(Op, DAG);
}
return SDValue();
}
+void SITargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ case ISD::INSERT_VECTOR_ELT: {
+ if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
+ Results.push_back(Res);
+ return;
+ }
+ case ISD::EXTRACT_VECTOR_ELT: {
+ if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
+ Results.push_back(Res);
+ return;
+ }
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ switch (IID) {
+ case Intrinsic::amdgcn_cvt_pkrtz: {
+ SDValue Src0 = N->getOperand(1);
+ SDValue Src1 = N->getOperand(2);
+ SDLoc SL(N);
+ SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
+ Src0, Src1);
+
+ Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
+ return;
+ }
+ default:
+ break;
+ }
+ }
+ default:
+ break;
+ }
+}
+
/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
@@ -1932,31 +2212,25 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) {
return nullptr;
}
-bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
+unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
- case AMDGPUIntrinsic::amdgcn_if:
- case AMDGPUIntrinsic::amdgcn_else:
- case AMDGPUIntrinsic::amdgcn_end_cf:
- case AMDGPUIntrinsic::amdgcn_loop:
- return true;
+ case Intrinsic::amdgcn_if:
+ return AMDGPUISD::IF;
+ case Intrinsic::amdgcn_else:
+ return AMDGPUISD::ELSE;
+ case Intrinsic::amdgcn_loop:
+ return AMDGPUISD::LOOP;
+ case Intrinsic::amdgcn_end_cf:
+ llvm_unreachable("should not occur");
default:
- return false;
+ return 0;
}
}
- if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
- switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) {
- case AMDGPUIntrinsic::amdgcn_break:
- case AMDGPUIntrinsic::amdgcn_if_break:
- case AMDGPUIntrinsic::amdgcn_else_break:
- return true;
- default:
- return false;
- }
- }
-
- return false;
+ // break, if_break, else_break are all only used as inputs to loop, not
+ // directly as branch conditions.
+ return 0;
}
void SITargetLowering::createDebuggerPrologueStackObjects(
@@ -1987,13 +2261,13 @@ void SITargetLowering::createDebuggerPrologueStackObjects(
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
const Triple &TT = getTargetMachine().getTargetTriple();
- return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+ return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
AMDGPU::shouldEmitConstantsToTextSection(TT);
}
bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
- return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
- GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) &&
+ return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
+ GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
!shouldEmitFixup(GV) &&
!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}
@@ -2006,7 +2280,6 @@ bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
/// last parameter, also switches branch target with BR if the need arise
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SelectionDAG &DAG) const {
-
SDLoc DL(BRCOND);
SDNode *Intr = BRCOND.getOperand(1).getNode();
@@ -2032,7 +2305,8 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
// eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
// => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
- if (!isCFIntrinsic(Intr)) {
+ unsigned CFNode = isCFIntrinsic(Intr);
+ if (CFNode == 0) {
// This is a uniform branch so we don't need to legalize.
return BRCOND;
}
@@ -2050,15 +2324,13 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
if (HaveChain)
Ops.push_back(BRCOND.getOperand(0));
- Ops.append(Intr->op_begin() + (HaveChain ? 1 : 0), Intr->op_end());
+ Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Ops.push_back(Target);
ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
// build the new intrinsic call
- SDNode *Result = DAG.getNode(
- Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
- DAG.getVTList(Res), Ops).getNode();
+ SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
if (!HaveChain) {
SDValue Ops[] = {
@@ -2130,9 +2402,28 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);;
}
-SDValue SITargetLowering::getSegmentAperture(unsigned AS,
+SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
SelectionDAG &DAG) const {
- SDLoc SL;
+ // FIXME: Use inline constants (src_{shared, private}_base) instead.
+ if (Subtarget->hasApertureRegs()) {
+ unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
+ AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
+ AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
+ unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
+ AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
+ AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
+ unsigned Encoding =
+ AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
+ Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
+ WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
+
+ SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
+ SDValue ApertureReg = SDValue(
+ DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
+ SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
+ return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
+ }
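// A minimal standalone sketch (editorial illustration, not part of this
// patch) of the s_getreg path above: the 16-bit immediate packs the hwreg
// id, bit offset, and width-minus-one, and the returned field is shifted
// back up by the field width. The shift positions follow the usual SI
// hwreg(id, offset, size) layout; the helper names are invented.
//
//   #include <cstdint>
//   // id in bits [5:0], offset in [10:6], size-1 in [15:11].
//   static uint16_t encodeGetregImm(uint32_t Id, uint32_t Offset,
//                                   uint32_t WidthM1) {
//     return static_cast<uint16_t>(Id | (Offset << 6) | (WidthM1 << 11));
//   }
//   // The aperture field holds the high bits of the base address.
//   static uint32_t apertureBase(uint32_t GetregResult, uint32_t WidthM1) {
//     return GetregResult << (WidthM1 + 1);
//   }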
+
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
unsigned UserSGPR = Info->getQueuePtrUserSGPR();
@@ -2143,19 +2434,19 @@ SDValue SITargetLowering::getSegmentAperture(unsigned AS,
// Offset into amd_queue_t for group_segment_aperture_base_hi /
// private_segment_aperture_base_hi.
- uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
+ uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
- SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr,
- DAG.getConstant(StructOffset, SL, MVT::i64));
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, QueuePtr,
+ DAG.getConstant(StructOffset, DL, MVT::i64));
// TODO: Use custom target PseudoSourceValue.
// TODO: We should use the value from the IR intrinsic call, but it might not
// be available, and it is unclear how to get it here.
Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
- AMDGPUAS::CONSTANT_ADDRESS));
+ AMDGPUASI.CONSTANT_ADDRESS));
MachinePointerInfo PtrInfo(V, StructOffset);
- return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo,
+ return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
MinAlign(64, StructOffset),
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
@@ -2167,15 +2458,19 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
SDValue Src = ASC->getOperand(0);
-
- // FIXME: Really support non-0 null pointers.
- SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32);
SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
+ const AMDGPUTargetMachine &TM =
+ static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
+
// flat -> local/private
- if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
- if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
- ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
+ if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
+ unsigned DestAS = ASC->getDestAddressSpace();
+
+ if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
+ DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
+ unsigned NullVal = TM.getNullPointerValue(DestAS);
+ SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
@@ -2185,13 +2480,18 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
}
// local/private -> flat
- if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
- if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
- ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
+ if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
+ unsigned SrcAS = ASC->getSrcAddressSpace();
+
+ if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
+ SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
+ unsigned NullVal = TM.getNullPointerValue(SrcAS);
+ SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
+
SDValue NonNull
= DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
- SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG);
+ SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
SDValue CvtPtr
= DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
@@ -2211,17 +2511,88 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
return DAG.getUNDEF(ASC->getValueType(0));
}
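// A minimal standalone sketch (editorial illustration, not part of this
// patch) of the two cast directions above, assuming a 32-bit segment null
// value NullVal and a flat null of 0:
//
//   #include <cstdint>
//   static uint32_t flatToSegment(uint64_t Ptr, uint32_t NullVal) {
//     if (Ptr == 0)                          // flat null -> segment null
//       return NullVal;
//     return static_cast<uint32_t>(Ptr);     // truncate to the 32-bit offset
//   }
//   static uint64_t segmentToFlat(uint32_t Ptr, uint32_t Aperture,
//                                 uint32_t NullVal) {
//     if (Ptr == NullVal)                    // segment null -> flat null
//       return 0;
//     return (uint64_t(Aperture) << 32) | Ptr;  // (lo, hi) pair as one i64
//   }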
+SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Idx = Op.getOperand(2);
+ if (isa<ConstantSDNode>(Idx))
+ return SDValue();
+
+ // Avoid stack access for dynamic indexing.
+ SDLoc SL(Op);
+ SDValue Vec = Op.getOperand(0);
+ SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));
+
+  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 4)), (shl val, (shl idx, 4)), vec
+ SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);
+
+  // Convert the vector index to a bit index (Idx * 16, i.e. Idx << 4).
+  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
+                                  DAG.getConstant(4, SL, MVT::i32));
+
+ SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+
+ SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
+ DAG.getConstant(0xffff, SL, MVT::i32),
+ ScaledIdx);
+
+  // Shift the new element into position before masking it into the vector.
+  SDValue InsVal = DAG.getNode(ISD::SHL, SL, MVT::i32, ExtVal, ScaledIdx);
+  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, InsVal);
+ SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
+ DAG.getNOT(SL, BFM, MVT::i32), BCVec);
+
+ SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
+ return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
+}
+
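// A minimal standalone sketch (editorial illustration, not part of this
// patch) of the BFM/BFI expansion above, for a two-element 16-bit vector
// packed into a uint32_t:
//
//   #include <cstdint>
//   static uint32_t insertElt(uint32_t Vec, uint16_t Val, uint32_t Idx) {
//     uint32_t BitIdx = Idx << 4;                 // element index * 16
//     uint32_t BFM = 0xffffu << BitIdx;           // mask covering the element
//     return (BFM & (uint32_t(Val) << BitIdx)) | (~BFM & Vec);
//   }
//   // insertElt(0xAAAABBBB, 0xCCCC, 1) == 0xCCCCBBBB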
+SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+
+ EVT ResultVT = Op.getValueType();
+ SDValue Vec = Op.getOperand(0);
+ SDValue Idx = Op.getOperand(1);
+
+ if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
+ SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+
+ if (CIdx->getZExtValue() == 1) {
+ Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
+ DAG.getConstant(16, SL, MVT::i32));
+ } else {
+ assert(CIdx->getZExtValue() == 0);
+ }
+
+ if (ResultVT.bitsLT(MVT::i32))
+ Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
+ return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
+ }
+
+  // Convert the vector index to a bit index (Idx * 16, i.e. Idx << 4).
+  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
+                                  DAG.getConstant(4, SL, MVT::i32));
+
+ SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+ SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);
+
+ SDValue Result = Elt;
+ if (ResultVT.bitsLT(MVT::i32))
+ Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
+
+ return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
+}
+
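// The matching extract, as a minimal standalone sketch (editorial
// illustration, not part of this patch):
//
//   #include <cstdint>
//   static uint16_t extractElt(uint32_t Vec, uint32_t Idx) {
//     return static_cast<uint16_t>(Vec >> (Idx << 4)); // shift down, truncate
//   }
//   // extractElt(0xCCCCBBBB, 1) == 0xCCCC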
bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// We can fold offsets for anything that doesn't require a GOT relocation.
- return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
- GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) &&
+ return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
+ GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
!shouldEmitGOTReloc(GA->getGlobal());
}
-static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
- SDLoc DL, unsigned Offset, EVT PtrVT,
- unsigned GAFlags = SIInstrInfo::MO_NONE) {
+static SDValue
+buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
+ const SDLoc &DL, unsigned Offset, EVT PtrVT,
+ unsigned GAFlags = SIInstrInfo::MO_NONE) {
// In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
// lowered to the following code sequence:
//
@@ -2265,8 +2636,8 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
- if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
- GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS)
+ if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
+ GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS)
return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
SDLoc DL(GSD);
@@ -2283,7 +2654,7 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
SIInstrInfo::MO_GOTPCREL32);
Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
- PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
+ PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
const DataLayout &DataLayout = DAG.getDataLayout();
unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
// FIXME: Use a PseudoSourceValue once those can be assigned an address space.
@@ -2294,23 +2665,6 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
MachineMemOperand::MOInvariant);
}
-SDValue SITargetLowering::lowerTRAP(SDValue Op,
- SelectionDAG &DAG) const {
- const MachineFunction &MF = DAG.getMachineFunction();
- DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
- "trap handler not supported",
- Op.getDebugLoc(),
- DS_Warning);
- DAG.getContext()->diagnose(NoTrap);
-
- // Emit s_endpgm.
-
- // FIXME: This should really be selected to s_trap, but that requires
-  // setting up the trap handler for it to do anything.
- return DAG.getNode(AMDGPUISD::ENDPGM, SDLoc(Op), MVT::Other,
- Op.getOperand(0));
-}
-
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
const SDLoc &DL, SDValue V) const {
// We can't use S_MOV_B32 directly, because there is no way to specify m0 as
@@ -2332,14 +2686,15 @@ SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
MVT VT,
unsigned Offset) const {
SDLoc SL(Op);
- SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL,
- DAG.getEntryNode(), Offset, false);
+ SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
+ DAG.getEntryNode(), Offset, false);
// The local size values will have the hi 16-bits as zero.
return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
DAG.getValueType(VT));
}
-static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
+ EVT VT) {
DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
"non-hsa intrinsic with hsa target",
DL.getDebugLoc());
@@ -2347,7 +2702,8 @@ static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
return DAG.getUNDEF(VT);
}
-static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
+ EVT VT) {
DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
"intrinsic not supported on subtarget",
DL.getDebugLoc());
@@ -2389,7 +2745,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
case Intrinsic::amdgcn_implicitarg_ptr: {
unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
- return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
+ return lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
}
case Intrinsic::amdgcn_kernarg_segment_ptr: {
unsigned Reg
@@ -2403,19 +2759,16 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::amdgcn_rcp:
return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_rsq:
- case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name
return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
- case Intrinsic::amdgcn_rsq_legacy: {
+ case Intrinsic::amdgcn_rsq_legacy:
if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
return emitRemovedIntrinsicError(DAG, DL, VT);
return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
- }
- case Intrinsic::amdgcn_rcp_legacy: {
+ case Intrinsic::amdgcn_rcp_legacy:
if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
return emitRemovedIntrinsicError(DAG, DL, VT);
return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
- }
case Intrinsic::amdgcn_rsq_clamp: {
if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
@@ -2434,38 +2787,38 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
- SI::KernelInputOffsets::NGROUPS_X, false);
+ return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
+ SI::KernelInputOffsets::NGROUPS_X, false);
case Intrinsic::r600_read_ngroups_y:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
- SI::KernelInputOffsets::NGROUPS_Y, false);
+ return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
+ SI::KernelInputOffsets::NGROUPS_Y, false);
case Intrinsic::r600_read_ngroups_z:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
- SI::KernelInputOffsets::NGROUPS_Z, false);
+ return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
+ SI::KernelInputOffsets::NGROUPS_Z, false);
case Intrinsic::r600_read_global_size_x:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
- SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
+ return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
+ SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
case Intrinsic::r600_read_global_size_y:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
- SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
+ return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
+ SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
case Intrinsic::r600_read_global_size_z:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
- SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
+ return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
+ SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
case Intrinsic::r600_read_local_size_x:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
@@ -2522,43 +2875,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
Op->getVTList(), Ops, VT, MMO);
}
- case AMDGPUIntrinsic::amdgcn_fdiv_fast: {
+ case Intrinsic::amdgcn_fdiv_fast:
return lowerFDIV_FAST(Op, DAG);
- }
- case AMDGPUIntrinsic::SI_vs_load_input:
- return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
- Op.getOperand(1),
- Op.getOperand(2),
- Op.getOperand(3));
-
- case AMDGPUIntrinsic::SI_fs_constant: {
- SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
- SDValue Glue = M0.getValue(1);
- return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
- DAG.getConstant(2, DL, MVT::i32), // P0
- Op.getOperand(1), Op.getOperand(2), Glue);
- }
- case AMDGPUIntrinsic::SI_packf16:
- if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef())
- return DAG.getUNDEF(MVT::i32);
- return Op;
- case AMDGPUIntrinsic::SI_fs_interp: {
- SDValue IJ = Op.getOperand(4);
- SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
- DAG.getConstant(0, DL, MVT::i32));
- SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
- DAG.getConstant(1, DL, MVT::i32));
- I = DAG.getNode(ISD::BITCAST, DL, MVT::f32, I);
- J = DAG.getNode(ISD::BITCAST, DL, MVT::f32, J);
- SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
- SDValue Glue = M0.getValue(1);
- SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL,
- DAG.getVTList(MVT::f32, MVT::Glue),
- I, Op.getOperand(1), Op.getOperand(2), Glue);
- Glue = SDValue(P1.getNode(), 1);
- return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J,
- Op.getOperand(1), Op.getOperand(2), Glue);
- }
case Intrinsic::amdgcn_interp_mov: {
SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
SDValue Glue = M0.getValue(1);
@@ -2639,10 +2957,12 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
case Intrinsic::amdgcn_icmp: {
const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
- int CondCode = CD->getSExtValue();
+ if (!CD)
+ return DAG.getUNDEF(VT);
+ int CondCode = CD->getSExtValue();
if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
- CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE)
+ CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
return DAG.getUNDEF(VT);
ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
@@ -2652,10 +2972,12 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
case Intrinsic::amdgcn_fcmp: {
const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
- int CondCode = CD->getSExtValue();
+ if (!CD)
+ return DAG.getUNDEF(VT);
- if (CondCode <= FCmpInst::Predicate::FCMP_FALSE ||
- CondCode >= FCmpInst::Predicate::FCMP_TRUE)
+ int CondCode = CD->getSExtValue();
+ if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
+ CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
return DAG.getUNDEF(VT);
FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
@@ -2663,14 +2985,29 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
Op.getOperand(2), DAG.getCondCode(CCOpcode));
}
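// Both cases now fold a malformed intrinsic (non-constant or out-of-range
// condition code) to undef instead of asserting. A minimal sketch of the
// range checks (editorial illustration, not part of this patch), reusing
// the predicate bounds from llvm/IR/InstrTypes.h:
//
//   #include "llvm/IR/InstrTypes.h"
//   static bool isLegalIcmpCond(int CC) {
//     return CC >= llvm::CmpInst::FIRST_ICMP_PREDICATE &&
//            CC <= llvm::CmpInst::LAST_ICMP_PREDICATE;
//   }
//   static bool isLegalFcmpCond(int CC) {
//     return CC >= llvm::CmpInst::FIRST_FCMP_PREDICATE &&
//            CC <= llvm::CmpInst::LAST_FCMP_PREDICATE;
//   }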
+ case Intrinsic::amdgcn_fmed3:
+ return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_fmul_legacy:
return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
Op.getOperand(1), Op.getOperand(2));
case Intrinsic::amdgcn_sffbh:
- case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name.
return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
+ case Intrinsic::amdgcn_sbfe:
+ return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+ case Intrinsic::amdgcn_ubfe:
+ return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+ case Intrinsic::amdgcn_cvt_pkrtz: {
+ // FIXME: Stop adding cast if v2f16 legal.
+ EVT VT = Op.getValueType();
+ SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
+ Op.getOperand(1), Op.getOperand(2));
+ return DAG.getNode(ISD::BITCAST, DL, VT, Node);
+ }
default:
- return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+ return Op;
}
}
@@ -2718,6 +3055,64 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO);
}
+ // Basic sample.
+ case Intrinsic::amdgcn_image_sample:
+ case Intrinsic::amdgcn_image_sample_cl:
+ case Intrinsic::amdgcn_image_sample_d:
+ case Intrinsic::amdgcn_image_sample_d_cl:
+ case Intrinsic::amdgcn_image_sample_l:
+ case Intrinsic::amdgcn_image_sample_b:
+ case Intrinsic::amdgcn_image_sample_b_cl:
+ case Intrinsic::amdgcn_image_sample_lz:
+ case Intrinsic::amdgcn_image_sample_cd:
+ case Intrinsic::amdgcn_image_sample_cd_cl:
+
+ // Sample with comparison.
+ case Intrinsic::amdgcn_image_sample_c:
+ case Intrinsic::amdgcn_image_sample_c_cl:
+ case Intrinsic::amdgcn_image_sample_c_d:
+ case Intrinsic::amdgcn_image_sample_c_d_cl:
+ case Intrinsic::amdgcn_image_sample_c_l:
+ case Intrinsic::amdgcn_image_sample_c_b:
+ case Intrinsic::amdgcn_image_sample_c_b_cl:
+ case Intrinsic::amdgcn_image_sample_c_lz:
+ case Intrinsic::amdgcn_image_sample_c_cd:
+ case Intrinsic::amdgcn_image_sample_c_cd_cl:
+
+ // Sample with offsets.
+ case Intrinsic::amdgcn_image_sample_o:
+ case Intrinsic::amdgcn_image_sample_cl_o:
+ case Intrinsic::amdgcn_image_sample_d_o:
+ case Intrinsic::amdgcn_image_sample_d_cl_o:
+ case Intrinsic::amdgcn_image_sample_l_o:
+ case Intrinsic::amdgcn_image_sample_b_o:
+ case Intrinsic::amdgcn_image_sample_b_cl_o:
+ case Intrinsic::amdgcn_image_sample_lz_o:
+ case Intrinsic::amdgcn_image_sample_cd_o:
+ case Intrinsic::amdgcn_image_sample_cd_cl_o:
+
+ // Sample with comparison and offsets.
+ case Intrinsic::amdgcn_image_sample_c_o:
+ case Intrinsic::amdgcn_image_sample_c_cl_o:
+ case Intrinsic::amdgcn_image_sample_c_d_o:
+ case Intrinsic::amdgcn_image_sample_c_d_cl_o:
+ case Intrinsic::amdgcn_image_sample_c_l_o:
+ case Intrinsic::amdgcn_image_sample_c_b_o:
+ case Intrinsic::amdgcn_image_sample_c_b_cl_o:
+ case Intrinsic::amdgcn_image_sample_c_lz_o:
+ case Intrinsic::amdgcn_image_sample_c_cd_o:
+ case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
+
+ case Intrinsic::amdgcn_image_getlod: {
+    // If the dmask has every channel disabled, fold the result to undef.
+ const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
+ if (!DMask || DMask->isNullValue()) {
+ SDValue Undef = DAG.getUNDEF(Op.getValueType());
+ return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
+ }
+
+ return SDValue();
+ }
default:
return SDValue();
}
@@ -2731,17 +3126,60 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (IntrinsicID) {
- case AMDGPUIntrinsic::SI_sendmsg:
- case Intrinsic::amdgcn_s_sendmsg: {
- Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
- SDValue Glue = Chain.getValue(1);
- return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain,
- Op.getOperand(2), Glue);
+ case Intrinsic::amdgcn_exp: {
+ const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
+ const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
+ const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
+ const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
+
+ const SDValue Ops[] = {
+ Chain,
+ DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
+ DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
+ Op.getOperand(4), // src0
+ Op.getOperand(5), // src1
+ Op.getOperand(6), // src2
+ Op.getOperand(7), // src3
+ DAG.getTargetConstant(0, DL, MVT::i1), // compr
+ DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
+ };
+
+ unsigned Opc = Done->isNullValue() ?
+ AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
+ return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
+ }
+ case Intrinsic::amdgcn_exp_compr: {
+ const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
+ const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
+ SDValue Src0 = Op.getOperand(4);
+ SDValue Src1 = Op.getOperand(5);
+ const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
+ const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
+
+ SDValue Undef = DAG.getUNDEF(MVT::f32);
+ const SDValue Ops[] = {
+ Chain,
+ DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
+ DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
+ DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
+ DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
+ Undef, // src2
+ Undef, // src3
+ DAG.getTargetConstant(1, DL, MVT::i1), // compr
+ DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
+ };
+
+ unsigned Opc = Done->isNullValue() ?
+ AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
+ return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
}
+ case Intrinsic::amdgcn_s_sendmsg:
case Intrinsic::amdgcn_s_sendmsghalt: {
+ unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
+ AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
SDValue Glue = Chain.getValue(1);
- return DAG.getNode(AMDGPUISD::SENDMSGHALT, DL, MVT::Other, Chain,
+ return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Op.getOperand(2), Glue);
}
case AMDGPUIntrinsic::SI_tbuffer_store: {
@@ -2784,31 +3222,19 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
}
- case AMDGPUIntrinsic::SI_export: {
- const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(2));
- const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(3));
- const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(4));
- const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(5));
- const ConstantSDNode *Compr = cast<ConstantSDNode>(Op.getOperand(6));
-
- const SDValue Ops[] = {
- Chain,
- DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),
- DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1),
- DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8),
- DAG.getTargetConstant(Compr->getZExtValue(), DL, MVT::i1),
- Op.getOperand(7), // src0
- Op.getOperand(8), // src1
- Op.getOperand(9), // src2
- Op.getOperand(10) // src3
- };
-
- unsigned Opc = Done->isNullValue() ?
- AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
- return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
- }
- default:
+ case Intrinsic::amdgcn_s_barrier: {
+ if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
+ const MachineFunction &MF = DAG.getMachineFunction();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
+ if (WGSize <= ST.getWavefrontSize())
+ return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
+ Op.getOperand(0)), 0);
+ }
return SDValue();
+  }
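// When the largest possible workgroup fits in one wave, all of its lanes
// already execute in lockstep, so s_barrier degrades to a scheduling-only
// barrier. A minimal sketch of the guard (editorial illustration, not part
// of this patch; the 64 stands in for the subtarget's wavefront size):
//
//   static bool barrierIsWaveLocal(unsigned MaxFlatWorkGroupSize,
//                                  unsigned WavefrontSize = 64) {
//     return MaxFlatWorkGroupSize <= WavefrontSize;
//   }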
+ default:
+ return Op;
}
}
@@ -2857,21 +3283,20 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
// If there is a possibility that flat instructions access scratch memory,
// then we need to use the same legalization rules we use for private.
- if (AS == AMDGPUAS::FLAT_ADDRESS)
+ if (AS == AMDGPUASI.FLAT_ADDRESS)
AS = MFI->hasFlatScratchInit() ?
- AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
+ AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
unsigned NumElements = MemVT.getVectorNumElements();
- switch (AS) {
- case AMDGPUAS::CONSTANT_ADDRESS:
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
if (isMemOpUniform(Load))
return SDValue();
// Non-uniform loads will be selected to MUBUF instructions, so they
// have the same legalization requirements as global and private
// loads.
//
- LLVM_FALLTHROUGH;
- case AMDGPUAS::GLOBAL_ADDRESS: {
+ }
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
isMemOpHasNoClobberedMemOperand(Load))
return SDValue();
@@ -2880,13 +3305,14 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// loads.
//
}
- LLVM_FALLTHROUGH;
- case AMDGPUAS::FLAT_ADDRESS:
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
+ AS == AMDGPUASI.FLAT_ADDRESS) {
if (NumElements > 4)
return SplitVectorLoad(Op, DAG);
// v4 loads are supported for private and global memory.
return SDValue();
- case AMDGPUAS::PRIVATE_ADDRESS: {
+ }
+ if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
// Depending on the setting of the private_element_size field in the
// resource descriptor, we can only make private accesses up to a certain
// size.
@@ -2905,8 +3331,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
default:
llvm_unreachable("unsupported private_element_size");
}
- }
- case AMDGPUAS::LOCAL_ADDRESS: {
+ } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
if (NumElements > 2)
return SplitVectorLoad(Op, DAG);
@@ -2916,9 +3341,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// If properly aligned, if we split we might be able to use ds_read_b64.
return SplitVectorLoad(Op, DAG);
}
- default:
- return SDValue();
- }
+ return SDValue();
}
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -3287,18 +3710,17 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
// If there is a possibility that flat instructions access scratch memory,
// then we need to use the same legalization rules we use for private.
- if (AS == AMDGPUAS::FLAT_ADDRESS)
+ if (AS == AMDGPUASI.FLAT_ADDRESS)
AS = MFI->hasFlatScratchInit() ?
- AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
+ AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
unsigned NumElements = VT.getVectorNumElements();
- switch (AS) {
- case AMDGPUAS::GLOBAL_ADDRESS:
- case AMDGPUAS::FLAT_ADDRESS:
+ if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
+ AS == AMDGPUASI.FLAT_ADDRESS) {
if (NumElements > 4)
return SplitVectorStore(Op, DAG);
return SDValue();
- case AMDGPUAS::PRIVATE_ADDRESS: {
+ } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
switch (Subtarget->getMaxPrivateElementSize()) {
case 4:
return scalarizeVectorStore(Store, DAG);
@@ -3313,8 +3735,7 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
default:
llvm_unreachable("unsupported private_element_size");
}
- }
- case AMDGPUAS::LOCAL_ADDRESS: {
+ } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
if (NumElements > 2)
return SplitVectorStore(Op, DAG);
@@ -3323,8 +3744,7 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
// If properly aligned, if we split we might be able to use ds_write_b64.
return SplitVectorStore(Op, DAG);
- }
- default:
+ } else {
llvm_unreachable("unhandled address space");
}
}
@@ -3355,7 +3775,7 @@ SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) co
unsigned AS = AtomicNode->getAddressSpace();
// No custom lowering required for local address space
- if (!isFlatGlobalAddrSpace(AS))
+ if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
return Op;
// Non-local address space requires custom lowering for atomic compare
@@ -3412,12 +3832,12 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
const SISubtarget &STI) {
- switch (AS) {
- case AMDGPUAS::GLOBAL_ADDRESS: {
+ auto AMDGPUASI = STI.getAMDGPUAS();
+ if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
// MUBUF instructions have a 12-bit offset in bytes.
return isUInt<12>(OffsetSize);
}
- case AMDGPUAS::CONSTANT_ADDRESS: {
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
// SMRD instructions have an 8-bit offset in dwords on SI and
// a 20-bit offset in bytes on VI.
if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
@@ -3425,16 +3845,13 @@ static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
else
return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
}
- case AMDGPUAS::LOCAL_ADDRESS:
- case AMDGPUAS::REGION_ADDRESS: {
+ if (AS == AMDGPUASI.LOCAL_ADDRESS ||
+ AS == AMDGPUASI.REGION_ADDRESS) {
// The single offset versions have a 16-bit offset in bytes.
return isUInt<16>(OffsetSize);
}
- case AMDGPUAS::PRIVATE_ADDRESS:
// Indirect register addressing does not use any offsets.
- default:
- return 0;
- }
+ return false;
}
// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
@@ -3492,7 +3909,7 @@ SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
// TODO: We could also do this for multiplies.
unsigned AS = N->getAddressSpace();
- if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
+ if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUASI.PRIVATE_ADDRESS) {
SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
if (NewPtr) {
SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
@@ -3692,6 +4109,88 @@ SDValue SITargetLowering::performXorCombine(SDNode *N,
return SDValue();
}
+// Instructions that will be lowered with a final instruction that zeros the
+// high result bits.
+// XXX - probably only need to list legal operations.
+static bool fp16SrcZerosHighBits(unsigned Opc) {
+ switch (Opc) {
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ case ISD::FDIV:
+ case ISD::FREM:
+ case ISD::FMA:
+ case ISD::FMAD:
+ case ISD::FCANONICALIZE:
+ case ISD::FP_ROUND:
+ case ISD::UINT_TO_FP:
+ case ISD::SINT_TO_FP:
+ case ISD::FABS:
+ // Fabs is lowered to a bit operation, but it's an and which will clear the
+ // high bits anyway.
+ case ISD::FSQRT:
+ case ISD::FSIN:
+ case ISD::FCOS:
+ case ISD::FPOWI:
+ case ISD::FPOW:
+ case ISD::FLOG:
+ case ISD::FLOG2:
+ case ISD::FLOG10:
+ case ISD::FEXP:
+ case ISD::FEXP2:
+ case ISD::FCEIL:
+ case ISD::FTRUNC:
+ case ISD::FRINT:
+ case ISD::FNEARBYINT:
+ case ISD::FROUND:
+ case ISD::FFLOOR:
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM:
+ case AMDGPUISD::FRACT:
+ case AMDGPUISD::CLAMP:
+ case AMDGPUISD::COS_HW:
+ case AMDGPUISD::SIN_HW:
+ case AMDGPUISD::FMIN3:
+ case AMDGPUISD::FMAX3:
+ case AMDGPUISD::FMED3:
+ case AMDGPUISD::FMAD_FTZ:
+ case AMDGPUISD::RCP:
+ case AMDGPUISD::RSQ:
+ case AMDGPUISD::LDEXP:
+ return true;
+ default:
+ // fcopysign, select and others may be lowered to 32-bit bit operations
+ // which don't zero the high bits.
+ return false;
+ }
+}
+
+SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (!Subtarget->has16BitInsts() ||
+ DCI.getDAGCombineLevel() < AfterLegalizeDAG)
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::i32)
+ return SDValue();
+
+ SDValue Src = N->getOperand(0);
+ if (Src.getValueType() != MVT::i16)
+ return SDValue();
+
+ // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
+ // FIXME: It is not universally true that the high bits are zeroed on gfx9.
+ if (Src.getOpcode() == ISD::BITCAST) {
+ SDValue BCSrc = Src.getOperand(0);
+ if (BCSrc.getValueType() == MVT::f16 &&
+ fp16SrcZerosHighBits(BCSrc.getOpcode()))
+ return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
+ }
+
+ return SDValue();
+}
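// The combine leans on the invariant that the opcodes accepted by
// fp16SrcZerosHighBits select to VALU instructions that leave bits 31:16 of
// the 32-bit result register zeroed. A minimal statement of what that buys
// (editorial illustration, not part of this patch):
//
//   #include <cstdint>
//   // If (OpResult & 0xffff0000u) == 0 already holds, this zero-extend of
//   // the low half is the identity and no explicit mask is needed:
//   static uint32_t zext16(uint32_t OpResult) { return OpResult & 0xffffu; }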
+
SDValue SITargetLowering::performClassCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -3713,7 +4212,7 @@ SDValue SITargetLowering::performClassCombine(SDNode *N,
SDValue SITargetLowering::performFCanonicalizeCombine(
SDNode *N,
DAGCombinerInfo &DCI) const {
- ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
+ ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
if (!CFP)
return SDValue();
@@ -3723,13 +4222,14 @@ SDValue SITargetLowering::performFCanonicalizeCombine(
// Flush denormals to 0 if not enabled.
if (C.isDenormal()) {
EVT VT = N->getValueType(0);
- if (VT == MVT::f32 && !Subtarget->hasFP32Denormals())
+ EVT SVT = VT.getScalarType();
+ if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
return DAG.getConstantFP(0.0, SDLoc(N), VT);
- if (VT == MVT::f64 && !Subtarget->hasFP64Denormals())
+ if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
return DAG.getConstantFP(0.0, SDLoc(N), VT);
- if (VT == MVT::f16 && !Subtarget->hasFP16Denormals())
+ if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
return DAG.getConstantFP(0.0, SDLoc(N), VT);
}
@@ -3749,7 +4249,7 @@ SDValue SITargetLowering::performFCanonicalizeCombine(
return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
}
- return SDValue(CFP, 0);
+ return N->getOperand(0);
}
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
@@ -3771,8 +4271,9 @@ static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
}
}
-static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
- SDValue Op0, SDValue Op1, bool Signed) {
+SDValue SITargetLowering::performIntMed3ImmCombine(
+ SelectionDAG &DAG, const SDLoc &SL,
+ SDValue Op0, SDValue Op1, bool Signed) const {
ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
if (!K1)
return SDValue();
@@ -3790,23 +4291,22 @@ static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
}
EVT VT = K0->getValueType(0);
+ unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
+ if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
+ return DAG.getNode(Med3Opc, SL, VT,
+ Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
+ }
+ // If there isn't a 16-bit med3 operation, convert to 32-bit.
MVT NVT = MVT::i32;
unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- SDValue Tmp1, Tmp2, Tmp3;
- Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
- Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
- Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
-
- if (VT == MVT::i16) {
- Tmp1 = DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, NVT,
- Tmp1, Tmp2, Tmp3);
+ SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
+ SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
+ SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
- return DAG.getNode(ISD::TRUNCATE, SL, VT, Tmp1);
- } else
- return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT,
- Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
+ SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
+ return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}
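// The pattern min(max(x, K0), K1) with K0 <= K1 is exactly a median-of-3
// with the two constants. A minimal scalar model showing the equivalence
// (editorial illustration, not part of this patch):
//
//   #include <algorithm>
//   #include <cstdint>
//   static int32_t clampForm(int32_t X, int32_t K0, int32_t K1) {
//     return std::min(std::max(X, K0), K1);      // the matched DAG shape
//   }
//   static int32_t med3(int32_t A, int32_t B, int32_t C) {
//     return std::max(std::min(A, B), std::min(std::max(A, B), C));
//   }
//   // For K0 <= K1, med3(X, K0, K1) == clampForm(X, K0, K1) for all X.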
static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
@@ -3816,8 +4316,10 @@ static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
return DAG.isKnownNeverNaN(Op);
}
-static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
- SDValue Op0, SDValue Op1) {
+SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
+ const SDLoc &SL,
+ SDValue Op0,
+ SDValue Op1) const {
ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
if (!K1)
return SDValue();
@@ -3831,6 +4333,20 @@ static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
if (Cmp == APFloat::cmpGreaterThan)
return SDValue();
+ // TODO: Check IEEE bit enabled?
+ EVT VT = K0->getValueType(0);
+ if (Subtarget->enableDX10Clamp()) {
+ // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
+ // hardware fmed3 behavior converting to a min.
+ // FIXME: Should this be allowing -0.0?
+ if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
+ return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
+ }
+
+ // med3 for f16 is only available on gfx9+.
+ if (VT == MVT::f64 || (VT == MVT::f16 && !Subtarget->hasMed3_16()))
+ return SDValue();
+
// This isn't safe with signaling NaNs because in IEEE mode, min/max on a
// signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
// give the other result, which is different from med3 with a NaN input.
@@ -3846,6 +4362,7 @@ SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
unsigned Opc = N->getOpcode();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -3853,7 +4370,9 @@ SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
// Only do this if the inner op has one use since this will just increase
// register pressure for no benefit.
- if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) {
+
+ if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
+ VT != MVT::f64) {
// max(max(a, b), c) -> max3(a, b, c)
// min(min(a, b), c) -> min3(a, b, c)
if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
@@ -3895,7 +4414,9 @@ SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
(Opc == AMDGPUISD::FMIN_LEGACY &&
Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
- N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) {
+ (VT == MVT::f32 || VT == MVT::f64 ||
+ (VT == MVT::f16 && Subtarget->has16BitInsts())) &&
+ Op0.hasOneUse()) {
if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
return Res;
}
@@ -3903,6 +4424,69 @@ SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
return SDValue();
}
+static bool isClampZeroToOne(SDValue A, SDValue B) {
+ if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
+ if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
+ // FIXME: Should this be allowing -0.0?
+ return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
+ (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
+ }
+ }
+
+ return false;
+}
+
+// FIXME: Should only worry about snans for version with chain.
+SDValue SITargetLowering::performFMed3Combine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ EVT VT = N->getValueType(0);
+ // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
+ // NaNs. With a NaN input, the order of the operands may change the result.
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc SL(N);
+
+ SDValue Src0 = N->getOperand(0);
+ SDValue Src1 = N->getOperand(1);
+ SDValue Src2 = N->getOperand(2);
+
+ if (isClampZeroToOne(Src0, Src1)) {
+ // const_a, const_b, x -> clamp is safe in all cases including signaling
+ // nans.
+ // FIXME: Should this be allowing -0.0?
+ return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
+ }
+
+ // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
+ // handling no dx10-clamp?
+ if (Subtarget->enableDX10Clamp()) {
+    // If NaN is clamped to 0, we are free to reorder the inputs.
+
+ if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
+ std::swap(Src0, Src1);
+
+ if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
+ std::swap(Src1, Src2);
+
+ if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
+ std::swap(Src0, Src1);
+
+ if (isClampZeroToOne(Src1, Src2))
+ return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
+ }
+
+ return SDValue();
+}
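// Under dx10-clamp, a NaN input to min/max/med3 resolves to the other
// operand in this model, so med3(x, 0.0, 1.0) behaves exactly like clamp
// and constants may be re-sorted toward operands 1 and 2. A minimal scalar
// model (editorial illustration, not part of this patch):
//
//   #include <cmath>
//   static float dx10Med3(float A, float B, float C) {
//     // Comparisons against NaN are false, so a NaN in X falls through to Y.
//     auto Max = [](float X, float Y) { return X > Y ? X : Y; };
//     auto Min = [](float X, float Y) { return X < Y ? X : Y; };
//     return Max(Min(A, B), Min(Max(A, B), C));
//   }
//   // dx10Med3(NAN, 0.0f, 1.0f) == 0.0f, i.e. clamp of a NaN under dx10.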
+
+SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SDValue Src0 = N->getOperand(0);
+ SDValue Src1 = N->getOperand(1);
+ if (Src0.isUndef() && Src1.isUndef())
+ return DCI.DAG.getUNDEF(N->getValueType(0));
+ return SDValue();
+}
+
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0,
const SDNode *N1) const {
@@ -3933,7 +4517,6 @@ SDValue SITargetLowering::performFAddCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
- assert(!VT.isVector());
SDLoc SL(N);
SDValue LHS = N->getOperand(0);
@@ -4112,7 +4695,6 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case AMDGPUISD::FMIN_LEGACY:
case AMDGPUISD::FMAX_LEGACY: {
if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
- N->getValueType(0) != MVT::f64 &&
getTargetMachine().getOptLevel() > CodeGenOpt::None)
return performMinMaxCombine(N, DCI);
break;
@@ -4135,17 +4717,18 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case AMDGPUISD::ATOMIC_INC:
- case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics.
+ case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
if (DCI.isBeforeLegalize())
break;
return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
- }
case ISD::AND:
return performAndCombine(N, DCI);
case ISD::OR:
return performOrCombine(N, DCI);
case ISD::XOR:
return performXorCombine(N, DCI);
+ case ISD::ZERO_EXTEND:
+ return performZeroExtendCombine(N, DCI);
case AMDGPUISD::FP_CLASS:
return performClassCombine(N, DCI);
case ISD::FCANONICALIZE:
@@ -4170,6 +4753,28 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case AMDGPUISD::CVT_F32_UBYTE2:
case AMDGPUISD::CVT_F32_UBYTE3:
return performCvtF32UByteNCombine(N, DCI);
+ case AMDGPUISD::FMED3:
+ return performFMed3Combine(N, DCI);
+ case AMDGPUISD::CVT_PKRTZ_F16_F32:
+ return performCvtPkRTZCombine(N, DCI);
+ case ISD::SCALAR_TO_VECTOR: {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+
+ // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
+ if (VT == MVT::v2i16 || VT == MVT::v2f16) {
+ SDLoc SL(N);
+ SDValue Src = N->getOperand(0);
+ EVT EltVT = Src.getValueType();
+ if (EltVT == MVT::f16)
+ Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
+
+ SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
+ return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
+ }
+
+ break;
+ }
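// scalar_to_vector only defines element 0; the high half of the packed
// v2i16 result is unspecified, which is why an any-extend plus bitcast is
// enough. A minimal bit-level model (editorial illustration, not part of
// this patch):
//
//   #include <cstdint>
//   static uint32_t scalarToV2I16(uint16_t X, uint16_t AnythingHigh = 0) {
//     return (uint32_t(AnythingHigh) << 16) | X; // elt0 = X, elt1 is undef
//   }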
}
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
@@ -4198,6 +4803,10 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
I != E; ++I) {
+ // Don't look at users of the chain.
+ if (I.getUse().getResNo() != 0)
+ continue;
+
// Abort if we can't understand the usage
if (!I->isMachineOpcode() ||
I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
@@ -4250,7 +4859,6 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
// Update the users of the node with the new indices
for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
-
SDNode *User = Users[i];
if (!User)
continue;
@@ -4277,8 +4885,33 @@ static bool isFrameIndexOp(SDValue Op) {
/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
-void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
- SelectionDAG &DAG) const {
+SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
+ SelectionDAG &DAG) const {
+ if (Node->getOpcode() == ISD::CopyToReg) {
+ RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
+ SDValue SrcVal = Node->getOperand(2);
+
+ // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
+ // to try understanding copies to physical registers.
+ if (SrcVal.getValueType() == MVT::i1 &&
+ TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
+ SDLoc SL(Node);
+ MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+ SDValue VReg = DAG.getRegister(
+ MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
+
+ SDNode *Glued = Node->getGluedNode();
+ SDValue ToVReg
+ = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
+ SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
+ SDValue ToResultReg
+ = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
+ VReg, ToVReg.getValue(1));
+ DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
+ DAG.RemoveDeadNode(Node);
+ return ToResultReg.getNode();
+ }
+ }
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
@@ -4294,6 +4927,7 @@ void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
}
DAG.UpdateNodeOperands(Node, Ops);
+ return Node;
}
/// \brief Fold the instructions after selecting them.
@@ -4496,6 +5130,8 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
case 256:
return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
+ case 512:
+ return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
}
case 'v':