path: root/contrib/llvm-project/llvm/lib
author     Dimitry Andric <dim@FreeBSD.org>  2022-12-04 22:09:55 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2023-02-08 19:05:19 +0000
commit     f3fd488f1e19a3d09c4bdcece893901de4f49cdd (patch)
tree       82183a5b9324006d7ceab77a1f77644cf9394067 /contrib/llvm-project/llvm/lib
parent     6246ae0b85d8159978c01ae916a9ad6cde9378b5 (diff)
parent     458532c2dd24708181cdce77b8f60938fdc90dc6 (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib')
-rw-r--r--  contrib/llvm-project/llvm/lib/Analysis/VectorUtils.cpp | 7
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 1
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp | 8
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Unix/Signals.inc | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 67
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcCallingConv.td | 10
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp | 61
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.h | 5
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp | 19
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Utils/VNCoercion.cpp | 4
16 files changed, 138 insertions, 62 deletions
diff --git a/contrib/llvm-project/llvm/lib/Analysis/VectorUtils.cpp b/contrib/llvm-project/llvm/lib/Analysis/VectorUtils.cpp
index c4795a80ead2..bc20f33f174c 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/VectorUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/VectorUtils.cpp
@@ -1110,6 +1110,12 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
continue;
Type *ElementTy = getLoadStoreType(&I);
+ // Currently, codegen doesn't support cases where the type size doesn't
+ // match the alloc size. Skip them for now.
+ uint64_t Size = DL.getTypeAllocSize(ElementTy);
+ if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
+ continue;
+
// We don't check wrapping here because we don't know yet if Ptr will be
// part of a full group or a group with gaps. Checking wrapping for all
// pointers (even those that end up in groups with no gaps) will be overly
@@ -1121,7 +1127,6 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
/*Assume=*/true, /*ShouldCheckWrap=*/false);
const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
- uint64_t Size = DL.getTypeAllocSize(ElementTy);
AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
getLoadStoreAlignment(&I));
}
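A minimal sketch, not part of the patch, of the size mismatch the new guard skips: with a typical data layout an oddly sized integer such as i24 reports getTypeSizeInBits() == 24 but getTypeAllocSize() == 4 bytes, so interleaved-group formation now leaves such accesses alone rather than handing codegen a case it cannot lower. The helper name below is hypothetical.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// True when the element type has no alloc-size padding; the pass above now
// skips accesses for which this is false (e.g. i24 with a 4-byte alloc size).
static bool allocSizeMatchesTypeSize(llvm::Type *Ty, const llvm::DataLayout &DL) {
  uint64_t Size = DL.getTypeAllocSize(Ty);
  return Size * 8 == DL.getTypeSizeInBits(Ty);
}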
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 35650b9bd00e..ecdaef0442da 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -9693,6 +9693,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Entry.Alignment = Alignment;
CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
CLI.NumFixedArgs += 1;
+ CLI.getArgs()[0].IndirectType = CLI.RetTy;
CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
// sret demotion isn't compatible with tail-calls, since the sret argument
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp
index 8dc8d381ad16..a63118067139 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp
@@ -569,7 +569,8 @@ void IRPromoter::TruncateSinks() {
void IRPromoter::Cleanup() {
LLVM_DEBUG(dbgs() << "IR Promotion: Cleanup..\n");
// Some zexts will now have become redundant, along with their trunc
- // operands, so remove them
+ // operands, so remove them.
+ // Some zexts need to be replaced with truncate if src bitwidth is larger.
for (auto *V : Visited) {
if (!isa<ZExtInst>(V))
continue;
@@ -584,6 +585,11 @@ void IRPromoter::Cleanup() {
<< "\n");
ReplaceAllUsersOfWith(ZExt, Src);
continue;
+ } else if (ZExt->getSrcTy()->getScalarSizeInBits() > PromotedWidth) {
+ IRBuilder<> Builder{ZExt};
+ Value *Trunc = Builder.CreateTrunc(Src, ZExt->getDestTy());
+ ReplaceAllUsersOfWith(ZExt, Trunc);
+ continue;
}
// We've inserted a trunc for a zext sink, but we already know that the
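A sketch of why the new else-if emits a trunc, using hypothetical widths rather than anything taken from the patch: if an i8 source has been promoted to i32 but the sink still expects the zext's original i16 destination, a zext from i32 to i16 would be invalid IR, so a trunc must replace the zext. The helper below is illustrative only.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Replace an over-wide zext with a trunc to its destination type; PromotedSrc
// stands in for the already-promoted source value.
static Value *fixOverWideZExt(ZExtInst *ZExt, Value *PromotedSrc) {
  if (PromotedSrc->getType()->getScalarSizeInBits() >
      ZExt->getDestTy()->getScalarSizeInBits()) {
    IRBuilder<> Builder(ZExt);
    return Builder.CreateTrunc(PromotedSrc, ZExt->getDestTy());
  }
  return PromotedSrc;
}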
diff --git a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
index 75594f90c926..b9962da1d302 100644
--- a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
@@ -1040,7 +1040,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
Name, F->getParent());
// The new function may also need remangling.
- if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F))
+ if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
NewFn = *Result;
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Support/Unix/Signals.inc b/contrib/llvm-project/llvm/lib/Support/Unix/Signals.inc
index bf145bffe8bf..23ac012b9e00 100644
--- a/contrib/llvm-project/llvm/lib/Support/Unix/Signals.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Unix/Signals.inc
@@ -432,10 +432,6 @@ void llvm::sys::SetOneShotPipeSignalFunction(void (*Handler)()) {
}
void llvm::sys::DefaultOneShotPipeSignalHandler() {
- // UNIX03 conformance requires a non-zero exit code and an error message
- // to stderr when writing to a closed stdout fails.
- errs() << "error: write on a pipe with no reader\n";
-
// Send a special return code that drivers can check for, from sysexits.h.
exit(EX_IOERR);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c28216048d7c..06e21f90ebf1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5805,7 +5805,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
assert(!Res && "Call operand has unhandled type");
(void)Res;
}
- SmallVector<SDValue, 16> ArgValues;
+
unsigned ExtraArgLocs = 0;
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
@@ -6157,17 +6157,10 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
/// appropriate copies out of appropriate physical registers.
SDValue AArch64TargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const {
- CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
DenseMap<unsigned, SDValue> CopiedRegs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
- *DAG.getContext());
- CCInfo.AnalyzeCallResult(Ins, RetCC);
-
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i];
@@ -6508,17 +6501,39 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
GuardWithBTI = FuncInfo->branchTargetEnforcement();
}
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+ if (IsVarArg) {
+ unsigned NumArgs = Outs.size();
+
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
+ report_fatal_error("Passing SVE types to variadic functions is "
+ "currently not supported");
+ }
+ }
+
+ analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
+
+ CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+ RetCCInfo.AnalyzeCallResult(Ins, RetCC);
+
// Check callee args/returns for SVE registers and set calling convention
// accordingly.
if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
- bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
- return Out.VT.isScalableVector();
- });
- bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
- return In.VT.isScalableVector();
- });
-
- if (CalleeInSVE || CalleeOutSVE)
+ auto HasSVERegLoc = [](CCValAssign &Loc) {
+ if (!Loc.isRegLoc())
+ return false;
+ return AArch64::ZPRRegClass.contains(Loc.getLocReg()) ||
+ AArch64::PPRRegClass.contains(Loc.getLocReg());
+ };
+ if (any_of(RVLocs, HasSVERegLoc) || any_of(ArgLocs, HasSVERegLoc))
CallConv = CallingConv::AArch64_SVE_VectorCall;
}
@@ -6540,22 +6555,6 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
-
- if (IsVarArg) {
- unsigned NumArgs = Outs.size();
-
- for (unsigned i = 0; i != NumArgs; ++i) {
- if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
- report_fatal_error("Passing SVE types to variadic functions is "
- "currently not supported");
- }
- }
-
- analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
-
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -6961,7 +6960,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Handle result values, copying them out of physregs into vregs that we
// return.
- return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+ return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, RVLocs, DL, DAG,
InVals, IsThisReturn,
IsThisReturn ? OutVals[0] : SDValue());
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 1ba2e2f315ec..ff3bfe897869 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -894,7 +894,7 @@ private:
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SmallVectorImpl<CCValAssign> &RVLocs,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f7d139adc63b..f6b7d1ffc6d2 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -249,6 +249,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
case ISD::STORE:
case ISD::BUILD_VECTOR:
case ISD::BITCAST:
+ case ISD::UNDEF:
case ISD::EXTRACT_VECTOR_ELT:
case ISD::INSERT_VECTOR_ELT:
case ISD::EXTRACT_SUBVECTOR:
@@ -516,6 +517,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
case ISD::STORE:
case ISD::BUILD_VECTOR:
case ISD::BITCAST:
+ case ISD::UNDEF:
case ISD::EXTRACT_VECTOR_ELT:
case ISD::INSERT_VECTOR_ELT:
case ISD::INSERT_SUBVECTOR:
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 3c102463ba08..cbfd2bc68f18 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1036,7 +1036,7 @@ InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
// split, we may need an expensive shuffle to get two in sync. This has the
// effect of making larger than legal compares (v8i32 for example)
// expensive.
- if (LT.second.getVectorNumElements() > 2) {
+ if (LT.second.isVector() && LT.second.getVectorNumElements() > 2) {
if (LT.first > 1)
return LT.first * BaseCost +
BaseT::getScalarizationOverhead(VecCondTy, true, false);
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcCallingConv.td b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcCallingConv.td
index e6d23f741ea5..8afd0a7fc09a 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcCallingConv.td
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcCallingConv.td
@@ -125,10 +125,14 @@ def CC_Sparc64 : CallingConv<[
def RetCC_Sparc64 : CallingConv<[
// A single f32 return value always goes in %f0. The ABI doesn't specify what
// happens to multiple f32 return values outside a struct.
- CCIfType<[f32], CCCustom<"CC_Sparc64_Half">>,
+ CCIfType<[f32], CCCustom<"RetCC_Sparc64_Half">>,
- // Otherwise, return values are passed exactly like arguments.
- CCDelegateTo<CC_Sparc64>
+ // Otherwise, return values are passed exactly like arguments, except that
+ // returns that are too big to fit into the registers are passed as an sret
+ // instead.
+ CCIfInReg<CCIfType<[i32, f32], CCCustom<"RetCC_Sparc64_Half">>>,
+ CCIfType<[i32], CCPromoteToType<i64>>,
+ CCCustom<"RetCC_Sparc64_Full">
]>;
// Callee-saved registers are handled by the register window mechanism.
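An illustration of the case the new comment describes, using a hypothetical type that is not from the patch: a sparc64 by-value return larger than the available return registers can no longer be assigned by RetCC_Sparc64, so the new RetCC_Sparc64_* handlers report failure and the return is demoted to a hidden sret pointer argument instead of spilling to an argument stack slot.

// A return value this large does not fit the sparc64 return registers, so it
// is returned through memory (sret) rather than in registers.
struct Big {
  long Words[8]; // 64 bytes
};

Big makeBig() {
  Big B{};
  B.Words[0] = 1;
  return B;
}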
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 2cb74e7709c7..f55675089102 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -101,9 +101,9 @@ static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
}
// Allocate a full-sized argument for the 64-bit ABI.
-static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT, CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
assert((LocVT == MVT::f32 || LocVT == MVT::f128
|| LocVT.getSizeInBits() == 64) &&
"Can't handle non-64 bits locations");
@@ -133,6 +133,11 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
return true;
}
+ // Bail out if this is a return CC and we run out of registers to place
+ // values into.
+ if (IsReturn)
+ return false;
+
// This argument goes on the stack in an 8-byte slot.
// When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
// the right-aligned float. The first 4 bytes of the stack slot are undefined.
@@ -146,9 +151,9 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
-static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT, CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
unsigned Offset = State.AllocateStack(4, Align(4));
@@ -174,10 +179,43 @@ static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
return true;
}
+ // Bail out if this is a return CC and we run out of registers to place
+ // values into.
+ if (IsReturn)
+ return false;
+
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return true;
}
+static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
#include "SparcGenCallingConv.inc"
// The calling conventions in SparcCallingConv.td are described in terms of the
@@ -191,6 +229,15 @@ static unsigned toCallerWindow(unsigned Reg) {
return Reg;
}
+bool SparcTargetLowering::CanLowerReturn(
+ CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
+ : RetCC_Sparc32);
+}
+
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
@@ -1031,6 +1078,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
if (RVLocs[i].getLocVT() == MVT::v2i32) {
SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
SDValue Lo = DAG.getCopyFromReg(
@@ -1346,6 +1394,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
unsigned Reg = toCallerWindow(VA.getLocReg());
// When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.h
index 2768bb20566a..16e4f2687054 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -144,6 +144,11 @@ namespace llvm {
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 6df0409256bb..6fc7b29c5b78 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -2003,7 +2003,7 @@ OptimizeFunctions(Module &M,
// FIXME: We should also hoist alloca affected by this to the entry
// block if possible.
if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca) &&
- !F.hasAddressTaken() && !hasMustTailCallers(&F)) {
+ !F.hasAddressTaken() && !hasMustTailCallers(&F) && !F.isVarArg()) {
RemoveAttribute(&F, Attribute::InAlloca);
Changed = true;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bc01d2ef7fe2..52596b30494f 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3289,6 +3289,10 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
return false;
+ if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
+ Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
+ return false; // Cannot transform to or from byval.
+
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
index 143a035749c7..644c5c82e58e 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1210,8 +1210,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
BasicBlock *BB = PN.getParent();
Align MaxAlign;
uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
- APInt MaxSize(APWidth, 0);
- bool HaveLoad = false;
+ Type *LoadType = nullptr;
for (User *U : PN.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (!LI || !LI->isSimple())
@@ -1223,21 +1222,27 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
if (LI->getParent() != BB)
return false;
+ if (LoadType) {
+ if (LoadType != LI->getType())
+ return false;
+ } else {
+ LoadType = LI->getType();
+ }
+
// Ensure that there are no instructions between the PHI and the load that
// could store.
for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
if (BBI->mayWriteToMemory())
return false;
- uint64_t Size = DL.getTypeStoreSize(LI->getType()).getFixedSize();
MaxAlign = std::max(MaxAlign, LI->getAlign());
- MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
- HaveLoad = true;
}
- if (!HaveLoad)
+ if (!LoadType)
return false;
+ APInt LoadSize = APInt(APWidth, DL.getTypeStoreSize(LoadType).getFixedSize());
+
// We can only transform this if it is safe to push the loads into the
// predecessor blocks. The only thing to watch out for is that we can't put
// a possibly trapping load in the predecessor if it is a critical edge.
@@ -1259,7 +1264,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
- if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
+ if (isSafeToLoadUnconditionally(InVal, MaxAlign, LoadSize, DL, TI))
continue;
return false;
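A simplified sketch of the new uniqueness requirement, not the patch itself: the speculated load can only have a single type, so the scan now records the first load type it sees and refuses to speculate the PHI as soon as a differently typed load of the same pointer appears (easy to hit with opaque pointers). The helper name is hypothetical.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Return the single type loaded from PN's users, or nullptr when the loads
// disagree (in which case speculation is not attempted).
static Type *getUniqueLoadType(PHINode &PN) {
  Type *LoadType = nullptr;
  for (User *U : PN.users()) {
    auto *LI = dyn_cast<LoadInst>(U);
    if (!LI)
      continue;
    if (LoadType && LoadType != LI->getType())
      return nullptr;
    LoadType = LI->getType();
  }
  return LoadType;
}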
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/VNCoercion.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/VNCoercion.cpp
index 42be67f3cfc0..264da2187754 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -356,9 +356,9 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
// If this is memset, we just need to see if the offset is valid in the size
// of the memset..
- if (MI->getIntrinsicID() == Intrinsic::memset) {
+ if (const auto *memset_inst = dyn_cast<MemSetInst>(MI)) {
if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
- auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
+ auto *CI = dyn_cast<ConstantInt>(memset_inst->getValue());
if (!CI || !CI->isZero())
return -1;
}