author     Dimitry Andric <dim@FreeBSD.org>   2024-05-04 10:17:01 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2024-05-04 10:17:01 +0000
commit     9a7cb8417a2a13bcb7a300c65c43960389b85456 (patch)
tree       fec582d7fe11fe1e4e1f805aee1aca74817d9c3d
parent     ce77c4c120414f3ab4c25f43eb4fd6f996548020 (diff)
download   src-vendor/llvm-project/release-18.x.tar.gz
           src-vendor/llvm-project/release-18.x.zip

    Vendor import of llvm-project branch release/18.x llvmorg-18.1.5-0-g617a15a9eac9.

Tags: vendor/llvm-project/llvmorg-18.1.5-0-g617a15a9eac9, vendor/llvm-project/release-18.x
 clang/lib/CodeGen/CGBuiltin.cpp                                | 25
 clang/lib/CodeGen/CGCall.cpp                                   |  5
 clang/lib/CodeGen/CGObjCGNU.cpp                                | 34
 clang/lib/CodeGen/CodeGenModule.h                              |  3
 clang/lib/CodeGen/CoverageMappingGen.cpp                       | 11
 clang/lib/CodeGen/MicrosoftCXXABI.cpp                          | 12
 clang/lib/Format/ContinuationIndenter.cpp                      |  8
 clang/lib/Format/TokenAnnotator.cpp                            | 10
 clang/lib/Format/UnwrappedLineParser.cpp                       | 40
 clang/lib/StaticAnalyzer/Checkers/Taint.cpp                    | 14
 clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp   |  6
 libcxx/modules/std.compat/cstdlib.inc                          |  2
 llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h            | 20
 llvm/lib/CodeGen/CodeGenPrepare.cpp                            |  2
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp                 |  4
 llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp                |  4
 llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp                   | 48
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp                  |  9
 llvm/lib/Target/AArch64/AArch64InstrInfo.td                    | 10
 llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp                   | 10
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp                    |  2
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp                        |  7
 llvm/lib/Target/X86/X86ISelLowering.cpp                        | 29
 llvm/lib/Target/X86/X86Subtarget.h                             |  3
 llvm/lib/TargetParser/Host.cpp                                 |  5
 llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp          |  2
 llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp  |  3
 27 files changed, 200 insertions(+), 128 deletions(-)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a4f26a6f0eb1..44ddd2428b10 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -823,29 +823,32 @@ const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberField(
ASTContext &Ctx, const RecordDecl *RD, StringRef Name, uint64_t &Offset) {
const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
getLangOpts().getStrictFlexArraysLevel();
- unsigned FieldNo = 0;
- bool IsUnion = RD->isUnion();
+ uint32_t FieldNo = 0;
- for (const Decl *D : RD->decls()) {
- if (const auto *Field = dyn_cast<FieldDecl>(D);
- Field && (Name.empty() || Field->getNameAsString() == Name) &&
+ if (RD->isImplicit())
+ return nullptr;
+
+ for (const FieldDecl *FD : RD->fields()) {
+ if ((Name.empty() || FD->getNameAsString() == Name) &&
Decl::isFlexibleArrayMemberLike(
- Ctx, Field, Field->getType(), StrictFlexArraysLevel,
+ Ctx, FD, FD->getType(), StrictFlexArraysLevel,
/*IgnoreTemplateOrMacroSubstitution=*/true)) {
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
Offset += Layout.getFieldOffset(FieldNo);
- return Field;
+ return FD;
}
- if (const auto *Record = dyn_cast<RecordDecl>(D))
- if (const FieldDecl *Field =
- FindFlexibleArrayMemberField(Ctx, Record, Name, Offset)) {
+ QualType Ty = FD->getType();
+ if (Ty->isRecordType()) {
+ if (const FieldDecl *Field = FindFlexibleArrayMemberField(
+ Ctx, Ty->getAsRecordDecl(), Name, Offset)) {
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
Offset += Layout.getFieldOffset(FieldNo);
return Field;
}
+ }
- if (!IsUnion && isa<FieldDecl>(D))
+ if (!RD->isUnion())
++FieldNo;
}
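
The rewritten search walks RD->fields() directly and recurses into record-typed fields while accumulating field offsets. A minimal sketch of the kind of layout this handles (hypothetical declarations, not from the patch):

    struct Inner {
      int count;
      int data[];          // flexible array member found by the recursion
    };
    struct Outer {
      long header;
      struct Inner inner;  // record-typed field: Offset accumulates
    };                     // offsetof(Outer, inner) + offsetof(Inner, data)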
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 28c211aa631e..a6a2f3595fe7 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1581,6 +1581,11 @@ bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}
+bool CodeGenModule::ReturnTypeHasInReg(const CGFunctionInfo &FI) {
+ const auto &RI = FI.getReturnInfo();
+ return RI.getInReg();
+}
+
bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
return ReturnTypeUsesSRet(FI) &&
getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index a36b0cdddaf0..05e3f8d4bfc2 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -2903,23 +2903,29 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
break;
case CodeGenOptions::Mixed:
case CodeGenOptions::NonLegacy:
+ StringRef name = "objc_msgSend";
if (CGM.ReturnTypeUsesFPRet(ResultType)) {
- imp =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend_fpret")
- .getCallee();
+ name = "objc_msgSend_fpret";
} else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
- // The actual types here don't matter - we're going to bitcast the
- // function anyway
- imp =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend_stret")
- .getCallee();
- } else {
- imp = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend")
- .getCallee();
+ name = "objc_msgSend_stret";
+
+      // The address of the memory block is passed in x8 for POD types,
+      // or in x0 for non-POD types (marked as inreg).
+ bool shouldCheckForInReg =
+ CGM.getContext()
+ .getTargetInfo()
+ .getTriple()
+ .isWindowsMSVCEnvironment() &&
+ CGM.getContext().getTargetInfo().getTriple().isAArch64();
+ if (shouldCheckForInReg && CGM.ReturnTypeHasInReg(MSI.CallInfo)) {
+ name = "objc_msgSend_stret2";
+ }
}
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ name)
+ .getCallee();
}
// Reset the receiver in case the lookup modified it
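
All three entry points now funnel through a single CreateRuntimeFunction call, and on windows-msvc/aarch64 an sret return marked inreg (result address in x0 rather than x8) selects the new objc_msgSend_stret2 entry point. A rough Objective-C++ sketch of a send that could take that path, assuming a GNUstep runtime on that target:

    struct NonPOD {
      NonPOD();
      NonPOD(const NonPOD &);  // user-provided copy ctor makes it non-POD
      int x;
    };
    @interface Widget
    - (NonPOD)value;           // sret + inreg on windows-msvc/aarch64
    @end
    // [w value] would dispatch through objc_msgSend_stret2 here.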
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index ec34680fd3f7..d9ece4d98eec 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -1239,6 +1239,9 @@ public:
/// Return true iff the given type uses 'sret' when used as a return type.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+ /// Return true iff the given type has `inreg` set.
+ bool ReturnTypeHasInReg(const CGFunctionInfo &FI);
+
/// Return true iff the given type uses an argument slot when 'sret' is used
/// as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI);
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 0c43317642bc..ae4e6d4c88c0 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -1207,6 +1207,12 @@ struct CounterCoverageMappingBuilder
/// Find a valid gap range between \p AfterLoc and \p BeforeLoc.
std::optional<SourceRange> findGapAreaBetween(SourceLocation AfterLoc,
SourceLocation BeforeLoc) {
+    // Some statements (like AttributedStmt and ImplicitValueInitExpr) don't
+    // have valid source locations. Do not emit a gap region if either
+    // AfterLoc or BeforeLoc is invalid.
+    if (AfterLoc.isInvalid() || BeforeLoc.isInvalid())
+      return std::nullopt;
+
// If AfterLoc is in function-like macro, use the right parenthesis
// location.
if (AfterLoc.isMacroID()) {
@@ -1370,9 +1376,8 @@ struct CounterCoverageMappingBuilder
for (const Stmt *Child : S->children())
if (Child) {
// If last statement contains terminate statements, add a gap area
- // between the two statements. Skipping attributed statements, because
- // they don't have valid start location.
- if (LastStmt && HasTerminateStmt && !isa<AttributedStmt>(Child)) {
+ // between the two statements.
+ if (LastStmt && HasTerminateStmt) {
auto Gap = findGapAreaBetween(getEnd(LastStmt), getStart(Child));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(),
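
With the bail-out inside findGapAreaBetween, callers no longer need to special-case attributed statements. A small sketch of code that previously required the isa<AttributedStmt> guard (illustrative only):

    int f(int x) {
      if (x)
        return 1;            // terminate statement; a gap would follow
      [[likely]] return 2;   // AttributedStmt: its start location is invalid,
    }                        // so findGapAreaBetween now returns nullopt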
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 172c4c937b97..4d0f4c63f843 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1135,9 +1135,15 @@ static bool isTrivialForMSVC(const CXXRecordDecl *RD, QualType Ty,
return false;
if (RD->hasNonTrivialCopyAssignment())
return false;
- for (const CXXConstructorDecl *Ctor : RD->ctors())
- if (Ctor->isUserProvided())
- return false;
+ for (const Decl *D : RD->decls()) {
+ if (auto *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (Ctor->isUserProvided())
+ return false;
+ } else if (auto *Template = dyn_cast<FunctionTemplateDecl>(D)) {
+ if (isa<CXXConstructorDecl>(Template->getTemplatedDecl()))
+ return false;
+ }
+ }
if (RD->hasNonTrivialDestructor())
return false;
return true;
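
The loop now also treats a constructor template as disqualifying, matching MSVC, which does not return such classes directly in registers. A sketch of a class affected by the change (hypothetical):

    struct S {
      template <typename T> S(T) {}  // ctor template: now makes S non-trivial
      int v;                         // for return-ABI purposes
    };
    S make();  // returned indirectly under the Microsoft ABI after this fix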
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index a3eb9138b218..53cd169b0590 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -674,7 +674,13 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// arguments to function calls. We do this by ensuring that either all
// arguments (including any lambdas) go on the same line as the function
// call, or we break before the first argument.
- auto PrevNonComment = Current.getPreviousNonComment();
+ const auto *Prev = Current.Previous;
+ if (!Prev)
+ return false;
+ // For example, `/*Newline=*/false`.
+ if (Prev->is(TT_BlockComment) && Current.SpacesRequiredBefore == 0)
+ return false;
+ const auto *PrevNonComment = Current.getPreviousNonComment();
if (!PrevNonComment || PrevNonComment->isNot(tok::l_paren))
return false;
if (Current.isOneOf(tok::comment, tok::l_paren, TT_LambdaLSquare))
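
The early return means an argument annotated with a tight block comment, as in the parameter-name style cited in the new comment, no longer triggers the break-before-first-argument heuristic. A formatting sketch (assumed style defaults):

    foo(/*Newline=*/false, [] {   // block comment glued to the argument:
      bar();                      // the lambda may stay on the call line
    });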
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 4d482e6543d6..c1f166248192 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3532,6 +3532,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
}
} else if (ClosingParen) {
for (auto *Tok = ClosingParen->Next; Tok; Tok = Tok->Next) {
+ if (Tok->is(TT_CtorInitializerColon))
+ break;
if (Tok->is(tok::arrow)) {
Tok->setType(TT_TrailingReturnArrow);
break;
@@ -5157,12 +5159,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
if (Left.IsUnterminatedLiteral)
return true;
- // FIXME: Breaking after newlines seems useful in general. Turn this into an
- // option and recognize more cases like endl etc, and break independent of
- // what comes after operator lessless.
- if (Right.is(tok::lessless) && Right.Next &&
- Right.Next->is(tok::string_literal) && Left.is(tok::string_literal) &&
- Left.TokenText.ends_with("\\n\"")) {
+ if (Right.is(tok::lessless) && Right.Next && Left.is(tok::string_literal) &&
+ Right.Next->is(tok::string_literal)) {
return true;
}
if (Right.is(TT_RequiresClause)) {
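
The FIXME is resolved by dropping the ends_with("\\n\"") condition: a break is now forced before << whenever a string literal is followed by another string literal. Sketch of the resulting layout (assumed defaults):

    llvm::outs() << "first part "
                 << "second part\n";  // break no longer requires a trailing \n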
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index 573919798870..a6eb18bb2b32 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -489,18 +489,23 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
};
SmallVector<StackEntry, 8> LBraceStack;
assert(Tok->is(tok::l_brace));
+
do {
- // Get next non-comment, non-preprocessor token.
FormatToken *NextTok;
do {
NextTok = Tokens->getNextToken();
} while (NextTok->is(tok::comment));
- while (NextTok->is(tok::hash) && !Line->InMacroBody) {
- NextTok = Tokens->getNextToken();
- do {
- NextTok = Tokens->getNextToken();
- } while (NextTok->is(tok::comment) ||
- (NextTok->NewlinesBefore == 0 && NextTok->isNot(tok::eof)));
+
+ if (!Line->InMacroBody) {
+ // Skip PPDirective lines and comments.
+ while (NextTok->is(tok::hash)) {
+ do {
+ NextTok = Tokens->getNextToken();
+ } while (NextTok->NewlinesBefore == 0 && NextTok->isNot(tok::eof));
+
+ while (NextTok->is(tok::comment))
+ NextTok = Tokens->getNextToken();
+ }
}
switch (Tok->Tok.getKind()) {
@@ -534,16 +539,6 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
if (Style.Language == FormatStyle::LK_Proto) {
ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
} else {
- // Skip NextTok over preprocessor lines, otherwise we may not
- // properly diagnose the block as a braced intializer
- // if the comma separator appears after the pp directive.
- while (NextTok->is(tok::hash)) {
- ScopedMacroState MacroState(*Line, Tokens, NextTok);
- do {
- NextTok = Tokens->getNextToken();
- } while (NextTok->isNot(tok::eof));
- }
-
// Using OriginalColumn to distinguish between ObjC methods and
// binary operators is a bit hacky.
bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
@@ -602,6 +597,16 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
NextTok = Tokens->getNextToken();
ProbablyBracedList = NextTok->isNot(tok::l_square);
}
+
+ // Cpp macro definition body that is a nonempty braced list or block:
+ if (Style.isCpp() && Line->InMacroBody && PrevTok != FormatTok &&
+ !FormatTok->Previous && NextTok->is(tok::eof) &&
+ // A statement can end with only `;` (simple statement), a block
+ // closing brace (compound statement), or `:` (label statement).
+ // If PrevTok is a block opening brace, Tok ends an empty block.
+ !PrevTok->isOneOf(tok::semi, BK_Block, tok::colon)) {
+ ProbablyBracedList = true;
+ }
}
if (ProbablyBracedList) {
Tok->setBlockKind(BK_BracedInit);
@@ -631,6 +636,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
default:
break;
}
+
PrevTok = Tok;
Tok = NextTok;
} while (Tok->isNot(tok::eof) && !LBraceStack.empty());
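
The new check classifies a macro body that is a nonempty braced list as a braced initializer rather than a block, on the reasoning that a statement would have to end in a semicolon, closing brace, or colon. Illustrative input (hypothetical):

    #define LIST {1, 2, 3}   // body now parsed as BK_BracedInit, not a block
    int arr[] = LIST;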
diff --git a/clang/lib/StaticAnalyzer/Checkers/Taint.cpp b/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
index 4edb671753bf..6362c82b009d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -216,21 +216,17 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
std::vector<SymbolRef> TaintedSymbols;
if (!Reg)
return TaintedSymbols;
- // Element region (array element) is tainted if either the base or the offset
- // are tainted.
+
+ // Element region (array element) is tainted if the offset is tainted.
if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg)) {
std::vector<SymbolRef> TaintedIndex =
getTaintedSymbolsImpl(State, ER->getIndex(), K, returnFirstOnly);
llvm::append_range(TaintedSymbols, TaintedIndex);
if (returnFirstOnly && !TaintedSymbols.empty())
return TaintedSymbols; // return early if needed
- std::vector<SymbolRef> TaintedSuperRegion =
- getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
- llvm::append_range(TaintedSymbols, TaintedSuperRegion);
- if (returnFirstOnly && !TaintedSymbols.empty())
- return TaintedSymbols; // return early if needed
}
+ // Symbolic region is tainted if the corresponding symbol is tainted.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
std::vector<SymbolRef> TaintedRegions =
getTaintedSymbolsImpl(State, SR->getSymbol(), K, returnFirstOnly);
@@ -239,6 +235,8 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
return TaintedSymbols; // return early if needed
}
+ // Any subregion (including Element and Symbolic regions) is tainted if its
+ // super-region is tainted.
if (const SubRegion *ER = dyn_cast<SubRegion>(Reg)) {
std::vector<SymbolRef> TaintedSubRegions =
getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
@@ -318,4 +316,4 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
}
}
return TaintedSymbols;
-}
\ No newline at end of file
+}
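
Behavior is preserved: an element region's base still contributes taint, but now only once, through the generic SubRegion case, instead of being visited twice. A small sketch of the propagation being described (illustrative):

    #include <stdio.h>
    char get(const char *base, int i) {
      scanf("%d", &i);   // i becomes tainted
      return base[i];    // ElementRegion: tainted via its tainted index;
    }                    // a tainted base would flow in via the SubRegion case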
diff --git a/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
index e5dd907c660d..b2947f590c4e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
@@ -205,8 +205,12 @@ void InvalidPtrChecker::postPreviousReturnInvalidatingCall(
CE, LCtx, CE->getType(), C.blockCount());
State = State->BindExpr(CE, LCtx, RetVal);
+ const auto *SymRegOfRetVal =
+ dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
+ if (!SymRegOfRetVal)
+ return;
+
  // Remember this region.
- const auto *SymRegOfRetVal = cast<SymbolicRegion>(RetVal.getAsRegion());
const MemRegion *MR = SymRegOfRetVal->getBaseRegion();
State = State->set<PreviousCallResultMap>(FD, MR);
diff --git a/libcxx/modules/std.compat/cstdlib.inc b/libcxx/modules/std.compat/cstdlib.inc
index a45a0a1caf8b..4783cbf51623 100644
--- a/libcxx/modules/std.compat/cstdlib.inc
+++ b/libcxx/modules/std.compat/cstdlib.inc
@@ -25,7 +25,7 @@ export {
using ::system;
// [c.malloc], C library memory allocation
- using ::aligned_alloc;
+ using ::aligned_alloc _LIBCPP_USING_IF_EXISTS;
using ::calloc;
using ::free;
using ::malloc;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h b/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
index 0f20a33f3a75..7990997835d0 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
@@ -35,11 +35,23 @@ struct LegalityQuery;
class MachineRegisterInfo;
namespace GISelAddressing {
/// Helper struct to store a base, index and offset that forms an address
-struct BaseIndexOffset {
+class BaseIndexOffset {
+private:
Register BaseReg;
Register IndexReg;
- int64_t Offset = 0;
- bool IsIndexSignExt = false;
+ std::optional<int64_t> Offset;
+
+public:
+ BaseIndexOffset() = default;
+ Register getBase() { return BaseReg; }
+ Register getBase() const { return BaseReg; }
+ Register getIndex() { return IndexReg; }
+ Register getIndex() const { return IndexReg; }
+ void setBase(Register NewBase) { BaseReg = NewBase; }
+ void setIndex(Register NewIndex) { IndexReg = NewIndex; }
+ void setOffset(std::optional<int64_t> NewOff) { Offset = NewOff; }
+ bool hasValidOffset() const { return Offset.has_value(); }
+ int64_t getOffset() const { return *Offset; }
};
/// Returns a BaseIndexOffset which describes the pointer in \p Ptr.
@@ -89,7 +101,7 @@ private:
// order stores are writing to incrementing consecutive addresses. So when
// we walk the block in reverse order, the next eligible store must write to
// an offset one store width lower than CurrentLowestOffset.
- uint64_t CurrentLowestOffset;
+ int64_t CurrentLowestOffset;
SmallVector<GStore *> Stores;
// A vector of MachineInstr/unsigned pairs to denote potential aliases that
// need to be checked before the candidate is considered safe to merge. The
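
Offset is now std::optional, so "no known offset" is distinguishable from an offset of zero, and CurrentLowestOffset is signed so negative offsets are representable. A usage sketch of the new accessors (PtrReg is an assumed Register, not from the patch):

    BaseIndexOffset Info;
    Info.setBase(PtrReg);            // PtrReg: hypothetical Register
    Info.setOffset(std::nullopt);    // offset genuinely unknown, not just 0
    if (Info.hasValidOffset()) {
      int64_t Off = Info.getOffset();  // only safe behind hasValidOffset()
      (void)Off;
    }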
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 8ee1f19e083e..1cca56fc19cf 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -8154,6 +8154,7 @@ static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
IRBuilder<> Builder(Branch);
if (UI->getParent() != Branch->getParent())
UI->moveBefore(Branch);
+ UI->dropPoisonGeneratingFlags();
Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
ConstantInt::get(UI->getType(), 0));
LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
@@ -8167,6 +8168,7 @@ static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
IRBuilder<> Builder(Branch);
if (UI->getParent() != Branch->getParent())
UI->moveBefore(Branch);
+ UI->dropPoisonGeneratingFlags();
Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
ConstantInt::get(UI->getType(), 0));
LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 772229215e79..61ddc858ba44 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -591,8 +591,8 @@ bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
(UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
const auto &MMO = LoadMI->getMMO();
- // For atomics, only form anyextending loads.
- if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
+ // Don't do anything for atomics.
+ if (MMO.isAtomic())
continue;
// Check for legality.
if (!isPreLegalize()) {
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index c0c22e36004f..47d045ac4817 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4180,6 +4180,10 @@ LegalizerHelper::fewerElementsVectorPhi(GenericMachineInstr &MI,
}
}
+ // Set the insert point after the existing PHIs
+ MachineBasicBlock &MBB = *MI.getParent();
+ MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
+
// Merge small outputs into MI's def.
if (NumLeftovers) {
mergeMixedSubvectors(MI.getReg(0), OutputRegs);
diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
index 246aa88b09ac..ee499c41c558 100644
--- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -84,21 +84,20 @@ BaseIndexOffset GISelAddressing::getPointerInfo(Register Ptr,
MachineRegisterInfo &MRI) {
BaseIndexOffset Info;
Register PtrAddRHS;
- if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(Info.BaseReg), m_Reg(PtrAddRHS)))) {
- Info.BaseReg = Ptr;
- Info.IndexReg = Register();
- Info.IsIndexSignExt = false;
+ Register BaseReg;
+ if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(BaseReg), m_Reg(PtrAddRHS)))) {
+ Info.setBase(Ptr);
+ Info.setOffset(0);
return Info;
}
-
+ Info.setBase(BaseReg);
auto RHSCst = getIConstantVRegValWithLookThrough(PtrAddRHS, MRI);
if (RHSCst)
- Info.Offset = RHSCst->Value.getSExtValue();
+ Info.setOffset(RHSCst->Value.getSExtValue());
// Just recognize a simple case for now. In future we'll need to match
// indexing patterns for base + index + constant.
- Info.IndexReg = PtrAddRHS;
- Info.IsIndexSignExt = false;
+ Info.setIndex(PtrAddRHS);
return Info;
}
@@ -114,15 +113,16 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
BaseIndexOffset BasePtr0 = getPointerInfo(LdSt1->getPointerReg(), MRI);
BaseIndexOffset BasePtr1 = getPointerInfo(LdSt2->getPointerReg(), MRI);
- if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
+ if (!BasePtr0.getBase().isValid() || !BasePtr1.getBase().isValid())
return false;
int64_t Size1 = LdSt1->getMemSize();
int64_t Size2 = LdSt2->getMemSize();
int64_t PtrDiff;
- if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
- PtrDiff = BasePtr1.Offset - BasePtr0.Offset;
+ if (BasePtr0.getBase() == BasePtr1.getBase() && BasePtr0.hasValidOffset() &&
+ BasePtr1.hasValidOffset()) {
+ PtrDiff = BasePtr1.getOffset() - BasePtr0.getOffset();
// If the size of memory access is unknown, do not use it to do analysis.
// One example of unknown size memory access is to load/store scalable
// vector objects on the stack.
@@ -151,8 +151,8 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
// able to calculate their relative offset if at least one arises
// from an alloca. However, these allocas cannot overlap and we
// can infer there is no alias.
- auto *Base0Def = getDefIgnoringCopies(BasePtr0.BaseReg, MRI);
- auto *Base1Def = getDefIgnoringCopies(BasePtr1.BaseReg, MRI);
+ auto *Base0Def = getDefIgnoringCopies(BasePtr0.getBase(), MRI);
+ auto *Base1Def = getDefIgnoringCopies(BasePtr1.getBase(), MRI);
if (!Base0Def || !Base1Def)
return false; // Couldn't tell anything.
@@ -520,16 +520,20 @@ bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
Register StoreAddr = StoreMI.getPointerReg();
auto BIO = getPointerInfo(StoreAddr, *MRI);
- Register StoreBase = BIO.BaseReg;
- uint64_t StoreOffCst = BIO.Offset;
+ Register StoreBase = BIO.getBase();
if (C.Stores.empty()) {
+ C.BasePtr = StoreBase;
+ if (!BIO.hasValidOffset()) {
+ C.CurrentLowestOffset = 0;
+ } else {
+ C.CurrentLowestOffset = BIO.getOffset();
+ }
// This is the first store of the candidate.
// If the offset can't possibly allow for a lower addressed store with the
// same base, don't bother adding it.
- if (StoreOffCst < ValueTy.getSizeInBytes())
+ if (BIO.hasValidOffset() &&
+ BIO.getOffset() < static_cast<int64_t>(ValueTy.getSizeInBytes()))
return false;
- C.BasePtr = StoreBase;
- C.CurrentLowestOffset = StoreOffCst;
C.Stores.emplace_back(&StoreMI);
LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
<< StoreMI);
@@ -549,8 +553,12 @@ bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
// writes to the next lowest adjacent address.
if (C.BasePtr != StoreBase)
return false;
- if ((C.CurrentLowestOffset - ValueTy.getSizeInBytes()) !=
- static_cast<uint64_t>(StoreOffCst))
+  // If we don't have a valid offset, we can't guarantee that this is an
+  // adjacent offset.
+ if (!BIO.hasValidOffset())
+ return false;
+ if ((C.CurrentLowestOffset -
+ static_cast<int64_t>(ValueTy.getSizeInBytes())) != BIO.getOffset())
return false;
// This writes to an adjacent address. Allow it.
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e806e0f0731f..5038f8a1fc15 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9636,8 +9636,15 @@ static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) {
if (ShiftAmtVal->getBitWidth() != C1Val.getBitWidth())
return false;
+ // The fold is not valid if the sum of the shift values doesn't fit in the
+ // given shift amount type.
+ bool Overflow = false;
+ APInt NewShiftAmt = C1Val.uadd_ov(*ShiftAmtVal, Overflow);
+ if (Overflow)
+ return false;
+
// The fold is not valid if the sum of the shift values exceeds bitwidth.
- if ((*ShiftAmtVal + C1Val).uge(V.getScalarValueSizeInBits()))
+ if (NewShiftAmt.uge(V.getScalarValueSizeInBits()))
return false;
return true;
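
uadd_ov performs the addition in the width of the shift-amount type and reports wraparound, which the old (*ShiftAmtVal + C1Val) form silently discarded. The check in isolation (standalone sketch):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    bool sumFits(const APInt &C1Val, const APInt &ShiftAmtVal) {
      bool Overflow = false;
      APInt Sum = C1Val.uadd_ov(ShiftAmtVal, Overflow);
      (void)Sum;          // e.g. 170 + 170 in 8 bits wraps to 84
      return !Overflow;   // reject the fold when the sum wrapped
    }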
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 03baa7497615..ac61dd8745d4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4885,19 +4885,9 @@ defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
(zext (v8i8 V64:$opB))))),
(UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
- (v8i16 (add (sub (zext (v8i8 V64:$opA)),
- (zext (v8i8 V64:$opB))),
- (AArch64vashr v8i16:$src, (i32 15))))),
- (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
(zext (extract_high_v16i8 (v16i8 V128:$opB)))))),
(UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
- (v8i16 (add (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
- (zext (extract_high_v16i8 (v16i8 V128:$opB)))),
- (AArch64vashr v8i16:$src, (i32 15))))),
- (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
(zext (v4i16 V64:$opB))))),
(UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 84b9330ef963..50d8bfa87508 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -2358,6 +2358,11 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
bool Changed = false;
+ if (IsNonTemporal) {
+ // Set non-temporal hint for all cache levels.
+ Changed |= setTH(MI, AMDGPU::CPol::TH_NT);
+ }
+
if (IsVolatile) {
Changed |= setScope(MI, AMDGPU::CPol::SCOPE_SYS);
@@ -2370,11 +2375,6 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
Position::AFTER);
}
- if (IsNonTemporal) {
- // Set non-temporal hint for all cache levels.
- Changed |= setTH(MI, AMDGPU::CPol::TH_NT);
- }
-
return Changed;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a0cec426002b..d46093b9e260 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14559,7 +14559,7 @@ static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
SDLoc DL(N);
SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
- EVT OtherOpVT = OtherOp->getValueType(0);
+ EVT OtherOpVT = OtherOp.getValueType();
SDValue IdentityOperand =
DAG.getNeutralElement(Opc, DL, OtherOpVT, N->getFlags());
if (!Commutative)
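
SDValue::getValueType() returns the type of the specific result the SDValue refers to, while OtherOp->getValueType(0) always returns the node's first result type, which is wrong whenever OtherOp is not result 0 of a multi-result node. In sketch form:

    EVT Correct = OtherOp.getValueType();    // type of result OtherOp.getResNo()
    EVT Suspect = OtherOp->getValueType(0);  // always result 0's type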
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 833f058253d8..553d338b7790 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2923,11 +2923,10 @@ bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
}
bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
- // Cannot use 32 bit constants to reference objects in kernel code model.
- // Cannot use 32 bit constants to reference objects in large PIC mode since
- // GOTOFF is 64 bits.
+ // Cannot use 32 bit constants to reference objects in kernel/large code
+ // model.
if (TM.getCodeModel() == CodeModel::Kernel ||
- (TM.getCodeModel() == CodeModel::Large && TM.isPositionIndependent()))
+ TM.getCodeModel() == CodeModel::Large)
return false;
// In static codegen with small code model, we can get the address of a label
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9e64726fb6ff..71fc6b5047ea 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47035,10 +47035,13 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
return V;
- // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
- // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
- // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
- // depending on sign of (SarConst - [56,48,32,24,16])
+ // fold (SRA (SHL X, ShlConst), SraConst)
+ // into (SHL (sext_in_reg X), ShlConst - SraConst)
+ // or (sext_in_reg X)
+ // or (SRA (sext_in_reg X), SraConst - ShlConst)
+ // depending on relation between SraConst and ShlConst.
+  // We only do this if (Size - ShlConst) is equal to 8, 16, or 32. That
+  // allows us to do the sext_in_reg from the corresponding bit.
// sexts in X86 are MOVs. The MOVs have the same code size
// as above SHIFTs (only SHIFT on 1 has lower code size).
@@ -47054,29 +47057,29 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
APInt ShlConst = N01->getAsAPIntVal();
- APInt SarConst = N1->getAsAPIntVal();
+ APInt SraConst = N1->getAsAPIntVal();
EVT CVT = N1.getValueType();
- if (SarConst.isNegative())
+ if (CVT != N01.getValueType())
+ return SDValue();
+ if (SraConst.isNegative())
return SDValue();
for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
unsigned ShiftSize = SVT.getSizeInBits();
- // skipping types without corresponding sext/zext and
- // ShlConst that is not one of [56,48,32,24,16]
+ // Only deal with (Size - ShlConst) being equal to 8, 16 or 32.
if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
continue;
SDLoc DL(N);
SDValue NN =
DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
- SarConst = SarConst - (Size - ShiftSize);
- if (SarConst == 0)
+ if (SraConst.eq(ShlConst))
return NN;
- if (SarConst.isNegative())
+ if (SraConst.ult(ShlConst))
return DAG.getNode(ISD::SHL, DL, VT, NN,
- DAG.getConstant(-SarConst, DL, CVT));
+ DAG.getConstant(ShlConst - SraConst, DL, CVT));
return DAG.getNode(ISD::SRA, DL, VT, NN,
- DAG.getConstant(SarConst, DL, CVT));
+ DAG.getConstant(SraConst - ShlConst, DL, CVT));
}
return SDValue();
}
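
A worked instance of the renamed fold for i32 with ShlConst = 24 and SraConst = 25 (so the i8 case fires): the pair of shifts is equivalent to sign-extending the low byte in place and then shifting right by SraConst - ShlConst = 1. A standalone sketch (C++20 semantics for signed right shift assumed):

    #include <cstdint>
    bool equivalent(int32_t x) {
      int32_t shifts = (int32_t)((uint32_t)x << 24) >> 25;  // original pattern
      int32_t sext   = (int32_t)(int8_t)(x & 0xff) >> 1;    // sext_in_reg + SRA
      return shifts == sext;  // expected to hold for all x
    }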
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index a458b5f9ec8f..4d55a084b730 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -244,7 +244,8 @@ public:
// TODO: Currently we're always allowing widening on CPUs without VLX,
// because for many cases we don't have a better option.
bool canExtendTo512DQ() const {
- return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
+ return hasAVX512() && hasEVEX512() &&
+ (!hasVLX() || getPreferVectorWidth() >= 512);
}
bool canExtendTo512BW() const {
return hasBWI() && canExtendTo512DQ();
diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp
index 4466d50458e1..1adef15771fa 100644
--- a/llvm/lib/TargetParser/Host.cpp
+++ b/llvm/lib/TargetParser/Host.cpp
@@ -1266,8 +1266,10 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
setFeature(X86::FEATURE_AVX2);
if (HasLeaf7 && ((EBX >> 8) & 1))
setFeature(X86::FEATURE_BMI2);
- if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
+ if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save) {
setFeature(X86::FEATURE_AVX512F);
+ setFeature(X86::FEATURE_EVEX512);
+ }
if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
setFeature(X86::FEATURE_AVX512DQ);
if (HasLeaf7 && ((EBX >> 19) & 1))
@@ -1772,6 +1774,7 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
Features["rtm"] = HasLeaf7 && ((EBX >> 11) & 1);
// AVX512 is only supported if the OS supports the context save for it.
Features["avx512f"] = HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save;
+ Features["evex512"] = Features["avx512f"];
Features["avx512dq"] = HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save;
Features["rdseed"] = HasLeaf7 && ((EBX >> 18) & 1);
Features["adx"] = HasLeaf7 && ((EBX >> 19) & 1);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 9f220ec003ec..8cc7901cbac7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2606,7 +2606,7 @@ static Instruction *foldSelectWithSRem(SelectInst &SI, InstCombinerImpl &IC,
// %cnd = icmp slt i32 %rem, 0
// %add = add i32 %rem, %n
// %sel = select i1 %cnd, i32 %add, i32 %rem
- if (match(TrueVal, m_Add(m_Value(RemRes), m_Value(Remainder))) &&
+ if (match(TrueVal, m_Add(m_Specific(RemRes), m_Value(Remainder))) &&
match(RemRes, m_SRem(m_Value(Op), m_Specific(Remainder))) &&
IC.isKnownToBeAPowerOfTwo(Remainder, /*OrZero*/ true) &&
FalseVal == RemRes)
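
The pattern being matched is the canonical positive-modulo idiom shown in the comment above; m_Specific(RemRes) now requires the add to reuse the very srem the comparison inspects, where m_Value(RemRes) would silently rebind it to an unrelated value. Source-level sketch of the idiom:

    int positive_mod(int x, int n) {   // n assumed a power of two
      int rem = x % n;                 // %rem = srem
      return rem < 0 ? rem + n : rem;  // select folds to x & (n - 1)
    }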
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 9df28747570c..104e8ceb7967 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -279,6 +279,9 @@ bool InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
+ if (!LHS->getType()->isIntegerTy())
+ return false;
+
// Canonicalize to the `Index Pred Invariant` comparison
if (IsLoopInvariant(LHS)) {
std::swap(LHS, RHS);