Diffstat (limited to 'lib/CodeGen/CodeGenFunction.h')
-rw-r--r--  lib/CodeGen/CodeGenFunction.h  928
1 file changed, 581 insertions(+), 347 deletions(-)
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index dd4c2e43ef64..79870ed59c96 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -34,6 +34,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
@@ -137,6 +138,88 @@ enum SanitizerHandler {
#undef SANITIZER_CHECK
};
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
+};
+
+/// A partial specialization of DominatingValue for llvm::Values that
+/// might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for Address.
+template <> struct DominatingValue<Address> {
+ typedef Address type;
+
+ struct saved_type {
+ DominatingLLVMValue::saved_type SavedValue;
+ CharUnits Alignment;
+ };
+
+ static bool needsSaving(type value) {
+ return DominatingLLVMValue::needsSaving(value.getPointer());
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return { DominatingLLVMValue::save(CGF, value.getPointer()),
+ value.getAlignment() };
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
+ value.Alignment);
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ unsigned K : 3;
+ unsigned Align : 29;
+ saved_type(llvm::Value *v, Kind k, unsigned a = 0)
+ : Value(v), K(k), Align(a) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGCleanup.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
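The three specializations above share a single protocol: needsSaving / save / restore. A minimal usage sketch follows; the helper name spillForConditionalCleanup is hypothetical and purely illustrative:

    // A value produced inside a conditionally executed branch must be spilled
    // before a cleanup referencing it is pushed, then reloaded when the
    // cleanup actually runs.
    template <class T>
    typename DominatingValue<T>::saved_type
    spillForConditionalCleanup(CodeGenFunction &CGF, T value) {
      // For values that already dominate everything (e.g. entry-block
      // instructions), needsSaving() is false and save() is expected to be a
      // cheap wrapper around the value itself.
      return DominatingValue<T>::save(CGF, value);
    }
    // Later, when emitting the cleanup body:
    //   T value = DominatingValue<T>::restore(CGF, saved);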
+
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
@@ -200,7 +283,7 @@ public:
Address UB)>
CodeGenDispatchBoundsTy;
- /// \brief CGBuilder insert helper. This function is called after an
+ /// CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
@@ -213,7 +296,7 @@ public:
const Decl *CurCodeDecl;
const CGFunctionInfo *CurFnInfo;
QualType FnRetTy;
- llvm::Function *CurFn;
+ llvm::Function *CurFn = nullptr;
// Holds coroutine data if the current function is a coroutine. We use a
// wrapper to manage its lifetime, so that we don't have to define CGCoroData
@@ -241,7 +324,7 @@ public:
/// ReturnValue - The temporary alloca to hold the return
/// value. This is invalid iff the function has no return value.
- Address ReturnValue;
+ Address ReturnValue = Address::invalid();
/// Return true if a label was seen in the current scope.
bool hasLabelBeenSeenInCurrentScope() const {
@@ -254,7 +337,7 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
- /// \brief API for captured statement code generation.
+ /// API for captured statement code generation.
class CGCapturedStmtInfo {
public:
explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
@@ -282,10 +365,10 @@ public:
CapturedRegionKind getKind() const { return Kind; }
virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
- // \brief Retrieve the value of the context parameter.
+ // Retrieve the value of the context parameter.
virtual llvm::Value *getContextValue() const { return ThisValue; }
- /// \brief Lookup the captured field decl for a variable.
+ /// Lookup the captured field decl for a variable.
virtual const FieldDecl *lookup(const VarDecl *VD) const {
return CaptureFields.lookup(VD->getCanonicalDecl());
}
@@ -297,32 +380,32 @@ public:
return true;
}
- /// \brief Emit the captured statement body.
+ /// Emit the captured statement body.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
CGF.incrementProfileCounter(S);
CGF.EmitStmt(S);
}
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
private:
- /// \brief The kind of captured statement being generated.
+ /// The kind of captured statement being generated.
CapturedRegionKind Kind;
- /// \brief Keep the map between VarDecl and FieldDecl.
+ /// Keep the map between VarDecl and FieldDecl.
llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
- /// \brief The base address of the captured record, passed in as the first
+ /// The base address of the captured record, passed in as the first
/// argument of the parallel region function.
llvm::Value *ThisValue;
- /// \brief Captured 'this' type.
+ /// Captured 'this' type.
FieldDecl *CXXThisFieldDecl;
};
- CGCapturedStmtInfo *CapturedStmtInfo;
+ CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
- /// \brief RAII for correct setting/restoring of CapturedStmtInfo.
+ /// RAII for correct setting/restoring of CapturedStmtInfo.
class CGCapturedStmtRAII {
private:
CodeGenFunction &CGF;
@@ -361,13 +444,13 @@ public:
}
};
- /// \brief Sanitizers enabled for this function.
+ /// Sanitizers enabled for this function.
SanitizerSet SanOpts;
- /// \brief True if CodeGen currently emits code implementing sanitizer checks.
- bool IsSanitizerScope;
+ /// True if CodeGen currently emits code implementing sanitizer checks.
+ bool IsSanitizerScope = false;
- /// \brief RAII object to set/unset CodeGenFunction::IsSanitizerScope.
+ /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
class SanitizerScope {
CodeGenFunction *CGF;
public:
@@ -377,28 +460,28 @@ public:
/// In C++, whether we are code generating a thunk. This controls whether we
/// should emit cleanups.
- bool CurFuncIsThunk;
+ bool CurFuncIsThunk = false;
/// In ARC, whether we should autorelease the return value.
- bool AutoreleaseResult;
+ bool AutoreleaseResult = false;
/// Whether we processed a Microsoft-style asm block during CodeGen. These can
/// potentially set the return value.
- bool SawAsmBlock;
+ bool SawAsmBlock = false;
const FunctionDecl *CurSEHParent = nullptr;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
- bool IsOutlinedSEHHelper;
+ bool IsOutlinedSEHHelper = false;
- const CodeGen::CGBlockInfo *BlockInfo;
- llvm::Value *BlockPointer;
+ const CodeGen::CGBlockInfo *BlockInfo = nullptr;
+ llvm::Value *BlockPointer = nullptr;
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
- FieldDecl *LambdaThisCaptureField;
+ FieldDecl *LambdaThisCaptureField = nullptr;
- /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
@@ -426,30 +509,33 @@ public:
/// The size of the following cleanup object.
unsigned Size;
/// The kind of cleanup to push: a value from the CleanupKind enumeration.
- CleanupKind Kind;
+ unsigned Kind : 31;
+ /// Whether this is a conditional cleanup.
+ unsigned IsConditional : 1;
size_t getSize() const { return Size; }
- CleanupKind getKind() const { return Kind; }
+ CleanupKind getKind() const { return (CleanupKind)Kind; }
+ bool isConditional() const { return IsConditional; }
};
/// i32s containing the indexes of the cleanup destinations.
- llvm::AllocaInst *NormalCleanupDest;
+ Address NormalCleanupDest = Address::invalid();
- unsigned NextCleanupDestIndex;
+ unsigned NextCleanupDestIndex = 1;
/// FirstBlockInfo - The head of a singly-linked-list of block layouts.
- CGBlockInfo *FirstBlockInfo;
+ CGBlockInfo *FirstBlockInfo = nullptr;
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
- llvm::BasicBlock *EHResumeBlock;
+ llvm::BasicBlock *EHResumeBlock = nullptr;
/// The exception slot. All landing pads write the current exception pointer
/// into this alloca.
- llvm::Value *ExceptionSlot;
+ llvm::Value *ExceptionSlot = nullptr;
/// The selector slot. Under the MandatoryCleanup model, all landing pads
/// write the current selector value into this alloca.
- llvm::AllocaInst *EHSelectorSlot;
+ llvm::AllocaInst *EHSelectorSlot = nullptr;
/// A stack of exception code slots. Entering an __except block pushes a slot
/// on the stack and leaving pops one. The __exception_code() intrinsic loads
@@ -524,28 +610,52 @@ public:
initFullExprCleanup();
}
- /// \brief Queue a cleanup to be pushed after finishing the current
+ /// Queue a cleanup to be pushed after finishing the current
/// full-expression.
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
- assert(!isInConditionalBranch() && "can't defer conditional cleanup");
+ if (!isInConditionalBranch())
+ return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
+
+ Address ActiveFlag = createCleanupActiveFlag();
+ assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
+ "cleanup active flag should never need saving");
- LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
+ typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
+ SavedTuple Saved{saveValueInCond(A)...};
+
+ typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
+ pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
+ }
+
+ template <class T, class... As>
+ void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
+ As... A) {
+ LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
+ ActiveFlag.isValid()};
size_t OldSize = LifetimeExtendedCleanupStack.size();
LifetimeExtendedCleanupStack.resize(
- LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
+ LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
+ (Header.IsConditional ? sizeof(ActiveFlag) : 0));
static_assert(sizeof(Header) % alignof(T) == 0,
"Cleanup will be allocated on misaligned address");
char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
new (Buffer) LifetimeExtendedCleanupHeader(Header);
new (Buffer + sizeof(Header)) T(A...);
+ if (Header.IsConditional)
+ new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
}
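For orientation, the buffer written above has the following layout; this is a sketch inferred from the code in this hunk, with the trailer present only for conditional cleanups:

    // [ LifetimeExtendedCleanupHeader | T (cleanup payload) | Address ActiveFlag? ]
    //
    // PopCleanupBlocks later walks this byte buffer: it re-reads each header,
    // re-pushes the cleanup of size Header.Size, and, for entries with
    // Header.IsConditional set, reads the trailing Address back out as the
    // cleanup's active flag.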
- /// Set up the last cleaup that was pushed as a conditional
+ /// Set up the last cleanup that was pushed as a conditional
/// full-expression cleanup.
- void initFullExprCleanup();
+ void initFullExprCleanup() {
+ initFullExprCleanupWithFlag(createCleanupActiveFlag());
+ }
+
+ void initFullExprCleanupWithFlag(Address ActiveFlag);
+ Address createCleanupActiveFlag();
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
@@ -583,10 +693,10 @@ public:
void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
llvm::Instruction *DominatingIP);
- /// \brief Enters a new scope for capturing cleanups, all of which
+ /// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- EHScopeStack::stable_iterator CleanupStackDepth;
+ EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
size_t LifetimeExtendedCleanupStackSize;
bool OldDidCallStackSave;
protected:
@@ -600,7 +710,7 @@ public:
CodeGenFunction& CGF;
public:
- /// \brief Enter a new cleanup scope.
+ /// Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
: PerformCleanup(true), CGF(CGF)
{
@@ -609,20 +719,22 @@ public:
CGF.LifetimeExtendedCleanupStack.size();
OldDidCallStackSave = CGF.DidCallStackSave;
CGF.DidCallStackSave = false;
+ OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
+ CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
}
- /// \brief Exit this cleanup scope, emitting any accumulated cleanups.
+ /// Exit this cleanup scope, emitting any accumulated cleanups.
~RunCleanupsScope() {
if (PerformCleanup)
ForceCleanup();
}
- /// \brief Determine whether this scope requires any cleanups.
+ /// Determine whether this scope requires any cleanups.
bool requiresCleanups() const {
return CGF.EHStack.stable_begin() != CleanupStackDepth;
}
- /// \brief Force the emission of cleanups now, instead of waiting
+ /// Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
/// \param ValuesToReload - A list of values that need to be available at
/// the insertion point after cleanup emission. If cleanup emission created
@@ -634,9 +746,14 @@ public:
CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
ValuesToReload);
PerformCleanup = false;
+ CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
}
};
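A hypothetical usage sketch (emitScopedBody is illustrative, not part of this patch): a scope records the cleanup stack depth on entry and pops back to it on exit.

    void emitScopedBody(CodeGenFunction &CGF, const Stmt *Body) {
      CodeGenFunction::RunCleanupsScope Scope(CGF); // records EHStack depth
      CGF.EmitStmt(Body);                           // may push cleanups
      // ~RunCleanupsScope() emits every cleanup pushed since entry and, with
      // this patch, also restores CurrentCleanupScopeDepth.
    }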
+ // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
+ EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
+ EHScopeStack::stable_end();
+
class LexicalScope : public RunCleanupsScope {
SourceRange Range;
SmallVector<const LabelDecl*, 4> Labels;
@@ -646,7 +763,7 @@ public:
void operator=(const LexicalScope &) = delete;
public:
- /// \brief Enter a new cleanup scope.
+ /// Enter a new cleanup scope.
explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
: RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
CGF.CurLexicalScope = this;
@@ -659,7 +776,7 @@ public:
Labels.push_back(label);
}
- /// \brief Exit this cleanup scope, emitting any accumulated
+ /// Exit this cleanup scope, emitting any accumulated
/// cleanups.
~LexicalScope() {
if (CGDebugInfo *DI = CGF.getDebugInfo())
@@ -673,7 +790,7 @@ public:
}
}
- /// \brief Force the emission of cleanups now, instead of waiting
+ /// Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
void ForceCleanup() {
CGF.CurLexicalScope = ParentScope;
@@ -692,57 +809,107 @@ public:
typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
- /// \brief The scope used to remap some variables as private in the OpenMP
- /// loop body (or other captured region emitted without outlining), and to
- /// restore old vars back on exit.
- class OMPPrivateScope : public RunCleanupsScope {
+ /// The class used to temporarily assign some variables new addresses.
+ class OMPMapVars {
DeclMapTy SavedLocals;
- DeclMapTy SavedPrivates;
-
- private:
- OMPPrivateScope(const OMPPrivateScope &) = delete;
- void operator=(const OMPPrivateScope &) = delete;
+ DeclMapTy SavedTempAddresses;
+ OMPMapVars(const OMPMapVars &) = delete;
+ void operator=(const OMPMapVars &) = delete;
public:
- /// \brief Enter a new OpenMP private scope.
- explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
-
- /// \brief Registers \a LocalVD variable as a private and apply \a
- /// PrivateGen function for it to generate corresponding private variable.
- /// \a PrivateGen returns an address of the generated private variable.
- /// \return true if the variable is registered as private, false if it has
- /// been privatized already.
- bool
- addPrivate(const VarDecl *LocalVD,
- llvm::function_ref<Address()> PrivateGen) {
- assert(PerformCleanup && "adding private to dead scope");
+ explicit OMPMapVars() = default;
+ ~OMPMapVars() {
+ assert(SavedLocals.empty() && "Did not restore original addresses.");
+ }
+ /// Sets the address of the variable \p LocalVD to be \p TempAddr in
+ /// function \p CGF.
+ /// \return true if the variable was newly mapped, false if its original
+ /// address had already been saved.
+ bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
+ Address TempAddr) {
LocalVD = LocalVD->getCanonicalDecl();
// Only save it once.
if (SavedLocals.count(LocalVD)) return false;
// Copy the existing local entry to SavedLocals.
auto it = CGF.LocalDeclMap.find(LocalVD);
- if (it != CGF.LocalDeclMap.end()) {
- SavedLocals.insert({LocalVD, it->second});
- } else {
- SavedLocals.insert({LocalVD, Address::invalid()});
- }
+ if (it != CGF.LocalDeclMap.end())
+ SavedLocals.try_emplace(LocalVD, it->second);
+ else
+ SavedLocals.try_emplace(LocalVD, Address::invalid());
// Generate the private entry.
- Address Addr = PrivateGen();
QualType VarTy = LocalVD->getType();
if (VarTy->isReferenceType()) {
Address Temp = CGF.CreateMemTemp(VarTy);
- CGF.Builder.CreateStore(Addr.getPointer(), Temp);
- Addr = Temp;
+ CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
+ TempAddr = Temp;
}
- SavedPrivates.insert({LocalVD, Addr});
+ SavedTempAddresses.try_emplace(LocalVD, TempAddr);
return true;
}
- /// \brief Privatizes local variables previously registered as private.
+ /// Applies the new addresses to the mapped variables.
+ /// \return true if at least one variable was given a new address, false
+ /// otherwise.
+ bool apply(CodeGenFunction &CGF) {
+ copyInto(SavedTempAddresses, CGF.LocalDeclMap);
+ SavedTempAddresses.clear();
+ return !SavedLocals.empty();
+ }
+
+ /// Restores original addresses of the variables.
+ void restore(CodeGenFunction &CGF) {
+ if (!SavedLocals.empty()) {
+ copyInto(SavedLocals, CGF.LocalDeclMap);
+ SavedLocals.clear();
+ }
+ }
+
+ private:
+ /// Copy all the entries in the source map over the corresponding
+ /// entries in the destination, which must exist.
+ static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
+ for (auto &Pair : Src) {
+ if (!Pair.second.isValid()) {
+ Dest.erase(Pair.first);
+ continue;
+ }
+
+ auto I = Dest.find(Pair.first);
+ if (I != Dest.end())
+ I->second = Pair.second;
+ else
+ Dest.insert(Pair);
+ }
+ }
+ };
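A sketch of the intended map/apply/restore protocol; the caller remapVar and the already-computed private address are hypothetical:

    void remapVar(CodeGenFunction &CGF, const VarDecl *VD, Address PrivAddr) {
      CodeGenFunction::OMPMapVars Map;
      Map.setVarAddr(CGF, VD, PrivAddr); // saves the original mapping
      Map.apply(CGF);    // LocalDeclMap now resolves VD to PrivAddr
      // ... emit the region body with VD remapped ...
      Map.restore(CGF);  // puts the original addresses back
    }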
+
+ /// The scope used to remap some variables as private in the OpenMP loop body
+ /// (or other captured region emitted without outlining), and to restore old
+ /// vars back on exit.
+ class OMPPrivateScope : public RunCleanupsScope {
+ OMPMapVars MappedVars;
+ OMPPrivateScope(const OMPPrivateScope &) = delete;
+ void operator=(const OMPPrivateScope &) = delete;
+
+ public:
+ /// Enter a new OpenMP private scope.
+ explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
+
+ /// Registers the variable \p LocalVD as private and applies the
+ /// \p PrivateGen function to generate the corresponding private variable.
+ /// \p PrivateGen returns the address of the generated private variable.
+ /// \return true if the variable is registered as private, false if it has
+ /// been privatized already.
+ bool addPrivate(const VarDecl *LocalVD,
+ const llvm::function_ref<Address()> PrivateGen) {
+ assert(PerformCleanup && "adding private to dead scope");
+ return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
+ }
+
+ /// Privatizes local variables previously registered as private.
/// Registration is separate from the actual privatization to allow
/// initializers to use values of the original variables, not the private ones.
/// This is important, for example, if the private variable is a class
@@ -750,19 +917,14 @@ public:
/// variables. But at initialization original variables must be used, not
/// private copies.
/// \return true if at least one variable was privatized, false otherwise.
- bool Privatize() {
- copyInto(SavedPrivates, CGF.LocalDeclMap);
- SavedPrivates.clear();
- return !SavedLocals.empty();
- }
+ bool Privatize() { return MappedVars.apply(CGF); }
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
- copyInto(SavedLocals, CGF.LocalDeclMap);
- SavedLocals.clear();
+ MappedVars.restore(CGF);
}
- /// \brief Exit scope - all the mapped variables are restored.
+ /// Exit scope - all the mapped variables are restored.
~OMPPrivateScope() {
if (PerformCleanup)
ForceCleanup();
@@ -773,34 +935,15 @@ public:
VD = VD->getCanonicalDecl();
return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
}
-
- private:
- /// Copy all the entries in the source map over the corresponding
- /// entries in the destination, which must exist.
- static void copyInto(const DeclMapTy &src, DeclMapTy &dest) {
- for (auto &pair : src) {
- if (!pair.second.isValid()) {
- dest.erase(pair.first);
- continue;
- }
-
- auto it = dest.find(pair.first);
- if (it != dest.end()) {
- it->second = pair.second;
- } else {
- dest.insert(pair);
- }
- }
- }
};
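A hypothetical end-to-end sketch (privatizeAndEmit is illustrative): register first, privatize only after all initializers have been emitted.

    void privatizeAndEmit(CodeGenFunction &CGF, const VarDecl *VD,
                          const Stmt *Body) {
      CodeGenFunction::OMPPrivateScope Scope(CGF);
      Scope.addPrivate(VD, [&]() -> Address {
        // Code emitted here still sees the *original* variable, since the
        // remapping only takes effect at Privatize().
        return CGF.CreateMemTemp(VD->getType(), VD->getName());
      });
      (void)Scope.Privatize(); // uses of VD now resolve to the private copy
      CGF.EmitStmt(Body);
    } // scope destruction restores the original addresses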
- /// \brief Takes the old cleanup stack size and emits the cleanup blocks
+ /// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
void
PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
std::initializer_list<llvm::Value **> ValuesToReload = {});
- /// \brief Takes the old cleanup stack size and emits the cleanup blocks
+ /// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added, then adds all lifetime-extended cleanups from
/// the given position to the stack.
void
@@ -843,7 +986,8 @@ public:
llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
- llvm::BasicBlock *getMSVCDispatchBlock(EHScopeStack::stable_iterator scope);
+ llvm::BasicBlock *
+ getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
class ConditionalEvaluation {
@@ -1052,22 +1196,27 @@ public:
private:
CGDebugInfo *DebugInfo;
- bool DisableDebugInfo;
+ bool DisableDebugInfo = false;
/// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
/// calling llvm.stacksave for multiple VLAs in the same scope.
- bool DidCallStackSave;
+ bool DidCallStackSave = false;
/// IndirectBranch - The first time an indirect goto is seen we create a block
/// with an indirect branch. Every time we see the address of a label taken,
/// we add the label to the indirect goto. Every subsequent indirect goto is
/// codegen'd as a jump to the IndirectBranch's basic block.
- llvm::IndirectBrInst *IndirectBranch;
+ llvm::IndirectBrInst *IndirectBranch = nullptr;
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
DeclMapTy LocalDeclMap;
+ // Keep track of the cleanups for callee-destructed parameters pushed to the
+ // cleanup stack so that they can be deactivated later.
+ llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
+ CalleeDestructedParamCleanups;
+
/// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
/// will contain a mapping from said ParmVarDecl to its implicit "object_size"
/// parameter.
@@ -1119,7 +1268,7 @@ private:
/// Emits exit block with special codegen procedure specific for the related
/// OpenMP construct + emits code for normal construct cleanup.
void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
- const llvm::function_ref<void(CodeGenFunction &)> &CodeGen) {
+ const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
assert(CGF.getOMPCancelDestination(Kind).isValid());
assert(CGF.HaveInsertPoint());
@@ -1207,13 +1356,13 @@ private:
/// SwitchInsn - This is nearest current switch instruction. It is null if
/// current context is not in a switch.
- llvm::SwitchInst *SwitchInsn;
+ llvm::SwitchInst *SwitchInsn = nullptr;
/// The branch weights of SwitchInsn when doing instrumentation based PGO.
- SmallVector<uint64_t, 16> *SwitchWeights;
+ SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
/// CaseRangeBlock - This block holds if condition check for last case
/// statement range in current switch instruction.
- llvm::BasicBlock *CaseRangeBlock;
+ llvm::BasicBlock *CaseRangeBlock = nullptr;
/// OpaqueLValues - Keeps track of the current set of opaque value
/// expressions.
@@ -1230,13 +1379,13 @@ private:
/// A block containing a single 'unreachable' instruction. Created
/// lazily by getUnreachableBlock().
- llvm::BasicBlock *UnreachableBlock;
+ llvm::BasicBlock *UnreachableBlock = nullptr;
/// Counts of the number return expressions in the function.
- unsigned NumReturnExprs;
+ unsigned NumReturnExprs = 0;
/// Count the number of simple (constant) return expressions in the function.
- unsigned NumSimpleReturnExprs;
+ unsigned NumSimpleReturnExprs = 0;
/// The last regular (non-return) debug location (breakpoint) in the function.
SourceLocation LastStopPoint;
@@ -1356,9 +1505,9 @@ public:
private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
- ImplicitParamDecl *CXXABIThisDecl;
- llvm::Value *CXXABIThisValue;
- llvm::Value *CXXThisValue;
+ ImplicitParamDecl *CXXABIThisDecl = nullptr;
+ llvm::Value *CXXABIThisValue = nullptr;
+ llvm::Value *CXXThisValue = nullptr;
CharUnits CXXABIThisAlignment;
CharUnits CXXThisAlignment;
@@ -1376,16 +1525,16 @@ private:
/// CXXStructorImplicitParamDecl - When generating code for a constructor or
/// destructor, this will hold the implicit argument (e.g. VTT).
- ImplicitParamDecl *CXXStructorImplicitParamDecl;
- llvm::Value *CXXStructorImplicitParamValue;
+ ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
+ llvm::Value *CXXStructorImplicitParamValue = nullptr;
/// OutermostConditional - Points to the outermost active
/// conditional control. This is used so that we know if a
/// temporary should be destroyed conditionally.
- ConditionalEvaluation *OutermostConditional;
+ ConditionalEvaluation *OutermostConditional = nullptr;
/// The current lexical scope.
- LexicalScope *CurLexicalScope;
+ LexicalScope *CurLexicalScope = nullptr;
/// The current source location that should be used for exception
/// handling code.
@@ -1416,14 +1565,21 @@ private:
CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}
- llvm::BasicBlock *TerminateLandingPad;
- llvm::BasicBlock *TerminateHandler;
- llvm::BasicBlock *TrapBB;
+ llvm::BasicBlock *TerminateLandingPad = nullptr;
+ llvm::BasicBlock *TerminateHandler = nullptr;
+ llvm::BasicBlock *TrapBB = nullptr;
+
+ /// Terminate funclets keyed by parent funclet pad.
+ llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
+
+ /// Largest vector width used in this function. Will be used to create a
+ /// function attribute.
+ unsigned LargestVectorWidth = 0;
/// True if we need emit the life-time markers.
const bool ShouldEmitLifetimeMarkers;
- /// Add OpenCL kernel arg metadata and the kernel attribute meatadata to
+ /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
/// the function metadata.
void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn);
@@ -1532,6 +1688,7 @@ public:
return false;
case QualType::DK_cxx_destructor:
case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
return getLangOpts().Exceptions;
case QualType::DK_objc_strong_lifetime:
return getLangOpts().Exceptions &&
@@ -1579,10 +1736,7 @@ public:
/// \return an LLVM value which is a pointer to a struct which contains
/// information about the block, including the block invoke function, the
/// captured variables, etc.
- /// \param InvokeF will contain the block invoke function if it is not
- /// nullptr.
- llvm::Value *EmitBlockLiteral(const BlockExpr *,
- llvm::Function **InvokeF = nullptr);
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
static void destroyBlockInfos(CGBlockInfo *info);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
@@ -1604,7 +1758,25 @@ public:
class AutoVarEmission;
void emitByrefStructureInit(const AutoVarEmission &emission);
- void enterByrefCleanup(const AutoVarEmission &emission);
+
+ /// Enter a cleanup to destroy a __block variable. Note that this
+ /// cleanup should be a no-op if the variable hasn't left the stack
+ /// yet; if a cleanup is required for the variable itself, that needs
+ /// to be done externally.
+ ///
+ /// \param Kind Cleanup kind.
+ ///
+ /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
+ /// structure that will be passed to _Block_object_dispose. When
+ /// \p LoadBlockVarAddr is true, the address of the field of the block
+ /// structure that holds the address of the __block structure.
+ ///
+ /// \param Flags The flag that will be passed to _Block_object_dispose.
+ ///
+ /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
+ /// \p Addr to get the address of the __block structure.
+ void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
+ bool LoadBlockVarAddr);
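A hedged example of a call site for the new overload; ByrefAddr is illustrative, and BLOCK_FIELD_IS_BYREF is the flag defined in CGBlocks.h:

    // Enter the dispose cleanup for a __block variable whose byref structure
    // lives at ByrefAddr; no load is needed since ByrefAddr is the structure
    // itself rather than a field holding its address.
    CGF.enterByrefCleanup(NormalAndEHCleanup, ByrefAddr, BLOCK_FIELD_IS_BYREF,
                          /*LoadBlockVarAddr=*/false);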
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
llvm::Value *ptr);
@@ -1627,7 +1799,7 @@ public:
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
- /// \brief Emit code for the start of a function.
+ /// Emit code for the start of a function.
/// \param Loc The location to be associated with the function.
/// \param StartLoc The location of the function body.
void StartFunction(GlobalDecl GD,
@@ -1653,7 +1825,7 @@ public:
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
void EmitAsanPrologueOrEpilogue(bool Prologue);
- /// \brief Emit the unified return block, trying to avoid its emission when
+ /// Emit the unified return block, trying to avoid its emission when
/// possible.
/// \return The debug location of the user written return statement if the
/// return block is avoided.
@@ -1664,10 +1836,10 @@ public:
void FinishFunction(SourceLocation EndLoc=SourceLocation());
void StartThunk(llvm::Function *Fn, GlobalDecl GD,
- const CGFunctionInfo &FnInfo);
+ const CGFunctionInfo &FnInfo, bool IsUnprototyped);
- void EmitCallAndReturnForThunk(llvm::Constant *Callee,
- const ThunkInfo *Thunk);
+ void EmitCallAndReturnForThunk(llvm::Constant *Callee, const ThunkInfo *Thunk,
+ bool IsUnprototyped);
void FinishThunk();
@@ -1677,7 +1849,8 @@ public:
/// Generate a thunk for the given method.
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
- GlobalDecl GD, const ThunkInfo &Thunk);
+ GlobalDecl GD, const ThunkInfo &Thunk,
+ bool IsUnprototyped);
llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
@@ -1688,7 +1861,7 @@ public:
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
- /// Struct with all informations about dynamic [sub]class needed to set vptr.
+ /// Struct with all information about dynamic [sub]class needed to set vptr.
struct VPtr {
BaseSubobject Base;
const CXXRecordDecl *NearestVBase;
@@ -1723,9 +1896,11 @@ public:
CFITCK_DerivedCast,
CFITCK_UnrelatedCast,
CFITCK_ICall,
+ CFITCK_NVMFCall,
+ CFITCK_VMFCall,
};
- /// \brief Derived is the presumed address of an object of type T after a
+ /// Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
@@ -1775,6 +1950,10 @@ public:
/// XRay custom event handling calls.
bool AlwaysEmitXRayCustomEvents() const;
+ /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
+ /// XRay typed event handling calls.
+ bool AlwaysEmitXRayTypedEvents() const;
+
/// Encode an address into a form suitable for use in a function prologue.
llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
llvm::Constant *Addr);
@@ -1808,6 +1987,10 @@ public:
/// getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock *getTerminateLandingPad();
+ /// getTerminateFunclet - Return a cleanup funclet that just calls
+ /// terminate.
+ llvm::BasicBlock *getTerminateFunclet();
+
/// getTerminateHandler - Return a handler (not a landing pad, just
/// a catch handler) that just calls terminate. This is used when
/// a terminate scope encloses a try.
@@ -1841,11 +2024,7 @@ public:
llvm::BasicBlock *createBasicBlock(const Twine &name = "",
llvm::Function *parent = nullptr,
llvm::BasicBlock *before = nullptr) {
-#ifdef NDEBUG
- return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
-#else
return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
-#endif
}
/// getBasicBlockForLabel - Return the LLVM basicblock that the specified
@@ -1975,15 +2154,20 @@ public:
/// to the stack.
///
/// Because the address of a temporary is often exposed to the program in
- /// various ways, this function will perform the cast by default. The cast
- /// may be avoided by passing false as \p CastToDefaultAddrSpace; this is
+ /// various ways, this function will perform the cast. The original alloca
+ /// instruction is returned through \p Alloca if it is not nullptr.
+ ///
+ /// The cast is not performed in CreateTempAllocaWithoutCast; that variant is
+ /// more efficient if the caller knows that the address will not be exposed.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr);
Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr,
- bool CastToDefaultAddrSpace = true);
+ Address *Alloca = nullptr);
+ Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr);
/// CreateDefaultAlignedTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
@@ -2018,12 +2202,18 @@ public:
Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
- /// appropriate alignment. Cast it to the default address space if
- /// \p CastToDefaultAddrSpace is true.
+ /// appropriate alignment, and cast it to the default address space. Returns
+ /// the original alloca instruction via \p Alloca if it is not nullptr.
Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
- bool CastToDefaultAddrSpace = true);
+ Address *Alloca = nullptr);
Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
- bool CastToDefaultAddrSpace = true);
+ Address *Alloca = nullptr);
+
+ /// CreateMemTemp - Create a temporary memory object of the given type, with
+ /// appropriate alignment, without casting it to the default address space.
+ Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
+ const Twine &Name = "tmp");
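A sketch of the distinction, assuming a hypothetical caller holding a QualType T:

    Address Raw = CGF.CreateMemTempWithoutCast(T, "raw.tmp"); // alloca addrspace
    Address OrigAlloca = Address::invalid();
    Address Cast = CGF.CreateMemTemp(T, "tmp", &OrigAlloca);  // default addrspace
    // OrigAlloca now refers to the same slot as the raw alloca behind Cast.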
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
@@ -2032,7 +2222,8 @@ public:
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap);
}
/// Emit a cast to void* in the appropriate address space.
@@ -2089,31 +2280,52 @@ public:
}
return false;
}
- /// EmitAggregateCopy - Emit an aggregate assignment.
- ///
- /// The difference to EmitAggregateCopy is that tail padding is not copied.
- /// This is required for correctness when assigning non-POD structures in C++.
- void EmitAggregateAssign(Address DestPtr, Address SrcPtr,
- QualType EltTy) {
+
+ /// Determine whether a return value slot may overlap some other object.
+ AggValueSlot::Overlap_t overlapForReturnValue() {
+ // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
+ // class subobjects. These cases may need to be revisited depending on the
+ // resolution of the relevant core issue.
+ return AggValueSlot::DoesNotOverlap;
+ }
+
+ /// Determine whether a field initialization may overlap some other object.
+ AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
+ // FIXME: These cases can result in overlap as a result of P0840R0's
+ // [[no_unique_address]] attribute. We can still infer NoOverlap in the
+ // presence of that attribute if the field is within the nvsize of its
+ // containing class, because non-virtual subobjects are initialized in
+ // address order.
+ return AggValueSlot::DoesNotOverlap;
+ }
+
+ /// Determine whether a base class initialization may overlap some other
+ /// object.
+ AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
+ const CXXRecordDecl *BaseRD,
+ bool IsVirtual);
+
+ /// Emit an aggregate assignment.
+ void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
bool IsVolatile = hasVolatileMember(EltTy);
- EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, true);
+ EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
}
- void EmitAggregateCopyCtor(Address DestPtr, Address SrcPtr,
- QualType DestTy, QualType SrcTy) {
- EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
- /*IsAssignment=*/false);
+ void EmitAggregateCopyCtor(LValue Dest, LValue Src,
+ AggValueSlot::Overlap_t MayOverlap) {
+ EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
}
/// EmitAggregateCopy - Emit an aggregate copy.
///
- /// \param isVolatile - True iff either the source or the destination is
- /// volatile.
- /// \param isAssignment - If false, allow padding to be copied. This often
- /// yields more efficient.
- void EmitAggregateCopy(Address DestPtr, Address SrcPtr,
- QualType EltTy, bool isVolatile=false,
- bool isAssignment = false);
+ /// \param isVolatile \c true iff either the source or the destination is
+ /// volatile.
+ /// \param MayOverlap Whether the tail padding of the destination might be
+ /// occupied by some other object. More efficient code can often be
+ /// generated if not.
+ void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
+ AggValueSlot::Overlap_t MayOverlap,
+ bool isVolatile = false);
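A sketch of how the new Overlap_t parameter is meant to be used; the call sites are illustrative:

    // Assignment must assume the destination's tail padding may be occupied:
    CGF.EmitAggregateCopy(Dest, Src, Ty, AggValueSlot::MayOverlap,
                          /*isVolatile=*/CGF.hasVolatileMember(Ty));
    // Constructing a complete object from scratch cannot overlap anything:
    CGF.EmitAggregateCopy(Dest, Src, Ty, AggValueSlot::DoesNotOverlap);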
/// GetAddrOfLocalVar - Return the address of a local variable.
Address GetAddrOfLocalVar(const VarDecl *VD) {
@@ -2123,27 +2335,13 @@ public:
return it->second;
}
- /// getOpaqueLValueMapping - Given an opaque value expression (which
- /// must be mapped to an l-value), return its mapping.
- const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
- assert(OpaqueValueMapping::shouldBindAsLValue(e));
+ /// Given an opaque value expression, return its LValue mapping if it exists,
+ /// otherwise create one.
+ LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
- llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
- it = OpaqueLValues.find(e);
- assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
- return it->second;
- }
-
- /// getOpaqueRValueMapping - Given an opaque value expression (which
- /// must be mapped to an r-value), return its mapping.
- const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
- assert(!OpaqueValueMapping::shouldBindAsLValue(e));
-
- llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
- it = OpaqueRValues.find(e);
- assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
- return it->second;
- }
+ /// Given an opaque value expression, return its RValue mapping if it exists,
+ /// otherwise create one.
+ RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
/// Get the index of the current ArrayInitLoopExpr, if any.
llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
@@ -2193,12 +2391,24 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitVariablyModifiedType(QualType Ty);
- /// getVLASize - Returns an LLVM value that corresponds to the size,
+ struct VlaSizePair {
+ llvm::Value *NumElts;
+ QualType Type;
+
+ VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
+ };
+
+ /// Return the number of elements for a single dimension
+ /// for the given array type.
+ VlaSizePair getVLAElements1D(const VariableArrayType *vla);
+ VlaSizePair getVLAElements1D(QualType vla);
+
+ /// Returns an LLVM value that corresponds to the size,
/// in non-variably-sized elements, of a variable length array type,
/// plus that largest non-variably-sized element type. Assumes that
/// the type has already been emitted with EmitVariablyModifiedType.
- std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
- std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
+ VlaSizePair getVLASize(const VariableArrayType *vla);
+ VlaSizePair getVLASize(QualType vla);
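A sketch of the new named-field API (the helper is hypothetical), replacing the old std::pair-returning interface:

    llvm::Value *emitVLAElementCount(CodeGenFunction &CGF,
                                     const VariableArrayType *VAT) {
      CodeGenFunction::VlaSizePair VlaSize = CGF.getVLASize(VAT);
      // NumElts: runtime count of non-variably-sized elements;
      // Type: the largest non-variably-sized element type.
      return VlaSize.NumElts;
    }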
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for an C++ member function.
@@ -2279,11 +2489,14 @@ public:
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- Address This, const CXXConstructExpr *E);
+ Address This, const CXXConstructExpr *E,
+ AggValueSlot::Overlap_t Overlap);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- Address This, CallArgList &Args);
+ Address This, CallArgList &Args,
+ AggValueSlot::Overlap_t Overlap,
+ SourceLocation Loc);
/// Emit assumption load for all bases. Must be called only on the
/// most-derived class and not while the object is under construction.
@@ -2333,13 +2546,13 @@ public:
CharUnits CookieSize = CharUnits());
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
- const Expr *Arg, bool IsDelete);
+ const CallExpr *TheCallExpr, bool IsDelete);
llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
- /// \brief Situations in which we might emit a check for the suitability of a
+ /// Situations in which we might emit a check for the suitability of a
/// pointer or glvalue.
enum TypeCheckKind {
/// Checking the operand of a load. Must be suitably sized and aligned.
@@ -2383,17 +2596,17 @@ public:
/// Determine whether the pointer type check \p TCK requires a vptr check.
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
- /// \brief Whether any type-checking sanitizers are enabled. If \c false,
+ /// Whether any type-checking sanitizers are enabled. If \c false,
/// calls to EmitTypeCheck can be skipped.
bool sanitizePerformTypeCheck() const;
- /// \brief Emit a check that \p V is the address of storage of the
+ /// Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
QualType Type, CharUnits Alignment = CharUnits::Zero(),
SanitizerSet SkippedChecks = SanitizerSet());
- /// \brief Emit a check that \p Base points into an array object, which
+ /// Emit a check that \p Base points into an array object, which
/// we can access at index \p Index. \p Accessed should be \c false if we
/// this expression is used as an lvalue, for instance in "&Arr[Idx]".
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
@@ -2434,7 +2647,7 @@ public:
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
- /// \brief Determine whether the given initializer is trivial in the sense
+ /// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool isTrivialInitializer(const Expr *Init);
@@ -2448,7 +2661,9 @@ public:
const VarDecl *Variable;
- /// The address of the alloca. Invalid if the variable was emitted
+ /// The address of the alloca for languages with explicit address space
+ /// (e.g. OpenCL) or the alloca cast to a generic pointer for address space
+ /// agnostic languages (e.g. C++). Invalid if the variable was emitted
/// as a global constant.
Address Addr;
@@ -2464,13 +2679,19 @@ public:
/// Non-null if we should use lifetime annotations.
llvm::Value *SizeForLifetimeMarkers;
+ /// Address with original alloca instruction. Invalid if the variable was
+ /// emitted as a global constant.
+ Address AllocaAddr;
+
struct Invalid {};
- AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
+ AutoVarEmission(Invalid)
+ : Variable(nullptr), Addr(Address::invalid()),
+ AllocaAddr(Address::invalid()) {}
AutoVarEmission(const VarDecl &variable)
- : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
- IsByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr) {}
+ : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
+ IsByRef(false), IsConstantAggregate(false),
+ SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -2486,11 +2707,15 @@ public:
}
/// Returns the raw, allocated address, which is not necessarily
- /// the address of the object itself.
+ /// the address of the object itself. It is cast to the default
+ /// address space for address space agnostic languages.
Address getAllocatedAddress() const {
return Addr;
}
+ /// Returns the address for the original alloca instruction.
+ Address getOriginalAllocatedAddress() const { return AllocaAddr; }
+
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
@@ -2506,6 +2731,15 @@ public:
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
QualType::DestructionKind dtorKind);
+ /// Emits the alloca and debug information for the size expressions for each
+ /// dimension of an array. It registers the association between each
+ /// (1-dimensional) QualType and its size expression's debug node, so that
+ /// CGDebugInfo can reference this node when creating the DISubrange object
+ /// to describe the array types.
+ void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
+ const VarDecl &D,
+ bool EmitDebugInfo);
+
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
@@ -2655,6 +2889,9 @@ public:
llvm::Value *EmitSEHExceptionInfo();
llvm::Value *EmitSEHAbnormalTermination();
+ /// Emit simple code for OpenMP directives in Simd-only mode.
+ void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
+
/// Scan the outlined statement for captures from the parent function. For
/// each capture, mark the capture as escaped and emit a call to
/// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
@@ -2697,7 +2934,7 @@ public:
SmallVectorImpl<llvm::Value *> &CapturedVars);
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
SourceLocation Loc);
- /// \brief Perform element by element copying of arrays with type \a
+ /// Perform element by element copying of arrays with type \a
/// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
/// generated by \a CopyGen.
///
@@ -2708,8 +2945,8 @@ public:
/// to another single array element.
void EmitOMPAggregateAssign(
Address DestAddr, Address SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(Address, Address)> &CopyGen);
- /// \brief Emit proper copying of data from one variable to another.
+ const llvm::function_ref<void(Address, Address)> CopyGen);
+ /// Emit proper copying of data from one variable to another.
///
/// \param OriginalType Original type of the copied variables.
/// \param DestAddr Destination address.
@@ -2724,7 +2961,7 @@ public:
Address DestAddr, Address SrcAddr,
const VarDecl *DestVD, const VarDecl *SrcVD,
const Expr *Copy);
- /// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
+ /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
/// \a X = \a E \a BO \a E.
///
/// \param X Value to be updated.
@@ -2740,7 +2977,7 @@ public:
std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
- const llvm::function_ref<RValue(RValue)> &CommonGen);
+ const llvm::function_ref<RValue(RValue)> CommonGen);
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
@@ -2748,7 +2985,7 @@ public:
void EmitOMPUseDevicePtrClause(
const OMPClause &C, OMPPrivateScope &PrivateScope,
const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
- /// \brief Emit code for copyin clause in \a D directive. The next code is
+ /// Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
/// \code
/// threadprivate_var1 = master_threadprivate_var1;
@@ -2760,7 +2997,7 @@ public:
/// \param D OpenMP directive possibly with 'copyin' clause(s).
/// \returns true if at least one copyin variable is found, false otherwise.
bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
- /// \brief Emit initial code for lastprivate variables. If some variable is
+ /// Emit initial code for lastprivate variables. If some variable is
/// not also firstprivate, then the default initialization is used. Otherwise
/// initialization of this variable is performed by EmitOMPFirstprivateClause
/// method.
@@ -2773,7 +3010,7 @@ public:
/// otherwise.
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
- /// \brief Emit final copying of lastprivate values to original variables at
+ /// Emit final copying of lastprivate values to original variables at
/// the end of the worksharing or simd directive.
///
/// \param D Directive that has at least one 'lastprivate' directives.
@@ -2791,8 +3028,8 @@ public:
/// linear clause.
void EmitOMPLinearClauseFinal(
const OMPLoopDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen);
- /// \brief Emit initial code for reduction variables. Creates reduction copies
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
+ /// Emit initial code for reduction variables. Creates reduction copies
/// and initializes them with the values according to OpenMP standard.
///
/// \param D Directive (possibly) with the 'reduction' clause.
@@ -2801,14 +3038,14 @@ public:
///
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
- /// \brief Emit final update of reduction values to original variables at
+ /// Emit final update of reduction values to original variables at
/// the end of the directive.
///
/// \param D Directive that has at least one 'reduction' directives.
/// \param ReductionKind The kind of reduction to perform.
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
const OpenMPDirectiveKind ReductionKind);
- /// \brief Emit initial code for linear variables. Creates private copies
+ /// Emit initial code for linear variables. Creates private copies
/// and initializes them with the values according to OpenMP standard.
///
/// \param D Directive (possibly) with the 'linear' clause.
@@ -2821,6 +3058,7 @@ public:
const OMPTaskDataTy & /*Data*/)>
TaskGenTy;
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
+ const OpenMPDirectiveKind CapturedRegion,
const RegionCodeGenTy &BodyGen,
const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
struct OMPTargetDataInfo {
@@ -2930,7 +3168,16 @@ public:
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
StringRef ParentName,
const OMPTargetSimdDirective &S);
- /// \brief Emit inner loop of the worksharing/simd construct.
+ /// Emit device code for the target teams distribute parallel for simd
+ /// directive.
+ static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S);
+
+ static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDistributeParallelForDirective &S);
+ /// Emit inner loop of the worksharing/simd construct.
///
/// \param S Directive, for which the inner loop must be emitted.
/// \param RequiresCleanup true, if directive has some associated private
@@ -2943,8 +3190,8 @@ public:
void EmitOMPInnerLoop(
const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
- const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
- const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen);
+ const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
+ const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
/// Emit initial code for loop counters of loop-based directives.
@@ -2954,7 +3201,7 @@ public:
/// Helper for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
- /// \brief Emit code for the worksharing loop-based directive.
+ /// Emit code for the worksharing loop-based directive.
/// \return true, if this construct has any lastprivate clause, false -
/// otherwise.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
@@ -2969,17 +3216,14 @@ public:
void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
void EmitOMPSimdFinal(
const OMPLoopDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen);
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
/// Emits the lvalue for the expression with possibly captured variable.
LValue EmitOMPSharedLValue(const Expr *E);
private:
- /// Helpers for blocks. Returns invoke function by \p InvokeF if it is not
- /// nullptr. It should be called without \p InvokeF if the caller does not
- /// need invoke function to be returned.
- llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info,
- llvm::Function **InvokeF = nullptr);
+ /// Helpers for blocks.
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
/// struct with the values to be passed to the OpenMP loop-related functions
struct OMPLoopArguments {
@@ -3030,7 +3274,7 @@ private:
OMPPrivateScope &LoopScope,
const OMPLoopArguments &LoopArgs,
const CodeGenLoopTy &CodeGenLoopContent);
- /// \brief Emit code for sections directive.
+ /// Emit code for sections directive.
void EmitSections(const OMPExecutableDirective &S);
public:
@@ -3071,7 +3315,7 @@ public:
///
LValue EmitLValue(const Expr *E);
- /// \brief Same as EmitLValue but additionally we generate checking code to
+ /// Same as EmitLValue but additionally we generate checking code to
/// guard against undefined behavior. This is only suitable when we know
/// that the address will be used to access the object.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
@@ -3332,6 +3576,9 @@ public:
ArrayRef<llvm::Value*> args,
const Twine &name = "");
+ SmallVector<llvm::OperandBundleDef, 1>
+ getBundlesForFunclet(llvm::Value *Callee);
+
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name = "");
@@ -3351,6 +3598,16 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
+ // These functions emit calls to the special functions of non-trivial C
+ // structs.
+ void defaultInitNonTrivialCStructVar(LValue Dst);
+ void callCStructDefaultConstructor(LValue Dst);
+ void callCStructDestructor(LValue Dst);
+ void callCStructCopyConstructor(LValue Dst, LValue Src);
+ void callCStructMoveConstructor(LValue Dst, LValue Src);
+ void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
+ void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
+
RValue
EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
const CGCallee &Callee,
@@ -3424,6 +3681,10 @@ public:
SmallVectorImpl<llvm::Value *> &Ops,
Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch);
+
+ llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
+ llvm::Value *EmitISOVolatileStore(const CallExpr *E);
+
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
@@ -3482,6 +3743,8 @@ public:
llvm::Value *EmitARCLoadWeak(Address addr);
llvm::Value *EmitARCLoadWeakRetained(Address addr);
llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
+ void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
+ void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
void EmitARCCopyWeak(Address dst, Address src);
void EmitARCMoveWeak(Address dst, Address src);
llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
@@ -3525,6 +3788,7 @@ public:
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
static Destroyer emitARCIntrinsicUse;
+ static Destroyer destroyNonTrivialCStruct;
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
llvm::Value *EmitObjCAutoreleasePoolPush();
@@ -3532,7 +3796,7 @@ public:
void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
- /// \brief Emits a reference binding to the passed in expression.
+ /// Emits a reference binding to the passed in expression.
RValue EmitReferenceBindingToExpr(const Expr *E);
//===--------------------------------------------------------------------===//
@@ -3610,6 +3874,9 @@ public:
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
llvm::Constant *addr);
+ /// Call atexit() with the function \p dtorStub.
+ void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
+
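A hedged sketch of the runtime effect of this new overload (the stub name is an illustrative assumption): the destructor stub is simply handed to atexit(), so it runs in reverse registration order during normal program termination:

    #include <cstdlib>

    extern "C" void dtor_stub_for_global();  // stub emitted by CodeGen (name assumed)

    // What registerGlobalDtorWithAtExit(dtorStub) arranges, in effect:
    static int Registered = std::atexit(dtor_stub_for_global);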
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
/// possible to prove that an initialization will be done exactly
@@ -3746,26 +4013,26 @@ public:
/// enabled, a runtime check specified by \p Kind is also emitted.
llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
- /// \brief Emit a description of a type in a format suitable for passing to
+ /// Emit a description of a type in a format suitable for passing to
/// a runtime sanitizer handler.
llvm::Constant *EmitCheckTypeDescriptor(QualType T);
- /// \brief Convert a value into a format suitable for passing to a runtime
+ /// Convert a value into a format suitable for passing to a runtime
/// sanitizer handler.
llvm::Value *EmitCheckValue(llvm::Value *V);
- /// \brief Emit a description of a source location in a format suitable for
+ /// Emit a description of a source location in a format suitable for
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
- /// \brief Create a basic block that will call a handler function in a
+ /// Create a basic block that will call a handler function in a
/// sanitizer runtime with the provided arguments, and create a conditional
/// branch to it.
void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs);
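A hedged sketch of a typical EmitCheck call site (the values and argument lists are assumptions): each guarded condition is paired with the SanitizerMask that enables it, and the emitted branch falls through on success or transfers to the handler block on failure:

    // Inside some CodeGenFunction member, guarding a pointer dereference:
    llvm::Value *NonNull = Builder.CreateIsNotNull(Ptr);
    EmitCheck(std::make_pair(NonNull, SanitizerKind::Null),
              SanitizerHandler::TypeMismatch,
              StaticArgs,   // e.g. an EmitCheckSourceLocation() constant
              DynamicArgs); // e.g. the pointer value being checked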
- /// \brief Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
+ /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
/// if Cond is false.
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
llvm::ConstantInt *TypeId, llvm::Value *Ptr,
@@ -3775,21 +4042,21 @@ public:
/// checking is enabled. Otherwise, just emit an unreachable instruction.
void EmitUnreachable(SourceLocation Loc);
- /// \brief Create a basic block that will call the trap intrinsic, and emit a
+ /// Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
void EmitTrapCheck(llvm::Value *Checked);
- /// \brief Emit a call to trap or debugtrap and attach function attribute
+ /// Emit a call to trap or debugtrap and attach function attribute
/// "trap-func-name" if specified.
llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
- /// \brief Emit a stub for the cross-DSO CFI check function.
+ /// Emit a stub for the cross-DSO CFI check function.
void EmitCfiCheckStub();
- /// \brief Emit a cross-DSO CFI failure handling function.
+ /// Emit a cross-DSO CFI failure handling function.
void EmitCfiCheckFail();
- /// \brief Create a check for a function parameter that may potentially be
+ /// Create a check for a function parameter that may potentially be
/// declared as non-null.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
AbstractCallee AC, unsigned ParmNum);
@@ -3829,10 +4096,10 @@ private:
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
SmallVectorImpl<llvm::Value *>::iterator &AI);
- /// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
+ /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
/// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
- void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
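As a hedged illustration of ABIArgInfo::Expand (the type and the lowered IR below are assumptions about a typical target): the aggregate argument is decomposed into one IR argument per scalar field:

    struct Point { int X; float Y; };
    void takePoint(Point P);
    // Under the Expand ABI kind, takePoint(P) lowers roughly to
    //   call void @takePoint(i32 %P.X, float %P.Y)
    // i.e. ExpandTypeToArgs appends the two scalars to IRCallArgs and
    // advances IRCallArgPos by two.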
@@ -3844,7 +4111,7 @@ private:
std::string &ConstraintStr,
SourceLocation Loc);
- /// \brief Attempts to statically evaluate the object size of E. If that
+ /// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
/// pass_object_size aware.
///
@@ -3853,7 +4120,7 @@ private:
llvm::IntegerType *ResType,
llvm::Value *EmittedE);
- /// \brief Emits the size of E, as required by __builtin_object_size. This
+ /// Emits the size of E, as required by __builtin_object_size. This
/// function is aware of pass_object_size parameters, and will act accordingly
/// if E is a parameter with the pass_object_size attribute.
llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
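A hedged source-level example of the features these two helpers serve (identifiers are illustrative): __builtin_object_size folds to a constant when the size is statically known, and a pass_object_size parameter carries the caller-side size into the callee so the builtin can still resolve there:

    #include <cstddef>

    size_t usableSize(const char *p __attribute__((pass_object_size(0)))) {
      // Resolves against the size recorded at the call site.
      return __builtin_object_size(p, 0);
    }

    char buf[16];
    size_t n = usableSize(buf);  // yields 16 without any runtime code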
@@ -3973,6 +4240,48 @@ public:
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
+ struct TargetMultiVersionResolverOption {
+ llvm::Function *Function;
+ TargetAttr::ParsedTargetAttr ParsedAttribute;
+ unsigned Priority;
+ TargetMultiVersionResolverOption(
+ const TargetInfo &TargInfo, llvm::Function *F,
+ const clang::TargetAttr::ParsedTargetAttr &PT)
+ : Function(F), ParsedAttribute(PT), Priority(0u) {
+ for (StringRef Feat : PT.Features)
+ Priority = std::max(Priority,
+ TargInfo.multiVersionSortPriority(Feat.substr(1)));
+
+ if (!PT.Architecture.empty())
+ Priority = std::max(Priority,
+ TargInfo.multiVersionSortPriority(PT.Architecture));
+ }
+
+ bool operator>(const TargetMultiVersionResolverOption &Other) const {
+ return Priority > Other.Priority;
+ }
+ };
+ void EmitTargetMultiVersionResolver(
+ llvm::Function *Resolver,
+ ArrayRef<TargetMultiVersionResolverOption> Options);
+
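For context, a hedged example of the source feature this resolver emission serves (function name and target strings are illustrative): each target-attributed variant becomes one TargetMultiVersionResolverOption, and the resolver dispatches to the highest-priority variant the host CPU supports. Note that Feat.substr(1) above strips the leading '+' carried by parsed feature strings before they are ranked:

    __attribute__((target("default")))      int fastSum(const int *V, int N);
    __attribute__((target("avx2")))         int fastSum(const int *V, int N);
    __attribute__((target("arch=skylake"))) int fastSum(const int *V, int N);
    // Calls to fastSum route through the ifunc-style resolver built by
    // EmitTargetMultiVersionResolver from these three options.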
+ struct CPUDispatchMultiVersionResolverOption {
+ llvm::Function *Function;
+ // Note: EmitX86CPUSupports only has 32 bits available, so we store the mask
+ // as 32 bits here. When 64-bit support is added to __builtin_cpu_supports,
+ // this can be extended to 64 bits.
+ uint32_t FeatureMask;
+ CPUDispatchMultiVersionResolverOption(llvm::Function *F, uint64_t Mask)
+ : Function(F), FeatureMask(static_cast<uint32_t>(Mask)) {}
+ bool operator>(const CPUDispatchMultiVersionResolverOption &Other) const {
+ return FeatureMask > Other.FeatureMask;
+ }
+ };
+ void EmitCPUDispatchMultiVersionResolver(
+ llvm::Function *Resolver,
+ ArrayRef<CPUDispatchMultiVersionResolverOption> Options);
+ static uint32_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
+
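Similarly, a hedged sketch of the ICC-compatible cpu_dispatch form backed here (CPU names follow the attribute's documented spellings; bodies are placeholders): each cpu_specific definition contributes a feature mask, and the resolver prefers the option whose mask __builtin_cpu_supports reports as fully available:

    __attribute__((cpu_specific(atom)))             void work(void) { /* Atom-tuned */ }
    __attribute__((cpu_specific(core_2nd_gen_avx))) void work(void) { /* AVX path */ }
    __attribute__((cpu_dispatch(atom, core_2nd_gen_avx))) void work(void);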
private:
QualType getVarArgType(const Expr *Arg);
@@ -3988,110 +4297,35 @@ private:
llvm::Value *EmitX86CpuIs(StringRef CPUStr);
llvm::Value *EmitX86CpuSupports(const CallExpr *E);
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
+ llvm::Value *EmitX86CpuSupports(uint32_t Mask);
llvm::Value *EmitX86CpuInit();
+ llvm::Value *
+ FormResolverCondition(const TargetMultiVersionResolverOption &RO);
};
-/// Helper class with most of the code for saving a value for a
-/// conditional expression cleanup.
-struct DominatingLLVMValue {
- typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
-
- /// Answer whether the given value needs extra work to be saved.
- static bool needsSaving(llvm::Value *value) {
- // If it's not an instruction, we don't need to save.
- if (!isa<llvm::Instruction>(value)) return false;
-
- // If it's an instruction in the entry block, we don't need to save.
- llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
- return (block != &block->getParent()->getEntryBlock());
- }
-
- /// Try to save the given value.
- static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
- if (!needsSaving(value)) return saved_type(value, false);
-
- // Otherwise, we need an alloca.
- auto align = CharUnits::fromQuantity(
- CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
- Address alloca =
- CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
- CGF.Builder.CreateStore(value, alloca);
-
- return saved_type(alloca.getPointer(), true);
- }
-
- static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
- // If the value says it wasn't saved, trust that it's still dominating.
- if (!value.getInt()) return value.getPointer();
-
- // Otherwise, it should be an alloca instruction, as set up in save().
- auto alloca = cast<llvm::AllocaInst>(value.getPointer());
- return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
- }
-};
-
-/// A partial specialization of DominatingValue for llvm::Values that
-/// might be llvm::Instructions.
-template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
- typedef T *type;
- static type restore(CodeGenFunction &CGF, saved_type value) {
- return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
- }
-};
-
-/// A specialization of DominatingValue for Address.
-template <> struct DominatingValue<Address> {
- typedef Address type;
-
- struct saved_type {
- DominatingLLVMValue::saved_type SavedValue;
- CharUnits Alignment;
- };
-
- static bool needsSaving(type value) {
- return DominatingLLVMValue::needsSaving(value.getPointer());
- }
- static saved_type save(CodeGenFunction &CGF, type value) {
- return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getAlignment() };
- }
- static type restore(CodeGenFunction &CGF, saved_type value) {
- return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.Alignment);
- }
-};
-
-/// A specialization of DominatingValue for RValue.
-template <> struct DominatingValue<RValue> {
- typedef RValue type;
- class saved_type {
- enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
- AggregateAddress, ComplexAddress };
+inline DominatingLLVMValue::saved_type
+DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
- llvm::Value *Value;
- unsigned K : 3;
- unsigned Align : 29;
- saved_type(llvm::Value *v, Kind k, unsigned a = 0)
- : Value(v), K(k), Align(a) {}
+ // Otherwise, we need an alloca.
+ auto align = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ Address alloca =
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
- public:
- static bool needsSaving(RValue value);
- static saved_type save(CodeGenFunction &CGF, RValue value);
- RValue restore(CodeGenFunction &CGF);
+ return saved_type(alloca.getPointer(), true);
+}
- // implementations in CGCleanup.cpp
- };
+inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
+ saved_type value) {
+ // If the value says it wasn't saved, trust that it's still dominating.
+ if (!value.getInt()) return value.getPointer();
- static bool needsSaving(type value) {
- return saved_type::needsSaving(value);
- }
- static saved_type save(CodeGenFunction &CGF, type value) {
- return saved_type::save(CGF, value);
- }
- static type restore(CodeGenFunction &CGF, saved_type value) {
- return value.restore(CGF);
- }
-};
+ // Otherwise, it should be an alloca instruction, as set up in save().
+ auto alloca = cast<llvm::AllocaInst>(value.getPointer());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
+}
} // end namespace CodeGen
} // end namespace clang
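As a closing note on the DominatingLLVMValue definitions moved above: a minimal hedged sketch of the spill/reload pattern they implement for conditional cleanups, assuming a CodeGenFunction &CGF and an llvm::Value *V already in scope:

    // Spills V to a "cond-cleanup.save" alloca only when V would not
    // dominate the point where the cleanup is eventually emitted.
    CodeGen::DominatingLLVMValue::saved_type Saved =
        CodeGen::DominatingLLVMValue::save(CGF, V);
    // ...later, on a conditionally reached path...
    llvm::Value *Reloaded = CodeGen::DominatingLLVMValue::restore(CGF, Saved);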