Diffstat (limited to 'lib/CodeGen/CGAtomic.cpp')
-rw-r--r-- | lib/CodeGen/CGAtomic.cpp | 67
1 file changed, 41 insertions, 26 deletions
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 6862fd811186..b34bcdc1fc38 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -18,6 +18,7 @@
 #include "TargetInfo.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Intrinsics.h"
@@ -186,7 +187,7 @@ namespace {
     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                      SourceLocation loc, bool AsValue) const;
 
-    /// \brief Converts a rvalue to integer value.
+    /// Converts a rvalue to integer value.
     llvm::Value *convertRValueToInt(RValue RVal) const;
 
     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
@@ -207,13 +208,13 @@ namespace {
                               LVal.getBaseInfo(), LVal.getTBAAInfo());
     }
 
-    /// \brief Emits atomic load.
+    /// Emits atomic load.
     /// \returns Loaded value.
     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                           bool AsValue, llvm::AtomicOrdering AO,
                           bool IsVolatile);
 
-    /// \brief Emits atomic compare-and-exchange sequence.
+    /// Emits atomic compare-and-exchange sequence.
     /// \param Expected Expected value.
     /// \param Desired Desired value.
     /// \param Success Atomic ordering for success operation.
@@ -229,13 +230,13 @@ namespace {
                                   llvm::AtomicOrdering::SequentiallyConsistent,
                               bool IsWeak = false);
 
-    /// \brief Emits atomic update.
+    /// Emits atomic update.
     /// \param AO Atomic ordering.
     /// \param UpdateOp Update operation for the current lvalue.
     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
                           bool IsVolatile);
-    /// \brief Emits atomic update.
+    /// Emits atomic update.
     /// \param AO Atomic ordering.
     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                           bool IsVolatile);
@@ -243,25 +244,25 @@ namespace {
     /// Materialize an atomic r-value in atomic-layout memory.
     Address materializeRValue(RValue rvalue) const;
 
-    /// \brief Creates temp alloca for intermediate operations on atomic value.
+    /// Creates temp alloca for intermediate operations on atomic value.
     Address CreateTempAlloca() const;
 
   private:
     bool requiresMemSetZero(llvm::Type *type) const;
 
-    /// \brief Emits atomic load as a libcall.
+    /// Emits atomic load as a libcall.
     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                llvm::AtomicOrdering AO, bool IsVolatile);
-    /// \brief Emits atomic load as LLVM instruction.
+    /// Emits atomic load as LLVM instruction.
     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
-    /// \brief Emits atomic compare-and-exchange op as a libcall.
+    /// Emits atomic compare-and-exchange op as a libcall.
     llvm::Value *EmitAtomicCompareExchangeLibcall(
         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
         llvm::AtomicOrdering Success =
             llvm::AtomicOrdering::SequentiallyConsistent,
         llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
-    /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
+    /// Emits atomic compare-and-exchange op as LLVM instruction.
     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
         llvm::AtomicOrdering Success =
@@ -269,19 +270,19 @@ namespace {
         llvm::AtomicOrdering Failure =
             llvm::AtomicOrdering::SequentiallyConsistent,
         bool IsWeak = false);
-    /// \brief Emit atomic update as libcalls.
+    /// Emit atomic update as libcalls.
     void
     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
                             bool IsVolatile);
-    /// \brief Emit atomic update as LLVM instructions.
+    /// Emit atomic update as LLVM instructions.
     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
                             bool IsVolatile);
-    /// \brief Emit atomic update as libcalls.
+    /// Emit atomic update as libcalls.
     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile);
-    /// \brief Emit atomic update as LLVM instructions.
+    /// Emit atomic update as LLVM instructions.
     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
                             bool IsVolatile);
   };
@@ -590,11 +591,13 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
     break;
 
   case AtomicExpr::AO__opencl_atomic_fetch_min:
+  case AtomicExpr::AO__atomic_fetch_min:
     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                   : llvm::AtomicRMWInst::UMin;
     break;
 
   case AtomicExpr::AO__opencl_atomic_fetch_max:
+  case AtomicExpr::AO__atomic_fetch_max:
     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                   : llvm::AtomicRMWInst::UMax;
     break;
@@ -751,6 +754,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   Address Dest = Address::invalid();
   Address Ptr = EmitPointerWithAlignment(E->getPtr());
 
+  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
+      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
+    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
+    EmitAtomicInit(E->getVal1(), lvalue);
+    return RValue::get(nullptr);
+  }
+
   CharUnits sizeChars, alignChars;
   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
   uint64_t Size = sizeChars.getQuantity();
@@ -758,12 +768,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
 
-  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
-      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
-    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
-    EmitAtomicInit(E->getVal1(), lvalue);
-    return RValue::get(nullptr);
-  }
+  if (UseLibcall)
+    CGM.getDiags().Report(E->getLocStart(), diag::warn_atomic_op_misaligned);
 
   llvm::Value *Order = EmitScalarExpr(E->getOrder());
   llvm::Value *Scope =
@@ -855,6 +861,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__atomic_or_fetch:
   case AtomicExpr::AO__atomic_xor_fetch:
   case AtomicExpr::AO__atomic_nand_fetch:
+  case AtomicExpr::AO__atomic_fetch_min:
+  case AtomicExpr::AO__atomic_fetch_max:
     Val1 = EmitValToTemp(*this, E->getVal1());
     break;
   }
@@ -909,6 +917,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
+  case AtomicExpr::AO__atomic_fetch_min:
+  case AtomicExpr::AO__atomic_fetch_max:
    // For these, only library calls for certain sizes exist.
    UseOptimizedLibcall = true;
    break;
@@ -1091,6 +1101,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                       MemTy, E->getExprLoc(), sizeChars);
     break;
+  case AtomicExpr::AO__atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_min:
     LibCallName = E->getValueType()->isSignedIntegerType()
                       ? "__atomic_fetch_min"
@@ -1098,6 +1109,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                       LoweredMemTy, E->getExprLoc(), sizeChars);
     break;
+  case AtomicExpr::AO__atomic_fetch_max:
   case AtomicExpr::AO__opencl_atomic_fetch_max:
     LibCallName = E->getValueType()->isSignedIntegerType()
                       ? "__atomic_fetch_max"
@@ -1160,7 +1172,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   if (UseOptimizedLibcall && Res.getScalarVal()) {
     llvm::Value *ResVal = Res.getScalarVal();
     if (PostOp) {
-      llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
+      llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
       ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
     }
     if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
@@ -1508,11 +1520,13 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   // which means that the caller is responsible for having zeroed
   // any padding. Just do an aggregate copy of that type.
   if (rvalue.isAggregate()) {
-    CGF.EmitAggregateCopy(getAtomicAddress(),
-                          rvalue.getAggregateAddress(),
-                          getAtomicType(),
-                          (rvalue.isVolatileQualified()
-                           || LVal.isVolatileQualified()));
+    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
+    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
+                                    getAtomicType());
+    bool IsVolatile = rvalue.isVolatileQualified() ||
+                      LVal.isVolatileQualified();
+    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
+                          AggValueSlot::DoesNotOverlap, IsVolatile);
     return;
   }
 
@@ -2007,6 +2021,7 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
                                         AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased,
+                                        AggValueSlot::DoesNotOverlap,
                                         Zeroed ? AggValueSlot::IsZeroed :
                                                  AggValueSlot::IsNotZeroed);
 