Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp')
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp | 92
1 file changed, 45 insertions(+), 47 deletions(-)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index 03fc0ec7ff54..80a64d8e4cdd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -94,8 +94,8 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
     // and cast value to correct type
     Address Temporary = CreateMemTemp(SubExpr->getType());
     EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
-    llvm::Value *BitCast =
-        Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT));
+    llvm::Value *BitCast = Builder.CreateBitCast(
+        Temporary.emitRawPointer(*this), ConvertType(ArgQT));
     Args.add(RValue::get(BitCast), ArgQT);

     // Create char array to store type encoding
@@ -204,11 +204,11 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
   ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
   const ParmVarDecl *argDecl = *PI++;
   QualType ArgQT = argDecl->getType().getUnqualifiedType();
-  Args.add(RValue::get(Objects.getPointer()), ArgQT);
+  Args.add(RValue::get(Objects, *this), ArgQT);
   if (DLE) {
     argDecl = *PI++;
     ArgQT = argDecl->getType().getUnqualifiedType();
-    Args.add(RValue::get(Keys.getPointer()), ArgQT);
+    Args.add(RValue::get(Keys, *this), ArgQT);
   }
   argDecl = *PI;
   ArgQT = argDecl->getType().getUnqualifiedType();
@@ -586,7 +586,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
         method->getMethodFamily() == OMF_retain) {
       if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
         LValue lvalue = EmitLValue(lvalueExpr);
-        llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
+        llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
         return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
       }
     }
@@ -827,7 +827,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
   //                  sizeof (Type of Ivar), isAtomic, false);
   CallArgList args;

-  llvm::Value *dest = CGF.ReturnValue.getPointer();
+  llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF);
   args.add(RValue::get(dest), Context.VoidPtrTy);

   args.add(RValue::get(src), Context.VoidPtrTy);
@@ -899,9 +899,13 @@ namespace {
                          const ObjCPropertyImplDecl *propImpl);

   private:
+    LLVM_PREFERRED_TYPE(StrategyKind)
     unsigned Kind : 8;
+    LLVM_PREFERRED_TYPE(bool)
     unsigned IsAtomic : 1;
+    LLVM_PREFERRED_TYPE(bool)
     unsigned IsCopy : 1;
+    LLVM_PREFERRED_TYPE(bool)
     unsigned HasStrong : 1;

     CharUnits IvarSize;
@@ -1143,8 +1147,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
       callCStructCopyConstructor(Dst, Src);
     } else {
       ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
-      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar,
-                                    AtomicHelperFn);
+      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
+                                    ivar, AtomicHelperFn);
     }
     return;
   }
@@ -1159,7 +1163,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
     }
     else {
       ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
-      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
+      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                     ivar, AtomicHelperFn);
     }
     return;
@@ -1185,7 +1189,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
     llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);

     // Perform an atomic load.  This does not impose ordering constraints.
-    Address ivarAddr = LV.getAddress(*this);
+    Address ivarAddr = LV.getAddress();
     ivarAddr = ivarAddr.withElementType(bitcastType);
     llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
     load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1283,14 +1287,14 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
   case TEK_Scalar: {
     llvm::Value *value;
     if (propType->isReferenceType()) {
-      value = LV.getAddress(*this).getPointer();
+      value = LV.getAddress().emitRawPointer(*this);
     } else {
       // We want to load and autoreleaseReturnValue ARC __weak ivars.
       if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
         if (getLangOpts().ObjCAutoRefCount) {
           value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
         } else {
-          value = EmitARCLoadWeak(LV.getAddress(*this));
+          value = EmitARCLoadWeak(LV.getAddress());
         }

       // Otherwise we want to do a simple load, suppressing the
@@ -1473,7 +1477,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,

     LValue ivarLValue =
       EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
-    Address ivarAddr = ivarLValue.getAddress(*this);
+    Address ivarAddr = ivarLValue.getAddress();

     // Currently, all atomic accesses have to be through integer
     // types, so there's no point in trying to pick a prettier type.
@@ -1651,7 +1655,7 @@ namespace {
     void Emit(CodeGenFunction &CGF, Flags flags) override {
       LValue lvalue
         = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
-      CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer,
+      CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                       flags.isForNormalCleanup() && useEHCleanupForArray);
     }
   };
@@ -1718,7 +1722,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
       LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
                                     LoadObjCSelf(), Ivar, 0);
       EmitAggExpr(IvarInit->getInit(),
-                  AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed,
+                  AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
@@ -1785,11 +1789,10 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
   static const unsigned NumItems = 16;

   // Fetch the countByEnumeratingWithState:objects:count: selector.
-  IdentifierInfo *II[] = {
-    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
-    &CGM.getContext().Idents.get("objects"),
-    &CGM.getContext().Idents.get("count")
-  };
+  const IdentifierInfo *II[] = {
+      &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+      &CGM.getContext().Idents.get("objects"),
+      &CGM.getContext().Idents.get("count")};
   Selector FastEnumSel =
       CGM.getContext().Selectors.getSelector(std::size(II), &II[0]);

@@ -1817,16 +1820,14 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
   CallArgList Args;

   // The first argument is a temporary of the enumeration-state type.
-  Args.add(RValue::get(StatePtr.getPointer()),
-           getContext().getPointerType(StateTy));
+  Args.add(RValue::get(StatePtr, *this), getContext().getPointerType(StateTy));

   // The second argument is a temporary array with space for NumItems
   // pointers.  We'll actually be loading elements from the array
   // pointer written into the control state; this buffer is so that
   // collections that *aren't* backed by arrays can still queue up
   // batches of elements.
-  Args.add(RValue::get(ItemsPtr.getPointer()),
-           getContext().getPointerType(ItemsTy));
+  Args.add(RValue::get(ItemsPtr, *this), getContext().getPointerType(ItemsTy));

   // The third argument is the capacity of that temporary array.
   llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
@@ -1951,7 +1952,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
     Builder.CreateLoad(StateItemsPtr, "stateitems");

   // Fetch the value at the current index from the buffer.
-  llvm::Value *CurrentItemPtr = Builder.CreateGEP(
+  llvm::Value *CurrentItemPtr = Builder.CreateInBoundsGEP(
       ObjCIdType, EnumStateItems, index, "currentitem.ptr");
   llvm::Value *CurrentItem =
       Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign());
@@ -2027,7 +2028,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){

   // First we check in the local buffer.
   llvm::Value *indexPlusOne =
-      Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));
+      Builder.CreateNUWAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));

   // If we haven't overrun the buffer yet, we can continue.
   // Set the branch weights based on the simplifying assumption that this is
@@ -2194,7 +2195,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
   if (!fn)
     fn = getARCIntrinsic(IntID, CGF.CGM);

-  return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
+  return CGF.EmitNounwindRuntimeCall(fn, addr.emitRawPointer(CGF));
 }

 /// Perform an operation having the following signature:
@@ -2212,9 +2213,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
   llvm::Type *origType = value->getType();

   llvm::Value *args[] = {
-    CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
-    CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
-  };
+      CGF.Builder.CreateBitCast(addr.emitRawPointer(CGF), CGF.Int8PtrPtrTy),
+      CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)};
   llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);

   if (ignored) return nullptr;
@@ -2233,9 +2233,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
     fn = getARCIntrinsic(IntID, CGF.CGM);

   llvm::Value *args[] = {
-    CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
-    CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
-  };
+      CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), CGF.Int8PtrPtrTy),
+      CGF.Builder.CreateBitCast(src.emitRawPointer(CGF), CGF.Int8PtrPtrTy)};

   CGF.EmitNounwindRuntimeCall(fn, args);
 }
@@ -2486,9 +2485,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
     fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM);

   llvm::Value *args[] = {
-    Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
-    Builder.CreateBitCast(value, Int8PtrTy)
-  };
+      Builder.CreateBitCast(addr.emitRawPointer(*this), Int8PtrPtrTy),
+      Builder.CreateBitCast(value, Int8PtrTy)};
   EmitNounwindRuntimeCall(fn, args);

   if (ignored) return nullptr;
@@ -2510,7 +2508,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
       !isBlock &&
       (dst.getAlignment().isZero() ||
        dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
-    return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored);
+    return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
   }

   // Otherwise, split it out.
@@ -2639,7 +2637,7 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
   if (!fn)
     fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);

-  EmitNounwindRuntimeCall(fn, addr.getPointer());
+  EmitNounwindRuntimeCall(fn, addr.emitRawPointer(*this));
 }

 /// void \@objc_moveWeak(i8** %dest, i8** %src)
@@ -2721,7 +2719,7 @@ llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
   CGObjCRuntime &Runtime = CGM.getObjCRuntime();
   llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
   // [NSAutoreleasePool alloc]
-  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+  const IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
   Selector AllocSel = getContext().Selectors.getSelector(0, &II);
   CallArgList Args;
   RValue AllocRV =
@@ -2768,7 +2766,7 @@ llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
 /// Produce the code to do a primitive release.
 /// [tmp drain];
 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
-  IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+  const IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
   Selector DrainSel = getContext().Selectors.getSelector(0, &II);
   CallArgList Args;
   CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
@@ -2900,7 +2898,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
       result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
     } else {
       assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
-      result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF));
+      result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
     }
     return TryEmitResult(result, !shouldRetain);
   }
@@ -2924,7 +2922,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                SourceLocation()).getScalarVal();

     // Set the source pointer to NULL.
-    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv);
+    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);

     return TryEmitResult(result, true);
   }
@@ -3716,8 +3714,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
   if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
     return HelperFn;

-  IdentifierInfo *II
-    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+  const IdentifierInfo *II =
+      &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

   QualType ReturnTy = C.VoidTy;
   QualType DestTy = C.getPointerType(Ty);
@@ -3814,7 +3812,7 @@ llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
   if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
     return HelperFn;

-  IdentifierInfo *II =
+  const IdentifierInfo *II =
       &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

   QualType ReturnTy = C.VoidTy;
@@ -3908,10 +3906,10 @@ llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
 llvm::Value *
 CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
   // Get selectors for retain/autorelease.
-  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+  const IdentifierInfo *CopyID = &getContext().Idents.get("copy");
   Selector CopySelector =
       getContext().Selectors.getNullarySelector(CopyID);
-  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+  const IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
   Selector AutoreleaseSelector =
       getContext().Selectors.getNullarySelector(AutoreleaseID);
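Nearly every hunk above is the same mechanical migration: code that used to pull a raw llvm::Value out of a CodeGen Address with addr.getPointer() now calls addr.emitRawPointer(CGF), and LValue::getAddress() no longer takes a CodeGenFunction argument. The remaining hunks constify IdentifierInfo pointers, add LLVM_PREFERRED_TYPE annotations to bit-fields, and switch the fast-enumeration lowering to CreateInBoundsGEP and CreateNUWAdd, which add inbounds and no-unsigned-wrap flags to the emitted IR. The snippet below is a minimal standalone mock, not clang's real CodeGen classes, and the suggested rationale (that producing the raw pointer may itself need to emit instructions, for example under pointer authentication) is an assumption about upstream's intent; it only illustrates why the call now threads the emitting function through.

// Illustrative mock only -- these are NOT clang's real CodeGen classes.
// It sketches the calling pattern the diff migrates to: turning an Address
// into a raw pointer goes through the function being emitted
// (addr.emitRawPointer(CGF)) instead of a context-free addr.getPointer().
#include <cassert>
#include <iostream>

struct Value {};                 // stand-in for llvm::Value

struct CodeGenFunction {         // stand-in for clang::CodeGen::CodeGenFunction
  int emittedInstructions = 0;
  Value *materialize(Value *v) {
    ++emittedInstructions;       // pretend an instruction was emitted here
    return v;
  }
};

struct Address {                 // stand-in for clang::CodeGen::Address
  Value *pointer;
  // Producing the raw pointer may itself require emitting IR (assumption),
  // so the emitting function is passed in rather than just returning the
  // stored pointer.
  Value *emitRawPointer(CodeGenFunction &CGF) { return CGF.materialize(pointer); }
};

int main() {
  Value ivarStorage;
  Address addr{&ivarStorage};
  CodeGenFunction CGF;

  Value *raw = addr.emitRawPointer(CGF);   // was: addr.getPointer()
  assert(raw == &ivarStorage);
  std::cout << "mock emitted " << CGF.emittedInstructions << " instruction(s)\n";
  return 0;
}

Compiled standalone, the mock mirrors the call-site shape seen in the diff: static helpers pass the CodeGenFunction they were given (addr.emitRawPointer(CGF)), while CodeGenFunction members pass themselves (addr.emitRawPointer(*this)).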
