author     Dimitry Andric <dim@FreeBSD.org>    2022-07-04 19:20:19 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2023-02-08 19:02:26 +0000
commit     81ad626541db97eb356e2c1d4a20eb2a26a766ab (patch)
tree       311b6a8987c32b1e1dcbab65c54cfac3fdb56175 /contrib/llvm-project/llvm/lib/Bitcode/Writer
parent     5fff09660e06a66bed6482da9c70df328e16bbb6 (diff)
parent     145449b1e420787bb99721a429341fa6be3adfb6 (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Bitcode/Writer')
-rw-r--r--  contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp      |  90
-rw-r--r--  contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp  |   1
-rw-r--r--  contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp    | 136
3 files changed, 130 insertions, 97 deletions
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 4bba0b356675..941ed808bab1 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -19,6 +19,8 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -610,6 +612,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
switch (Kind) {
case Attribute::Alignment:
return bitc::ATTR_KIND_ALIGNMENT;
+ case Attribute::AllocAlign:
+ return bitc::ATTR_KIND_ALLOC_ALIGN;
case Attribute::AllocSize:
return bitc::ATTR_KIND_ALLOC_SIZE;
case Attribute::AlwaysInline:
@@ -644,6 +648,10 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_JUMP_TABLE;
case Attribute::MinSize:
return bitc::ATTR_KIND_MIN_SIZE;
+ case Attribute::AllocatedPointer:
+ return bitc::ATTR_KIND_ALLOCATED_POINTER;
+ case Attribute::AllocKind:
+ return bitc::ATTR_KIND_ALLOC_KIND;
case Attribute::Naked:
return bitc::ATTR_KIND_NAKED;
case Attribute::Nest:
@@ -688,6 +696,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_NO_PROFILE;
case Attribute::NoUnwind:
return bitc::ATTR_KIND_NO_UNWIND;
+ case Attribute::NoSanitizeBounds:
+ return bitc::ATTR_KIND_NO_SANITIZE_BOUNDS;
case Attribute::NoSanitizeCoverage:
return bitc::ATTR_KIND_NO_SANITIZE_COVERAGE;
case Attribute::NullPointerIsValid:
@@ -764,6 +774,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_BYREF;
case Attribute::MustProgress:
return bitc::ATTR_KIND_MUSTPROGRESS;
+ case Attribute::PresplitCoroutine:
+ return bitc::ATTR_KIND_PRESPLIT_COROUTINE;
case Attribute::EndAttrKinds:
llvm_unreachable("Can not encode end-attribute kinds marker.");
case Attribute::None:
@@ -1013,6 +1025,8 @@ void ModuleBitcodeWriter::writeTypeTable() {
TypeVals.push_back(true);
break;
}
+ case Type::DXILPointerTyID:
+ llvm_unreachable("DXIL pointers cannot be added to IR modules");
}
// Emit the finished record.
@@ -1211,6 +1225,14 @@ static StringEncoding getStringEncoding(StringRef Str) {
return SE_Fixed7;
}
+static_assert(sizeof(GlobalValue::SanitizerMetadata) <= sizeof(unsigned),
+ "Sanitizer Metadata is too large for naive serialization.");
+static unsigned
+serializeSanitizerMetadata(const GlobalValue::SanitizerMetadata &Meta) {
+ return Meta.NoAddress | (Meta.NoHWAddress << 1) |
+ (Meta.NoMemtag << 2) | (Meta.IsDynInit << 3);
+}
+
/// Emit top-level description of module, including target triple, inline asm,
/// descriptors for global variables, and function prototype info.
/// Returns the bit offset to backpatch with the location of the real VST.
@@ -1334,7 +1356,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// GLOBALVAR: [strtab offset, strtab size, type, isconst, initid,
// linkage, alignment, section, visibility, threadlocal,
// unnamed_addr, externally_initialized, dllstorageclass,
- // comdat, attributes, DSO_Local]
+ // comdat, attributes, DSO_Local, GlobalSanitizer]
Vals.push_back(addToStrtab(GV.getName()));
Vals.push_back(GV.getName().size());
Vals.push_back(VE.getTypeID(GV.getValueType()));
@@ -1350,10 +1372,8 @@ void ModuleBitcodeWriter::writeModuleInfo() {
GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None ||
GV.isExternallyInitialized() ||
GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass ||
- GV.hasComdat() ||
- GV.hasAttributes() ||
- GV.isDSOLocal() ||
- GV.hasPartition()) {
+ GV.hasComdat() || GV.hasAttributes() || GV.isDSOLocal() ||
+ GV.hasPartition() || GV.hasSanitizerMetadata()) {
Vals.push_back(getEncodedVisibility(GV));
Vals.push_back(getEncodedThreadLocalMode(GV));
Vals.push_back(getEncodedUnnamedAddr(GV));
@@ -1367,6 +1387,10 @@ void ModuleBitcodeWriter::writeModuleInfo() {
Vals.push_back(GV.isDSOLocal());
Vals.push_back(addToStrtab(GV.getPartition()));
Vals.push_back(GV.getPartition().size());
+
+ Vals.push_back((GV.hasSanitizerMetadata() ? serializeSanitizerMetadata(
+ GV.getSanitizerMetadata())
+ : 0));
} else {
AbbrevToUse = SimpleGVarAbbrev;
}
@@ -1817,6 +1841,7 @@ void ModuleBitcodeWriter::writeDISubprogram(const DISubprogram *N,
Record.push_back(N->getThisAdjustment());
Record.push_back(VE.getMetadataOrNullID(N->getThrownTypes().get()));
Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+ Record.push_back(VE.getMetadataOrNullID(N->getRawTargetFuncName()));
Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev);
Record.clear();
@@ -2649,6 +2674,9 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
Record.push_back(VE.getValueID(C->getOperand(1)));
Record.push_back(CE->getPredicate());
break;
+ case Instruction::InsertValue:
+ report_fatal_error("insertvalue constexprs not supported");
+ break;
}
} else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
Code = bitc::CST_CODE_BLOCKADDRESS;
@@ -3068,6 +3096,10 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Bitfield::set<APV::ExplicitType>(Record, true);
Bitfield::set<APV::SwiftError>(Record, AI.isSwiftError());
Vals.push_back(Record);
+
+ unsigned AS = AI.getAddressSpace();
+ if (AS != M.getDataLayout().getAllocaAddrSpace())
+ Vals.push_back(AS);
break;
}
@@ -3347,8 +3379,10 @@ void ModuleBitcodeWriter::writeFunction(
bool NeedsMetadataAttachment = F.hasMetadata();
DILocation *LastDL = nullptr;
+ SmallSetVector<Function *, 4> BlockAddressUsers;
+
// Finally, emit all the instructions, in order.
- for (const BasicBlock &BB : F)
+ for (const BasicBlock &BB : F) {
for (const Instruction &I : BB) {
writeInstruction(I, InstID, Vals);
@@ -3380,6 +3414,32 @@ void ModuleBitcodeWriter::writeFunction(
LastDL = DL;
}
+ if (BlockAddress *BA = BlockAddress::lookup(&BB)) {
+ SmallVector<Value *> Worklist{BA};
+ SmallPtrSet<Value *, 8> Visited{BA};
+ while (!Worklist.empty()) {
+ Value *V = Worklist.pop_back_val();
+ for (User *U : V->users()) {
+ if (auto *I = dyn_cast<Instruction>(U)) {
+ Function *P = I->getFunction();
+ if (P != &F)
+ BlockAddressUsers.insert(P);
+ } else if (isa<Constant>(U) && !isa<GlobalValue>(U) &&
+ Visited.insert(U).second)
+ Worklist.push_back(U);
+ }
+ }
+ }
+ }
+
+ if (!BlockAddressUsers.empty()) {
+ Vals.resize(BlockAddressUsers.size());
+ for (auto I : llvm::enumerate(BlockAddressUsers))
+ Vals[I.index()] = VE.getValueID(I.value());
+ Stream.EmitRecord(bitc::FUNC_CODE_BLOCKADDR_USERS, Vals);
+ Vals.clear();
+ }
+
// Emit names for all the instructions etc.
if (auto *Symtab = F.getValueSymbolTable())
writeFunctionLevelValueSymbolTable(*Symtab);
@@ -4375,7 +4435,7 @@ void ModuleBitcodeWriter::writeModuleHash(size_t BlockStartPos) {
uint32_t Vals[5];
Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&(Buffer)[BlockStartPos],
Buffer.size() - BlockStartPos));
- StringRef Hash = Hasher.result();
+ std::array<uint8_t, 20> Hash = Hasher.result();
for (int Pos = 0; Pos < 20; Pos += 4) {
Vals[Pos / 4] = support::endian::read32be(Hash.data() + Pos);
}
@@ -4855,9 +4915,15 @@ static const char *getSectionNameForBitcode(const Triple &T) {
case Triple::GOFF:
llvm_unreachable("GOFF is not yet implemented");
break;
+ case Triple::SPIRV:
+ llvm_unreachable("SPIRV is not yet implemented");
+ break;
case Triple::XCOFF:
llvm_unreachable("XCOFF is not yet implemented");
break;
+ case Triple::DXContainer:
+ llvm_unreachable("DXContainer is not yet implemented");
+ break;
}
llvm_unreachable("Unimplemented ObjectFormatType");
}
@@ -4874,9 +4940,15 @@ static const char *getSectionNameForCommandline(const Triple &T) {
case Triple::GOFF:
llvm_unreachable("GOFF is not yet implemented");
break;
+ case Triple::SPIRV:
+ llvm_unreachable("SPIRV is not yet implemented");
+ break;
case Triple::XCOFF:
llvm_unreachable("XCOFF is not yet implemented");
break;
+ case Triple::DXContainer:
+ llvm_unreachable("DXC is not yet implemented");
+ break;
}
llvm_unreachable("Unimplemented ObjectFormatType");
}
@@ -4931,7 +5003,7 @@ void llvm::embedBitcodeInModule(llvm::Module &M, llvm::MemoryBufferRef Buf,
ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, UsedElementType));
if (llvm::GlobalVariable *Old =
M.getGlobalVariable("llvm.embedded.module", true)) {
- assert(Old->hasOneUse() &&
+ assert(Old->hasZeroLiveUses() &&
"llvm.embedded.module can only be used once in llvm.compiler.used");
GV->takeName(Old);
Old->eraseFromParent();
@@ -4954,7 +5026,7 @@ void llvm::embedBitcodeInModule(llvm::Module &M, llvm::MemoryBufferRef Buf,
UsedArray.push_back(
ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, UsedElementType));
if (llvm::GlobalVariable *Old = M.getGlobalVariable("llvm.cmdline", true)) {
- assert(Old->hasOneUse() &&
+ assert(Old->hasZeroLiveUses() &&
"llvm.cmdline can only be used once in llvm.compiler.used");
GV->takeName(Old);
Old->eraseFromParent();
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
index d884415aafd5..536d04f2fe26 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
@@ -13,7 +13,6 @@
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Bitcode/BitcodeWriter.h"
-#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index 01f7e85bd60e..727ec2e02cc2 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -50,17 +50,12 @@ namespace {
struct OrderMap {
DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
- unsigned LastGlobalConstantID = 0;
unsigned LastGlobalValueID = 0;
OrderMap() = default;
- bool isGlobalConstant(unsigned ID) const {
- return ID <= LastGlobalConstantID;
- }
-
bool isGlobalValue(unsigned ID) const {
- return ID <= LastGlobalValueID && !isGlobalConstant(ID);
+ return ID <= LastGlobalValueID;
}
unsigned size() const { return IDs.size(); }
@@ -84,7 +79,7 @@ static void orderValue(const Value *V, OrderMap &OM) {
return;
if (const Constant *C = dyn_cast<Constant>(V)) {
- if (C->getNumOperands() && !isa<GlobalValue>(C)) {
+ if (C->getNumOperands()) {
for (const Value *Op : C->operands())
if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
orderValue(Op, OM);
@@ -104,39 +99,40 @@ static OrderMap orderModule(const Module &M) {
// and ValueEnumerator::incorporateFunction().
OrderMap OM;
- // In the reader, initializers of GlobalValues are set *after* all the
- // globals have been read. Rather than awkwardly modeling this behaviour
- // directly in predictValueUseListOrderImpl(), just assign IDs to
- // initializers of GlobalValues before GlobalValues themselves to model this
- // implicitly.
- for (const GlobalVariable &G : M.globals())
- if (G.hasInitializer())
- if (!isa<GlobalValue>(G.getInitializer()))
- orderValue(G.getInitializer(), OM);
- for (const GlobalAlias &A : M.aliases())
- if (!isa<GlobalValue>(A.getAliasee()))
- orderValue(A.getAliasee(), OM);
- for (const GlobalIFunc &I : M.ifuncs())
- if (!isa<GlobalValue>(I.getResolver()))
- orderValue(I.getResolver(), OM);
- for (const Function &F : M) {
- for (const Use &U : F.operands())
- if (!isa<GlobalValue>(U.get()))
- orderValue(U.get(), OM);
- }
+ // Initializers of GlobalValues are processed in
+ // BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather
+ // than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
+ // by giving IDs in reverse order.
+ //
+ // Since GlobalValues never reference each other directly (just through
+ // initializers), their relative IDs only matter for determining order of
+ // uses in their initializers.
+ for (const GlobalVariable &G : reverse(M.globals()))
+ orderValue(&G, OM);
+ for (const GlobalAlias &A : reverse(M.aliases()))
+ orderValue(&A, OM);
+ for (const GlobalIFunc &I : reverse(M.ifuncs()))
+ orderValue(&I, OM);
+ for (const Function &F : reverse(M))
+ orderValue(&F, OM);
+ OM.LastGlobalValueID = OM.size();
- // As constants used in metadata operands are emitted as module-level
- // constants, we must order them before other operands. Also, we must order
- // these before global values, as these will be read before setting the
- // global values' initializers. The latter matters for constants which have
- // uses towards other constants that are used as initializers.
auto orderConstantValue = [&OM](const Value *V) {
- if ((isa<Constant>(V) && !isa<GlobalValue>(V)) || isa<InlineAsm>(V))
+ if (isa<Constant>(V) || isa<InlineAsm>(V))
orderValue(V, OM);
};
+
for (const Function &F : M) {
if (F.isDeclaration())
continue;
+ // Here we need to match the union of ValueEnumerator::incorporateFunction()
+ // and WriteFunction(). Basic blocks are implicitly declared before
+ // anything else (by declaring their size).
+ for (const BasicBlock &BB : F)
+ orderValue(&BB, OM);
+
+ // Metadata used by instructions is decoded before the actual instructions,
+ // so visit any constants used by it beforehand.
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
for (const Value *V : I.operands()) {
@@ -151,49 +147,17 @@ static OrderMap orderModule(const Module &M) {
}
}
}
- }
- OM.LastGlobalConstantID = OM.size();
-
- // Initializers of GlobalValues are processed in
- // BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather
- // than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
- // by giving IDs in reverse order.
- //
- // Since GlobalValues never reference each other directly (just through
- // initializers), their relative IDs only matter for determining order of
- // uses in their initializers.
- for (const Function &F : M)
- orderValue(&F, OM);
- for (const GlobalAlias &A : M.aliases())
- orderValue(&A, OM);
- for (const GlobalIFunc &I : M.ifuncs())
- orderValue(&I, OM);
- for (const GlobalVariable &G : M.globals())
- orderValue(&G, OM);
- OM.LastGlobalValueID = OM.size();
- for (const Function &F : M) {
- if (F.isDeclaration())
- continue;
- // Here we need to match the union of ValueEnumerator::incorporateFunction()
- // and WriteFunction(). Basic blocks are implicitly declared before
- // anything else (by declaring their size).
- for (const BasicBlock &BB : F)
- orderValue(&BB, OM);
for (const Argument &A : F.args())
orderValue(&A, OM);
for (const BasicBlock &BB : F)
for (const Instruction &I : BB) {
for (const Value *Op : I.operands())
- if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
- isa<InlineAsm>(*Op))
- orderValue(Op, OM);
+ orderConstantValue(Op);
if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
orderValue(SVI->getShuffleMaskForBitcode(), OM);
- }
- for (const BasicBlock &BB : F)
- for (const Instruction &I : BB)
orderValue(&I, OM);
+ }
}
return OM;
}
@@ -223,18 +187,6 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
auto LID = OM.lookup(LU->getUser()).first;
auto RID = OM.lookup(RU->getUser()).first;
- // Global values are processed in reverse order.
- //
- // Moreover, initializers of GlobalValues are set *after* all the globals
- // have been read (despite having earlier IDs). Rather than awkwardly
- // modeling this behaviour here, orderModule() has assigned IDs to
- // initializers of GlobalValues before GlobalValues themselves.
- if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID)) {
- if (LID == RID)
- return LU->getOperandNo() > RU->getOperandNo();
- return LID < RID;
- }
-
// If ID is 4, then expect: 7 6 5 1 2 3.
if (LID < RID) {
if (RID <= ID)
@@ -257,9 +209,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
return LU->getOperandNo() > RU->getOperandNo();
});
- if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) {
- return L.second < R.second;
- }))
+ if (llvm::is_sorted(List, llvm::less_second()))
// Order is already correct.
return;
@@ -319,16 +269,25 @@ static UseListOrderStack predictUseListOrder(const Module &M) {
predictValueUseListOrder(&A, &F, OM, Stack);
for (const BasicBlock &BB : F)
for (const Instruction &I : BB) {
- for (const Value *Op : I.operands())
+ for (const Value *Op : I.operands()) {
if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
predictValueUseListOrder(Op, &F, OM, Stack);
+ if (const auto *MAV = dyn_cast<MetadataAsValue>(Op)) {
+ if (const auto *VAM =
+ dyn_cast<ValueAsMetadata>(MAV->getMetadata())) {
+ predictValueUseListOrder(VAM->getValue(), &F, OM, Stack);
+ } else if (const auto *AL =
+ dyn_cast<DIArgList>(MAV->getMetadata())) {
+ for (const auto *VAM : AL->getArgs())
+ predictValueUseListOrder(VAM->getValue(), &F, OM, Stack);
+ }
+ }
+ }
if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
predictValueUseListOrder(SVI->getShuffleMaskForBitcode(), &F, OM,
Stack);
- }
- for (const BasicBlock &BB : F)
- for (const Instruction &I : BB)
predictValueUseListOrder(&I, &F, OM, Stack);
+ }
}
// Visit globals last, since the module-level use-list block will be seen
@@ -939,9 +898,12 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
I != E; ++I)
if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress.
EnumerateValue(*I);
- if (auto *CE = dyn_cast<ConstantExpr>(C))
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::ShuffleVector)
EnumerateValue(CE->getShuffleMaskForBitcode());
+ if (auto *GEP = dyn_cast<GEPOperator>(CE))
+ EnumerateType(GEP->getSourceElementType());
+ }
// Finally, add the value. Doing this could make the ValueID reference be
// dangling, don't reuse it.