author    Dimitry Andric <dim@FreeBSD.org>   2020-07-26 19:36:28 +0000
committer Dimitry Andric <dim@FreeBSD.org>   2020-07-26 19:36:28 +0000
commit    cfca06d7963fa0909f90483b42a6d7d194d01e08 (patch)
tree      209fb2a2d68f8f277793fc8df46c753d31bc853b /llvm/lib/Bitcode/Writer
parent    706b4fc47bbc608932d3b491ae19a3b9cde9497b (diff)
Diffstat (limited to 'llvm/lib/Bitcode/Writer')
 -rw-r--r--  llvm/lib/Bitcode/Writer/BitcodeWriter.cpp    | 170
 -rw-r--r--  llvm/lib/Bitcode/Writer/ValueEnumerator.cpp  |  49
 -rw-r--r--  llvm/lib/Bitcode/Writer/ValueEnumerator.h    |   2
 3 files changed, 145 insertions(+), 76 deletions(-)
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index dcff7c421fc4d..9c15a5f9f193a 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -31,7 +31,6 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
@@ -374,7 +373,7 @@ private:
void writeModuleConstants();
bool pushValueAndType(const Value *V, unsigned InstID,
SmallVectorImpl<unsigned> &Vals);
- void writeOperandBundles(ImmutableCallSite CS, unsigned InstID);
+ void writeOperandBundles(const CallBase &CB, unsigned InstID);
void pushValue(const Value *V, unsigned InstID,
SmallVectorImpl<unsigned> &Vals);
void pushValueSigned(const Value *V, unsigned InstID,
@@ -648,6 +647,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_NO_INLINE;
case Attribute::NoRecurse:
return bitc::ATTR_KIND_NO_RECURSE;
+ case Attribute::NoMerge:
+ return bitc::ATTR_KIND_NO_MERGE;
case Attribute::NonLazyBind:
return bitc::ATTR_KIND_NON_LAZY_BIND;
case Attribute::NonNull:
@@ -666,6 +667,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_NOCF_CHECK;
case Attribute::NoUnwind:
return bitc::ATTR_KIND_NO_UNWIND;
+ case Attribute::NullPointerIsValid:
+ return bitc::ATTR_KIND_NULL_POINTER_IS_VALID;
case Attribute::OptForFuzzing:
return bitc::ATTR_KIND_OPT_FOR_FUZZING;
case Attribute::OptimizeForSize:
@@ -726,10 +729,17 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_IMMARG;
case Attribute::SanitizeMemTag:
return bitc::ATTR_KIND_SANITIZE_MEMTAG;
+ case Attribute::Preallocated:
+ return bitc::ATTR_KIND_PREALLOCATED;
+ case Attribute::NoUndef:
+ return bitc::ATTR_KIND_NOUNDEF;
case Attribute::EndAttrKinds:
llvm_unreachable("Can not encode end-attribute kinds marker.");
case Attribute::None:
llvm_unreachable("Can not encode none-attribute.");
+ case Attribute::EmptyKey:
+ case Attribute::TombstoneKey:
+ llvm_unreachable("Trying to encode EmptyKey/TombstoneKey");
}
llvm_unreachable("Trying to encode unknown attribute");
@@ -875,6 +885,7 @@ void ModuleBitcodeWriter::writeTypeTable() {
switch (T->getTypeID()) {
case Type::VoidTyID: Code = bitc::TYPE_CODE_VOID; break;
case Type::HalfTyID: Code = bitc::TYPE_CODE_HALF; break;
+ case Type::BFloatTyID: Code = bitc::TYPE_CODE_BFLOAT; break;
case Type::FloatTyID: Code = bitc::TYPE_CODE_FLOAT; break;
case Type::DoubleTyID: Code = bitc::TYPE_CODE_DOUBLE; break;
case Type::X86_FP80TyID: Code = bitc::TYPE_CODE_X86_FP80; break;
@@ -946,15 +957,16 @@ void ModuleBitcodeWriter::writeTypeTable() {
AbbrevToUse = ArrayAbbrev;
break;
}
- case Type::VectorTyID: {
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID: {
VectorType *VT = cast<VectorType>(T);
// VECTOR [numelts, eltty] or
// [numelts, eltty, scalable]
Code = bitc::TYPE_CODE_VECTOR;
- TypeVals.push_back(VT->getNumElements());
+ TypeVals.push_back(VT->getElementCount().Min);
TypeVals.push_back(VE.getTypeID(VT->getElementType()));
- if (VT->isScalable())
- TypeVals.push_back(VT->isScalable());
+ if (isa<ScalableVectorType>(VT))
+ TypeVals.push_back(true);
break;
}
}
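For reference, the VECTOR type record emitted by the new FixedVectorTyID/ScalableVectorTyID cases is [numelts, eltty] with a trailing scalable flag only for scalable vectors. A minimal reader-side sketch of how such a record could be interpreted (not LLVM's BitcodeReader; DecodedVectorType and decodeVectorRecord are hypothetical names):

#include <cstdint>
#include <vector>

// Record[0] = element count (the minimum count for scalable vectors),
// Record[1] = element type ID, Record[2] (optional, nonzero) = scalable.
struct DecodedVectorType {
  uint64_t NumElts;
  uint64_t EltTypeID;
  bool Scalable;
};

DecodedVectorType decodeVectorRecord(const std::vector<uint64_t> &Record) {
  DecodedVectorType VT;
  VT.NumElts = Record[0];
  VT.EltTypeID = Record[1];
  VT.Scalable = Record.size() > 2 && Record[2] != 0;
  return VT;
}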
@@ -1028,7 +1040,8 @@ static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
}
static uint64_t getEncodedGVarFlags(GlobalVarSummary::GVarFlags Flags) {
- uint64_t RawFlags = Flags.MaybeReadOnly | (Flags.MaybeWriteOnly << 1);
+ uint64_t RawFlags = Flags.MaybeReadOnly | (Flags.MaybeWriteOnly << 1) |
+ (Flags.Constant << 2) | Flags.VCallVisibility << 3;
return RawFlags;
}
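getEncodedGVarFlags now packs four summary fields into one word: bit 0 MaybeReadOnly, bit 1 MaybeWriteOnly, bit 2 Constant, and VCallVisibility starting at bit 3. A hedged sketch of the matching unpacking (decodeGVarFlags and DecodedGVarFlags are hypothetical, not part of the patch):

#include <cstdint>

struct DecodedGVarFlags {
  bool MaybeReadOnly;       // bit 0
  bool MaybeWriteOnly;      // bit 1
  bool Constant;            // bit 2
  unsigned VCallVisibility; // bits 3 and up, as written above
};

DecodedGVarFlags decodeGVarFlags(uint64_t RawFlags) {
  DecodedGVarFlags F;
  F.MaybeReadOnly = (RawFlags & 1) != 0;
  F.MaybeWriteOnly = ((RawFlags >> 1) & 1) != 0;
  F.Constant = ((RawFlags >> 2) & 1) != 0;
  F.VCallVisibility = static_cast<unsigned>(RawFlags >> 3);
  return F;
}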
@@ -1168,12 +1181,12 @@ void ModuleBitcodeWriter::writeModuleInfo() {
std::map<std::string, unsigned> GCMap;
unsigned MaxAlignment = 0;
unsigned MaxGlobalType = 0;
- for (const GlobalValue &GV : M.globals()) {
+ for (const GlobalVariable &GV : M.globals()) {
MaxAlignment = std::max(MaxAlignment, GV.getAlignment());
MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getValueType()));
if (GV.hasSection()) {
// Give section names unique ID's.
- unsigned &Entry = SectionMap[GV.getSection()];
+ unsigned &Entry = SectionMap[std::string(GV.getSection())];
if (!Entry) {
writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, GV.getSection(),
0 /*TODO*/);
@@ -1185,7 +1198,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
MaxAlignment = std::max(MaxAlignment, F.getAlignment());
if (F.hasSection()) {
// Give section names unique ID's.
- unsigned &Entry = SectionMap[F.getSection()];
+ unsigned &Entry = SectionMap[std::string(F.getSection())];
if (!Entry) {
writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, F.getSection(),
0 /*TODO*/);
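Both hunks above use the same idiom for section names: SectionMap (keyed by std::string after this change) hands out 1-based IDs on first use, with 0 reserved for globals and functions that have no section. A standalone sketch of that idiom (getOrAssignSectionID is a hypothetical helper; the real writer also emits a MODULE_CODE_SECTIONNAME record when a new name is first seen):

#include <map>
#include <string>

unsigned getOrAssignSectionID(std::map<std::string, unsigned> &SectionMap,
                              const std::string &Section) {
  unsigned &Entry = SectionMap[Section];
  if (!Entry)
    Entry = SectionMap.size(); // key was just inserted, so size() is a 1-based ID
  return Entry;
}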
@@ -1275,7 +1288,8 @@ void ModuleBitcodeWriter::writeModuleInfo() {
(VE.getValueID(GV.getInitializer()) + 1));
Vals.push_back(getEncodedLinkage(GV));
Vals.push_back(Log2_32(GV.getAlignment())+1);
- Vals.push_back(GV.hasSection() ? SectionMap[GV.getSection()] : 0);
+ Vals.push_back(GV.hasSection() ? SectionMap[std::string(GV.getSection())]
+ : 0);
if (GV.isThreadLocal() ||
GV.getVisibility() != GlobalValue::DefaultVisibility ||
GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None ||
@@ -1320,7 +1334,8 @@ void ModuleBitcodeWriter::writeModuleInfo() {
Vals.push_back(getEncodedLinkage(F));
Vals.push_back(VE.getAttributeListID(F.getAttributes()));
Vals.push_back(Log2_32(F.getAlignment())+1);
- Vals.push_back(F.hasSection() ? SectionMap[F.getSection()] : 0);
+ Vals.push_back(F.hasSection() ? SectionMap[std::string(F.getSection())]
+ : 0);
Vals.push_back(getEncodedVisibility(F));
Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0);
Vals.push_back(getEncodedUnnamedAddr(F));
@@ -1506,29 +1521,46 @@ void ModuleBitcodeWriter::writeGenericDINode(const GenericDINode *N,
Record.clear();
}
-static uint64_t rotateSign(int64_t I) {
- uint64_t U = I;
- return I < 0 ? ~(U << 1) : U << 1;
-}
-
void ModuleBitcodeWriter::writeDISubrange(const DISubrange *N,
SmallVectorImpl<uint64_t> &Record,
unsigned Abbrev) {
- const uint64_t Version = 1 << 1;
+ const uint64_t Version = 2 << 1;
Record.push_back((uint64_t)N->isDistinct() | Version);
Record.push_back(VE.getMetadataOrNullID(N->getRawCountNode()));
- Record.push_back(rotateSign(N->getLowerBound()));
+ Record.push_back(VE.getMetadataOrNullID(N->getRawLowerBound()));
+ Record.push_back(VE.getMetadataOrNullID(N->getRawUpperBound()));
+ Record.push_back(VE.getMetadataOrNullID(N->getRawStride()));
Stream.EmitRecord(bitc::METADATA_SUBRANGE, Record, Abbrev);
Record.clear();
}
+static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
+ if ((int64_t)V >= 0)
+ Vals.push_back(V << 1);
+ else
+ Vals.push_back((-V << 1) | 1);
+}
+
+static void emitWideAPInt(SmallVectorImpl<uint64_t> &Vals, const APInt &A) {
+ // We have an arbitrary precision integer value to write whose
+ // bit width is > 64. However, in canonical unsigned integer
+ // format it is likely that the high bits are going to be zero.
+ // So, we only write the number of active words.
+ unsigned NumWords = A.getActiveWords();
+ const uint64_t *RawData = A.getRawData();
+ for (unsigned i = 0; i < NumWords; i++)
+ emitSignedInt64(Vals, RawData[i]);
+}
+
void ModuleBitcodeWriter::writeDIEnumerator(const DIEnumerator *N,
SmallVectorImpl<uint64_t> &Record,
unsigned Abbrev) {
- Record.push_back((N->isUnsigned() << 1) | N->isDistinct());
- Record.push_back(rotateSign(N->getValue()));
+ const uint64_t IsBigInt = 1 << 2;
+ Record.push_back(IsBigInt | (N->isUnsigned() << 1) | N->isDistinct());
+ Record.push_back(N->getValue().getBitWidth());
Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+ emitWideAPInt(Record, N->getValue());
Stream.EmitRecord(bitc::METADATA_ENUMERATOR, Record, Abbrev);
Record.clear();
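emitSignedInt64 (moved up here so the metadata writers can share it) stores a signed value with its sign in the low bit, so small magnitudes stay small under the VBR encodings used for these records; emitWideAPInt then applies it word by word, writing only the active words of a wide APInt. A self-contained sketch of the encode and a matching decode (decodeSigned is hypothetical; the reader has its own equivalent):

#include <cassert>
#include <cstdint>

// Mirrors emitSignedInt64 above: non-negative V becomes V << 1,
// negative V becomes (-V << 1) | 1.
uint64_t encodeSigned(int64_t V) {
  uint64_t U = static_cast<uint64_t>(V);
  return V >= 0 ? U << 1 : ((0 - U) << 1) | 1;
}

int64_t decodeSigned(uint64_t E) {
  int64_t Mag = static_cast<int64_t>(E >> 1);
  return (E & 1) ? -Mag : Mag;
}

int main() {
  assert(decodeSigned(encodeSigned(0)) == 0);
  assert(decodeSigned(encodeSigned(42)) == 42);
  assert(decodeSigned(encodeSigned(-5)) == -5);
  return 0;
}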
@@ -1597,6 +1629,7 @@ void ModuleBitcodeWriter::writeDICompositeType(
Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get()));
Record.push_back(VE.getMetadataOrNullID(N->getRawIdentifier()));
Record.push_back(VE.getMetadataOrNullID(N->getDiscriminator()));
+ Record.push_back(VE.getMetadataOrNullID(N->getRawDataLocation()));
Stream.EmitRecord(bitc::METADATA_COMPOSITE_TYPE, Record, Abbrev);
Record.clear();
@@ -1661,6 +1694,9 @@ void ModuleBitcodeWriter::writeDICompileUnit(const DICompileUnit *N,
Record.push_back(N->getSplitDebugInlining());
Record.push_back(N->getDebugInfoForProfiling());
Record.push_back((unsigned)N->getNameTableKind());
+ Record.push_back(N->getRangesBaseAddress());
+ Record.push_back(VE.getMetadataOrNullID(N->getRawSysRoot()));
+ Record.push_back(VE.getMetadataOrNullID(N->getRawSDK()));
Stream.EmitRecord(bitc::METADATA_COMPILE_UNIT, Record, Abbrev);
Record.clear();
@@ -1776,6 +1812,7 @@ void ModuleBitcodeWriter::writeDIModule(const DIModule *N,
Record.push_back(N->isDistinct());
for (auto &I : N->operands())
Record.push_back(VE.getMetadataOrNullID(I));
+ Record.push_back(N->getLineNo());
Stream.EmitRecord(bitc::METADATA_MODULE, Record, Abbrev);
Record.clear();
@@ -1787,6 +1824,7 @@ void ModuleBitcodeWriter::writeDITemplateTypeParameter(
Record.push_back(N->isDistinct());
Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
Record.push_back(VE.getMetadataOrNullID(N->getType()));
+ Record.push_back(N->isDefault());
Stream.EmitRecord(bitc::METADATA_TEMPLATE_TYPE, Record, Abbrev);
Record.clear();
@@ -1799,6 +1837,7 @@ void ModuleBitcodeWriter::writeDITemplateValueParameter(
Record.push_back(N->getTag());
Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
Record.push_back(VE.getMetadataOrNullID(N->getType()));
+ Record.push_back(N->isDefault());
Record.push_back(VE.getMetadataOrNullID(N->getValue()));
Stream.EmitRecord(bitc::METADATA_TEMPLATE_VALUE, Record, Abbrev);
@@ -2258,13 +2297,6 @@ void ModuleBitcodeWriter::writeSyncScopeNames() {
Stream.ExitBlock();
}
-static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
- if ((int64_t)V >= 0)
- Vals.push_back(V << 1);
- else
- Vals.push_back((-V << 1) | 1);
-}
-
void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
bool isGlobal) {
if (FirstVal == LastVal) return;
@@ -2351,21 +2383,14 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
Code = bitc::CST_CODE_INTEGER;
AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
} else { // Wide integers, > 64 bits in size.
- // We have an arbitrary precision integer value to write whose
- // bit width is > 64. However, in canonical unsigned integer
- // format it is likely that the high bits are going to be zero.
- // So, we only write the number of active words.
- unsigned NWords = IV->getValue().getActiveWords();
- const uint64_t *RawWords = IV->getValue().getRawData();
- for (unsigned i = 0; i != NWords; ++i) {
- emitSignedInt64(Record, RawWords[i]);
- }
+ emitWideAPInt(Record, IV->getValue());
Code = bitc::CST_CODE_WIDE_INTEGER;
}
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
Code = bitc::CST_CODE_FLOAT;
Type *Ty = CFP->getType();
- if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) {
+ if (Ty->isHalfTy() || Ty->isBFloatTy() || Ty->isFloatTy() ||
+ Ty->isDoubleTy()) {
Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
} else if (Ty->isX86_FP80Ty()) {
// api needed to prevent premature destruction
@@ -2412,7 +2437,7 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
} else if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(C)) {
Code = bitc::CST_CODE_DATA;
- Type *EltTy = CDS->getType()->getElementType();
+ Type *EltTy = CDS->getElementType();
if (isa<IntegerType>(EltTy)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
Record.push_back(CDS->getElementAsInteger(i));
@@ -2504,7 +2529,7 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
}
Record.push_back(VE.getValueID(C->getOperand(0)));
Record.push_back(VE.getValueID(C->getOperand(1)));
- Record.push_back(VE.getValueID(C->getOperand(2)));
+ Record.push_back(VE.getValueID(CE->getShuffleMaskForBitcode()));
break;
case Instruction::ICmp:
case Instruction::FCmp:
@@ -2566,10 +2591,10 @@ bool ModuleBitcodeWriter::pushValueAndType(const Value *V, unsigned InstID,
return false;
}
-void ModuleBitcodeWriter::writeOperandBundles(ImmutableCallSite CS,
+void ModuleBitcodeWriter::writeOperandBundles(const CallBase &CS,
unsigned InstID) {
SmallVector<unsigned, 64> Record;
- LLVMContext &C = CS.getInstruction()->getContext();
+ LLVMContext &C = CS.getContext();
for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
const auto &Bundle = CS.getOperandBundleAt(i);
@@ -2691,7 +2716,8 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
pushValueAndType(I.getOperand(0), InstID, Vals);
pushValue(I.getOperand(1), InstID, Vals);
- pushValue(I.getOperand(2), InstID, Vals);
+ pushValue(cast<ShuffleVectorInst>(I).getShuffleMaskForBitcode(), InstID,
+ Vals);
break;
case Instruction::ICmp:
case Instruction::FCmp: {
@@ -2756,11 +2782,11 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
case Instruction::Invoke: {
const InvokeInst *II = cast<InvokeInst>(&I);
- const Value *Callee = II->getCalledValue();
+ const Value *Callee = II->getCalledOperand();
FunctionType *FTy = II->getFunctionType();
if (II->hasOperandBundles())
- writeOperandBundles(II, InstID);
+ writeOperandBundles(*II, InstID);
Code = bitc::FUNC_CODE_INST_INVOKE;
@@ -2832,11 +2858,11 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
}
case Instruction::CallBr: {
const CallBrInst *CBI = cast<CallBrInst>(&I);
- const Value *Callee = CBI->getCalledValue();
+ const Value *Callee = CBI->getCalledOperand();
FunctionType *FTy = CBI->getFunctionType();
if (CBI->hasOperandBundles())
- writeOperandBundles(CBI, InstID);
+ writeOperandBundles(*CBI, InstID);
Code = bitc::FUNC_CODE_INST_CALLBR;
@@ -2993,7 +3019,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
FunctionType *FTy = CI.getFunctionType();
if (CI.hasOperandBundles())
- writeOperandBundles(&CI, InstID);
+ writeOperandBundles(CI, InstID);
Code = bitc::FUNC_CODE_INST_CALL;
@@ -3010,7 +3036,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(Flags);
Vals.push_back(VE.getTypeID(FTy));
- pushValueAndType(CI.getCalledValue(), InstID, Vals); // Callee
+ pushValueAndType(CI.getCalledOperand(), InstID, Vals); // Callee
// Emit value #'s for the fixed parameters.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
@@ -3552,6 +3578,29 @@ static void writeFunctionTypeMetadataRecords(BitstreamWriter &Stream,
FS->type_test_assume_const_vcalls());
WriteConstVCallVec(bitc::FS_TYPE_CHECKED_LOAD_CONST_VCALL,
FS->type_checked_load_const_vcalls());
+
+ auto WriteRange = [&](ConstantRange Range) {
+ Range = Range.sextOrTrunc(FunctionSummary::ParamAccess::RangeWidth);
+ assert(Range.getLower().getNumWords() == 1);
+ assert(Range.getUpper().getNumWords() == 1);
+ emitSignedInt64(Record, *Range.getLower().getRawData());
+ emitSignedInt64(Record, *Range.getUpper().getRawData());
+ };
+
+ if (!FS->paramAccesses().empty()) {
+ Record.clear();
+ for (auto &Arg : FS->paramAccesses()) {
+ Record.push_back(Arg.ParamNo);
+ WriteRange(Arg.Use);
+ Record.push_back(Arg.Calls.size());
+ for (auto &Call : Arg.Calls) {
+ Record.push_back(Call.ParamNo);
+ Record.push_back(Call.Callee);
+ WriteRange(Call.Offsets);
+ }
+ }
+ Stream.EmitRecord(bitc::FS_PARAM_ACCESS, Record);
+ }
}
/// Collect type IDs from type tests used by function.
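The FS_PARAM_ACCESS record emitted above is one flat sequence: per parameter it holds ParamNo, the two sign-rotated bounds of the Use range, the number of calls, and then a (ParamNo, Callee, Offsets range) group per call. A hedged reader-side sketch of walking that layout (parseParamAccess, ParamAccess, and ParamCall are hypothetical names, not LLVM API):

#include <cstdint>
#include <vector>

// Inverse of emitSignedInt64, as sketched earlier.
static int64_t decodeSigned(uint64_t E) {
  int64_t Mag = static_cast<int64_t>(E >> 1);
  return (E & 1) ? -Mag : Mag;
}

struct ParamCall { uint64_t ParamNo, CalleeID; int64_t OffLo, OffHi; };
struct ParamAccess {
  uint64_t ParamNo;
  int64_t UseLo, UseHi;
  std::vector<ParamCall> Calls;
};

std::vector<ParamAccess> parseParamAccess(const std::vector<uint64_t> &R) {
  std::vector<ParamAccess> Out;
  for (size_t i = 0; i < R.size();) {
    ParamAccess PA;
    PA.ParamNo = R[i++];
    PA.UseLo = decodeSigned(R[i++]);
    PA.UseHi = decodeSigned(R[i++]);
    uint64_t NumCalls = R[i++];
    for (uint64_t c = 0; c < NumCalls; ++c, i += 4)
      PA.Calls.push_back({R[i], R[i + 1],
                          decodeSigned(R[i + 2]), decodeSigned(R[i + 3])});
    Out.push_back(PA);
  }
  return Out;
}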
@@ -3882,6 +3931,9 @@ void ModuleBitcodeWriterBase::writePerModuleGlobalValueSummary() {
NameVals.clear();
}
+ Stream.EmitRecord(bitc::FS_BLOCK_COUNT,
+ ArrayRef<uint64_t>{Index->getBlockCount()});
+
Stream.ExitBlock();
}
@@ -3893,20 +3945,7 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
ArrayRef<uint64_t>{ModuleSummaryIndex::BitcodeSummaryVersion});
// Write the index flags.
- uint64_t Flags = 0;
- if (Index.withGlobalValueDeadStripping())
- Flags |= 0x1;
- if (Index.skipModuleByDistributedBackend())
- Flags |= 0x2;
- if (Index.hasSyntheticEntryCounts())
- Flags |= 0x4;
- if (Index.enableSplitLTOUnit())
- Flags |= 0x8;
- if (Index.partiallySplitLTOUnits())
- Flags |= 0x10;
- if (Index.withAttributePropagation())
- Flags |= 0x20;
- Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Flags});
+ Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Index.getFlags()});
for (const auto &GVI : valueIds()) {
Stream.EmitRecord(bitc::FS_VALUE_GUID,
@@ -4178,6 +4217,9 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
}
}
+ Stream.EmitRecord(bitc::FS_BLOCK_COUNT,
+ ArrayRef<uint64_t>{Index.getBlockCount()});
+
Stream.ExitBlock();
}
@@ -4200,7 +4242,7 @@ static void writeIdentificationBlock(BitstreamWriter &Stream) {
Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_EPOCH));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
auto EpochAbbrev = Stream.EmitAbbrev(std::move(Abbv));
- SmallVector<unsigned, 1> Vals = {bitc::BITCODE_CURRENT_EPOCH};
+ constexpr std::array<unsigned, 1> Vals = {{bitc::BITCODE_CURRENT_EPOCH}};
Stream.EmitRecord(bitc::IDENTIFICATION_CODE_EPOCH, Vals, EpochAbbrev);
Stream.ExitBlock();
}
diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index f59c906c7b757..8bdddc27e95ab 100644
--- a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -88,11 +88,16 @@ static void orderValue(const Value *V, OrderMap &OM) {
if (OM.lookup(V).first)
return;
- if (const Constant *C = dyn_cast<Constant>(V))
- if (C->getNumOperands() && !isa<GlobalValue>(C))
+ if (const Constant *C = dyn_cast<Constant>(V)) {
+ if (C->getNumOperands() && !isa<GlobalValue>(C)) {
for (const Value *Op : C->operands())
if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
orderValue(Op, OM);
+ if (auto *CE = dyn_cast<ConstantExpr>(C))
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ orderValue(CE->getShuffleMaskForBitcode(), OM);
+ }
+ }
// Note: we cannot cache this lookup above, since inserting into the map
// changes the map's size, and thus affects the other IDs.
@@ -155,11 +160,14 @@ static OrderMap orderModule(const Module &M) {
for (const Argument &A : F.args())
orderValue(&A, OM);
for (const BasicBlock &BB : F)
- for (const Instruction &I : BB)
+ for (const Instruction &I : BB) {
for (const Value *Op : I.operands())
if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
isa<InlineAsm>(*Op))
orderValue(Op, OM);
+ if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+ orderValue(SVI->getShuffleMaskForBitcode(), OM);
+ }
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
orderValue(&I, OM);
@@ -223,9 +231,9 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
return LU->getOperandNo() > RU->getOperandNo();
});
- if (std::is_sorted(
- List.begin(), List.end(),
- [](const Entry &L, const Entry &R) { return L.second < R.second; }))
+ if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) {
+ return L.second < R.second;
+ }))
// Order is already correct.
return;
@@ -250,11 +258,17 @@ static void predictValueUseListOrder(const Value *V, const Function *F,
predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
// Recursive descent into constants.
- if (const Constant *C = dyn_cast<Constant>(V))
- if (C->getNumOperands()) // Visit GlobalValues.
+ if (const Constant *C = dyn_cast<Constant>(V)) {
+ if (C->getNumOperands()) { // Visit GlobalValues.
for (const Value *Op : C->operands())
if (isa<Constant>(Op)) // Visit GlobalValues.
predictValueUseListOrder(Op, F, OM, Stack);
+ if (auto *CE = dyn_cast<ConstantExpr>(C))
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ predictValueUseListOrder(CE->getShuffleMaskForBitcode(), F, OM,
+ Stack);
+ }
+ }
}
static UseListOrderStack predictUseListOrder(const Module &M) {
@@ -279,10 +293,14 @@ static UseListOrderStack predictUseListOrder(const Module &M) {
for (const Argument &A : F.args())
predictValueUseListOrder(&A, &F, OM, Stack);
for (const BasicBlock &BB : F)
- for (const Instruction &I : BB)
+ for (const Instruction &I : BB) {
for (const Value *Op : I.operands())
if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
predictValueUseListOrder(Op, &F, OM, Stack);
+ if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+ predictValueUseListOrder(SVI->getShuffleMaskForBitcode(), &F, OM,
+ Stack);
+ }
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
predictValueUseListOrder(&I, &F, OM, Stack);
@@ -413,6 +431,8 @@ ValueEnumerator::ValueEnumerator(const Module &M,
EnumerateMetadata(&F, MD->getMetadata());
}
+ if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+ EnumerateType(SVI->getShuffleMaskForBitcode()->getType());
EnumerateType(I.getType());
if (const auto *Call = dyn_cast<CallBase>(&I))
EnumerateAttributes(Call->getAttributes());
@@ -836,6 +856,9 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
I != E; ++I)
if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress.
EnumerateValue(*I);
+ if (auto *CE = dyn_cast<ConstantExpr>(C))
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ EnumerateValue(CE->getShuffleMaskForBitcode());
// Finally, add the value. Doing this could make the ValueID reference be
// dangling, don't reuse it.
@@ -913,6 +936,9 @@ void ValueEnumerator::EnumerateOperandType(const Value *V) {
EnumerateOperandType(Op);
}
+ if (auto *CE = dyn_cast<ConstantExpr>(C))
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ EnumerateOperandType(CE->getShuffleMaskForBitcode());
}
void ValueEnumerator::EnumerateAttributes(AttributeList PAL) {
@@ -958,11 +984,14 @@ void ValueEnumerator::incorporateFunction(const Function &F) {
// Add all function-level constants to the value table.
for (const BasicBlock &BB : F) {
- for (const Instruction &I : BB)
+ for (const Instruction &I : BB) {
for (const Use &OI : I.operands()) {
if ((isa<Constant>(OI) && !isa<GlobalValue>(OI)) || isa<InlineAsm>(OI))
EnumerateValue(OI);
}
+ if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+ EnumerateValue(SVI->getShuffleMaskForBitcode());
+ }
BasicBlocks.push_back(&BB);
ValueMap[&BB] = BasicBlocks.size();
}
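All of the ValueEnumerator changes above apply one rule across the module's various walks (orderValue, orderModule, predictValueUseListOrder, the constructor, EnumerateValue, EnumerateOperandType, incorporateFunction): wherever operands are visited, the shuffle mask, which is no longer a regular operand of shufflevector, must be visited too through getShuffleMaskForBitcode(). A hypothetical helper capturing that rule (not part of the patch, which repeats the check inline at each site):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

// Sketch only: invokes Visit on the bitcode-level shuffle mask constant when
// V is a shufflevector instruction or constant expression, otherwise does
// nothing.
template <typename Visitor>
static void visitShuffleMaskIfAny(const llvm::Value *V, Visitor Visit) {
  using namespace llvm;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(V))
    Visit(SVI->getShuffleMaskForBitcode());
  else if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::ShuffleVector)
      Visit(CE->getShuffleMaskForBitcode());
}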
diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/llvm/lib/Bitcode/Writer/ValueEnumerator.h
index 112f0b4a1dc46..3c3bd0d9fdc70 100644
--- a/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -17,8 +17,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/Metadata.h"
-#include "llvm/IR/Type.h"
#include "llvm/IR/UseListOrder.h"
#include <cassert>
#include <cstdint>