summaryrefslogtreecommitdiff
path: root/utils
diff options
context:
space:
mode:
Diffstat (limited to 'utils')
-rw-r--r--utils/TableGen/CMakeLists.txt2
-rw-r--r--utils/TableGen/ClangAttrEmitter.cpp395
-rw-r--r--utils/TableGen/ClangDataCollectorsEmitter.cpp18
-rw-r--r--utils/TableGen/TableGen.cpp6
-rw-r--r--utils/TableGen/TableGenBackends.h2
-rwxr-xr-xutils/analyzer/CmpRuns.py97
-rw-r--r--utils/analyzer/SATestAdd.py50
-rw-r--r--utils/analyzer/SATestBuild.py620
-rwxr-xr-xutils/analyzer/SATestUpdateDiffs.py73
-rw-r--r--utils/analyzer/SATestUtils.py100
-rw-r--r--utils/analyzer/SumTimerInfo.py78
-rwxr-xr-xutils/analyzer/ubiviz87
-rwxr-xr-xutils/clangdiag.py192
-rw-r--r--utils/perf-training/CMakeLists.txt12
14 files changed, 1028 insertions, 704 deletions
diff --git a/utils/TableGen/CMakeLists.txt b/utils/TableGen/CMakeLists.txt
index c8e9537cb56b..dba0c94ac0e4 100644
--- a/utils/TableGen/CMakeLists.txt
+++ b/utils/TableGen/CMakeLists.txt
@@ -6,9 +6,11 @@ add_tablegen(clang-tblgen CLANG
ClangCommentCommandInfoEmitter.cpp
ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
ClangCommentHTMLTagsEmitter.cpp
+ ClangDataCollectorsEmitter.cpp
ClangDiagnosticsEmitter.cpp
ClangOptionDocEmitter.cpp
ClangSACheckersEmitter.cpp
NeonEmitter.cpp
TableGen.cpp
)
+set_target_properties(clang-tblgen PROPERTIES FOLDER "Clang tablegenning")
diff --git a/utils/TableGen/ClangAttrEmitter.cpp b/utils/TableGen/ClangAttrEmitter.cpp
index b6d2988964b4..70ce15f5a24e 100644
--- a/utils/TableGen/ClangAttrEmitter.cpp
+++ b/utils/TableGen/ClangAttrEmitter.cpp
@@ -56,9 +56,9 @@ public:
V(Spelling.getValueAsString("Variety")),
N(Spelling.getValueAsString("Name")) {
- assert(V != "GCC" && "Given a GCC spelling, which means this hasn't been"
- "flattened!");
- if (V == "CXX11" || V == "Pragma")
+ assert(V != "GCC" && V != "Clang" &&
+ "Given a GCC spelling, which means this hasn't been flattened!");
+ if (V == "CXX11" || V == "C2x" || V == "Pragma")
NS = Spelling.getValueAsString("Namespace");
bool Unset;
K = Spelling.getValueAsBitOrUnset("KnownToGCC", Unset);
@@ -78,11 +78,15 @@ GetFlattenedSpellings(const Record &Attr) {
std::vector<FlattenedSpelling> Ret;
for (const auto &Spelling : Spellings) {
- if (Spelling->getValueAsString("Variety") == "GCC") {
+ StringRef Variety = Spelling->getValueAsString("Variety");
+ StringRef Name = Spelling->getValueAsString("Name");
+ if (Variety == "GCC") {
// Gin up two new spelling objects to add into the list.
- Ret.emplace_back("GNU", Spelling->getValueAsString("Name"), "", true);
- Ret.emplace_back("CXX11", Spelling->getValueAsString("Name"), "gnu",
- true);
+ Ret.emplace_back("GNU", Name, "", true);
+ Ret.emplace_back("CXX11", Name, "gnu", true);
+ } else if (Variety == "Clang") {
+ Ret.emplace_back("GNU", Name, "", false);
+ Ret.emplace_back("CXX11", Name, "clang", false);
} else
Ret.push_back(FlattenedSpelling(*Spelling));
}
@@ -490,6 +494,17 @@ namespace {
OS << "}\n";
}
+ void writeASTVisitorTraversal(raw_ostream &OS) const override {
+ StringRef Name = getUpperName();
+ OS << " if (A->is" << Name << "Expr()) {\n"
+ << " if (!getDerived().TraverseStmt(A->get" << Name << "Expr()))\n"
+ << " return false;\n"
+ << " } else if (auto *TSI = A->get" << Name << "Type()) {\n"
+ << " if (!getDerived().TraverseTypeLoc(TSI->getTypeLoc()))\n"
+ << " return false;\n"
+ << " }\n";
+ }
+
void writeCloneArgs(raw_ostream &OS) const override {
OS << "is" << getLowerName() << "Expr, is" << getLowerName()
<< "Expr ? static_cast<void*>(" << getLowerName()
@@ -630,6 +645,10 @@ namespace {
<< "A->" << getLowerName() << "_size()";
}
+ void writeASTVisitorTraversal(raw_ostream &OS) const override {
+ // FIXME: Traverse the elements.
+ }
+
void writeCtorBody(raw_ostream &OS) const override {
OS << " std::copy(" << getUpperName() << ", " << getUpperName()
<< " + " << ArgSizeName << ", " << ArgName << ");\n";
@@ -1153,6 +1172,12 @@ namespace {
OS << " }";
}
+ void writeASTVisitorTraversal(raw_ostream &OS) const override {
+ OS << " if (auto *TSI = A->get" << getUpperName() << "Loc())\n";
+ OS << " if (!getDerived().TraverseTypeLoc(TSI->getTypeLoc()))\n";
+ OS << " return false;\n";
+ }
+
void writeTemplateInstantiationArgs(raw_ostream &OS) const override {
OS << "A->get" << getUpperName() << "Loc()";
}
@@ -1305,7 +1330,7 @@ writePrettyPrintFunction(Record &R,
if (Variety == "GNU") {
Prefix = " __attribute__((";
Suffix = "))";
- } else if (Variety == "CXX11") {
+ } else if (Variety == "CXX11" || Variety == "C2x") {
Prefix = " [[";
Suffix = "]]";
std::string Namespace = Spellings[I].nameSpace();
@@ -1419,7 +1444,7 @@ static void writeAttrAccessorDefinition(const Record &R, raw_ostream &OS) {
assert(!SpellingList.empty() &&
"Attribute with empty spelling list can't have accessors!");
for (const auto *Accessor : Accessors) {
- std::string Name = Accessor->getValueAsString("Name");
+ const StringRef Name = Accessor->getValueAsString("Name");
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*Accessor);
OS << " bool " << Name << "() const { return SpellingListIndex == ";
@@ -1568,7 +1593,7 @@ struct AttributeSubjectMatchRule {
// Abstract rules are used only for sub-rules
bool isAbstractRule() const { return getSubjects().empty(); }
- std::string getName() const {
+ StringRef getName() const {
return (Constraint ? Constraint : MetaSubject)->getValueAsString("Name");
}
@@ -1800,13 +1825,11 @@ PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
// Generate a function that constructs a set of matching rules that describe
// to which declarations the attribute should apply to.
std::string FnName = "matchRulesFor" + Attr.getName().str();
- std::stringstream SS;
- SS << "static void " << FnName << "(llvm::SmallVectorImpl<std::pair<"
+ OS << "static void " << FnName << "(llvm::SmallVectorImpl<std::pair<"
<< AttributeSubjectMatchRule::EnumName
<< ", bool>> &MatchRules, const LangOptions &LangOpts) {\n";
if (Attr.isValueUnset("Subjects")) {
- SS << "}\n\n";
- OS << SS.str();
+ OS << "}\n\n";
return FnName;
}
const Record *SubjectObj = Attr.getValueAsDef("Subjects");
@@ -1819,24 +1842,23 @@ PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
// The rule might be language specific, so only subtract it from the given
// rules if the specific language options are specified.
std::vector<Record *> LangOpts = Rule.getLangOpts();
- SS << " MatchRules.push_back(std::make_pair(" << Rule.getEnumValue()
+ OS << " MatchRules.push_back(std::make_pair(" << Rule.getEnumValue()
<< ", /*IsSupported=*/";
if (!LangOpts.empty()) {
for (auto I = LangOpts.begin(), E = LangOpts.end(); I != E; ++I) {
- std::string Part = (*I)->getValueAsString("Name");
+ const StringRef Part = (*I)->getValueAsString("Name");
if ((*I)->getValueAsBit("Negated"))
- SS << "!";
- SS << "LangOpts." + Part;
+ OS << "!";
+ OS << "LangOpts." << Part;
if (I + 1 != E)
- SS << " || ";
+ OS << " || ";
}
} else
- SS << "true";
- SS << "));\n";
+ OS << "true";
+ OS << "));\n";
}
}
- SS << "}\n\n";
- OS << SS.str();
+ OS << "}\n\n";
return FnName;
}
@@ -1892,7 +1914,8 @@ void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
continue;
std::string SubRuleFunction;
if (SubMatchRules.count(Rule.MetaSubject))
- SubRuleFunction = "isAttributeSubjectMatchSubRuleFor_" + Rule.getName();
+ SubRuleFunction =
+ ("isAttributeSubjectMatchSubRuleFor_" + Rule.getName()).str();
else
SubRuleFunction = "defaultIsAttributeSubjectMatchSubRuleFor";
OS << " Case(\"" << Rule.getName() << "\", std::make_pair("
@@ -2695,10 +2718,14 @@ static void GenerateHasAttrSpellingStringSwitch(
// If this is the C++11 variety, also add in the LangOpts test.
if (Variety == "CXX11")
Test += " && LangOpts.CPlusPlus11";
+ else if (Variety == "C2x")
+ Test += " && LangOpts.DoubleSquareBracketAttributes";
} else if (Variety == "CXX11")
// C++11 mode should be checked against LangOpts, which is presumed to be
// present in the caller.
Test = "LangOpts.CPlusPlus11";
+ else if (Variety == "C2x")
+ Test = "LangOpts.DoubleSquareBracketAttributes";
std::string TestStr =
!Test.empty() ? Test + " ? " + llvm::itostr(Version) + " : 0" : "1";
@@ -2719,7 +2746,7 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// and declspecs. Then generate a big switch statement for each of them.
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
std::vector<Record *> Declspec, Microsoft, GNU, Pragma;
- std::map<std::string, std::vector<Record *>> CXX;
+ std::map<std::string, std::vector<Record *>> CXX, C2x;
// Walk over the list of all attributes, and split them out based on the
// spelling variety.
@@ -2735,6 +2762,8 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
Microsoft.push_back(R);
else if (Variety == "CXX11")
CXX[SI.nameSpace()].push_back(R);
+ else if (Variety == "C2x")
+ C2x[SI.nameSpace()].push_back(R);
else if (Variety == "Pragma")
Pragma.push_back(R);
}
@@ -2754,20 +2783,25 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
OS << "case AttrSyntax::Pragma:\n";
OS << " return llvm::StringSwitch<int>(Name)\n";
GenerateHasAttrSpellingStringSwitch(Pragma, OS, "Pragma");
- OS << "case AttrSyntax::CXX: {\n";
- // C++11-style attributes are further split out based on the Scope.
- for (auto I = CXX.cbegin(), E = CXX.cend(); I != E; ++I) {
- if (I != CXX.begin())
- OS << " else ";
- if (I->first.empty())
- OS << "if (!Scope || Scope->getName() == \"\") {\n";
- else
- OS << "if (Scope->getName() == \"" << I->first << "\") {\n";
- OS << " return llvm::StringSwitch<int>(Name)\n";
- GenerateHasAttrSpellingStringSwitch(I->second, OS, "CXX11", I->first);
- OS << "}";
- }
- OS << "\n}\n";
+ auto fn = [&OS](const char *Spelling, const char *Variety,
+ const std::map<std::string, std::vector<Record *>> &List) {
+ OS << "case AttrSyntax::" << Variety << ": {\n";
+ // C++11-style attributes are further split out based on the Scope.
+ for (auto I = List.cbegin(), E = List.cend(); I != E; ++I) {
+ if (I != List.cbegin())
+ OS << " else ";
+ if (I->first.empty())
+ OS << "if (!Scope || Scope->getName() == \"\") {\n";
+ else
+ OS << "if (Scope->getName() == \"" << I->first << "\") {\n";
+ OS << " return llvm::StringSwitch<int>(Name)\n";
+ GenerateHasAttrSpellingStringSwitch(I->second, OS, Spelling, I->first);
+ OS << "}";
+ }
+ OS << "\n} break;\n";
+ };
+ fn("CXX11", "CXX", CXX);
+ fn("C2x", "C", C2x);
OS << "}\n";
}
@@ -2788,10 +2822,11 @@ void EmitClangAttrSpellingListIndex(RecordKeeper &Records, raw_ostream &OS) {
<< StringSwitch<unsigned>(Spellings[I].variety())
.Case("GNU", 0)
.Case("CXX11", 1)
- .Case("Declspec", 2)
- .Case("Microsoft", 3)
- .Case("Keyword", 4)
- .Case("Pragma", 5)
+ .Case("C2x", 2)
+ .Case("Declspec", 3)
+ .Case("Microsoft", 4)
+ .Case("Keyword", 5)
+ .Case("Pragma", 6)
.Default(0)
<< " && Scope == \"" << Spellings[I].nameSpace() << "\")\n"
<< " return " << I << ";\n";
@@ -2965,7 +3000,7 @@ static bool isArgVariadic(const Record &R, StringRef AttrName) {
return createArgument(R, AttrName)->isVariadic();
}
-static void emitArgInfo(const Record &R, std::stringstream &OS) {
+static void emitArgInfo(const Record &R, raw_ostream &OS) {
// This function will count the number of arguments specified for the
// attribute and emit the number of required arguments followed by the
// number of optional arguments.
@@ -2994,136 +3029,72 @@ static void GenerateDefaultAppertainsTo(raw_ostream &OS) {
OS << "}\n\n";
}
+static std::string GetDiagnosticSpelling(const Record &R) {
+ std::string Ret = R.getValueAsString("DiagSpelling");
+ if (!Ret.empty())
+ return Ret;
+
+ // If we couldn't find the DiagSpelling in this object, we can check to see
+ // if the object is one that has a base, and if it is, loop up to the Base
+ // member recursively.
+ std::string Super = R.getSuperClasses().back().first->getName();
+ if (Super == "DDecl" || Super == "DStmt")
+ return GetDiagnosticSpelling(*R.getValueAsDef("Base"));
+
+ return "";
+}
+
static std::string CalculateDiagnostic(const Record &S) {
// If the SubjectList object has a custom diagnostic associated with it,
// return that directly.
- std::string CustomDiag = S.getValueAsString("CustomDiag");
+ const StringRef CustomDiag = S.getValueAsString("CustomDiag");
if (!CustomDiag.empty())
- return CustomDiag;
-
- // Given the list of subjects, determine what diagnostic best fits.
- enum {
- Func = 1U << 0,
- Var = 1U << 1,
- ObjCMethod = 1U << 2,
- Param = 1U << 3,
- Class = 1U << 4,
- GenericRecord = 1U << 5,
- Type = 1U << 6,
- ObjCIVar = 1U << 7,
- ObjCProp = 1U << 8,
- ObjCInterface = 1U << 9,
- Block = 1U << 10,
- Namespace = 1U << 11,
- Field = 1U << 12,
- CXXMethod = 1U << 13,
- ObjCProtocol = 1U << 14,
- Enum = 1U << 15,
- Named = 1U << 16,
- };
- uint32_t SubMask = 0;
+ return ("\"" + Twine(CustomDiag) + "\"").str();
+ std::vector<std::string> DiagList;
std::vector<Record *> Subjects = S.getValueAsListOfDefs("Subjects");
for (const auto *Subject : Subjects) {
const Record &R = *Subject;
- std::string Name;
-
- if (R.isSubClassOf("SubsetSubject")) {
- PrintError(R.getLoc(), "SubsetSubjects should use a custom diagnostic");
- // As a fallback, look through the SubsetSubject to see what its base
- // type is, and use that. This needs to be updated if SubsetSubjects
- // are allowed within other SubsetSubjects.
- Name = R.getValueAsDef("Base")->getName();
- } else
- Name = R.getName();
-
- uint32_t V = StringSwitch<uint32_t>(Name)
- .Case("Function", Func)
- .Case("Var", Var)
- .Case("ObjCMethod", ObjCMethod)
- .Case("ParmVar", Param)
- .Case("TypedefName", Type)
- .Case("ObjCIvar", ObjCIVar)
- .Case("ObjCProperty", ObjCProp)
- .Case("Record", GenericRecord)
- .Case("ObjCInterface", ObjCInterface)
- .Case("ObjCProtocol", ObjCProtocol)
- .Case("Block", Block)
- .Case("CXXRecord", Class)
- .Case("Namespace", Namespace)
- .Case("Field", Field)
- .Case("CXXMethod", CXXMethod)
- .Case("Enum", Enum)
- .Case("Named", Named)
- .Default(0);
- if (!V) {
- // Something wasn't in our mapping, so be helpful and let the developer
- // know about it.
- PrintFatalError(R.getLoc(), "Unknown subject type: " + R.getName());
- return "";
+ // Get the diagnostic text from the Decl or Stmt node given.
+ std::string V = GetDiagnosticSpelling(R);
+ if (V.empty()) {
+ PrintError(R.getLoc(),
+ "Could not determine diagnostic spelling for the node: " +
+ R.getName() + "; please add one to DeclNodes.td");
+ } else {
+ // The node may contain a list of elements itself, so split the elements
+ // by a comma, and trim any whitespace.
+ SmallVector<StringRef, 2> Frags;
+ llvm::SplitString(V, Frags, ",");
+ for (auto Str : Frags) {
+ DiagList.push_back(Str.trim());
+ }
}
-
- SubMask |= V;
}
- switch (SubMask) {
- // For the simple cases where there's only a single entry in the mask, we
- // don't have to resort to bit fiddling.
- case Func: return "ExpectedFunction";
- case Var: return "ExpectedVariable";
- case Param: return "ExpectedParameter";
- case Class: return "ExpectedClass";
- case Enum: return "ExpectedEnum";
- case CXXMethod:
- // FIXME: Currently, this maps to ExpectedMethod based on existing code,
- // but should map to something a bit more accurate at some point.
- case ObjCMethod: return "ExpectedMethod";
- case Type: return "ExpectedType";
- case ObjCInterface: return "ExpectedObjectiveCInterface";
- case ObjCProtocol: return "ExpectedObjectiveCProtocol";
-
- // "GenericRecord" means struct, union or class; check the language options
- // and if not compiling for C++, strip off the class part. Note that this
- // relies on the fact that the context for this declares "Sema &S".
- case GenericRecord:
- return "(S.getLangOpts().CPlusPlus ? ExpectedStructOrUnionOrClass : "
- "ExpectedStructOrUnion)";
- case Func | ObjCMethod | Block: return "ExpectedFunctionMethodOrBlock";
- case Func | ObjCMethod | Class: return "ExpectedFunctionMethodOrClass";
- case Func | Param:
- case Func | ObjCMethod | Param: return "ExpectedFunctionMethodOrParameter";
- case Func | ObjCMethod: return "ExpectedFunctionOrMethod";
- case Func | Var: return "ExpectedVariableOrFunction";
-
- // If not compiling for C++, the class portion does not apply.
- case Func | Var | Class:
- return "(S.getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass : "
- "ExpectedVariableOrFunction)";
-
- case Func | Var | Class | ObjCInterface:
- return "(S.getLangOpts().CPlusPlus"
- " ? ((S.getLangOpts().ObjC1 || S.getLangOpts().ObjC2)"
- " ? ExpectedFunctionVariableClassOrObjCInterface"
- " : ExpectedFunctionVariableOrClass)"
- " : ((S.getLangOpts().ObjC1 || S.getLangOpts().ObjC2)"
- " ? ExpectedFunctionVariableOrObjCInterface"
- " : ExpectedVariableOrFunction))";
+ if (DiagList.empty()) {
+ PrintFatalError(S.getLoc(),
+ "Could not deduce diagnostic argument for Attr subjects");
+ return "";
+ }
- case ObjCMethod | ObjCProp: return "ExpectedMethodOrProperty";
- case Func | ObjCMethod | ObjCProp:
- return "ExpectedFunctionOrMethodOrProperty";
- case ObjCProtocol | ObjCInterface:
- return "ExpectedObjectiveCInterfaceOrProtocol";
- case Field | Var: return "ExpectedFieldOrGlobalVar";
+ // FIXME: this is not particularly good for localization purposes and ideally
+ // should be part of the diagnostics engine itself with some sort of list
+ // specifier.
- case Named:
- return "ExpectedNamedDecl";
- }
+ // A single member of the list can be returned directly.
+ if (DiagList.size() == 1)
+ return '"' + DiagList.front() + '"';
- PrintFatalError(S.getLoc(),
- "Could not deduce diagnostic argument for Attr subjects");
+ if (DiagList.size() == 2)
+ return '"' + DiagList[0] + " and " + DiagList[1] + '"';
- return "";
+ // If there are more than two in the list, we serialize the first N - 1
+ // elements with a comma. This leaves the string in the state: foo, bar,
+ // baz (but misses quux). We can then add ", and " for the last element
+ // manually.
+ std::string Diag = llvm::join(DiagList.begin(), DiagList.end() - 1, ", ");
+ return '"' + Diag + ", and " + *(DiagList.end() - 1) + '"';
}
static std::string GetSubjectWithSuffix(const Record *R) {
@@ -3210,8 +3181,8 @@ static std::string GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
}
SS << ") {\n";
SS << " S.Diag(Attr.getLoc(), diag::";
- SS << (Warn ? "warn_attribute_wrong_decl_type" :
- "err_attribute_wrong_decl_type");
+ SS << (Warn ? "warn_attribute_wrong_decl_type_str" :
+ "err_attribute_wrong_decl_type_str");
SS << ")\n";
SS << " << Attr.getName() << ";
SS << CalculateDiagnostic(*SubjectObj) << ";\n";
@@ -3281,12 +3252,13 @@ static std::string GenerateLangOptRequirements(const Record &R,
// codegen efficiency).
std::string FnName = "check", Test;
for (auto I = LangOpts.begin(), E = LangOpts.end(); I != E; ++I) {
- std::string Part = (*I)->getValueAsString("Name");
+ const StringRef Part = (*I)->getValueAsString("Name");
if ((*I)->getValueAsBit("Negated")) {
FnName += "Not";
Test += "!";
}
- Test += "S.LangOpts." + Part;
+ Test += "S.LangOpts.";
+ Test += Part;
if (I + 1 != E)
Test += " || ";
FnName += Part;
@@ -3342,7 +3314,7 @@ static std::string GenerateTargetRequirements(const Record &Attr,
// applies to multiple target architectures. In order for the attribute to be
// considered valid, all of its architectures need to be included.
if (!Attr.isValueUnset("ParseKind")) {
- std::string APK = Attr.getValueAsString("ParseKind");
+ const StringRef APK = Attr.getValueAsString("ParseKind");
for (const auto &I : Dupes) {
if (I.first == APK) {
std::vector<StringRef> DA =
@@ -3438,7 +3410,8 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// another mapping. At the same time, generate the AttrInfoMap object
// contents. Due to the reliance on generated code, use separate streams so
// that code will not be interleaved.
- std::stringstream SS;
+ std::string Buffer;
+ raw_string_ostream SS {Buffer};
for (auto I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
// TODO: If the attribute's kind appears in the list of duplicates, that is
// because it is a target-specific attribute that appears multiple times.
@@ -3484,7 +3457,7 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
std::vector<StringMatcher::StringPair> GNU, Declspec, Microsoft, CXX11,
- Keywords, Pragma;
+ Keywords, Pragma, C2x;
std::set<std::string> Seen;
for (const auto *A : Attrs) {
const Record &Attr = *A;
@@ -3522,6 +3495,10 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
Matches = &CXX11;
Spelling += S.nameSpace();
Spelling += "::";
+ } else if (Variety == "C2x") {
+ Matches = &C2x;
+ Spelling += S.nameSpace();
+ Spelling += "::";
} else if (Variety == "GNU")
Matches = &GNU;
else if (Variety == "Declspec")
@@ -3560,6 +3537,8 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
StringMatcher("Name", Microsoft, OS).Emit();
OS << " } else if (AttributeList::AS_CXX11 == Syntax) {\n";
StringMatcher("Name", CXX11, OS).Emit();
+ OS << " } else if (AttributeList::AS_C2x == Syntax) {\n";
+ StringMatcher("Name", C2x, OS).Emit();
OS << " } else if (AttributeList::AS_Keyword == Syntax || ";
OS << "AttributeList::AS_ContextSensitiveKeyword == Syntax) {\n";
StringMatcher("Name", Keywords, OS).Emit();
@@ -3624,20 +3603,25 @@ class DocumentationData {
public:
const Record *Documentation;
const Record *Attribute;
+ std::string Heading;
+ unsigned SupportedSpellings;
- DocumentationData(const Record &Documentation, const Record &Attribute)
- : Documentation(&Documentation), Attribute(&Attribute) {}
+ DocumentationData(const Record &Documentation, const Record &Attribute,
+ const std::pair<std::string, unsigned> HeadingAndKinds)
+ : Documentation(&Documentation), Attribute(&Attribute),
+ Heading(std::move(HeadingAndKinds.first)),
+ SupportedSpellings(HeadingAndKinds.second) {}
};
static void WriteCategoryHeader(const Record *DocCategory,
raw_ostream &OS) {
- const std::string &Name = DocCategory->getValueAsString("Name");
- OS << Name << "\n" << std::string(Name.length(), '=') << "\n";
+ const StringRef Name = DocCategory->getValueAsString("Name");
+ OS << Name << "\n" << std::string(Name.size(), '=') << "\n";
// If there is content, print that as well.
- std::string ContentStr = DocCategory->getValueAsString("Content");
+ const StringRef ContentStr = DocCategory->getValueAsString("Content");
// Trim leading and trailing newlines and spaces.
- OS << StringRef(ContentStr).trim();
+ OS << ContentStr.trim();
OS << "\n\n";
}
@@ -3645,22 +3629,24 @@ static void WriteCategoryHeader(const Record *DocCategory,
enum SpellingKind {
GNU = 1 << 0,
CXX11 = 1 << 1,
- Declspec = 1 << 2,
- Microsoft = 1 << 3,
- Keyword = 1 << 4,
- Pragma = 1 << 5
+ C2x = 1 << 2,
+ Declspec = 1 << 3,
+ Microsoft = 1 << 4,
+ Keyword = 1 << 5,
+ Pragma = 1 << 6
};
-static void WriteDocumentation(RecordKeeper &Records,
- const DocumentationData &Doc, raw_ostream &OS) {
+static std::pair<std::string, unsigned>
+GetAttributeHeadingAndSpellingKinds(const Record &Documentation,
+ const Record &Attribute) {
// FIXME: there is no way to have a per-spelling category for the attribute
// documentation. This may not be a limiting factor since the spellings
// should generally be consistently applied across the category.
- std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*Doc.Attribute);
+ std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(Attribute);
// Determine the heading to be used for this attribute.
- std::string Heading = Doc.Documentation->getValueAsString("Heading");
+ std::string Heading = Documentation.getValueAsString("Heading");
bool CustomHeading = !Heading.empty();
if (Heading.empty()) {
// If there's only one spelling, we can simply use that.
@@ -3682,7 +3668,7 @@ static void WriteDocumentation(RecordKeeper &Records,
// If the heading is still empty, it is an error.
if (Heading.empty())
- PrintFatalError(Doc.Attribute->getLoc(),
+ PrintFatalError(Attribute.getLoc(),
"This attribute requires a heading to be specified");
// Gather a list of unique spellings; this is not the same as the semantic
@@ -3695,6 +3681,7 @@ static void WriteDocumentation(RecordKeeper &Records,
SpellingKind Kind = StringSwitch<SpellingKind>(I.variety())
.Case("GNU", GNU)
.Case("CXX11", CXX11)
+ .Case("C2x", C2x)
.Case("Declspec", Declspec)
.Case("Microsoft", Microsoft)
.Case("Keyword", Keyword)
@@ -3704,7 +3691,7 @@ static void WriteDocumentation(RecordKeeper &Records,
SupportedSpellings |= Kind;
std::string Name;
- if (Kind == CXX11 && !I.nameSpace().empty())
+ if ((Kind == CXX11 || Kind == C2x) && !I.nameSpace().empty())
Name = I.nameSpace() + "::";
Name += I.name();
@@ -3724,27 +3711,33 @@ static void WriteDocumentation(RecordKeeper &Records,
}
Heading += ")";
}
- OS << Heading << "\n" << std::string(Heading.length(), '-') << "\n";
-
if (!SupportedSpellings)
- PrintFatalError(Doc.Attribute->getLoc(),
+ PrintFatalError(Attribute.getLoc(),
"Attribute has no supported spellings; cannot be "
"documented");
+ return std::make_pair(std::move(Heading), SupportedSpellings);
+}
+
+static void WriteDocumentation(RecordKeeper &Records,
+ const DocumentationData &Doc, raw_ostream &OS) {
+ OS << Doc.Heading << "\n" << std::string(Doc.Heading.length(), '-') << "\n";
// List what spelling syntaxes the attribute supports.
OS << ".. csv-table:: Supported Syntaxes\n";
- OS << " :header: \"GNU\", \"C++11\", \"__declspec\", \"Keyword\",";
+ OS << " :header: \"GNU\", \"C++11\", \"C2x\", \"__declspec\", \"Keyword\",";
OS << " \"Pragma\", \"Pragma clang attribute\"\n\n";
OS << " \"";
- if (SupportedSpellings & GNU) OS << "X";
+ if (Doc.SupportedSpellings & GNU) OS << "X";
+ OS << "\",\"";
+ if (Doc.SupportedSpellings & CXX11) OS << "X";
OS << "\",\"";
- if (SupportedSpellings & CXX11) OS << "X";
+ if (Doc.SupportedSpellings & C2x) OS << "X";
OS << "\",\"";
- if (SupportedSpellings & Declspec) OS << "X";
+ if (Doc.SupportedSpellings & Declspec) OS << "X";
OS << "\",\"";
- if (SupportedSpellings & Keyword) OS << "X";
+ if (Doc.SupportedSpellings & Keyword) OS << "X";
OS << "\", \"";
- if (SupportedSpellings & Pragma) OS << "X";
+ if (Doc.SupportedSpellings & Pragma) OS << "X";
OS << "\", \"";
if (getPragmaAttributeSupport(Records).isAttributedSupported(*Doc.Attribute))
OS << "X";
@@ -3756,16 +3749,16 @@ static void WriteDocumentation(RecordKeeper &Records,
OS << "This attribute has been deprecated, and may be removed in a future "
<< "version of Clang.";
const Record &Deprecated = *Doc.Documentation->getValueAsDef("Deprecated");
- std::string Replacement = Deprecated.getValueAsString("Replacement");
+ const StringRef Replacement = Deprecated.getValueAsString("Replacement");
if (!Replacement.empty())
OS << " This attribute has been superseded by ``"
<< Replacement << "``.";
OS << "\n\n";
}
- std::string ContentStr = Doc.Documentation->getValueAsString("Content");
+ const StringRef ContentStr = Doc.Documentation->getValueAsString("Content");
// Trim leading and trailing newlines and spaces.
- OS << StringRef(ContentStr).trim();
+ OS << ContentStr.trim();
OS << "\n\n\n";
}
@@ -3794,23 +3787,29 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) {
// If the category is "undocumented", then there cannot be any other
// documentation categories (otherwise, the attribute would become
// documented).
- std::string Cat = Category->getValueAsString("Name");
+ const StringRef Cat = Category->getValueAsString("Name");
bool Undocumented = Cat == "Undocumented";
if (Undocumented && Docs.size() > 1)
PrintFatalError(Doc.getLoc(),
"Attribute is \"Undocumented\", but has multiple "
- "documentation categories");
+ "documentation categories");
if (!Undocumented)
- SplitDocs[Category].push_back(DocumentationData(Doc, Attr));
+ SplitDocs[Category].push_back(DocumentationData(
+ Doc, Attr, GetAttributeHeadingAndSpellingKinds(Doc, Attr)));
}
}
// Having split the attributes out based on what documentation goes where,
// we can begin to generate sections of documentation.
- for (const auto &I : SplitDocs) {
+ for (auto &I : SplitDocs) {
WriteCategoryHeader(I.first, OS);
+ std::sort(I.second.begin(), I.second.end(),
+ [](const DocumentationData &D1, const DocumentationData &D2) {
+ return D1.Heading < D2.Heading;
+ });
+
// Walk over each of the attributes in the category and write out their
// documentation.
for (const auto &Doc : I.second)
diff --git a/utils/TableGen/ClangDataCollectorsEmitter.cpp b/utils/TableGen/ClangDataCollectorsEmitter.cpp
new file mode 100644
index 000000000000..4079efc80823
--- /dev/null
+++ b/utils/TableGen/ClangDataCollectorsEmitter.cpp
@@ -0,0 +1,18 @@
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+
+namespace clang {
+void EmitClangDataCollectors(RecordKeeper &RK, raw_ostream &OS) {
+ const auto &Defs = RK.getClasses();
+ for (const auto &Entry : Defs) {
+ Record &R = *Entry.second;
+ OS << "DEF_ADD_DATA(" << R.getName() << ", {\n";
+ auto Code = R.getValue("Code")->getValue();
+ OS << Code->getAsUnquotedString() << "}\n)";
+ OS << "\n";
+ }
+ OS << "#undef DEF_ADD_DATA\n";
+}
+} // end namespace clang
diff --git a/utils/TableGen/TableGen.cpp b/utils/TableGen/TableGen.cpp
index 781518ddbc31..840b330a732c 100644
--- a/utils/TableGen/TableGen.cpp
+++ b/utils/TableGen/TableGen.cpp
@@ -57,6 +57,7 @@ enum ActionType {
GenAttrDocs,
GenDiagDocs,
GenOptDocs,
+ GenDataCollectors,
GenTestPragmaAttributeSupportedAttributes
};
@@ -147,6 +148,8 @@ cl::opt<ActionType> Action(
clEnumValN(GenDiagDocs, "gen-diag-docs",
"Generate diagnostic documentation"),
clEnumValN(GenOptDocs, "gen-opt-docs", "Generate option documentation"),
+ clEnumValN(GenDataCollectors, "gen-clang-data-collectors",
+ "Generate data collectors for AST nodes"),
clEnumValN(GenTestPragmaAttributeSupportedAttributes,
"gen-clang-test-pragma-attribute-supported-attributes",
"Generate a list of attributes supported by #pragma clang "
@@ -262,6 +265,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenOptDocs:
EmitClangOptDocs(Records, OS);
break;
+ case GenDataCollectors:
+ EmitClangDataCollectors(Records, OS);
+ break;
case GenTestPragmaAttributeSupportedAttributes:
EmitTestPragmaAttributeSupportedAttributes(Records, OS);
break;
diff --git a/utils/TableGen/TableGenBackends.h b/utils/TableGen/TableGenBackends.h
index e1b7d0ec63be..342c889ca47a 100644
--- a/utils/TableGen/TableGenBackends.h
+++ b/utils/TableGen/TableGenBackends.h
@@ -75,6 +75,8 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS);
void EmitClangDiagDocs(RecordKeeper &Records, raw_ostream &OS);
void EmitClangOptDocs(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangDataCollectors(RecordKeeper &Records, raw_ostream &OS);
+
void EmitTestPragmaAttributeSupportedAttributes(RecordKeeper &Records,
raw_ostream &OS);
diff --git a/utils/analyzer/CmpRuns.py b/utils/analyzer/CmpRuns.py
index 2d1f44f6880c..2c0ed6aae3a2 100755
--- a/utils/analyzer/CmpRuns.py
+++ b/utils/analyzer/CmpRuns.py
@@ -6,8 +6,8 @@ which reports have been added, removed, or changed.
This is designed to support automated testing using the static analyzer, from
two perspectives:
- 1. To monitor changes in the static analyzer's reports on real code bases, for
- regression testing.
+ 1. To monitor changes in the static analyzer's reports on real code bases,
+ for regression testing.
2. For use by end users who want to integrate regular static analyzer testing
into a buildbot like environment.
@@ -28,7 +28,7 @@ Usage:
import os
import plistlib
-import CmpRuns
+
# Information about analysis run:
# path - the analysis output directory
@@ -40,6 +40,7 @@ class SingleRunInfo:
self.root = root.rstrip("/\\")
self.verboseLog = verboseLog
+
class AnalysisDiagnostic:
def __init__(self, data, report, htmlReport):
self._data = data
@@ -51,7 +52,7 @@ class AnalysisDiagnostic:
root = self._report.run.root
fileName = self._report.files[self._loc['file']]
if fileName.startswith(root) and len(root) > 0:
- return fileName[len(root)+1:]
+ return fileName[len(root) + 1:]
return fileName
def getLine(self):
@@ -66,12 +67,12 @@ class AnalysisDiagnostic:
def getDescription(self):
return self._data['description']
- def getIssueIdentifier(self) :
+ def getIssueIdentifier(self):
id = self.getFileName() + "+"
- if 'issue_context' in self._data :
- id += self._data['issue_context'] + "+"
- if 'issue_hash_content_of_line_in_context' in self._data :
- id += str(self._data['issue_hash_content_of_line_in_context'])
+ if 'issue_context' in self._data:
+ id += self._data['issue_context'] + "+"
+ if 'issue_hash_content_of_line_in_context' in self._data:
+ id += str(self._data['issue_hash_content_of_line_in_context'])
return id
def getReport(self):
@@ -89,29 +90,6 @@ class AnalysisDiagnostic:
def getRawData(self):
return self._data
-class multidict:
- def __init__(self, elts=()):
- self.data = {}
- for key,value in elts:
- self[key] = value
-
- def __getitem__(self, item):
- return self.data[item]
- def __setitem__(self, key, value):
- if key in self.data:
- self.data[key].append(value)
- else:
- self.data[key] = [value]
- def items(self):
- return self.data.items()
- def values(self):
- return self.data.values()
- def keys(self):
- return self.data.keys()
- def __len__(self):
- return len(self.data)
- def get(self, key, default=None):
- return self.data.get(key, default)
class CmpOptions:
def __init__(self, verboseLog=None, rootA="", rootB=""):
@@ -119,12 +97,14 @@ class CmpOptions:
self.rootB = rootB
self.verboseLog = verboseLog
+
class AnalysisReport:
def __init__(self, run, files):
self.run = run
self.files = files
self.diagnostics = []
+
class AnalysisRun:
def __init__(self, info):
self.path = info.path
@@ -145,14 +125,14 @@ class AnalysisRun:
# reports. Assume that all reports were created using the same
# clang version (this is always true and is more efficient).
if 'clang_version' in data:
- if self.clang_version == None:
+ if self.clang_version is None:
self.clang_version = data.pop('clang_version')
else:
data.pop('clang_version')
# Ignore/delete empty reports.
if not data['files']:
- if deleteEmpty == True:
+ if deleteEmpty:
os.remove(p)
return
@@ -169,8 +149,7 @@ class AnalysisRun:
report = AnalysisReport(self, data.pop('files'))
diagnostics = [AnalysisDiagnostic(d, report, h)
- for d,h in zip(data.pop('diagnostics'),
- htmlFiles)]
+ for d, h in zip(data.pop('diagnostics'), htmlFiles)]
assert not data
@@ -179,15 +158,21 @@ class AnalysisRun:
self.diagnostics.extend(diagnostics)
-# Backward compatibility API.
-def loadResults(path, opts, root = "", deleteEmpty=True):
+def loadResults(path, opts, root="", deleteEmpty=True):
+ """
+ Backwards compatibility API.
+ """
return loadResultsFromSingleRun(SingleRunInfo(path, root, opts.verboseLog),
deleteEmpty)
-# Load results of the analyzes from a given output folder.
-# - info is the SingleRunInfo object
-# - deleteEmpty specifies if the empty plist files should be deleted
+
def loadResultsFromSingleRun(info, deleteEmpty=True):
+ """
+    # Load results of the analyses from a given output folder.
+ # - info is the SingleRunInfo object
+ # - deleteEmpty specifies if the empty plist files should be deleted
+
+ """
path = info.path
run = AnalysisRun(info)
@@ -203,9 +188,11 @@ def loadResultsFromSingleRun(info, deleteEmpty=True):
return run
-def cmpAnalysisDiagnostic(d) :
+
+def cmpAnalysisDiagnostic(d):
return d.getIssueIdentifier()
+
def compareResults(A, B):
"""
compareResults - Generate a relation from diagnostics in run A to
@@ -224,12 +211,12 @@ def compareResults(A, B):
neqB = []
eltsA = list(A.diagnostics)
eltsB = list(B.diagnostics)
- eltsA.sort(key = cmpAnalysisDiagnostic)
- eltsB.sort(key = cmpAnalysisDiagnostic)
+ eltsA.sort(key=cmpAnalysisDiagnostic)
+ eltsB.sort(key=cmpAnalysisDiagnostic)
while eltsA and eltsB:
a = eltsA.pop()
b = eltsB.pop()
- if (a.getIssueIdentifier() == b.getIssueIdentifier()) :
+ if (a.getIssueIdentifier() == b.getIssueIdentifier()):
res.append((a, b, 0))
elif a.getIssueIdentifier() > b.getIssueIdentifier():
eltsB.append(b)
@@ -240,11 +227,11 @@ def compareResults(A, B):
neqA.extend(eltsA)
neqB.extend(eltsB)
- # FIXME: Add fuzzy matching. One simple and possible effective idea would be
- # to bin the diagnostics, print them in a normalized form (based solely on
- # the structure of the diagnostic), compute the diff, then use that as the
- # basis for matching. This has the nice property that we don't depend in any
- # way on the diagnostic format.
+ # FIXME: Add fuzzy matching. One simple and possible effective idea would
+ # be to bin the diagnostics, print them in a normalized form (based solely
+ # on the structure of the diagnostic), compute the diff, then use that as
+ # the basis for matching. This has the nice property that we don't depend
+ # in any way on the diagnostic format.
for a in neqA:
res.append((a, None, None))
@@ -253,6 +240,7 @@ def compareResults(A, B):
return res
+
def dumpScanBuildResultsDiff(dirA, dirB, opts, deleteEmpty=True):
# Load the run results.
resultsA = loadResults(dirA, opts, opts.rootA, deleteEmpty)
@@ -267,7 +255,7 @@ def dumpScanBuildResultsDiff(dirA, dirB, opts, deleteEmpty=True):
diff = compareResults(resultsA, resultsB)
foundDiffs = 0
for res in diff:
- a,b,confidence = res
+ a, b, confidence = res
if a is None:
print "ADDED: %r" % b.getReadableName()
foundDiffs += 1
@@ -302,6 +290,7 @@ def dumpScanBuildResultsDiff(dirA, dirB, opts, deleteEmpty=True):
return foundDiffs, len(resultsA.diagnostics), len(resultsB.diagnostics)
+
def main():
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] [dir A] [dir B]")
@@ -312,7 +301,8 @@ def main():
help="Prefix to ignore on source files for directory B",
action="store", type=str, default="")
parser.add_option("", "--verbose-log", dest="verboseLog",
- help="Write additional information to LOG [default=None]",
+ help="Write additional information to LOG \
+ [default=None]",
action="store", type=str, default=None,
metavar="LOG")
(opts, args) = parser.parse_args()
@@ -320,9 +310,10 @@ def main():
if len(args) != 2:
parser.error("invalid number of arguments")
- dirA,dirB = args
+ dirA, dirB = args
dumpScanBuildResultsDiff(dirA, dirB, opts)
+
if __name__ == '__main__':
main()
diff --git a/utils/analyzer/SATestAdd.py b/utils/analyzer/SATestAdd.py
index 4b94a109ce64..4c3e35cdcb5d 100644
--- a/utils/analyzer/SATestAdd.py
+++ b/utils/analyzer/SATestAdd.py
@@ -27,15 +27,17 @@ the Repository Directory.
- CachedSource/ - An optional directory containing the source of the
project being analyzed. If present,
download_project.sh will not be called.
- - changes_for_analyzer.patch - An optional patch file for any local changes
+ - changes_for_analyzer.patch - An optional patch file for any local
+ changes
(e.g., to adapt to newer version of clang)
that should be applied to CachedSource
before analysis. To construct this patch,
run the the download script to download
the project to CachedSource, copy the
CachedSource to another directory (for
- example, PatchedSource) and make any needed
- modifications to the the copied source.
+ example, PatchedSource) and make any
+                                         needed modifications to the copied
+ source.
Then run:
diff -ur CachedSource PatchedSource \
> changes_for_analyzer.patch
@@ -46,18 +48,21 @@ import os
import csv
import sys
-def isExistingProject(PMapFile, projectID) :
+
+def isExistingProject(PMapFile, projectID):
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
-# Add a new project for testing: build it and add to the Project Map file.
-# Params:
-# Dir is the directory where the sources are.
-# ID is a short string used to identify a project.
-def addNewProject(ID, BuildMode) :
+
+def addNewProject(ID, BuildMode):
+ """
+ Add a new project for testing: build it and add to the Project Map file.
+ :param ID: is a short string used to identify a project.
+ """
+
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
@@ -65,36 +70,37 @@ def addNewProject(ID, BuildMode) :
sys.exit(-1)
# Build the project.
- SATestBuild.testProject(ID, BuildMode, IsReferenceBuild=True, Dir=Dir)
+ SATestBuild.testProject(ID, BuildMode, IsReferenceBuild=True)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
+
if os.path.exists(ProjectMapPath):
- PMapFile = open(ProjectMapPath, "r+b")
+ FileMode = "r+b"
else:
print "Warning: Creating the Project Map file!!"
- PMapFile = open(ProjectMapPath, "w+b")
- try:
- if (isExistingProject(PMapFile, ID)) :
+ FileMode = "w+b"
+
+ with open(ProjectMapPath, FileMode) as PMapFile:
+ if (isExistingProject(PMapFile, ID)):
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
- PMapWriter.writerow( (ID, int(BuildMode)) );
+ PMapWriter.writerow((ID, int(BuildMode)))
print "The project map is updated: ", ProjectMapPath
- finally:
- PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
- if len(sys.argv) < 2:
- print >> sys.stderr, 'Usage: ', sys.argv[0],\
- 'project_ID <mode>' \
- 'mode - 0 for single file project; ' \
- '1 for scan_build; ' \
+ if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
+ print >> sys.stderr, 'Add a new project for testing to the analyzer'\
+ '\nUsage: ', sys.argv[0],\
+ 'project_ID <mode>\n' \
+ 'mode: 0 for single file project, ' \
+ '1 for scan_build, ' \
'2 for single file c++11 project'
sys.exit(-1)
diff --git a/utils/analyzer/SATestBuild.py b/utils/analyzer/SATestBuild.py
index 18c5393988ae..60c8796e338f 100644
--- a/utils/analyzer/SATestBuild.py
+++ b/utils/analyzer/SATestBuild.py
@@ -3,8 +3,8 @@
"""
Static Analyzer qualification infrastructure.
-The goal is to test the analyzer against different projects, check for failures,
-compare results, and measure performance.
+The goal is to test the analyzer against different projects,
+check for failures, compare results, and measure performance.
Repository Directory will contain sources of the projects as well as the
information on how to build them and the expected output.
@@ -20,7 +20,8 @@ Note that the build tree must be inside the project dir.
To test the build of the analyzer one would:
- Copy over a copy of the Repository Directory. (TODO: Prefer to ensure that
- the build directory does not pollute the repository to min network traffic).
+ the build directory does not pollute the repository to min network
+ traffic).
- Build all projects, until error. Produce logs to report errors.
- Compare results.
@@ -42,6 +43,7 @@ For testing additional checkers, use the SA_ADDITIONAL_CHECKERS environment
variable. It should contain a comma separated list.
"""
import CmpRuns
+import SATestUtils
import os
import csv
@@ -52,103 +54,53 @@ import shutil
import time
import plistlib
import argparse
-from subprocess import check_call, check_output, CalledProcessError
+from subprocess import check_call, CalledProcessError
+import multiprocessing
#------------------------------------------------------------------------------
# Helper functions.
#------------------------------------------------------------------------------
-def detectCPUs():
- """
- Detects the number of CPUs on a system. Cribbed from pp.
- """
- # Linux, Unix and MacOS:
- if hasattr(os, "sysconf"):
- if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
- # Linux & Unix:
- ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
- if isinstance(ncpus, int) and ncpus > 0:
- return ncpus
- else: # OSX:
- return int(capture(['sysctl', '-n', 'hw.ncpu']))
- # Windows:
- if os.environ.has_key("NUMBER_OF_PROCESSORS"):
- ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
- if ncpus > 0:
- return ncpus
- return 1 # Default
-
-def which(command, paths = None):
- """which(command, [paths]) - Look up the given command in the paths string
- (or the PATH environment variable, if unspecified)."""
-
- if paths is None:
- paths = os.environ.get('PATH','')
-
- # Check for absolute match first.
- if os.path.exists(command):
- return command
-
- # Would be nice if Python had a lib function for this.
- if not paths:
- paths = os.defpath
-
- # Get suffixes to search.
- # On Cygwin, 'PATHEXT' may exist but it should not be used.
- if os.pathsep == ';':
- pathext = os.environ.get('PATHEXT', '').split(';')
- else:
- pathext = ['']
- # Search the paths...
- for path in paths.split(os.pathsep):
- for ext in pathext:
- p = os.path.join(path, command + ext)
- if os.path.exists(p):
- return p
+sys.stdout = SATestUtils.flushfile(sys.stdout)
- return None
-
-# Make sure we flush the output after every print statement.
-class flushfile(object):
- def __init__(self, f):
- self.f = f
- def write(self, x):
- self.f.write(x)
- self.f.flush()
-
-sys.stdout = flushfile(sys.stdout)
def getProjectMapPath():
ProjectMapPath = os.path.join(os.path.abspath(os.curdir),
ProjectMapFile)
if not os.path.exists(ProjectMapPath):
print "Error: Cannot find the Project Map file " + ProjectMapPath +\
- "\nRunning script for the wrong directory?"
- sys.exit(-1)
+ "\nRunning script for the wrong directory?"
+ sys.exit(1)
return ProjectMapPath
+
def getProjectDir(ID):
return os.path.join(os.path.abspath(os.curdir), ID)
-def getSBOutputDirName(IsReferenceBuild) :
- if IsReferenceBuild == True :
+
+def getSBOutputDirName(IsReferenceBuild):
+ if IsReferenceBuild:
return SBOutputDirReferencePrefix + SBOutputDirName
- else :
+ else:
return SBOutputDirName
#------------------------------------------------------------------------------
# Configuration setup.
#------------------------------------------------------------------------------
+
# Find Clang for static analysis.
-Clang = which("clang", os.environ['PATH'])
+if 'CC' in os.environ:
+ Clang = os.environ['CC']
+else:
+ Clang = SATestUtils.which("clang", os.environ['PATH'])
if not Clang:
print "Error: cannot find 'clang' in PATH"
- sys.exit(-1)
+ sys.exit(1)
# Number of jobs.
-Jobs = int(math.ceil(detectCPUs() * 0.75))
+Jobs = int(math.ceil(multiprocessing.cpu_count() * 0.75))
# Project map stores info about all the "registered" projects.
ProjectMapFile = "projectMap.csv"
@@ -168,16 +120,15 @@ BuildLogName = "run_static_analyzer.log"
# displayed when buildbot detects a build failure.
NumOfFailuresInSummary = 10
FailuresSummaryFileName = "failures.txt"
-# Summary of the result diffs.
-DiffsSummaryFileName = "diffs.txt"
# The scan-build result directory.
SBOutputDirName = "ScanBuildResults"
SBOutputDirReferencePrefix = "Ref"
-# The name of the directory storing the cached project source. If this directory
-# does not exist, the download script will be executed. That script should
-# create the "CachedSource" directory and download the project source into it.
+# The name of the directory storing the cached project source. If this
+# directory does not exist, the download script will be executed.
+# That script should create the "CachedSource" directory and download the
+# project source into it.
CachedSourceDirName = "CachedSource"
# The name of the directory containing the source code that will be analyzed.
@@ -193,7 +144,18 @@ PatchfileName = "changes_for_analyzer.patch"
# The list of checkers used during analyzes.
# Currently, consists of all the non-experimental checkers, plus a few alpha
# checkers we don't want to regress on.
-Checkers="alpha.unix.SimpleStream,alpha.security.taint,cplusplus.NewDeleteLeaks,core,cplusplus,deadcode,security,unix,osx"
+Checkers = ",".join([
+ "alpha.unix.SimpleStream",
+ "alpha.security.taint",
+ "cplusplus.NewDeleteLeaks",
+ "core",
+ "cplusplus",
+ "deadcode",
+ "security",
+ "unix",
+ "osx",
+ "nullability"
+])
Verbose = 1
@@ -201,46 +163,38 @@ Verbose = 1
# Test harness logic.
#------------------------------------------------------------------------------
-# Run pre-processing script if any.
+
def runCleanupScript(Dir, PBuildLogFile):
+ """
+ Run pre-processing script if any.
+ """
Cwd = os.path.join(Dir, PatchedSourceDirName)
ScriptPath = os.path.join(Dir, CleanupScript)
- runScript(ScriptPath, PBuildLogFile, Cwd)
+ SATestUtils.runScript(ScriptPath, PBuildLogFile, Cwd)
+
-# Run the script to download the project, if it exists.
def runDownloadScript(Dir, PBuildLogFile):
+ """
+ Run the script to download the project, if it exists.
+ """
ScriptPath = os.path.join(Dir, DownloadScript)
- runScript(ScriptPath, PBuildLogFile, Dir)
+ SATestUtils.runScript(ScriptPath, PBuildLogFile, Dir)
-# Run the provided script if it exists.
-def runScript(ScriptPath, PBuildLogFile, Cwd):
- if os.path.exists(ScriptPath):
- try:
- if Verbose == 1:
- print " Executing: %s" % (ScriptPath,)
- check_call("chmod +x '%s'" % ScriptPath, cwd = Cwd,
- stderr=PBuildLogFile,
- stdout=PBuildLogFile,
- shell=True)
- check_call("'%s'" % ScriptPath, cwd = Cwd, stderr=PBuildLogFile,
- stdout=PBuildLogFile,
- shell=True)
- except:
- print "Error: Running %s failed. See %s for details." % (ScriptPath,
- PBuildLogFile.name)
- sys.exit(-1)
-# Download the project and apply the local patchfile if it exists.
def downloadAndPatch(Dir, PBuildLogFile):
+ """
+ Download the project and apply the local patchfile if it exists.
+ """
CachedSourceDirPath = os.path.join(Dir, CachedSourceDirName)
# If the we don't already have the cached source, run the project's
# download script to download it.
if not os.path.exists(CachedSourceDirPath):
- runDownloadScript(Dir, PBuildLogFile)
- if not os.path.exists(CachedSourceDirPath):
- print "Error: '%s' not found after download." % (CachedSourceDirPath)
- exit(-1)
+ runDownloadScript(Dir, PBuildLogFile)
+ if not os.path.exists(CachedSourceDirPath):
+ print "Error: '%s' not found after download." % (
+ CachedSourceDirPath)
+ exit(1)
PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
@@ -252,6 +206,7 @@ def downloadAndPatch(Dir, PBuildLogFile):
shutil.copytree(CachedSourceDirPath, PatchedSourceDirPath, symlinks=True)
applyPatch(Dir, PBuildLogFile)
+
def applyPatch(Dir, PBuildLogFile):
PatchfilePath = os.path.join(Dir, PatchfileName)
PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
@@ -262,30 +217,33 @@ def applyPatch(Dir, PBuildLogFile):
print " Applying patch."
try:
check_call("patch -p1 < '%s'" % (PatchfilePath),
- cwd = PatchedSourceDirPath,
- stderr=PBuildLogFile,
- stdout=PBuildLogFile,
- shell=True)
+ cwd=PatchedSourceDirPath,
+ stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
except:
print "Error: Patch failed. See %s for details." % (PBuildLogFile.name)
- sys.exit(-1)
+ sys.exit(1)
+
-# Build the project with scan-build by reading in the commands and
-# prefixing them with the scan-build options.
def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
+ """
+ Build the project with scan-build by reading in the commands and
+ prefixing them with the scan-build options.
+ """
BuildScriptPath = os.path.join(Dir, BuildScript)
if not os.path.exists(BuildScriptPath):
print "Error: build script is not defined: %s" % BuildScriptPath
- sys.exit(-1)
+ sys.exit(1)
AllCheckers = Checkers
- if os.environ.has_key('SA_ADDITIONAL_CHECKERS'):
+ if 'SA_ADDITIONAL_CHECKERS' in os.environ:
AllCheckers = AllCheckers + ',' + os.environ['SA_ADDITIONAL_CHECKERS']
# Run scan-build from within the patched source directory.
SBCwd = os.path.join(Dir, PatchedSourceDirName)
- SBOptions = "--use-analyzer '%s' " % Clang
+ SBOptions = "--use-analyzer '%s' " % Clang
SBOptions += "-plist-html -o '%s' " % SBOutputDir
SBOptions += "-enable-checker " + AllCheckers + " "
SBOptions += "--keep-empty "
@@ -298,80 +256,63 @@ def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
for Command in SBCommandFile:
Command = Command.strip()
if len(Command) == 0:
- continue;
+ continue
# If using 'make', auto imply a -jX argument
# to speed up analysis. xcodebuild will
# automatically use the maximum number of cores.
if (Command.startswith("make ") or Command == "make") and \
- "-j" not in Command:
+ "-j" not in Command:
Command += " -j%d" % Jobs
SBCommand = SBPrefix + Command
if Verbose == 1:
print " Executing: %s" % (SBCommand,)
- check_call(SBCommand, cwd = SBCwd, stderr=PBuildLogFile,
- stdout=PBuildLogFile,
- shell=True)
- except:
- print "Error: scan-build failed. See ",PBuildLogFile.name,\
- " for details."
- raise
-
-def hasNoExtension(FileName):
- (Root, Ext) = os.path.splitext(FileName)
- if ((Ext == "")) :
- return True
- return False
-
-def isValidSingleInputFile(FileName):
- (Root, Ext) = os.path.splitext(FileName)
- if ((Ext == ".i") | (Ext == ".ii") |
- (Ext == ".c") | (Ext == ".cpp") |
- (Ext == ".m") | (Ext == "")) :
- return True
- return False
-
-# Get the path to the SDK for the given SDK name. Returns None if
-# the path cannot be determined.
-def getSDKPath(SDKName):
- if which("xcrun") is None:
- return None
+ check_call(SBCommand, cwd=SBCwd,
+ stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
+ except CalledProcessError:
+ print "Error: scan-build failed. Its output was: "
+ PBuildLogFile.seek(0)
+ shutil.copyfileobj(PBuildLogFile, sys.stdout)
+ sys.exit(1)
- Cmd = "xcrun --sdk " + SDKName + " --show-sdk-path"
- return check_output(Cmd, shell=True).rstrip()
-# Run analysis on a set of preprocessed files.
def runAnalyzePreprocessed(Dir, SBOutputDir, Mode):
+ """
+ Run analysis on a set of preprocessed files.
+ """
if os.path.exists(os.path.join(Dir, BuildScript)):
print "Error: The preprocessed files project should not contain %s" % \
- BuildScript
+ BuildScript
raise Exception()
CmdPrefix = Clang + " -cc1 "
# For now, we assume the preprocessed files should be analyzed
# with the OS X SDK.
- SDKPath = getSDKPath("macosx")
+ SDKPath = SATestUtils.getSDKPath("macosx")
if SDKPath is not None:
- CmdPrefix += "-isysroot " + SDKPath + " "
+ CmdPrefix += "-isysroot " + SDKPath + " "
CmdPrefix += "-analyze -analyzer-output=plist -w "
- CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks "
+ CmdPrefix += "-analyzer-checker=" + Checkers
+ CmdPrefix += " -fcxx-exceptions -fblocks "
- if (Mode == 2) :
+ if (Mode == 2):
CmdPrefix += "-std=c++11 "
PlistPath = os.path.join(Dir, SBOutputDir, "date")
- FailPath = os.path.join(PlistPath, "failures");
- os.makedirs(FailPath);
+ FailPath = os.path.join(PlistPath, "failures")
+ os.makedirs(FailPath)
for FullFileName in glob.glob(Dir + "/*"):
FileName = os.path.basename(FullFileName)
Failed = False
# Only run the analyzes on supported files.
- if (hasNoExtension(FileName)):
+ if SATestUtils.hasNoExtension(FileName):
continue
- if (isValidSingleInputFile(FileName) == False):
+ if not SATestUtils.isValidSingleInputFile(FileName):
print "Error: Invalid single input file %s." % (FullFileName,)
raise Exception()
@@ -382,44 +323,47 @@ def runAnalyzePreprocessed(Dir, SBOutputDir, Mode):
try:
if Verbose == 1:
print " Executing: %s" % (Command,)
- check_call(Command, cwd = Dir, stderr=LogFile,
- stdout=LogFile,
- shell=True)
+ check_call(Command, cwd=Dir, stderr=LogFile,
+ stdout=LogFile,
+ shell=True)
except CalledProcessError, e:
print "Error: Analyzes of %s failed. See %s for details." \
- "Error code %d." % \
- (FullFileName, LogFile.name, e.returncode)
+ "Error code %d." % (
+ FullFileName, LogFile.name, e.returncode)
Failed = True
finally:
LogFile.close()
# If command did not fail, erase the log file.
- if Failed == False:
- os.remove(LogFile.name);
+ if not Failed:
+ os.remove(LogFile.name)
+
def getBuildLogPath(SBOutputDir):
- return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+ return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+
def removeLogFile(SBOutputDir):
- BuildLogPath = getBuildLogPath(SBOutputDir)
- # Clean up the log file.
- if (os.path.exists(BuildLogPath)) :
- RmCommand = "rm '%s'" % BuildLogPath
- if Verbose == 1:
- print " Executing: %s" % (RmCommand,)
- check_call(RmCommand, shell=True)
+ BuildLogPath = getBuildLogPath(SBOutputDir)
+ # Clean up the log file.
+ if (os.path.exists(BuildLogPath)):
+ RmCommand = "rm '%s'" % BuildLogPath
+ if Verbose == 1:
+ print " Executing: %s" % (RmCommand,)
+ check_call(RmCommand, shell=True)
+
def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
TBegin = time.time()
BuildLogPath = getBuildLogPath(SBOutputDir)
print "Log file: %s" % (BuildLogPath,)
- print "Output directory: %s" %(SBOutputDir, )
+ print "Output directory: %s" % (SBOutputDir, )
removeLogFile(SBOutputDir)
# Clean up scan build results.
- if (os.path.exists(SBOutputDir)) :
+ if (os.path.exists(SBOutputDir)):
RmCommand = "rm -r '%s'" % SBOutputDir
if Verbose == 1:
print " Executing: %s" % (RmCommand,)
@@ -427,11 +371,8 @@ def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
assert(not os.path.exists(SBOutputDir))
os.makedirs(os.path.join(SBOutputDir, LogFolderName))
- # Open the log file.
- PBuildLogFile = open(BuildLogPath, "wb+")
-
# Build and analyze the project.
- try:
+ with open(BuildLogPath, "wb+") as PBuildLogFile:
if (ProjectBuildMode == 1):
downloadAndPatch(Dir, PBuildLogFile)
runCleanupScript(Dir, PBuildLogFile)
@@ -439,34 +380,48 @@ def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
else:
runAnalyzePreprocessed(Dir, SBOutputDir, ProjectBuildMode)
- if IsReferenceBuild :
+ if IsReferenceBuild:
runCleanupScript(Dir, PBuildLogFile)
+ normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode)
- # Make the absolute paths relative in the reference results.
- for (DirPath, Dirnames, Filenames) in os.walk(SBOutputDir):
- for F in Filenames:
- if (not F.endswith('plist')):
- continue
- Plist = os.path.join(DirPath, F)
- Data = plistlib.readPlist(Plist)
- PathPrefix = Dir
- if (ProjectBuildMode == 1):
- PathPrefix = os.path.join(Dir, PatchedSourceDirName)
- Paths = [SourceFile[len(PathPrefix)+1:]\
- if SourceFile.startswith(PathPrefix)\
- else SourceFile for SourceFile in Data['files']]
- Data['files'] = Paths
- plistlib.writePlist(Data, Plist)
+ print "Build complete (time: %.2f). See the log for more details: %s" % \
+ ((time.time() - TBegin), BuildLogPath)
- finally:
- PBuildLogFile.close()
- print "Build complete (time: %.2f). See the log for more details: %s" % \
- ((time.time()-TBegin), BuildLogPath)
+def normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode):
+ """
+ Make the absolute paths relative in the reference results.
+ """
+ for (DirPath, Dirnames, Filenames) in os.walk(SBOutputDir):
+ for F in Filenames:
+ if (not F.endswith('plist')):
+ continue
+ Plist = os.path.join(DirPath, F)
+ Data = plistlib.readPlist(Plist)
+ PathPrefix = Dir
+ if (ProjectBuildMode == 1):
+ PathPrefix = os.path.join(Dir, PatchedSourceDirName)
+ Paths = [SourceFile[len(PathPrefix) + 1:]
+ if SourceFile.startswith(PathPrefix)
+ else SourceFile for SourceFile in Data['files']]
+ Data['files'] = Paths
+
+ # Remove transient fields which change from run to run.
+ for Diag in Data['diagnostics']:
+ if 'HTMLDiagnostics_files' in Diag:
+ Diag.pop('HTMLDiagnostics_files')
+ if 'clang_version' in Data:
+ Data.pop('clang_version')
+
+ plistlib.writePlist(Data, Plist)
+
-# A plist file is created for each call to the analyzer(each source file).
-# We are only interested on the once that have bug reports, so delete the rest.
def CleanUpEmptyPlists(SBOutputDir):
+ """
+    A plist file is created for each call to the analyzer (each source file).
+    We are only interested in the ones that have bug reports,
+ so delete the rest.
+ """
for F in glob.glob(SBOutputDir + "/*/*.plist"):
P = os.path.join(SBOutputDir, F)
@@ -476,62 +431,65 @@ def CleanUpEmptyPlists(SBOutputDir):
os.remove(P)
continue
-# Given the scan-build output directory, checks if the build failed
-# (by searching for the failures directories). If there are failures, it
-# creates a summary file in the output directory.
+
+def CleanUpEmptyFolders(SBOutputDir):
+ """
+ Remove empty folders from results, as git would not store them.
+ """
+ Subfolders = glob.glob(SBOutputDir + "/*")
+ for Folder in Subfolders:
+ if not os.listdir(Folder):
+ os.removedirs(Folder)
+
+
def checkBuild(SBOutputDir):
+ """
+ Given the scan-build output directory, checks if the build failed
+ (by searching for the failures directories). If there are failures, it
+ creates a summary file in the output directory.
+
+ """
# Check if there are failures.
Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt")
- TotalFailed = len(Failures);
+ TotalFailed = len(Failures)
if TotalFailed == 0:
CleanUpEmptyPlists(SBOutputDir)
+ CleanUpEmptyFolders(SBOutputDir)
Plists = glob.glob(SBOutputDir + "/*/*.plist")
print "Number of bug reports (non-empty plist files) produced: %d" %\
- len(Plists)
- return;
-
- # Create summary file to display when the build fails.
- SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName)
- if (Verbose > 0):
- print " Creating the failures summary file %s" % (SummaryPath,)
+ len(Plists)
+ return
- SummaryLog = open(SummaryPath, "w+")
- try:
- SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,))
- if TotalFailed > NumOfFailuresInSummary:
- SummaryLog.write("See the first %d below.\n"
- % (NumOfFailuresInSummary,))
+ print "Error: analysis failed."
+ print "Total of %d failures discovered." % TotalFailed
+ if TotalFailed > NumOfFailuresInSummary:
+ print "See the first %d below.\n" % NumOfFailuresInSummary
# TODO: Add a line "See the results folder for more."
- FailuresCopied = NumOfFailuresInSummary
- Idx = 0
- for FailLogPathI in Failures:
- if Idx >= NumOfFailuresInSummary:
- break;
- Idx += 1
- SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,));
- FailLogI = open(FailLogPathI, "r");
- try:
- shutil.copyfileobj(FailLogI, SummaryLog);
- finally:
- FailLogI.close()
- finally:
- SummaryLog.close()
+ Idx = 0
+ for FailLogPathI in Failures:
+ if Idx >= NumOfFailuresInSummary:
+ break
+ Idx += 1
+ print "\n-- Error #%d -----------\n" % Idx
+ with open(FailLogPathI, "r") as FailLogI:
+ shutil.copyfileobj(FailLogI, sys.stdout)
- print "Error: analysis failed. See ", SummaryPath
- sys.exit(-1)
+ sys.exit(1)
-# Auxiliary object to discard stdout.
-class Discarder(object):
- def write(self, text):
- pass # do nothing
-# Compare the warnings produced by scan-build.
-# Strictness defines the success criteria for the test:
-# 0 - success if there are no crashes or analyzer failure.
-# 1 - success if there are no difference in the number of reported bugs.
-# 2 - success if all the bug reports are identical.
-def runCmpResults(Dir, Strictness = 0):
+def runCmpResults(Dir, Strictness=0):
+ """
+ Compare the warnings produced by scan-build.
+ Strictness defines the success criteria for the test:
+ 0 - success if there are no crashes or analyzer failure.
+ 1 - success if there are no difference in the number of reported bugs.
+ 2 - success if all the bug reports are identical.
+
+ :return success: Whether tests pass according to the Strictness
+ criteria.
+ """
+ TestsPassed = True
TBegin = time.time()
RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName)
@@ -547,9 +505,10 @@ def runCmpResults(Dir, Strictness = 0):
RefList.remove(RefLogDir)
NewList.remove(os.path.join(NewDir, LogFolderName))
- if len(RefList) == 0 or len(NewList) == 0:
- return False
- assert(len(RefList) == len(NewList))
+ if len(RefList) != len(NewList):
+ print "Mismatch in number of results folders: %s vs %s" % (
+ RefList, NewList)
+ sys.exit(1)
# There might be more then one folder underneath - one per each scan-build
# command (Ex: one for configure and one for make).
@@ -569,32 +528,31 @@ def runCmpResults(Dir, Strictness = 0):
if Verbose == 1:
print " Comparing Results: %s %s" % (RefDir, NewDir)
- DiffsPath = os.path.join(NewDir, DiffsSummaryFileName)
PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
- Opts = CmpRuns.CmpOptions(DiffsPath, "", PatchedSourceDirPath)
- # Discard everything coming out of stdout (CmpRun produces a lot of them).
- OLD_STDOUT = sys.stdout
- sys.stdout = Discarder()
+ Opts = CmpRuns.CmpOptions(rootA="", rootB=PatchedSourceDirPath)
# Scan the results, delete empty plist files.
NumDiffs, ReportsInRef, ReportsInNew = \
CmpRuns.dumpScanBuildResultsDiff(RefDir, NewDir, Opts, False)
- sys.stdout = OLD_STDOUT
- if (NumDiffs > 0) :
- print "Warning: %r differences in diagnostics. See %s" % \
- (NumDiffs, DiffsPath,)
+ if (NumDiffs > 0):
+ print "Warning: %s differences in diagnostics." % NumDiffs
if Strictness >= 2 and NumDiffs > 0:
print "Error: Diffs found in strict mode (2)."
- sys.exit(-1)
+ TestsPassed = False
elif Strictness >= 1 and ReportsInRef != ReportsInNew:
- print "Error: The number of results are different in strict mode (1)."
- sys.exit(-1)
+ print "Error: The number of results are different in "\
+ "strict mode (1)."
+ TestsPassed = False
+
+ print "Diagnostic comparison complete (time: %.2f)." % (
+ time.time() - TBegin)
+ return TestsPassed
- print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin)
- return (NumDiffs > 0)
def cleanupReferenceResults(SBOutputDir):
- # Delete html, css, and js files from reference results. These can
- # include multiple copies of the benchmark source and so get very large.
+ """
+ Delete html, css, and js files from reference results. These can
+ include multiple copies of the benchmark source and so get very large.
+ """
Extensions = ["html", "css", "js"]
for E in Extensions:
for F in glob.glob("%s/*/*.%s" % (SBOutputDir, E)):
@@ -605,42 +563,18 @@ def cleanupReferenceResults(SBOutputDir):
# Remove the log file. It leaks absolute path names.
removeLogFile(SBOutputDir)
-def updateSVN(Mode, ProjectsMap):
- try:
- ProjectsMap.seek(0)
- for I in csv.reader(ProjectsMap):
- ProjName = I[0]
- Path = os.path.join(ProjName, getSBOutputDirName(True))
-
- if Mode == "delete":
- Command = "svn delete '%s'" % (Path,)
- else:
- Command = "svn add '%s'" % (Path,)
-
- if Verbose == 1:
- print " Executing: %s" % (Command,)
- check_call(Command, shell=True)
- if Mode == "delete":
- CommitCommand = "svn commit -m \"[analyzer tests] Remove " \
- "reference results.\""
- else:
- CommitCommand = "svn commit -m \"[analyzer tests] Add new " \
- "reference results.\""
- if Verbose == 1:
- print " Executing: %s" % (CommitCommand,)
- check_call(CommitCommand, shell=True)
- except:
- print "Error: SVN update failed."
- sys.exit(-1)
-
-def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, Strictness = 0):
+def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Strictness=0):
+ """
+ Test a given project.
+ :return TestsPassed: Whether tests have passed according
+ to the :param Strictness: criteria.
+ """
print " \n\n--- Building project %s" % (ID,)
TBegin = time.time()
- if Dir is None :
- Dir = getProjectDir(ID)
+ Dir = getProjectDir(ID)
if Verbose == 1:
print " Build directory: %s." % (Dir,)
@@ -652,76 +586,78 @@ def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, Strictne
checkBuild(SBOutputDir)
- if IsReferenceBuild == False:
- runCmpResults(Dir, Strictness)
- else:
+ if IsReferenceBuild:
cleanupReferenceResults(SBOutputDir)
+ TestsPassed = True
+ else:
+ TestsPassed = runCmpResults(Dir, Strictness)
print "Completed tests for project %s (time: %.2f)." % \
- (ID, (time.time()-TBegin))
+ (ID, (time.time() - TBegin))
+ return TestsPassed
-def isCommentCSVLine(Entries):
- # Treat CSV lines starting with a '#' as a comment.
- return len(Entries) > 0 and Entries[0].startswith("#")
-def testAll(IsReferenceBuild = False, UpdateSVN = False, Strictness = 0):
- PMapFile = open(getProjectMapPath(), "rb")
- try:
- # Validate the input.
- for I in csv.reader(PMapFile):
- if (isCommentCSVLine(I)):
- continue
- if (len(I) != 2) :
- print "Error: Rows in the ProjectMapFile should have 3 entries."
- raise Exception()
- if (not ((I[1] == "0") | (I[1] == "1") | (I[1] == "2"))):
- print "Error: Second entry in the ProjectMapFile should be 0" \
- " (single file), 1 (project), or 2(single file c++11)."
- raise Exception()
+def projectFileHandler():
+ return open(getProjectMapPath(), "rb")
- # When we are regenerating the reference results, we might need to
- # update svn. Remove reference results from SVN.
- if UpdateSVN == True:
- assert(IsReferenceBuild == True);
- updateSVN("delete", PMapFile);
- # Test the projects.
- PMapFile.seek(0)
- for I in csv.reader(PMapFile):
- if isCommentCSVLine(I):
- continue;
- testProject(I[0], int(I[1]), IsReferenceBuild, None, Strictness)
+def iterateOverProjects(PMapFile):
+ """
+ Iterate over all projects defined in the project file handler `PMapFile`
+ from the start.
+ """
+ PMapFile.seek(0)
+ for I in csv.reader(PMapFile):
+ if (SATestUtils.isCommentCSVLine(I)):
+ continue
+ yield I
- # Add reference results to SVN.
- if UpdateSVN == True:
- updateSVN("add", PMapFile);
- except:
- print "Error occurred. Premature termination."
- raise
- finally:
- PMapFile.close()
+def validateProjectFile(PMapFile):
+ """
+ Validate project file.
+ """
+ for I in iterateOverProjects(PMapFile):
+ if len(I) != 2:
+ print "Error: Rows in the ProjectMapFile should have 2 entries."
+ raise Exception()
+ if I[1] not in ('0', '1', '2'):
+ print "Error: Second entry in the ProjectMapFile should be 0" \
+ " (single file), 1 (project), or 2(single file c++11)."
+ raise Exception()
+
+
+def testAll(IsReferenceBuild=False, Strictness=0):
+ TestsPassed = True
+ with projectFileHandler() as PMapFile:
+ validateProjectFile(PMapFile)
+
+ # Test the projects.
+ for (ProjName, ProjBuildMode) in iterateOverProjects(PMapFile):
+ TestsPassed &= testProject(
+ ProjName, int(ProjBuildMode), IsReferenceBuild, Strictness)
+ return TestsPassed
+
if __name__ == '__main__':
# Parse command line arguments.
- Parser = argparse.ArgumentParser(description='Test the Clang Static Analyzer.')
+ Parser = argparse.ArgumentParser(
+ description='Test the Clang Static Analyzer.')
Parser.add_argument('--strictness', dest='strictness', type=int, default=0,
- help='0 to fail on runtime errors, 1 to fail when the number\
- of found bugs are different from the reference, 2 to \
- fail on any difference from the reference. Default is 0.')
- Parser.add_argument('-r', dest='regenerate', action='store_true', default=False,
- help='Regenerate reference output.')
- Parser.add_argument('-rs', dest='update_reference', action='store_true',
- default=False, help='Regenerate reference output and update svn.')
+ help='0 to fail on runtime errors, 1 to fail when the \
+ number of found bugs are different from the \
+ reference, 2 to fail on any difference from the \
+ reference. Default is 0.')
+ Parser.add_argument('-r', dest='regenerate', action='store_true',
+ default=False, help='Regenerate reference output.')
Args = Parser.parse_args()
IsReference = False
- UpdateSVN = False
Strictness = Args.strictness
if Args.regenerate:
IsReference = True
- elif Args.update_reference:
- IsReference = True
- UpdateSVN = True
- testAll(IsReference, UpdateSVN, Strictness)
+ TestsPassed = testAll(IsReference, Strictness)
+ if not TestsPassed:
+ print "ERROR: Tests failed."
+ sys.exit(42)
diff --git a/utils/analyzer/SATestUpdateDiffs.py b/utils/analyzer/SATestUpdateDiffs.py
new file mode 100755
index 000000000000..2282af15a523
--- /dev/null
+++ b/utils/analyzer/SATestUpdateDiffs.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+"""
+Update reference results for static analyzer.
+"""
+
+import SATestBuild
+
+from subprocess import check_call
+import os
+import sys
+
+Verbose = 1
+
+
+def runCmd(Command):
+ if Verbose:
+ print "Executing %s" % Command
+ check_call(Command, shell=True)
+
+
+def updateReferenceResults(ProjName, ProjBuildMode):
+ ProjDir = SATestBuild.getProjectDir(ProjName)
+
+ RefResultsPath = os.path.join(
+ ProjDir,
+ SATestBuild.getSBOutputDirName(IsReferenceBuild=True))
+ CreatedResultsPath = os.path.join(
+ ProjDir,
+ SATestBuild.getSBOutputDirName(IsReferenceBuild=False))
+
+ if not os.path.exists(CreatedResultsPath):
+ print >> sys.stderr, "New results not found, was SATestBuild.py "\
+ "previously run?"
+ sys.exit(1)
+
+ # Remove reference results: in git, and then again for a good measure
+ # with rm, as git might not remove things fully if there are empty
+ # directories involved.
+ runCmd('git rm -r -q "%s"' % (RefResultsPath,))
+ runCmd('rm -rf "%s"' % (RefResultsPath,))
+
+    # Replace reference results with freshly computed ones.
+ runCmd('cp -r "%s" "%s"' % (CreatedResultsPath, RefResultsPath,))
+
+ # Run cleanup script.
+ BuildLogPath = SATestBuild.getBuildLogPath(RefResultsPath)
+ with open(BuildLogPath, "wb+") as PBuildLogFile:
+ SATestBuild.runCleanupScript(ProjDir, PBuildLogFile)
+
+ SATestBuild.normalizeReferenceResults(
+ ProjDir, RefResultsPath, ProjBuildMode)
+
+ # Clean up the generated difference results.
+ SATestBuild.cleanupReferenceResults(RefResultsPath)
+
+ runCmd('git add "%s"' % (RefResultsPath,))
+
+
+def main(argv):
+ if len(argv) == 2 and argv[1] in ('-h', '--help'):
+ print >> sys.stderr, "Update static analyzer reference results based "\
+ "\non the previous run of SATestBuild.py.\n"\
+ "\nN.B.: Assumes that SATestBuild.py was just run"
+ sys.exit(1)
+
+ with SATestBuild.projectFileHandler() as f:
+ for (ProjName, ProjBuildMode) in SATestBuild.iterateOverProjects(f):
+ updateReferenceResults(ProjName, int(ProjBuildMode))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/utils/analyzer/SATestUtils.py b/utils/analyzer/SATestUtils.py
new file mode 100644
index 000000000000..9220acc1bdbe
--- /dev/null
+++ b/utils/analyzer/SATestUtils.py
@@ -0,0 +1,100 @@
+import os
+from subprocess import check_output, check_call
+import sys
+
+
+Verbose = 1
+
+def which(command, paths=None):
+ """which(command, [paths]) - Look up the given command in the paths string
+ (or the PATH environment variable, if unspecified)."""
+
+ if paths is None:
+ paths = os.environ.get('PATH', '')
+
+ # Check for absolute match first.
+ if os.path.exists(command):
+ return command
+
+ # Would be nice if Python had a lib function for this.
+ if not paths:
+ paths = os.defpath
+
+ # Get suffixes to search.
+ # On Cygwin, 'PATHEXT' may exist but it should not be used.
+ if os.pathsep == ';':
+ pathext = os.environ.get('PATHEXT', '').split(';')
+ else:
+ pathext = ['']
+
+ # Search the paths...
+ for path in paths.split(os.pathsep):
+ for ext in pathext:
+ p = os.path.join(path, command + ext)
+ if os.path.exists(p):
+ return p
+
+ return None
+
+
+class flushfile(object):
+ """
+ Wrapper to flush the output after every print statement.
+ """
+ def __init__(self, f):
+ self.f = f
+
+ def write(self, x):
+ self.f.write(x)
+ self.f.flush()
+
+
+def hasNoExtension(FileName):
+ (Root, Ext) = os.path.splitext(FileName)
+ return (Ext == "")
+
+
+def isValidSingleInputFile(FileName):
+ (Root, Ext) = os.path.splitext(FileName)
+ return Ext in (".i", ".ii", ".c", ".cpp", ".m", "")
+
+
+def getSDKPath(SDKName):
+ """
+ Get the path to the SDK for the given SDK name. Returns None if
+ the path cannot be determined.
+ """
+ if which("xcrun") is None:
+ return None
+
+ Cmd = "xcrun --sdk " + SDKName + " --show-sdk-path"
+ return check_output(Cmd, shell=True).rstrip()
+
+
+def runScript(ScriptPath, PBuildLogFile, Cwd):
+ """
+ Run the provided script if it exists.
+ """
+ if os.path.exists(ScriptPath):
+ try:
+ if Verbose == 1:
+ print " Executing: %s" % (ScriptPath,)
+ check_call("chmod +x '%s'" % ScriptPath, cwd=Cwd,
+ stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
+ check_call("'%s'" % ScriptPath, cwd=Cwd,
+ stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
+ except:
+ print "Error: Running %s failed. See %s for details." % (
+ ScriptPath, PBuildLogFile.name)
+ sys.exit(-1)
+
+
+def isCommentCSVLine(Entries):
+ """
+ Treat CSV lines starting with a '#' as a comment.
+ """
+ return len(Entries) > 0 and Entries[0].startswith("#")
diff --git a/utils/analyzer/SumTimerInfo.py b/utils/analyzer/SumTimerInfo.py
index 0c3585bbc279..50e1cb854f4e 100644
--- a/utils/analyzer/SumTimerInfo.py
+++ b/utils/analyzer/SumTimerInfo.py
@@ -5,11 +5,8 @@ Script to Summarize statistics in the scan-build output.
Statistics are enabled by passing '-internal-stats' option to scan-build
(or '-analyzer-stats' to the analyzer).
-
"""
-import string
-from operator import itemgetter
import sys
if __name__ == '__main__':
@@ -31,44 +28,42 @@ if __name__ == '__main__':
NumInlinedCallSites = 0
NumBifurcatedCallSites = 0
MaxCFGSize = 0
- Mode = 1
for line in f:
- if ("Miscellaneous Ungrouped Timers" in line) :
- Mode = 1
- if (("Analyzer Total Time" in line) and (Mode == 1)) :
- s = line.split()
- Time = Time + float(s[6])
- Count = Count + 1
- if (float(s[6]) > MaxTime) :
- MaxTime = float(s[6])
- if ((("warning generated." in line) or ("warnings generated" in line)) and Mode == 1) :
- s = line.split()
- Warnings = Warnings + int(s[0])
- if (("The # of functions analysed (as top level)" in line) and (Mode == 1)) :
- s = line.split()
- FunctionsAnalyzed = FunctionsAnalyzed + int(s[0])
- if (("The % of reachable basic blocks" in line) and (Mode == 1)) :
- s = line.split()
- ReachableBlocks = ReachableBlocks + int(s[0])
- if (("The # of times we reached the max number of steps" in line) and (Mode == 1)) :
- s = line.split()
- ReachedMaxSteps = ReachedMaxSteps + int(s[0])
- if (("The maximum number of basic blocks in a function" in line) and (Mode == 1)) :
- s = line.split()
- if (MaxCFGSize < int(s[0])) :
- MaxCFGSize = int(s[0])
- if (("The # of steps executed" in line) and (Mode == 1)) :
- s = line.split()
- NumSteps = NumSteps + int(s[0])
- if (("The # of times we inlined a call" in line) and (Mode == 1)) :
- s = line.split()
- NumInlinedCallSites = NumInlinedCallSites + int(s[0])
- if (("The # of times we split the path due to imprecise dynamic dispatch info" in line) and (Mode == 1)) :
- s = line.split()
- NumBifurcatedCallSites = NumBifurcatedCallSites + int(s[0])
- if ((") Total" in line) and (Mode == 1)) :
- s = line.split()
- TotalTime = TotalTime + float(s[6])
+ if ("Analyzer Total Time" in line):
+ s = line.split()
+ Time = Time + float(s[6])
+ Count = Count + 1
+ if (float(s[6]) > MaxTime):
+ MaxTime = float(s[6])
+ if ("warning generated." in line) or ("warnings generated" in line):
+ s = line.split()
+ Warnings = Warnings + int(s[0])
+ if "The # of functions analysed (as top level)" in line:
+ s = line.split()
+ FunctionsAnalyzed = FunctionsAnalyzed + int(s[0])
+ if "The % of reachable basic blocks" in line:
+ s = line.split()
+ ReachableBlocks = ReachableBlocks + int(s[0])
+ if "The # of times we reached the max number of steps" in line:
+ s = line.split()
+ ReachedMaxSteps = ReachedMaxSteps + int(s[0])
+ if "The maximum number of basic blocks in a function" in line:
+ s = line.split()
+ if MaxCFGSize < int(s[0]):
+ MaxCFGSize = int(s[0])
+ if "The # of steps executed" in line:
+ s = line.split()
+ NumSteps = NumSteps + int(s[0])
+ if "The # of times we inlined a call" in line:
+ s = line.split()
+ NumInlinedCallSites = NumInlinedCallSites + int(s[0])
+        if ("The # of times we split the path due "
+                "to imprecise dynamic dispatch info") in line:
+ s = line.split()
+ NumBifurcatedCallSites = NumBifurcatedCallSites + int(s[0])
+ if ") Total" in line:
+ s = line.split()
+ TotalTime = TotalTime + float(s[6])
print "TU Count %d" % (Count)
print "Time %f" % (Time)
@@ -77,7 +72,8 @@ if __name__ == '__main__':
print "Reachable Blocks %d" % (ReachableBlocks)
print "Reached Max Steps %d" % (ReachedMaxSteps)
print "Number of Steps %d" % (NumSteps)
- print "Number of Inlined calls %d (bifurcated %d)" % (NumInlinedCallSites, NumBifurcatedCallSites)
+ print "Number of Inlined calls %d (bifurcated %d)" % (
+ NumInlinedCallSites, NumBifurcatedCallSites)
print "MaxTime %f" % (MaxTime)
print "TotalTime %f" % (TotalTime)
print "Max CFG Size %d" % (MaxCFGSize)
diff --git a/utils/analyzer/ubiviz b/utils/analyzer/ubiviz
index 9d821c3c10a0..137e130fe74c 100755
--- a/utils/analyzer/ubiviz
+++ b/utils/analyzer/ubiviz
@@ -5,69 +5,72 @@
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
-##===----------------------------------------------------------------------===##
+##===--------------------------------------------------------------------===##
#
# This script reads visualization data emitted by the static analyzer for
# display in Ubigraph.
#
-##===----------------------------------------------------------------------===##
+##===--------------------------------------------------------------------===##
import xmlrpclib
import sys
+
def Error(message):
print >> sys.stderr, 'ubiviz: ' + message
sys.exit(1)
+
def StreamData(filename):
- file = open(filename)
- for ln in file:
- yield eval(ln)
- file.close()
+ file = open(filename)
+ for ln in file:
+ yield eval(ln)
+ file.close()
+
def Display(G, data):
- action = data[0]
- if action == 'vertex':
- vertex = data[1]
- G.new_vertex_w_id(vertex)
- for attribute in data[2:]:
- G.set_vertex_attribute(vertex, attribute[0], attribute[1])
- elif action == 'edge':
- src = data[1]
- dst = data[2]
- edge = G.new_edge(src,dst)
- for attribute in data[3:]:
- G.set_edge_attribute(edge, attribute[0], attribute[1])
- elif action == "vertex_style":
- style_id = data[1]
- parent_id = data[2]
- G.new_vertex_style_w_id(style_id, parent_id)
- for attribute in data[3:]:
- G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
- elif action == "vertex_style_attribute":
- style_id = data[1]
- for attribute in data[2:]:
- G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
- elif action == "change_vertex_style":
- vertex_id = data[1]
- style_id = data[2]
- G.change_vertex_style(vertex_id,style_id)
+ action = data[0]
+ if action == 'vertex':
+ vertex = data[1]
+ G.new_vertex_w_id(vertex)
+ for attribute in data[2:]:
+ G.set_vertex_attribute(vertex, attribute[0], attribute[1])
+ elif action == 'edge':
+ src = data[1]
+ dst = data[2]
+ edge = G.new_edge(src, dst)
+ for attribute in data[3:]:
+ G.set_edge_attribute(edge, attribute[0], attribute[1])
+ elif action == "vertex_style":
+ style_id = data[1]
+ parent_id = data[2]
+ G.new_vertex_style_w_id(style_id, parent_id)
+ for attribute in data[3:]:
+ G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+ elif action == "vertex_style_attribute":
+ style_id = data[1]
+ for attribute in data[2:]:
+ G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+ elif action == "change_vertex_style":
+ vertex_id = data[1]
+ style_id = data[2]
+ G.change_vertex_style(vertex_id, style_id)
+
def main(args):
- if len(args) == 0:
- Error('no input files')
+ if len(args) == 0:
+ Error('no input files')
- server = xmlrpclib.Server('http://127.0.0.1:20738/RPC2')
- G = server.ubigraph
+ server = xmlrpclib.Server('http://127.0.0.1:20738/RPC2')
+ G = server.ubigraph
- for arg in args:
- G.clear()
- for x in StreamData(arg):
- Display(G,x)
+ for arg in args:
+ G.clear()
+ for x in StreamData(arg):
+ Display(G, x)
- sys.exit(0)
+ sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
-
diff --git a/utils/clangdiag.py b/utils/clangdiag.py
new file mode 100755
index 000000000000..f434bfeaa4c1
--- /dev/null
+++ b/utils/clangdiag.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+#----------------------------------------------------------------------
+# Be sure to add the python path that points to the LLDB shared library.
+#
+# # To use this in the embedded python interpreter using "lldb" just
+# import it with the full path using the "command script import"
+# command
+# (lldb) command script import /path/to/clangdiag.py
+#----------------------------------------------------------------------
+
+import lldb
+import argparse
+import commands
+import shlex
+import os
+import re
+import subprocess
+
+class MyParser(argparse.ArgumentParser):
+ def format_help(self):
+ return ''' Commands for managing clang diagnostic breakpoints
+
+Syntax: clangdiag enable [<warning>|<diag-name>]
+ clangdiag disable
+ clangdiag diagtool [<path>|reset]
+
+The following subcommands are supported:
+
+ enable -- Enable clang diagnostic breakpoints.
+ disable -- Disable all clang diagnostic breakpoints.
+ diagtool -- Return, set, or reset diagtool path.
+
+This command sets breakpoints in clang, and clang based tools, that
+emit diagnostics. When a diagnostic is emitted, and clangdiag is
+enabled, it will use the appropriate diagtool application to determine
+the name of the DiagID, and set breakpoints in all locations that
+'diag::name' appears in the source. Since the new breakpoints are set
+after they are encountered, users will need to launch the executable a
+second time in order to hit the new breakpoints.
+
+For in-tree builds, the diagtool application, used to map DiagID's to
+names, is found automatically in the same directory as the target
+executable. However, out-of-tree builds must use the 'diagtool'
+subcommand to set the appropriate path for diagtool in the clang debug
+bin directory. Since this mapping is created at build-time, it's
+important for users to use the same version that was generated when
+clang was compiled, or else the id's won't match.
+
+Notes:
+- Substrings can be passed for both <warning> and <diag-name>.
+- If <warning> is passed, only enable the DiagID(s) for that warning.
+- If <diag-name> is passed, only enable that DiagID.
+- Rerunning enable clears existing breakpoints.
+- diagtool is used in breakpoint callbacks, so it can be changed
+ without the need to rerun enable.
+- Adding this to your ~/.lldbinit file makes clangdiag available at startup:
+ "command script import /path/to/clangdiag.py"
+
+'''
+
+def create_diag_options():
+ parser = MyParser(prog='clangdiag')
+ subparsers = parser.add_subparsers(
+ title='subcommands',
+ dest='subcommands',
+ metavar='')
+ disable_parser = subparsers.add_parser('disable')
+ enable_parser = subparsers.add_parser('enable')
+ enable_parser.add_argument('id', nargs='?')
+ diagtool_parser = subparsers.add_parser('diagtool')
+ diagtool_parser.add_argument('path', nargs='?')
+ return parser
+
+def getDiagtool(target, diagtool = None):
+ id = target.GetProcess().GetProcessID()
+ if 'diagtool' not in getDiagtool.__dict__:
+ getDiagtool.diagtool = {}
+ if diagtool:
+ if diagtool == 'reset':
+ getDiagtool.diagtool[id] = None
+ elif os.path.exists(diagtool):
+ getDiagtool.diagtool[id] = diagtool
+ else:
+ print('clangdiag: %s not found.' % diagtool)
+ if not id in getDiagtool.diagtool or not getDiagtool.diagtool[id]:
+ getDiagtool.diagtool[id] = None
+ exe = target.GetExecutable()
+ if not exe.Exists():
+ print('clangdiag: Target (%s) not set.' % exe.GetFilename())
+ else:
+ diagtool = os.path.join(exe.GetDirectory(), 'diagtool')
+ if os.path.exists(diagtool):
+ getDiagtool.diagtool[id] = diagtool
+ else:
+ print('clangdiag: diagtool not found along side %s' % exe)
+
+ return getDiagtool.diagtool[id]
+
+def setDiagBreakpoint(frame, bp_loc, dict):
+ id = frame.FindVariable("DiagID").GetValue()
+ if id is None:
+ print('clangdiag: id is None')
+ return False
+
+ # Don't need to test this time, since we did that in enable.
+ target = frame.GetThread().GetProcess().GetTarget()
+ diagtool = getDiagtool(target)
+ name = subprocess.check_output([diagtool, "find-diagnostic-id", id]).rstrip();
+    # Make sure we only consider errors, warnings, and extensions.
+ # FIXME: Make this configurable?
+ prefixes = ['err_', 'warn_', 'exp_']
+ if len([prefix for prefix in prefixes+[''] if name.startswith(prefix)][0]):
+ bp = target.BreakpointCreateBySourceRegex(name, lldb.SBFileSpec())
+ bp.AddName("clang::Diagnostic")
+
+ return False
+
+def enable(exe_ctx, args):
+ # Always disable existing breakpoints
+ disable(exe_ctx)
+
+ target = exe_ctx.GetTarget()
+ numOfBreakpoints = target.GetNumBreakpoints()
+
+ if args.id:
+        # Make sure we only consider errors, warnings, and extensions.
+ # FIXME: Make this configurable?
+ prefixes = ['err_', 'warn_', 'exp_']
+ if len([prefix for prefix in prefixes+[''] if args.id.startswith(prefix)][0]):
+ bp = target.BreakpointCreateBySourceRegex(args.id, lldb.SBFileSpec())
+ bp.AddName("clang::Diagnostic")
+ else:
+ diagtool = getDiagtool(target)
+ list = subprocess.check_output([diagtool, "list-warnings"]).rstrip();
+ for line in list.splitlines(True):
+ m = re.search(r' *(.*) .*\[\-W' + re.escape(args.id) + r'.*].*', line)
+ # Make sure we only consider warnings.
+ if m and m.group(1).startswith('warn_'):
+ bp = target.BreakpointCreateBySourceRegex(m.group(1), lldb.SBFileSpec())
+ bp.AddName("clang::Diagnostic")
+ else:
+ print('Adding callbacks.')
+ bp = target.BreakpointCreateByName('DiagnosticsEngine::Report')
+ bp.SetScriptCallbackFunction('clangdiag.setDiagBreakpoint')
+ bp.AddName("clang::Diagnostic")
+
+ count = target.GetNumBreakpoints() - numOfBreakpoints
+ print('%i breakpoint%s added.' % (count, "s"[count==1:]))
+
+ return
+
+def disable(exe_ctx):
+ target = exe_ctx.GetTarget()
+ # Remove all diag breakpoints.
+ bkpts = lldb.SBBreakpointList(target)
+ target.FindBreakpointsByName("clang::Diagnostic", bkpts)
+ for i in range(bkpts.GetSize()):
+ target.BreakpointDelete(bkpts.GetBreakpointAtIndex(i).GetID())
+
+ return
+
+def the_diag_command(debugger, command, exe_ctx, result, dict):
+ # Use the Shell Lexer to properly parse up command options just like a
+ # shell would
+ command_args = shlex.split(command)
+ parser = create_diag_options()
+ try:
+ args = parser.parse_args(command_args)
+ except:
+ return
+
+ if args.subcommands == 'enable':
+ enable(exe_ctx, args)
+ elif args.subcommands == 'disable':
+ disable(exe_ctx)
+ else:
+ diagtool = getDiagtool(exe_ctx.GetTarget(), args.path)
+ print('diagtool = %s' % diagtool)
+
+ return
+
+def __lldb_init_module(debugger, dict):
+ # This initializer is being run from LLDB in the embedded command interpreter
+ # Make the options so we can generate the help text for the new LLDB
+ # command line command prior to registering it with LLDB below
+ parser = create_diag_options()
+ the_diag_command.__doc__ = parser.format_help()
+ # Add any commands contained in this module to LLDB
+ debugger.HandleCommand(
+ 'command script add -f clangdiag.the_diag_command clangdiag')
+ print 'The "clangdiag" command has been installed, type "help clangdiag" or "clangdiag --help" for detailed help.'
diff --git a/utils/perf-training/CMakeLists.txt b/utils/perf-training/CMakeLists.txt
index c046a1dac40b..39f9a4ca3c13 100644
--- a/utils/perf-training/CMakeLists.txt
+++ b/utils/perf-training/CMakeLists.txt
@@ -30,13 +30,13 @@ if(LLVM_BUILD_INSTRUMENTED)
endif()
if(NOT LLVM_PROFDATA)
- message(FATAL_ERROR "Must set LLVM_PROFDATA to point to llvm-profdata to use for merging PGO data")
+ message(STATUS "To enable merging PGO data LLVM_PROFDATA has to point to llvm-profdata")
+ else()
+ add_custom_target(generate-profdata
+ COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py merge ${LLVM_PROFDATA} ${CMAKE_CURRENT_BINARY_DIR}/clang.profdata ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "Merging profdata"
+ DEPENDS generate-profraw)
endif()
-
- add_custom_target(generate-profdata
- COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py merge ${LLVM_PROFDATA} ${CMAKE_CURRENT_BINARY_DIR}/clang.profdata ${CMAKE_CURRENT_BINARY_DIR}
- COMMENT "Merging profdata"
- DEPENDS generate-profraw)
endif()
find_program(DTRACE dtrace)