Diffstat (limited to 'contrib/llvm/tools/clang/lib/Lex')
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp            5
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp        49
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Lexer.cpp               78
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp       2
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp            3
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp           66
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp       201
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp      109
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp    13
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp           748
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Pragma.cpp              57
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp        68
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp   4
13 files changed, 349 insertions, 1054 deletions
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
index 24a14b6cdb57..23cb053c2d71 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
@@ -48,7 +48,8 @@ static inline unsigned HashHMapKey(StringRef Str) {
/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
/// If it looks like a HeaderMap but is obviously corrupted, it puts a reason
/// into the string error argument and returns null.
-const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
+std::unique_ptr<HeaderMap> HeaderMap::Create(const FileEntry *FE,
+ FileManager &FM) {
// If the file is too small to be a header map, ignore it.
unsigned FileSize = FE->getSize();
if (FileSize <= sizeof(HMapHeader)) return nullptr;
@@ -59,7 +60,7 @@ const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
bool NeedsByteSwap;
if (!checkHeader(**FileBuffer, NeedsByteSwap))
return nullptr;
- return new HeaderMap(std::move(*FileBuffer), NeedsByteSwap);
+ return std::unique_ptr<HeaderMap>(new HeaderMap(std::move(*FileBuffer), NeedsByteSwap));
}
bool HeaderMapImpl::checkHeader(const llvm::MemoryBuffer &File,
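The hunk above changes HeaderMap::Create to return std::unique_ptr instead of a raw const pointer, making ownership explicit at the call site. Below is a minimal standalone sketch of that factory pattern with made-up names (Map, Create), not clang's actual classes; it assumes only standard C++.

// Standalone sketch (hypothetical types, not clang's): a factory that returns
// std::unique_ptr so callers own the result explicitly.
#include <memory>
#include <string>

class Map {
  explicit Map(std::string Data) : Data(std::move(Data)) {}
  std::string Data;

public:
  // Returns nullptr if the input does not look like a valid map.
  static std::unique_ptr<Map> Create(const std::string &Data) {
    if (Data.empty())
      return nullptr;
    // The constructor is private, so wrap it explicitly instead of
    // calling std::make_unique.
    return std::unique_ptr<Map>(new Map(Data));
  }
};

int main() {
  std::unique_ptr<Map> M = Map::Create("header data");
  return M ? 0 : 1; // ownership is released automatically at scope exit
}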
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
index fbfa54b2fffd..c65fb47c0fe5 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -17,7 +17,6 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderMap.h"
@@ -35,6 +34,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -75,12 +75,6 @@ HeaderSearch::HeaderSearch(std::shared_ptr<HeaderSearchOptions> HSOpts,
FileMgr(SourceMgr.getFileManager()), FrameworkMap(64),
ModMap(SourceMgr, Diags, LangOpts, Target, *this) {}
-HeaderSearch::~HeaderSearch() {
- // Delete headermaps.
- for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
- delete HeaderMaps[i].second;
-}
-
void HeaderSearch::PrintStats() {
fprintf(stderr, "\n*** HeaderSearch Stats:\n");
fprintf(stderr, "%d files tracked.\n", (int)FileInfo.size());
@@ -113,12 +107,12 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
// Pointer equality comparison of FileEntries works because they are
// already uniqued by inode.
if (HeaderMaps[i].first == FE)
- return HeaderMaps[i].second;
+ return HeaderMaps[i].second.get();
}
- if (const HeaderMap *HM = HeaderMap::Create(FE, FileMgr)) {
- HeaderMaps.push_back(std::make_pair(FE, HM));
- return HM;
+ if (std::unique_ptr<HeaderMap> HM = HeaderMap::Create(FE, FileMgr)) {
+ HeaderMaps.emplace_back(FE, std::move(HM));
+ return HeaderMaps.back().second.get();
}
return nullptr;
@@ -654,7 +648,7 @@ static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
++I;
}
- return FoundComp >= 2;
+ return !FrameworkName.empty() && FoundComp >= 2;
}
static void
@@ -1577,20 +1571,21 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
DirNative);
// Search each of the ".framework" directories to load them as modules.
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- if (llvm::sys::path::extension(Dir->getName()) != ".framework")
+ if (llvm::sys::path::extension(Dir->path()) != ".framework")
continue;
const DirectoryEntry *FrameworkDir =
- FileMgr.getDirectory(Dir->getName());
+ FileMgr.getDirectory(Dir->path());
if (!FrameworkDir)
continue;
// Load this framework module.
- loadFrameworkModule(llvm::sys::path::stem(Dir->getName()),
- FrameworkDir, IsSystem);
+ loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
+ IsSystem);
}
continue;
}
@@ -1643,15 +1638,16 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
return;
std::error_code EC;
+ SmallString<128> Dir = SearchDir.getDir()->getName();
+ FileMgr.makeAbsolutePath(Dir);
SmallString<128> DirNative;
- llvm::sys::path::native(SearchDir.getDir()->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::sys::path::native(Dir, DirNative);
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- bool IsFramework =
- llvm::sys::path::extension(Dir->getName()) == ".framework";
+ bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
if (IsFramework == SearchDir.isFramework())
- loadModuleMapFile(Dir->getName(), SearchDir.isSystemHeaderDirectory(),
+ loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(),
SearchDir.isFramework());
}
@@ -1682,9 +1678,8 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
StringRef Dir = SearchDirs[I].getDir()->getName();
llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
if (!WorkingDir.empty() && !path::is_absolute(Dir)) {
- auto err = fs::make_absolute(WorkingDir, DirPath);
- if (!err)
- path::remove_dots(DirPath, /*remove_dot_dot=*/true);
+ fs::make_absolute(WorkingDir, DirPath);
+ path::remove_dots(DirPath, /*remove_dot_dot=*/true);
Dir = DirPath;
}
for (auto NI = path::begin(File), NE = path::end(File),
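The HeaderSearch.cpp hunks above drop the manual deletes in ~HeaderSearch by storing the header maps as std::unique_ptr inside the cache vector. A minimal standalone sketch of that owning-cache pattern follows, using plain std types (Key, Value, Cache are hypothetical stand-ins, not clang's FileEntry/HeaderMap).

// Standalone sketch: cache owned objects in a vector of (key, unique_ptr)
// pairs so no explicit destructor cleanup is needed.
#include <memory>
#include <utility>
#include <vector>

struct Key {};
struct Value { int N = 0; };

class Cache {
  std::vector<std::pair<const Key *, std::unique_ptr<Value>>> Entries;

public:
  // Returns the cached value for K, creating it on first use.
  const Value *get(const Key *K) {
    for (const auto &E : Entries)
      if (E.first == K)          // pointer identity, as with FileEntry*
        return E.second.get();
    Entries.emplace_back(K, std::make_unique<Value>());
    return Entries.back().second.get();
  }
  // No destructor needed: unique_ptr frees every cached Value.
};

int main() {
  Cache C;
  Key K;
  return C.get(&K) == C.get(&K) ? 0 : 1; // second lookup hits the cache
}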
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index e8588a771a43..d4723091114a 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -1015,7 +1015,7 @@ StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
StringRef Lexer::getImmediateMacroNameForDiagnostics(
SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
assert(Loc.isMacroID() && "Only reasonable to call this on macros");
- // Walk past macro argument expanions.
+ // Walk past macro argument expansions.
while (SM.isMacroArgExpansion(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
@@ -1510,8 +1510,17 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
bool operator<(HomoglyphPair R) const { return Character < R.Character; }
};
static constexpr HomoglyphPair SortedHomoglyphs[] = {
+ {U'\u00ad', 0}, // SOFT HYPHEN
{U'\u01c3', '!'}, // LATIN LETTER RETROFLEX CLICK
{U'\u037e', ';'}, // GREEK QUESTION MARK
+ {U'\u200b', 0}, // ZERO WIDTH SPACE
+ {U'\u200c', 0}, // ZERO WIDTH NON-JOINER
+ {U'\u200d', 0}, // ZERO WIDTH JOINER
+ {U'\u2060', 0}, // WORD JOINER
+ {U'\u2061', 0}, // FUNCTION APPLICATION
+ {U'\u2062', 0}, // INVISIBLE TIMES
+ {U'\u2063', 0}, // INVISIBLE SEPARATOR
+ {U'\u2064', 0}, // INVISIBLE PLUS
{U'\u2212', '-'}, // MINUS SIGN
{U'\u2215', '/'}, // DIVISION SLASH
{U'\u2216', '\\'}, // SET MINUS
@@ -1521,6 +1530,7 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
{U'\u2236', ':'}, // RATIO
{U'\u223c', '~'}, // TILDE OPERATOR
{U'\ua789', ':'}, // MODIFIER LETTER COLON
+ {U'\ufeff', 0}, // ZERO WIDTH NO-BREAK SPACE
{U'\uff01', '!'}, // FULLWIDTH EXCLAMATION MARK
{U'\uff03', '#'}, // FULLWIDTH NUMBER SIGN
{U'\uff04', '$'}, // FULLWIDTH DOLLAR SIGN
@@ -1560,9 +1570,14 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
llvm::raw_svector_ostream CharOS(CharBuf);
llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
}
- const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
- Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
- << Range << CharBuf << LooksLikeStr;
+ if (Homoglyph->LooksLike) {
+ const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
+ Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
+ << Range << CharBuf << LooksLikeStr;
+ } else {
+ Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
+ << Range << CharBuf;
+ }
}
}
@@ -1881,6 +1896,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
/// either " or L" or u8" or u" or U".
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
tok::TokenKind Kind) {
+ const char *AfterQuote = CurPtr;
// Does this string contain the \0 character?
const char *NulCharacter = nullptr;
@@ -1909,8 +1925,11 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
if (C == 0) {
if (isCodeCompletionPoint(CurPtr-1)) {
- PP->CodeCompleteNaturalLanguage();
- FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ if (ParsingFilename)
+ codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false);
+ else
+ PP->CodeCompleteNaturalLanguage();
+ FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
cutOffLexing();
return true;
}
@@ -2028,9 +2047,8 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
- if (C == '\n' || C == '\r' || // Newline.
- (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
- isCodeCompletionPoint(CurPtr-1)))) {
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
// If the filename is unterminated, then it must just be a lone <
// character. Return this as such.
FormTokenWithChars(Result, AfterLessPos, tok::less);
@@ -2038,6 +2056,12 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
}
if (C == 0) {
+ if (isCodeCompletionPoint(CurPtr - 1)) {
+ codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true);
+ cutOffLexing();
+ FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
+ return true;
+ }
NulCharacter = CurPtr-1;
}
C = getAndAdvanceChar(CurPtr, Result);
@@ -2054,6 +2078,34 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
return true;
}
+void Lexer::codeCompleteIncludedFile(const char *PathStart,
+ const char *CompletionPoint,
+ bool IsAngled) {
+ // Completion only applies to the filename, after the last slash.
+ StringRef PartialPath(PathStart, CompletionPoint - PathStart);
+ auto Slash = PartialPath.find_last_of(LangOpts.MSVCCompat ? "/\\" : "/");
+ StringRef Dir =
+ (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
+ const char *StartOfFilename =
+ (Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
+ // Code completion filter range is the filename only, up to completion point.
+ PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
+ StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
+ // We should replace the characters up to the closing quote, if any.
+ while (CompletionPoint < BufferEnd) {
+ char Next = *(CompletionPoint + 1);
+ if (Next == 0 || Next == '\r' || Next == '\n')
+ break;
+ ++CompletionPoint;
+ if (Next == (IsAngled ? '>' : '"'))
+ break;
+ }
+ PP->setCodeCompletionTokenRange(
+ FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
+ FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
+ PP->CodeCompleteIncludedFile(Dir, IsAngled);
+}
+
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
@@ -3033,6 +3085,8 @@ bool Lexer::LexUnicode(Token &Result, uint32_t C, const char *CurPtr) {
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
makeCharRange(*this, BufferPtr, CurPtr),
/*IsFirst=*/true);
+ maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
+ makeCharRange(*this, BufferPtr, CurPtr));
}
MIOpt.ReadToken();
@@ -3790,7 +3844,7 @@ LexNextToken:
case '@':
// Objective C support.
- if (CurPtr[-1] == '@' && LangOpts.ObjC1)
+ if (CurPtr[-1] == '@' && LangOpts.ObjC)
Kind = tok::at;
else
Kind = tok::unknown;
@@ -3827,7 +3881,6 @@ LexNextToken:
// We can't just reset CurPtr to BufferPtr because BufferPtr may point to
// an escaped newline.
--CurPtr;
- const char *UTF8StartPtr = CurPtr;
llvm::ConversionResult Status =
llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
(const llvm::UTF8 *)BufferEnd,
@@ -3842,9 +3895,6 @@ LexNextToken:
// (We manually eliminate the tail call to avoid recursion.)
goto LexNextToken;
}
- if (!isLexingRawMode())
- maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
- makeCharRange(*this, UTF8StartPtr, CurPtr));
return LexUnicode(Result, CodePoint, CurPtr);
}
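The Lexer.cpp hunks above extend the homoglyph table with zero-width characters (marked by a LooksLike of 0) and branch on that marker when diagnosing. Here is a minimal standalone sketch of that lookup: a binary search over a table sorted by code point. The table is a small illustrative subset, not the full list from the diff.

// Standalone sketch of the sorted-table lookup; LooksLike == 0 marks an
// invisible (zero-width) character with no ASCII look-alike.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iterator>

struct HomoglyphPair {
  uint32_t Character;
  char LooksLike;
  bool operator<(HomoglyphPair R) const { return Character < R.Character; }
};

static constexpr HomoglyphPair SortedHomoglyphs[] = {
    {0x00AD, 0},   // SOFT HYPHEN
    {0x2212, '-'}, // MINUS SIGN
    {0xFEFF, 0},   // ZERO WIDTH NO-BREAK SPACE
    {0xFF01, '!'}, // FULLWIDTH EXCLAMATION MARK
};

void diagnose(uint32_t C) {
  const HomoglyphPair *P = std::lower_bound(
      std::begin(SortedHomoglyphs), std::end(SortedHomoglyphs),
      HomoglyphPair{C, 0});
  if (P == std::end(SortedHomoglyphs) || P->Character != C)
    return; // not a known confusable character
  if (P->LooksLike)
    std::printf("U+%04X looks like '%c'\n", static_cast<unsigned>(C),
                P->LooksLike);
  else
    std::printf("U+%04X is an invisible (zero-width) character\n",
                static_cast<unsigned>(C));
}

int main() {
  diagnose(0x2212); // reports a '-' look-alike
  diagnose(0x200B); // not in this reduced table, so nothing is reported
}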
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
index 3f2af1a74e5a..fa0815eb9c6c 100644
--- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -693,7 +693,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
break;
}
}
- // fall through.
+ LLVM_FALLTHROUGH;
case 'j':
case 'J':
if (isImaginary) break; // Cannot be repeated.
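The one-line LiteralSupport.cpp change above replaces a "fall through" comment with LLVM_FALLTHROUGH, which expands to the C++17 [[fallthrough]] attribute (or a compiler-specific equivalent). A tiny standalone sketch with the standard attribute, using a made-up classify() function:

// Standalone sketch: annotate a deliberate case fall-through so
// -Wimplicit-fallthrough stays quiet.
int classify(char c) {
  switch (c) {
  case 'I':
    if (c == 'I')
      return 2;
    [[fallthrough]]; // deliberate: share the handling below
  case 'j':
  case 'J':
    return 1;
  default:
    return 0;
  }
}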
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
index 4ed69ecc465d..434c12007596 100644
--- a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
@@ -200,7 +200,8 @@ MacroDirective::DefInfo MacroDirective::getDefinition() {
}
const MacroDirective::DefInfo
-MacroDirective::findDirectiveAtLoc(SourceLocation L, SourceManager &SM) const {
+MacroDirective::findDirectiveAtLoc(SourceLocation L,
+ const SourceManager &SM) const {
assert(L.isValid() && "SourceLocation is invalid.");
for (DefInfo Def = getDefinition(); Def; Def = Def.getPreviousDefinition()) {
if (Def.getLocation().isInvalid() || // For macros defined on the command line.
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
index 87749f74734c..cff950b703a6 100644
--- a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -22,7 +22,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/LexDiagnostic.h"
@@ -43,6 +42,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -54,6 +54,8 @@
using namespace clang;
+void ModuleMapCallbacks::anchor() {}
+
void ModuleMap::resolveLinkAsDependencies(Module *Mod) {
auto PendingLinkAs = PendingLinkAsModule.find(Mod->Name);
if (PendingLinkAs != PendingLinkAsModule.end()) {
@@ -523,7 +525,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// At this point, only non-modular includes remain.
- if (LangOpts.ModulesStrictDeclUse) {
+ if (RequestingModule && LangOpts.ModulesStrictDeclUse) {
Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
<< RequestingModule->getTopLevelModule()->Name << Filename;
} else if (RequestingModule && RequestingModuleIsModuleInterface &&
@@ -806,12 +808,11 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
}
Module *ModuleMap::createGlobalModuleForInterfaceUnit(SourceLocation Loc) {
- assert(!PendingGlobalModule && "created multiple global modules");
- PendingGlobalModule.reset(
+ PendingSubmodules.emplace_back(
new Module("<global>", Loc, nullptr, /*IsFramework*/ false,
/*IsExplicit*/ true, NumCreatedModules++));
- PendingGlobalModule->Kind = Module::GlobalModuleFragment;
- return PendingGlobalModule.get();
+ PendingSubmodules.back()->Kind = Module::GlobalModuleFragment;
+ return PendingSubmodules.back().get();
}
Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
@@ -827,10 +828,11 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
Modules[Name] = SourceModule = Result;
// Reparent the current global module fragment as a submodule of this module.
- assert(GlobalModule == PendingGlobalModule.get() &&
- "unexpected global module");
- GlobalModule->setParent(Result);
- PendingGlobalModule.release(); // now owned by parent
+ for (auto &Submodule : PendingSubmodules) {
+ Submodule->setParent(Result);
+ Submodule.release(); // now owned by parent
+ }
+ PendingSubmodules.clear();
// Mark the main source file as being within the newly-created module so that
// declarations and macros are properly visibility-restricted to it.
@@ -841,6 +843,29 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
return Result;
}
+Module *ModuleMap::createHeaderModule(StringRef Name,
+ ArrayRef<Module::Header> Headers) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result =
+ new Module(Name, SourceLocation(), nullptr, /*IsFramework*/ false,
+ /*IsExplicit*/ false, NumCreatedModules++);
+ Result->Kind = Module::ModuleInterfaceUnit;
+ Modules[Name] = SourceModule = Result;
+
+ for (const Module::Header &H : Headers) {
+ auto *M = new Module(H.NameAsWritten, SourceLocation(), Result,
+ /*IsFramework*/ false,
+ /*IsExplicit*/ true, NumCreatedModules++);
+ // Header modules are implicitly 'export *'.
+ M->Exports.push_back(Module::ExportDecl(nullptr, true));
+ addHeader(M, H, NormalHeader);
+ }
+
+ return Result;
+}
+
/// For a framework module, infer the framework against which we
/// should link.
static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
@@ -997,15 +1022,16 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
= StringRef(FrameworkDir->getName());
llvm::sys::path::append(SubframeworksDirName, "Frameworks");
llvm::sys::path::native(SubframeworksDirName);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(SubframeworksDirName, EC),
- DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator
+ Dir = FS.dir_begin(SubframeworksDirName, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- if (!StringRef(Dir->getName()).endswith(".framework"))
+ if (!StringRef(Dir->path()).endswith(".framework"))
continue;
if (const DirectoryEntry *SubframeworkDir =
- FileMgr.getDirectory(Dir->getName())) {
+ FileMgr.getDirectory(Dir->path())) {
// Note: as an egregious but useful hack, we use the real path here and
// check whether it is actually a subdirectory of the parent directory.
// This will not be the case if the 'subframework' is actually a symlink
@@ -2371,13 +2397,13 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
// uncommonly used Tcl module on Darwin platforms.
std::error_code EC;
SmallVector<Module::Header, 6> Headers;
- vfs::FileSystem &FS = *SourceMgr.getFileManager().getVirtualFileSystem();
- for (vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
+ llvm::vfs::FileSystem &FS =
+ *SourceMgr.getFileManager().getVirtualFileSystem();
+ for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
- if (const FileEntry *FE =
- SourceMgr.getFileManager().getFile(I->getName())) {
- Module::Header Header = {I->getName(), FE};
+ if (const FileEntry *FE = SourceMgr.getFileManager().getFile(I->path())) {
+ Module::Header Header = {I->path(), FE};
Headers.push_back(std::move(Header));
}
}
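The ModuleMap.cpp hunks above generalize a single pending global module into a PendingSubmodules vector whose elements are reparented and then released, so ownership passes to the new parent module. A minimal standalone sketch of that ownership handoff follows, with hypothetical Node/Builder types rather than clang's Module/ModuleMap; Node deletes its children only to keep the sketch self-contained.

// Standalone sketch: collect provisionally owned children, then reparent
// them and release ownership to the parent.
#include <memory>
#include <vector>

struct Node {
  Node *Parent = nullptr;
  std::vector<Node *> Children;
  ~Node() {
    for (Node *C : Children)
      delete C;
  }
};

class Builder {
  std::vector<std::unique_ptr<Node>> Pending;

public:
  Node *createPending() {
    Pending.emplace_back(new Node());
    return Pending.back().get();
  }
  // Transfers ownership of every pending node to Parent.
  void reparentPending(Node &Parent) {
    for (auto &Child : Pending) {
      Child->Parent = &Parent;
      Parent.Children.push_back(Child.release()); // now owned by Parent
    }
    Pending.clear();
  }
};

int main() {
  Builder B;
  B.createPending();
  Node Root;
  B.reparentPending(Root); // Root now owns and later deletes the child
}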
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
index 66a9faa6e60a..d62a3513c777 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -31,7 +31,6 @@
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
@@ -119,7 +118,7 @@ static bool isReservedId(StringRef Text, const LangOptions &Lang) {
// the specified module, meaning clang won't build the specified module. This is
// useful in a number of situations, for instance, when building a library that
// vends a module map, one might want to avoid hitting intermediate build
-// products containig the the module map or avoid finding the system installed
+// products containing the the module map or avoid finding the system installed
// modulemap for that library.
static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
StringRef ModuleName) {
@@ -383,11 +382,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/ false,
FoundNonSkipPortion, FoundElse);
- if (CurPTHLexer) {
- PTHSkipExcludedConditionalBlock();
- return;
- }
-
// Enter raw mode to disable identifier lookup (and thus macro expansion),
// disabling warnings, etc.
CurPPLexer->LexingRawMode = true;
@@ -405,7 +399,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If this is the end of the buffer, we have an error.
if (Tok.is(tok::eof)) {
// We don't emit errors for unterminated conditionals here,
- // Lexer::LexEndOfFile can do that propertly.
+ // Lexer::LexEndOfFile can do that properly.
// Just return and let the caller lex after this #include.
if (PreambleConditionalStack.isRecording())
PreambleConditionalStack.SkipInfo.emplace(
@@ -585,83 +579,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
Tok.getLocation());
}
-void Preprocessor::PTHSkipExcludedConditionalBlock() {
- while (true) {
- assert(CurPTHLexer);
- assert(CurPTHLexer->LexingRawMode == false);
-
- // Skip to the next '#else', '#elif', or #endif.
- if (CurPTHLexer->SkipBlock()) {
- // We have reached an #endif. Both the '#' and 'endif' tokens
- // have been consumed by the PTHLexer. Just pop off the condition level.
- PPConditionalInfo CondInfo;
- bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
- (void)InCond; // Silence warning in no-asserts mode.
- assert(!InCond && "Can't be skipping if not in a conditional!");
- break;
- }
-
- // We have reached a '#else' or '#elif'. Lex the next token to get
- // the directive flavor.
- Token Tok;
- LexUnexpandedToken(Tok);
-
- // We can actually look up the IdentifierInfo here since we aren't in
- // raw mode.
- tok::PPKeywordKind K = Tok.getIdentifierInfo()->getPPKeywordID();
-
- if (K == tok::pp_else) {
- // #else: Enter the else condition. We aren't in a nested condition
- // since we skip those. We're always in the one matching the last
- // blocked we skipped.
- PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
- // Note that we've seen a #else in this conditional.
- CondInfo.FoundElse = true;
-
- // If the #if block wasn't entered then enter the #else block now.
- if (!CondInfo.FoundNonSkip) {
- CondInfo.FoundNonSkip = true;
-
- // Scan until the eod token.
- CurPTHLexer->ParsingPreprocessorDirective = true;
- DiscardUntilEndOfDirective();
- CurPTHLexer->ParsingPreprocessorDirective = false;
-
- break;
- }
-
- // Otherwise skip this block.
- continue;
- }
-
- assert(K == tok::pp_elif);
- PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
-
- // If this is a #elif with a #else before it, report the error.
- if (CondInfo.FoundElse)
- Diag(Tok, diag::pp_err_elif_after_else);
-
- // If this is in a skipping block or if we're already handled this #if
- // block, don't bother parsing the condition. We just skip this block.
- if (CondInfo.FoundNonSkip)
- continue;
-
- // Evaluate the condition of the #elif.
- IdentifierInfo *IfNDefMacro = nullptr;
- CurPTHLexer->ParsingPreprocessorDirective = true;
- bool ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro).Conditional;
- CurPTHLexer->ParsingPreprocessorDirective = false;
-
- // If this condition is true, enter it!
- if (ShouldEnter) {
- CondInfo.FoundNonSkip = true;
- break;
- }
-
- // Otherwise, skip this block and go to the next one.
- }
-}
-
Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
if (!SourceMgr.isInMainFile(Loc)) {
// Try to determine the module of the include directive.
@@ -690,7 +607,7 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
// If we have a module import syntax, we shouldn't include a header to
// make a particular module visible.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
return nullptr;
Module *TopM = M->getTopLevelModule();
@@ -887,18 +804,29 @@ private:
bool save;
};
-/// Process a directive while looking for the through header.
-/// Only #include (to check if it is the through header) and #define (to warn
-/// about macros that don't match the PCH) are handled. All other directives
-/// are completely discarded.
-void Preprocessor::HandleSkippedThroughHeaderDirective(Token &Result,
+/// Process a directive while looking for the through header or a #pragma
+/// hdrstop. The following directives are handled:
+/// #include (to check if it is the through header)
+/// #define (to warn about macros that don't match the PCH)
+/// #pragma (to check for pragma hdrstop).
+/// All other directives are completely discarded.
+void Preprocessor::HandleSkippedDirectiveWhileUsingPCH(Token &Result,
SourceLocation HashLoc) {
if (const IdentifierInfo *II = Result.getIdentifierInfo()) {
- if (II->getPPKeywordID() == tok::pp_include)
- return HandleIncludeDirective(HashLoc, Result);
- if (II->getPPKeywordID() == tok::pp_define)
+ if (II->getPPKeywordID() == tok::pp_define) {
return HandleDefineDirective(Result,
/*ImmediatelyAfterHeaderGuard=*/false);
+ }
+ if (SkippingUntilPCHThroughHeader &&
+ II->getPPKeywordID() == tok::pp_include) {
+ return HandleIncludeDirective(HashLoc, Result);
+ }
+ if (SkippingUntilPragmaHdrStop && II->getPPKeywordID() == tok::pp_pragma) {
+ Token P = LookAhead(0);
+ auto *II = P.getIdentifierInfo();
+ if (II && II->getName() == "hdrstop")
+ return HandlePragmaDirective(HashLoc, PIK_HashPragma);
+ }
}
DiscardUntilEndOfDirective();
}
@@ -964,8 +892,8 @@ void Preprocessor::HandleDirective(Token &Result) {
// and reset to previous state when returning from this function.
ResetMacroExpansionHelper helper(this);
- if (SkippingUntilPCHThroughHeader)
- return HandleSkippedThroughHeaderDirective(Result, SavedHash.getLocation());
+ if (SkippingUntilPCHThroughHeader || SkippingUntilPragmaHdrStop)
+ return HandleSkippedDirectiveWhileUsingPCH(Result, SavedHash.getLocation());
switch (Result.getKind()) {
case tok::eod:
@@ -1376,10 +1304,6 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
///
void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
bool isWarning) {
- // PTH doesn't emit #warning or #error directives.
- if (CurPTHLexer)
- return CurPTHLexer->DiscardToEndOfLine();
-
// Read the rest of the line raw. We do this because we don't want macros
// to be expanded and we don't require that the tokens be valid preprocessing
// tokens. For example, this is allowed: "#warning ` 'foo". GCC does
@@ -1618,7 +1542,7 @@ static void diagnoseAutoModuleImport(
Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path,
SourceLocation PathEnd) {
- assert(PP.getLangOpts().ObjC2 && "no import syntax available");
+ assert(PP.getLangOpts().ObjC && "no import syntax available");
SmallString<128> PathString;
for (size_t I = 0, N = Path.size(); I != N; ++I) {
@@ -1783,6 +1707,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Check that we don't have infinite #include recursion.
if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
Diag(FilenameTok, diag::err_pp_include_too_deep);
+ HasReachedMaxIncludeDepth = true;
return;
}
@@ -1868,15 +1793,58 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
if (File) {
SourceRange Range(FilenameTok.getLocation(), CharEnd);
- Diag(FilenameTok, diag::err_pp_file_not_found_not_fatal) <<
+ Diag(FilenameTok, diag::err_pp_file_not_found_angled_include_not_fatal) <<
Filename <<
FixItHint::CreateReplacement(Range, "\"" + Filename.str() + "\"");
}
}
+ // Check for likely typos due to leading or trailing non-isAlphanumeric
+ // characters
+ StringRef OriginalFilename = Filename;
+ if (LangOpts.SpellChecking && !File) {
+ // A heuristic to correct a typo file name by removing leading and
+ // trailing non-isAlphanumeric characters.
+ auto CorrectTypoFilename = [](llvm::StringRef Filename) {
+ Filename = Filename.drop_until(isAlphanumeric);
+ while (!Filename.empty() && !isAlphanumeric(Filename.back())) {
+ Filename = Filename.drop_back();
+ }
+ return Filename;
+ };
+ StringRef TypoCorrectionName = CorrectTypoFilename(Filename);
+ SmallString<128> NormalizedTypoCorrectionPath;
+ if (LangOpts.MSVCCompat) {
+ NormalizedTypoCorrectionPath = TypoCorrectionName.str();
+#ifndef _WIN32
+ llvm::sys::path::native(NormalizedTypoCorrectionPath);
+#endif
+ }
+ File = LookupFile(
+ FilenameLoc,
+ LangOpts.MSVCCompat ? NormalizedTypoCorrectionPath.c_str()
+ : TypoCorrectionName,
+ isAngled, LookupFrom, LookupFromFile, CurDir,
+ Callbacks ? &SearchPath : nullptr,
+ Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
+ if (File) {
+ SourceRange Range(FilenameTok.getLocation(), CharEnd);
+ auto Hint = isAngled
+ ? FixItHint::CreateReplacement(
+ Range, "<" + TypoCorrectionName.str() + ">")
+ : FixItHint::CreateReplacement(
+ Range, "\"" + TypoCorrectionName.str() + "\"");
+ Diag(FilenameTok, diag::err_pp_file_not_found_typo_not_fatal)
+ << OriginalFilename << TypoCorrectionName << Hint;
+ // We found the file, so set the Filename to the name after typo
+ // correction.
+ Filename = TypoCorrectionName;
+ }
+ }
+
// If the file is still not found, just go with the vanilla diagnostic
if (!File)
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename
+ Diag(FilenameTok, diag::err_pp_file_not_found) << OriginalFilename
<< FilenameRange;
}
}
@@ -1896,10 +1864,11 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (PPOpts->SingleFileParseMode)
ShouldEnter = false;
- // Any diagnostics after the fatal error will not be visible. As the
- // compilation failed already and errors in subsequently included files won't
- // be visible, avoid preprocessing those files.
- if (ShouldEnter && Diags->hasFatalErrorOccurred())
+ // If we've reached the max allowed include depth, it is usually due to an
+ // include cycle. Don't enter already processed files again as it can lead to
+ // reaching the max allowed include depth again.
+ if (ShouldEnter && HasReachedMaxIncludeDepth && File &&
+ HeaderInfo.getFileInfo(File).NumIncludes)
ShouldEnter = false;
// Determine whether we should try to import the module for this #include, if
@@ -1932,7 +1901,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Warn that we're replacing the include/import with a module import.
// We only do this in Objective-C, where we have a module-import syntax.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
diagnoseAutoModuleImport(*this, HashLoc, IncludeTok, Path, CharEnd);
// Load the module to import its macros. We'll make the declarations
@@ -1961,14 +1930,10 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (hadModuleLoaderFatalFailure()) {
// With a fatal failure in the module loader, we abort parsing.
Token &Result = IncludeTok;
- if (CurLexer) {
- Result.startToken();
- CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
- CurLexer->cutOffLexing();
- } else {
- assert(CurPTHLexer && "#include but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- }
+ assert(CurLexer && "#include but no current lexer set!");
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer->cutOffLexing();
}
return;
}
@@ -2169,7 +2134,7 @@ void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
///
void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
Token &ImportTok) {
- if (!LangOpts.ObjC1) { // #import is standard for ObjC.
+ if (!LangOpts.ObjC) { // #import is standard for ObjC.
if (LangOpts.MSVCCompat)
return HandleMicrosoftImportDirective(ImportTok);
Diag(ImportTok, diag::ext_pp_import_directive);
@@ -2640,7 +2605,7 @@ void Preprocessor::HandleDefineDirective(
II->isStr("__unsafe_unretained") ||
II->isStr("__autoreleasing");
};
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
SourceMgr.getFileID(OtherMI->getDefinitionLoc())
== getPredefinesFileID() &&
isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
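Among the PPDirectives.cpp changes above is a typo-correction retry for #include: when the file is not found, the lexed name is stripped of leading and trailing non-alphanumeric characters and looked up again. Below is a minimal standalone sketch of that heuristic using std::string_view in place of llvm::StringRef; the exact set of characters clang ends up stripping in practice depends on how the filename was spelled, so treat this only as an approximation of the lambda in the diff.

// Standalone sketch of the typo-correction heuristic: drop stray leading and
// trailing punctuation from a candidate include name before retrying lookup.
#include <cctype>
#include <string_view>

static bool isAlphanumeric(char C) {
  return std::isalnum(static_cast<unsigned char>(C)) != 0;
}

std::string_view correctTypoFilename(std::string_view Filename) {
  while (!Filename.empty() && !isAlphanumeric(Filename.front()))
    Filename.remove_prefix(1);
  while (!Filename.empty() && !isAlphanumeric(Filename.back()))
    Filename.remove_suffix(1);
  return Filename;
}

int main() {
  // Interior punctuation (the '.') is kept; only the ends are trimmed.
  return correctTypoFilename("<stdio.h!>") == "stdio.h" ? 0 : 1;
}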
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index 6631b13b1583..e321dd38fed6 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -19,7 +19,6 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroInfo.h"
-#include "clang/Lex/PTHManager.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -76,13 +75,6 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
if (MaxIncludeStackDepth < IncludeMacroStack.size())
MaxIncludeStackDepth = IncludeMacroStack.size();
- if (PTH) {
- if (PTHLexer *PL = PTH->CreateLexer(FID)) {
- EnterSourceFileWithPTH(PL, CurDir);
- return false;
- }
- }
-
// Get the MemoryBuffer for this FID, if it fails, we fail.
bool Invalid = false;
const llvm::MemoryBuffer *InputFile =
@@ -131,31 +123,6 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
}
}
-/// EnterSourceFileWithPTH - Add a source file to the top of the include stack
-/// and start getting tokens from it using the PTH cache.
-void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
- const DirectoryLookup *CurDir) {
-
- if (CurPPLexer || CurTokenLexer)
- PushIncludeMacroStack();
-
- CurDirLookup = CurDir;
- CurPTHLexer.reset(PL);
- CurPPLexer = CurPTHLexer.get();
- CurLexerSubmodule = nullptr;
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_PTHLexer;
-
- // Notify the client, if desired, that we are in a new source file.
- if (Callbacks) {
- FileID FID = CurPPLexer->getFileID();
- SourceLocation EnterLoc = SourceMgr.getLocForStartOfFile(FID);
- SrcMgr::CharacteristicKind FileType =
- SourceMgr.getFileCharacteristic(EnterLoc);
- Callbacks->FileChanged(EnterLoc, PPCallbacks::EnterFile, FileType);
- }
-}
-
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
@@ -304,20 +271,21 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
const DirectoryEntry *Dir = Mod.getUmbrellaDir().Entry;
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
std::error_code EC;
- for (vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC), End;
+ for (llvm::vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC),
+ End;
Entry != End && !EC; Entry.increment(EC)) {
using llvm::StringSwitch;
// Check whether this entry has an extension typically associated with
// headers.
- if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->getName()))
+ if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
.Cases(".h", ".H", ".hh", ".hpp", true)
.Default(false))
continue;
- if (const FileEntry *Header = getFileManager().getFile(Entry->getName()))
+ if (const FileEntry *Header = getFileManager().getFile(Entry->path()))
if (!getSourceManager().hasFileInfo(Header)) {
if (!ModMap.isHeaderInUnavailableModule(Header)) {
// Find the relative path that would access this header.
@@ -339,7 +307,6 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
// If we have an unclosed module region from a pragma at the end of a
// module, complain and close it now.
- // FIXME: This is not correct if we are building a module from PTH.
const bool LeavingSubmodule = CurLexer && CurLexerSubmodule;
if ((LeavingSubmodule || IncludeMacroStack.empty()) &&
!BuildingSubmoduleStack.empty() &&
@@ -436,15 +403,10 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (isCodeCompletionEnabled() && CurPPLexer &&
SourceMgr.getLocForStartOfFile(CurPPLexer->getFileID()) ==
CodeCompletionFileLoc) {
- if (CurLexer) {
- Result.startToken();
- CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
- CurLexer.reset();
- } else {
- assert(CurPTHLexer && "Got EOF but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- CurPTHLexer.reset();
- }
+ assert(CurLexer && "Got EOF but no current lexer set!");
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer.reset();
CurPPLexer = nullptr;
recomputeCurLexerKind();
@@ -522,39 +484,34 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
// If this is the end of the main file, form an EOF token.
- if (CurLexer) {
- const char *EndPos = getCurLexerEndPos();
- Result.startToken();
- CurLexer->BufferPtr = EndPos;
- CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
-
- if (isCodeCompletionEnabled()) {
- // Inserting the code-completion point increases the source buffer by 1,
- // but the main FileID was created before inserting the point.
- // Compensate by reducing the EOF location by 1, otherwise the location
- // will point to the next FileID.
- // FIXME: This is hacky, the code-completion point should probably be
- // inserted before the main FileID is created.
- if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
- Result.setLocation(Result.getLocation().getLocWithOffset(-1));
- }
-
- if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
- // Reached the end of the compilation without finding the through header.
- Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
- << PPOpts->PCHThroughHeader << 0;
- }
+ assert(CurLexer && "Got EOF but no current lexer set!");
+ const char *EndPos = getCurLexerEndPos();
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (isCodeCompletionEnabled()) {
+ // Inserting the code-completion point increases the source buffer by 1,
+ // but the main FileID was created before inserting the point.
+ // Compensate by reducing the EOF location by 1, otherwise the location
+ // will point to the next FileID.
+ // FIXME: This is hacky, the code-completion point should probably be
+ // inserted before the main FileID is created.
+ if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
+ Result.setLocation(Result.getLocation().getLocWithOffset(-1));
+ }
- if (!isIncrementalProcessingEnabled())
- // We're done with lexing.
- CurLexer.reset();
- } else {
- assert(CurPTHLexer && "Got EOF but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- CurPTHLexer.reset();
+ if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
+ // Reached the end of the compilation without finding the through header.
+ Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 0;
}
if (!isIncrementalProcessingEnabled())
+ // We're done with lexing.
+ CurLexer.reset();
+
+ if (!isIncrementalProcessingEnabled())
CurPPLexer = nullptr;
if (TUKind == TU_Complete) {
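The diagnoseMissingHeaderInUmbrellaDir hunk above walks the umbrella directory recursively and keeps only entries whose extension looks like a header. Here is a minimal standalone sketch of that scan using std::filesystem as a stand-in for llvm::vfs and an if-chain in place of llvm::StringSwitch.

// Standalone sketch: recursively collect files under a directory whose
// extension is one of the common header extensions.
#include <filesystem>
#include <string>
#include <system_error>
#include <vector>

namespace fs = std::filesystem;

static bool hasHeaderExtension(const fs::path &P) {
  const std::string Ext = P.extension().string();
  return Ext == ".h" || Ext == ".H" || Ext == ".hh" || Ext == ".hpp";
}

std::vector<fs::path> findHeaders(const fs::path &UmbrellaDir) {
  std::vector<fs::path> Headers;
  std::error_code EC;
  for (fs::recursive_directory_iterator It(UmbrellaDir, EC), End;
       It != End && !EC; It.increment(EC))
    if (It->is_regular_file(EC) && hasHeaderExtension(It->path()))
      Headers.push_back(It->path());
  return Headers;
}

int main() {
  return findHeaders(".").empty() ? 1 : 0; // 0 if any header found under "."
}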
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index 346dd947c028..c70ff46ec904 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -23,12 +23,12 @@
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
@@ -428,8 +428,6 @@ bool Preprocessor::isNextPPTokenLParen() {
unsigned Val;
if (CurLexer)
Val = CurLexer->isNextPPTokenLParen();
- else if (CurPTHLexer)
- Val = CurPTHLexer->isNextPPTokenLParen();
else
Val = CurTokenLexer->isNextTokenLParen();
@@ -442,8 +440,6 @@ bool Preprocessor::isNextPPTokenLParen() {
for (const IncludeStackInfo &Entry : llvm::reverse(IncludeMacroStack)) {
if (Entry.TheLexer)
Val = Entry.TheLexer->isNextPPTokenLParen();
- else if (Entry.ThePTHLexer)
- Val = Entry.ThePTHLexer->isNextPPTokenLParen();
else
Val = Entry.TheTokenLexer->isNextTokenLParen();
@@ -1242,6 +1238,13 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
CurDir, nullptr, nullptr, nullptr, nullptr);
+ if (PPCallbacks *Callbacks = PP.getPPCallbacks()) {
+ SrcMgr::CharacteristicKind FileType = SrcMgr::C_User;
+ if (File)
+ FileType = PP.getHeaderSearchInfo().getFileDirFlavor(File);
+ Callbacks->HasInclude(FilenameLoc, Filename, isAngled, File, FileType);
+ }
+
// Get the result value. A result of true means the file exists.
return File != nullptr;
}
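The PPMacroExpansion.cpp hunk above adds a notification to the preprocessor callbacks when __has_include is evaluated, reporting the filename, whether it was angled, the file (if found), and its flavor. The following is a plain C++ stand-in for that observer hook, with hypothetical IncludeObserver/evaluateHasInclude names rather than clang's PPCallbacks API.

// Standalone sketch: notify an observer whenever a __has_include-style query
// is evaluated, before returning the result.
#include <cstdio>
#include <string>

struct IncludeObserver {
  virtual ~IncludeObserver() = default;
  virtual void hasInclude(const std::string &Filename, bool IsAngled,
                          bool Found) = 0;
};

struct LoggingObserver : IncludeObserver {
  void hasInclude(const std::string &Filename, bool IsAngled,
                  bool Found) override {
    std::printf("__has_include(%c%s%c) -> %d\n", IsAngled ? '<' : '"',
                Filename.c_str(), IsAngled ? '>' : '"', Found);
  }
};

// Stand-in for the lookup: decide whether the file exists, tell the observer
// (if any), then return the answer.
bool evaluateHasInclude(const std::string &Filename, bool IsAngled,
                        IncludeObserver *Obs) {
  bool Found = (Filename == "stdio.h"); // fake lookup for the sketch
  if (Obs)
    Obs->hasInclude(Filename, IsAngled, Found);
  return Found;
}

int main() {
  LoggingObserver Log;
  return evaluateHasInclude("stdio.h", /*IsAngled=*/true, &Log) ? 0 : 1;
}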
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
deleted file mode 100644
index 45cff56dcaa1..000000000000
--- a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
+++ /dev/null
@@ -1,748 +0,0 @@
-//===- PTHLexer.cpp - Lex from a token stream -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTHLexer interface.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Lex/PTHLexer.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/TokenKinds.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Lex/PTHManager.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Lex/Token.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/DJB.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/OnDiskHashTable.h"
-#include <cassert>
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-#include <ctime>
-#include <memory>
-#include <utility>
-
-using namespace clang;
-
-static const unsigned StoredTokenSize = 1 + 1 + 2 + 4 + 4;
-
-//===----------------------------------------------------------------------===//
-// PTHLexer methods.
-//===----------------------------------------------------------------------===//
-
-PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
- const unsigned char *ppcond, PTHManager &PM)
- : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), PPCond(ppcond),
- CurPPCondPtr(ppcond), PTHMgr(PM) {
- FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
-}
-
-bool PTHLexer::Lex(Token& Tok) {
- //===--------------------------------------==//
- // Read the raw token data.
- //===--------------------------------------==//
- using namespace llvm::support;
-
- // Shadow CurPtr into an automatic variable.
- const unsigned char *CurPtrShadow = CurPtr;
-
- // Read in the data for the token.
- unsigned Word0 = endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
- uint32_t IdentifierID =
- endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
- uint32_t FileOffset =
- endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
-
- tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
- Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
- uint32_t Len = Word0 >> 16;
-
- CurPtr = CurPtrShadow;
-
- //===--------------------------------------==//
- // Construct the token itself.
- //===--------------------------------------==//
-
- Tok.startToken();
- Tok.setKind(TKind);
- Tok.setFlag(TFlags);
- assert(!LexingRawMode);
- Tok.setLocation(FileStartLoc.getLocWithOffset(FileOffset));
- Tok.setLength(Len);
-
- // Handle identifiers.
- if (Tok.isLiteral()) {
- Tok.setLiteralData((const char*) (PTHMgr.SpellingBase + IdentifierID));
- }
- else if (IdentifierID) {
- MIOpt.ReadToken();
- IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
-
- Tok.setIdentifierInfo(II);
-
- // Change the kind of this identifier to the appropriate token kind, e.g.
- // turning "for" into a keyword.
- Tok.setKind(II->getTokenID());
-
- if (II->isHandleIdentifierCase())
- return PP->HandleIdentifier(Tok);
-
- return true;
- }
-
- //===--------------------------------------==//
- // Process the token.
- //===--------------------------------------==//
- if (TKind == tok::eof) {
- // Save the end-of-file token.
- EofToken = Tok;
-
- assert(!ParsingPreprocessorDirective);
- assert(!LexingRawMode);
-
- return LexEndOfFile(Tok);
- }
-
- if (TKind == tok::hash && Tok.isAtStartOfLine()) {
- LastHashTokPtr = CurPtr - StoredTokenSize;
- assert(!LexingRawMode);
- PP->HandleDirective(Tok);
-
- return false;
- }
-
- if (TKind == tok::eod) {
- assert(ParsingPreprocessorDirective);
- ParsingPreprocessorDirective = false;
- return true;
- }
-
- MIOpt.ReadToken();
- return true;
-}
-
-bool PTHLexer::LexEndOfFile(Token &Result) {
- // If we hit the end of the file while parsing a preprocessor directive,
- // end the preprocessor directive first. The next token returned will
- // then be the end of file.
- if (ParsingPreprocessorDirective) {
- ParsingPreprocessorDirective = false; // Done parsing the "line".
- return true; // Have a token.
- }
-
- assert(!LexingRawMode);
-
- // If we are in a #if directive, emit an error.
- while (!ConditionalStack.empty()) {
- if (PP->getCodeCompletionFileLoc() != FileStartLoc)
- PP->Diag(ConditionalStack.back().IfLoc,
- diag::err_pp_unterminated_conditional);
- ConditionalStack.pop_back();
- }
-
- // Finally, let the preprocessor handle this.
- return PP->HandleEndOfFile(Result);
-}
-
-// FIXME: We can just grab the last token instead of storing a copy
-// into EofToken.
-void PTHLexer::getEOF(Token& Tok) {
- assert(EofToken.is(tok::eof));
- Tok = EofToken;
-}
-
-void PTHLexer::DiscardToEndOfLine() {
- assert(ParsingPreprocessorDirective && ParsingFilename == false &&
- "Must be in a preprocessing directive!");
-
- // We assume that if the preprocessor wishes to discard to the end of
- // the line that it also means to end the current preprocessor directive.
- ParsingPreprocessorDirective = false;
-
- // Skip tokens by only peeking at their token kind and the flags.
- // We don't need to actually reconstruct full tokens from the token buffer.
- // This saves some copies and it also reduces IdentifierInfo* lookup.
- const unsigned char* p = CurPtr;
- while (true) {
- // Read the token kind. Are we at the end of the file?
- tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
- if (x == tok::eof) break;
-
- // Read the token flags. Are we at the start of the next line?
- Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
- if (y & Token::StartOfLine) break;
-
- // Skip to the next token.
- p += StoredTokenSize;
- }
-
- CurPtr = p;
-}
-
-/// SkipBlock - Used by Preprocessor to skip the current conditional block.
-bool PTHLexer::SkipBlock() {
- using namespace llvm::support;
-
- assert(CurPPCondPtr && "No cached PP conditional information.");
- assert(LastHashTokPtr && "No known '#' token.");
-
- const unsigned char *HashEntryI = nullptr;
- uint32_t TableIdx;
-
- do {
- // Read the token offset from the side-table.
- uint32_t Offset = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
-
- // Read the target table index from the side-table.
- TableIdx = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
-
- // Compute the actual memory address of the '#' token data for this entry.
- HashEntryI = TokBuf + Offset;
-
- // Optimization: "Sibling jumping". #if...#else...#endif blocks can
- // contain nested blocks. In the side-table we can jump over these
- // nested blocks instead of doing a linear search if the next "sibling"
- // entry is not at a location greater than LastHashTokPtr.
- if (HashEntryI < LastHashTokPtr && TableIdx) {
- // In the side-table we are still at an entry for a '#' token that
- // is earlier than the last one we saw. Check if the location we would
- // stride gets us closer.
- const unsigned char* NextPPCondPtr =
- PPCond + TableIdx*(sizeof(uint32_t)*2);
- assert(NextPPCondPtr >= CurPPCondPtr);
- // Read where we should jump to.
- const unsigned char *HashEntryJ =
- TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
-
- if (HashEntryJ <= LastHashTokPtr) {
- // Jump directly to the next entry in the side table.
- HashEntryI = HashEntryJ;
- TableIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
- CurPPCondPtr = NextPPCondPtr;
- }
- }
- }
- while (HashEntryI < LastHashTokPtr);
- assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
- assert(TableIdx && "No jumping from #endifs.");
-
- // Update our side-table iterator.
- const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
- assert(NextPPCondPtr >= CurPPCondPtr);
- CurPPCondPtr = NextPPCondPtr;
-
- // Read where we should jump to.
- HashEntryI =
- TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
- uint32_t NextIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
-
- // By construction NextIdx will be zero if this is a #endif. This is useful
- // to know to obviate lexing another token.
- bool isEndif = NextIdx == 0;
-
- // This case can occur when we see something like this:
- //
- // #if ...
- // /* a comment or nothing */
- // #elif
- //
- // If we are skipping the first #if block it will be the case that CurPtr
- // already points 'elif'. Just return.
-
- if (CurPtr > HashEntryI) {
- assert(CurPtr == HashEntryI + StoredTokenSize);
- // Did we reach a #endif? If so, go ahead and consume that token as well.
- if (isEndif)
- CurPtr += StoredTokenSize * 2;
- else
- LastHashTokPtr = HashEntryI;
-
- return isEndif;
- }
-
- // Otherwise, we need to advance. Update CurPtr to point to the '#' token.
- CurPtr = HashEntryI;
-
- // Update the location of the last observed '#'. This is useful if we
- // are skipping multiple blocks.
- LastHashTokPtr = CurPtr;
-
- // Skip the '#' token.
- assert(((tok::TokenKind)*CurPtr) == tok::hash);
- CurPtr += StoredTokenSize;
-
- // Did we reach a #endif? If so, go ahead and consume that token as well.
- if (isEndif) {
- CurPtr += StoredTokenSize * 2;
- }
-
- return isEndif;
-}
-
-SourceLocation PTHLexer::getSourceLocation() {
- // getSourceLocation is not on the hot path. It is used to get the location
- // of the next token when transitioning back to this lexer when done
- // handling a #included file. Just read the necessary data from the token
- // data buffer to construct the SourceLocation object.
- // NOTE: This is a virtual function; hence it is defined out-of-line.
- using namespace llvm::support;
-
- const unsigned char *OffsetPtr = CurPtr + (StoredTokenSize - 4);
- uint32_t Offset = endian::readNext<uint32_t, little, aligned>(OffsetPtr);
- return FileStartLoc.getLocWithOffset(Offset);
-}
-
-//===----------------------------------------------------------------------===//
-// PTH file lookup: map from strings to file data.
-//===----------------------------------------------------------------------===//
-
-/// PTHFileLookup - This internal data structure is used by the PTHManager
-/// to map from FileEntry objects managed by FileManager to offsets within
-/// the PTH file.
-namespace {
-
-class PTHFileData {
- const uint32_t TokenOff;
- const uint32_t PPCondOff;
-
-public:
- PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
- : TokenOff(tokenOff), PPCondOff(ppCondOff) {}
-
- uint32_t getTokenOffset() const { return TokenOff; }
- uint32_t getPPCondOffset() const { return PPCondOff; }
-};
-
-class PTHFileLookupCommonTrait {
-public:
- using internal_key_type = std::pair<unsigned char, StringRef>;
- using hash_value_type = unsigned;
- using offset_type = unsigned;
-
- static hash_value_type ComputeHash(internal_key_type x) {
- return llvm::djbHash(x.second);
- }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- unsigned keyLen =
- (unsigned)endian::readNext<uint16_t, little, unaligned>(d);
- unsigned dataLen = (unsigned) *(d++);
- return std::make_pair(keyLen, dataLen);
- }
-
- static internal_key_type ReadKey(const unsigned char* d, unsigned) {
- unsigned char k = *(d++); // Read the entry kind.
- return std::make_pair(k, (const char*) d);
- }
-};
-
-} // namespace
-
-class PTHManager::PTHFileLookupTrait : public PTHFileLookupCommonTrait {
-public:
- using external_key_type = const FileEntry *;
- using data_type = PTHFileData;
-
- static internal_key_type GetInternalKey(const FileEntry* FE) {
- return std::make_pair((unsigned char) 0x1, FE->getName());
- }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- return a.first == b.first && a.second == b.second;
- }
-
- static PTHFileData ReadData(const internal_key_type& k,
- const unsigned char* d, unsigned) {
- using namespace llvm::support;
-
- assert(k.first == 0x1 && "Only file lookups can match!");
- uint32_t x = endian::readNext<uint32_t, little, unaligned>(d);
- uint32_t y = endian::readNext<uint32_t, little, unaligned>(d);
- return PTHFileData(x, y);
- }
-};
-
-class PTHManager::PTHStringLookupTrait {
-public:
- using data_type = uint32_t;
- using external_key_type = const std::pair<const char *, unsigned>;
- using internal_key_type = external_key_type;
- using hash_value_type = uint32_t;
- using offset_type = unsigned;
-
- static bool EqualKey(const internal_key_type& a,
- const internal_key_type& b) {
- return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
- : false;
- }
-
- static hash_value_type ComputeHash(const internal_key_type& a) {
- return llvm::djbHash(StringRef(a.first, a.second));
- }
-
- // This hopefully will just get inlined and removed by the optimizer.
- static const internal_key_type&
- GetInternalKey(const external_key_type& x) { return x; }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- return std::make_pair(
- (unsigned)endian::readNext<uint16_t, little, unaligned>(d),
- sizeof(uint32_t));
- }
-
- static std::pair<const char*, unsigned>
- ReadKey(const unsigned char* d, unsigned n) {
- assert(n >= 2 && d[n-1] == '\0');
- return std::make_pair((const char*) d, n-1);
- }
-
- static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
- using namespace llvm::support;
-
- return endian::readNext<uint32_t, little, unaligned>(d);
- }
-};
-
-//===----------------------------------------------------------------------===//
-// PTHManager methods.
-//===----------------------------------------------------------------------===//
-
-PTHManager::PTHManager(
- std::unique_ptr<const llvm::MemoryBuffer> buf,
- std::unique_ptr<PTHFileLookup> fileLookup, const unsigned char *idDataTable,
- std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> perIDCache,
- std::unique_ptr<PTHStringIdLookup> stringIdLookup, unsigned numIds,
- const unsigned char *spellingBase, const char *originalSourceFile)
- : Buf(std::move(buf)), PerIDCache(std::move(perIDCache)),
- FileLookup(std::move(fileLookup)), IdDataTable(idDataTable),
- StringIdLookup(std::move(stringIdLookup)), NumIds(numIds),
- SpellingBase(spellingBase), OriginalSourceFile(originalSourceFile) {}
-
-PTHManager::~PTHManager() = default;
-
-static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
- Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << Msg;
-}
-
-PTHManager *PTHManager::Create(StringRef file, DiagnosticsEngine &Diags) {
- // Memory map the PTH file.
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileOrErr =
- llvm::MemoryBuffer::getFile(file);
-
- if (!FileOrErr) {
- // FIXME: Add ec.message() to this diag.
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
- std::unique_ptr<llvm::MemoryBuffer> File = std::move(FileOrErr.get());
-
- using namespace llvm::support;
-
- // Get the buffer ranges and check if there are at least three 32-bit
- // words at the end of the file.
- const unsigned char *BufBeg = (const unsigned char*)File->getBufferStart();
- const unsigned char *BufEnd = (const unsigned char*)File->getBufferEnd();
-
- // Check the prologue of the file.
- if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) ||
- memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth")) != 0) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Read the PTH version.
- const unsigned char *p = BufBeg + (sizeof("cfe-pth"));
- unsigned Version = endian::readNext<uint32_t, little, aligned>(p);
-
- if (Version < PTHManager::Version) {
- InvalidPTH(Diags,
- Version < PTHManager::Version
- ? "PTH file uses an older PTH format that is no longer supported"
- : "PTH file uses a newer PTH format that cannot be read");
- return nullptr;
- }
-
- // Compute the address of the index table at the end of the PTH file.
- const unsigned char *PrologueOffset = p;
-
- if (PrologueOffset >= BufEnd) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Construct the file lookup table. This will be used for mapping from
- // FileEntry*'s to cached tokens.
- const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
- const unsigned char *FileTable =
- BufBeg + endian::readNext<uint32_t, little, aligned>(FileTableOffset);
-
- if (!(FileTable > BufBeg && FileTable < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr; // FIXME: Proper error diagnostic?
- }
-
- std::unique_ptr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
-
- // Warn if the PTH file is empty. We still want to create a PTHManager
- // as the PTH could be used with -include-pth.
- if (FL->isEmpty())
- InvalidPTH(Diags, "PTH file contains no cached source data");
-
- // Get the location of the table mapping from persistent ids to the
- // data needed to reconstruct identifiers.
- const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
- const unsigned char *IData =
- BufBeg + endian::readNext<uint32_t, little, aligned>(IDTableOffset);
-
- if (!(IData >= BufBeg && IData < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Get the location of the hashtable mapping between strings and
- // persistent IDs.
- const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
- const unsigned char *StringIdTable =
- BufBeg + endian::readNext<uint32_t, little, aligned>(StringIdTableOffset);
- if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- std::unique_ptr<PTHStringIdLookup> SL(
- PTHStringIdLookup::Create(StringIdTable, BufBeg));
-
- // Get the location of the spelling cache.
- const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
- const unsigned char *spellingBase =
- BufBeg + endian::readNext<uint32_t, little, aligned>(spellingBaseOffset);
- if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Get the number of IdentifierInfos and pre-allocate the identifier cache.
- uint32_t NumIds = endian::readNext<uint32_t, little, aligned>(IData);
-
- // Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
- // so that we in the best case only zero out memory once when the OS returns
- // us new pages.
- std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> PerIDCache;
-
- if (NumIds) {
- PerIDCache.reset((IdentifierInfo **)calloc(NumIds, sizeof(PerIDCache[0])));
- if (!PerIDCache) {
- InvalidPTH(Diags, "Could not allocate memory for processing PTH file");
- return nullptr;
- }
- }
-
- // Compute the address of the original source file.
- const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
- unsigned len =
- endian::readNext<uint16_t, little, unaligned>(originalSourceBase);
- if (!len) originalSourceBase = nullptr;
-
- // Create the new PTHManager.
- return new PTHManager(std::move(File), std::move(FL), IData,
- std::move(PerIDCache), std::move(SL), NumIds,
- spellingBase, (const char *)originalSourceBase);
-}
-
-IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
- using namespace llvm::support;
-
- // Look in the PTH file for the string data for the IdentifierInfo object.
- const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
- const unsigned char *IDData =
- (const unsigned char *)Buf->getBufferStart() +
- endian::readNext<uint32_t, little, aligned>(TableEntry);
- assert(IDData < (const unsigned char*)Buf->getBufferEnd());
-
- // Allocate the object.
- std::pair<IdentifierInfo,const unsigned char*> *Mem =
- Alloc.Allocate<std::pair<IdentifierInfo, const unsigned char *>>();
-
- Mem->second = IDData;
- assert(IDData[0] != '\0');
- IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
-
- // Store the new IdentifierInfo in the cache.
- PerIDCache[PersistentID] = II;
- assert(II->getNameStart() && II->getNameStart()[0] != '\0');
- return II;
-}
-
-IdentifierInfo* PTHManager::get(StringRef Name) {
- // Double check our assumption that the last character isn't '\0'.
- assert(Name.empty() || Name.back() != '\0');
- PTHStringIdLookup::iterator I =
- StringIdLookup->find(std::make_pair(Name.data(), Name.size()));
- if (I == StringIdLookup->end()) // No identifier found?
- return nullptr;
-
- // Match found. Return the identifier!
- assert(*I > 0);
- return GetIdentifierInfo(*I-1);
-}
-
-PTHLexer *PTHManager::CreateLexer(FileID FID) {
- const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
- if (!FE)
- return nullptr;
-
- using namespace llvm::support;
-
- // Lookup the FileEntry object in our file lookup data structure. It will
- // return a variant that indicates whether or not there is an offset within
- // the PTH file that contains cached tokens.
- PTHFileLookup::iterator I = FileLookup->find(FE);
-
- if (I == FileLookup->end()) // No tokens available?
- return nullptr;
-
- const PTHFileData& FileData = *I;
-
- const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
- // Compute the offset of the token data within the buffer.
- const unsigned char* data = BufStart + FileData.getTokenOffset();
-
- // Get the location of pp-conditional table.
- const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
- uint32_t Len = endian::readNext<uint32_t, little, aligned>(ppcond);
- if (Len == 0) ppcond = nullptr;
-
- assert(PP && "No preprocessor set yet!");
- return new PTHLexer(*PP, FID, data, ppcond, *this);
-}
-
-//===----------------------------------------------------------------------===//
-// 'stat' caching.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class PTHStatData {
-public:
- uint64_t Size;
- time_t ModTime;
- llvm::sys::fs::UniqueID UniqueID;
- const bool HasData = false;
- bool IsDirectory;
-
- PTHStatData() = default;
- PTHStatData(uint64_t Size, time_t ModTime, llvm::sys::fs::UniqueID UniqueID,
- bool IsDirectory)
- : Size(Size), ModTime(ModTime), UniqueID(UniqueID), HasData(true),
- IsDirectory(IsDirectory) {}
-};
-
-class PTHStatLookupTrait : public PTHFileLookupCommonTrait {
-public:
- using external_key_type = StringRef; // const char*
- using data_type = PTHStatData;
-
- static internal_key_type GetInternalKey(StringRef path) {
- // The key 'kind' doesn't matter here because it is ignored in EqualKey.
- return std::make_pair((unsigned char) 0x0, path);
- }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- // When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
- // just the paths.
- return a.second == b.second;
- }
-
- static data_type ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
- if (k.first /* File or Directory */) {
- bool IsDirectory = true;
- if (k.first == 0x1 /* File */) {
- IsDirectory = false;
- d += 4 * 2; // Skip the first 2 words.
- }
-
- using namespace llvm::support;
-
- uint64_t File = endian::readNext<uint64_t, little, unaligned>(d);
- uint64_t Device = endian::readNext<uint64_t, little, unaligned>(d);
- llvm::sys::fs::UniqueID UniqueID(Device, File);
- time_t ModTime = endian::readNext<uint64_t, little, unaligned>(d);
- uint64_t Size = endian::readNext<uint64_t, little, unaligned>(d);
- return data_type(Size, ModTime, UniqueID, IsDirectory);
- }
-
- // Negative stat. Don't read anything.
- return data_type();
- }
-};
-
-} // namespace
-
-namespace clang {
-
-class PTHStatCache : public FileSystemStatCache {
- using CacheTy = llvm::OnDiskChainedHashTable<PTHStatLookupTrait>;
-
- CacheTy Cache;
-
-public:
- PTHStatCache(PTHManager::PTHFileLookup &FL)
- : Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
- FL.getBase()) {}
-
- LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- vfs::FileSystem &FS) override {
- // Do the lookup for the file's data in the PTH file.
- CacheTy::iterator I = Cache.find(Path);
-
- // If we don't get a hit in the PTH file just forward to 'stat'.
- if (I == Cache.end())
- return statChained(Path, Data, isFile, F, FS);
-
- const PTHStatData &D = *I;
-
- if (!D.HasData)
- return CacheMissing;
-
- Data.Name = Path;
- Data.Size = D.Size;
- Data.ModTime = D.ModTime;
- Data.UniqueID = D.UniqueID;
- Data.IsDirectory = D.IsDirectory;
- Data.IsNamedPipe = false;
- Data.InPCH = true;
-
- return CacheExists;
- }
-};
-
-} // namespace clang
-
-std::unique_ptr<FileSystemStatCache> PTHManager::createStatCache() {
- return llvm::make_unique<PTHStatCache>(*FileLookup);
-}
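
For readers skimming the deletion above: the prologue that the removed PTHManager::Create() decoded can be summarized as below. This is a descriptive sketch reconstructed from the deleted code, not part of the patch; the struct and helper names are invented, and the struct only illustrates field order (the real reader computed byte offsets rather than overlaying a struct).

    #include "llvm/Support/Endian.h"
    #include <cstdint>

    // Sketch of the PTH prologue as parsed by the deleted code. All table
    // offsets are 32-bit little-endian values relative to the start of the
    // memory-mapped file.
    struct PTHPrologueSketch {
      char Magic[sizeof("cfe-pth")]; // "cfe-pth" including the trailing NUL
      uint32_t Version;              // checked against PTHManager::Version
      uint32_t IdDataTableOff;       // word 0: persistent id -> identifier data
      uint32_t StringIdTableOff;     // word 1: string -> persistent id hash table
      uint32_t FileTableOff;         // word 2: FileEntry -> token / pp-cond offsets
      uint32_t SpellingBaseOff;      // word 3: cached token spellings
      // word 4 is an inline record: a uint16 length followed by the original
      // source file name; a zero length means "no original source file".
    };

    // Every prologue word was pulled out with the same idiom the deleted code used:
    static uint32_t readPrologueWord(const unsigned char *&Cursor) {
      using namespace llvm::support;
      return endian::readNext<uint32_t, little, aligned>(Cursor);
    }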
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index 37c0a23646c5..575935119f6f 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -31,7 +31,6 @@
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
@@ -404,10 +403,7 @@ void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
void Preprocessor::HandlePragmaMark() {
assert(CurPPLexer && "No current lexer?");
- if (CurLexer)
- CurLexer->ReadToEndOfLine();
- else
- CurPTHLexer->DiscardToEndOfLine();
+ CurLexer->ReadToEndOfLine();
}
/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
@@ -810,12 +806,6 @@ void Preprocessor::HandlePragmaModuleBuild(Token &Tok) {
DiscardUntilEndOfDirective();
}
- if (CurPTHLexer) {
- // FIXME: Support this somehow?
- Diag(Loc, diag::err_pp_module_build_pth);
- return;
- }
-
CurLexer->LexingRawMode = true;
auto TryConsumeIdentifier = [&](StringRef Ident) -> bool {
@@ -876,6 +866,37 @@ void Preprocessor::HandlePragmaModuleBuild(Token &Tok) {
StringRef(Start, End - Start));
}
+void Preprocessor::HandlePragmaHdrstop(Token &Tok) {
+ Lex(Tok);
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok.getLocation(), diag::warn_pp_hdrstop_filename_ignored);
+
+ std::string FileName;
+ if (!LexStringLiteral(Tok, FileName, "pragma hdrstop", false))
+ return;
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ return;
+ }
+ Lex(Tok);
+ }
+ if (Tok.isNot(tok::eod))
+ Diag(Tok.getLocation(), diag::ext_pp_extra_tokens_at_eol)
+ << "pragma hdrstop";
+
+ if (creatingPCHWithPragmaHdrStop() &&
+ SourceMgr.isInMainFile(Tok.getLocation())) {
+ assert(CurLexer && "no lexer for #pragma hdrstop processing");
+ Token &Result = Tok;
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer->cutOffLexing();
+ }
+ if (usingPCHWithPragmaHdrStop())
+ SkippingUntilPragmaHdrStop = false;
+}
+
/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
/// If 'Namespace' is non-null, then it is a token required to exist on the
/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
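
To make the new HandlePragmaHdrstop() path concrete, here is a hedged sketch of the kind of translation unit it handles; the file names are invented and only illustrate the behavior implemented above (an optional parenthesized file name is accepted but, per the warning emitted above, ignored).

    // pch_user.cpp -- hypothetical example, not part of this patch.
    #include "common.h"    // headers before the pragma form the PCH portion
    #pragma hdrstop        // creating a PCH: lexing is cut off here (eof token);
                           // using a PCH: token skipping stops here
    // #pragma hdrstop("my.pch")  // the file-name form is parsed but ignored
    #include "local.h"     // compiled normally

    int main() { return 0; }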
@@ -1099,10 +1120,6 @@ struct PragmaDebugHandler : public PragmaHandler {
}
void HandleCaptured(Preprocessor &PP) {
- // Skip if emitting preprocessed output.
- if (PP.isPreprocessedOutput())
- return;
-
Token Tok;
PP.LexUnexpandedToken(Tok);
@@ -1220,6 +1237,15 @@ public:
}
};
+/// "\#pragma hdrstop [<header-name-string>]"
+struct PragmaHdrstopHandler : public PragmaHandler {
+ PragmaHdrstopHandler() : PragmaHandler("hdrstop") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) override {
+ PP.HandlePragmaHdrstop(DepToken);
+ }
+};
+
/// "\#pragma warning(...)". MSVC's diagnostics do not map cleanly to clang's
/// diagnostics, so we don't really implement this pragma. We parse it and
/// ignore it to avoid -Wunknown-pragma warnings.
@@ -1799,6 +1825,7 @@ void Preprocessor::RegisterBuiltinPragmas() {
if (LangOpts.MicrosoftExt) {
AddPragmaHandler(new PragmaWarningHandler());
AddPragmaHandler(new PragmaIncludeAliasHandler());
+ AddPragmaHandler(new PragmaHdrstopHandler());
}
// Pragmas added by plugins
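
The handler is registered only under LangOpts.MicrosoftExt, next to the other MSVC-compatibility pragmas. The same three-step pattern (subclass PragmaHandler, override HandlePragma, call AddPragmaHandler) is what an out-of-tree client would use; the sketch below is hypothetical, with an invented pragma name, and relies only on the API visible in this diff.

    #include "clang/Lex/Pragma.h"
    #include "clang/Lex/Preprocessor.h"

    // Hypothetical handler for "#pragma my_marker" following the same pattern
    // as PragmaHdrstopHandler above: it simply consumes the directive line.
    struct PragmaMyMarkerHandler : public clang::PragmaHandler {
      PragmaMyMarkerHandler() : PragmaHandler("my_marker") {}
      void HandlePragma(clang::Preprocessor &PP,
                        clang::PragmaIntroducerKind Introducer,
                        clang::Token &FirstToken) override {
        // Read and discard tokens up to the end-of-directive marker.
        clang::Token Tok;
        do {
          PP.LexUnexpandedToken(Tok);
        } while (Tok.isNot(clang::tok::eod));
      }
    };

    // Registration mirrors the hunk above, e.g. from a plugin:
    //   PP.AddPragmaHandler(new PragmaMyMarkerHandler());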
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index def47b2f1095..047a4caaca73 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -44,8 +44,6 @@
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
-#include "clang/Lex/PTHLexer.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/PreprocessorLexer.h"
@@ -149,6 +147,10 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
Ident_AbnormalTermination = nullptr;
}
+ // If using a PCH where a #pragma hdrstop is expected, start skipping tokens.
+ if (usingPCHWithPragmaHdrStop())
+ SkippingUntilPragmaHdrStop = true;
+
// If using a PCH with a through header, start skipping tokens.
if (!this->PPOpts->PCHThroughHeader.empty() &&
!this->PPOpts->ImplicitPCHInclude.empty())
@@ -220,11 +222,6 @@ void Preprocessor::FinalizeForModelFile() {
PragmaHandlers = std::move(PragmaHandlersBackup);
}
-void Preprocessor::setPTHManager(PTHManager* pm) {
- PTH.reset(pm);
- FileMgr.addStatCache(PTH->createStatCache());
-}
-
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
<< getSpelling(Tok) << "'";
@@ -250,7 +247,7 @@ void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
}
void Preprocessor::DumpLocation(SourceLocation Loc) const {
- Loc.dump(SourceMgr);
+ Loc.print(llvm::errs(), SourceMgr);
}
void Preprocessor::DumpMacro(const MacroInfo &MI) const {
@@ -375,8 +372,6 @@ StringRef Preprocessor::getLastMacroWithSpelling(
void Preprocessor::recomputeCurLexerKind() {
if (CurLexer)
CurLexerKind = CLK_Lexer;
- else if (CurPTHLexer)
- CurLexerKind = CLK_PTHLexer;
else if (CurTokenLexer)
CurLexerKind = CLK_TokenLexer;
else
@@ -441,6 +436,13 @@ bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
return false;
}
+void Preprocessor::CodeCompleteIncludedFile(llvm::StringRef Dir,
+ bool IsAngled) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteIncludedFile(Dir, IsAngled);
+ setCodeCompletionReached();
+}
+
void Preprocessor::CodeCompleteNaturalLanguage() {
if (CodeComplete)
CodeComplete->CodeCompleteNaturalLanguage();
@@ -576,8 +578,9 @@ void Preprocessor::EnterMainSourceFile() {
}
// Skip tokens from the Predefines and if needed the main file.
- if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader)
- SkipTokensUntilPCHThroughHeader();
+ if ((usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) ||
+ (usingPCHWithPragmaHdrStop() && SkippingUntilPragmaHdrStop))
+ SkipTokensWhileUsingPCH();
}
void Preprocessor::setPCHThroughHeaderFileID(FileID FID) {
@@ -602,12 +605,23 @@ bool Preprocessor::usingPCHWithThroughHeader() {
PCHThroughHeaderFileID.isValid();
}
-/// Skip tokens until after the #include of the through header.
-/// Tokens in the predefines file and the main file may be skipped. If the end
-/// of the predefines file is reached, skipping continues into the main file.
-/// If the end of the main file is reached, it's a fatal error.
-void Preprocessor::SkipTokensUntilPCHThroughHeader() {
+bool Preprocessor::creatingPCHWithPragmaHdrStop() {
+ return TUKind == TU_Prefix && PPOpts->PCHWithHdrStop;
+}
+
+bool Preprocessor::usingPCHWithPragmaHdrStop() {
+ return TUKind != TU_Prefix && PPOpts->PCHWithHdrStop;
+}
+
+/// Skip tokens until after the #include of the through header or
+/// until after a #pragma hdrstop is seen. Tokens in the predefines file
+/// and the main file may be skipped. If the end of the predefines file
+/// is reached, skipping continues into the main file. If the end of the
+/// main file is reached, it's a fatal error.
+void Preprocessor::SkipTokensWhileUsingPCH() {
bool ReachedMainFileEOF = false;
+ bool UsingPCHThroughHeader = SkippingUntilPCHThroughHeader;
+ bool UsingPragmaHdrStop = SkippingUntilPragmaHdrStop;
Token Tok;
while (true) {
bool InPredefines = (CurLexer->getFileID() == getPredefinesFileID());
@@ -616,12 +630,18 @@ void Preprocessor::SkipTokensUntilPCHThroughHeader() {
ReachedMainFileEOF = true;
break;
}
- if (!SkippingUntilPCHThroughHeader)
+ if (UsingPCHThroughHeader && !SkippingUntilPCHThroughHeader)
break;
+ if (UsingPragmaHdrStop && !SkippingUntilPragmaHdrStop)
+ break;
+ }
+ if (ReachedMainFileEOF) {
+ if (UsingPCHThroughHeader)
+ Diag(SourceLocation(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 1;
+ else if (!PPOpts->PCHWithHdrStopCreate)
+ Diag(SourceLocation(), diag::err_pp_pragma_hdrstop_not_seen);
}
- if (ReachedMainFileEOF)
- Diag(SourceLocation(), diag::err_pp_through_header_not_seen)
- << PPOpts->PCHThroughHeader << 1;
}
void Preprocessor::replayPreambleConditionalStack() {
@@ -848,9 +868,6 @@ void Preprocessor::Lex(Token &Result) {
case CLK_Lexer:
ReturnedToken = CurLexer->Lex(Result);
break;
- case CLK_PTHLexer:
- ReturnedToken = CurPTHLexer->Lex(Result);
- break;
case CLK_TokenLexer:
ReturnedToken = CurTokenLexer->Lex(Result);
break;
@@ -868,6 +885,7 @@ void Preprocessor::Lex(Token &Result) {
if (Result.is(tok::code_completion) && Result.getIdentifierInfo()) {
// Remember the identifier before code completion token.
setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
+ setCodeCompletionTokenRange(Result.getLocation(), Result.getEndLoc());
// Set IdentifierInfo to null to avoid confusing code that handles both
// identifiers and completion tokens.
Result.setIdentifierInfo(nullptr);
@@ -913,7 +931,7 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
// If we have a non-empty module path, load the named module.
if (!ModuleImportPath.empty()) {
// Under the Modules TS, the dot is just part of the module name, and not
- // a real hierarachy separator. Flatten such module names now.
+ // a real hierarchy separator. Flatten such module names now.
//
// FIXME: Is this the right level to be performing this transformation?
std::string FlatModuleName;
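
The comment fixed in this hunk describes flattening a dotted Modules TS import into a single module name. Purely as an illustration (the real code walks the preprocessor's ModuleImportPath rather than plain strings), the flattening amounts to re-joining the dotted pieces:

    #include <string>
    #include <vector>

    // Illustrative only: rejoin the pieces of a dotted import such as
    // {"std", "core", "io"} into the flat module name "std.core.io".
    static std::string flattenModuleName(const std::vector<std::string> &Pieces) {
      std::string Flat;
      for (const std::string &Piece : Pieces) {
        if (!Flat.empty())
          Flat += '.';
        Flat += Piece;
      }
      return Flat;
    }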
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
index ec73479cb54f..f810c28ccdf1 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
@@ -67,7 +67,7 @@ bool TokenConcatenation::IsIdentifierStringPrefix(const Token &Tok) const {
return IsStringPrefix(StringRef(PP.getSpelling(Tok)), LangOpts.CPlusPlus11);
}
-TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
+TokenConcatenation::TokenConcatenation(const Preprocessor &pp) : PP(pp) {
memset(TokenInfo, 0, sizeof(TokenInfo));
// These tokens have custom code in AvoidConcat.
@@ -126,7 +126,7 @@ TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
/// GetFirstChar - Get the first character of the token \arg Tok,
/// avoiding calls to getSpelling where possible.
-static char GetFirstChar(Preprocessor &PP, const Token &Tok) {
+static char GetFirstChar(const Preprocessor &PP, const Token &Tok) {
if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
// Avoid spelling identifiers, the most common form of token.
return II->getNameStart()[0];