about summary refs log tree commit diff
path: root/lib/Target/X86/AsmParser/X86AsmParser.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/X86/AsmParser/X86AsmParser.cpp')
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmParser.cpp  447
1 files changed, 340 insertions, 107 deletions
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 899b50d0f78f..95cbf46d37ed 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -1,17 +1,16 @@
//===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#include "InstPrinter/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
+#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCExpr.h"
#include "MCTargetDesc/X86TargetStreamer.h"
-#include "X86AsmInstrumentation.h"
+#include "TargetInfo/X86TargetInfo.h"
#include "X86AsmParserCommon.h"
#include "X86Operand.h"
#include "llvm/ADT/STLExtras.h"
@@ -71,9 +70,17 @@ static const char OpPrecedence[] = {
class X86AsmParser : public MCTargetAsmParser {
ParseInstructionInfo *InstInfo;
- std::unique_ptr<X86AsmInstrumentation> Instrumentation;
bool Code16GCC;
+ enum VEXEncoding {
+ VEXEncoding_Default,
+ VEXEncoding_VEX2,
+ VEXEncoding_VEX3,
+ VEXEncoding_EVEX,
+ };
+
+ VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
+
private:
SMLoc consumeToken() {
MCAsmParser &Parser = getParser();
@@ -90,13 +97,14 @@ private:
}
unsigned MatchInstruction(const OperandVector &Operands, MCInst &Inst,
- uint64_t &ErrorInfo, bool matchingInlineAsm,
- unsigned VariantID = 0) {
+ uint64_t &ErrorInfo, FeatureBitset &MissingFeatures,
+ bool matchingInlineAsm, unsigned VariantID = 0) {
// In Code16GCC mode, match as 32-bit.
if (Code16GCC)
SwitchMode(X86::Mode32Bit);
unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
- matchingInlineAsm, VariantID);
+ MissingFeatures, matchingInlineAsm,
+ VariantID);
if (Code16GCC)
SwitchMode(X86::Mode16Bit);
return rv;
@@ -840,6 +848,8 @@ private:
const SMLoc &StartLoc,
SMLoc &EndLoc);
+ X86::CondCode ParseConditionCode(StringRef CCode);
+
bool ParseIntelMemoryOperandSize(unsigned &Size);
std::unique_ptr<X86Operand>
CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
@@ -860,6 +870,8 @@ private:
bool parseDirectiveFPOEndProc(SMLoc L);
bool parseDirectiveFPOData(SMLoc L);
+ unsigned checkTargetMatchPredicate(MCInst &Inst) override;
+
bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops);
@@ -875,7 +887,7 @@ private:
void MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op, OperandVector &Operands,
MCStreamer &Out, bool MatchingInlineAsm);
- bool ErrorMissingFeature(SMLoc IDLoc, uint64_t ErrorInfo,
+ bool ErrorMissingFeature(SMLoc IDLoc, const FeatureBitset &MissingFeatures,
bool MatchingInlineAsm);
bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
@@ -914,7 +926,7 @@ private:
MCSubtargetInfo &STI = copySTI();
FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
- uint64_t FB = ComputeAvailableFeatures(
+ FeatureBitset FB = ComputeAvailableFeatures(
STI.ToggleFeature(OldMode.flip(mode)));
setAvailableFeatures(FB);
@@ -941,6 +953,9 @@ private:
/// }
public:
+ enum X86MatchResultTy {
+ Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
+ };
X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
const MCInstrInfo &mii, const MCTargetOptions &Options)
@@ -951,14 +966,10 @@ public:
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
- Instrumentation.reset(
- CreateX86AsmInstrumentation(Options, Parser.getContext(), STI));
}
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
- void SetFrameRegister(unsigned RegNo) override;
-
bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
@@ -1115,8 +1126,7 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
}
// Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
- if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
- RegNo = X86::ST0;
+ if (RegNo == X86::ST0) {
Parser.Lex(); // Eat 'st'
// Check to see if we have '(4)' after %st.
@@ -1194,10 +1204,6 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
return false;
}
-void X86AsmParser::SetFrameRegister(unsigned RegNo) {
- Instrumentation->SetInitialFrameRegister(RegNo);
-}
-
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
bool Parse32 = is32BitMode() || Code16GCC;
unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);
@@ -1656,6 +1662,8 @@ X86AsmParser::ParseRoundingModeOp(SMLoc Start) {
const AsmToken &Tok = Parser.getTok();
// Eat "{" and mark the current place.
const SMLoc consumedToken = consumeToken();
+ if (Tok.isNot(AsmToken::Identifier))
+ return ErrorOperand(Tok.getLoc(), "Expected an identifier after {");
if (Tok.getIdentifier().startswith("r")){
int rndMode = StringSwitch<int>(Tok.getIdentifier())
.Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
@@ -1999,6 +2007,29 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
}
}
+// X86::COND_INVALID if not a recognized condition code or alternate mnemonic,
+// otherwise the EFLAGS Condition Code enumerator.
+X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) {
+ return StringSwitch<X86::CondCode>(CC)
+ .Case("o", X86::COND_O) // Overflow
+ .Case("no", X86::COND_NO) // No Overflow
+ .Cases("b", "nae", X86::COND_B) // Below/Neither Above nor Equal
+ .Cases("ae", "nb", X86::COND_AE) // Above or Equal/Not Below
+ .Cases("e", "z", X86::COND_E) // Equal/Zero
+ .Cases("ne", "nz", X86::COND_NE) // Not Equal/Not Zero
+ .Cases("be", "na", X86::COND_BE) // Below or Equal/Not Above
+ .Cases("a", "nbe", X86::COND_A) // Above/Neither Below nor Equal
+ .Case("s", X86::COND_S) // Sign
+ .Case("ns", X86::COND_NS) // No Sign
+ .Cases("p", "pe", X86::COND_P) // Parity/Parity Even
+ .Cases("np", "po", X86::COND_NP) // No Parity/Parity Odd
+ .Cases("l", "nge", X86::COND_L) // Less/Neither Greater nor Equal
+ .Cases("ge", "nl", X86::COND_GE) // Greater or Equal/Not Less
+ .Cases("le", "ng", X86::COND_LE) // Less or Equal/Not Greater
+ .Cases("g", "nle", X86::COND_G) // Greater/Neither Less nor Equal
+ .Default(X86::COND_INVALID);
+}
+
// true on failure, false otherwise
// If no {z} mark was found - Parser doesn't advance
bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
@@ -2305,18 +2336,64 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) {
MCAsmParser &Parser = getParser();
InstInfo = &Info;
+
+ // Reset the forced VEX encoding.
+ ForcedVEXEncoding = VEXEncoding_Default;
+
+ // Parse pseudo prefixes.
+ while (1) {
+ if (Name == "{") {
+ if (getLexer().isNot(AsmToken::Identifier))
+ return Error(Parser.getTok().getLoc(), "Unexpected token after '{'");
+ std::string Prefix = Parser.getTok().getString().lower();
+ Parser.Lex(); // Eat identifier.
+ if (getLexer().isNot(AsmToken::RCurly))
+ return Error(Parser.getTok().getLoc(), "Expected '}'");
+ Parser.Lex(); // Eat curly.
+
+ if (Prefix == "vex2")
+ ForcedVEXEncoding = VEXEncoding_VEX2;
+ else if (Prefix == "vex3")
+ ForcedVEXEncoding = VEXEncoding_VEX3;
+ else if (Prefix == "evex")
+ ForcedVEXEncoding = VEXEncoding_EVEX;
+ else
+ return Error(NameLoc, "unknown prefix");
+
+ NameLoc = Parser.getTok().getLoc();
+ if (getLexer().is(AsmToken::LCurly)) {
+ Parser.Lex();
+ Name = "{";
+ } else {
+ if (getLexer().isNot(AsmToken::Identifier))
+ return Error(Parser.getTok().getLoc(), "Expected identifier");
+ // FIXME: The mnemonic won't match correctly if its not in lower case.
+ Name = Parser.getTok().getString();
+ Parser.Lex();
+ }
+ continue;
+ }
+
+ break;
+ }
+
StringRef PatchedName = Name;
- if ((Name.equals("jmp") || Name.equals("jc") || Name.equals("jz")) &&
- isParsingIntelSyntax() && isParsingInlineAsm()) {
+ // Hack to skip "short" following Jcc.
+ if (isParsingIntelSyntax() &&
+ (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
+       PatchedName == "jcxz" || PatchedName == "jecxz" ||
+ (PatchedName.startswith("j") &&
+ ParseConditionCode(PatchedName.substr(1)) != X86::COND_INVALID))) {
StringRef NextTok = Parser.getTok().getString();
if (NextTok == "short") {
SMLoc NameEndLoc =
NameLoc.getFromPointer(NameLoc.getPointer() + Name.size());
- // Eat the short keyword
+ // Eat the short keyword.
Parser.Lex();
- // MS ignores the short keyword, it determines the jmp type based
- // on the distance of the label
+ // MS and GAS ignore the short keyword; they both determine the jmp type
+ // based on the distance of the label. (NASM does emit different code with
+ // and without "short," though.)
InstInfo->AsmRewrites->emplace_back(AOK_Skip, NameEndLoc,
NextTok.size() + 1);
}
@@ -2327,13 +2404,15 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
PatchedName != "setb" && PatchedName != "setnb")
PatchedName = PatchedName.substr(0, Name.size()-1);
+ unsigned ComparisonPredicate = ~0U;
+
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
(PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
bool IsVCMP = PatchedName[0] == 'v';
unsigned CCIdx = IsVCMP ? 4 : 3;
- unsigned ComparisonCode = StringSwitch<unsigned>(
+ unsigned CC = StringSwitch<unsigned>(
PatchedName.slice(CCIdx, PatchedName.size() - 2))
.Case("eq", 0x00)
.Case("eq_oq", 0x00)
@@ -2383,26 +2462,29 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
.Case("gt_oq", 0x1E)
.Case("true_us", 0x1F)
.Default(~0U);
- if (ComparisonCode != ~0U && (IsVCMP || ComparisonCode < 8)) {
-
- Operands.push_back(X86Operand::CreateToken(PatchedName.slice(0, CCIdx),
- NameLoc));
-
- const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
- getParser().getContext());
- Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
+ if (CC != ~0U && (IsVCMP || CC < 8)) {
+ if (PatchedName.endswith("ss"))
+ PatchedName = IsVCMP ? "vcmpss" : "cmpss";
+ else if (PatchedName.endswith("sd"))
+ PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
+ else if (PatchedName.endswith("ps"))
+ PatchedName = IsVCMP ? "vcmpps" : "cmpps";
+ else if (PatchedName.endswith("pd"))
+ PatchedName = IsVCMP ? "vcmppd" : "cmppd";
+ else
+ llvm_unreachable("Unexpected suffix!");
- PatchedName = PatchedName.substr(PatchedName.size() - 2);
+ ComparisonPredicate = CC;
}
}
// FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
if (PatchedName.startswith("vpcmp") &&
- (PatchedName.endswith("b") || PatchedName.endswith("w") ||
- PatchedName.endswith("d") || PatchedName.endswith("q"))) {
- unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
- unsigned ComparisonCode = StringSwitch<unsigned>(
- PatchedName.slice(5, PatchedName.size() - CCIdx))
+ (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
+ PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
+ unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
+ unsigned CC = StringSwitch<unsigned>(
+ PatchedName.slice(5, PatchedName.size() - SuffixSize))
.Case("eq", 0x0) // Only allowed on unsigned. Checked below.
.Case("lt", 0x1)
.Case("le", 0x2)
@@ -2412,24 +2494,26 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
.Case("nle", 0x6)
//.Case("true", 0x7) // Not a documented alias.
.Default(~0U);
- if (ComparisonCode != ~0U && (ComparisonCode != 0 || CCIdx == 2)) {
- Operands.push_back(X86Operand::CreateToken("vpcmp", NameLoc));
-
- const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
- getParser().getContext());
- Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
-
- PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
+ if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
+ switch (PatchedName.back()) {
+ default: llvm_unreachable("Unexpected character!");
+ case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
+ case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
+ case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
+ case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
+ }
+ // Set up the immediate to push into the operands later.
+ ComparisonPredicate = CC;
}
}
// FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
if (PatchedName.startswith("vpcom") &&
- (PatchedName.endswith("b") || PatchedName.endswith("w") ||
- PatchedName.endswith("d") || PatchedName.endswith("q"))) {
- unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
- unsigned ComparisonCode = StringSwitch<unsigned>(
- PatchedName.slice(5, PatchedName.size() - CCIdx))
+ (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
+ PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
+ unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
+ unsigned CC = StringSwitch<unsigned>(
+ PatchedName.slice(5, PatchedName.size() - SuffixSize))
.Case("lt", 0x0)
.Case("le", 0x1)
.Case("gt", 0x2)
@@ -2439,14 +2523,16 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
.Case("false", 0x6)
.Case("true", 0x7)
.Default(~0U);
- if (ComparisonCode != ~0U) {
- Operands.push_back(X86Operand::CreateToken("vpcom", NameLoc));
-
- const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
- getParser().getContext());
- Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
-
- PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
+ if (CC != ~0U) {
+ switch (PatchedName.back()) {
+ default: llvm_unreachable("Unexpected character!");
+ case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
+ case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
+ case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
+ case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
+ }
+ // Set up the immediate to push into the operands later.
+ ComparisonPredicate = CC;
}
}
@@ -2489,6 +2575,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Flags = X86::IP_NO_PREFIX;
break;
}
+ // FIXME: The mnemonic won't match correctly if its not in lower case.
Name = Parser.getTok().getString();
Parser.Lex(); // eat the prefix
// Hack: we could have something like "rep # some comment" or
@@ -2496,6 +2583,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
while (Name.startswith(";") || Name.startswith("\n") ||
Name.startswith("#") || Name.startswith("\t") ||
Name.startswith("/")) {
+ // FIXME: The mnemonic won't match correctly if its not in lower case.
Name = Parser.getTok().getString();
Parser.Lex(); // go to next prefix or instr
}
@@ -2519,6 +2607,13 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
+ // Push the immediate if we extracted one from the mnemonic.
+ if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
+ const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
+ getParser().getContext());
+ Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
+ }
+
// This does the actual operand parsing. Don't parse any more if we have a
// prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
// just want to parse the "lock" as the first instruction and the "incl" as
@@ -2553,6 +2648,13 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
return TokError("unexpected token in argument list");
}
+ // Push the immediate if we extracted one from the mnemonic.
+ if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
+ const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
+ getParser().getContext());
+ Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
+ }
+
// Consume the EndOfStatement or the prefix separator Slash
if (getLexer().is(AsmToken::EndOfStatement) ||
(isPrefix && getLexer().is(AsmToken::Slash)))
@@ -2576,13 +2678,13 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
}
- // Moving a 32 or 16 bit value into a segment register has the same
- // behavior. Modify such instructions to always take shorter form.
if ((Name == "mov" || Name == "movw" || Name == "movl") &&
(Operands.size() == 3)) {
X86Operand &Op1 = (X86Operand &)*Operands[1];
X86Operand &Op2 = (X86Operand &)*Operands[2];
SMLoc Loc = Op1.getEndLoc();
+ // Moving a 32 or 16 bit value into a segment register has the same
+ // behavior. Modify such instructions to always take shorter form.
if (Op1.isReg() && Op2.isReg() &&
X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
Op2.getReg()) &&
@@ -2759,7 +2861,69 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
- return false;
+ const MCRegisterInfo *MRI = getContext().getRegisterInfo();
+
+ switch (Inst.getOpcode()) {
+ default: return false;
+ case X86::VMOVZPQILo2PQIrr:
+ case X86::VMOVAPDrr:
+ case X86::VMOVAPDYrr:
+ case X86::VMOVAPSrr:
+ case X86::VMOVAPSYrr:
+ case X86::VMOVDQArr:
+ case X86::VMOVDQAYrr:
+ case X86::VMOVDQUrr:
+ case X86::VMOVDQUYrr:
+ case X86::VMOVUPDrr:
+ case X86::VMOVUPDYrr:
+ case X86::VMOVUPSrr:
+ case X86::VMOVUPSYrr: {
+ // We can get a smaller encoding by using VEX.R instead of VEX.B if one of
+ // the registers is extended, but other isn't.
+ if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
+ MRI->getEncodingValue(Inst.getOperand(0).getReg()) >= 8 ||
+ MRI->getEncodingValue(Inst.getOperand(1).getReg()) < 8)
+ return false;
+
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
+ case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
+ case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
+ case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
+ case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
+ case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
+ case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
+ case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
+ case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
+ case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
+ case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
+ case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
+ case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
+ }
+ Inst.setOpcode(NewOpc);
+ return true;
+ }
+ case X86::VMOVSDrr:
+ case X86::VMOVSSrr: {
+ // We can get a smaller encoding by using VEX.R instead of VEX.B if one of
+ // the registers is extended, but other isn't.
+ if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
+ MRI->getEncodingValue(Inst.getOperand(0).getReg()) >= 8 ||
+ MRI->getEncodingValue(Inst.getOperand(2).getReg()) < 8)
+ return false;
+
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
+ case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
+ }
+ Inst.setOpcode(NewOpc);
+ return true;
+ }
+ }
}
bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
@@ -2865,9 +3029,7 @@ static const char *getSubtargetFeatureName(uint64_t Val);
void X86AsmParser::EmitInstruction(MCInst &Inst, OperandVector &Operands,
MCStreamer &Out) {
- Instrumentation->InstrumentAndEmitInstruction(
- Inst, Operands, getContext(), MII, Out,
- getParser().shouldPrintSchedInfo());
+ Out.EmitInstruction(Inst, getSTI());
}
bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
@@ -2907,17 +3069,16 @@ void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
}
}
-bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc, uint64_t ErrorInfo,
+bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
+ const FeatureBitset &MissingFeatures,
bool MatchingInlineAsm) {
- assert(ErrorInfo && "Unknown missing feature!");
+ assert(MissingFeatures.any() && "Unknown missing feature!");
SmallString<126> Msg;
raw_svector_ostream OS(Msg);
OS << "instruction requires:";
- uint64_t Mask = 1;
- for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
- if (ErrorInfo & Mask)
- OS << ' ' << getSubtargetFeatureName(ErrorInfo & Mask);
- Mask <<= 1;
+ for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
+ if (MissingFeatures[i])
+ OS << ' ' << getSubtargetFeatureName(i);
}
return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
}
@@ -2932,30 +3093,70 @@ static unsigned getPrefixes(OperandVector &Operands) {
return Result;
}
+unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
+ unsigned Opc = Inst.getOpcode();
+ const MCInstrDesc &MCID = MII.get(Opc);
+
+ if (ForcedVEXEncoding == VEXEncoding_EVEX &&
+ (MCID.TSFlags & X86II::EncodingMask) != X86II::EVEX)
+ return Match_Unsupported;
+
+ if ((ForcedVEXEncoding == VEXEncoding_VEX2 ||
+ ForcedVEXEncoding == VEXEncoding_VEX3) &&
+ (MCID.TSFlags & X86II::EncodingMask) != X86II::VEX)
+ return Match_Unsupported;
+
+ // These instructions match ambiguously with their VEX encoded counterparts
+ // and appear first in the matching table. Reject them unless we're forcing
+ // EVEX encoding.
+ // FIXME: We really need a way to break the ambiguity.
+ switch (Opc) {
+ case X86::VCVTSD2SIZrm_Int:
+ case X86::VCVTSD2SI64Zrm_Int:
+ case X86::VCVTSS2SIZrm_Int:
+ case X86::VCVTSS2SI64Zrm_Int:
+ case X86::VCVTTSD2SIZrm: case X86::VCVTTSD2SIZrm_Int:
+ case X86::VCVTTSD2SI64Zrm: case X86::VCVTTSD2SI64Zrm_Int:
+ case X86::VCVTTSS2SIZrm: case X86::VCVTTSS2SIZrm_Int:
+ case X86::VCVTTSS2SI64Zrm: case X86::VCVTTSS2SI64Zrm_Int:
+ if (ForcedVEXEncoding != VEXEncoding_EVEX)
+ return Match_Unsupported;
+ }
+
+ return Match_Success;
+}
+
bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
assert(!Operands.empty() && "Unexpect empty operand list!");
- X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
- assert(Op.isToken() && "Leading operand should always be a mnemonic!");
+ assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
SMRange EmptyRange = None;
// First, handle aliases that expand to multiple instructions.
- MatchFPUWaitAlias(IDLoc, Op, Operands, Out, MatchingInlineAsm);
-
- bool WasOriginallyInvalidOperand = false;
+ MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
+ Out, MatchingInlineAsm);
+ X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
unsigned Prefixes = getPrefixes(Operands);
MCInst Inst;
+ // If VEX3 encoding is forced, we need to pass the USE_VEX3 flag to the
+ // encoder.
+ if (ForcedVEXEncoding == VEXEncoding_VEX3)
+ Prefixes |= X86::IP_USE_VEX3;
+
if (Prefixes)
Inst.setFlags(Prefixes);
// First, try a direct match.
- switch (MatchInstruction(Operands, Inst, ErrorInfo, MatchingInlineAsm,
- isParsingIntelSyntax())) {
+ FeatureBitset MissingFeatures;
+ unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
+ MissingFeatures, MatchingInlineAsm,
+ isParsingIntelSyntax());
+ switch (OriginalError) {
default: llvm_unreachable("Unexpected match result!");
case Match_Success:
if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
@@ -2973,13 +3174,17 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
Opcode = Inst.getOpcode();
return false;
case Match_MissingFeature:
- return ErrorMissingFeature(IDLoc, ErrorInfo, MatchingInlineAsm);
+ return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
case Match_InvalidOperand:
- WasOriginallyInvalidOperand = true;
- break;
case Match_MnemonicFail:
+ case Match_Unsupported:
break;
}
+ if (Op.getToken().empty()) {
+ Error(IDLoc, "instruction must have size higher than 0", EmptyRange,
+ MatchingInlineAsm);
+ return true;
+ }
// FIXME: Ideally, we would only attempt suffix matches for things which are
// valid prefixes, and we could just infer the right unambiguous
@@ -3003,16 +3208,17 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
// Check for the various suffix matches.
uint64_t ErrorInfoIgnore;
- uint64_t ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
+ FeatureBitset ErrorInfoMissingFeatures; // Init suppresses compiler warnings.
unsigned Match[4];
for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
Tmp.back() = Suffixes[I];
Match[I] = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
- MatchingInlineAsm, isParsingIntelSyntax());
+ MissingFeatures, MatchingInlineAsm,
+ isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match[I] == Match_MissingFeature)
- ErrorInfoMissingFeature = ErrorInfoIgnore;
+ ErrorInfoMissingFeatures = MissingFeatures;
}
// Restore the old token.
@@ -3062,11 +3268,15 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
// If all of the instructions reported an invalid mnemonic, then the original
// mnemonic was invalid.
if (std::count(std::begin(Match), std::end(Match), Match_MnemonicFail) == 4) {
- if (!WasOriginallyInvalidOperand) {
+ if (OriginalError == Match_MnemonicFail)
return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
Op.getLocRange(), MatchingInlineAsm);
- }
+ if (OriginalError == Match_Unsupported)
+ return Error(IDLoc, "unsupported instruction", EmptyRange,
+ MatchingInlineAsm);
+
+ assert(OriginalError == Match_InvalidOperand && "Unexpected error");
// Recover location info for the operand if we know which was the problem.
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size())
@@ -3085,12 +3295,19 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
MatchingInlineAsm);
}
+ // If one instruction matched as unsupported, report this as unsupported.
+ if (std::count(std::begin(Match), std::end(Match),
+ Match_Unsupported) == 1) {
+ return Error(IDLoc, "unsupported instruction", EmptyRange,
+ MatchingInlineAsm);
+ }
+
// If one instruction matched with a missing feature, report this as a
// missing feature.
if (std::count(std::begin(Match), std::end(Match),
Match_MissingFeature) == 1) {
- ErrorInfo = ErrorInfoMissingFeature;
- return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeature,
+ ErrorInfo = Match_MissingFeature;
+ return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
MatchingInlineAsm);
}
@@ -3114,18 +3331,23 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
assert(!Operands.empty() && "Unexpect empty operand list!");
- X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
- assert(Op.isToken() && "Leading operand should always be a mnemonic!");
- StringRef Mnemonic = Op.getToken();
+ assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
+ StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
SMRange EmptyRange = None;
- StringRef Base = Op.getToken();
+ StringRef Base = (static_cast<X86Operand &>(*Operands[0])).getToken();
unsigned Prefixes = getPrefixes(Operands);
// First, handle aliases that expand to multiple instructions.
- MatchFPUWaitAlias(IDLoc, Op, Operands, Out, MatchingInlineAsm);
+ MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands, Out, MatchingInlineAsm);
+ X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
MCInst Inst;
+ // If VEX3 encoding is forced, we need to pass the USE_VEX3 flag to the
+ // encoder.
+ if (ForcedVEXEncoding == VEXEncoding_VEX3)
+ Prefixes |= X86::IP_USE_VEX3;
+
if (Prefixes)
Inst.setFlags(Prefixes);
@@ -3154,7 +3376,8 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
}
SmallVector<unsigned, 8> Match;
- uint64_t ErrorInfoMissingFeature = 0;
+ FeatureBitset ErrorInfoMissingFeatures;
+ FeatureBitset MissingFeatures;
// If unsized push has immediate operand we should default the default pointer
// size for the size.
@@ -3174,7 +3397,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
Op.setTokenValue(Tmp);
// Do match in ATT mode to allow explicit suffix usage.
Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
- MatchingInlineAsm,
+ MissingFeatures, MatchingInlineAsm,
false /*isParsingIntelSyntax()*/));
Op.setTokenValue(Base);
}
@@ -3191,13 +3414,14 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
uint64_t ErrorInfoIgnore;
unsigned LastOpcode = Inst.getOpcode();
unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
- MatchingInlineAsm, isParsingIntelSyntax());
+ MissingFeatures, MatchingInlineAsm,
+ isParsingIntelSyntax());
if (Match.empty() || LastOpcode != Inst.getOpcode())
Match.push_back(M);
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
- ErrorInfoMissingFeature = ErrorInfoIgnore;
+ ErrorInfoMissingFeatures = MissingFeatures;
}
// Restore the size of the unsized memory operand if we modified it.
@@ -3209,10 +3433,11 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// matching with the unsized operand.
if (Match.empty()) {
Match.push_back(MatchInstruction(
- Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax()));
+ Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
+ isParsingIntelSyntax()));
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
- ErrorInfoMissingFeature = ErrorInfo;
+ ErrorInfoMissingFeatures = MissingFeatures;
}
// Restore the size of the unsized memory operand if we modified it.
@@ -3234,7 +3459,8 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
UnsizedMemOp->getMemFrontendSize()) {
UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
unsigned M = MatchInstruction(
- Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
+ Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
+ isParsingIntelSyntax());
if (M == Match_Success)
NumSuccessfulMatches = 1;
@@ -3270,12 +3496,19 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
UnsizedMemOp->getLocRange());
}
+ // If one instruction matched as unsupported, report this as unsupported.
+ if (std::count(std::begin(Match), std::end(Match),
+ Match_Unsupported) == 1) {
+ return Error(IDLoc, "unsupported instruction", EmptyRange,
+ MatchingInlineAsm);
+ }
+
// If one instruction matched with a missing feature, report this as a
// missing feature.
if (std::count(std::begin(Match), std::end(Match),
Match_MissingFeature) == 1) {
- ErrorInfo = ErrorInfoMissingFeature;
- return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeature,
+ ErrorInfo = Match_MissingFeature;
+ return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
MatchingInlineAsm);
}